kubernetes.io > Documentation > Concepts > Overview > Working with Kubernetes Objects > Labels and Selectors
show
kubectl run nginx1 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx2 --image=nginx --restart=Never --labels=app=v1
kubectl run nginx3 --image=nginx --restart=Never --labels=app=v1
# or
for i in `seq 1 3`; do kubectl run nginx$i --image=nginx -l app=v1 ; done
show
kubectl get po --show-labels
show
kubectl label po nginx2 app=v2 --overwrite
show
kubectl get po -L app
# or
kubectl get po --label-columns=app
show
kubectl get po -l app=v2
# or
kubectl get po -l 'app in (v2)'
# or
kubectl get po --selector=app=v2
show
kubectl label po -l "app in(v1,v2)" tier=web
show
kubectl annotate po -l "app=v2" owner=marketing
show
kubectl label po nginx1 nginx2 nginx3 app-
# or
kubectl label po nginx{1..3} app-
# or
kubectl label po -l app app-
show
Add the label to a node:
kubectl label nodes <your-node-name> accelerator=nvidia-tesla-p100
kubectl get nodes --show-labels
We can use the 'nodeSelector' property on the Pod YAML:
apiVersion: v1
kind: Pod
metadata:
  name: cuda-test
spec:
  containers:
  - name: cuda-test
    image: "k8s.gcr.io/cuda-vector-add:v0.1"
  nodeSelector: # add this
    accelerator: nvidia-tesla-p100 # the selection label
You can easily find out where in the YAML it should be placed by:
kubectl explain po.spec
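A minimal way to verify the result, assuming the manifest above is saved as pod.yaml (a filename chosen here for illustration):
kubectl apply -f pod.yaml
kubectl get po cuda-test -o wide # the NODE column shows where it was scheduled
kubectl get po cuda-test -o jsonpath='{.spec.nodeName}{"\n"}' # or print just the node name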
OR: Use node affinity (https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/#schedule-a-pod-using-required-node-affinity)
apiVersion: v1
kind: Pod
metadata:
  name: affinity-pod
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: accelerator
            operator: In
            values:
            - nvidia-tesla-p100
  containers:
  ...
show
kubectl annotate po nginx1 nginx2 nginx3 description='my description'
#or
kubectl annotate po nginx{1..3} description='my description'
show
kubectl annotate pod nginx1 --list
# or
kubectl describe po nginx1 | grep -i 'annotations'
# or
kubectl get po nginx1 -o custom-columns=Name:metadata.name,ANNOTATIONS:metadata.annotations.description
As an alternative to using | grep, you can use JSONPath:
kubectl get po nginx1 -o jsonpath='{.metadata.annotations}{"\n"}'
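If you only want the value of a single annotation, the same JSONPath approach can be scoped to that key (shown here for the 'description' annotation):
kubectl get po nginx1 -o jsonpath='{.metadata.annotations.description}{"\n"}'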
show
kubectl annotate po nginx{1..3} description-
show
kubectl delete po nginx{1..3}
kubernetes.io > Documentation > Concepts > Workloads > Workload Resources > Deployments
Create a deployment with image nginx:1.18.0, called nginx, having 2 replicas, defining port 80 as the port that this container exposes (don't create a service for this deployment)
show
kubectl create deployment nginx --image=nginx:1.18.0 --dry-run=client -o yaml > deploy.yaml
vi deploy.yaml
# change the replicas field from 1 to 2
# add this section to the container spec and save the deploy.yaml file
# ports:
# - containerPort: 80
kubectl apply -f deploy.yaml
or, do something like:
kubectl create deployment nginx --image=nginx:1.18.0 --dry-run=client -o yaml | sed 's/replicas: 1/replicas: 2/g' | sed 's/image: nginx:1.18.0/image: nginx:1.18.0\n        ports:\n        - containerPort: 80/g' | kubectl apply -f -
or,
kubectl create deploy nginx --image=nginx:1.18.0 --replicas=2 --port=80
show
kubectl get deploy nginx -o yaml
show
kubectl describe deploy nginx # you'll see the name of the replica set in the Events section and in the 'NewReplicaSet' property
# OR you can find rs directly by:
kubectl get rs -l run=nginx # if you created deployment by 'run' command
kubectl get rs -l app=nginx # if you created deployment by 'create' command
# you could also just do kubectl get rs
kubectl get rs nginx-7bf7478b77 -o yaml
show
kubectl get po # get all the pods
# OR you can find pods directly by:
kubectl get po -l run=nginx # if you created deployment by 'run' command
kubectl get po -l app=nginx # if you created deployment by 'create' command
kubectl get po nginx-7bf7478b77-gjzp8 -o yaml
show
kubectl rollout status deploy nginx
show
kubectl set image deploy nginx nginx=nginx:1.19.8
# alternatively...
kubectl edit deploy nginx # change the .spec.template.spec.containers[0].image
The syntax of the 'kubectl set image' command is kubectl set image (-f FILENAME | TYPE NAME) CONTAINER_NAME_1=CONTAINER_IMAGE_1 ... CONTAINER_NAME_N=CONTAINER_IMAGE_N [options]
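If the pod template had more than one container, you could update several images in a single call. A hypothetical example, assuming a second container named 'sidecar' existed in this deployment:
kubectl set image deploy nginx nginx=nginx:1.19.8 sidecar=busybox:1.36 # 'sidecar' is a hypothetical second container name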
show
kubectl rollout history deploy nginx
kubectl get deploy nginx
kubectl get rs # check that a new replica set has been created
kubectl get po
show
kubectl rollout undo deploy nginx
# wait a bit
kubectl get po # select one 'Running' Pod
kubectl describe po nginx-5ff4457d65-nslcl | grep -i image # should be nginx:1.18.0
show
kubectl set image deploy nginx nginx=nginx:1.91
# or
kubectl edit deploy nginx
# change the image to nginx:1.91
# vim tip: type (without quotes) '/image' and Enter, to navigate quickly
show
kubectl rollout status deploy nginx
# or
kubectl get po # you'll see 'ErrImagePull' or 'ImagePullBackOff'
show
kubectl rollout undo deploy nginx --to-revision=2
kubectl describe deploy nginx | grep Image:
kubectl rollout status deploy nginx # Everything should be OK
show
kubectl rollout history deploy nginx --revision=4 # You'll also see the wrong image displayed here
show
kubectl scale deploy nginx --replicas=5
kubectl get po
kubectl describe deploy nginx
show
kubectl autoscale deploy nginx --min=5 --max=10 --cpu-percent=80
# view the horizontalpodautoscalers.autoscaling for nginx
kubectl get hpa nginx
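For reference, a roughly equivalent declarative manifest using the autoscaling/v2 API. Note that CPU-based scaling assumes the nginx containers declare CPU requests and that metrics-server is running in the cluster:
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: nginx
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  minReplicas: 5
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80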
show
kubectl rollout pause deploy nginx
Update the image to nginx:1.19.9 and check that there's nothing going on, since we paused the rollout
show
kubectl set image deploy nginx nginx=nginx:1.19.9
# or
kubectl edit deploy nginx
# change the image to nginx:1.19.9
kubectl rollout history deploy nginx # no new revision
show
kubectl rollout resume deploy nginx
kubectl rollout history deploy nginx
kubectl rollout history deploy nginx --revision=6 # insert the number of your latest revision
show
kubectl delete deploy nginx
kubectl delete hpa nginx
#Or
kubectl delete deploy/nginx hpa/nginx
Implement a canary deployment by running two versions of nginx, labeled version=v1 and version=v2, so that the load is balanced at a 75%-25% ratio
show
Deploy 3 replicas of v1:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app-v1
  labels:
    app: my-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-app
      version: v1
  template:
    metadata:
      labels:
        app: my-app
        version: v1
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: workdir
          mountPath: /usr/share/nginx/html
      initContainers:
      - name: install
        image: busybox:1.28
        command:
        - /bin/sh
        - -c
        - "echo version-1 > /work-dir/index.html"
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
      volumes:
      - name: workdir
        emptyDir: {}
Create the service:
apiVersion: v1
kind: Service
metadata:
  name: my-app-svc
  labels:
    app: my-app
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
  selector:
    app: my-app
Test if the deployment was successful:
curl $(kubectl get svc my-app-svc -o jsonpath="{.spec.clusterIP}")
version-1
Deploy 1 replica of v2:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app-v2
  labels:
    app: my-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: my-app
      version: v2
  template:
    metadata:
      labels:
        app: my-app
        version: v2
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: workdir
          mountPath: /usr/share/nginx/html
      initContainers:
      - name: install
        image: busybox:1.28
        command:
        - /bin/sh
        - -c
        - "echo version-2 > /work-dir/index.html"
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
      volumes:
      - name: workdir
        emptyDir: {}
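Check that the service now fronts 3 v1 pods and 1 v2 pod, which is what produces the 75%-25% split:
kubectl get po -l app=my-app -L version # 3 pods with version=v1, 1 pod with version=v2
kubectl get endpoints my-app-svc # 4 endpoints behind the service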
Observe that, when calling the IP exposed by the service, requests are load balanced across the two versions:
while sleep 0.1; do curl $(kubectl get svc my-app-svc -o jsonpath="{.spec.clusterIP}"); done
version-1
version-1
version-1
version-2
version-2
version-1
If v2 is stable, scale it up to 4 replicas and shut down v1:
kubectl scale --replicas=4 deploy my-app-v2
kubectl delete deploy my-app-v1
while sleep 0.1; do curl $(kubectl get svc my-app-svc -o jsonpath="{.spec.clusterIP}"); done
version-2
version-2
version-2
version-2
version-2
version-2
Create a job named pi with image perl:5.34 that runs the command with arguments "perl -Mbignum=bpi -wle 'print bpi(2000)'"
show
kubectl create job pi --image=perl:5.34 -- perl -Mbignum=bpi -wle 'print bpi(2000)'
show
kubectl get jobs -w # wait till 'COMPLETIONS' shows 1/1 (will take some time, the perl image is fairly large)
kubectl get po # get the pod name
kubectl logs pi-**** # get the pi numbers
kubectl delete job pi
OR
kubectl get jobs -w # wait till 'COMPLETIONS' shows 1/1 (will take some time, the perl image is fairly large)
kubectl logs job/pi
kubectl delete job pi
OR
kubectl wait --for=condition=complete --timeout=300s job pi
kubectl logs job/pi
kubectl delete job pi
show
kubectl create job busybox --image=busybox -- /bin/sh -c 'echo hello;sleep 30;echo world'
show
kubectl get po # find the job pod
kubectl logs busybox-ptx58 -f # follow the logs
show
kubectl get jobs
kubectl describe jobs busybox
kubectl logs job/busybox
show
kubectl delete job busybox
Create a job but ensure that it will be automatically terminated by Kubernetes if it takes more than 30 seconds to execute
show
kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'while true; do echo hello; sleep 10;done' > job.yaml
vi job.yaml
Add job.spec.activeDeadlineSeconds=30
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  activeDeadlineSeconds: 30 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - while true; do echo hello; sleep 10;done
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
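A sketch for verifying the deadline: create the job and watch Kubernetes terminate it once 30 seconds have passed (the exact event wording may vary by cluster version):
kubectl create -f job.yaml
kubectl get job busybox -w # after ~30s the job stops making progress
kubectl describe job busybox | grep -i deadline # should mention DeadlineExceeded
kubectl delete job busybox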
show
kubectl create job busybox --image=busybox --dry-run=client -o yaml -- /bin/sh -c 'echo hello;sleep 30;echo world' > job.yaml
vi job.yaml
Add job.spec.completions=5
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  completions: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
kubectl create -f job.yaml
Verify that it has been completed:
kubectl get job busybox -w # will take two and a half minutes
kubectl delete jobs busybox
show
vi job.yaml
Add job.spec.parallelism=5
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: busybox
  name: busybox
spec:
  parallelism: 5 # add this line
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: busybox
    spec:
      containers:
      - args:
        - /bin/sh
        - -c
        - echo hello;sleep 30;echo world
        image: busybox
        name: busybox
        resources: {}
      restartPolicy: OnFailure
status: {}
kubectl create -f job.yaml
kubectl get jobs
It will take some time for the parallel jobs to finish (>= 30 seconds)
kubectl delete job busybox
kubernetes.io > Documentation > Tasks > Run Jobs > Running Automated Tasks with a CronJob
Create a cron job with image busybox that runs on a schedule of "*/1 * * * *" and writes 'date; echo Hello from the Kubernetes cluster' to standard output
show
kubectl create cronjob busybox --image=busybox --schedule="*/1 * * * *" -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster'
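To trigger a run immediately instead of waiting for the next scheduled minute, you can also create a one-off job from the cron job (the job name 'busybox-manual' is an arbitrary choice here):
kubectl create job busybox-manual --from=cronjob/busybox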
show
kubectl get po # copy the container just created
kubectl logs <container> # you will see the date and message
kubectl delete cj busybox --force # 'cj' is short for cronjob; --force bypasses graceful deletion
Create the same cron job again, and watch the status. Once it ran, check which job ran by the created cron job. Check the log, and delete the cron job
show
kubectl get cj
kubectl get jobs --watch
kubectl get po --show-labels # observe that the pods have a label that mentions their 'parent' job
kubectl logs busybox-1529745840-m867r
# Bear in mind that Kubernetes creates a new job (and pod) for every scheduled run of the cron job
kubectl delete cj busybox
Create a cron job with image busybox that runs every minute and writes 'date; echo Hello from the Kubernetes cluster' to standard output. The cron job should be terminated if it takes more than 17 seconds to start execution after its scheduled time (i.e. the job missed its scheduled time).
show
kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml
vi time-limited-job.yaml
Add cronjob.spec.startingDeadlineSeconds=17
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: null
  name: time-limited-job
spec:
  startingDeadlineSeconds: 17 # add this line
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: time-limited-job
    spec:
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
            image: busybox
            name: time-limited-job
            resources: {}
          restartPolicy: Never
  schedule: '* * * * *'
status: {}
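Then create it from the edited file, assuming it is still saved as time-limited-job.yaml, and confirm the field was picked up:
kubectl apply -f time-limited-job.yaml
kubectl get cj time-limited-job -o jsonpath='{.spec.startingDeadlineSeconds}{"\n"}' # should print 17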
Create a cron job with image busybox that runs every minute and writes 'date; echo Hello from the Kubernetes cluster' to standard output. The cron job should be terminated if it successfully starts but takes more than 12 seconds to complete execution.
show
kubectl create cronjob time-limited-job --image=busybox --restart=Never --dry-run=client --schedule="* * * * *" -o yaml -- /bin/sh -c 'date; echo Hello from the Kubernetes cluster' > time-limited-job.yaml
vi time-limited-job.yaml
Add cronjob.spec.jobTemplate.spec.activeDeadlineSeconds=12
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: null
  name: time-limited-job
spec:
  jobTemplate:
    metadata:
      creationTimestamp: null
      name: time-limited-job
    spec:
      activeDeadlineSeconds: 12 # add this line
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
          - args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
            image: busybox
            name: time-limited-job
            resources: {}
          restartPolicy: Never
  schedule: '* * * * *'
status: {}
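A quick check of the field after creating it, again assuming the file is saved as time-limited-job.yaml (the 'date; echo' command itself finishes almost instantly, so the deadline would only be visible with a longer-running command):
kubectl apply -f time-limited-job.yaml
kubectl get cj time-limited-job -o jsonpath='{.spec.jobTemplate.spec.activeDeadlineSeconds}{"\n"}' # should print 12
kubectl delete cj time-limited-job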