A cheat sheet for Kubernetes commands.
Linux
alias k="kubectl"
# trailing space lets `watch` expand the alias that follows it (e.g. `watch kg pods`)
alias watch="watch "
alias kg="kubectl get"
alias kgdep="kubectl get deployment"
alias ksys="kubectl --namespace=kube-system"
alias kd="kubectl describe"
alias bb="kubectl run busybox --image=busybox:1.30.1 --rm -it --restart=Never --command --"
Windows
Set-Alias -Name k -Value kubectl
- Get clusters
kubectl config get-clusters
NAME
docker-for-desktop-cluster
foo
- Get cluster info.
kubectl cluster-info
Kubernetes master is running at https://172.17.0.58:8443
A context is a cluster, namespace and user.
- Get a list of contexts.
kubectl config get-contexts
CURRENT NAME CLUSTER AUTHINFO NAMESPACE
docker-desktop docker-desktop docker-desktop
* foo foo foo bar
- Get the current context.
kubectl config current-context
foo
- Switch current context.
kubectl config use-context docker-desktop
- Set default namespace
kubectl config set-context $(kubectl config current-context) --namespace=my-namespace
To switch between contexts, you can also install and use kubectx.
For better security, add the following securityContext settings to your manifest:
securityContext:
# Blocking Root Containers
runAsNonRoot: true
# Setting a Read-Only Filesystem
readOnlyRootFilesystem: true
# Disabling Privilege Escalation
allowPrivilegeEscalation: false
# For maximum security, you should drop all capabilities, and only add specific capabilities if they’re needed:
capabilities:
drop: ["ALL"]
add: ["NET_BIND_SERVICE"]
# generate a kubernetes tls file
kubectl create secret tls keycloak-secrets-tls \
--key tls.key --cert tls.crt \
-o yaml --dry-run=client > 02-keycloak-secrets-tls.yml
kubectl cluster-info
kubectl config current-context
kubectl config get-contexts
kubectl config use-context docker-desktop
kubectl config view
kubectl port-forward service/ok 8080:8080 8081:80 -n the-project
kubectl version
#nested kubectl commands
kubectl -n istio-system port-forward $(kubectl -n istio-system get pod -l app=servicegraph -o jsonpath='{.items[0].metadata.name}') 8082:8088
#Execute commands in running Pods
kubectl exec -it my-pod-name -- /bin/sh
kubectl get all
kubectl get configmaps
kubectl get ep
kubectl get ep kube-dns --namespace=kube-system
kubectl get endpoints kuard
kubectl get replicaset
kubectl get daemonset
kubectl get pv
kubectl get pvc
kubectl get cronjobs
kubectl get namespaces
kubectl get nodes
kubectl get persistentvolume
kubectl get PersistentVolumeClaim --namespace default
kubectl get pods
kubectl get pods --namespace kube-system
kubectl get rs
kubectl get serviceaccount
kubectl get storageclass
kubectl get svc kuard
Additional switches that can be added to the above commands:
-o wide
- Show more information.
--watch or -w
- Watch for changes.
# Left bottom screen was running:
watch kubectl get pods
# Right bottom screen was running:
watch "kubectl get events --sort-by='{.lastTimestamp}' | tail -6"
--namespace
- Get a resource for a specific namespace.
You can set the default namespace for the current context like so:
kubectl config set-context $(kubectl config current-context) --namespace=my-namespace
# Assign dev context to development namespace
kubectl config set-context dev --namespace=dev --cluster=minikube --user=minikube
# Assign qa context to QA namespace
kubectl config set-context qa --namespace=qa --cluster=minikube --user=minikube
# Assign prod context to production namespace
kubectl config set-context prod --namespace=prod --cluster=minikube --user=minikube
# List contexts
kubectl config get-contexts
# Switch to Dev context
kubectl config use-context dev
# Switch to QA context
kubectl config use-context qa
# Switch to Prod context
kubectl config use-context prod
kubectl config current-context
To switch namespaces, you can also install and use kubens.
- Get pods showing labels.
kubectl get pods --show-labels
- Get pods by label.
kubectl get pods -l environment=production,tier!=frontend
kubectl get pods -l 'environment in (production,test),tier notin (frontend,backend)'
kubectl describe nodes [id]
kubectl describe pods [id]
kubectl describe rs [id]
kubectl describe svc kuard [id]
kubectl describe endpoints kuard [id]
# Delete resources under a namespace
kubectl -n my-ns delete po,svc --all
# Delete deployments by labels
kubectl delete deployment -l app=wordpress
kubectl delete endpoints kuard [id]
kubectl delete nodes [id]
kubectl delete pods [id]
kubectl delete pod -l env=test
# Delete all resources filtered by labels
kubectl delete pods,services -l name=myLabel
# delete persist volumes by label
# Delete pods by labels
kubectl delete pvc -l app=wordpress
kubectl delete rs [id]
# Delete statefulset only (not pods)
kubectl delete sts/<stateful_set_name> --cascade=orphan
kubectl delete svc kuard [id]
Force a deletion of a pod without waiting for it to gracefully shut down
kubectl delete pod pod-name --grace-period=0 --force
kubectl create
can be used to create new resources while kubectl apply
inserts or updates resources while maintaining any manual changes made like scaling pods.
--record
- Add the current command as an annotation to the resource.
--recursive
- Recursively look for yaml in the specified directory.
kubectl run kuard --generator=run-pod/v1 --image=gcr.io/kuar-demo/kuard-amd64:1 --output yaml --export --dry-run > kuard-pod.yml
kubectl apply -f kuard-pod.yml
kubectl run kuard --image=gcr.io/kuar-demo/kuard-amd64:1 --output yaml --export --dry-run > kuard-deployment.yml
kubectl apply -f kuard-deployment.yml
kubectl expose deployment kuard --port 8080 --target-port=8080 --output yaml --export --dry-run > kuard-service.yml
kubectl apply -f kuard-service.yml
# Execute kubectl command for creating namespaces
# Namespace for Developers
kubectl create -f namespace-dev.json
# Namespace for Testers
kubectl create -f namespace-qa.json
# Namespace for Production
kubectl create -f namespace-prod.json
kubectl run my-cool-app --image=me/my-cool-app:v1 --output yaml --export --dry-run > my-cool-app.yaml
kubectl get deployment my-cool-app --output yaml --export > my-cool-app.yaml
- Get logs.
kubectl logs -l app=kuard
# get all the logs for a given pod:
kubectl logs my-pod-name
# keep monitoring the logs
kubectl logs -f my-pod-name
# Or if you have multiple containers in the same pod, you can do:
kubectl logs -f my-pod-name -c internal-container-name
# This allows users to view the diff between a locally declared object configuration and the current state of a live object.
kubectl alpha diff -f mything.yml
- Get logs for previously terminated container.
kubectl logs POD_NAME --previous
- Watch logs in real time.
kubectl attach POD_NAME
- Copy files out of pod (Requires
tar
binary in container).
kubectl cp POD_NAME:/var/log .
You can also install and use kail.
kubectl port-forward deployment/kuard 8080:8080
Redeploy newly built image to existing k8s deployment
BUILD_NUMBER=1.5.0-SNAPSHOT // GIT_SHORT_SHA
kubectl diff -f sample-app-deployment.yaml
kubectl -n=staging set image -f sample-app-deployment.yaml sample-app=xmlking/ngxapp:$BUILD_NUMBER
- Update replicas.
kubectl scale deployment nginx-deployment --replicas=10
- Set autoscaling config.
kubectl autoscale deployment nginx-deployment --min=10 --max=15 --cpu-percent=80
- Get rollout status.
kubectl rollout status deployment/nginx-deployment
Waiting for rollout to finish: 2 out of 3 new replicas have been updated...
deployment "nginx-deployment" successfully rolled out
Once you run kubectl apply -f manifest.yml
# To get all the deploys of a deployment, you can do:
kubectl rollout history deployment/DEPLOYMENT-NAME
# Once you know which deploy you’d like to roll back to, you can run the following command (given you’d like to roll back to the 100th deploy):
kubectl rollout undo deployment/DEPLOYMENT_NAME --to-revision=100
# If you’d like to roll back the last deploy, you can simply do:
kubectl rollout undo deployment/DEPLOYMENT_NAME
- Get rollout history.
kubectl rollout history deployment/nginx-deployment
kubectl rollout history deployment/nginx-deployment --revision=2
- Undo a rollout.
kubectl rollout undo deployment/nginx-deployment
kubectl rollout undo deployment/nginx-deployment --to-revision=2
- Pause/resume a rollout
kubectl rollout pause deployment/nginx-deployment
kubectl rollout resume deploy/nginx-deployment
apiVersion: v1
kind: Pod
metadata:
name: cuda-test
spec:
containers:
- name: cuda-test
image: "k8s.gcr.io/cuda-vector-add:v0.1"
resources:
limits:
nvidia.com/gpu: 1
nodeSelector:
accelerator: nvidia-tesla-p100
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
namespace: my-namespace
labels:
environment: production
tier: frontend
annotations:
key1: value1
key2: value2
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
ports:
- containerPort: 80
- Enable proxy
- kubectl proxy creates proxy server between your machine and Kubernetes API server. By default it is only accessible locally (from the machine that started it).
kubectl proxy
kubectl proxy --port=8080
curl http://localhost:8080/api/
curl http://localhost:8080/api/v1/namespaces/default/pods
- Check if RBAC is enabled
kubectl api-versions | grep rbac.authorization.k8s
- If it's not enabled
kube-apiserver --authorization-mode=RBAC
- Create Test User and describe its data
kubectl create serviceaccount demo
kubectl describe serviceaccount demo
- Create Token for the specified serviceaccount
kubectl create token demo
- Use the Token and switch Context
TOKEN=$(kubectl describe secret demo-token-znwmb | grep token: | awk '{print $2}')
kubectl config set-credentials demo --token=$TOKEN
kubectl config set-context demo --cluster=kubernetes --user=demo
kubectl config use-context demo
- If you try to get pods or anything else from the cluster, it will fail as no role actually exists for this SA
- Creating a Role Switch back to your original context before continuing, so you regain your administrative privileges:
kubectl config use-context default
Sample Role/RoleBinding
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: default
name: Developer
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["create", "get", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
namespace: default
name: DeveloperRoleBinding
subjects:
- kind: ServiceAccount
name: demo
apiGroup: ""
roleRef:
kind: Role
name: Developer
apiGroup: ""
- Save the above code to a YML, afterwards:
kubectl apply -f file.yml
- After switching back to the demo context, you should now be able to get pods, as well as create new ones
- Create Private Key and generate a signing request (CSR)
openssl genrsa -out myuser.key 2048
openssl req -new -key myuser.key -out myuser.csr
- Create Signing Request using API, approve, and get it and use it
kubectl apply -f - <<EOF
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: myuser
spec:
request: $(cat myuser.csr | base64 | tr -d '\n')
signerName: kubernetes.io/kube-apiserver-client
usages: ['digital signature', 'key encipherment',
'client auth']
EOF
#get list of csrs
kubectl get csr
kubectl certificate approve myuser
#get the cert
kubectl get csr myuser -o jsonpath='{.status.certificate}'| base64 -d > myuser.crt
#create role and binding
kubectl create role developer --verb=create --verb=get --verb=list --verb=update --verb=delete --resource=pods
kubectl create rolebinding developer-binding-myuser --role=developer --user=myuser
kubectl config set-credentials myuser --client-key=myuser.key --client-certificate=myuser.crt --embed-certs=true
kubectl config set-context myuser --cluster=kubernetes --user=myuser
kubectl config use-context myuser
- Sample RBAC Yaml for SuperUser (DO NOT USE IN PROD)
#kubectl create role developer --verb=create --verb=get --verb=list --verb=update --verb=delete --resource=pods
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: myuser
namespace: default
rules:
- apiGroups:
- ""
resources:
- '*'
verbs:
- '*'
---
#kubectl create rolebinding developer-binding-myuser --role=developer --user=myuser
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: myuser
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: myuser
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: myuser
---
### DO NOT USE IN PRODUCTION ###
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
# "namespace" omitted since ClusterRoles are not namespaced
name: myuser
rules:
- apiGroups: [""]
#
# at the HTTP level, the name of the resource for accessing Secret
# objects is "secrets"
resources: ["*"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
# This cluster role binding allows anyone in the "manager" group to read secrets in any namespace.
kind: ClusterRoleBinding
metadata:
name: myuser-global
subjects:
- kind: User
name: myuser # Name is case sensitive
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: myuser
apiGroup: rbac.authorization.k8s.io
az aks get-credentials --resource-group <Resource Group Name> --name <AKS Name>
Secure the dashboard like this. Then run:
az aks browse --resource-group <Resource Group Name> --name <AKS Name>
Get updates
az aks get-upgrades --resource-group <Resource Group Name> --name <AKS Name>
For many steps here you will want to see what a Pod running in the k8s cluster sees. The simplest way to do this is to run an interactive busybox Pod:
kubectl run -it --rm --restart=Never busybox --image=busybox sh
# you can use busybox for debugging inside the cluster
bb nslookup demo
bb wget -qO- http://demo:8888
bb sh
# Show resource utilization per node:
kubectl top node
# Show resource utilization per pod:
kubectl top pod
# if you want to have a terminal show the output of these commands every 2 seconds without having to run the command over and over you can use the watch command such as
watch kubectl top node
# --v=8 for debugging
kubectl get po --v=8