- Open a shell in a throwaway pod to debug the cluster (the pod is removed on exit)
kubectl run ubuntu -it --image=ubuntu --restart=Never --rm
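- A smaller image works the same way if a minimal shell is enough (an assumed variant, not from the original notes)
kubectl run busybox -it --image=busybox --restart=Never --rm -- sh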
- Follow logs from the current point in time instead of from the beginning
kubectl logs -f --tail=0 <pod-name>
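- Related variants (assumed examples): start from the last 100 lines, or only show lines from the last hour
kubectl logs -f --tail=100 <pod-name>
kubectl logs --since=1h <pod-name>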
- Get only the pod status (phase) of every pod
kubectl get pods -o=jsonpath='{.items[*].status.phase}'
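- The output is a space-separated list of phases (e.g. Running Running Pending). For a single pod (assumed example), apply the same jsonpath to one object:
kubectl get pod <pod-name> -o=jsonpath='{.status.phase}'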
- To configure access to multiple clusters you need the kubeconfig file of each cluster as a source of values. Each cluster is mapped to its own context, which links a cluster, a user and optionally a namespace. To add a particular context, follow the steps below; a short verification example comes after them.
- Add cluster: open the corresponding kubeconfig file and copy the values certificate-authority-data and server, then run the commands below (set-cluster does not take the certificate data directly, so that field is set with kubectl config set)
kubectl config set-cluster <cluster-name> --server=<server>
kubectl config set clusters.<cluster-name>.certificate-authority-data <certificate-authority-data>
For example:
kubectl config set-cluster cluster-name --server=https://170839116A56374EF061B65D0E47872F.gr7.us-west-2.eks.amazonaws.com
kubectl config set clusters.cluster-name.certificate-authority-data LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS
- Add user: open the corresponding kubeconfig file and copy the token value, then run
kubectl config set-credentials <user-name> --token=<token>
For example:
kubectl config set-credentials user-name --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdW....
- Add context: link the cluster and the user to a context by running
kubectl config set-context <context-name> --cluster=<cluster-name> --user=<user-name>
You can also set an optional namespace with --namespace=<namespace>, for example:
kubectl config set-context context-name --cluster=cluster-name --namespace=myspace --user=user-name
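- To verify (a quick sketch using the placeholder names from the steps above): list the contexts, switch to the new one and run any command against the cluster
kubectl config get-contexts
kubectl config use-context <context-name>
kubectl get nodes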
- Open a shell in the first pod whose name matches the $POD environment variable
alias sh_pod='kubectl exec -it $(kubectl get pods | grep $POD | awk '"'"'{print $1}'"'"') -- /bin/bash'
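- Usage (assumed example; my-app is a hypothetical pod name fragment): set $POD first, then call the alias
export POD=my-app
sh_pod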
- Copy a file from the local machine into a pod, and back
kubectl cp <local-file> <pod-name>:<path-in-pod>
kubectl cp <pod-name>:<path-in-pod> <local-file>
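- For example (assumed names): copy a local config into /tmp inside the pod, then pull a log file back out
kubectl cp ./settings.yaml my-pod:/tmp/settings.yaml
kubectl cp my-pod:/var/log/app.log ./app.log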
- Get pods by the recommended app.kubernetes.io labels of a deployment
kubectl get pods --selector=app.kubernetes.io/instance=<deployment>
kubectl get pods --selector=app.kubernetes.io/name=<deployment-name>
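- Selectors can be combined with a comma (assumed example values)
kubectl get pods --selector=app.kubernetes.io/name=my-app,app.kubernetes.io/instance=my-release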