kubectl get pods --selector=bu=finance,tier=frontend,env=prod
kubectl get pods --selector='env in (dev,prod)'
kubectl config set-context $(kubectl config current-context) --namespace=<namespace to set>
alias k='kubectl'
--dry-run -o yaml > <file-name>.yml
kubectl run tester --image=busybox --restart=Never -it --rm
kubectl explain <object.key> | less
kubectl run <pod-name> --image=<pod-image> --restart=Never
kubectl expose pod <pod-name> --port <service-port> --name <service-name> --type <ClusterIP|NodePort|LoadBalancer>
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
- can be set at the pod level or the container
- container overrides pod
- capabilities can only be set at the container level
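A minimal pod sketch tying these rules together (names, user IDs and the NET_ADMIN capability are placeholder choices):
apiVersion: v1
kind: Pod
metadata:
  name: secure-pod
spec:
  securityContext:        # pod level - applies to all containers
    runAsUser: 1000
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    securityContext:      # container level - overrides the pod level
      runAsUser: 2000
      capabilities:       # only valid at the container level
        add: ["NET_ADMIN"]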
https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/
- default location: $HOME/.kube/config
- three sections:
- clusters
- contexts
- users
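Skeleton of a kubeconfig showing the three sections (cluster/user names and paths are placeholders):
apiVersion: v1
kind: Config
current-context: dev-user@dev-cluster
clusters:
- name: dev-cluster
  cluster:
    server: https://<kube-apiserver>:6443
    certificate-authority: /etc/kubernetes/pki/ca.crt
contexts:
- name: dev-user@dev-cluster
  context:
    cluster: dev-cluster
    user: dev-user
users:
- name: dev-user
  user:
    client-certificate: /path/to/dev-user.crt
    client-key: /path/to/dev-user.key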
kubectl proxy # starts a localhost proxy to the kube-apiserver that utilises local KubeConfig
kubectl create deployment <deployment-name> --image=<pod-image>
kubectl expose deployment <deployment-name> --port <service-port> --name <service-name> --type <ClusterIP|NodePort|LoadBalancer>
kubectl set image deployment/<deployment-name> <container-name>=<new-image>
kubectl rollout status deployment/<deployment-name>
kubectl rollout history deployment/<deployment-name>
kubectl rollout undo deployment/<deployment-name>
kubectl run <job-name> --image=<container-image> --restart=OnFailure
kubectl run <cronjob-name> --image=<container-image> --restart=OnFailure --schedule=<crontab value>
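Note: newer kubectl versions drop the run generators for jobs/cronjobs; the dedicated create commands below should do the same thing:
kubectl create job <job-name> --image=<container-image>
kubectl create cronjob <cronjob-name> --image=<container-image> --schedule="<crontab value>"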
kubectl expose deployment <deployment-name> --type=NodePort --port=<port on service ie 80> --target-port=<port on pod ie 8080> --name=<name of new service> -o=yaml --dry-run > service.yml
vim service.yml
... add the nodePort param under ports:
... note, nodePort must be between 30000 and 32767
... save file
... if no nodePort is specified, it will be auto assigned
kubectl apply -f service.yml
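The edited service.yml ends up looking roughly like this (selector label and port values are placeholders):
apiVersion: v1
kind: Service
metadata:
  name: <name of new service>
spec:
  type: NodePort
  selector:
    app: <deployment-label>
  ports:
  - port: 80          # port on the service
    targetPort: 8080  # port on the pod
    nodePort: 30080   # must be 30000-32767, auto-assigned if omitted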
kubectl expose deployment <deployment-name> --type=ClusterIP --port=<port on service ie 80> --target-port=<port on pod ie 8080> --name=<name of new service>
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
edit node definition and add spec.nodeName=<name of node to schedule to>
re/create the pod using the updated spec
https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
kubectl taint node <node-name> <key>=<value>:<effect>
effect= NoSchedule|PreferNoSchedule|NoExecute
example:
kubectl taint node node01 app=blue:NoSchedule
1) edit pod definition yaml ...
2) add spec.tolerations:
tolerations:
- key: "app"
  operator: "Equal"
  value: "blue"
  effect: "NoSchedule"
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
kubectl label node <node-name> <label-key>=<label-value>
1) add the following to pod.spec:
nodeSelector:
  <label-key>: <label-value>
https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity
1) add the following to pod.spec:
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        - key: size
          operator: In
          values:
          - Large
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/ https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/memory-default-namespace/ https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/
- CPU - 1vCPU
- Memory - 512Mi
pod.spec.container.resources.limits:
  limits:
    memory: "2Gi"
    cpu: 2
pod.spec.container.resources.requests:
  requests:
    memory: "1Gi"
    cpu: 1
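In context, a container spec with both requests and limits (image name is a placeholder; values follow the fragments above):
containers:
- name: app
  image: nginx
  resources:
    requests:
      memory: "1Gi"
      cpu: 1
    limits:
      memory: "2Gi"
      cpu: 2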
https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
- pods run on all nodes
- pod auto added to new nodes
- use cases
- logging
- kube-proxy
- networking
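A minimal DaemonSet sketch (name, labels and image are placeholders):
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: monitoring-agent
spec:
  selector:
    matchLabels:
      app: monitoring-agent
  template:
    metadata:
      labels:
        app: monitoring-agent
    spec:
      containers:
      - name: agent
        image: <agent-image>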
https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/
- reads pod definitions from /etc/kubernetes/manifests and auto creates pods
- if file removed from /etc/kubernetes/manifests then pod is auto removed
- can change the directory on kubelet.service using the --pod-manifest-path param, or modify the staticPodPath param in the kubelet config file (assuming --config=kubeconfig.yaml is set on the kubelet service) - see the sketch below
- use docker ps to see the running container
- can see the pod by running kubectl get pods but cannot remove or edit it
- can see static pods on the master where the pod has -master appended to its name
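Sketch of the kubelet config file referenced by --config, assuming the default manifests path:
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
staticPodPath: /etc/kubernetes/manifests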
- default kube-scheduler.service runs with --scheduler-name=default-scheduler
- run an additional scheduler service with a new --scheduler-name, ie --scheduler-name=my-custom-scheduler
- if single-master:
  - set leader-elect=false
- if multi-master:
  - set leader-elect=true
  - set lock-object=<custom-scheduler-name>
- add pod.spec.schedulerName: <custom-scheduler-name> to use the custom scheduler (see the pod sketch below)
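Pod sketch that opts into the custom scheduler (pod/scheduler names are placeholders):
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  schedulerName: my-custom-scheduler
  containers:
  - name: nginx
    image: nginx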
kubectl get events
kubectl logs <scheduler-name> --namespace=kube-system
- kubelet contains cAdvisor for retrieving performance metrics from pods and exposing them through the kubelet API to the metrics server
git clone https://github.com/kubernetes-incubator/metrics-server.git
kubectl create -f deploy/1.8+/
kubectl top node
kubectl top pod
kubectl logs <pod-name>
kubectl logs -f <pod-name>
kubectl logs <pod-name> <container-name>
- command overrides the entrypoint specified in the dockerfile
- args are appended to the entrypoint specified in the dockerfile
- both command and args should be specified as yaml arrays, e.g. ["x", "y", "z"]
- command should ALWAYS specify the executable as a separate value in the array
  - CORRECT: command: ["sleep", "10"]
  - INCORRECT: command: ["sleep 10"]
- to set command and args via kubectl run:
kubectl run <pod-name> --image=<image-name> --restart=Never --command -- <command>
kubectl run <pod-name> --image=<image-name> --restart=Never -- <args>
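The same in a pod manifest (image and values are placeholders):
apiVersion: v1
kind: Pod
metadata:
  name: sleeper
spec:
  containers:
  - name: sleeper
    image: busybox
    command: ["sleep"]   # replaces the entrypoint in the image
    args: ["10"]         # arguments passed to the entrypoint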
kubectl run <pod-name> --image=<image-name> --restart=Never --env=key=value --env=key2=value2
kubectl create configmap <configmap-name> --from-literal=key=value --from-literal=key2=value2
Given file:
key=value
key1=value1
kubectl create configmap <configmap-name> --from-env-file=<path-to-file>
https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
add to pod.spec.container:
envFrom:
- configMapRef:
    name: <config-map-name>
- https://kubernetes.io/docs/concepts/configuration/secret/
- encode:
echo -n 'admin' | base64
- decode:
echo 'MWYyZDFlMmU2N2Rm' | base64 --decode
kubectl create secret generic <secret-name> --from-literal=key=value --from-literal=key2=value2
Given file:
key=value
key1=value1
kubectl create secret generic <secret-name> --from-env-file=<path-to-file>
add to pod.spec.container:
envFrom:
- secretRef:
    name: <secret-name>
add to pod.spec.container:
env:
- name: DB_PASSWORD
  valueFrom:
    secretKeyRef:
      name: <secret-name>
      key: DB_PASSWORD
- Moves all pods to different nodes
- Marks node as cordoned
kubectl drain <node-name>
kubectl cordon <node-name>
kubectl uncordon <node-name>
- Components should never be ahead of the kube-apiserver
- controller-manager and kube-scheduler can be 1 version behind the kube-apiserver
- kubelet and kube-proxy can be 2 versions behind the kube-apiserver
- kubectl can be 1 version higher or 1 version lower than the kube-apiserver
- Kubernetes supports the latest 3 minor versions
- Recommended to upgrade 1 minor version at a time
- Upgrade master nodes then the worker nodes
- Upgrade worker nodes one at a time to avoid app downtime or
- Add new nodes to the cluster with the new version
https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/
# check what we can upgrade to
kubeadm upgrade plan
# ** MASTER NODE **
# upgrade kubeadm
apt-get upgrade -y kubeadm=1.12.0-00
# upgrade k8s components (except kubelet)
kubeadm upgrade apply v1.12.0
# if master node has apps running on it drain the master
kubectl drain master
# upgrade and restart the kubelet
apt-get upgrade -y kubelet=1.12.0-00
systemctl restart kubelet
# ** WORKER NODES **
# get list of nodes to see which has kubelet installed
kubectl get nodes
# ** for each worker node ... **
# drain the worker
kubectl drain <node-name>
# upgrade kubeadm
apt-get upgrade -y kubeadm=1.12.0-00
# upgrade and restart kubelet
apt-get upgrade -y kubelet=1.12.0-00
kubeadm upgrade node config --kubelet-version v1.12.0
systemctl restart kubelet
# uncordon the node
kubectl uncordon <node-name>
kubectl get all -A -o=yaml > back.yaml
ETCDCTL_API=3 etcdctl snapshot save snapshot.db
ETCDCTL_API=3 etcdctl snapshot status snapshot.db
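On a kubeadm-style cluster the snapshot commands usually also need endpoint and TLS flags; the paths below assume the usual kubeadm locations:
ETCDCTL_API=3 etcdctl snapshot save snapshot.db \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key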
https://medium.com/@imarunrk/certified-kubernetes-administrator-cka-tips-and-tricks-part-3-2e7b44e89a3b https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/ https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/practice-questions-answers/cluster-maintenance/backup-etcd/etcd-backup-and-restore.md
# stop the kube-apiserver service
service kube-apiserver stop
# restore the snapshot to a new data dir
ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
  --data-dir /var/lib/etcd-from-backup \
  --initial-cluster master-1=https://192.168.5.11:2380,master-2=https://192.168.5.12:2380 \
--initial-cluster-token etcd-cluster-1 \
--initial-advertise-peer-urls https://${INTERNAL_IP}:2380
# restart etcd with the new `data-dir` and `initial-cluster-token` by modifying etcd.service
# restart the etcd service
systemctl daemon-reload
systemctl restart etcd
# restart kube-apiserver
service kube-apiserver start
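The relevant part of etcd.service after the restore might look like this (binary path and member name are assumptions for a "hard way" style install):
# etcd.service (excerpt)
ExecStart=/usr/local/bin/etcd \
  --name master-1 \
  --data-dir=/var/lib/etcd-from-backup \
  --initial-cluster-token etcd-cluster-1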
https://kubernetes.io/docs/concepts/cluster-administration/certificates/ https://kubernetes.io/docs/setup/best-practices/certificates/
- certificate authorities (have ca key/crt pair)
- cluster components
- etcd - can use cluster components ca
- servers (have server key/crt pair)
- kube-apiserver
- etcd
- kubelet
- clients (have client key/crt pair)
- admin
- kube-scheduler
- kube-controller-manager
- kube-proxy
- etcd
- kubelet
# generate the private key
openssl genrsa -out ca.key 2048
# generate certificate signing request
openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
# sign the certificate
openssl x509 -req -in ca.csr -signkey ca.key -out ca.crt
# generate the private key
openssl genrsa -out admin.key 2048
# generate certificate signing request
openssl req -new -key admin.key -subj "/CN=kube-admin/O=system:masters" -out admin.csr
# sign the certificate
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out admin.crt
# generate the private key
openssl genrsa -out <component>.key 2048
# generate certificate signing request
openssl req -new -key <component>.key -subj "/CN=system:<component>/O=system:masters" -out <component>.csr
# sign the certificate
openssl x509 -req -in <component>.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out <component>.crt
https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-etcd-clusters
# generate the private key for the server
openssl genrsa -out etcd.key 2048
# generate certificate signing request for the server
openssl req -new -key etcd.key -subj "/CN=etcd/O=system:masters" -out etcd.csr
# sign the certificate for the server
openssl x509 -req -in etcd.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd.crt
# generate the private key for the peers
openssl genrsa -out etcd-peer.key 2048
# generate certificate signing request for the peers
openssl req -new -key etcd-peer.key -subj "/CN=etcd-peer/O=system:masters" -out etcd-peer.csr
# sign the certificate for the peers
openssl x509 -req -in etcd-peer.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out etcd-peer.crt
https://kubernetes.io/docs/concepts/cluster-administration/certificates/#openssl
# generate the private key
openssl genrsa -out kube-apiserver.key 2048
# generate certificate signing request config
vim openssl.cnf
[ req ]
default_bits = 2048
prompt = no
default_md = sha256
req_extensions = req_ext
distinguished_name = dn
[ dn ]
C = <country>
ST = <state>
L = <city>
O = <organization>
OU = <organization unit>
CN = <MASTER_IP>
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster
DNS.5 = kubernetes.default.svc.cluster.local
IP.1 = <MASTER_IP>
IP.2 = <MASTER_CLUSTER_IP>
[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=keyEncipherment,dataEncipherment
extendedKeyUsage=serverAuth,clientAuth
subjectAltName=@alt_names
# generate certificate signing request
openssl req -new -key kube-apiserver.key -subj "/CN=system:kube-apiserver" -out kube-apiserver.csr -config openssl.cnf
# sign the certificate
openssl x509 -req -in kube-apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kube-apiserver.crt -extensions v3_ext -extfile openssl.cnf
# generate the private key
openssl genrsa -out kubelet-<node-num>.key 2048
# generate certificate signing request
openssl req -new -key kubelet-<node-num>.key -subj "/CN=kubelet-<node-num>" -out kubelet-<node-num>.csr
# sign the certificate
openssl x509 -req -in kubelet-<node-num>.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kubelet-<node-num>.crt
# generate the private key
openssl genrsa -out kubelet-client-<node-num>.key 2048
# generate certificate signing request
openssl req -new -key kubelet-client-<node-num>.key -subj "/CN=system:node:kubelet-client-<node-num>/O=system:nodes" -out kubelet-client-<node-num>.csr
# sign the certificate
openssl x509 -req -in kubelet-client-<node-num>.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out kubelet-client-<node-num>.crt
https://kubernetes.io/docs/concepts/cluster-administration/certificates/#openssl
openssl x509 -in <path to cert> -text -noout
- Responsibility of the kube-controller-manager
kubectl get csr
kubectl certificate <approve|deny> <csr-name>
https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/#download-the-certificate-and-use-it
kubectl get csr <csr-name> -o jsonpath='{.status.certificate}' | base64 --decode > server.crt
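For reference, the CSR object behind these commands looks roughly like this (signerName shown is the common one for client certs; the request value is the base64-encoded .csr file):
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: <csr-name>
spec:
  request: <base64-encoded csr>
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth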
https://kubernetes.io/docs/reference/access-authn-authz/authentication/
- user-details.csv
pass123,user1,u0001,g0001
pass123,user2,u0002,g0002
pass123,user3,u0003,g0001
pass123,user4,u0004,g0002
- add --basic-auth-file=user-details.csv to the kube-apiserver service params
- restart the kube-apiserver
curl -v -k <endpoint> -u "<username>:<password>"
- user-token-details.csv
89asdiuajnsd98h,user1,u0001,g0001
98hsdfjkbnsdf0s,user2,u0002,g0002
asd98asdjbo89as,user3,u0003,g0001
09asdjbn8asdasd,user4,u0004,g0002
- add --token-auth-file=user-token-details.csv to the kube-apiserver service params
- restart the kube-apiserver
curl -v -k <endpoint> --header "Authorization: Bearer <token>"
curl -v -k <endpoint> --key admin.key --cert admin.crt --cacert ca.crt
https://kubernetes.io/docs/reference/access-authn-authz/rbac/
- to view authorization modes, check the --authorization-mode flag on the kube-apiserver
kubectl auth can-i get pods
kubectl auth can-i get pods --as=<user-name>
kubectl create role <role-name> --verb=create,list,delete --resource=pods
kubectl create rolebinding <role-binding-name> --role=<role-to-bind-to> --user=<user-to-bind-to>
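The equivalent objects in yaml (a sketch; names and namespace are placeholders):
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: <role-name>
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["create", "list", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: <role-binding-name>
  namespace: default
subjects:
- kind: User
  name: <user-to-bind-to>
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: <role-name>
  apiGroup: rbac.authorization.k8s.io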
https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
kubectl create secret docker-registry <secret-name> \
--docker-server=<docker-server> \
--docker-username=<docker-username> \
--docker-password=<docker-password> \
--docker-email=<docker-email>
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
spec:
  containers:
  - name: nginx
    image: my-private-reg.io/apps/nginx
  imagePullSecrets:
  - name: <secret-name>
https://kubernetes.io/docs/concepts/services-networking/network-policies/
- by default, all traffic is allowed between all pods
- network policy is NOT supported in Flannel
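Sketch of a policy that only allows ingress to db pods from api pods on 3306 (labels and port are assumptions):
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: db-policy
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: api
    ports:
    - protocol: TCP
      port: 3306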
https://kubernetes.io/docs/concepts/storage/volumes/
Add to pod.spec:
volumes:
- name: <volume-name>
  hostPath:
    path: /data
    type: Directory
to mount above volume into container:
Add to pod.spec.container:
volumeMounts:
- mountPath: /opt
  name: <volume-name>
https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume https://kubernetes.io/docs/concepts/storage/persistent-volumes/
- 1:1 relationship between volume and claim
- can use labels and selectors to claim a particular volume
- claims will be pending until a suitable volume is available
- when a claim is deleted, the volume is retained by default; this can be changed via the persistentVolumeReclaimPolicy field (Retain|Delete|Recycle)
To use a persistent volume claim in a pod, add to pod.spec:
volumes:
- name: <volume-name>
  persistentVolumeClaim:
    claimName: <pvc-name>
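A matching PV/PVC pair sketch (capacity, access mode and hostPath are placeholders):
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-log
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /pv/log
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: <pvc-name>
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi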
- to list and modify interfaces on the host:
ip link
- see the ip addresses on interfaces:
ip addr
- set ip addresses on interfaces:
ip addr add <cidr> dev <interface>
- see routing table:
ip route
- add entries into the routing table:
ip route add <cidr> via <gateway>
- check if ip forwarding between interfaces is enabled (1 = enabled):
cat /proc/sys/net/ipv4/ip_forward
- check listening ports: netstat -pntl
- address of DNS server:
/etc/resolv.conf
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/
- Discover the networking plugin config dir:
ps -aux | grep kubelet | grep cni-conf-dir
ls /etc/cni/net.d/
- Discover the networking plugin bin dir:
ps -aux | grep kubelet | grep cni-bin-dir
ls /opt/cni/bin
- Which networking plugin is being used:
ps -aux | grep kubelet | grep network-plugin
https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/ https://kubernetes.github.io/ingress-nginx/deploy/#bare-metal
https://kubernetes.io/docs/concepts/services-networking/ingress/
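Minimal Ingress resource sketch (path and backend service are placeholders):
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: app-ingress
spec:
  rules:
  - http:
      paths:
      - path: /app
        pathType: Prefix
        backend:
          service:
            name: <service-name>
            port:
              number: 80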
- check nodes
- check system pods/services
- check service names
- check service ports
- check service endpoints
- check pods
- check environment variables
- check volume/volume mounts
- check config maps
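Commands that roughly map to the checks above (names/namespaces are placeholders):
kubectl get nodes
kubectl get pods,svc -n kube-system
kubectl get svc
kubectl describe svc <service-name>      # ports, selectors, endpoints
kubectl get endpoints <service-name>
kubectl get pods
kubectl describe pod <pod-name>          # env vars, volume mounts
kubectl logs <pod-name>
kubectl get configmaps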