kubectl: 1.19.0
kubelet: 1.19.0
kubeadm: 1.19.0
containerd: 1.4.x
docker: 19.03
kubernetes: 1.19.0
# Docker 19 is compatible with kubeadm 1.19 and below.
# From Docker 20 onwards, dockershim.sock is deprecated and the CRI
# socket should be updated to the containerd.sock file path.
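If the nodes run containerd instead of Docker, uncomment the criSocket line in the config below. For debugging the runtime directly, crictl can be pointed at the same socket; a minimal sketch, assuming the default containerd socket path:
# Tell crictl where containerd's CRI socket lives
cat <<EOF | sudo tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF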
# kubeadm-config.yaml
cat <<EOF | sudo tee kubeadm-config.yaml
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: "0"
  usages:
  - signing
  - authentication
localAPIEndpoint:
  advertiseAddress: 192.168.56.51
  bindPort: 6443
nodeRegistration:
  # criSocket: /run/containerd/containerd.sock # this will default to dockershim
  name: cp1.example.com
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
networking:
  dnsDomain: example.com
  podSubnet: 10.244.0.0/22
  serviceSubnet: 10.96.0.0/22
scheduler: {}
EOF
# Execute the command only on the control plane node
kubeadm init --config kubeadm-config.yaml
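Once init completes, the control plane components run as static pods managed by the kubelet, so a quick sanity check is to list the manifests (default path):
# Should show etcd.yaml, kube-apiserver.yaml, kube-controller-manager.yaml, kube-scheduler.yaml
ls /etc/kubernetes/manifests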
Deploy the network add-on, only on the control plane node
# For now I will deploy flannel
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Update kube-flannel.yml so its network matches the pod subnet above (10.244.0.0/22).
# Set up your kubeconfig file and deploy once on the CP node before joining the workers.
kubectl apply -f kube-flannel.yml
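Flannel is deployed as a DaemonSet, one pod per node; a hedged check, assuming the upstream manifest's kube-system namespace and app=flannel label:
# Each node should show a Running kube-flannel-ds pod
kubectl -n kube-system get pods -l app=flannel -o wide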
Join worker nodes
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubeadm join 192.168.56.51:6443 --token "abcdef.0123456789abcdef" \
--discovery-token-ca-cert-hash sha256:xxxxx
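If the token has expired or the CA cert hash was lost, a fresh join command can be minted on the control plane node:
# Prints a complete 'kubeadm join ...' line with a new token and the CA cert hash
kubeadm token create --print-join-command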
How to change certs in Kubernetes using kubeadm
# Generate kubeadm.yaml
kubectl -n kube-system get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}' > kubeadm.yaml
# Update the SANs under apiServer in kubeadm.yaml
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
  certSANs:
  - kubernetes.example.com
  - "x.x.x.x"
  - cp1.example.com
  - "192.168.56.51"
# Move the existing cert and key out of the way before regenerating
mv /etc/kubernetes/pki/apiserver.{crt,key} ~
kubeadm init phase certs apiserver --config kubeadm.yaml
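The phase writes a fresh certificate and key back into the pki directory:
ls -l /etc/kubernetes/pki/apiserver.crt /etc/kubernetes/pki/apiserver.key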
# The final step is restarting the API server so it picks up the new certificate. The easiest way is to kill the API server container using docker:
Run `docker ps | grep kube-apiserver | grep -v pause` to get the container ID for the container running the Kubernetes API server. (The container ID will be the very first field in the output.)
Run `docker kill <containerID>` to kill the container.
# If your nodes are running containerd as the container runtime, the commands are a bit different:
Run `crictl pods | grep kube-apiserver | cut -d' ' -f1` to get the Pod ID for the Kubernetes API server Pod.
Run `crictl stopp <pod-id>` to stop the Pod (stopp, with two p's, is crictl's pod-level stop subcommand).
Run `crictl rmp <pod-id>` to remove the Pod.
# Verify the new SANs are present in the regenerated certificate
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text -noout
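To pinpoint just the SAN entries in that output:
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep -A1 'Subject Alternative Name'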
# Review the cluster configuration stored in the kubeadm-config ConfigMap
kubectl -n kube-system get configmap kubeadm-config -o yaml
kubectl -n kube-system describe configmap kubeadm-config
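Note that regenerating the cert does not update the in-cluster copy of the configuration. On kubeadm 1.19 the upload-config phase can push the edited kubeadm.yaml back; a sketch, verify against your kubeadm version:
kubeadm init phase upload-config kubeadm --config kubeadm.yaml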
Upgrade steps
# Upgrade kubeadm, kubectl, and kubelet to the desired version
# Example: upgrade from 1.18 to 1.19.1
# On the MASTER NODE
VERSION="1.19.1"
yum install -y kubeadm-$VERSION kubectl-$VERSION kubelet-$VERSION --disableexcludes=kubernetes
kubeadm upgrade plan
kubeadm upgrade apply v1.19.1
systemctl daemon-reload ; sleep 5 ;
systemctl restart kubelet
# Worker node upgrades: perform the following from the master node (k is an alias for kubectl)
k get nodes
NAME STATUS ROLES AGE VERSION
cp1.example.com Ready master 41d v1.19.1
k8s-worker2.example.com Ready <none> 41d v1.18.20
k8s-worker3.example.com Ready <none> 40d v1.18.20
kubectl drain k8s-worker2.example.com --ignore-daemonsets
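If the drain is blocked by pods using emptyDir volumes or pods not managed by a controller, kubectl 1.19 offers these escape hatches (they delete local data, so use with care):
kubectl drain k8s-worker2.example.com --ignore-daemonsets --delete-local-data --force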
k get nodes k8s-worker2.example.com
NAME STATUS ROLES AGE VERSION
k8s-worker2.example.com Ready,SchedulingDisabled <none> 41d v1.18.20
# Log in to the worker node, upgrade the packages, then restart the kubelet
VERSION="1.19.1"
yum install -y kubeadm-$VERSION kubectl-$VERSION kubelet-$VERSION --disableexcludes=kubernetes
kubeadm upgrade node
systemctl daemon-reload
systemctl restart kubelet
k get nodes k8s-worker2.example.com
NAME STATUS ROLES AGE VERSION
k8s-worker2.example.com Ready,SchedulingDisabled <none> 41d v1.19.1
k uncordon k8s-worker2.example.com
k get nodes k8s-worker2.example.com
NAME STATUS ROLES AGE VERSION
k8s-worker2.example.com Ready <none> 41d v1.19.1
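Repeat the same drain / upgrade / uncordon cycle for each remaining worker:
kubectl drain k8s-worker3.example.com --ignore-daemonsets
# ...upgrade packages and restart kubelet on k8s-worker3, as above...
k uncordon k8s-worker3.example.com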