Testing a kubeadm v1.12 to v1.13 stacked control plane upgrade
Install Docker on each host:
amazon-linux-extras install docker=18.06.1
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable docker
systemctl start docker
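Before continuing, it is worth confirming that Docker picked up the systemd cgroup driver:
docker info 2>/dev/null | grep -i cgroup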
Install k8s dependencies on each host:
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
exclude=kube*
EOF
yum install -y kubelet kubeadm kubectl iproute-tc --disableexcludes=kubernetes
systemctl enable kubelet
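A quick sanity check that the expected v1.12 packages landed:
kubeadm version -o short
kubelet --version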
Initialize the control plane on cp0:
cat << EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha3
kind: ClusterConfiguration
kubernetesVersion: stable
apiServerCertSANs:
- "kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com"
controlPlaneEndpoint: "kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com:443"
etcd:
  local:
    extraArgs:
      name: "ip-172-31-53-188.ec2.internal"
      listen-client-urls: "https://127.0.0.1:2379,https://172.31.53.188:2379"
      advertise-client-urls: "https://172.31.53.188:2379"
      listen-peer-urls: "https://172.31.53.188:2380"
      initial-advertise-peer-urls: "https://172.31.53.188:2380"
      initial-cluster: "ip-172-31-53-188.ec2.internal=https://172.31.53.188:2380"
    serverCertSANs:
    - ip-172-31-53-188.ec2.internal
    - 172.31.53.188
    peerCertSANs:
    - ip-172-31-53-188.ec2.internal
    - 172.31.53.188
networking:
  # This CIDR is a Calico default. Substitute or remove for your CNI provider.
  podSubnet: "192.168.0.0/16"
EOF
kubeadm init --config=kubeadm-config.yaml
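The init output ends with a kubeadm join command; the token and discovery hash used in the join step later in this document came from that output. If the output is lost, an equivalent join command can be regenerated with:
kubeadm token create --print-join-command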
Copy the secrets to the other control plane hosts:
USER=ec2-user
CONTROL_PLANE_IPS="172.31.61.80 172.31.53.8"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    scp /etc/kubernetes/admin.conf "${USER}"@$host:
done
On cp1, move the copied files into the locations kubeadm expects:
USER=ec2-user
mkdir -p /etc/kubernetes/pki/etcd
mv /home/${USER}/ca.crt /etc/kubernetes/pki/
mv /home/${USER}/ca.key /etc/kubernetes/pki/
mv /home/${USER}/sa.pub /etc/kubernetes/pki/
mv /home/${USER}/sa.key /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf
cat << EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha3
kind: ClusterConfiguration
kubernetesVersion: stable
apiServerCertSANs:
- "kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com"
controlPlaneEndpoint: "kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com:443"
etcd:
  local:
    extraArgs:
      name: "ip-172-31-61-80.ec2.internal"
      listen-client-urls: "https://127.0.0.1:2379,https://172.31.61.80:2379"
      advertise-client-urls: "https://172.31.61.80:2379"
      listen-peer-urls: "https://172.31.61.80:2380"
      initial-advertise-peer-urls: "https://172.31.61.80:2380"
      initial-cluster: "ip-172-31-53-188.ec2.internal=https://172.31.53.188:2380,ip-172-31-61-80.ec2.internal=https://172.31.61.80:2380"
      initial-cluster-state: existing
    serverCertSANs:
    - ip-172-31-61-80.ec2.internal
    - 172.31.61.80
    peerCertSANs:
    - ip-172-31-61-80.ec2.internal
    - 172.31.61.80
networking:
  # This CIDR is a Calico default. Substitute or remove for your CNI provider.
  podSubnet: "192.168.0.0/16"
EOF
mkdir -p /var/lib/kubelet
echo " KUBELET_KUBEADM_ARGS=--cgroup-driver=systemd --network-plugin=cni" > /var/lib/kubelet/kubeadm-flags.env
kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
systemctl start kubelet
export CP0_IP=172.31.53.188
export CP0_HOSTNAME=ip-172-31-53-188.ec2.internal
export CP1_IP=172.31.61.80
export CP1_HOSTNAME=ip-172-31-61-80.ec2.internal
kubeadm alpha phase etcd local --config kubeadm-config.yaml
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP1_HOSTNAME} https://${CP1_IP}:2380
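To confirm the membership change took, the same etcdctl flags can be reused to list members (the new member shows as unstarted until its etcd comes up):
kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member list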
kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config annotate-cri --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml
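cp1 should now be registered as a second master:
kubectl get nodes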
On cp2, move the copied files into place in the same way:
USER=ec2-user
mkdir -p /etc/kubernetes/pki/etcd
mv /home/${USER}/ca.crt /etc/kubernetes/pki/
mv /home/${USER}/ca.key /etc/kubernetes/pki/
mv /home/${USER}/sa.pub /etc/kubernetes/pki/
mv /home/${USER}/sa.key /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf
cat << EOF > kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1alpha3
kind: ClusterConfiguration
kubernetesVersion: stable
apiServerCertSANs:
- "kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com"
controlPlaneEndpoint: "kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com:443"
etcd:
  local:
    extraArgs:
      name: "ip-172-31-53-8.ec2.internal"
      listen-client-urls: "https://127.0.0.1:2379,https://172.31.53.8:2379"
      advertise-client-urls: "https://172.31.53.8:2379"
      listen-peer-urls: "https://172.31.53.8:2380"
      initial-advertise-peer-urls: "https://172.31.53.8:2380"
      initial-cluster: "ip-172-31-53-188.ec2.internal=https://172.31.53.188:2380,ip-172-31-61-80.ec2.internal=https://172.31.61.80:2380,ip-172-31-53-8.ec2.internal=https://172.31.53.8:2380"
      initial-cluster-state: existing
    serverCertSANs:
    - ip-172-31-53-8.ec2.internal
    - 172.31.53.8
    peerCertSANs:
    - ip-172-31-53-8.ec2.internal
    - 172.31.53.8
networking:
  # This CIDR is a Calico default. Substitute or remove for your CNI provider.
  podSubnet: "192.168.0.0/16"
EOF
mkdir -p /var/lib/kubelet
echo " KUBELET_KUBEADM_ARGS=--cgroup-driver=systemd --network-plugin=cni" > /var/lib/kubelet/kubeadm-flags.env
kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
systemctl start kubelet
export CP0_IP=172.31.53.188
export CP0_HOSTNAME=ip-172-31-53-188.ec2.internal
export CP2_IP=172.31.53.8
export CP2_HOSTNAME=ip-172-31-53-8.ec2.internal
kubeadm alpha phase etcd local --config kubeadm-config.yaml
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl --ca-file /etc/kubernetes/pki/etcd/ca.crt --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --endpoints=https://${CP0_IP}:2379 member add ${CP2_HOSTNAME} https://${CP2_IP}:2380
kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config annotate-cri --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml
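With all three control plane nodes in place, the cluster should show three masters, each running a local etcd member:
kubectl get nodes
kubectl get pods -n kube-system | grep etcd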
Join the worker node to the cluster:
kubeadm join kubeadm-test-48e70848044b35ef.elb.us-east-1.amazonaws.com:443 --token w2683e.9e14zsuqnhhmusnl --discovery-token-ca-cert-hash sha256:4bce93f9cda04ad3c79f571d616c09f69d47c9cb0974fb61841fa1c8a9dab653
Install the Calico CNI plugin (run from a host with the admin kubeconfig):
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
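The Calico and DNS pods should go Ready once the pod network is up:
kubectl get pods -n kube-system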
Build the pre-release RPMs from the release-1.13 branch:
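Assuming a kubernetes/kubernetes checkout on the release-1.13 branch with Bazel installed, the packages referenced below can be produced with something like:
bazel build //build/rpms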
Copy the pre-release RPMs to the remote hosts:
for host in ec2-34-204-167-138.compute-1.amazonaws.com ec2-3-80-4-117.compute-1.amazonaws.com ec2-34-237-75-83.compute-1.amazonaws.com ec2-18-204-56-246.compute-1.amazonaws.com; do
    scp bazel-bin/build/rpms/{cri-tools,kubeadm,kubectl,kubelet,kubernetes-cni}.rpm ec2-user@$host:
done
Update kubeadm and verify the upgrade plan on cp0:
yum install -y /home/ec2-user/kubeadm.rpm
kubeadm upgrade plan v1.13.0-beta.2
Update the kubeadm-config ConfigMap stored in the cluster:
kubectl edit -n kube-system cm kubeadm-config
In the editor, remove the etcd configuration under ClusterConfiguration, and update apiEndpoints under ClusterStatus so that every control plane node has an entry.
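For reference, a sketch of what the apiEndpoints section might look like after the edit (assuming the kubeadm.k8s.io/v1beta1 ClusterStatus schema used by v1.13 and the default API server bind port of 6443):
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterStatus
apiEndpoints:
  ip-172-31-53-188.ec2.internal:
    advertiseAddress: 172.31.53.188
    bindPort: 6443
  ip-172-31-61-80.ec2.internal:
    advertiseAddress: 172.31.61.80
    bindPort: 6443
  ip-172-31-53-8.ec2.internal:
    advertiseAddress: 172.31.53.8
    bindPort: 6443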
kubeadm upgrade apply v1.13.0-beta.2 --allow-experimental-upgrades
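After the apply completes, the control plane pods on cp0 should be running the new version (a quick check, assuming kubeadm's default tier=control-plane label on its static pod manifests):
kubectl get pods -n kube-system -l tier=control-plane -o wide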
On cp1, update kubeadm and upgrade the control plane:
yum install -y /home/ec2-user/kubeadm.rpm
kubeadm upgrade node experimental-control-plane
On cp2, do the same:
yum install -y /home/ec2-user/kubeadm.rpm
kubeadm upgrade node experimental-control-plane
Upgrade kubelet and kubectl on all nodes:
yum install -y /home/ec2-user/{kubectl,kubelet}.rpm
systemctl daemon-reload
systemctl restart kubelet
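Finally, confirm that every node reports the upgraded kubelet version:
kubectl get nodes -o wide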