#!/bin/bash

# kubeadm master on CentOS 7

# Housekeeping
yum update -y

# kubelet requires swap to be off: disable it now and comment it out of fstab
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# Disable the firewall and set SELinux to permissive for this lab build
systemctl disable firewalld
systemctl stop firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

echo "Housekeeping done"
# Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2 yum-plugin-versionlock
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Docker 18.06 ships the engine and CLI in the single docker-ce package
# (the separate docker-ce-cli and containerd.io packages only start with 18.09)
yum install -y docker-ce-18.06.2.ce-3.el7

# Use the systemd cgroup driver (so systemd and Docker share one cgroup manager)
# and the overlay2 storage driver
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl enable docker

# Let the main (non-root) user run docker without sudo
groupadd -f docker
MAINUSER=$(logname)
usermod -aG docker "$MAINUSER"

systemctl start docker

# Pin the Docker version so a later yum update cannot bump it
yum versionlock docker-ce

echo "Docker Installation done"
# Install K8s Stuff
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

# exclude=kube* keeps yum update from touching these packages, so the install
# has to disable the exclude explicitly
yum install -y kubelet-1.14.1-0 kubeadm-1.14.1-0 kubectl-1.14.1-0 --disableexcludes=kubernetes
echo "Kube Stuff done"

# enable --now both enables and starts kubelet; it will crash-loop until
# kubeadm init supplies its configuration, which is expected
systemctl enable --now kubelet

# Pin the Kubernetes packages as well
yum versionlock kubelet kubeadm kubectl

echo "Kubelet started done"
# Network Stuff
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
echo "Network Stuff done"
# Create the vSphere Config File
# Adjust the credentials, vCenter address, datastore and inventory paths below
# to match your environment.
tee /etc/kubernetes/vsphere.conf >/dev/null <<EOF
[Global]
user = "[email protected]"
password = "VMware1!"
port = "443"
insecure-flag = "1"

[VirtualCenter "10.IP.ADD.RESS"]
datacenters = "Datacenter-A"

[Workspace]
server = "10.IP.ADD.RESS"
datacenter = "Datacenter-A"
default-datastore = "vsanDatastore"
resourcepool-path = "Cluster-A/Resources"
folder = "kubernetes"

[Disk]
scsicontrollertype = pvscsi

[Network]
public-network = "K8s"
EOF
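# vsphere.conf holds vCenter credentials, so keep it readable by root only
# (hardening step, not in the original script).
chmod 600 /etc/kubernetes/vsphere.conf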
# Activate the vSphere Cloud Provider in our kubeadm init config file.
# Additionally, we are deploying flannel as our overlay network for pods,
# and it requires the pod subnet CIDR below for the overlay to work.
tee /etc/kubernetes/kubeadminitmaster.yaml >/dev/null <<EOF
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: y7yaev.9dvwxx6ny4ef8vlq
  ttl: 0s
  usages:
  - signing
  - authentication
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: "vsphere"
    cloud-config: "/etc/kubernetes/vsphere.conf"
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.1
apiServer:
  extraArgs:
    cloud-provider: "vsphere"
    cloud-config: "/etc/kubernetes/vsphere.conf"
  extraVolumes:
  - name: cloud
    hostPath: "/etc/kubernetes/vsphere.conf"
    mountPath: "/etc/kubernetes/vsphere.conf"
controllerManager:
  extraArgs:
    cloud-provider: "vsphere"
    cloud-config: "/etc/kubernetes/vsphere.conf"
  extraVolumes:
  - name: cloud
    hostPath: "/etc/kubernetes/vsphere.conf"
    mountPath: "/etc/kubernetes/vsphere.conf"
networking:
  podSubnet: "10.244.0.0/16"
EOF
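# Note (assumption, not in the original script): if the API server will sit
# behind a load balancer for an HA control plane, ClusterConfiguration also
# accepts a controlPlaneEndpoint field, e.g.
#   controlPlaneEndpoint: "LOAD.BALANCER.ADDRESS:6443"
# where the address shown is only a placeholder.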
# Restart the kubelet daemon to reload the configuration
systemctl daemon-reload
systemctl restart kubelet

# Verify we have all the images (pull them for the version pinned in our config)
kubeadm config images pull --config /etc/kubernetes/kubeadminitmaster.yaml

# Init kubeadm
kubeadm init --config /etc/kubernetes/kubeadminitmaster.yaml

# Set up kubectl for the main (non-root) user
mkdir -p /home/${MAINUSER}/.kube
cp -i /etc/kubernetes/admin.conf /home/${MAINUSER}/.kube/config
chown -R ${MAINUSER}:${MAINUSER} /home/${MAINUSER}/.kube
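# The podSubnet above (10.244.0.0/16) matches flannel's default, but the flannel
# manifest still has to be applied once the control plane is up. A minimal
# sketch, assuming the coreos/flannel manifest URL of this era:
kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f \
  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml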
Hi, I used a load balancer as my controlPlaneEndpoint. In order to add a new master that also uses the cloud-provider key, do you know how I could join another master node with something like the method below?

kubectl -n kube-public get configmap cluster-info -o jsonpath='{.data.kubeconfig}' > discovery.yaml

tee /etc/kubernetes/kubeadminitSecondMaster.yaml >/dev/null <<EOF
apiVersion: kubeadm.k8s.io/v1beta1
discovery:
  file:
    kubeConfigPath: discovery.yaml
  timeout: 5m0s
  tlsBootstrapToken: y7yaev.9dvwxx6ny4ef8vlq
kind: JoinConfiguration
nodeRegistration:
  kubeletExtraArgs:
    cloud-provider: vsphere
EOF

Thanks