# Download the Vagrantfile (see the configuration reference link)
curl -O https://raw.githubusercontent.com/gasida/DKOS/main/K3S/Vagrantfile
# Deploy
vagrant up
# SSH access : root / qwe123
## Connect to the master node
vagrant ssh k3s-m
or
ssh [email protected] -p 60010   # password: qwe123
## Check the nodes in the k8s cluster
kubectl get node
root@k3s-m:~# kubectl get node
NAME STATUS ROLES AGE VERSION
k3s-m Ready control-plane,master 3m10s v1.21.1+k3s1
k3s-w1 Ready <none> 98s v1.21.1+k3s1
k3s-w2 Ready <none> 17s v1.21.1+k3s1
kubectl get node -o wide
root@k3s-m:~# kubectl get node -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k3s-m Ready control-plane,master 3m13s v1.21.1+k3s1 192.168.200.10 <none> Ubuntu 20.04.2 LTS 5.4.0-74-generic docker://20.10.7
k3s-w1 Ready <none> 101s v1.21.1+k3s1 192.168.200.101 <none> Ubuntu 20.04.2 LTS 5.4.0-74-generic docker://20.10.7
k3s-w2 Ready <none> 20s v1.21.1+k3s1 192.168.200.102 <none> Ubuntu 20.04.2 LTS 5.4.0-74-generic docker://20.10.7
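As an extra check beyond kubectl get node (not part of the original guide), k3s keeps its kubeconfig at /etc/rancher/k3s/k3s.yaml, and a throwaway pod confirms the cluster can actually schedule work:
# optional smoke test on k3s-m (hypothetical pod name "nginx")
kubectl run nginx --image=nginx
kubectl get pod -o wide
kubectl delete pod nginx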
## Connect to the worker nodes : root / qwe123
vagrant ssh k3s-w1
or
ssh [email protected] -p 60011   # password: qwe123, worker node 1
vagrant ssh k3s-w2
or
ssh [email protected] -p 60012   # password: qwe123, worker node 2
# Stop the VMs after finishing the lab
vagrant halt
# Start the stopped VMs
vagrant reload
# Shut down and delete the VMs
vagrant destroy -f && rm -rf .vagrant share
Below, N = 2 creates two worker nodes; setting N = 3 creates three worker nodes.
# Base Image
BOX_IMAGE = "ubuntu/focal64"
BOX_VERSION = "20210603.0.0"
# max number of worker nodes
N = 2
# ssh config
$ssh_config = <<-SCRIPT
echo ">>>> root password <<<<<<"
printf "qwe123\nqwe123\n" | passwd
echo ">>>> ssh-config <<<<<<"
sed -i "s/^PasswordAuthentication no/PasswordAuthentication yes/g" /etc/ssh/sshd_config
sed -i "s/^#PermitRootLogin prohibit-password/PermitRootLogin yes/g" /etc/ssh/sshd_config
systemctl restart sshd
SCRIPT
Vagrant.configure("2") do |config|
  # -----Manager Node
  config.vm.define "k8s-m" do |subconfig|
    subconfig.vm.box = BOX_IMAGE
    subconfig.vm.box_version = BOX_VERSION
    subconfig.vm.provider "virtualbox" do |v|
      v.name = "k8s-m"
      v.memory = 2048
      v.cpus = 2
      v.linked_clone = true
    end
    subconfig.vm.hostname = "k8s-m"
    subconfig.vm.synced_folder "./", "/vagrant", disabled: true
    subconfig.vm.network "private_network", ip: "192.168.100.10"
    subconfig.vm.network "forwarded_port", guest: 22, host: 50010, auto_correct: true, id: "ssh"
    subconfig.vm.provision "shell", inline: $ssh_config
    subconfig.vm.provision "shell", path: "https://raw.githubusercontent.com/gasida/DKOS/main/2/init_cfg.sh", args: N
    subconfig.vm.provision "shell", path: "https://raw.githubusercontent.com/gasida/DKOS/main/2/master.sh"
  end
  # -----Worker Node
  (1..N).each do |i|
    config.vm.define "k8s-w#{i}" do |subconfig|
      subconfig.vm.box = BOX_IMAGE
      subconfig.vm.box_version = BOX_VERSION
      subconfig.vm.provider "virtualbox" do |v|
        v.name = "k8s-w#{i}"
        v.memory = 1536
        v.cpus = 1
        v.linked_clone = true
      end
      subconfig.vm.hostname = "k8s-w#{i}"
      subconfig.vm.synced_folder "./", "/vagrant", disabled: true
      subconfig.vm.network "private_network", ip: "192.168.100.10#{i}"
      subconfig.vm.network "forwarded_port", guest: 22, host: "5001#{i}", auto_correct: true, id: "ssh"
      subconfig.vm.provision "shell", inline: $ssh_config
      subconfig.vm.provision "shell", path: "https://raw.githubusercontent.com/gasida/DKOS/main/2/init_cfg.sh", args: N
      subconfig.vm.provision "shell", path: "https://raw.githubusercontent.com/gasida/DKOS/main/2/worker.sh"
    end
  end
end
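Before the first vagrant up, a quick syntax and state check can catch Vagrantfile mistakes early (standard Vagrant commands, not part of the original guide):
# validate the Vagrantfile and list the defined VMs
vagrant validate
vagrant status
# after the VMs are up, confirm the $ssh_config provisioning took effect
vagrant ssh k8s-m -c "sudo sshd -T | grep -Ei 'permitrootlogin|passwordauthentication'"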
init_cfg.sh: common setup script
#!/usr/bin/env bash
# profile, bashrc settings
echo 'alias vi=vim' >> /etc/profile
echo "sudo su -" >> .bashrc
# Letting iptables see bridged traffic
modprobe br_netfilter
cat << EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
# local dns setting
echo " 192.168.100.10 k8s-m" >> /etc/hosts
for (( i= 1 ; i<= $1 ; i++ )) ; do echo " 192.168.100.10$i k8s-w$i " >> /etc/hosts; done
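# (illustration, not in the original script) with N=2 the loop above appends:
#   192.168.100.101 k8s-w1
#   192.168.100.102 k8s-w2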
# apparmor disable
systemctl stop apparmor && systemctl disable apparmor
# docker install
curl -fsSL https://get.docker.com | sh
# Cgroup Driver systemd
cat << EOF | tee /etc/docker/daemon.json
{"exec-opts": ["native.cgroupdriver=systemd"]}
EOF
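# (added note) kubeadm 1.21 defaults the kubelet to the systemd cgroup driver while Docker defaults to cgroupfs; the daemon.json above keeps the two aligned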
systemctl daemon-reload && systemctl restart docker
# package install
apt-get install bridge-utils net-tools jq tree -y
# swap off
swapoff -a
# Installing kubeadm kubelet and kubectl
curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo " deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
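A quick sanity check after init_cfg.sh has run (these commands are not part of the script):
docker info --format '{{.CgroupDriver}}'   # should print systemd
kubeadm version -o short                   # e.g. v1.21.x
systemctl is-enabled kubelet               # should print enabled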
master.sh: master node setup script
#!/usr/bin/env bash
# init kubernetes
kubeadm init --token 123456.1234567890123456 --token-ttl 0 --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.100.10
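# (added notes on the flags above)
#  --token-ttl 0                  : the bootstrap token never expires, so workers can join at any time
#  --pod-network-cidr             : cluster-wide pod range; each node is assigned a /24 from it (e.g. 172.16.1.0/24)
#  --apiserver-advertise-address  : advertise the API server on the private_network IP, not the VirtualBox NAT interface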
# config for master node only - root user
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# calico install
curl -O https://docs.projectcalico.org/manifests/calico.yaml
sed -i 's/policy\/v1beta1/policy\/v1/g' calico.yaml
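# (added note) the sed above rewrites policy/v1beta1 references in the downloaded manifest to policy/v1 (GA in Kubernetes 1.21)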
kubectl apply -f calico.yaml
# calicoctl install
curl -L -o kubectl-calico "https://github.com/projectcalico/calicoctl/releases/download/v3.19.1/calicoctl"
chmod +x kubectl-calico
mv kubectl-calico /usr/bin
# etcdctl install
apt install etcd-client -y
# source bash-completion for kubectl kubeadm
source <(kubectl completion bash)
source <(kubeadm completion bash)
## Source the completion script in your ~/.bashrc file
echo 'source <(kubectl completion bash)' >> ~/.bashrc
echo 'source <(kubeadm completion bash)' >> ~/.bashrc
# alias kubectl to k
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -F __start_kubectl k' >> ~/.bashrc
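After master.sh finishes, a few checks (not part of the script) confirm the control plane and Calico came up:
kubectl get pods -n kube-system                          # calico-*, coredns, etcd, kube-* should be Running
kubectl -n kube-system get pods -l k8s-app=calico-node   # one calico-node pod per joined node
kubectl calico version                                   # calicoctl was installed above as a kubectl plugin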
worker.sh: worker node setup script
#!/usr/bin/env bash
# config for worker nodes only
kubeadm join --token 123456.1234567890123456 --discovery-token-unsafe-skip-ca-verification 192.168.100.10:6443
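The join token is hard-coded in master.sh and worker.sh; if a node ever has to be joined manually, a fresh join command can be printed on the master (standard kubeadm, not part of the original scripts):
kubeadm token create --print-join-command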
# Download the Vagrantfile
curl -O https://raw.githubusercontent.com/gasida/DKOS/main/2/Vagrantfile
# Deploy
vagrant up
# SSH access : root / qwe123
## Connect to the master node
ssh [email protected] -p 50010   # password: qwe123
or
ssh root@<HomePC IP> -p 50010
or
ssh [email protected]
## Check the nodes in the k8s cluster
root@k8s-m:~# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-m Ready control-plane,master 3m27s v1.21.1
k8s-w1 Ready <none> 99s v1.21.1
k8s-w2 Ready <none> 99s v1.21.1
## Connect to the worker nodes : root / qwe123
ssh [email protected] -p 50011   # password: qwe123, worker node 1
ssh [email protected] -p 50012   # password: qwe123, worker node 2
ssh [email protected] -p 50013   # password: qwe123, worker node 3
...
or
ssh root@<HomePC IP> -p 50011
ssh root@<HomePC IP> -p 50012
ssh root@<HomePC IP> -p 50013
or
ssh [email protected]   # worker node 1
ssh [email protected]   # worker node 2
ssh [email protected]   # worker node 3
...
# Stop the VMs after finishing the lab
vagrant halt
# Start the stopped VMs
vagrant reload
# Shut down and delete the VMs
vagrant destroy -f && rm -rf .vagrant share
# Check node information
kubectl get nodes
root@k8s-m:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-m Ready control-plane,master 10m v1.21.1
k8s-w1 Ready <none> 2m15s v1.21.1
k8s-w2 Ready <none> 118s v1.21.1
k8s-w3 Ready <none> 106s v1.21.1
## AWS environment
root@ip-172-20-37-224:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
ip-172-20-37-224.ap-northeast-2.compute.internal Ready control-plane,master 9m12s v1.20.7
ip-172-20-41-44.ap-northeast-2.compute.internal Ready node 7m16s v1.20.7
ip-172-20-62-129.ap-northeast-2.compute.internal Ready node 7m11s v1.20.7
# Check pod information (kube-system namespace)
kubectl get pod -n kube-system
root@k8s-m:~# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-78d6f96c7b-tbfdx 1/1 Running 0 7m41s
calico-node-2lckb 1/1 Running 0 2m5s
...
# Print node information with additional key columns
kubectl get nodes -o wide
root@k8s-m:~# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-m Ready control-plane,master 17h v1.21.1 192.168.100.10 <none> Ubuntu 20.04.2 LTS 5.4.0-74-generic docker://20.10.7
k8s-w1 Ready <none> 17h v1.21.1 192.168.100.101 <none> Ubuntu 20.04.2 LTS 5.4.0-74-generic docker://20.10.7
k8s-w2 Ready <none> 17h v1.21.1 192.168.100.102 <none> Ubuntu 20.04.2 LTS 5.4.0-74-generic docker://20.10.7
# Print detailed information for a node
kubectl describe node k8s-w1
root@k8s-m:~# kubectl describe node k8s-w1
Name: k8s-w1
...
InternalIP: 192.168.100.101
Hostname: k8s-w1
Capacity:
cpu: 1
ephemeral-storage: 40593612Ki
hugepages-2Mi: 0
memory: 1519372Ki
pods: 110
Allocatable:
cpu: 1
ephemeral-storage: 37411072758
hugepages-2Mi: 0
memory: 1416972Ki
pods: 110
System Info:
Machine ID: fa7c3510421246e8a45ab79f5003be80
System UUID: 256630a5-fb70-9a41-9385-193ede838e92
Boot ID: 7ee96bc2-5b8a-4f19-bd1c-e6aec6faedd3
Kernel Version: 5.4.0-74-generic
OS Image: Ubuntu 20.04.2 LTS
Operating System: linux
Architecture: amd64
Container Runtime Version: docker://20.10.7
Kubelet Version: v1.21.1
Kube-Proxy Version: v1.21.1
PodCIDR: 172.16.1.0/24
PodCIDRs: 172.16.1.0/24
Non-terminated Pods: (2 in total)
Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age
--------- ---- ------------ ---------- --------------- ------------- ---
kube-system calico-node-cw8nf 250m (25%) 0 (0%) 0 (0%) 0 (0%) 17h
kube-system kube-proxy-rvx5t 0 (0%) 0 (0%) 0 (0%) 0 (0%) 17h
Allocated resources:
(Total limits may be over 100 percent, i.e., overcommitted.)
Resource Requests Limits
-------- -------- ------
cpu 250m (25%) 0 (0%)
memory 0 (0%) 0 (0%)
ephemeral-storage 0 (0%) 0 (0%)
hugepages-2Mi 0 (0%) 0 (0%)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Starting 17h kubelet Starting kubelet.
...
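To pull only selected fields from every node at once instead of reading the full describe output, custom columns work (a generic kubectl usage example, not from the original notes):
kubectl get nodes -o custom-columns=NAME:.metadata.name,PODCIDR:.spec.podCIDR
# e.g. k8s-w1 has PodCIDR 172.16.1.0/24, matching the describe output above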