Install Kubernetes on an Offline CentOS 7 Machine - (1) ONLINE SERVER

pkg
├── 1-server
├── 2-cri
├── 3-k8s
├── 4-cni
├── 5-gpu
└── 6-mec
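
The whole layout above can be created in one step if preferred (optional; the sections below also create each directory as they go):

mkdir -p $HOME/pkg/{1-server,2-cri,3-k8s,4-cni,5-gpu,6-mec}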
yum install -y epel-release
  • Add yum Repository
yum install -y yum-utils
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo

cat /etc/yum.repos.d/docker-ce.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
  • Build yum cache.
yum makecache fast -y
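
A quick optional check that both new repositories are actually visible before downloading anything:

yum repolist enabled | grep -iE 'docker-ce|kubernetes'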
  • Server
mkdir -p $HOME/pkg/1-server
cd $HOME/pkg/1-server
repotrack -a x86_64 -p $HOME/pkg/1-server/ \
chrony \
net-tools \
nfs-utils

tar -zcvf $HOME/pkg/1-server/server.tar.gz *.rpm
rm -f *.rpm
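
Optional sanity check that the RPMs made it into the archive before moving on:

tar -tzf $HOME/pkg/1-server/server.tar.gz | grep -c '\.rpm$'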

docker pull quay.io/kubernetes_incubator/nfs-provisioner:v2.3.0
docker pull bitnami/mariadb:10.3-debian-10
docker pull bitnami/redis:5.0.7-debian-9-r12
docker pull hjacobs/kube-ops-view:19.9.0
docker pull grafana/grafana:6.7.1
docker pull quay.io/coreos/kube-state-metrics:v1.9.5
docker pull quay.io/prometheus/node-exporter:v0.18.1
docker pull quay.io/prometheus/alertmanager:v0.20.0
docker pull squareup/ghostunnel:v1.5.2
docker pull jettech/kube-webhook-certgen:v1.2.0
docker pull quay.io/coreos/prometheus-operator:v0.38.1
docker pull quay.io/coreos/configmap-reload:v0.0.1
docker pull quay.io/coreos/prometheus-config-reloader:v0.38.1
docker pull quay.io/prometheus/prometheus:v2.16.0

docker save -o $HOME/pkg/1-server/nfs-provisioner.tar quay.io/kubernetes_incubator/nfs-provisioner:v2.3.0
docker save -o $HOME/pkg/1-server/mariadb.tar bitnami/mariadb:10.3-debian-10
docker save -o $HOME/pkg/1-server/redis.tar bitnami/redis:5.0.7-debian-9-r12
docker save -o $HOME/pkg/1-server/kube-ops-view.tar hjacobs/kube-ops-view:19.9.0
docker save -o $HOME/pkg/1-server/grafana.tar grafana/grafana:6.7.1
docker save -o $HOME/pkg/1-server/kube-state-metrics.tar quay.io/coreos/kube-state-metrics:v1.9.5
docker save -o $HOME/pkg/1-server/node-exporter.tar quay.io/prometheus/node-exporter:v0.18.1
docker save -o $HOME/pkg/1-server/alertmanager.tar quay.io/prometheus/alertmanager:v0.20.0
docker save -o $HOME/pkg/1-server/ghostunnel.tar squareup/ghostunnel:v1.5.2
docker save -o $HOME/pkg/1-server/kube-webhook-certgen.tar jettech/kube-webhook-certgen:v1.2.0
docker save -o $HOME/pkg/1-server/prometheus-operator.tar quay.io/coreos/prometheus-operator:v0.38.1
docker save -o $HOME/pkg/1-server/configmap-reload.tar quay.io/coreos/configmap-reload:v0.0.1
docker save -o $HOME/pkg/1-server/prometheus-config-reloader.tar quay.io/coreos/prometheus-config-reloader:v0.38.1
docker save -o $HOME/pkg/1-server/prometheus.tar quay.io/prometheus/prometheus:v2.16.0
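
The pull/save pairs above can also be driven from a list to cut down on repetition. A minimal sketch, assuming a hypothetical images.txt with one image reference per line (e.g. grafana/grafana:6.7.1):

while read -r img; do
  [ -z "$img" ] && continue
  name=$(basename "${img%%:*}")   # grafana/grafana:6.7.1 -> grafana
  docker pull "$img"
  docker save -o "$HOME/pkg/1-server/${name}.tar" "$img"
done < images.txt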
  • GPU Node
mkdir $HOME/pkg/5-gpu
cd $HOME/pkg/5-gpu

curl -O https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.7.0/nvidia-device-plugin.yml
docker pull nvidia/k8s-device-plugin:v0.7.0
docker pull nvidia/cuda:11.0-base
docker pull nvidia/digits:6.0

docker save -o $HOME/pkg/5-gpu/k8s-device-plugin.tar nvidia/k8s-device-plugin:v0.7.0
docker save -o $HOME/pkg/5-gpu/cuda.tar nvidia/cuda:11.0-base
docker save -o $HOME/pkg/5-gpu/digits.tar nvidia/digits:6.0
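
It is worth confirming that the manifest downloaded above references the same plugin image version that was just saved:

grep 'image:' $HOME/pkg/5-gpu/nvidia-device-plugin.yml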
  • Kubespray
# git clone --single-branch --branch v2.14.2 https://github.com/kubernetes-sigs/kubespray.git $HOME/pkg/kubespray

# mkdir $HOME/pkg/kubespray/python
# cd $HOME/pkg/kubespray/python

# yumdownloader --resolve \
# python3-pip \
# python36 \
# gdbm-1.10 \
# python3-libs \
# python3-setuptools

# repotrack -a x86_64 -p $HOME/pkg/kubespray/python \
# cpp-4.8.5 \
# gcc-4.8.5 \
# gcc-c++-4.8.5 \
# glibc-headers-2.17 \
# libselinux-python3

# rpm -ivh --replacefiles --replacepkgs $HOME/pkg/kubespray/python/*.rpm

# pip3 download -r $HOME/pkg/kubespray/requirements.txt -d $HOME/pkg/kubespray/deps
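
On the offline host, the cached wheels can later be installed without network access, roughly like this (a sketch, assuming the deps directory produced by the pip3 download step above):

# pip3 install --no-index --find-links=$HOME/pkg/kubespray/deps -r $HOME/pkg/kubespray/requirements.txt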
  • Download Packages
mkdir -p $HOME/pkg
mkdir -p $HOME/pkg/2-cri
cd $HOME/pkg/2-cri

# Download the Docker CE (container runtime) packages and their full dependency tree using repotrack.
# yum deplist docker-ce
repotrack -a x86_64 -p $HOME/pkg/2-cri docker-ce
ls -halt

tar -zcvf $HOME/pkg/2-cri/docker.tar.gz *.rpm
rm -f $HOME/pkg/2-cri/*.rpm
ls -halt $HOME/pkg/2-cri
mkdir $HOME/pkg/3-k8s
cd $HOME/pkg/3-k8s

# yum --showduplicates list kubelet
# yum --showduplicates list kubeadm
# yum --showduplicates list kubectl

# Download Kubernetes (K8s) packages using repotrack.
# yumdownloader only downloads one level of dependency depth, so use repotrack for the full tree.
# yumdownloader --resolve kubelet-1.18.2 kubeadm-1.18.2 kubectl-1.18.2
repotrack -a x86_64 -p $HOME/pkg/3-k8s kubeadm-1.18.2
# Delete the latest-version kubelet and kubectl that were pulled in automatically, then download the pinned 1.18.2 builds:
yumdownloader --resolve kubelet-1.18.2 kubectl-1.18.2
# repotrack -a x86_64 -p $HOME/pkg/3-k8s kubeadm-1.18.2
# repotrack -a x86_64 -p $HOME/pkg/3-k8s kubectl-1.18.2
ls -hal $HOME/pkg/3-k8s

tar -zcvf $HOME/pkg/3-k8s/kubernetes.tar.gz *.rpm
rm -f $HOME/pkg/3-k8s/*.rpm
ls -hal $HOME/pkg/3-k8s
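
A quick check that the pinned 1.18.2 packages made it into the archive:

tar -tzf $HOME/pkg/3-k8s/kubernetes.tar.gz | grep -E 'kube(adm|let|ctl)'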
  • Download Docker Images
# vi /etc/resolv.conf
# nameserver 8.8.8.8
#
# Download the container images kubeadm requires for cluster initialization
# (see: kubeadm config images list), plus a registry image for serving them offline.
docker pull registry:2.7.1
docker pull k8s.gcr.io/kube-apiserver:v1.18.2
docker pull k8s.gcr.io/kube-controller-manager:v1.18.2
docker pull k8s.gcr.io/kube-scheduler:v1.18.2
docker pull k8s.gcr.io/kube-proxy:v1.18.2
docker pull k8s.gcr.io/pause:3.2
docker pull k8s.gcr.io/etcd:3.4.3-0
docker pull k8s.gcr.io/coredns:1.6.7
docker image ls -a
curl https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz --output $HOME/pkg/3-k8s/helm.tar.gz
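
get.helm.sh publishes a checksum next to each release tarball; comparing it before going offline is a cheap safeguard (a sketch, assuming the .sha256 companion file exists for this release):

curl -s https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz.sha256; echo
sha256sum $HOME/pkg/3-k8s/helm.tar.gz
# the two hashes printed above should match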
  • Export Kubernetes (K8s) related Docker images to individual tar files.
docker save k8s.gcr.io/kube-apiserver:v1.18.2 > $HOME/pkg/3-k8s/kube-apiserver.tar
docker save k8s.gcr.io/kube-controller-manager:v1.18.2 > $HOME/pkg/3-k8s/kube-controller-manager.tar
docker save k8s.gcr.io/kube-scheduler:v1.18.2 > $HOME/pkg/3-k8s/kube-scheduler.tar
docker save k8s.gcr.io/kube-proxy:v1.18.2 > $HOME/pkg/3-k8s/kube-proxy.tar
docker save k8s.gcr.io/pause:3.2 > $HOME/pkg/3-k8s/pause.tar
docker save k8s.gcr.io/etcd:3.4.3-0 > $HOME/pkg/3-k8s/etcd.tar
docker save k8s.gcr.io/coredns:1.6.7 > $HOME/pkg/3-k8s/coredns.tar
ls -halt $HOME/pkg/3-k8s/*.tar
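
One option on the offline side is to load these tars into the registry:2.7.1 container pulled earlier and let the nodes pull from it, instead of docker-loading on every node. A rough sketch, assuming a hypothetical local registry at localhost:5000 (this step belongs to the offline half of the process):

docker load -i $HOME/pkg/3-k8s/kube-apiserver.tar
docker tag k8s.gcr.io/kube-apiserver:v1.18.2 localhost:5000/kube-apiserver:v1.18.2
docker push localhost:5000/kube-apiserver:v1.18.2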
  • Install Calico for CNI

Self-managed on-premises

curl https://docs.projectcalico.org/manifests/calico.yaml -O
# curl https://docs.projectcalico.org/manifests/calico-typha.yaml -O
# curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -O

mkdir $HOME/pkg/4-cni
mkdir $HOME/pkg/4-cni/opt-cni-bin
curl -L -o $HOME/pkg/4-cni/opt-cni-bin/calico https://github.com/projectcalico/cni-plugin/releases/download/v3.16.5/calico-amd64
chmod 755 $HOME/pkg/4-cni/opt-cni-bin/calico
curl -L -o $HOME/pkg/4-cni/opt-cni-bin/calico-ipam https://github.com/projectcalico/cni-plugin/releases/download/v3.16.5/calico-ipam-amd64
chmod 755 $HOME/pkg/4-cni/opt-cni-bin/calico-ipam

mkdir $HOME/pkg/4-cni/etc-cni-net.d

cat > $HOME/pkg/4-cni/etc-cni-net.d/10-calico.conflist << EOF
{
  "name": "k8s-pod-network",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "calico",
      "log_level": "info",
      "log_file_path": "/var/log/calico/cni/cni.log",
      "datastore_type": "kubernetes",
      "nodename": "mec02",
      "mtu": 1440,
      "ipam": {
        "type": "calico-ipam"
      },
      "policy": {
        "type": "k8s"
      },
      "kubernetes": {
        "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
      }
    },
    {
      "type": "portmap",
      "snat": true,
      "capabilities": {"portMappings": true}
    },
    {
      "type": "bandwidth",
      "capabilities": {"bandwidth": true}
    }
  ]
}
EOF
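
A quick syntax check on the generated conflist (stock CentOS 7 python includes json.tool and exits non-zero on invalid JSON):

python -m json.tool < $HOME/pkg/4-cni/etc-cni-net.d/10-calico.conflist > /dev/null && echo '10-calico.conflist: valid JSON'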

cat > $HOME/pkg/4-cni/etc-cni-net.d/calico-kubeconfig << EOF
# Kubeconfig file for Calico CNI plugin.
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    server: https://[10.233.64.1]:443
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJd01URXhPVEE0TXpReE5Gb1hEVE13TVRFeE56QTRNelF4TkZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT3U5CmlTY1ZaY3hxMmhNREtXSDB4MFFhTkEzMWREZ0ROejU0eXBOMVAyanZIaE5lTjVnMWp2N0x2UW5oMndlSzVGNksKOFJadVRnVjdiV01teHR3ZHdoUlVhK3M1ZnpkQ09QRXJQV0hKWG1IbXdrRitFbW1nYmxMNkVTTjhvVnBBd2JYMgo1Mm9nT1RoTGU0Z3dramF0endZbUZsZWhYc1J3T3RDeHAxMkRhaWR5dVJyNHRka2FEeDA2eFBvQWpIQXh0OE5KCktIY0dqNEhrODhFS2dHT05vRnJaakVwZ2tIbHFXRTVKK1FETTJKZis4ODNHZ2dyZ0NzU0dVbmpSWExhaVdFUVIKY3dndTN1SWErUGR4Vm1tME1PM01ZMUUzRkFWN3NGekNyZGhDUi9JR1RwbTZBYktrMmNxMG0wUjN2Q1NkM01CTAoyWHM5UDN6dmIrNTZOMDdaUUg4Q0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFNY25RVWFlS2NPcHRMWFV4ekF1TllTY29iR0QKVUtZbVdOekVLYU04cEVENGtCRTZUeEg3Z2hGWDFhR2VweHFvbTVTdndqblBES2dudHhLek55TUpVYktaV3RpUgp6VVV1Slo4aGJVSW15bThvNXR4TmdDTUg2UVNQNUhjZVFESE5ZekJxOTVXUjluOUJTRDc4MWVGODZOZHpOcU1qCjlCSU9ya1V0U2w1MW50VE8reTA4M2pmc3FreEtscHEzVUVxSHQrL1IvSC8rNE9jZ3h3NGtRUXRNL0h6ZVlua3MKdURPMTFwcGRGc2s1cWloK1RXTEFhNEVJQ3dEU0kvNDlXd2VPMko1K2pxcytZN1pJNDI4M3B2aERLcFNRbWEzYwpNR20wRmhUZXFTTmMyQ3VtODNpZU5VRUtyaG5Fd2g3eEh5T3B4NlFmVUYxWDVQQWdMUU1kMTZicVVEVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
users:
- name: calico
  user:
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6IkFmejl6Wi02ZzZRd2hzcXpHbmFxVi1SWVh2MzVsTzc0RTVOa1RtbDhYVlUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJjYWxpY28tbm9kZS10b2tlbi03endiaiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJjYWxpY28tbm9kZSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjE1MGU4ZGRlLTkzOWMtNDQxMi05YTZlLTkzODZhZTM0YjkzYyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTpjYWxpY28tbm9kZSJ9.Imld2vQwbUTC7qdn15b8IjmqCaH2ZHu_MWXOaMWBZ4gLxvsQcd44jWVaCAOTQ0UKbEkjo_RT2mpXKi5OiGmq7CE1QMdUOv28bcRe6eBbRLJ7DASA8fhlZat6vOkt7TVYXRUTMq_O0_3VTAg95Rlb1GH7wSonaHWo5C4b3gYmMiaawioAvNSPHqXwmEC2bJrOpD1gOs1Hlu7FCPKQ2JdjR8tU6Cgr4MbJn0OkMoF6Q3BcBwMtiOMbFvYmmbL7S8vCUT3tWlYiyOgKZs2MUHVujIKxFR0p2-sARRoglCqTyJQwqAzoBRLNI5EbeqE1bewEgqyfcNq6_2-G2vS1OYQoNA
contexts:
- name: calico-context
  context:
    cluster: local
    user: calico
current-context: calico-context
EOF

cat > $HOME/pkg/4-cni/etc-cni-net.d/calico.conflist.template << EOF
{
  "name": "cni0",
  "cniVersion":"0.3.1",
  "plugins":[
    {
      "nodename": "mec02",
      "type": "calico",
      "log_level": "info",
      "etcd_endpoints": "https://192.168.7.221:2379",
      "etcd_cert_file": "/etc/calico/certs/cert.crt",
      "etcd_key_file": "/etc/calico/certs/key.pem",
      "etcd_ca_cert_file": "/etc/calico/certs/ca_cert.crt",
      "ipam": {
        "type": "calico-ipam",
        "assign_ipv4": "true",
        "ipv4_pools": ["172.18.128.0/17"]
      },
      "policy": {
        "type": "k8s"
      },
      "kubernetes": {
        "kubeconfig": "__KUBECONFIG_FILEPATH__"
      }
    },
    {
      "type":"portmap",
      "capabilities":{
        "portMappings":true
      }
    }
  ]
}
EOF

docker pull calico/cni:v3.16.5
docker pull calico/pod2daemon-flexvol:v3.16.5
docker pull calico/node:v3.16.5
docker pull calico/kube-controllers:v3.16.5

docker save calico/cni:v3.16.5 > $HOME/pkg/4-cni/cni.tar
docker save calico/pod2daemon-flexvol:v3.16.5 > $HOME/pkg/4-cni/pod2daemon-flexvol.tar
docker save calico/node:v3.16.5 > $HOME/pkg/4-cni/node.tar
docker save calico/kube-controllers:v3.16.5 > $HOME/pkg/4-cni/kube-controllers.tar

ls -halt $HOME/pkg/4-cni/*.tar
mkdir $HOME/pkg/6-mec
git clone http://${GIT}/devCenter/mec.git $HOME/pkg/6-mec
  • Copy files to flash drive
# Mount Drive
mkdir -p /media/usb
# ls /dev/sd* -hal
fdisk -l
mount -t vfat /dev/sdc1 /media/usb
ls -halt /media/usb

# rm -rf /media/usb/pkg

# Copy files
cp -rv $HOME/pkg /media/usb
ls -halt /media/usb
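
Optionally verify the copy with checksums before unmounting (writes a temporary manifest to /tmp):

(cd $HOME/pkg && find . -type f -exec sha256sum {} + > /tmp/pkg.sha256)
(cd /media/usb/pkg && sha256sum -c /tmp/pkg.sha256)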

# Unmount Drive
umount /media/usb
ls -halt /media/usb

# How to find which users are using a given directory:
# fuser -cu {dir_path}
# How to kill the processes using it:
# fuser -ck {dir_path}