# Tested on a Rocky Linux 8.4 VM on DigitalOcean. Run everything as root.
# Based on RKE2, Cilium, Nginx and Longhorn.
# SELinux
# Show the current mode, switch to permissive for the running system, then
# persist the change so it survives reboots.
sestatus
# Runtime-only change; on its own it does not survive a reboot.
setenforce 0
# Persist via the config file. --follow-symlinks is needed because
# /etc/sysconfig/selinux is a symlink to /etc/selinux/config; the cat echoes
# the result so the change can be eyeballed.
sed -i --follow-symlinks 's/^SELINUX=.*/SELINUX=permissive/g' /etc/sysconfig/selinux && cat /etc/sysconfig/selinux
sestatus
# Swap
# Kubernetes expects swap to be disabled; show memory before and after so the
# change is visible.
free -h
# Everything in this runbook is executed as root (see file header), so the
# stray 'sudo' from the original is unnecessary and may not even be installed.
swapoff -a
# Comment out swap entries in /etc/fstab so the change persists across
# reboots; a backup is kept at /etc/fstab.bak.
sed -i.bak -r 's/(.+ swap .+)/#\1/' /etc/fstab
free -h
# Update the system and install base tooling, then reboot to pick up any
# updated kernel before the cluster components are installed.
dnf update -y
# wget/curl/git are used by the download steps below; nfs-utils and
# iscsi-initiator-utils are presumably prerequisites for the Longhorn storage
# installed at the end — TODO confirm against the Longhorn install docs.
dnf install -y wget git curl nfs-utils iscsi-initiator-utils
# Note: the reboot interrupts this runbook; continue from the next line after
# the machine is back up.
reboot
# Directory all hand-installed binaries go into.
BINARY_DIR="/usr/local/bin"
cd /tmp
# Helm
# Pin the version once so the download, extraction and cleanup can never
# disagree (the original hard-coded it in two places with a glob in between).
HELM_VERSION="v3.8.0"
wget "https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz"
tar -zxvf "helm-${HELM_VERSION}-linux-amd64.tar.gz"
mv linux-amd64/helm "$BINARY_DIR/helm"
chmod +x "$BINARY_DIR/helm"
# Remove both the tarball and the extracted directory (the original left
# /tmp/linux-amd64 behind).
rm -rf "helm-${HELM_VERSION}-linux-amd64.tar.gz" linux-amd64
# Kubectl
# Resolve the latest stable version tag, then download that kubectl binary.
curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x ./kubectl
# Quote the expansion (SC2086) so the command is safe regardless of the
# BINARY_DIR value.
mv ./kubectl "$BINARY_DIR/kubectl"
# Shell completion plus convenience aliases for day-to-day use.
dnf install -y bash-completion
echo 'alias k="kubectl"' >>~/.bashrc
echo 'alias kgp="kubectl get pods"' >>~/.bashrc
echo 'alias kgn="kubectl get nodes"' >>~/.bashrc
echo 'alias kga="kubectl get all -A"' >>~/.bashrc
echo 'source <(kubectl completion bash)' >>~/.bashrc
# Make completion work for the short alias 'k' as well.
echo 'complete -F __start_kubectl k' >>~/.bashrc
# Kubectl helpers
# Krew installation
# Upstream installer snippet from the krew docs: runs in a subshell so the
# temp working directory and the 'set -x' tracing do not leak into this
# shell; the && chain aborts the subshell on the first failure.
(
set -x; cd "$(mktemp -d)" &&
OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
KREW="krew-${OS}_${ARCH}" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
tar zxvf "${KREW}.tar.gz" &&
./"${KREW}" install krew
)
# Krew places plugin binaries under ~/.krew/bin; add it to PATH and reload.
echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >>~/.bashrc
source ~/.bashrc
# Install kubens and kubectx
# (krew plugin names are 'ctx' and 'ns'.)
kubectl krew install ctx
kubectl krew install ns
# Install fzf to use kubens and kubectx in interactive mode
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install --all
# Add aliases to bashrc
# krew exposes the plugins as kubectl-ctx / kubectl-ns executables on PATH.
echo 'alias kctx="kubectl-ctx"' >>~/.bashrc
echo 'alias kns="kubectl-ns"' >>~/.bashrc
# Finally don't forget to source the bashrc again:
source ~/.bashrc
# RKE2
curl -sfL https://get.rke2.io | sh -
mkdir -p /etc/rancher/rke2/
# Look up the droplet's public IP once. -s suppresses the progress meter and
# -f makes a failed lookup yield an empty string instead of an HTML error
# page ending up inside the config.
PUBLIC_IP="$(curl -sf ifconfig.me)"
# Truncate with '>' (the original appended with '>>', so re-running the
# runbook duplicated every key in config.yaml).
cat <<EOF > /etc/rancher/rke2/config.yaml
write-kubeconfig-mode: "0644"
tls-san:
- "k8s.${PUBLIC_IP}.xip.puzzle.ch"
cni: "none"
disable-kube-proxy: "true"
cluster-cidr: "100.64.0.0/14"
service-cidr: "100.68.0.0/16"
cluster-dns: "100.68.0.10"
disable:
- rke2-ingress-nginx
EOF
systemctl enable rke2-server --now
# -p so a re-run doesn't fail on the already-existing directory.
mkdir -p ~/.kube
cp /etc/rancher/rke2/rke2.yaml ~/.kube/config
chmod 600 ~/.kube/config
# Note: The node won't be ready until the CNI is successfully installed!
mkdir -p cilium
helm repo add cilium https://helm.cilium.io/
helm repo update
# '>' instead of '>>' keeps the values file idempotent across re-runs; the
# unquoted EOF deliberately lets $(curl ...) expand. -sf keeps curl noise and
# error pages out of the rendered value.
cat <<EOF > cilium/values.yaml
kubeProxyReplacement: "strict"
# The API server endpoint must be given explicitly because kube-proxy is
# disabled in the RKE2 config above.
k8sServiceHost: k8s.$(curl -sf ifconfig.me).xip.puzzle.ch
k8sServicePort: 6443
# Since we only have 1 node, we only need 1 replica:
operator:
  replicas: 1
# Must match the cluster-cidr set in the RKE2 config.
ipam:
  operator:
    clusterPoolIPv4PodCIDRList:
    - "100.64.0.0/14"
EOF
helm upgrade -i cilium cilium/cilium \
  --version 1.11.2 \
  --namespace kube-system \
  -f cilium/values.yaml
mkdir -p ingress-nginx
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
# Quoted 'EOF' → no shell expansion inside the heredoc. '>' (not '>>') keeps
# re-runs idempotent, and the body restores the YAML nesting the original
# lacked (a flat body would not parse as the intended values).
cat <<'EOF' > ingress-nginx/values.yaml
controller:
  dnsPolicy: ClusterFirstWithHostNet
  # Bind directly to the node's network so ports 80/443 are reachable
  # without a LoadBalancer.
  hostNetwork: true
  kind: "DaemonSet"
  watchIngressWithoutClass: true
  publishService:
    enabled: false
  service:
    enabled: true
    type: ClusterIP
  podSecurityPolicy:
    enabled: true
  serviceAccount:
    create: true
  admissionWebhooks:
    enabled: false
EOF
helm upgrade -i --create-namespace nginx ingress-nginx/ingress-nginx \
  --version 4.0.18 \
  --namespace ingress-nginx \
  -f ingress-nginx/values.yaml
mkdir -p longhorn
helm repo add longhorn https://charts.longhorn.io
helm repo update
# Single-node cluster → one replica of everything. '>' (not '>>') keeps
# re-runs idempotent, and the body restores the YAML nesting the original
# lacked (a flat body would not parse as the intended values).
cat <<'EOF' > longhorn/values.yaml
persistence:
  defaultClassReplicaCount: 1
csi:
  attacherReplicaCount: 1
  provisionerReplicaCount: 1
  resizerReplicaCount: 1
  snapshotterReplicaCount: 1
EOF
helm upgrade -i --create-namespace longhorn longhorn/longhorn \
  --version 1.2.3 \
  --namespace longhorn-system \
  -f longhorn/values.yaml