#cloud-config
# cloud-init configuration to disable resource disk formatting
disk_setup:
  ephemeral0:
    table_type: gpt
    layout: [100]
    overwrite: false
# Disable mounting the resource disk
mounts:
  - [ ephemeral0, null ]
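# Example of passing this file when creating the VM (assumed Azure CLI invocation;
# resource group, VM name, image and size are placeholders):
#   az vm create -g <resource-group> -n <vm-name> --image <image> --size <size> \
#     --custom-data cloud-init.yaml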
#!/bin/bash
# Usage:
# sudo DEV_MAPPER_DEVICE=/dev/sdb1 KVM_INTEL_EPT_DISABLE=1 ./provision-kata.sh
# Omit DEV_MAPPER_DEVICE if you don't want to use an LVM thin pool for containerd.
# Omit KVM_INTEL_EPT_DISABLE if you don't want to disable EPT for kvm_intel.
#
# For DEV_MAPPER_DEVICE to work, the VM must have been created with cloud-init.yaml
# passed as custom data (an example invocation is included at the end of that file).
#
# After the script is run, you can apply the pod.yaml using `kubectl apply -f pod.yaml` and monitor progress using `kubectl get pods -w`
# Remove the pods using `kubectl delete -f pod.yaml`
set -xe
# Run as root
systemctl disable --now unattended-upgrades
apt-get update
# apt-transport-https may be a dummy package; if so, you can skip that package
apt-get install -y apt-transport-https ca-certificates curl gpg
# If the directory /etc/apt/keyrings does not exist, create it before running curl:
# mkdir -p -m 755 /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# This overwrites any existing configuration in /etc/apt/sources.list.d/kubernetes.list
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl containerd
apt-mark hold kubelet kubeadm kubectl
mkdir -p /etc/containerd
cat <<EOF >/etc/containerd/config.toml
# MUST HAVE THIS LINE
version = 2

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  SystemdCgroup = true
EOF
if [ -n "${DEV_MAPPER_DEVICE}" ]; then
echo "Using device ${DEV_MAPPER_DEVICE} for LVM thin pool creation."
# Create a physical volume if not already created
if ! pvs "${DEV_MAPPER_DEVICE}" &>/dev/null; then
pvcreate "${DEV_MAPPER_DEVICE}"
else
echo "Physical volume on ${DEV_MAPPER_DEVICE} already exists."
fi
# Create a volume group named "containerd" if it doesn't exist
if ! vgs containerd &>/dev/null; then
vgcreate containerd "${DEV_MAPPER_DEVICE}"
else
echo "Volume group 'containerd' already exists."
fi
# Create a thin pool LV named "containerd-thinpool" if it doesn't exist
if ! lvs containerd/containerd-thinpool &>/dev/null; then
lvcreate --type thin-pool --zero n -l 95%VG -n containerd-thinpool containerd
else
echo "Thin pool 'containerd-thinpool' already exists in VG 'containerd'."
fi
echo "LVM thin pool 'containerd-thinpool' is ready."
echo "Configuring containerd to use devmapper with thin pool containerd-thinpool"
cat <<EOF >> /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".containerd]
snapshotter = "devmapper"
[plugins."io.containerd.snapshotter.v1.devmapper"]
root_path = "/var/lib/containerd/devmapper"
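  # Device-mapper pool name for VG "containerd" / LV "containerd-thinpool":
  # dm names join VG and LV with a single "-" and double any "-" inside the names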
  pool_name = "containerd-containerd--thinpool"
  base_image_size = "10GB"
  async_remove = true
  discard_blocks = true
  fs_type = "xfs"
EOF
fi
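# Back up the minimal config, then let containerd expand it into a fully
# populated config (merging the overrides above with the built-in defaults)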
cp /etc/containerd/config.toml{,.bak}
containerd config dump >/etc/containerd/config.toml.new
mv /etc/containerd/config.toml{.new,}
systemctl restart containerd
systemctl enable --now containerd
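# Optional sanity check (only meaningful when DEV_MAPPER_DEVICE was set): the
# devmapper snapshotter should be listed with status "ok":
#   ctr plugins ls | grep devmapper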
cat <<EOF >/etc/sysctl.d/99-k8s.conf
net.ipv4.ip_forward = 1
fs.inotify.max_user_instances = 1024
EOF
sysctl --system --write
echo br_netfilter >>/etc/modules-load.d/k8s.conf
modprobe br_netfilter
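# Quick check: ip_forward should report 1, and the bridge-nf sysctls should now
# exist because br_netfilter is loaded:
#   sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables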
# Optional: disable ept for kvm_intel
if [ -n "${KVM_INTEL_EPT_DISABLE}" ]; then
echo "options kvm-intel ept=0" >/etc/modprobe.d/kvm-intel.conf
# Reload kvm and kvm_intel unconditionally
modprobe -r kvm_intel || true
modprobe -r kvm || true
modprobe kvm
modprobe kvm_intel || true
fi
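# To confirm EPT is actually disabled (Intel hosts only):
#   cat /sys/module/kvm_intel/parameters/ept   # prints N when ept=0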
cat <<EOF >/tmp/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.33.1
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
maxPods: 250
EOF
kubeadm init --config /tmp/kubeadm-config.yaml
#kubeadm init --pod-network-cidr 10.244.0.0/16
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
if [ -n "${SUDO_USER}" ]; then
  home=$(getent passwd "${SUDO_USER}" | cut -d: -f6)
  mkdir -p $home/.kube
  cp -i /etc/kubernetes/admin.conf $home/.kube/config
  chown -R "$SUDO_USER" $home/.kube
fi
# Untaint the control-plane node so workloads can schedule on it, then install the pod network (flannel)
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
KATA_VERSION=3.17.0
kubectl apply -f "https://raw.githubusercontent.com/kata-containers/kata-containers/${KATA_VERSION}/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
kubectl apply -f "https://raw.githubusercontent.com/kata-containers/kata-containers/${KATA_VERSION}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
cat <<EOF >pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kata-nginx
spec:
  replicas: 150
  selector:
    matchLabels:
      app: kata-nginx
  template:
    metadata:
      labels:
        app: kata-nginx
    spec:
      runtimeClassName: kata-clh
      containers:
      - name: nginx
        image: nginx:stable
        resources:
          requests:
            cpu: "0"
          limits:
            cpu: "3"
EOF
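# The 150-replica Deployment above can also be scaled without editing the file, e.g.:
#   kubectl scale deployment kata-nginx --replicas=50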