- Gateway host: bridged networking with DHCP disabled, static IP 192.168.1.254/24
- Provisioner host: base Ubuntu 20.04 release, static IP 192.168.1.1/24
# Install prerequisite packages.
# -y makes the install non-interactive so these steps can be pasted or
# run as a script without stopping at the confirmation prompt.
sudo apt-get update
sudo apt-get install -y \
  apt-transport-https \
  ca-certificates \
  curl \
  gnupg-agent \
  software-properties-common \
  git \
  jq \
  ifupdown
# Add Docker's apt repository and install the engine.
# `apt-key add` is deprecated: it trusts the key for ALL apt sources.
# Instead, store the key in /etc/apt/keyrings and scope it to the Docker
# repo with `signed-by`, per Docker's current install instructions.
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg \
  | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo \
  "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) \
  stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
# -y: non-interactive, same reasoning as the prerequisite install above.
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
# Install standalone docker-compose v1.29.2.
# -f makes curl fail on HTTP errors instead of saving an error page as
# the "binary"; -sS keeps it quiet but still prints real errors.
sudo curl -fsSL \
  "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" \
  -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# Let the invoking user talk to the docker daemon without sudo.
# The original had a literal "<your user>" placeholder, which fails if
# run verbatim; "$USER" targets the current user. Log out/in (or run
# `newgrp docker`) for the group membership to take effect.
sudo usermod -aG docker "$USER"
# Fetch the Tinkerbell sandbox at the pinned release and point it at the
# provisioner's static address (see the network description at the top).
git clone https://github.com/tinkerbell/sandbox.git
# SC2164: bail out if the clone failed rather than running the remaining
# commands in the wrong directory.
cd sandbox || exit 1
git checkout v0.6.0
export TINKERBELL_HOST_IP=192.168.1.1
cd deploy/compose || exit 1
# Stage extra action images for the sandbox's internal registry: every
# "<source> <target>" pair listed in registry_images.txt gets mirrored
# when the stack starts. Note this appends, so re-running duplicates lines.
printf '%s\n' \
  'quay.io/tinkerbell-actions/kexec:v1.0.0 kexec:v1.0.0' \
  'quay.io/tinkerbell-actions/oci2disk:v1.0.0 oci2disk:v1.0.0' \
  >> registry/registry_images.txt
docker-compose up -d
# TODO: follow up on not needing to pull/tag/push images to internal registry for actions
# TODO: requires changes to tink-worker to avoid internal registry use
# Write one Tinkerbell hardware definition file per machine.
# The five machines share an identical template and differ only in their
# UUID, hostname, IPv4 address, and MAC, so the JSON is generated from a
# single parameterized function instead of five hand-maintained copies.
#
# Arguments:
#   $1 - machine letter (file suffix: hardware-data-<letter>.json)
#   $2 - hardware/instance UUID
#   $3 - hostname
#   $4 - IPv4 address
#   $5 - MAC address
write_hardware_data() {
  local letter=$1 id=$2 host=$3 addr=$4 mac=$5
  cat > "hardware-data-${letter}.json" <<EOF
{
  "id": "${id}",
  "metadata": {
    "facility": {
      "facility_code": "onprem"
    },
    "instance": {
      "id": "${id}",
      "hostname": "${host}",
      "network": {
        "addresses": [
          {
            "address_family": 4,
            "public": false,
            "address": "${addr}"
          }
        ]
      },
      "storage": {
        "disks": [{"device": "/dev/sda"}]
      }
    },
    "state": ""
  },
  "network": {
    "interfaces": [
      {
        "dhcp": {
          "arch": "x86_64",
          "hostname": "${host}",
          "ip": {
            "address": "${addr}",
            "gateway": "192.168.1.254",
            "netmask": "255.255.255.0"
          },
          "mac": "${mac}",
          "name_servers": ["8.8.8.8"],
          "uefi": true
        },
        "netboot": {
          "allow_pxe": true,
          "allow_workflow": true
        }
      }
    ]
  }
}
EOF
}

# One record per machine: letter  uuid  hostname  ip  mac
while read -r letter id host addr mac; do
  write_hardware_data "$letter" "$id" "$host" "$addr" "$mac"
done <<'MACHINES'
a 3f0c4d3d-00ef-4e46-983d-0e6b38da827a hw-a 192.168.1.105 a8:a1:59:66:42:89
b ee102515-f6e1-491e-96ff-d14858f171cb hw-b 192.168.1.106 a8:a1:59:66:42:ad
c 1f7dcb28-add8-4e6a-8d74-26f1f7d86a2c hw-c 192.168.1.107 a8:a1:59:66:42:ba
d c5481ddc-3527-4a24-9ce1-09bf935e0f7d hw-d 192.168.1.108 a8:a1:59:66:41:f0
e cfe581a2-1e0e-48f7-a24f-a592c1aa914e hw-e 192.168.1.109 a8:a1:59:66:42:a7
MACHINES
for i in a b c d e; do docker exec -i compose_tink-cli_1 tink hardware push < hardware-data-$i.json; done
Configure and start the Tilt development environment:
# Clone the Tinkerbell CAPI provider next to cluster-api; tilt runs from
# the cluster-api checkout and loads the provider via provider_repos.
git clone https://github.com/tinkerbell/cluster-api-provider-tinkerbell
git clone https://github.com/kubernetes-sigs/cluster-api
cd cluster-api
# tilt-settings.json enables the tinkerbell provider plus the kubeadm
# bootstrap/control-plane providers, and injects the provisioner address
# (192.168.1.1, from the network description at the top of this file)
# into the provider's kustomize variables.
# NOTE(review): provider_repos points at ../../tinkerbell/…, but the clone
# above puts the provider at ../cluster-api-provider-tinkerbell relative to
# this directory — confirm the path matches the actual checkout layout.
cat > tilt-settings.json <<EOF
{
"default_registry": "gcr.io/detiber",
"provider_repos": ["../../tinkerbell/cluster-api-provider-tinkerbell"],
"enable_providers": ["tinkerbell", "kubeadm-bootstrap", "kubeadm-control-plane"],
"kustomize_substitutions": {
"EXP_CLUSTER_RESOURCE_SET": "true",
"TINKERBELL_GRPC_AUTHORITY": "192.168.1.1:42113",
"TINKERBELL_CERT_URL": "http://192.168.1.1:42114/cert",
"TINKERBELL_IP": "192.168.1.1"
}
}
EOF
# Local kind cluster acts as the CAPI management cluster; tilt builds and
# deploys the enabled providers into it.
kind create cluster
tilt up
# Create one Hardware custom resource per machine, linking each k8s-side
# name to its Tinkerbell hardware UUID (the same ids pushed via tink above).
# The five stanzas are identical apart from name/id, so build the manifest
# from a record loop instead of five hand-written copies.
: > testhardware.yml
while read -r name id; do
  cat >> testhardware.yml <<EOF
---
kind: Hardware
apiVersion: tinkerbell.org/v1alpha1
metadata:
  name: ${name}
spec:
  id: ${id}
EOF
done <<'HARDWARE'
hw-a 3f0c4d3d-00ef-4e46-983d-0e6b38da827a
hw-b ee102515-f6e1-491e-96ff-d14858f171cb
hw-c 1f7dcb28-add8-4e6a-8d74-26f1f7d86a2c
hw-d c5481ddc-3527-4a24-9ce1-09bf935e0f7d
hw-e cfe581a2-1e0e-48f7-a24f-a592c1aa914e
HARDWARE
kubectl create -f testhardware.yml
# Generate and create the "demo" workload cluster (1 control-plane node,
# 0 workers) from the provider's template. Available ubuntu-2004 image
# versions: https://github.com/tinkerbell/cluster-api-provider-tinkerbell/pkgs/container/cluster-api-provider-tinkerbell%2Fubuntu-2004
# CONTROL_PLANE_VIP must be a free address on the 192.168.1.0/24 segment;
# POD_CIDR must not overlap the host network.
CONTROL_PLANE_VIP=192.168.1.110 POD_CIDR=172.25.0.0/16 clusterctl generate cluster demo --from templates/cluster-template.yaml --control-plane-machine-count 1 --worker-machine-count 0 --kubernetes-version v1.20.11 | kubectl create -f -
# Wait for the workload cluster's kubeconfig secret to exist, then export it.
clusterctl get kubeconfig demo > demo.kubeconfig
# Wait for the workload apiserver to respond and for the Node to be present.
kubectl --kubeconfig=demo.kubeconfig get nodes
# Deploy cilium as the CNI (nodes stay NotReady until a CNI is installed).
kubectl --kubeconfig=demo.kubeconfig create -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml
- Power cycle the target machines (hw-a … hw-e as needed) so they netboot through Tinkerbell and the cluster comes up.