- Bridged gateway with DHCP disabled, configured with a static IP of 192.168.1.254/24
- Base Ubuntu 20.04 release (the provisioner host), configured with a static IP of 192.168.1.5/24
# Refresh package indexes, then install the prerequisites for the Docker
# repository plus general tooling (git, jq, ifupdown).
# -y skips the interactive confirmation so the sequence can run unattended.
sudo apt-get update
sudo apt-get install -y \
  apt-transport-https \
  ca-certificates \
  curl \
  gnupg-agent \
  software-properties-common \
  git \
  jq \
  ifupdown
# Add Docker's official APT repository and install the engine.
# NOTE(review): apt-key is deprecated on newer Ubuntu releases; it still
# works on 20.04, but consider the /etc/apt/keyrings signed-by approach
# going forward.
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
  "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) \
  stable"
sudo apt-get update
# -y skips the interactive confirmation so the sequence can run unattended.
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
# Install docker-compose v1.26.0 and let the current user talk to the
# Docker daemon without sudo (group change takes effect at next login).
sudo curl -L \
  "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" \
  -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# The original "<your user>" placeholder would be parsed by the shell as
# redirections; use the invoking user directly.
sudo usermod -aG docker "$USER"
git clone https://github.com/tinkerbell/sandbox.git
cd sandbox
./generate-envrc.sh enp3s0 > .env
Modify the generated .env file as needed; for example, I changed TINKERBELL_CIDR from 29 to 24 to match the /24 network used here.
# Load the generated settings and run the provisioner setup
# (-E preserves the sourced environment across sudo).
source .env
sudo -E ./setup.sh
# setup.sh ran under sudo, so ~/.docker may end up root-owned; hand it
# back to the invoking user instead of a hard-coded account name.
sudo chown -R "$USER":"$USER" ~/.docker
cd ./deploy
docker-compose up -d
# Hack to work around limitations of current Tinkerbell event system (postgres triggers)
# NOTE: PGPASSWORD must be injected into the container with `docker exec -e`.
# A plain `PGPASSWORD=... docker exec ...` prefix only sets the variable for
# the docker client process on the host; psql inside the container never
# sees it and will prompt or fail to authenticate.
docker exec -e PGPASSWORD=tinkerbell deploy_db_1 psql -U tinkerbell -c 'drop trigger events_channel ON events;'
TODO: replace osie with tinkie
TODO: custom build of linuxkit for tinkie (if needed)
hosts:
A:
MAC: b8:ae:ed:3f:9c:bf
ID: 097d8c63-d83d-4940-b94d-c6256aa7685a
IP: 192.168.1.105
B:
MAC: b8:ae:ed:3f:9c:b6
ID: df5db23e-f69f-4faf-b4c2-6e9b6e47704b
IP: 192.168.1.106
C:
MAC: b8:ae:ed:3f:9d:29
ID: ba1a14d7-a593-4352-9028-4a0976e44c7c
IP: 192.168.1.107
D:
MAC: b8:ae:ed:3f:9c:28
ID: c24dbe9d-524f-4866-aeb8-e7c38800191d
IP: 192.168.1.108
E:
MAC: b8:ae:ed:39:d8:ef
ID: f4d0484f-53fb-4f9c-9bce-c7705ec84c3e
IP: 192.168.1.109
TODO: automate this with uuidgen
# Hardware record for host A (see inventory above). The quoted 'EOF'
# delimiter keeps the shell from expanding anything inside the JSON payload,
# so the file always contains exactly what is written here.
cat > hardware-data-a.json <<'EOF'
{
  "id": "097d8c63-d83d-4940-b94d-c6256aa7685a",
  "metadata": {
    "facility": {
      "facility_code": "onprem"
    },
    "instance": {},
    "state": ""
  },
  "network": {
    "interfaces": [
      {
        "dhcp": {
          "arch": "x86_64",
          "ip": {
            "address": "192.168.1.105",
            "gateway": "192.168.1.254",
            "netmask": "255.255.255.0"
          },
          "mac": "b8:ae:ed:3f:9c:bf",
          "name_servers": ["8.8.8.8"],
          "uefi": true
        },
        "netboot": {
          "allow_pxe": true,
          "allow_workflow": true
        }
      }
    ]
  }
}
EOF
# Hardware record for host B (see inventory above). The quoted 'EOF'
# delimiter keeps the shell from expanding anything inside the JSON payload.
cat > hardware-data-b.json <<'EOF'
{
  "id": "df5db23e-f69f-4faf-b4c2-6e9b6e47704b",
  "metadata": {
    "facility": {
      "facility_code": "onprem"
    },
    "instance": {},
    "state": ""
  },
  "network": {
    "interfaces": [
      {
        "dhcp": {
          "arch": "x86_64",
          "ip": {
            "address": "192.168.1.106",
            "gateway": "192.168.1.254",
            "netmask": "255.255.255.0"
          },
          "mac": "b8:ae:ed:3f:9c:b6",
          "name_servers": ["8.8.8.8"],
          "uefi": true
        },
        "netboot": {
          "allow_pxe": true,
          "allow_workflow": true
        }
      }
    ]
  }
}
EOF
# Hardware record for host C (see inventory above). The quoted 'EOF'
# delimiter keeps the shell from expanding anything inside the JSON payload.
cat > hardware-data-c.json <<'EOF'
{
  "id": "ba1a14d7-a593-4352-9028-4a0976e44c7c",
  "metadata": {
    "facility": {
      "facility_code": "onprem"
    },
    "instance": {},
    "state": ""
  },
  "network": {
    "interfaces": [
      {
        "dhcp": {
          "arch": "x86_64",
          "ip": {
            "address": "192.168.1.107",
            "gateway": "192.168.1.254",
            "netmask": "255.255.255.0"
          },
          "mac": "b8:ae:ed:3f:9d:29",
          "name_servers": ["8.8.8.8"],
          "uefi": true
        },
        "netboot": {
          "allow_pxe": true,
          "allow_workflow": true
        }
      }
    ]
  }
}
EOF
# Hardware record for host D (see inventory above). The quoted 'EOF'
# delimiter keeps the shell from expanding anything inside the JSON payload.
cat > hardware-data-d.json <<'EOF'
{
  "id": "c24dbe9d-524f-4866-aeb8-e7c38800191d",
  "metadata": {
    "facility": {
      "facility_code": "onprem"
    },
    "instance": {},
    "state": ""
  },
  "network": {
    "interfaces": [
      {
        "dhcp": {
          "arch": "x86_64",
          "ip": {
            "address": "192.168.1.108",
            "gateway": "192.168.1.254",
            "netmask": "255.255.255.0"
          },
          "mac": "b8:ae:ed:3f:9c:28",
          "name_servers": ["8.8.8.8"],
          "uefi": true
        },
        "netboot": {
          "allow_pxe": true,
          "allow_workflow": true
        }
      }
    ]
  }
}
EOF
# Hardware record for host E (see inventory above). The quoted 'EOF'
# delimiter keeps the shell from expanding anything inside the JSON payload.
cat > hardware-data-e.json <<'EOF'
{
  "id": "f4d0484f-53fb-4f9c-9bce-c7705ec84c3e",
  "metadata": {
    "facility": {
      "facility_code": "onprem"
    },
    "instance": {},
    "state": ""
  },
  "network": {
    "interfaces": [
      {
        "dhcp": {
          "arch": "x86_64",
          "ip": {
            "address": "192.168.1.109",
            "gateway": "192.168.1.254",
            "netmask": "255.255.255.0"
          },
          "mac": "b8:ae:ed:39:d8:ef",
          "name_servers": ["8.8.8.8"],
          "uefi": true
        },
        "netboot": {
          "allow_pxe": true,
          "allow_workflow": true
        }
      }
    ]
  }
}
EOF
# Push each host's hardware record into Tinkerbell via the tink CLI container.
for host in a b c d e; do
  docker exec -i deploy_tink-cli_1 tink hardware push < "hardware-data-${host}.json"
done
# Mirror the ubuntu-install action image into the local registry so workers
# can pull it during workflows.
docker pull detiber/ubuntu-install
# NOTE(review): 192.168.1.1 does not match any address mentioned in this doc
# (provisioner is 192.168.1.5, gateway 192.168.1.254) — confirm the registry
# host before pushing.
docker tag detiber/ubuntu-install 192.168.1.1/ubuntu-install
docker push 192.168.1.1/ubuntu-install
TODO: set up repos
# Create a local kind Kubernetes cluster, then start Tilt.
# NOTE(review): tilt up expects a Tiltfile in the current directory —
# presumably this is run from the provider repo; confirm the working dir.
kind create cluster
tilt up
# Register one Hardware custom resource per host (A-E), each referencing
# its Tinkerbell hardware ID from the records pushed above.
# NOTE: metadata/spec children must be indented to form valid YAML; the
# quoted 'EOF' prevents any shell expansion inside the manifest text.
cat <<'EOF' | kubectl create -f -
---
kind: Hardware
apiVersion: tinkerbell.org/v1alpha1
metadata:
  name: demo-a
spec:
  id: 097d8c63-d83d-4940-b94d-c6256aa7685a
---
kind: Hardware
apiVersion: tinkerbell.org/v1alpha1
metadata:
  name: demo-b
spec:
  id: df5db23e-f69f-4faf-b4c2-6e9b6e47704b
---
kind: Hardware
apiVersion: tinkerbell.org/v1alpha1
metadata:
  name: demo-c
spec:
  id: ba1a14d7-a593-4352-9028-4a0976e44c7c
---
kind: Hardware
apiVersion: tinkerbell.org/v1alpha1
metadata:
  name: demo-d
spec:
  id: c24dbe9d-524f-4866-aeb8-e7c38800191d
---
kind: Hardware
apiVersion: tinkerbell.org/v1alpha1
metadata:
  name: demo-e
spec:
  id: f4d0484f-53fb-4f9c-9bce-c7705ec84c3e
EOF
TODO: generate cluster template from repo using clusterctl
TODO: modify template to add users for troubleshooting
TODO: try to do it without setting an actual password, only an ssh key
# Create the test workload cluster from the manifest.
# NOTE(review): testCluster.yaml is produced outside these notes (see the
# clusterctl TODO above) — confirm it exists before applying.
kubectl create -f testCluster.yaml
# Workflow template that wipes the first non-swap disk on a worker.
# IMPORTANT: the heredoc delimiter is quoted ('EOF') so that
# $(lsblk ...) and $target_device are written into the template LITERALLY
# and evaluated on the worker at workflow time. With an unquoted delimiter
# the shell writing this file would expand them immediately on the
# provisioner, baking the provisioner's own disk name into the template.
cat > wipe-disk-template.yaml <<'EOF'
version: "0.1"
name: wipe_disks
global_timeout: 6000
tasks:
  - name: "wipe_disk"
    worker: "{{.device_1}}"
    volumes:
      - /dev:/dev
    actions:
      - name: "disk-wipe"
        image: ubuntu-install
        command:
          - sh
          - -c
          - |
            target_device=$(lsblk -l | grep disk | grep -v SWAP | head -n 1 | cut -d ' ' -f 1)
            sgdisk -Z /dev/$target_device
EOF
# Create the wipe-disk template, then one workflow per host IP.
# The CLI output looks like "label: <id>" (hence the cut); strip the spaces
# explicitly so the id can be safely quoted — the original relied on
# unquoted word-splitting to drop the leading space left by cut.
template_id=$(docker exec deploy_tink-cli_1 tink template create --name wipe-disk < ./wipe-disk-template.yaml)
template_id=$(echo "$template_id" | cut -d ':' -f 2 | tr -d ' ')
for worker_ip in 192.168.1.105 192.168.1.106 192.168.1.107 192.168.1.108 192.168.1.109; do
  docker exec deploy_tink-cli_1 tink workflow create -t "$template_id" -r "{\"device_1\": \"$worker_ip\"}"
done