Skip to content

Instantly share code, notes, and snippets.

@danehans
Last active January 10, 2017 18:34
Show Gist options
  • Select an option

  • Save danehans/d67111d71a7babe377ad0f25710be4f5 to your computer and use it in GitHub Desktop.

Select an option

Save danehans/d67111d71a7babe377ad0f25710be4f5 to your computer and use it in GitHub Desktop.
coreos-bm setup
$ systemctl cat bootcfg
# /etc/systemd/system/bootcfg.service
[Unit]
Description=coreos-baremetal project
After=docker.service
Requires=docker.service
[Service]
Restart=always
TimeoutStartSec=0
ExecStart=/usr/bin/docker run --net=host --rm \
--name bootcfg \
-v /home/localadmin/code/go/src/github.com/coreos/coreos-baremetal/examples:/var/lib/bootcfg:Z \
-v /home/localadmin/code/go/src/github.com/coreos/coreos-baremetal/examples/groups/etcd:/var/lib/bootcfg/groups:Z \
-v /home/localadmin/code/go/src/github.com/coreos/coreos-baremetal/examples/etc/bootcfg:/etc/bootcfg:Z \
coreos/bootcfg:7217cf2e4cbfd732e922fb243acee02ac963e905 \
-address=10.30.118.133:8080 -log-level=debug \
-ca-file=/etc/bootcfg/ca.crt -cert-file=/etc/bootcfg/server.crt -key-file=/etc/bootcfg/server.key \
-rpc-address=10.30.118.133:8081
[Install]
WantedBy=multi-user.target
$ systemctl cat dnsmasq
# /etc/systemd/system/dnsmasq.service
[Unit]
Description=Dnsmasq running DHCP, proxy DHCP, DNS, and/or TFTP
After=docker.service
Requires=docker.service
[Service]
Restart=always
TimeoutStartSec=0
ExecStart=/usr/bin/docker run --rm --cap-add=NET_ADMIN --net=host \
--name dnsmasq \
coreos/dnsmasq:latest \
--no-daemon \
--log-dhcp \
--log-queries \
--dhcp-range=10.30.118.145,10.30.118.150 \
--enable-tftp \
--tftp-root=/var/lib/tftpboot \
--dhcp-userclass=set:ipxe,iPXE \
--dhcp-boot=tag:#ipxe,undionly.kpxe \
--dhcp-boot=tag:ipxe,http://gem-master01.ctocllab.cisco.com:8080/boot.ipxe \
--dhcp-option=3,10.30.118.129 \
--address=/gem-master01.ctocllab.cisco.com/10.30.118.133
[Install]
WantedBy=multi-user.target
[localadmin@gem-master01 examples]$ pwd
/home/localadmin/code/go/src/github.com/coreos/coreos-baremetal/examples
[localadmin@gem-master01 examples]$ cat groups/bootkube/node134.json
{
"id": "node134",
"name": "Controller Node",
"profile": "bootkube-controller",
"selector": {
"uuid": "4cefeb51-7cbf-4d4c-827c-784e332b6071"
},
"metadata": {
"dns_server": "10.30.118.133",
"ntp_server": "173.38.201.67",
"ip_address": "10.30.118.155/27",
"gateway": "10.30.118.129",
"domain_name": "node134.ctocllab.cisco.com",
"etcd_initial_cluster": "node134=http://node134.ctocllab.cisco.com:2380",
"etcd_name": "node134",
"k8s_controller_endpoint": "https://node134.ctocllab.cisco.com:443",
"k8s_dns_service_ip": "10.3.0.10",
"k8s_etcd_endpoints": "http://node134.ctocllab.cisco.com:2379",
"k8s_pod_network": "10.2.0.0/16",
"k8s_service_ip_range": "10.3.0.0/24",
"pxe": "true",
"ssh_authorized_keys": [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5apXdSDoo/Jlk28Ytam8XvJOONnGi2McrUZAaHnsOl0eDddbJoNzY1OOQjvbP4J3jXi+ekWqTQoR97WlGc6Vz2mDW3ZJF/7KGgFSLWTbM6aNpGvPLP2I6Ha2S8EzTXmLlPF0XtogaHRulal1yb1V7LHRM+GAz0RojHtPvgDtAjqywAPySX5bhWFRnk8GmT+2xrtAU4qc649YGInSsc2B5Acy5mO5Dt1kAEXvWBSJdZZoUn8sH8g43eLcASa33iaHVWpB0+s39NoXco9CRpRVPFOZ3ymwT2ZuO/AaORX8KXuPBXJUFWavP7kBvCG11nm2tYz1vP4QjYzGQLOF77rVR localadmin@gem-master01", "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcvyHsR2wu3e9WzJi2F03CQnFKON/VAjpgXGmanLnwaAh8ciB26w9ACkd/pb8D90JU/hMK18C+8nPsd66hhSJd49wUTAxbCDomvok/f44IITIqZUcMvkpBKXgrG5UwTrvgnZ2/S9fI/Yaoz3BYAQ5wu6FrbD4JOmY1AnKqsA7JaBgAxSDnUSOZDNoFUllUhZfH1liMCWPwqstfKsmqVUXnhjXPSikYGfOpxquqLNS9gHXoyB/5JH+OzA0A+chIy3M3EcH9G7cLN3jQ3hRFrP6NlmhEvzm+kSOZpU6szWpykjQlmKQeoXM4krp7p3MZGx86RHk2AXvwnoIqKnve772B [email protected]"
]
}
}
$ cat ignition/bootkube-controller.yaml
---
networkd:
units:
- name: 10-enp.network
contents: |
[Match]
Name=enp*s0
[Network]
Bond=bond0
- name: 20-bond0.netdev
contents: |
[NetDev]
Name=bond0
Kind=bond
[Bond]
Mode=802.3ad
TransmitHashPolicy=layer2
MIIMonitorSec=1
- name: 30-bond0.network
contents: |
[Match]
Name=bond0
[Network]
VLAN=public
- name: 40-public.netdev
contents: |
[NetDev]
Name=public
Kind=vlan
[VLAN]
Id=128
- name: 60-public.network
contents: |
[Match]
Name=public
[Network]
Address={{.ip_address}}
Gateway={{.gateway}}
DNS={{.dns_server}}
NTP={{.ntp_server}}
systemd:
units:
- name: etcd2.service
enable: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_NAME={{.etcd_name}}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
- name: flanneld.service
enable: true
dropins:
- name: 40-ExecStartPre-symlink.conf
contents: |
[Service]
ExecStartPre=/opt/init-flannel
- name: docker.service
enable: true
dropins:
- name: 40-flannel.conf
contents: |
[Unit]
Requires=flanneld.service
After=flanneld.service
- name: locksmithd.service
dropins:
- name: 40-etcd-lock.conf
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
- name: kubelet.path
enable: true
contents: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- name: kubelet.service
contents: |
[Unit]
Description=Kubelet via Hyperkube ACI
Wants=flanneld.service
[Service]
Environment="RKT_OPTS=--volume=resolv,kind=host,source=/etc/resolv.conf --mount volume=resolv,target=/etc/resolv.conf --volume var-log,kind=host,source=/var/log --mount volume=var-log,target=/var/log"
EnvironmentFile=/etc/kubernetes/kubelet.env
ExecStartPre=/usr/bin/systemctl is-active flanneld.service
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /srv/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--api-servers={{.k8s_controller_endpoint}} \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--hostname-override={{.domain_name}} \
--node-labels=master=true \
--minimum-container-ttl-duration=6m0s \
--cluster_dns={{.k8s_dns_service_ip}} \
--cluster_domain=cluster.local
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
storage:
{{ if index . "pxe" }}
disks:
- device: /dev/sda
wipe_table: true
partitions:
- label: ROOT
filesystems:
- name: root
mount:
device: "/dev/sda1"
format: "ext4"
create:
force: true
options:
- "-LROOT"
{{end}}
files:
- path: /etc/kubernetes/kubelet.env
filesystem: root
mode: 0644
contents:
inline: |
KUBELET_ACI=quay.io/coreos/hyperkube
KUBELET_VERSION=v1.4.6_coreos.0
- path: /etc/hostname
filesystem: root
mode: 0644
contents:
inline:
{{.domain_name}}
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /home/core/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.2.5}"
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/home/core/assets}"
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=$BOOTKUBE_ASSETS \
--mount volume=assets,target=/assets \
$RKT_OPTS \
${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} --net=host --exec=/bootkube -- start --asset-dir=/assets --etcd-server=http://127.0.0.1:2379 "$@"
- path: /opt/init-flannel
filesystem: root
mode: 0544
contents:
inline: |
#!/bin/bash
function init_flannel {
echo "Waiting for etcd..."
while true
do
IFS=',' read -ra ES <<< "{{.k8s_etcd_endpoints}}"
for ETCD in "${ES[@]}"; do
echo "Trying: $ETCD"
if [ -n "$(curl --silent "$ETCD/v2/machines")" ]; then
local ACTIVE_ETCD=$ETCD
break
fi
sleep 1
done
if [ -n "$ACTIVE_ETCD" ]; then
break
fi
done
RES=$(curl --silent -X PUT -d "value={\"Network\":\"{{.k8s_pod_network}}\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false")
if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then
echo "Unexpected error configuring flannel pod network: $RES"
fi
}
init_flannel
{{ if index . "ssh_authorized_keys" }}
passwd:
users:
- name: core
ssh_authorized_keys:
{{ range $element := .ssh_authorized_keys }}
- {{$element}}
{{end}}
{{end}}
make sure to enable firewall rules for PXE and for bootcfg:
$ firewall-cmd --zone=internal --list-all
internal (active)
interfaces: enp14s0
sources:
services: dhcp dhcpv6-client dns ipp-client mdns samba-client ssh tftp
ports: 3128/tcp 4080/tcp 8888/tcp 67/udp 53/udp 3129/tcp 8080-8082/tcp
masquerade: no
forward-ports: port=80:proto=tcp:toport=3128:toaddr=10.30.118.164
icmp-blocks:
rich rules:
[localadmin@gem-master01 examples]$ cat profiles/bootkube-controller.json
{
"id": "bootkube-controller",
"name": "bootkube Ready Controller",
"boot": {
"kernel": "http://beta.release.core-os.net/amd64-usr/1185.1.0/coreos_production_pxe.vmlinuz",
"initrd": ["http://beta.release.core-os.net/amd64-usr/1185.1.0/coreos_production_pxe_image.cpio.gz"],
"cmdline": {
"root": "/dev/sda1",
"coreos.config.url": "http://gem-master01.ctocllab.cisco.com:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
"coreos.autologin": "",
"coreos.first_boot": ""
}
},
"cloud_id": "",
"ignition_id": "bootkube-controller.yaml"
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment