Skip to content

Instantly share code, notes, and snippets.

@tintoy
Last active January 19, 2023 01:12
Show Gist options
  • Save tintoy/84bc8b41c9f69aa5753740c0fac758bc to your computer and use it in GitHub Desktop.
Terraform - DigitalOcean droplet with Docker
#!/usr/bin/python
import click
import json
import yaml
from collections import defaultdict
from os import path
# Docker image to use for each RKE-managed service.
# All Kubernetes components share one Rancher-built image; etcd comes from CoreOS.
# NOTE(review): versions date from the Rancher 2 preview era — confirm before reuse.
_K8S_IMAGE = 'rancher/k8s:v1.8.3-rancher2'

RKE_IMAGES = {
    'etcd':            'quay.io/coreos/etcd:latest',
    'kube-api':        _K8S_IMAGE,
    'kube-controller': _K8S_IMAGE,
    'scheduler':       _K8S_IMAGE,
    'kubelet':         _K8S_IMAGE,
    'kubeproxy':       _K8S_IMAGE,
}
@click.command()
@click.option('--terraform-state-file', required=True)
@click.option('--ssh-key-file', required=True)
@click.option('--cluster-manifest-file', default='cluster.yml')
def main(terraform_state_file=None, ssh_key_file=None, cluster_manifest_file=None):
    '''
    Generate RKE cluster configuration from Terraform outputs.

    The following outputs must be defined:

    - rke_control_plane_nodes
    - rke_etcd_nodes
    - rke_worker_nodes

    The outputs must be lists of node IP addresses
    (SSH-accessible from the system where RKE will be run).
    '''
    print("Terraform state file is '{0}'.".format(terraform_state_file))
    print("SSH key file is '{0}'.".format(ssh_key_file))

    # Flatten the outputs of every module in the state file into one dict.
    with open(terraform_state_file) as state_file:
        terraform_state = json.load(state_file)
    outputs = {
        name: data['value']
        for module in terraform_state['modules']
        for (name, data) in module['outputs'].items()
    }

    # Map each recognised rke_* output onto its RKE node role, collecting
    # the set of roles held by each node IP. Scalar output values are
    # treated as single-element lists. One IP may hold several roles.
    output_to_role = {
        'rke_control_plane_nodes': 'controlplane',
        'rke_etcd_nodes': 'etcd',
        'rke_worker_nodes': 'worker',
    }
    node_roles = defaultdict(set)
    for (output_name, role) in output_to_role.items():
        node_ips = outputs.get(output_name, [])
        if not isinstance(node_ips, list):
            node_ips = [node_ips]
        for node_ip in node_ips:
            node_roles[node_ip].add(role)

    # Assemble the RKE cluster manifest. All nodes are accessed as root
    # over SSH using the supplied private key.
    rke_manifest = {
        'nodes': [
            {
                'address': str(node_ip),
                'user': 'root',
                'role': list(roles),
            }
            for (node_ip, roles) in node_roles.items()
        ],
        'network': {
            'plugin': 'flannel'
        },
        'ssh_key_path': str(path.abspath(ssh_key_file)),
        'services': {
            service: {'image': image}
            for (service, image) in RKE_IMAGES.items()
        },
    }

    print('Writing RKE cluster manifest...')
    with open(cluster_manifest_file, 'w') as manifest_file:
        manifest_file.write('# RKE Manifest\n')
        yaml.dump(rke_manifest, manifest_file,
            default_flow_style=False  # Ugh, flow style is ugly
        )
    print('Done.')


if __name__ == '__main__':
    main()
###########
# Variables

# Base image for every Kubernetes host droplet.
variable "image" {
default = "ubuntu-16-04-x64"
}
# DNS subdomain under which host records are created.
variable "dns_subdomain" { /* provided elsewhere (e.g. TF_VAR_dns_subdomain environment variable) */ }
# Droplet size slug for Kubernetes hosts.
variable "kube_host_size" {
default = "4gb" # AF: This can't be smaller than 2gb or RKE freaks out when Kubernetes runs out of memory.
}
# Number of Kubernetes host droplets to create.
variable "kube_host_count" {
default = 3
}
# Private / public halves of the SSH key pair used to reach the droplets.
variable "ssh_key_file" { /* provided elsewhere */ }
variable "ssh_public_key_file" { /* provided elsewhere */ }
# Our SSH key
# Registers the public key with DigitalOcean so it can be injected into
# each droplet at creation time (referenced by fingerprint below).
resource "digitalocean_ssh_key" "kube_host" {
name = "kube-host.glider-gun.${var.dns_subdomain}"
public_key = "${file(var.ssh_public_key_file)}"
}
# The virtual servers (DigitalOcean).
#
# Creates ${var.kube_host_count} droplets named kube-1..kube-N, installs
# Docker via remote-exec, prepares the Glider Gun host directories and
# copies the SSH private key onto each host.
resource "digitalocean_droplet" "kube_host" {
  count    = "${var.kube_host_count}"
  image    = "${var.image}"
  name     = "kube-${count.index + 1}"
  region   = "nyc3"
  size     = "${var.kube_host_size}"
  ssh_keys = [
    "${digitalocean_ssh_key.kube_host.fingerprint}"
  ]

  # All provisioners connect as root using the local private key.
  connection {
    type        = "ssh"
    user        = "root"
    private_key = "${file(var.ssh_key_file)}"
  }

  # Install Docker
  # NOTE(review): pins Docker 1.12 from packages.docker.com and fetches the
  # signing key from sks-keyservers.net — both are legacy endpoints; confirm
  # availability before reuse.
  provisioner "remote-exec" {
    inline = [
      "apt-get update -qq",
      "apt-get install -q -y --no-install-recommends apt-transport-https curl software-properties-common",
      "apt-get install -q -y --no-install-recommends linux-image-extra-$(uname -r) linux-image-extra-virtual",
      "curl -fsSL 'https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e' | sudo apt-key add -",
      "add-apt-repository -y \"deb https://packages.docker.com/1.12/apt/repo/ ubuntu-$(lsb_release -cs) main\"",
      "apt-get update -qq",
      "apt-get -q -y install docker-engine"
    ]
  }

  # Create host directories.
  # FIX: also create /etc/glider-gun/keys — the file provisioner below
  # copies to /etc/glider-gun/keys/ssh and fails if the directory is missing.
  provisioner "remote-exec" {
    inline = [
      "mkdir -p /etc/glider-gun/keys /var/run/glider-gun/state"
    ]
  }

  # Copy SSH key
  provisioner "file" {
    source      = "${var.ssh_key_file}"
    destination = "/etc/glider-gun/keys/ssh"
  }
}
# DNS records for the virtual servers (CloudFlare).
#
# One A record per droplet: <droplet-name>.<subdomain>.tintoy.io -> droplet IPv4.
resource "cloudflare_record" "kube_host" {
  # FIX: reference the count variable directly instead of the droplet
  # resource's "count" attribute — resource.count is not a reliably
  # resolvable attribute in pre-0.12 interpolation.
  count   = "${var.kube_host_count}"
  domain  = "tintoy.io"
  name    = "${element(digitalocean_droplet.kube_host.*.name, count.index)}.${var.dns_subdomain}"
  value   = "${element(digitalocean_droplet.kube_host.*.ipv4_address, count.index)}"
  type    = "A"
  ttl     = 120
  proxied = false
}
#########
# Outputs

# IPv4 addresses of all Kubernetes host droplets.
output "kube_host_ips" {
value = [
"${digitalocean_droplet.kube_host.*.ipv4_address}"
]
}
# Fully-qualified DNS names of all Kubernetes host droplets.
output "kube_host_names" {
value = [
"${formatlist("%s.%s.tintoy.io", digitalocean_droplet.kube_host.*.name, var.dns_subdomain)}"
]
}
# Outputs used to generate RKE cluster manifest
# (consumed by the companion Python script above; names must match the
# rke_* outputs it looks for).
# Only the first host runs the control plane and etcd; all hosts are workers.
output "rke_control_plane_nodes" {
value = [
"${element(digitalocean_droplet.kube_host.*.ipv4_address, 0)}"
]
}
output "rke_etcd_nodes" {
value = [
"${element(digitalocean_droplet.kube_host.*.ipv4_address, 0)}"
]
}
output "rke_worker_nodes" {
value = [
"${digitalocean_droplet.kube_host.*.ipv4_address}"
]
}
###########
# Providers

# DigitalOcean API token (e.g. via TF_VAR_do_token).
variable "do_token" { /* provided elsewhere */ }
provider "digitalocean" {
token = "${var.do_token}"
}
# CloudFlare account credentials (e.g. via TF_VAR_cloudflare_* variables).
variable "cloudflare_email" { /* provided elsewhere */ }
variable "cloudflare_token" { /* provided elsewhere */ }
provider "cloudflare" {
email = "${var.cloudflare_email}"
token = "${var.cloudflare_token}"
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment