Skip to content

Instantly share code, notes, and snippets.

@tom-butler
Last active October 26, 2018 04:56
Show Gist options
  • Select an option

  • Save tom-butler/27b7b1b8b1bad8d0171a68a3819cbd90 to your computer and use it in GitHub Desktop.

Select an option

Save tom-butler/27b7b1b8b1bad8d0171a68a3819cbd90 to your computer and use it in GitHub Desktop.
Terraform-Kops-pipeline
#!/bin/bash
# Deploy pipeline: provisions network infrastructure with Terraform for the
# workspace named in $1, renders the kops manifests from Terraform outputs,
# then creates/updates and validates the Kubernetes cluster with kops.
#
# Usage: deploy.sh <workspace>
# Requires: terraform, kops, kubectl on PATH; AWS credentials in environment.
set -euo pipefail

if [[ $# -lt 1 ]]; then
  echo "usage: ${0##*/} <workspace>" >&2
  exit 1
fi

export WORKSPACE=$1
echo "deploying $WORKSPACE"

# Start from a clean slate so a stale backend/provider cache can't leak in.
rm -rf .terraform

# Deploy Network Infrastructure
terraform init -backend-config "workspaces/$WORKSPACE/backend.cfg"
# Create the workspace on first run; select it on subsequent runs.
terraform workspace new "$WORKSPACE" || terraform workspace select "$WORKSPACE"
terraform plan -input=false -var-file="workspaces/$WORKSPACE/terraform.tfvars"
terraform apply -auto-approve -input=false -var-file="workspaces/$WORKSPACE/terraform.tfvars"

# Create Kubernetes Manifests from Terraform outputs.
rm -f -- cluster.yaml secret.yaml cluster_defaults.yaml users.yaml
terraform output cluster > cluster.yaml
terraform output secret > secret.yaml
terraform output cluster_defaults > cluster_defaults.yaml
terraform output users > users.yaml

# Capture outputs once instead of re-invoking terraform for each use.
KOPS_STATE_STORE=$(terraform output state_store)
export KOPS_STATE_STORE
cluster_name=$(terraform output name)

# Create a new cluster and overwrite it with our config, or just overwrite it.
# (Intentional && / || chain: --force replace is the fallback when either the
# create or the plain replace fails, e.g. when the cluster already exists.)
kops create cluster \
  --zones ap-southeast-2a,ap-southeast-2b,ap-southeast-2c \
  --master-zones ap-southeast-2a,ap-southeast-2b,ap-southeast-2c \
  --master-count=3 \
  "$cluster_name" -v 10 \
  && kops replace -f cluster.yaml -v 10 \
  || kops replace -f cluster.yaml --force -v 10
kops update cluster "$cluster_name" --yes -v 10

# Wait up to 15 minutes for the cluster to validate, polling every 30s.
max_wait=900
while (( max_wait > 0 )); do
  kops validate cluster "$cluster_name" -v 10 && break || sleep 30
  max_wait=$((max_wait - 30))
  echo "Waited 30 seconds. Still waiting max. $max_wait"
done
if (( max_wait <= 0 )); then
  echo "Timeout: cluster does not validate after 15 minutes" >&2
  exit 1
fi

# Roll any instance groups whose spec changed in this run.
kops rolling-update cluster --yes -v 10

# Deploy Kubernetes Manifests
kubectl apply -f secret.yaml
kubectl apply -f users.yaml
# Multi-document kops YAML (kops/v1alpha2) rendered by the deploy script into
# cluster.yaml and applied with `kops replace`: three single-master
# InstanceGroups (one per AZ), an on-demand "nodes" IG, a "spot" IG, and the
# Cluster spec itself. Comments live outside the heredoc on purpose — anything
# inside it is emitted verbatim into cluster.yaml.
#
# FIX: the final document previously declared `Kind: Cluster`; kops YAML keys
# are lowercase, so the Cluster spec was not parsed as a Cluster object.
#
# NOTE(review): the "spot" IG reuses nodeLabels instancegroup: nodes —
# presumably intentional so workloads target either pool; confirm.
# NOTE(review): the "nodes" IG lists only subnets ${var.region}a and
# ${var.region}c (no -b) — looks deliberate but verify against capacity plans.
output "cluster" {
value = <<EOF
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: ${timestamp()}
labels:
kops.k8s.io/cluster: ${var.name}
name: master-${var.region}a
spec:
image: ${data.aws_ami.k8s.image_id}
machineType: ${var.master_type}
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: master-${var.region}a
role: Master
subnets:
- ${var.region}a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: ${timestamp()}
labels:
kops.k8s.io/cluster: ${var.name}
name: master-${var.region}b
spec:
image: ${data.aws_ami.k8s.image_id}
machineType: ${var.master_type}
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: master-${var.region}b
role: Master
subnets:
- ${var.region}b
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: ${timestamp()}
labels:
kops.k8s.io/cluster: ${var.name}
name: master-${var.region}c
spec:
image: ${data.aws_ami.k8s.image_id}
machineType: ${var.master_type}
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: master-${var.region}c
role: Master
subnets:
- ${var.region}c
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: ${timestamp()}
labels:
kops.k8s.io/cluster: ${var.name}
name: nodes
spec:
image: ${data.aws_ami.k8s.image_id}
machineType: ${var.node_type}
maxSize: ${var.max_nodes}
minSize: ${var.min_nodes}
nodeLabels:
kops.k8s.io/instancegroup: nodes
role: Node
additionalSecurityGroups: [${module.db.instance_sg}, ${module.public.nlb_sg}]
subnets:
- ${var.region}a
- ${var.region}c
externalLoadBalancers:
- targetGroupARN: ${module.public.lb_http_target_group_arn}
- targetGroupARN: ${module.public.lb_https_target_group_arn}
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: ${timestamp()}
labels:
kops.k8s.io/cluster: ${var.name}
name: spot
spec:
maxPrice: "${var.max_spot_price}"
image: ${data.aws_ami.k8s.image_id}
machineType: ${var.node_type}
maxSize: ${var.max_spot_nodes}
minSize: 0
nodeLabels:
kops.k8s.io/instancegroup: nodes
role: Node
additionalSecurityGroups: [${module.db.instance_sg}, ${module.public.nlb_sg}]
subnets:
- ${var.region}a
- ${var.region}b
- ${var.region}c
externalLoadBalancers:
- targetGroupARN: ${module.public.lb_http_target_group_arn}
- targetGroupARN: ${module.public.lb_https_target_group_arn}
---
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
creationTimestamp: ${timestamp()}
name: ${var.name}
spec:
api:
loadBalancer:
type: Public
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: s3://${aws_s3_bucket.kubernetes_state.id}/${var.name}
dnsZone: ${var.name}
etcdClusters:
- etcdMembers:
- encryptedVolume: true
instanceGroup: master-${var.region}a
name: a
- encryptedVolume: true
instanceGroup: master-${var.region}b
name: b
- encryptedVolume: true
instanceGroup: master-${var.region}c
name: c
name: main
- etcdMembers:
- encryptedVolume: true
instanceGroup: master-${var.region}a
name: a
- encryptedVolume: true
instanceGroup: master-${var.region}b
name: b
- encryptedVolume: true
instanceGroup: master-${var.region}c
name: c
name: events
iam:
allowContainerRegistry: true
legacy: false
kubernetesApiAccess:
- 0.0.0.0/0
kubernetesVersion: 1.9.11
kubeControllerManager:
horizontalPodAutoscalerSyncPeriod: 15s
horizontalPodAutoscalerDownscaleDelay: 5m0s
horizontalPodAutoscalerUpscaleDelay: 1m30s
masterInternalName: api.internal.${var.name}
masterPublicName: api.${var.name}
networkCIDR: ${var.vpc_cidr}
networkID: ${module.vpc.vpc_id}
networking:
weave:
mtu: 8912
nonMasqueradeCIDR: 100.64.0.0/10
subnets:
- cidr: ${element(var.private_subnet_cidrs, 0)}
id: ${element(module.vpc.private_subnets, 0)}
egress: ${element(module.vpc.natgw_ids, 0)}
name: ${var.region}a
type: Private
zone: ${var.region}a
- cidr: ${element(var.private_subnet_cidrs, 1)}
id: ${element(module.vpc.private_subnets, 1)}
egress: ${element(module.vpc.natgw_ids, 1)}
name: ${var.region}b
type: Private
zone: ${var.region}b
- cidr: ${element(var.private_subnet_cidrs, 2)}
id: ${element(module.vpc.private_subnets, 2)}
egress: ${element(module.vpc.natgw_ids, 2)}
name: ${var.region}c
type: Private
zone: ${var.region}c
- cidr: ${element(var.public_subnet_cidrs, 0)}
id: ${element(module.vpc.public_subnets, 0)}
name: utility-${var.region}a
type: Utility
zone: ${var.region}a
- cidr: ${element(var.public_subnet_cidrs, 1)}
id: ${element(module.vpc.public_subnets, 1)}
name: utility-${var.region}b
type: Utility
zone: ${var.region}b
- cidr: ${element(var.public_subnet_cidrs, 2)}
id: ${element(module.vpc.public_subnets, 2)}
name: utility-${var.region}c
type: Utility
zone: ${var.region}c
topology:
dns:
type: Public
masters: private
nodes: private
additionalPolicies:
node: |
[
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/kubernetes-*"
},
{
"Effect": "Allow",
"Action": [
"ssm:DescribeAssociation",
"ssm:GetDeployablePatchSnapshotForInstance",
"ssm:GetDocument",
"ssm:GetManifest",
"ssm:GetParameters",
"ssm:ListAssociations",
"ssm:ListInstanceAssociations",
"ssm:PutInventory",
"ssm:PutComplianceItems",
"ssm:PutConfigurePackageResult",
"ssm:UpdateAssociationStatus",
"ssm:UpdateInstanceAssociationStatus",
"ssm:UpdateInstanceInformation"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ssmmessages:CreateControlChannel",
"ssmmessages:CreateDataChannel",
"ssmmessages:OpenControlChannel",
"ssmmessages:OpenDataChannel"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetEncryptionConfiguration"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:GetEncryptionConfiguration",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts",
"s3:ListBucket",
"s3:ListBucketMultipartUploads"
],
"Resource": "*"
}
]
master: |
[
{
"Effect": "Allow",
"Action": [
"ssm:DescribeAssociation",
"ssm:GetDeployablePatchSnapshotForInstance",
"ssm:GetDocument",
"ssm:GetManifest",
"ssm:GetParameters",
"ssm:ListAssociations",
"ssm:ListInstanceAssociations",
"ssm:PutInventory",
"ssm:PutComplianceItems",
"ssm:PutConfigurePackageResult",
"ssm:UpdateAssociationStatus",
"ssm:UpdateInstanceAssociationStatus",
"ssm:UpdateInstanceInformation"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ssmmessages:CreateControlChannel",
"ssmmessages:CreateDataChannel",
"ssmmessages:OpenControlChannel",
"ssmmessages:OpenDataChannel"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetEncryptionConfiguration"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:GetEncryptionConfiguration",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts",
"s3:ListBucket",
"s3:ListBucketMultipartUploads"
],
"Resource": "*"
}
]
spot: |
[
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Resource": "arn:aws:iam::*:role/kubernetes-*"
},
{
"Effect": "Allow",
"Action": [
"ssm:DescribeAssociation",
"ssm:GetDeployablePatchSnapshotForInstance",
"ssm:GetDocument",
"ssm:GetManifest",
"ssm:GetParameters",
"ssm:ListAssociations",
"ssm:ListInstanceAssociations",
"ssm:PutInventory",
"ssm:PutComplianceItems",
"ssm:PutConfigurePackageResult",
"ssm:UpdateAssociationStatus",
"ssm:UpdateInstanceAssociationStatus",
"ssm:UpdateInstanceInformation"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ssmmessages:CreateControlChannel",
"ssmmessages:CreateDataChannel",
"ssmmessages:OpenControlChannel",
"ssmmessages:OpenDataChannel"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetEncryptionConfiguration"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams",
"logs:PutLogEvents"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:GetEncryptionConfiguration",
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts",
"s3:ListBucket",
"s3:ListBucketMultipartUploads"
],
"Resource": "*"
}
]
EOF
}
# Kubernetes Secret manifest carrying the Terraform state bucket name and
# database credentials (all base64-encoded, as the Secret `data` field
# requires). The deploy script writes this to secret.yaml and applies it
# with kubectl. Comments must stay outside the heredoc — its content is
# emitted verbatim.
output "secret" {
# Redacts the rendered manifest in Terraform CLI output.
sensitive = true
value = <<EOF
apiVersion: v1
kind: Secret
metadata:
name: ${var.name}
type: Opaque
data:
state-bucket: "${base64encode(aws_s3_bucket.tf_state.id)}"
postgres-username: "${base64encode(module.db.db_username)}"
postgres-password: "${base64encode(module.db.db_password)}"
EOF
}
# S3 URI of the kops state bucket; exported by the deploy script as
# KOPS_STATE_STORE before any kops command runs.
output "state_store" {
value = "s3://${aws_s3_bucket.kubernetes_state.id}"
}
# Cluster name passed by the deploy script to kops create/update/validate.
# (Quoted "${var.name}" interpolation kept for Terraform 0.11-era syntax.)
output "name" {
value = "${var.name}"
}
@tom-butler
Copy link
Author

We use Terraform to create our default infrastructure and to generate the cluster.yaml manifest from its outputs, which is then passed to kops to make changes to our clusters.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment