Terraforming EKS Blog
data "template_file" "cluster_autoscaler" {
template = "${file("cluster_autoscaler.yml.tpl")}"
vars = {
worker_node_min_size = "${var.nodes_min_size}"
worker_node_max_size = "${var.nodes_max_size}"
worker_node_asg_name = "${aws_autoscaling_group.fd_eks_nodes_asg.name}"
aws_region = "${var.region}"
}
}
resource "local_file" "cluster_autoscaler" {
filename = "cluster_autoscaler.yml"
content = "${data.template_file.cluster_autoscaler.rendered}"
}
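The data source above expects a cluster_autoscaler.yml.tpl next to the Terraform files, which the gist does not include. A minimal sketch of the Deployment excerpt it might contain, wiring the four template vars into the standard cluster-autoscaler flags (the image tag is an assumption; match it to your cluster version):

# cluster_autoscaler.yml.tpl -- illustrative excerpt only; the full manifest
# also needs a ServiceAccount, RBAC objects, and the rest of the Deployment spec
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: cluster-autoscaler
  template:
    metadata:
      labels:
        app: cluster-autoscaler
    spec:
      containers:
        - name: cluster-autoscaler
          image: k8s.gcr.io/cluster-autoscaler:v1.14.7  # assumed tag
          command:
            - ./cluster-autoscaler
            - --cloud-provider=aws
            - --nodes=${worker_node_min_size}:${worker_node_max_size}:${worker_node_asg_name}
          env:
            - name: AWS_REGION
              value: ${aws_region}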
#!/bin/bash
# REQUIREMENTS
# Run this from the directory it resides in.
# Edit terraform.tfvars and populate the required values before running.
TFPWD=$(pwd)
function print_log()
{
  echo -e "$(date +'[%F %T %Z]') $*"
}
function run_terraform()
{
  terraform init
  terraform plan
  # The following command will ask for your confirmation
  terraform apply
}
function setup_kubectl()
{
  pushd "$TFPWD"
  mkdir -p "$HOME/.kube"
  terraform output kubeconfig > "$HOME/.kube/config"
  # Download kubectl
  kubectl_url=$(terraform output kubectl_url)
  curl -o kubectl "${kubectl_url}"
  chmod +x kubectl
  # Setup home bin
  mkdir -p "$HOME/bin"
  mv kubectl "$HOME/bin/kubectl"
  # Setup aws-iam-authenticator
  aws_iam_authenticator_url=$(terraform output aws_iam_authenticator_url)
  curl -o aws-iam-authenticator "${aws_iam_authenticator_url}"
  chmod +x aws-iam-authenticator
  mv aws-iam-authenticator "$HOME/bin/aws-iam-authenticator"
  # Set PATH for this script and persist it for future shells
  export PATH="$HOME/bin:$PATH"
  echo 'export PATH=$HOME/bin:$PATH' >> ~/.bashrc
  # Run a kubectl test command
  kubectl get svc
  popd
}
function setup_nodes()
{
  pushd "$TFPWD"
  # Again use the terraform output for the aws-auth ConfigMap
  terraform output config_map_aws_auth > "$HOME/aws-auth-cm.yaml"
  # Apply the yaml using kubectl
  kubectl apply -f "$HOME/aws-auth-cm.yaml"
  timeout 10 kubectl get nodes --watch
  popd
}
function setup_roles()
{
  pushd "$TFPWD"
  terraform output cluster_roles_yml > cluster_roles.yml
  kubectl apply -f cluster_roles.yml
  popd
}
function disable_snat()
{
  cd "$TFPWD"
  # Apply the patch using kubectl; setting AWS_VPC_K8S_CNI_EXTERNALSNAT=true
  # disables SNAT on the worker nodes
  kubectl patch daemonset aws-node -n kube-system -p '{"spec":{"template":{"spec":{"containers":[{"name":"aws-node","env":[{"name": "AWS_VPC_K8S_CNI_EXTERNALSNAT", "value": "true"}]}]}}}}'
  res=$?
  if [[ $res -gt 0 ]]; then
    echo "AWS node daemonset is not patched"
  else
    echo "AWS node is patched - disabled SNAT"
  fi
  timeout 10 kubectl get nodes --watch
}
function add_prometheus_annotation_for_eks_cni()
{
  cd "$TFPWD"
  kubectl --namespace kube-system patch daemonset aws-node --patch '{"spec":{"template":{"metadata":{"annotations":{"prometheus.io/scrape": "true", "prometheus.io/path": "/metrics", "prometheus.io/port": "61678"}}}}}'
  res=$?
  if [[ $res -gt 0 ]]; then
    echo "Prometheus annotation was not added"
  else
    echo "AWS node is patched - annotation added for Prometheus"
  fi
}
print_log "Running terraform"
run_terraform
print_log "Setting up kubectl"
setup_kubectl
print_log "Setting up nodes"
setup_nodes
disable_snat
add_prometheus_annotation_for_eks_cni
print_log "Setting up cluster roles"
setup_roles
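setup_kubectl and the functions that follow read everything they need from terraform output, so the root module has to expose matching outputs. A hedged sketch of their shape (the binary URLs follow the documented EKS S3 layout for a 1.14 cluster; the locals are assumptions, not part of the original gist):

# outputs.tf -- a minimal sketch; local.kubeconfig, local.config_map_aws_auth and
# local.cluster_roles_yml are assumed rendered templates, not shown here
output "kubeconfig" {
  value = local.kubeconfig
}

output "kubectl_url" {
  value = "https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/kubectl"
}

output "aws_iam_authenticator_url" {
  value = "https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/aws-iam-authenticator"
}

output "config_map_aws_auth" {
  value = local.config_map_aws_auth
}

output "cluster_roles_yml" {
  value = local.cluster_roles_yml
}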
module "subnets_from_list" {
source = "git.url/repo?ref=esl2.0"
cluster_name = var.cluster_name
vpc_id = var.vpc_id
subnets_list = var.subnets_list
route_table = var.route_table
tags = local.tags
}
module "eks_cluster" {
source = "git.url/repo?ref=em2.14"
cluster_name = var.cluster_name
cluster_version = var.cluster_version
cluster_log_types = var.cluster_log_types
environment = var.stage
subnet_ids = module.subnets_from_list.eks_subnet_ids
vpc_id = var.vpc_id
master_associated_policies = var.master_associated_policies
allowed_sgs_master = var.allowed_sgs_master
allowed_sgs_cidrs_master = var.allowed_sgs_cidrs_master
tags = local.tags
}
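Both modules are driven by terraform.tfvars, which the script header asks you to populate first. An illustrative example of its shape (every value below is a placeholder, not from the original gist):

# terraform.tfvars -- placeholder values only
cluster_name           = "demo-eks"
cluster_version        = "1.14"
cluster_log_types      = ["api", "audit", "authenticator"]
stage                  = "staging"
region                 = "us-east-1"
vpc_id                 = "vpc-0123456789abcdef0"
subnets_list           = ["subnet-0123456789abcdef0", "subnet-0123456789abcdef1"]
route_table            = "rtb-0123456789abcdef0"
nodes_min_size         = 2
nodes_max_size         = 10
nodes_desired_capacity = 2
instance_type          = "m5.large"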
resource "aws_autoscaling_group" "eks_nodes_asg" {
count = length(var.vpc_zone_identifier)
desired_capacity = var.nodes_desired_capacity
launch_configuration = aws_launch_configuration.eks_nodes_lc.id
max_size = var.nodes_max_size
min_size = var.nodes_min_size
name = "${var.cluster_name}-node-asg-${count.index}"
vpc_zone_identifier = [var.vpc_zone_identifier[count.index]]
termination_policies = var.termination_policies
tags = module.node_label.tags_as_list_of_maps
lifecycle {
create_before_destroy = true
ignore_changes = [
desired_capacity,
vpc_zone_identifier,
]
}
}
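The ASG references aws_launch_configuration.eks_nodes_lc, which the gist does not show. A minimal sketch under assumed names (the AMI data source, instance profile, security group, and userdata local are all assumptions):

resource "aws_launch_configuration" "eks_nodes_lc" {
  name_prefix          = "${var.cluster_name}-node-lc-"
  image_id             = data.aws_ami.eks_worker.id              # assumed EKS-optimized AMI lookup
  instance_type        = var.instance_type
  iam_instance_profile = aws_iam_instance_profile.eks_node.name  # assumed instance profile
  security_groups      = [aws_security_group.eks_nodes.id]       # assumed node security group
  user_data            = local.eks_node_userdata                 # assumed bootstrap.sh userdata

  lifecycle {
    create_before_destroy = true
  }
}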
module "eks_cluster_node" {
source = "git.url/repo?ref=en2.6"
cluster_name = var.cluster_name
cluster_version = module.eks_cluster.eks_cluster_version
environment = module.eks_cluster.eks_cluster_environment
vpc_id = module.eks_cluster.eks_cluster_vpc_id
vpc_zone_identifier = module.subnets_from_list.eks_subnet_ids
instance_type = var.instance_type
eks_master_sg_id = module.eks_cluster.eks_master_sg
eks_master_endpoint = module.eks_cluster.eks_cluster_endpoint
eks_master_certificate_authority = module.eks_cluster.eks_cluster_certificate_authority
node_associated_policies = [
"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess",
"${aws_iam_policy.eks_node_access_kms.arn}",
"${aws_iam_policy.eks_node_access_secrets_manager_policy.arn}",
"${aws_iam_policy.eks_node_access_opsworks.arn}",
"${aws_iam_policy.eks_node_access_elb.arn}",
"${aws_iam_policy.eks_node_access_cloudwatch.arn}"
]
allowed_sgs_nodes = var.allowed_sgs_nodes
allowed_sgs_cidrs = var.allowed_sgs_cidrs
additional_node_sgs = var.additional_node_sgs
ssh_public_key = var.ssh_public_key
volume_size = var.volume_size
nodes_desired_capacity = var.nodes_desired_capacity
nodes_max_size = var.nodes_max_size
nodes_min_size = var.nodes_min_size
lambda_role_arn = var.lambda_role_arn
supreme_role_arn = var.supreme_role_arn
tags = local.tags
logrotate_s3_bucket = var.logrotate_s3_bucket
eks_ami_account_id = var.eks_ami_account_id
golden_ami_name = var.golden_ami_name
}
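node_associated_policies mixes an AWS-managed policy with several customer-managed ones defined elsewhere in the configuration. A hedged sketch of one of them, eks_node_access_kms (the actions and the kms_key_arns variable are assumptions; in practice, scope the resource to your own key ARNs):

resource "aws_iam_policy" "eks_node_access_kms" {
  name        = "${var.cluster_name}-node-kms-access"
  description = "Allow EKS worker nodes to decrypt with specific KMS keys"

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect   = "Allow"
      Action   = ["kms:Decrypt", "kms:DescribeKey"]
      Resource = var.kms_key_arns  # hypothetical variable holding allowed key ARNs
    }]
  })
}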