Created
September 29, 2020 03:18
-
-
Save noproto/556d5548989c5e52ab0feeeb1d272ba5 to your computer and use it in GitHub Desktop.
Terraform Quickstart Template Config (Local Docker DEV, AWS ECS Fargate+AWS Route53 PROD)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// This Terraform config is a working template for a simple infrastructure pipeline | |
// You'll need the AWS v2 CLI and the ECS CLI | |
// | |
// First, make a basic directory structure: mkdir -p ~/terraform/example/hello/data | |
// | |
// Files to be placed in the 'data' directory: hello.py requirements.txt | |
// hello.py: | |
// from flask import Flask | |
// app = Flask(__name__) | |
// @app.route("/") | |
// def hello(): | |
// return "Hello World!" | |
// if __name__ == "__main__": | |
// app.run(host='0.0.0.0', port=80) | |
// requirements.txt: | |
// Flask==1.1.1 | |
// | |
// Place the following content in main.tf in the 'hello' directory. | |
// Start by running the commands listed below in the 'hello' directory. | |
// To destroy, run `terraform destroy`. Enjoy! | |
/*----------------------------------------------------------------------------- | |
Deployment steps | |
------------------------------------------------------------------------------- | |
DEV | terraform apply -target=null_resource.dev | |
PROD | terraform apply -target=null_resource.prod | |
-----------------------------------------------------------------------------*/ | |
terraform { | |
required_version = ">= 0.12" | |
} | |
/* Runtime variables: -------------------------------------------------------*/ | |
variable "domain" { | |
description = "Domain for service" | |
default = "example.com" | |
} | |
variable "subdomain" { | |
description = "Subdomain for service" | |
default = "hello" | |
} | |
variable "aws_region" { | |
description = "AWS region to launch services" | |
default = "us-east-2" | |
} | |
variable "aws_profile" { | |
description = "AWS profile for authentication" | |
default = "default" | |
} | |
variable "fargate_cpu" { | |
description = "Fargate instance CPU units to provision (1 vCPU = 1024 CPU units)" | |
default = "256" | |
} | |
variable "fargate_memory" { | |
description = "Fargate instance memory to provision (in MiB)" | |
default = "512" | |
} | |
locals { | |
path_components = reverse(slice(reverse(split("/", abspath("."))), 0, 2)) | |
dash_repository_name = join("-", local.path_components) | |
slash_repository_name = join("/", local.path_components) | |
dockerfile = <<-EOF | |
FROM ubuntu:latest | |
# Config | |
ENV data_dir='./data' | |
WORKDIR $${data_dir} | |
# Install dependencies | |
RUN apt update && apt-get -y install python3 python3-pip | |
RUN pip3 install --upgrade pip | |
# Copy data files | |
RUN mkdir -p /data | |
COPY ./ /data | |
# Install Python requirements | |
RUN python3 -m pip install --upgrade pip | |
RUN pip3 install -r $${data_dir}/requirements.txt | |
EXPOSE 80/tcp | |
CMD cd $${data_dir} && python3 hello.py | |
EOF | |
} | |
/* AWS setup ----------------------------------------------------------------*/ | |
provider "aws" { | |
profile = var.aws_profile | |
region = var.aws_region | |
} | |
data "aws_caller_identity" "current" { | |
} | |
/* ECR setup ----------------------------------------------------------------*/ | |
// Create AWS ECR repository | |
resource "aws_ecr_repository" "repository_001" { | |
name = local.slash_repository_name | |
} | |
// Login Docker to AWS ECR repository | |
resource "null_resource" "docker_login" { | |
provisioner "local-exec" { | |
command = "aws ecr get-login-password --region ${var.aws_region} | docker login --username AWS --password-stdin ${data.aws_caller_identity.current.account_id}.dkr.ecr.${var.aws_region}.amazonaws.com" | |
} | |
depends_on = [aws_ecr_repository.repository_001] | |
} | |
/* Docker setup -------------------------------------------------------------*/ | |
// Build Docker image | |
resource "null_resource" "docker_build" { | |
triggers = { | |
slash_repository_name = local.slash_repository_name | |
} | |
provisioner "local-exec" { | |
command = "echo local.dockerfile | terraform console -state=console.tfstate | docker build -t ${local.slash_repository_name} -f - ." | |
} | |
provisioner "local-exec" { | |
when = destroy | |
command = "docker rmi ${self.triggers.slash_repository_name} && docker rmi ubuntu:latest" | |
} | |
} | |
// Tag Docker image in ECR | |
resource "null_resource" "docker_tag" { | |
triggers = { | |
repository_url = aws_ecr_repository.repository_001.repository_url | |
} | |
provisioner "local-exec" { | |
command = "docker tag ${local.slash_repository_name} ${aws_ecr_repository.repository_001.repository_url}" | |
} | |
provisioner "local-exec" { | |
when = destroy | |
command = "docker rmi ${self.triggers.repository_url}" | |
} | |
depends_on = [null_resource.docker_login, null_resource.docker_build] | |
} | |
// Push Docker container to AWS ECR repository | |
resource "null_resource" "docker_push" { | |
provisioner "local-exec" { | |
command = "docker push ${aws_ecr_repository.repository_001.repository_url}" | |
} | |
depends_on = [null_resource.docker_tag] | |
} | |
/* AWS networking setup -----------------------------------------------------*/ | |
// Create VPC for cluster | |
resource "aws_vpc" "vpc_001" { | |
cidr_block = "10.0.0.0/16" | |
tags = { | |
Name = "${local.dash_repository_name} VPC" | |
} | |
} | |
// Create first VPC subnet for Docker containers | |
resource "aws_subnet" "subnet_001" { | |
vpc_id = aws_vpc.vpc_001.id | |
cidr_block = "10.0.101.0/24" | |
availability_zone = "${var.aws_region}a" | |
tags = { | |
Name = "${local.dash_repository_name} subnet_001" | |
} | |
} | |
// Create Internet Gateway for VPC | |
resource "aws_internet_gateway" "igw_001" { | |
vpc_id = aws_vpc.vpc_001.id | |
tags = { | |
Name = "${local.dash_repository_name} igw_001" | |
} | |
} | |
// Create Route Table | |
resource "aws_route_table" "rt_001" { | |
vpc_id = aws_vpc.vpc_001.id | |
route { | |
cidr_block = "0.0.0.0/0" | |
gateway_id = aws_internet_gateway.igw_001.id | |
} | |
tags = { | |
Name = "${local.dash_repository_name} rt_001" | |
} | |
} | |
// Create Route Table Association subnet_001 | |
resource "aws_route_table_association" "rta_001" { | |
subnet_id = aws_subnet.subnet_001.id | |
route_table_id = aws_route_table.rt_001.id | |
} | |
/* AWS security setup -------------------------------------------------------*/ | |
// Create security group to allow http traffic | |
resource "aws_security_group" "allow_http" { | |
name = "Allow HTTP" | |
description = "Allow HTTP inbound traffic" | |
vpc_id = aws_vpc.vpc_001.id | |
ingress { | |
from_port = 80 | |
to_port = 80 | |
protocol = "tcp" | |
cidr_blocks = ["0.0.0.0/0"] | |
} | |
egress { | |
from_port = 0 | |
to_port = 0 | |
protocol = "-1" | |
cidr_blocks = ["0.0.0.0/0"] | |
} | |
tags = { | |
Name = "Allow HTTP" | |
} | |
} | |
resource "aws_iam_role" "ecs_execution_role" { | |
name = "ecs_execution_role" | |
assume_role_policy = <<EOF | |
{ | |
"Version": "2012-10-17", | |
"Statement": { | |
"Effect": "Allow", | |
"Principal": {"Service": ["ecs-tasks.amazonaws.com", "ec2.amazonaws.com"]}, | |
"Action": "sts:AssumeRole" | |
} | |
} | |
EOF | |
} | |
resource "aws_iam_role_policy" "ecs_execution_role_policy" { | |
role = aws_iam_role.ecs_execution_role.id | |
policy = <<EOF | |
{ | |
"Version": "2012-10-17", | |
"Statement": [ | |
{ | |
"Effect": "Allow", | |
"Action": [ | |
"ecr:GetAuthorizationToken", | |
"ecr:BatchCheckLayerAvailability", | |
"ecr:GetDownloadUrlForLayer", | |
"ecr:BatchGetImage", | |
"logs:CreateLogStream", | |
"logs:PutLogEvents" | |
], | |
"Resource": "*" | |
} | |
] | |
} | |
EOF | |
} | |
/* ECS setup ----------------------------------------------------------------*/ | |
// Create ECS cluster | |
resource "aws_ecs_cluster" "cluster_001" { | |
name = "${local.dash_repository_name}-cluster" | |
} | |
// Create Service | |
resource "aws_ecs_service" "service_001" { | |
name = "${local.dash_repository_name}-service" | |
cluster = aws_ecs_cluster.cluster_001.id | |
task_definition = aws_ecs_task_definition.td_001.arn | |
desired_count = 1 | |
deployment_minimum_healthy_percent = 100 | |
launch_type = "FARGATE" | |
platform_version = "1.4.0" | |
network_configuration { | |
security_groups = [aws_security_group.allow_http.id] | |
subnets = [aws_subnet.subnet_001.id] | |
assign_public_ip = true | |
} | |
depends_on = [aws_route_table_association.rta_001] | |
} | |
// Create Task Definition | |
resource "aws_ecs_task_definition" "td_001" { | |
family = "${local.dash_repository_name}-task-definition" | |
network_mode = "awsvpc" | |
requires_compatibilities = ["FARGATE"] | |
cpu = var.fargate_cpu | |
memory = var.fargate_memory | |
execution_role_arn = aws_iam_role.ecs_execution_role.arn | |
container_definitions = <<EOF | |
[ | |
{ | |
"name": "${local.dash_repository_name}-container", | |
"cpu": ${var.fargate_cpu}, | |
"memory": ${var.fargate_memory}, | |
"image": "${aws_ecr_repository.repository_001.repository_url}", | |
"networkMode": "awsvpc", | |
"portMappings": [ | |
{ | |
"containerPort": 80, | |
"hostPort": 80 | |
} | |
] | |
} | |
] | |
EOF | |
depends_on = [aws_iam_role_policy.ecs_execution_role_policy, | |
aws_ecr_repository.repository_001, | |
null_resource.docker_push] | |
} | |
data "external" "aws_ecs_task_meta" { | |
program = ["/bin/bash", "-c", "while [ 1 ]; do container_ip=$(ecs-cli ps --region ${var.aws_region} --aws-profile ${var.aws_profile} --cluster ${local.dash_repository_name}-cluster | grep -oP '([0-9]{1,3})(?<!10)\\.([0-9]{1,3})\\.([0-9]{1,3})\\.([0-9]{1,3})'); [[ ! -z \"$container_ip\" ]] && break || sleep 5; done; echo \"{\\\"public_ip\\\":\\\"$container_ip\\\"}\""] | |
depends_on = [aws_ecs_service.service_001] | |
} | |
/* DNS ----------------------------------------------------------------------*/ | |
data "aws_route53_zone" "zone_001" { | |
name = var.domain | |
} | |
resource "aws_route53_record" "record_001" { | |
zone_id = data.aws_route53_zone.zone_001.zone_id | |
name = "${var.subdomain}.${var.domain}" | |
type = "A" | |
ttl = "300" | |
records = [data.external.aws_ecs_task_meta.result.public_ip] | |
depends_on = [data.external.aws_ecs_task_meta] | |
} | |
/* Targets ------------------------------------------------------------------*/ | |
resource "null_resource" "dev" { | |
triggers = { | |
dash_repository_name = local.dash_repository_name | |
} | |
provisioner "local-exec" { | |
command = "docker run --network host --name ${local.dash_repository_name} --detach ${local.slash_repository_name}" | |
} | |
provisioner "local-exec" { | |
when = destroy | |
command = "docker stop ${self.triggers.dash_repository_name} && docker rm ${self.triggers.dash_repository_name}" | |
} | |
depends_on = [null_resource.docker_build] | |
} | |
resource "null_resource" "prod" { | |
depends_on = [aws_route53_record.record_001] | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
You thought I was going to roast you for putting your dockerfile inline but really I'm going to roast you for the structure of the dockerfile:
https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L70-L71 declares an env var data_dir and then https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L78-L79 doesn't even use the variable you just set.
I have no idea what you're trying to accomplish with
${{data_dir}}
when$data_dir
is functionally equivalent.This line https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L78 is completely useless, because WORKDIR ./data creates the directory if it doesn't exist, and then changes directory into it.
Here you're conflating application code with the concept of data, I don't understand why and it doesn't really make sense:
https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L79
https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L83 and https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L87 see previous comment, you're already in $data_dir from the WORKDIR command.
https://gist.github.com/noproto/556d5548989c5e52ab0feeeb1d272ba5#file-main-tf-L74 here you want --no-install-recommends and you want to also clear logs afterwards, otherwise you've got a bunch of extra garbage in your container:
Also you could skip the python installation steps entirely by just using a better base image, such as
Anyways I'm just giving you a hard time. Do whatever works, who cares about tomorrow.