@yob
Last active February 3, 2020 04:54
minimal fargate setup via terraform
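A single-file Terraform configuration that stands up a VPC with three public and three private subnets, NAT gateways, an internet-facing ALB, and an ECS Fargate service running nginxdemos/hello. Assuming AWS credentials for us-east-1 are configured locally, the standard Terraform workflow applies (generic usage, not part of the gist itself):

$ terraform init
$ terraform plan
$ terraform apply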
provider "aws" {
region = "us-east-1"
version = "~> 2.45"
}
data "aws_region" "primary" {
}
data "aws_availability_zones" "primary" {
}
////////////////////////////////////////////
// VPC / Networking
////////////////////////////////////////////
resource "aws_vpc" "jh-test" {
cidr_block = "172.16.0.0/16"
tags = {
Name = "jh-test"
}
}
resource "aws_subnet" "private-a" {
cidr_block = cidrsubnet(aws_vpc.jh-test.cidr_block, 8, 0)
availability_zone = "us-east-1a"
vpc_id = aws_vpc.jh-test.id
tags = {
Name = "jh-test-private-a"
}
}
resource "aws_subnet" "private-b" {
cidr_block = cidrsubnet(aws_vpc.jh-test.cidr_block, 8, 1)
availability_zone = "us-east-1b"
vpc_id = aws_vpc.jh-test.id
tags = {
Name = "jh-test-private-b"
}
}
resource "aws_subnet" "private-c" {
cidr_block = cidrsubnet(aws_vpc.jh-test.cidr_block, 8, 2)
availability_zone = "us-east-1c"
vpc_id = aws_vpc.jh-test.id
tags = {
Name = "jh-test-private-c"
}
}
resource "aws_subnet" "public-a" {
cidr_block = cidrsubnet(aws_vpc.jh-test.cidr_block, 8, 3)
availability_zone = "us-east-1a"
vpc_id = aws_vpc.jh-test.id
map_public_ip_on_launch = true
tags = {
Name = "jh-test-public-a"
}
}
resource "aws_subnet" "public-b" {
cidr_block = cidrsubnet(aws_vpc.jh-test.cidr_block, 8, 4)
availability_zone = "us-east-1b"
vpc_id = aws_vpc.jh-test.id
map_public_ip_on_launch = true
tags = {
Name = "jh-test-public-b"
}
}
resource "aws_subnet" "public-c" {
cidr_block = cidrsubnet(aws_vpc.jh-test.cidr_block, 8, 5)
availability_zone = "us-east-1c"
vpc_id = aws_vpc.jh-test.id
map_public_ip_on_launch = true
tags = {
Name = "jh-test-public-c"
}
}
# IGW for the public subnets
resource "aws_internet_gateway" "gw" {
  vpc_id = aws_vpc.jh-test.id

  tags = {
    Name = "jh-test"
  }
}

# Route public subnet traffic through the IGW (the public subnets have no
# explicit route table association, so they fall back to the VPC's main route table)
resource "aws_route" "internet_access" {
  route_table_id         = aws_vpc.jh-test.main_route_table_id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.gw.id
}
# Create a NAT gateway (with an EIP) in each public subnet so the private subnets
# get outbound internet connectivity
resource "aws_eip" "gw-a" {
  vpc        = true
  depends_on = [aws_internet_gateway.gw]
}

resource "aws_nat_gateway" "gw-a" {
  subnet_id     = aws_subnet.public-a.id
  allocation_id = aws_eip.gw-a.id
}

resource "aws_eip" "gw-b" {
  vpc        = true
  depends_on = [aws_internet_gateway.gw]
}

resource "aws_nat_gateway" "gw-b" {
  subnet_id     = aws_subnet.public-b.id
  allocation_id = aws_eip.gw-b.id
}

resource "aws_eip" "gw-c" {
  vpc        = true
  depends_on = [aws_internet_gateway.gw]
}

resource "aws_nat_gateway" "gw-c" {
  subnet_id     = aws_subnet.public-c.id
  allocation_id = aws_eip.gw-c.id
}
# Create a route table for each private subnet and route its non-local traffic
# through the matching NAT gateway to the internet
resource "aws_route_table" "private-a" {
  vpc_id = aws_vpc.jh-test.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.gw-a.id
  }
}

resource "aws_route_table" "private-b" {
  vpc_id = aws_vpc.jh-test.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.gw-b.id
  }
}

resource "aws_route_table" "private-c" {
  vpc_id = aws_vpc.jh-test.id

  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.gw-c.id
  }
}

# Explicitly associate the new route tables with the private subnets
# (so they don't default to the main route table)
resource "aws_route_table_association" "private-a" {
  subnet_id      = aws_subnet.private-a.id
  route_table_id = aws_route_table.private-a.id
}

resource "aws_route_table_association" "private-b" {
  subnet_id      = aws_subnet.private-b.id
  route_table_id = aws_route_table.private-b.id
}

resource "aws_route_table_association" "private-c" {
  subnet_id      = aws_subnet.private-c.id
  route_table_id = aws_route_table.private-c.id
}
////////////////////////////////////////////
// Security
////////////////////////////////////////////
# ALB Security group
# This is the group you need to edit if you want to restrict access to your application
resource "aws_security_group" "lb" {
name = "jh-ecs-alb"
description = "controls access to the ALB"
vpc_id = aws_vpc.jh-test.id
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
# Traffic to the ECS Cluster should only come from the ALB
resource "aws_security_group" "ecs_tasks" {
name = "jh-ecs-tasks"
description = "allow inbound access from the ALB only"
vpc_id = aws_vpc.jh-test.id
ingress {
protocol = "tcp"
from_port = 80
to_port = 80
security_groups = [aws_security_group.lb.id]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
////////////////////////////////////////////
// Load Balancing
////////////////////////////////////////////
resource "aws_alb" "jh-test" {
name = "jh-test"
subnets = [
aws_subnet.public-a.id,
aws_subnet.public-b.id,
aws_subnet.public-c.id,
]
security_groups = [aws_security_group.lb.id]
}
resource "aws_alb_target_group" "jh-test" {
name = "jh-test"
port = 80
protocol = "HTTP"
vpc_id = aws_vpc.jh-test.id
target_type = "ip"
}
# Redirect all traffic from the ALB to the target group
resource "aws_alb_listener" "front_end" {
load_balancer_arn = aws_alb.jh-test.id
port = 80
protocol = "HTTP"
default_action {
target_group_arn = aws_alb_target_group.jh-test.id
type = "forward"
}
}
////////////////////////////////////////////
// ECS / Fargate
////////////////////////////////////////////
resource "aws_ecs_cluster" "jh-ecs" {
name = "jh-test-ecs"
}
resource "aws_ecs_task_definition" "app" {
family = "app"
network_mode = "awsvpc"
requires_compatibilities = ["FARGATE"]
cpu = "512"
memory = "1024"
container_definitions = jsonencode(
[
{
image = "nginxdemos/hello:plain-text"
name = "app"
networkMode = "awsvpc"
portMappings = [
{
containerPort = 80
hostPort = 80
protocol = "TCP"
}
]
}
]
)
}
resource "aws_ecs_service" "jh-test" {
name = "jh-test-service"
cluster = aws_ecs_cluster.jh-ecs.id
task_definition = aws_ecs_task_definition.app.arn
desired_count = 3
launch_type = "FARGATE"
network_configuration {
security_groups = [aws_security_group.ecs_tasks.id]
subnets = [aws_subnet.private-a.id, aws_subnet.private-b.id, aws_subnet.private-c.id]
}
load_balancer {
target_group_arn = aws_alb_target_group.jh-test.id
container_name = "app"
container_port = 80
}
depends_on = [
aws_alb_listener.front_end,
]
}
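The config declares no outputs, so the ALB hostname has to be looked up in the console or CLI; a small output block like the one below (an addition, not part of the original gist) would print it directly after apply, ready for the curl check that follows:

output "alb_dns_name" {
  value = aws_alb.jh-test.dns_name
}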
$ curl -v jh-test-1443945769.us-east-1.elb.amazonaws.com
* Trying 52.206.218.193:80...
* TCP_NODELAY set
* Connected to jh-test-1443945769.us-east-1.elb.amazonaws.com (52.206.218.193) port 80 (#0)
> GET / HTTP/1.1
> Host: jh-test-1443945769.us-east-1.elb.amazonaws.com
> User-Agent: curl/7.67.0
> Accept: */*
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 200 OK
< Date: Mon, 03 Feb 2020 04:53:51 GMT
< Content-Type: text/plain
< Content-Length: 143
< Connection: keep-alive
< Server: nginx/1.13.8
< Expires: Mon, 03 Feb 2020 04:53:50 GMT
< Cache-Control: no-cache
<
Server address: 172.16.0.184:80
Server name: 50dbac0c9d56
Date: 03/Feb/2020:04:53:51 +0000
URI: /
Request ID: e1a6ded346c44253888f52c8ae0e617f
* Connection #0 to host jh-test-1443945769.us-east-1.elb.amazonaws.com left intact
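The NAT gateways and ALB accrue hourly charges, so once the test is done the whole stack can be removed with the usual command:

$ terraform destroy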