Next.js + worker orchestration on Google Cloud example

An example of how to use Terraform to orchestrate a build + deploy pipeline for a Next.js + Postgres + Node.js worker on Google Cloud. In this example, the project is named "teamfeed".

Terraform lets you describe your infrastructure and deploy process declaratively, instead of having to configure it manually in the web UI.

Many things here are specific to my project or may be outdated, so adapt them to your own project accordingly.

Prod images

Build image

docker build -t "teamfeed-web" .

Running

docker run -t -i --env-file .env.local -p 3000:3000 teamfeed-web

Interactive shell

docker exec -it teamfeed-web /bin/sh

Multi-container dev

Start the database

docker compose up -d db

Start all services

docker compose up

Kill all services

docker compose down

Build all services

docker compose build

Connect to the cron VM

gcloud compute ssh --zone "us-west1-b" "teamfeed-cron-1" --project "teamfeed"

Initialize the cron VM (note: the step that adds POSTGRES_URL to ~/.bashrc is missing here; see the sketch below the commands)

sudo apt update
sudo apt install git postgresql screen
curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash -
sudo apt-get install -y nodejs
git clone git@github.com:tfdotdev/teamfeed.git
cd teamfeed/
npm install
screen -ARd
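
The missing step called out above can be filled in with something along these lines (placeholder connection string; the real value is not in the gist):

echo 'export POSTGRES_URL=postgresql://USER:PASSWORD@HOST:5432/DBNAME' >> ~/.bashrc
source ~/.bashrc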

Run worker

npm run workers-build
npm run workers-start
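
These npm scripts live in the project's package.json, which is not included in the gist; a purely illustrative shape (script names from above, commands assumed):

{
  "scripts": {
    "workers-build": "tsc -p tsconfig.workers.json",
    "workers-start": "node dist/workers/index.js"
  }
}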

Terraform

gcloud auth application-default login
terraform plan
terraform apply

Docker Compose config

services:
  db:
    image: postgres:14.1-alpine
    container_name: teamfeed-pgdb
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    ports:
      - '5432:5432'
    volumes:
      - db:/var/lib/postgresql/data
    restart: unless-stopped
  web:
    build:
      context: .
    container_name: teamfeed-web
    depends_on:
      - db
    env_file:
      - .env.local
    ports:
      - '3000:3000'
    restart: always

volumes:
  db:
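
The web container reads its environment from .env.local, which is not part of the gist; for local development against the Compose db service above, it would look something like this (Postgres values taken from the Compose config, the rest assumed placeholders):

POSTGRES_URL=postgresql://postgres:postgres@db:5432/postgres
NEXTAUTH_URL=http://localhost:3000
NEXTAUTH_SECRET=some-local-only-secret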

Dockerfile

# Alpine image is smaller but Prisma is broken on M1 https://github.com/prisma/prisma/issues/8478
# FROM node:18-alpine AS builder
# RUN apk update
# RUN apk add --no-cache openssl
FROM node:18-slim AS runner
RUN apt-get update \
&& apt-get install -y openssl
ENV NODE_ENV production
ENV NEXT_TELEMETRY_DISABLED 1
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
# Alpine image is smaller but Prisma is broken on M1 https://github.com/prisma/prisma/issues/8478
# FROM node:18-alpine AS builder
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
# RUN apk update
# RUN apk add --no-cache libc6-compat openssl
FROM node:18-slim AS builder
RUN apt-get update \
&& apt-get install -y openssl
WORKDIR /app
ENV NEXT_TELEMETRY_DISABLED 1
COPY \
jest.config.ts \
next-env.d.ts \
next.config.js \
package-lock.json \
package.json \
tsconfig.json \
./
COPY src/ src/
COPY prisma/ prisma/
COPY types/ types/
RUN npm ci
RUN npx prisma generate
RUN npm run build
#
# Production image, copy all the files and run next
#
FROM runner
WORKDIR /app
# You only need to copy next.config.js if you are NOT using the default configuration
COPY --from=builder /app/next.config.js ./
# COPY --from=builder /app/public ./public
COPY --from=builder /app/package.json ./package.json
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
COPY public/ /app/public/
USER nextjs
EXPOSE 3000
ENV PORT 3000
CMD ["node", "server.js"]

Terraform config

terraform {
  required_version = ">= 1.3.1"

  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.38.0"
    }
  }
}

provider "google" {
  project = var.project
  region  = var.region
}
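
The config references var.project and var.region, but their definitions are not part of the gist; a minimal variables sketch (defaults assumed from values that appear elsewhere in the gist):

variable "project" {
  type    = string
  default = "teamfeed"
}

variable "region" {
  type    = string
  default = "us-west1"
}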
resource "google_sql_database_instance" "postgres" {
name = "teamfeed-pgdb-3"
database_version = "POSTGRES_14"
region = var.region
settings {
tier = "db-f1-micro"
availability_type = "ZONAL"
backup_configuration {
binary_log_enabled = false
enabled = true
location = "us"
point_in_time_recovery_enabled = true
start_time = "08:00"
transaction_log_retention_days = 7
backup_retention_settings {
retained_backups = 7
retention_unit = "COUNT"
}
}
insights_config {
query_insights_enabled = true
query_string_length = 1024
record_application_tags = false
record_client_address = false
}
ip_configuration {
ipv4_enabled = true
require_ssl = false
authorized_networks {
name = "teamfeed-cron"
value = google_compute_address.cron.address
}
}
maintenance_window {
day = 6
hour = 10
}
}
}

locals {
  web_secrets = [
    "NEXTAUTH_SECRET",
    "NEXTAUTH_URL",
    "POSTGRES_URL",
  ]
}

resource "google_secret_manager_secret" "web" {
  for_each  = toset(local.web_secrets)
  secret_id = each.value

  replication {
    automatic = true
  }
}
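
Terraform creates the secrets themselves but not their values; each one still needs a version added out of band, for example (placeholder value shown; for Cloud Run the POSTGRES_URL typically points at the Cloud SQL unix socket, so a connection string along these lines is an assumption, not part of the gist):

echo -n 'postgresql://USER:PASSWORD@localhost/DBNAME?host=/cloudsql/teamfeed:us-west1:teamfeed-pgdb-3' | \
  gcloud secrets versions add POSTGRES_URL --data-file=-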
resource "google_service_account" "teamfeed_runner" {
account_id = "teamfeed-runner"
display_name = "Teamfeed app Cloud Run account"
}
resource "google_project_iam_member" "teamfeed_runner" {
for_each = toset([
"roles/cloudsql.client",
"roles/logging.logWriter",
"roles/secretmanager.secretAccessor",
])
project = var.project
role = each.key
member = "serviceAccount:${google_service_account.teamfeed_runner.email}"
}
resource "google_cloud_run_service" "teamfeed" {
name = "teamfeed"
location = var.region
lifecycle {
ignore_changes = [
template[0].metadata[0].annotations["client.knative.dev/user-image"],
template[0].metadata[0].annotations["run.googleapis.com/client-name"],
template[0].metadata[0].annotations["run.googleapis.com/client-version"],
template[0].metadata[0].labels
]
}
template {
metadata {
annotations = {
"autoscaling.knative.dev/maxScale" = "10"
"autoscaling.knative.dev/minScale" = "1"
"run.googleapis.com/cloudsql-instances" = "teamfeed:us-west1:teamfeed-pgdb-3"
}
}
spec {
container_concurrency = 80
timeout_seconds = 300
service_account_name = google_service_account.teamfeed_runner.email
containers {
image = "us-west1-docker.pkg.dev/teamfeed/docker/teamfeed/teamfeed:latest"
# Environment variable as a secret for all values in local.web_secrets
dynamic "env" {
for_each = local.web_secrets
content {
name = env.value
value_from {
secret_key_ref {
key = "latest"
name = env.value
}
}
}
}
}
}
}
depends_on = [
google_project_iam_member.teamfeed_runner,
google_secret_manager_secret.web,
]
}
resource "google_cloud_run_service_iam_binding" "teamfeed" {
location = google_cloud_run_service.teamfeed.location
service = google_cloud_run_service.teamfeed.name
role = "roles/run.invoker"
members = [
"allUsers"
]
}
resource "google_cloud_run_domain_mapping" "teamfeed_dev" {
name = "teamfeed.dev"
location = var.region
metadata {
namespace = "teamfeed"
}
spec {
route_name = google_cloud_run_service.teamfeed.name
}
}
resource "google_cloudbuild_trigger" "deploy" {
description = "Build and deploy to Cloud Run service teamfeed on push to \"^master$\""
substitutions = {
"_DEPLOY_REGION" = var.region
"_GCR_HOSTNAME" = format("us-west1-docker.pkg.dev/%s/docker", var.project)
"_LABELS" = "gcb-trigger-id=8bd1307e-f700-40e6-b6aa-885538c71038"
"_PLATFORM" = "managed"
"_SERVICE_NAME" = "teamfeed"
"_TRIGGER_ID" = "8bd1307e-f700-40e6-b6aa-885538c71038"
}
tags = [
"gcp-cloud-build-deploy-cloud-run",
"gcp-cloud-build-deploy-cloud-run-managed",
"teamfeed",
]
build {
images = [
"$_GCR_HOSTNAME/$REPO_NAME/$_SERVICE_NAME:$COMMIT_SHA",
"$_GCR_HOSTNAME/$REPO_NAME/$_SERVICE_NAME:latest",
]
tags = [
"gcp-cloud-build-deploy-cloud-run",
"gcp-cloud-build-deploy-cloud-run-managed",
"teamfeed",
]
step {
args = [
"build",
"--no-cache",
"-t",
"$_GCR_HOSTNAME/$REPO_NAME/$_SERVICE_NAME:$COMMIT_SHA",
"-t",
"$_GCR_HOSTNAME/$REPO_NAME/$_SERVICE_NAME:latest",
".",
]
id = "Build"
name = "gcr.io/cloud-builders/docker"
}
step {
args = [
"push",
"--all-tags",
"$_GCR_HOSTNAME/$REPO_NAME/$_SERVICE_NAME",
]
id = "Push"
name = "gcr.io/cloud-builders/docker"
}
step {
args = [
"run",
"services",
"update",
"$_SERVICE_NAME",
"--platform=managed",
"--image=$_GCR_HOSTNAME/$REPO_NAME/$_SERVICE_NAME:latest",
"--labels=managed-by=gcp-cloud-build-deploy-cloud-run,commit-sha=$COMMIT_SHA,gcb-build-id=$BUILD_ID,gcb-trigger-id=$_TRIGGER_ID,$_LABELS",
"--region=$_DEPLOY_REGION",
"--quiet",
]
entrypoint = "gcloud"
id = "Deploy"
name = "gcr.io/google.com/cloudsdktool/cloud-sdk:slim"
}
}
github {
name = "teamfeed"
owner = "your_github_here"
push {
branch = "^master$"
}
}
}
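
Without the trigger, roughly the same build-and-push can be done by hand with Cloud Build (image path taken from the Cloud Run config above; this command is not part of the gist):

gcloud builds submit --tag us-west1-docker.pkg.dev/teamfeed/docker/teamfeed/teamfeed:latest .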
resource "google_compute_address" "cron" {
name = "teamfeed-db-ip"
}
resource "google_compute_instance" "cron" {
name = "teamfeed-cron-1"
machine_type = "e2-small"
network_interface {
network = "default"
access_config {
nat_ip = google_compute_address.cron.address
network_tier = "PREMIUM"
}
}
boot_disk {
initialize_params {
image = "debian-11-bullseye-v20220822"
}
}
service_account {
email = google_service_account.teamfeed_runner.email
# Compute instances have scopes in addition to to IAM roles?
# https://cloud.google.com/compute/docs/access/service-accounts
scopes = [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append",
]
}
# TODO: Add a startup script to install the worker so you don't have to start it manually
}
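
One way to address the TODO above is a metadata_startup_script on the instance; a sketch only, with contents assumed from the manual init steps earlier (this attribute would sit inside the google_compute_instance resource):

  metadata_startup_script = <<-EOT
    #!/bin/bash
    apt-get update
    apt-get install -y git postgresql screen
    curl -fsSL https://deb.nodesource.com/setup_18.x | bash -
    apt-get install -y nodejs
  EOT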
resource "google_artifact_registry_repository" "docker" {
location = var.region
repository_id = "docker"
format = "DOCKER"
}
# TODO: Move terraform state to Google Cloud Storage when there are multiple employees
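
A minimal sketch of the remote state TODO above (the bucket name is assumed, and the bucket itself would have to exist first):

terraform {
  backend "gcs" {
    bucket = "teamfeed-terraform-state"
    prefix = "terraform/state"
  }
}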