A sample set of configurations for running Django/Gunicorn apps in Docker. This config even supports zero-downtime deploys! Some implementation required.
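The zero-downtime deploy in the script below works by defining the API service twice in the compose file: api published on host port 8000 and api_backup on 8001. During an upgrade the backup copy comes up first, the primary is rebuilt and restarted behind it, and the backup is then stopped. For requests to actually fail over, a reverse proxy on the host needs both ports in its pool. A minimal sketch with nginx (the upstream name and server_name are hypothetical, not part of the gist):

# Hypothetical nginx config fronting both API containers.
upstream django_api {
    server 127.0.0.1:8000;         # api (primary)
    server 127.0.0.1:8001 backup;  # api_backup, used only while the primary is down
}

server {
    listen 80;
    server_name example.com;  # hypothetical domain

    location / {
        proxy_pass http://django_api;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

Marking port 8001 as backup means nginx only routes to it while the primary is unreachable, i.e. during the restart window.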
#! /bin/bash
# Deployment script: builds, starts, and upgrades the compose services.
set -e;

MODE="api"

log() {
    echo "[$(date)] $*"
}

load_secrets() {
    if [ ! -f ./secrets.sh ]; then
        log "No secrets found!"
    else
        log "Sourcing secrets..."
        source ./secrets.sh;
    fi
}

update_codebase() {
    log "Updating codebase..."
    git pull
}

stop_scheduler() {
    log "Stopping scheduled tasks..."
    docker-compose stop beat
}

shutdown_queues() {
    log "Shutting down queues..."
    docker-compose run --rm api ./manage.py shutdown_queues
    log "Waiting for queues to become idle (15 sec)..."
    sleep 15;
}

do_migrate() {
    log "Migrating database..."
    docker-compose run --rm api ./manage.py migrate
}

deploy_worker() {
    docker-compose up -d --build worker
}

deploy_local_services() {
    log "Building new images..."
    docker-compose build beat monitor
    log "Deploying new containers..."
    docker-compose up -d beat monitor
}

deploy_www() {
    log "Building new images..."
    docker-compose build api
    log "Starting backup API version (zero-downtime)..."
    docker-compose up -d api_backup
    sleep 5;
    log "Deploying new containers..."
    docker-compose up -d api
    sleep 5;
    do_migrate
    log "Stopping old version (zero-downtime)..."
    docker-compose stop api_backup
    log "Rebuilding the backup API for later use (zero-downtime)..."
    docker-compose build api_backup
}

# Commands

do_up() {
    load_secrets
    if [ "$MODE" == "worker" ]; then
        deploy_worker
    elif [ "$MODE" == "api-only" ]; then
        deploy_www
    else
        deploy_www
        deploy_local_services
        deploy_worker
    fi
}

do_down() {
    load_secrets
    docker-compose down
}

do_upgrade() {
    load_secrets
    update_codebase
    if [ "$MODE" == "worker" ]; then
        shutdown_queues
        deploy_worker
    elif [ "$MODE" == "api-only" ]; then
        deploy_www
    else
        stop_scheduler
        shutdown_queues
        deploy_worker
        deploy_www
        deploy_local_services
    fi
}

# Begin Arg Parsing

if [ -n "$2" ]; then
    case $2 in
        -w|--worker)
            MODE="worker"
            ;;
        -a|--api)
            MODE="api"
            ;;
        --api-only)
            MODE="api-only"
            ;;
        *)
            log "Error: Unknown Mode: $2"
            exit 1;
            ;;
    esac
fi

case $1 in
    up)
        do_up
        ;;
    down)
        do_down
        ;;
    upgrade)
        do_upgrade
        ;;
    *)
        log "Error: Unknown Command: $1"
        exit 1;
        ;;
esac
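The script is invoked as deploy.sh followed by a command and an optional mode, matching the arg parsing above:

./deploy.sh up           # build and start everything: api, beat, monitor, worker
./deploy.sh upgrade -w   # pull the latest code and redeploy only the worker
./deploy.sh down         # stop the whole stack

load_secrets sources an untracked secrets.sh when present. The compose file below lists most environment variables without values, so they must already be exported in the shell that runs docker-compose. A hypothetical example, every value a placeholder:

# secrets.sh (kept out of version control)
export SECRET_KEY='change-me'
export ALLOWED_HOSTS='example.com'
export DATABASE_URL='postgres://user:password@db:5432/app'
export BROKER_URL='amqp://user:password@broker:5672//'
export POSTGRES_DB='app'
export POSTGRES_USER='user'
export POSTGRES_PASSWORD='password'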
version: "3"

services:
  api:
    build:
      context: ./api
    restart: always
    depends_on:
      - memcached
      - db
    environment:
      - RUN_MODE=api
      - MEMCACHED_URL=memcached:11211
      - ALLOWED_HOSTS
      - SECRET_KEY
      - DATABASE_URL
      - BROKER_URL
      - EMAIL_HOST
      - EMAIL_PORT
      - DEFAULT_FROM_EMAIL
      - EMAIL_HOST_USER
      - EMAIL_HOST_PASSWORD
      - STATIC_ROOT
    volumes:
      - /opt/volumes/staticfiles/:/opt/staticfiles
    ports:
      - "127.0.0.1:8000:8000"

  api_backup:
    build:
      context: ./api
    restart: always
    depends_on:
      - memcached
      - db
    environment:
      - RUN_MODE=api
      - MEMCACHED_URL=memcached:11211
      - ALLOWED_HOSTS
      - SECRET_KEY
      - DATABASE_URL
      - BROKER_URL
      - EMAIL_HOST
      - EMAIL_PORT
      - DEFAULT_FROM_EMAIL
      - EMAIL_HOST_USER
      - EMAIL_HOST_PASSWORD
      - STATIC_ROOT
    volumes:
      - /opt/volumes/staticfiles/:/opt/staticfiles
    ports:
      - "127.0.0.1:8001:8000"

  db:
    image: postgres:11
    restart: always
    expose:
      - "5432"
    environment:
      - POSTGRES_DB
      - POSTGRES_USER
      - POSTGRES_PASSWORD
    volumes:
      - /opt/volumes/data:/var/lib/postgresql/data

  # Auxiliary Services
  worker:
    build:
      context: ./api
    restart: always
    depends_on:
      - db
    environment:
      - RUN_MODE=worker
      - ALLOWED_HOSTS
      - SECRET_KEY
      - DATABASE_URL
      - BROKER_URL
      - EMAIL_HOST
      - EMAIL_PORT
      - DEFAULT_FROM_EMAIL
      - EMAIL_HOST_USER
      - EMAIL_HOST_PASSWORD
      - STATIC_ROOT

  beat:
    build:
      context: ./api
    restart: always
    depends_on:
      - db
    volumes:
      - /opt/volumes/scheduler/:/opt/scheduler
    environment:
      - RUN_MODE=beat
      - ALLOWED_HOSTS
      - SECRET_KEY
      - DATABASE_URL
      - BROKER_URL
      - EMAIL_HOST
      - EMAIL_PORT
      - DEFAULT_FROM_EMAIL
      - EMAIL_HOST_USER
      - EMAIL_HOST_PASSWORD
      - STATIC_ROOT

  # Assumed definition for the monitor service referenced by deploy.sh;
  # mirrors worker, using the entrypoint's RUN_MODE=monitor branch.
  monitor:
    build:
      context: ./api
    restart: always
    depends_on:
      - db
    environment:
      - RUN_MODE=monitor
      - ALLOWED_HOSTS
      - SECRET_KEY
      - DATABASE_URL
      - BROKER_URL

  memcached:
    image: bitnami/memcached:latest
    environment:
      - MEMCACHED_CACHE_SIZE=128

  # Task Brokering
  broker:
    build:
      context: ./broker
    restart: always
    ports:
      - "127.0.0.1:15672:15672"
    expose:
      - "5672"
    environment:
      - BROKER_DEFAULT_USER
      - BROKER_DEFAULT_PASS
    volumes:
      - /opt/volumes/rabbitmq:/var/lib/rabbitmq
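Every api-derived service builds from ./api, whose Dockerfile the gist doesn't include. A minimal sketch, assuming a pip-managed project that ships the entrypoint script shown next (base image and file names are assumptions):

# api/Dockerfile (hypothetical)
FROM python:3.8
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
# Dispatches on $RUN_MODE, see the entrypoint script below.
ENTRYPOINT ["sh", "./entrypoint.sh"]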
#! /bin/sh
# A basic entrypoint script
set -e;

log() {
    echo "[$(date)] $1";
}

start_api() {
    # Collect static files into $STATIC_ROOT, then serve with gunicorn:
    # 4 gevent workers, each handling up to 2000 concurrent connections.
    python manage.py collectstatic --no-input
    gunicorn <app name>.wsgi \
        -k gevent \
        -w 4 \
        --worker-connections=2000 \
        --backlog=1000 \
        -p gunicorn.pid \
        --bind=0.0.0.0:8000
}

start_monitor() {
    python manage.py monitor_cluster
}

start_beat() {
    celery -A <app name> beat -s /opt/scheduler/celerybeat-schedule
}

start_worker() {
    # -E emits task events for monitoring; --autoscale keeps between
    # 3 and 20 worker processes depending on load.
    celery \
        -A <app name> worker \
        -l info \
        -Q <list all queues> \
        -E \
        --autoscale=20,3 \
        --max-tasks-per-child 100
}

# Begin Main

if [ -z "$RUN_MODE" ]; then
    log "No mode provided. Use: api, worker, beat, or monitor";
    exit 1;
fi

if [ "$RUN_MODE" = "worker" ]; then
    start_worker
elif [ "$RUN_MODE" = "monitor" ]; then
    start_monitor
elif [ "$RUN_MODE" = "beat" ]; then
    start_beat
elif [ "$RUN_MODE" = "api" ]; then
    start_api
else
    log "Invalid RUN_MODE: $RUN_MODE";
    exit 1;
fi
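Finally, the upgrade path depends on a custom shutdown_queues management command, which falls under the "some implementation required" caveat. A minimal sketch using Celery's control API (the module path mirrors the <app name> placeholder used in the scripts above):

# <app name>/management/commands/shutdown_queues.py (hypothetical)
from django.core.management.base import BaseCommand

from <app name>.celery import app  # substitute your project, as elsewhere


class Command(BaseCommand):
    help = "Tell Celery workers to stop consuming so in-flight tasks drain."

    def handle(self, *args, **options):
        # Cancel consumers on every queue the app knows about. Running
        # tasks finish; the 15-second sleep in deploy.sh gives them time.
        for queue in app.amqp.queues:
            self.stdout.write(f"Cancelling consumers for queue: {queue}")
            app.control.cancel_consumer(queue, reply=True)

cancel_consumer only stops delivery of new tasks; the worker processes stay up, so the subsequent docker-compose up -d --build worker can replace them cleanly.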