Skip to content

Instantly share code, notes, and snippets.

@jwalsh
Last active August 21, 2025 18:20
Show Gist options
  • Save jwalsh/eb1839d2415d42a50b9a0a09231d9709 to your computer and use it in GitHub Desktop.
Save jwalsh/eb1839d2415d42a50b9a0a09231d9709 to your computer and use it in GitHub Desktop.

Database Proxy Patterns - Architecture Diagrams

Overview

This document illustrates various patterns for implementing database observability and proxy layers.

Application-Level Patterns

Decorator Pattern

%% Decorator pattern: a @db_observer decorator wraps each route handler's DB
%% access; solid edges are the request path, dotted edges fan observability
%% data out to the logger / metrics / trace sinks.
graph TB
    subgraph "Application Layer"
        APP[FastAPI App]
        DEC[["@db_observer<br/>Decorator"]]
        FUNC[Route Handler]
    end

    subgraph "Data Layer"
        POOL[Connection Pool]
        DB[(Database)]
    end

    subgraph "Observability"
        LOG[Query Logger]
        MET[Metrics Collector]
        TRACE[Trace Storage]
    end

    APP --> FUNC
    FUNC --> DEC
    DEC --> POOL
    POOL --> DB
    DEC -.-> LOG
    DEC -.-> MET
    DEC -.-> TRACE

    style DEC fill:#f9f,stroke:#333,stroke-width:4px
Loading

Middleware Pattern

%% Middleware pattern: the DB-tracking middleware (MW3/MW3R) sits in both the
%% request and response pipelines and reports query metrics to a dashboard.
graph LR
    subgraph "Request Flow"
        REQ[HTTP Request] --> MW1[Auth Middleware]
        MW1 --> MW2[Logging Middleware]
        MW2 --> MW3[["DB Tracking<br/>Middleware"]]
        MW3 --> ROUTE[Route Handler]
        ROUTE --> DB[(Database)]
    end

    subgraph "Response Flow"
        DB --> ROUTE2[Route Handler]
        ROUTE2 --> MW3R[["DB Tracking<br/>Middleware"]]
        MW3R --> MW2R[Logging Middleware]
        MW2R --> MW1R[Auth Middleware]
        MW1R --> RESP[HTTP Response]
    end

    subgraph "Metrics Collection"
        MW3 -.-> METRICS[Query Metrics]
        MW3R -.-> METRICS
        METRICS --> DASH[Dashboard]
    end

    style MW3 fill:#f96,stroke:#333,stroke-width:4px
    style MW3R fill:#f96,stroke:#333,stroke-width:4px
Loading

Repository Pattern with Instrumentation

%% Repository pattern with instrumentation: ObservableRepository decorates any
%% BaseRepository (it both subclasses the interface and aggregates a concrete
%% instance) and delegates observability callbacks to a QueryObserver.
classDiagram
    class BaseRepository {
        <<abstract>>
        +find(id)
        +find_all()
        +create(entity)
        +update(entity)
        +delete(id)
    }

    class ObservableRepository {
        <<decorator>>
        -repository: BaseRepository
        -observer: QueryObserver
        +find(id)
        +find_all()
        +create(entity)
        +update(entity)
        +delete(id)
        -log_query(operation, params)
        -measure_timing(operation)
    }

    class UserRepository {
        +find_by_email(email)
        +find_active_users()
    }

    class QueryObserver {
        +before_query(context)
        +after_query(context, result)
        +on_error(context, error)
    }

    BaseRepository <|-- UserRepository
    BaseRepository <|-- ObservableRepository
    ObservableRepository o-- QueryObserver
    ObservableRepository o-- BaseRepository
Loading

Infrastructure-Level Patterns

Database Proxy Pattern

%% Infrastructure-level proxy: all app instances connect to one proxy (:5433)
%% that parses, logs, caches and circuit-breaks queries before routing to the
%% primary/replicas; logs and metrics flow (dotted) into ES/Prometheus/Grafana.
graph TB
    subgraph "Application Tier"
        APP1[FastAPI Instance 1]
        APP2[FastAPI Instance 2]
        APP3[FastAPI Instance 3]
    end

    subgraph "Proxy Layer"
        PROXY[["Database Proxy<br/>:5433"]]
        subgraph "Proxy Features"
            QP[Query Parser]
            QL[Query Logger]
            QC[Query Cache]
            CB[Circuit Breaker]
        end
    end

    subgraph "Database Tier"
        PRIMARY[(Primary DB<br/>:5432)]
        REPLICA1[(Read Replica 1)]
        REPLICA2[(Read Replica 2)]
    end

    subgraph "Observability Stack"
        ES[Elasticsearch]
        GRAF[Grafana]
        PROM[Prometheus]
    end

    APP1 --> PROXY
    APP2 --> PROXY
    APP3 --> PROXY

    PROXY --> QP
    QP --> QL
    QP --> QC
    QP --> CB

    CB --> PRIMARY
    CB --> REPLICA1
    CB --> REPLICA2

    QL -.-> ES
    PROXY -.-> PROM
    ES -.-> GRAF
    PROM -.-> GRAF

    style PROXY fill:#9f9,stroke:#333,stroke-width:4px
Loading

Sidecar Pattern

%% Sidecar pattern: the app talks to a proxy container in the same pod over
%% localhost:5433; the sidecar forwards to the external DB and ships logs,
%% metrics and traces to their respective stores.
graph TB
    subgraph "Pod/Container Group"
        subgraph "Application Container"
            APP[FastAPI App<br/>:8000]
            APPDB[DB Client]
        end

        subgraph "Sidecar Container"
            SIDE[["DB Proxy Sidecar<br/>:5433"]]
            LOG[Log Shipper]
            METRIC[Metrics Agent]
            TRACE[Trace Collector]
        end
    end

    subgraph "External Services"
        DB[(PostgreSQL<br/>:5432)]
        LOGSTORE[Log Storage]
        METRICSTORE[Metric Storage]
        TRACESTORE[Trace Storage]
    end

    APP --> APPDB
    APPDB -->|localhost:5433| SIDE
    SIDE --> DB

    SIDE --> LOG
    SIDE --> METRIC
    SIDE --> TRACE

    LOG -.-> LOGSTORE
    METRIC -.-> METRICSTORE
    TRACE -.-> TRACESTORE

    style SIDE fill:#ff9,stroke:#333,stroke-width:4px
Loading

Service Mesh Pattern

%% Service mesh: every pod (including the database's) gets an Envoy sidecar;
%% Istio's control plane pushes configuration (dotted, downward) and collects
%% telemetry (dotted, upward) from all proxies.
graph TB
    subgraph "Service A Pod"
        APPA[FastAPI Service A]
        SIDEA[["Envoy Proxy"]]
    end

    subgraph "Service B Pod"
        APPB[FastAPI Service B]
        SIDEB[["Envoy Proxy"]]
    end

    subgraph "Database Pod"
        DBSIDE[["Envoy Proxy"]]
        DB[(PostgreSQL)]
    end

    subgraph "Control Plane"
        ISTIO[Istio Control Plane]
        CONF[Configuration]
        POLICY[Policies]
        TELEM[Telemetry]
    end

    APPA --> SIDEA
    SIDEA --> SIDEB
    SIDEB --> APPB

    APPB --> SIDEB
    SIDEB --> DBSIDE
    DBSIDE --> DB

    ISTIO --> CONF
    ISTIO --> POLICY
    ISTIO --> TELEM

    CONF -.-> SIDEA
    CONF -.-> SIDEB
    CONF -.-> DBSIDE

    SIDEA -.-> TELEM
    SIDEB -.-> TELEM
    DBSIDE -.-> TELEM

    style SIDEA fill:#9ff,stroke:#333,stroke-width:4px
    style SIDEB fill:#9ff,stroke:#333,stroke-width:4px
    style DBSIDE fill:#9ff,stroke:#333,stroke-width:4px
Loading

Hybrid Patterns

Read/Write Split Proxy

%% Read/write split: a query router classifies SQL; writes go to the primary,
%% reads go through a result cache and fall through to the replicas on a miss.
%% Health and replication-lag monitors watch the cluster.
graph TB
    subgraph "Application"
        APP[FastAPI App]
        WRITE[Write Operations]
        READ[Read Operations]
    end

    subgraph "Smart Proxy Layer"
        ROUTER[["Query Router<br/>:5433"]]
        PARSER[SQL Parser]
        CLASSIFIER[Query Classifier]
        CACHE[Result Cache]
    end

    subgraph "Database Cluster"
        PRIMARY[(Primary<br/>Write Only)]
        REPLICA1[(Replica 1<br/>Read Only)]
        REPLICA2[(Replica 2<br/>Read Only)]
        REPLICA3[(Replica 3<br/>Read Only)]
    end

    subgraph "Monitoring"
        LAG[Replication Lag Monitor]
        HEALTH[Health Checker]
    end

    APP --> ROUTER
    ROUTER --> PARSER
    PARSER --> CLASSIFIER

    CLASSIFIER -->|"INSERT/UPDATE/DELETE"| PRIMARY
    CLASSIFIER -->|"SELECT"| CACHE
    CACHE -->|"Cache Miss"| REPLICA1
    CACHE -->|"Cache Miss"| REPLICA2
    CACHE -->|"Cache Miss"| REPLICA3

    PRIMARY -.->|Replication| REPLICA1
    PRIMARY -.->|Replication| REPLICA2
    PRIMARY -.->|Replication| REPLICA3

    HEALTH --> PRIMARY
    HEALTH --> REPLICA1
    HEALTH --> REPLICA2
    HEALTH --> REPLICA3

    LAG --> REPLICA1
    LAG --> REPLICA2
    LAG --> REPLICA3

    style ROUTER fill:#f99,stroke:#333,stroke-width:4px
    style CLASSIFIER fill:#ff9,stroke:#333,stroke-width:4px
Loading

Event Sourcing with Proxy

%% CQRS/event sourcing with proxies: commands are appended to the event store
%% via a command proxy; queries hit a read model built by projections; the
%% replayer rebuilds snapshots that also feed the read model.
graph LR
    subgraph "Application Layer"
        APP[FastAPI App]
        CMD[Command Handler]
        QUERY[Query Handler]
    end

    subgraph "Proxy Layer"
        CMDPROXY[["Command Proxy"]]
        QUERYPROXY[["Query Proxy"]]
        EVENTLOG[Event Logger]
    end

    subgraph "Storage"
        EVENTSTORE[(Event Store)]
        READMODEL[(Read Model)]
        SNAPSHOT[(Snapshots)]
    end

    subgraph "Processors"
        PROJ[Projection Builder]
        REPLAY[Event Replayer]
    end

    APP --> CMD
    APP --> QUERY

    CMD --> CMDPROXY
    CMDPROXY --> EVENTSTORE
    CMDPROXY --> EVENTLOG

    QUERY --> QUERYPROXY
    QUERYPROXY --> READMODEL

    EVENTSTORE --> PROJ
    PROJ --> READMODEL

    EVENTSTORE --> REPLAY
    REPLAY --> SNAPSHOT
    SNAPSHOT --> READMODEL

    style CMDPROXY fill:#9f9,stroke:#333,stroke-width:4px
    style QUERYPROXY fill:#99f,stroke:#333,stroke-width:4px
Loading

Chain of Responsibility Pattern

%% Chain of responsibility: each handler either passes the request on or
%% short-circuits (reject → error response, cache hit → cached response);
%% the response travels back through a mirrored handler chain.
graph TB
    subgraph "Request Pipeline"
        REQ[Database Request]

        H1[["Auth Handler"]]
        H2[["Rate Limiter"]]
        H3[["Query Validator"]]
        H4[["Cache Handler"]]
        H5[["Logger Handler"]]
        H6[["Executor Handler"]]

        REQ --> H1
        H1 -->|pass| H2
        H2 -->|pass| H3
        H3 -->|pass| H4
        H4 -->|miss| H5
        H5 -->|pass| H6
        H6 --> DB[(Database)]

        H1 -->|reject| ERR1[401 Unauthorized]
        H2 -->|reject| ERR2[429 Too Many Requests]
        H3 -->|reject| ERR3[400 Bad Query]
        H4 -->|hit| RESP[Cached Response]
    end

    subgraph "Response Pipeline"
        DB --> R6[["Executor Handler"]]
        R6 --> R5[["Logger Handler"]]
        R5 --> R4[["Cache Handler"]]
        R4 --> R3[["Transformer"]]
        R3 --> R2[["Metrics Handler"]]
        R2 --> R1[["Response Builder"]]
        R1 --> RESULT[Final Response]
    end

    style H1 fill:#f99,stroke:#333,stroke-width:2px
    style H2 fill:#f99,stroke:#333,stroke-width:2px
    style H3 fill:#f99,stroke:#333,stroke-width:2px
    style H4 fill:#9f9,stroke:#333,stroke-width:2px
    style H5 fill:#99f,stroke:#333,stroke-width:2px
    style H6 fill:#fff,stroke:#333,stroke-width:2px
Loading

Observability Patterns

Distributed Tracing with Proxy

%% Distributed tracing: one TraceID (123) is propagated from service A through
%% service B into the DB proxy; every hop emits a span (dotted) to the Jaeger
%% collector, including DB timing and the query plan.
graph TB
    subgraph "Service A"
        SA[FastAPI Service A]
        SPA[["Span Creator"]]
    end

    subgraph "Service B"
        SB[FastAPI Service B]
        SPB[["Span Creator"]]
    end

    subgraph "Database Proxy"
        PROXY[["Observability Proxy"]]
        SPDB[["DB Span Creator"]]
        QUERY[Query Analyzer]
        TIMING[Timing Collector]
    end

    subgraph "Database"
        DB[(PostgreSQL)]
        EXPLAIN[Query Planner]
    end

    subgraph "Tracing Infrastructure"
        JAEGER[Jaeger Collector]
        UI[Jaeger UI]
    end

    SA --> SPA
    SPA -->|"TraceID: 123"| SB
    SB --> SPB
    SPB -->|"TraceID: 123"| PROXY
    PROXY --> SPDB
    SPDB --> QUERY
    QUERY --> TIMING
    TIMING --> DB
    DB --> EXPLAIN

    SPA -.->|"Span: service-a"| JAEGER
    SPB -.->|"Span: service-b"| JAEGER
    SPDB -.->|"Span: db-query"| JAEGER
    TIMING -.->|"Timing Metrics"| JAEGER
    EXPLAIN -.->|"Query Plan"| JAEGER

    JAEGER --> UI

    style PROXY fill:#f9f,stroke:#333,stroke-width:4px
    style SPDB fill:#ff9,stroke:#333,stroke-width:2px
Loading

Query Performance Monitoring

%% Query performance monitoring: a proxy runs analyzers (query/plan/index/slow
%% log) against the DB and its pg_stat_statements, raising alerts on slow
%% queries, missing indexes and full scans.
graph TB
    subgraph "Application"
        APP[FastAPI App]
    end

    subgraph "Monitoring Proxy"
        PROXY[["Performance Proxy"]]
        subgraph "Analyzers"
            QA[Query Analyzer]
            PA[Plan Analyzer]
            IA[Index Advisor]
            SA[Slow Query Logger]
        end
    end

    subgraph "Database"
        DB[(PostgreSQL)]
        STATS[pg_stat_statements]
        PLANS[Query Plans]
    end

    subgraph "Alerting"
        ALERT[Alert Manager]
        SLACK[Slack]
        PAGE[PagerDuty]
        DASH[Dashboard]
    end

    APP --> PROXY
    PROXY --> QA
    QA --> PA
    PA --> IA
    QA --> SA

    PA --> DB
    DB --> PLANS
    DB --> STATS

    SA -->|"Query > 1s"| ALERT
    IA -->|"Missing Index"| ALERT
    PA -->|"Full Scan"| ALERT

    ALERT --> SLACK
    ALERT --> PAGE
    ALERT --> DASH

    STATS -.-> DASH

    style PROXY fill:#9ff,stroke:#333,stroke-width:4px
Loading

Deployment Patterns

Multi-Environment Configuration

%% Multi-environment deployment: dev uses a verbose simple proxy, staging a
%% 10%-sampling proxy fed by replayed prod queries, prod an HA proxy cluster
%% at 1% sampling; all proxies report into one observability platform.
graph TB
    subgraph "Development"
        APPDEV[FastAPI Dev]
        PROXYDEV[["Simple Proxy<br/>Verbose Logging"]]
        DBDEV[(Local PostgreSQL)]
    end

    subgraph "Staging"
        APPSTG[FastAPI Staging]
        PROXYSTG[["Observability Proxy<br/>Sampling: 10%"]]
        DBSTG[(Staging PostgreSQL)]
        REPLAY[Query Replayer]
    end

    subgraph "Production"
        APPPROD1[FastAPI Prod 1]
        APPPROD2[FastAPI Prod 2]
        APPPROD3[FastAPI Prod 3]
        LB[Load Balancer]
        PROXYPROD[["HA Proxy Cluster<br/>Sampling: 1%"]]
        DBPROD[(Production PostgreSQL)]
    end

    subgraph "Observability Platform"
        COLLECT[Collector]
        STORE[Time Series DB]
        ANALYZE[Analytics Engine]
        ML[ML Anomaly Detection]
    end

    APPDEV --> PROXYDEV --> DBDEV
    APPSTG --> PROXYSTG --> DBSTG

    LB --> APPPROD1
    LB --> APPPROD2
    LB --> APPPROD3

    APPPROD1 --> PROXYPROD
    APPPROD2 --> PROXYPROD
    APPPROD3 --> PROXYPROD
    PROXYPROD --> DBPROD

    PROXYDEV -.-> COLLECT
    PROXYSTG -.-> COLLECT
    PROXYPROD -.-> COLLECT

    COLLECT --> STORE
    STORE --> ANALYZE
    ANALYZE --> ML

    REPLAY -.->|"Replay Prod Queries"| PROXYSTG

    style PROXYDEV fill:#9f9,stroke:#333,stroke-width:2px
    style PROXYSTG fill:#ff9,stroke:#333,stroke-width:2px
    style PROXYPROD fill:#f99,stroke:#333,stroke-width:4px
Loading
# docker-compose.yml — PostgreSQL configured for logical replication (CDC-ready)
# with verbose statement logging, plus a pgAdmin UI. The optional Debezium
# service is left commented out because it requires a Kafka broker.
# NOTE(review): this block's YAML indentation had been flattened (every key at
# column 0), which is not valid YAML; the nesting below is restored from the
# obvious compose structure.
version: '3.8'
services:
  postgres:
    image: postgres:16-alpine
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: myapp
      POSTGRES_INITDB_ARGS: "-c wal_level=logical"
    # Runtime flags: enable logical WAL for replication/CDC, log every
    # statement to files under /var/log/postgresql, and preload
    # pg_stat_statements for query statistics.
    command: |
      postgres
      -c wal_level=logical
      -c max_replication_slots=4
      -c max_wal_senders=4
      -c logging_collector=on
      -c log_statement=all
      -c log_directory='/var/log/postgresql'
      -c shared_preload_libraries='pg_stat_statements'
    ports:
      - "5432:5432"
    volumes:
      - ./postgres-data:/var/lib/postgresql/data
      - ./postgres-logs:/var/log/postgresql
  pgadmin:
    image: dpage/pgadmin4:latest
    environment:
      PGADMIN_DEFAULT_EMAIL: [email protected]
      PGADMIN_DEFAULT_PASSWORD: admin
    ports:
      - "5050:80"
    depends_on:
      - postgres
  # Optional: Debezium for CDC (requires Kafka)
  # debezium:
  #   image: debezium/connect:2.4
  #   environment:
  #     BOOTSTRAP_SERVERS: kafka:9092
  #     CONFIG_STORAGE_TOPIC: connect_configs
  #     OFFSET_STORAGE_TOPIC: connect_offsets
  #   ports:
  #     - "8083:8083"
  #   depends_on:
  #     - postgres

Docker-Only Database Proxy Setup

Overview

This guide demonstrates how to set up a complete database proxy architecture using only Docker containers - no local service installations required! Everything runs in containers with docker-compose.

Requirements

# Install Docker Desktop for macOS
brew install --cask docker

# Start Docker Desktop
open -a Docker

# Verify installation
docker --version
docker-compose --version

Quick Start

Create Project Directory

# Create project structure
mkdir -p docker-proxy-setup/{configs,scripts,data}
cd docker-proxy-setup

echo "βœ… Project directory created"
echo "πŸ“ Run 'org-babel-tangle' in Emacs to generate all files"
echo "πŸš€ Then run: docker-compose up -d"

Option 1: Traefik Universal Proxy

Modern reverse proxy with automatic service discovery and observability built-in.

Docker Compose Configuration

# docker-compose-traefik.yml — Traefik as a universal TCP proxy in front of
# PostgreSQL and MySQL (routed via container labels), plus PgBouncer pooling
# and a Prometheus/Grafana/Jaeger observability stack, all on one bridge net.
version: '3.8'

networks:
  proxy-net:
    driver: bridge

services:
  # Traefik - Modern reverse proxy with auto-discovery
  traefik:
    image: traefik:v3.0
    container_name: traefik
    command:
      # API and Dashboard
      - "--api.insecure=true"
      - "--api.dashboard=true"
      # Docker provider
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--providers.docker.network=proxy-net"
      # Entrypoints
      - "--entrypoints.web.address=:80"
      - "--entrypoints.postgres.address=:5432"
      - "--entrypoints.mysql.address=:3306"
      - "--entrypoints.metrics.address=:8080"
      # Logging
      - "--log.level=INFO"
      - "--accesslog=true"
      # Metrics
      - "--metrics.prometheus=true"
      - "--metrics.prometheus.buckets=0.1,0.3,1.2,5.0"
    ports:
      - "80:80"        # HTTP
      - "8080:8080"    # Traefik Dashboard
      - "5432:5432"    # PostgreSQL
      - "3306:3306"    # MySQL
    volumes:
      # Read-only Docker socket so Traefik can discover labelled containers.
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./configs/traefik:/etc/traefik
    networks:
      - proxy-net

  # PostgreSQL with automatic Traefik routing
  # (no host ports — reachable only through Traefik's :5432 entrypoint)
  postgres:
    image: postgres:16-alpine
    container_name: postgres-main
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: myapp
      POSTGRES_INITDB_ARGS: "-c shared_preload_libraries=pg_stat_statements"
    volumes:
      - postgres_data:/var/lib/postgresql/data
    labels:
      - "traefik.enable=true"
      # TCP Router for PostgreSQL
      - "traefik.tcp.routers.postgres.rule=HostSNI(`*`)"
      - "traefik.tcp.routers.postgres.entrypoints=postgres"
      - "traefik.tcp.routers.postgres.service=postgres"
      - "traefik.tcp.services.postgres.loadbalancer.server.port=5432"
    networks:
      - proxy-net

  # MySQL with automatic Traefik routing
  mysql:
    image: mysql:8
    container_name: mysql-main
    environment:
      MYSQL_ROOT_PASSWORD: mysql
      MYSQL_DATABASE: myapp
      MYSQL_USER: myapp
      MYSQL_PASSWORD: myapp
    volumes:
      - mysql_data:/var/lib/mysql
    labels:
      - "traefik.enable=true"
      # TCP Router for MySQL
      - "traefik.tcp.routers.mysql.rule=HostSNI(`*`)"
      - "traefik.tcp.routers.mysql.entrypoints=mysql"
      - "traefik.tcp.routers.mysql.service=mysql"
      - "traefik.tcp.services.mysql.loadbalancer.server.port=3306"
    networks:
      - proxy-net

  # PgBouncer connection pooler
  # NOTE(review): the DATABASES_* environment convention is used by the
  # edoburu/pgbouncer image; confirm pgbouncer/pgbouncer:latest honours it.
  pgbouncer:
    image: pgbouncer/pgbouncer:latest
    container_name: pgbouncer
    environment:
      DATABASES_HOST: postgres
      DATABASES_PORT: 5432
      DATABASES_USER: postgres
      DATABASES_PASSWORD: postgres
      DATABASES_DBNAME: myapp
      POOL_MODE: transaction
      MAX_CLIENT_CONN: 1000
      DEFAULT_POOL_SIZE: 25
    ports:
      - "6432:6432"
    networks:
      - proxy-net
    depends_on:
      - postgres

  # Prometheus for metrics
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
    ports:
      - "9090:9090"
    volumes:
      - ./configs/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    networks:
      - proxy-net

  # Grafana for visualization
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
    ports:
      - "3000:3000"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./configs/grafana/provisioning:/etc/grafana/provisioning
    networks:
      - proxy-net
    depends_on:
      - prometheus

  # Jaeger for distributed tracing
  # NOTE: SPAN_STORAGE_TYPE=memory — traces are lost when the container stops.
  jaeger:
    image: jaegertracing/all-in-one:latest
    container_name: jaeger
    environment:
      - COLLECTOR_OTLP_ENABLED=true
      - SPAN_STORAGE_TYPE=memory
    ports:
      - "5775:5775/udp"   # Zipkin/thrift
      - "6831:6831/udp"   # Jaeger thrift
      - "6832:6832/udp"   # Jaeger thrift
      - "5778:5778"       # Serve configs
      - "16686:16686"     # Jaeger UI
      - "14268:14268"     # Jaeger collector
      - "14250:14250"     # gRPC
      - "9411:9411"       # Zipkin
      - "4317:4317"       # OTLP gRPC
      - "4318:4318"       # OTLP HTTP
    networks:
      - proxy-net

volumes:
  postgres_data:
  mysql_data:
  prometheus_data:
  grafana_data:

Prometheus Configuration

# prometheus.yml — scrape Traefik plus optional DB/pooler exporters every 15s.
# The exporter jobs fail harmlessly until the corresponding exporter
# containers are added to the compose stack.
global:
  scrape_interval: 15s
  evaluation_interval: 15s

scrape_configs:
  # Traefik metrics
  - job_name: 'traefik'
    static_configs:
      - targets: ['traefik:8080']

  # PostgreSQL exporter (if added)
  - job_name: 'postgres'
    static_configs:
      - targets: ['postgres-exporter:9187']

  # MySQL exporter (if added)
  - job_name: 'mysql'
    static_configs:
      - targets: ['mysql-exporter:9104']

  # PgBouncer metrics
  - job_name: 'pgbouncer'
    static_configs:
      - targets: ['pgbouncer-exporter:9127']

Option 2: Simple Port Mapping

Everything on different ports - straightforward and easy to understand.

Docker Compose Configuration

# docker-compose-ports.yml — every service published on its own host port.
# NOTE(review): the "-replica" services are independent, empty databases; no
# streaming replication from the primaries is configured in this file.
version: '3.8'

services:
  # Database Layer
  postgres-primary:
    image: postgres:16-alpine
    container_name: postgres-primary
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: myapp
    volumes:
      - postgres_primary_data:/var/lib/postgresql/data

  postgres-replica:
    image: postgres:16-alpine
    container_name: postgres-replica
    ports:
      - "5433:5432"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: myapp
    volumes:
      - postgres_replica_data:/var/lib/postgresql/data

  mysql-primary:
    image: mysql:8
    container_name: mysql-primary
    ports:
      - "3306:3306"
    environment:
      MYSQL_ROOT_PASSWORD: mysql
      MYSQL_DATABASE: myapp
    volumes:
      - mysql_primary_data:/var/lib/mysql

  mysql-replica:
    image: mysql:8
    container_name: mysql-replica
    ports:
      - "3307:3306"
    environment:
      MYSQL_ROOT_PASSWORD: mysql
      MYSQL_DATABASE: myapp
    volumes:
      - mysql_replica_data:/var/lib/mysql

  # Proxy Layer
  pgbouncer:
    image: pgbouncer/pgbouncer:latest
    container_name: pgbouncer
    ports:
      - "6432:6432"
    environment:
      DATABASES_HOST: postgres-primary
      DATABASES_PORT: 5432
      DATABASES_USER: postgres
      DATABASES_PASSWORD: postgres
      DATABASES_DBNAME: myapp

  # HAProxy load-balances TCP across primary/replica pairs (see haproxy.cfg).
  haproxy:
    image: haproxy:alpine
    container_name: haproxy
    ports:
      - "5434:5434"  # PostgreSQL Load Balanced
      - "3308:3308"  # MySQL Load Balanced
      - "8404:8404"  # Stats Page
    volumes:
      - ./configs/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    depends_on:
      - postgres-primary
      - postgres-replica
      - mysql-primary
      - mysql-replica

  # Observability Layer
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    ports:
      - "9090:9090"
    volumes:
      - ./configs/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana_data:/var/lib/grafana

  jaeger:
    image: jaegertracing/all-in-one:latest
    container_name: jaeger
    ports:
      - "16686:16686"  # Jaeger UI
      - "14268:14268"  # HTTP collector
      - "4317:4317"    # OTLP gRPC
      - "4318:4318"    # OTLP HTTP

volumes:
  postgres_primary_data:
  postgres_replica_data:
  mysql_primary_data:
  mysql_replica_data:
  prometheus_data:
  grafana_data:

HAProxy Configuration

# haproxy.cfg — TCP load balancing for PostgreSQL (:5434) and MySQL (:3308)
# with the replica as a backup server, plus an HTTP stats page on :8404.
global
    maxconn 4096
    log stdout local0

defaults
    mode tcp
    timeout connect 5s
    timeout client 30s
    timeout server 30s
    option tcplog

# PostgreSQL Load Balancing — replica only used if the primary check fails.
listen postgres
    bind *:5434
    balance roundrobin
    option pgsql-check user postgres
    server pg_primary postgres-primary:5432 check
    server pg_replica postgres-replica:5432 check backup

# MySQL Load Balancing
listen mysql
    bind *:3308
    balance roundrobin
    option mysql-check user root
    server mysql_primary mysql-primary:3306 check
    server mysql_replica mysql-replica:3306 check backup

# Statistics page. The stats directives must live inside a proxy section
# (the original placed them at top level with an invalid `stats bind`
# keyword, which HAProxy rejects at startup), and the stats UI is HTTP,
# so override the tcp mode inherited from defaults.
listen stats
    bind *:8404
    mode http
    stats enable
    stats uri /stats
    stats refresh 10s

Option 3: Minimal Setup with Internal Networking

Only expose essential ports - everything else on internal network.

Docker Compose Configuration

# docker-compose-minimal.yml — a single nginx container is the only service
# with host ports; databases and the observability stack live on an internal
# bridge network and are reached through nginx (see nginx.conf).
version: '3.8'

networks:
  internal:
    driver: bridge

services:
  # Single entry point - Nginx proxy
  nginx-proxy:
    image: nginx:alpine
    container_name: nginx-proxy
    ports:
      - "80:80"       # Web UI access
      - "5432:5432"   # PostgreSQL access
      - "3306:3306"   # MySQL access
    volumes:
      - ./configs/nginx.conf:/etc/nginx/nginx.conf:ro
    networks:
      - internal
    depends_on:
      - postgres
      - mysql
      - grafana

  # All databases on internal network only
  postgres:
    image: postgres:16-alpine
    container_name: postgres
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: myapp
    volumes:
      - postgres_data:/var/lib/postgresql/data
    networks:
      - internal
    # No ports exposed to host!

  mysql:
    image: mysql:8
    container_name: mysql
    environment:
      MYSQL_ROOT_PASSWORD: mysql
      MYSQL_DATABASE: myapp
    volumes:
      - mysql_data:/var/lib/mysql
    networks:
      - internal
    # No ports exposed to host!

  # Observability stack - internal only
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    volumes:
      - ./configs/prometheus-minimal.yml:/etc/prometheus/prometheus.yml:ro
    networks:
      - internal

  # Grafana is told its public URL so it can be served under /grafana by nginx.
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_SERVER_ROOT_URL=http://localhost/grafana
      - GF_SERVER_SERVE_FROM_SUB_PATH=true
    networks:
      - internal

  # Jaeger UI served under /jaeger via nginx.
  jaeger:
    image: jaegertracing/all-in-one:latest
    container_name: jaeger
    environment:
      - QUERY_BASE_PATH=/jaeger
    networks:
      - internal

volumes:
  postgres_data:
  mysql_data:

Nginx Configuration

# nginx.conf — single entry point for the minimal setup: raw TCP passthrough
# for the databases (stream) plus HTTP sub-path routing for the web UIs.
events {
    worker_connections 1024;
}

stream {
    # PostgreSQL proxy
    upstream postgres_backend {
        server postgres:5432;
    }

    server {
        listen 5432;
        proxy_pass postgres_backend;
        proxy_connect_timeout 1s;
    }

    # MySQL proxy
    upstream mysql_backend {
        server mysql:3306;
    }

    server {
        listen 3306;
        proxy_pass mysql_backend;
        proxy_connect_timeout 1s;
    }
}

http {
    server {
        listen 80;

        # Grafana runs with GF_SERVER_SERVE_FROM_SUB_PATH=true, so it expects
        # the /grafana prefix to be preserved. proxy_pass WITHOUT a URI part
        # forwards the original request path unchanged (the previous trailing
        # slash stripped the prefix and broke Grafana's sub-path serving).
        location /grafana/ {
            proxy_pass http://grafana:3000;
            proxy_set_header Host $http_host;
        }

        # Prometheus is not started with --web.external-url, so keep stripping
        # the prefix here. NOTE(review): UI links/redirects under a sub-path
        # may still require --web.external-url=/prometheus — confirm.
        location /prometheus/ {
            proxy_pass http://prometheus:9090/;
            proxy_set_header Host $http_host;
        }

        # Jaeger runs with QUERY_BASE_PATH=/jaeger and expects the prefix kept.
        location /jaeger/ {
            proxy_pass http://jaeger:16686;
            proxy_set_header Host $http_host;
        }

        location / {
            return 301 /grafana/;
        }
    }
}

Management Scripts

Start Services

# Start all services: pick the first compose file present, bring the stack up
# detached, wait briefly, then print status and the access-point cheat sheet.

echo "πŸš€ Starting Database Proxy Stack..."

# Check which compose file to use
if [ -f "docker-compose.yml" ]; then
    COMPOSE_FILE="docker-compose.yml"
elif [ -f "docker-compose-traefik.yml" ]; then
    COMPOSE_FILE="docker-compose-traefik.yml"
else
    echo "❌ No docker-compose file found!"
    exit 1
fi

echo "πŸ“„ Using: $COMPOSE_FILE"

# Start services (quote the variable so a path with spaces doesn't word-split)
docker-compose -f "$COMPOSE_FILE" up -d

# Wait for services
echo "⏳ Waiting for services to start..."
sleep 10

# Show status
docker-compose -f "$COMPOSE_FILE" ps

echo "βœ… Stack is running!"
echo ""
echo "πŸ“Š Access points:"
echo "  - Databases:"
echo "    - PostgreSQL: localhost:5432 (user: postgres, pass: postgres)"
echo "    - MySQL: localhost:3306 (user: root, pass: mysql)"
echo "    - PgBouncer: localhost:6432"
echo ""
echo "  - Dashboards:"
echo "    - Grafana: http://localhost:3000 (admin/admin)"
echo "    - Prometheus: http://localhost:9090"
echo "    - Jaeger: http://localhost:16686"
echo "    - Traefik: http://localhost:8080 (if using Traefik)"

Stop Services

# Stop all services: locate the compose file the stack was started with and
# bring it down.

echo "πŸ›‘ Stopping Database Proxy Stack..."

# Find compose file
if [ -f "docker-compose.yml" ]; then
    COMPOSE_FILE="docker-compose.yml"
elif [ -f "docker-compose-traefik.yml" ]; then
    COMPOSE_FILE="docker-compose-traefik.yml"
else
    # Fall back to the first docker-compose-*.yml present. (The original
    # assigned the literal glob string, which `docker-compose -f` cannot use.)
    COMPOSE_FILE=$(ls docker-compose-*.yml 2>/dev/null | head -n 1)
    if [ -z "$COMPOSE_FILE" ]; then
        echo "❌ No docker-compose file found!"
        exit 1
    fi
fi

# Stop services
docker-compose -f "$COMPOSE_FILE" down

echo "βœ… Stack stopped"

Test Connections

# Test database connections by running throwaway client containers on the
# compose network. The network name assumes the project directory is
# "docker-proxy-setup" (compose prefixes networks with the directory name).

echo "πŸ” Testing Database Connections..."
echo "================================"

# Test PostgreSQL
echo "πŸ“˜ Testing PostgreSQL..."
docker run --rm --network docker-proxy-setup_default postgres:16-alpine \
    psql -h postgres-primary -U postgres -d myapp -c "SELECT version();" \
    || echo "❌ PostgreSQL connection failed"

# Test MySQL
echo "πŸ“™ Testing MySQL..."
docker run --rm --network docker-proxy-setup_default mysql:8 \
    mysql -h mysql-primary -u root -pmysql -e "SELECT VERSION();" \
    || echo "❌ MySQL connection failed"

# Test PgBouncer via the host-published port.
# NOTE(review): --network host on Docker Desktop (macOS) does not share the
# host's network namespace, so this check may not work there — confirm.
echo "πŸ“— Testing PgBouncer..."
docker run --rm --network host postgres:16-alpine \
    psql -h localhost -p 6432 -U postgres -d myapp -c "SELECT 1;" \
    || echo "❌ PgBouncer connection failed"

echo ""
echo "βœ… Connection tests complete"

Generate Load

# Generate test load on databases: 10 concurrent client containers per engine,
# each sleeping a random 0-2s before a trivial query, so the dashboards show
# some activity.

echo "πŸ”„ Generating test load..."

# PostgreSQL load
echo "πŸ“˜ PostgreSQL load test..."
for i in {1..10}; do
    # NOTE: `docker run -d` already detaches, so the trailing `&` is redundant.
    docker run --rm -d --network docker-proxy-setup_default postgres:16-alpine \
        psql -h postgres-primary -U postgres -d myapp \
        -c "SELECT pg_sleep(random() * 2); SELECT COUNT(*) FROM pg_stat_activity;" &
done

# MySQL load
echo "πŸ“™ MySQL load test..."
for i in {1..10}; do
    docker run --rm -d --network docker-proxy-setup_default mysql:8 \
        mysql -h mysql-primary -u root -pmysql myapp \
        -e "SELECT SLEEP(RAND() * 2); SHOW PROCESSLIST;" &
done

echo "βœ… Load generation started (20 concurrent connections)"
echo "View metrics in Grafana: http://localhost:3000"

View Logs

# View logs from all services, or from a single service passed as $1.
# Usage: ./logs.sh [service-name|all]

if [ -z "$1" ]; then
    echo "πŸ“‹ Usage: ./logs.sh [service-name|all]"
    echo "Available services:"
    docker-compose ps --services
    exit 1
fi

if [ "$1" = "all" ]; then
    docker-compose logs -f --tail=100
else
    # Quote the service name so it is passed as one argument even if it
    # contains shell-special characters.
    docker-compose logs -f --tail=100 "$1"
fi

Quick Start Guide

1. Tangle this file in Emacs

;; In Emacs, run:
(org-babel-tangle)
;; Or press C-c C-v t

2. Choose your setup option

# Interactive setup chooser: copy the selected compose variant to
# docker-compose.yml, then hand off to the start script.

echo "🎯 Docker Database Proxy Quick Start"
echo "==================================="
echo ""
echo "Choose your setup:"
echo "1) Traefik Universal Proxy (Recommended)"
echo "2) Simple Port Mapping"
echo "3) Minimal Internal Network"
echo ""
read -p "Enter choice (1-3): " choice

if [ "$choice" = "1" ]; then
    cp docker-compose-traefik.yml docker-compose.yml
    echo "βœ… Using Traefik setup"
elif [ "$choice" = "2" ]; then
    cp docker-compose-ports.yml docker-compose.yml
    echo "βœ… Using port mapping setup"
elif [ "$choice" = "3" ]; then
    cp docker-compose-minimal.yml docker-compose.yml
    echo "βœ… Using minimal setup"
else
    echo "❌ Invalid choice"
    exit 1
fi

echo ""
echo "Starting services..."
./scripts/start.sh

3. Access your services

| Service    | URL                    | Credentials       |
|------------|------------------------|-------------------|
| PostgreSQL | localhost:5432         | postgres/postgres |
| MySQL      | localhost:3306         | root/mysql        |
| PgBouncer  | localhost:6432         | postgres/postgres |
| Grafana    | http://localhost:3000  | admin/admin       |
| Prometheus | http://localhost:9090  | -                 |
| Jaeger     | http://localhost:16686 | -                 |
| Traefik    | http://localhost:8080  | -                 |

Integration Examples

Python Connection Example

"""Connect to PostgreSQL and MySQL through the proxy, emitting one OTLP span
per query to the local Jaeger collector (gRPC on :4317)."""
import psycopg2
import mysql.connector
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

# Setup OpenTelemetry
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer(__name__)

# Configure OTLP exporter to Jaeger
otlp_exporter = OTLPSpanExporter(
    endpoint="localhost:4317",
    insecure=True
)
span_processor = BatchSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)

# PostgreSQL through proxy
with tracer.start_as_current_span("postgres-query"):
    conn = psycopg2.connect(
        host="localhost",
        port=5432,
        database="myapp",
        user="postgres",
        password="postgres"
    )
    # Close cursor and connection even if the query raises; the original
    # leaked both on error and never closed the cursor.
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT version()")
            print(f"PostgreSQL: {cur.fetchone()[0]}")
    finally:
        conn.close()

# MySQL through proxy
with tracer.start_as_current_span("mysql-query"):
    conn = mysql.connector.connect(
        host="localhost",
        port=3306,
        database="myapp",
        user="root",
        password="mysql"
    )
    try:
        cur = conn.cursor()
        try:
            cur.execute("SELECT VERSION()")
            print(f"MySQL: {cur.fetchone()[0]}")
        finally:
            cur.close()
    finally:
        conn.close()

Node.js Connection Example

// Connect to PostgreSQL and MySQL through the proxy, wrapping each query in
// an OpenTelemetry span exported over OTLP gRPC to the local collector.
const { Client } = require('pg');
const mysql = require('mysql2/promise');
const { trace } = require('@opentelemetry/api');
const { NodeTracerProvider } = require('@opentelemetry/sdk-trace-node');
const { OTLPTraceExporter } = require('@opentelemetry/exporter-trace-otlp-grpc');
const { BatchSpanProcessor } = require('@opentelemetry/sdk-trace-base');

// Setup OpenTelemetry
// NOTE(review): the gRPC exporter's `url` is usually given with a scheme,
// e.g. 'http://localhost:4317' — confirm 'localhost:4317' is accepted.
const provider = new NodeTracerProvider();
const exporter = new OTLPTraceExporter({
  url: 'localhost:4317',
});
provider.addSpanProcessor(new BatchSpanProcessor(exporter));
provider.register();

const tracer = trace.getTracer('db-proxy-example');

// Run one traced query against each database and print the server version.
async function testConnections() {
  // PostgreSQL test
  await tracer.startActiveSpan('postgres-query', async (span) => {
    const pgClient = new Client({
      host: 'localhost',
      port: 5432,
      database: 'myapp',
      user: 'postgres',
      password: 'postgres',
    });
    
    await pgClient.connect();
    const res = await pgClient.query('SELECT version()');
    console.log('PostgreSQL:', res.rows[0].version);
    await pgClient.end();
    span.end();
  });

  // MySQL test
  await tracer.startActiveSpan('mysql-query', async (span) => {
    const connection = await mysql.createConnection({
      host: 'localhost',
      port: 3306,
      database: 'myapp',
      user: 'root',
      password: 'mysql',
    });
    
    const [rows] = await connection.execute('SELECT VERSION() as version');
    console.log('MySQL:', rows[0].version);
    await connection.end();
    span.end();
  });
}

testConnections().catch(console.error);

Troubleshooting

Common Issues

# Troubleshooting helper: check Docker, container/network status, scan recent
# logs for errors, and probe each published port.

echo "πŸ”§ Troubleshooting Database Proxy Stack"
echo "======================================"

# Check Docker
echo "🐳 Docker Status:"
docker version > /dev/null 2>&1 || echo "❌ Docker not running!"

# Check containers
echo ""
echo "πŸ“¦ Container Status:"
docker-compose ps

# Check networks
echo ""
echo "🌐 Networks:"
docker network ls | grep proxy

# Check logs for errors
echo ""
echo "πŸ“‹ Recent Errors:"
docker-compose logs --tail=20 | grep -i error || echo "βœ… No recent errors"

# Test connectivity. Use nc's exit status instead of grepping for
# "succeeded" — that message text varies between netcat implementations.
echo ""
echo "πŸ”Œ Port Availability:"
for port in 5432 3306 6432 3000 9090 16686 8080; do
    if nc -z localhost "$port" > /dev/null 2>&1; then
        echo "βœ… Port $port is accessible"
    else
        echo "❌ Port $port is not accessible"
    fi
done

echo ""
echo "πŸ’‘ Common fixes:"
echo "  - Restart Docker Desktop"
echo "  - Run: docker-compose down -v && docker-compose up -d"
echo "  - Check for port conflicts: lsof -i :5432"

Reset Everything

# Destroy the stack and its volumes after an explicit confirmation.

echo "πŸ—‘οΈ  Resetting Database Proxy Stack..."
echo "WARNING: This will delete all data!"
echo ""
read -p "Are you sure? (y/N): " confirm

# Accept both "y" and "Y" — the prompt advertises y/N but the original only
# matched lowercase "y".
case "$confirm" in
    [yY])
        docker-compose down -v
        docker system prune -f
        echo "βœ… Reset complete"
        ;;
    *)
        echo "❌ Reset cancelled"
        ;;
esac

Conclusion

You now have a complete database proxy setup running entirely in Docker containers! No local service installations required.

Key benefits:

  • πŸš€ Quick setup with docker-compose
  • 🎯 Multiple architecture options
  • πŸ“Š Full observability stack included
  • 🧹 Easy cleanup with Docker
  • πŸ”„ Portable across platforms

Happy proxying! πŸŽ‰

Comments are disabled for this gist.