Skip to content

Instantly share code, notes, and snippets.

@alperreha
Created February 17, 2026 11:12
Show Gist options
  • Select an option

  • Save alperreha/8768aa3990815c597acf103abb75ae33 to your computer and use it in GitHub Desktop.

Select an option

Save alperreha/8768aa3990815c597acf103abb75ae33 to your computer and use it in GitHub Desktop.
open-webui .env and docker-compose.yaml template for quick-start.
# Open WebUI environment template (quick-start).
# https://docs.openwebui.com/getting-started/env-configuration/
# find-best-llm: https://deepeval.com/docs/getting-started-llm-arena
# look https://docs.openwebui.com/getting-started/env-configuration/#thread_pool_size
ENABLE_PERSISTENT_CONFIG=False
WEBUI_URL=http://localhost:3000
ENV="dev|prod"
WEBUI_NAME="Open WebUI"
# FIX: the documented variable names are ENABLE_API_KEY /
# ENABLE_API_KEY_ENDPOINT_RESTRICTIONS / API_KEY_ALLOWED_ENDPOINTS
# (the previous ENABLE_API_KEYS / API_KEYS_ALLOWED_ENDPOINTS were ignored).
ENABLE_API_KEY=True
ENABLE_API_KEY_ENDPOINT_RESTRICTIONS=True
API_KEY_ALLOWED_ENDPOINTS="/api/v1/messages,/api/v1/channels,/api/v1/chat/completions"
GLOBAL_LOG_LEVEL=info
# Add banner: https://docs.openwebui.com/getting-started/env-configuration/#webui_banners.
# Volumes-Dir: https://docs.openwebui.com/getting-started/env-configuration/#directories.
# Ollama: https://docs.openwebui.com/getting-started/env-configuration/#ollama.
# OpenAI-compatible: https://docs.openwebui.com/getting-started/env-configuration/#openai.
# MCP: https://docs.openwebui.com/getting-started/env-configuration/#direct-connections-openapimcpo-tool-servers
OPENAI_API_BASE_URL="http://localhost:11434/v1/"
OPENAI_API_KEY="sk-xxxxxx" # set your openai key here
ENABLE_IMAGE_GENERATION=False
# Prompts: https://docs.openwebui.com/getting-started/env-configuration/#default_prompt_suggestions
# prompt-tool-calling: DEFAULT_FOLLOW_UP_GENERATION_PROMPT_TEMPLATE
# code-execution-local-or-remote: https://docs.openwebui.com/getting-started/env-configuration/#code-execution.
# Use the SAME secret key on every node of a multi-node deployment.
WEBUI_SECRET_KEY="t0p-s3cr3t"
OFFLINE_MODE=True
CORS_ALLOW_ORIGIN="*"
# DB
VECTOR_DB="pgvector"
PGVECTOR_DB_URL="postgresql://postgres:postgres@localhost:5432/postgres"
PGVECTOR_CREATE_EXTENSION=False
# RAG: https://docs.openwebui.com/getting-started/env-configuration/#rag-content-extraction-engine
RAG_EMBEDDING_ENGINE=openai
RAG_EMBEDDING_MODEL=text-embedding-3-small
# FIX: value is a plain comma-separated list, not a bracketed string
# (matches the working form used in the docker-compose template below).
RAG_ALLOWED_FILE_EXTENSIONS=pdf,docx,txt
RAG_FILE_MAX_SIZE=5 # 5 MB
# thread pool bg-jobs: ENABLE_ASYNC_EMBEDDING
# UI Utilities.
ENABLE_RETRIEVAL_QUERY_GENERATION=False
# fetch-proxy: WEB_SEARCH, WEB_FETCH_FILTER_LIST="!internal.company.com,!192.168.1.1".
# STT-local (speech-to-text)
WHISPER_MODEL=None # disable "base"
# LDAP: https://docs.openwebui.com/getting-started/env-configuration/#ldap
# S3
STORAGE_PROVIDER=s3
S3_ENDPOINT_URL="http://localhost:9000"
S3_REGION_NAME="us-east-1"
S3_BUCKET_NAME="openwebui-uploads"
# AWS_CA_BUNDLE="${DATA_DIR}/certs/minio.ca.crt"
S3_ADDRESSING_STYLE="path"
S3_ACCESS_KEY_ID="minio"
S3_SECRET_ACCESS_KEY="minioadmin"
S3_KEY_PREFIX="uploads/"
# DB-business-logic
DATABASE_TYPE=postgresql
DATABASE_URL="postgresql://postgres:postgres@localhost:5432/postgres"
# Redis for multinode
REDIS_URL="redis://:password@localhost:6379/0"
# Keep the Uvicorn worker count at 1: more workers spawn extra processes on the
# same machine, and Helm/Kubernetes already handles horizontal scaling — let
# pod-scaling manage replicas instead.
UVICORN_WORKERS=1
# Default templates — use these as reference when writing an agent:
# DEFAULT_AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE
# DEFAULT_TAGS_GENERATION_PROMPT_TEMPLATE
# DEFAULT_RAG_TEMPLATE
# DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE
# docker-compose template for Open WebUI + pgvector + MinIO + Redis.
# NOTE: the top-level `version` key is obsolete and ignored by Compose V2;
# kept only for compatibility with older tooling.
version: '3.8'

services:
  openwebui:
    image: ghcr.io/open-webui/open-webui:main-slim
    networks:
      - aifnnet
    ports:
      - "3000:8080"
    restart: always
    volumes:
      - ./volumes/openwebui:/app/backend/data
    profiles:
      - dev
    environment:
      - ENV=dev
      - WEBUI_NAME=OpenWebUI
      - WEBUI_URL=http://localhost:3000
      - WEBUI_SECRET_KEY=t0p-s3cr3t
      - ENABLE_PERSISTENT_CONFIG=True
      - GLOBAL_LOG_LEVEL=info
      - CORS_ALLOW_ORIGIN=*
      # FIX: the documented variable is ENABLE_API_KEY (singular); the
      # duplicate ENABLE_API_KEYS entry was ignored and has been removed.
      - ENABLE_API_KEY=True
      - OFFLINE_MODE=True
      - VECTOR_DB=pgvector
      - PGVECTOR_DB_URL=postgresql://postgres:postgres@postgresql:5432/postgres
      - PGVECTOR_CREATE_EXTENSION=True
      - OPENAI_API_BASE_URL=http://host.docker.internal:11434/v1
      - OPENAI_API_KEY=ollama
      - OLLAMA_BASE_URL=http://host.docker.internal:11434
      - RAG_EMBEDDING_ENGINE=openai
      - RAG_OPENAI_API_BASE_URL=http://host.docker.internal:11434/v1
      - RAG_OPENAI_API_KEY=ollama
      - RAG_EMBEDDING_MODEL=mxbai-embed-large:335m
      - RAG_ALLOWED_FILE_EXTENSIONS=pdf,docx,txt,md
      - RAG_FILE_MAX_SIZE=5
      - ENABLE_RETRIEVAL_QUERY_GENERATION=False
      - WHISPER_MODEL=None
      - ENABLE_IMAGE_GENERATION=False
      - DATABASE_TYPE=postgresql
      # FIX: SQLAlchemy requires the `postgresql://` scheme; the previous
      # `postgres://` URL is rejected (compare PGVECTOR_DB_URL above).
      - DATABASE_URL=postgresql://postgres:postgres@postgresql:5432/postgres
      - REDIS_URL=redis://:myredissecret@redis:6379/0
      - UVICORN_WORKERS=1
      # S3 Configuration
      - STORAGE_PROVIDER=s3
      - S3_ENDPOINT_URL=http://minio:9000
      - S3_REGION_NAME=us-east-1
      - S3_BUCKET_NAME=openwebui-uploads
      # - AWS_CA_BUNDLE=${DATA_DIR}/certs/minio.ca.crt
      - S3_ADDRESSING_STYLE=path
      - S3_ACCESS_KEY_ID=minio
      - S3_SECRET_ACCESS_KEY=minioadmin
      - S3_KEY_PREFIX=uploads/
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      # Wait for dependency healthchecks (both services define one) rather
      # than racing against bare container start.
      postgresql:
        condition: service_healthy
      redis:
        condition: service_healthy

  # postgresql service - `psql -h localhost -U user -d postgres`
  postgresql:
    # Default Postgres Image - image: postgres:15.2-alpine
    image: pgvector/pgvector:pg15-bookworm
    networks:
      - aifnnet
    ports:
      - "5432:5432"
    volumes:
      - ./volumes/postgres:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
      - POSTGRES_DB=postgres
      - PGDATA=/var/lib/postgresql/data/pgdata
    restart: always
    healthcheck:
      # $$ escapes compose interpolation so the container env vars are used.
      test: pg_isready -U "$$POSTGRES_USER" -d "$$POSTGRES_DB"
      interval: 10s
      timeout: 2s
      retries: 10
      start_period: 30s

  # minio service
  minio:
    # image: minio/minio:RELEASE.2023-01-31T02-24-19Z
    # image: minio/minio:RELEASE.2025-09-07T16-13-09Z
    image: minio/minio:RELEASE.2023-10-24T04-42-36Z
    networks:
      - aifnnet
    ports:
      - "9090:9000"
      - "127.0.0.1:9091:9001"
    environment:
      - MINIO_ROOT_USER=minio
      - MINIO_ROOT_PASSWORD=minioadmin
    restart: always
    volumes:
      - ./volumes/minio:/data
    # create the 'langfuse' bucket before starting the service
    # command: server /data --address ":9000" --console-address ":9001"
    entrypoint: sh
    command: -c 'mkdir -p /data/langfuse /data/openwebui-uploads && minio server --address ":9000" --console-address ":9001" /data'
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 1s
      timeout: 5s
      retries: 5
      start_period: 30s

  # pgadmin service
  pgadmin:
    image: dpage/pgadmin4:9.7.0
    networks:
      - aifnnet
    ports:
      - "5050:80"
    environment:
      - PGADMIN_DEFAULT_EMAIL=user@domain.com
      - PGADMIN_DEFAULT_PASSWORD=1020304050
    restart: always
    profiles:
      - dev

  redis:
    image: docker.io/redis:7
    restart: always
    networks:
      - aifnnet
    # CHANGEME: set REDIS_AUTH on the host to secure the redis password
    command: >
      --requirepass ${REDIS_AUTH:-myredissecret}
    ports:
      - "6379:6379"
    healthcheck:
      # FIX: authenticate the ping — an unauthenticated `redis-cli ping`
      # against a --requirepass server gets NOAUTH, not PONG.
      # ${...} is interpolated by compose on the host at parse time.
      test: ["CMD-SHELL", "redis-cli --no-auth-warning -a '${REDIS_AUTH:-myredissecret}' ping | grep -q PONG"]
      interval: 3s
      timeout: 10s
      retries: 10

networks:
  aifnnet:
    name: aifnnet
    driver: bridge
# Condensed Open WebUI .env for a local Ollama-backed quick-start.
ENV="dev"
WEBUI_NAME="Open WebUI"
WEBUI_URL=http://localhost:3000
WEBUI_SECRET_KEY="t0p-s3cr3t"
UVICORN_WORKERS=1
GLOBAL_LOG_LEVEL=info
ENABLE_PERSISTENT_CONFIG=False
CORS_ALLOW_ORIGIN="*"
OFFLINE_MODE=True
# FIX: the documented variable name is ENABLE_API_KEY (singular).
ENABLE_API_KEY=True
# DB-Embeddings
VECTOR_DB="pgvector"
PGVECTOR_DB_URL="postgresql://postgres:postgres@localhost:5432/postgres"
PGVECTOR_CREATE_EXTENSION=False
# ollama's OpenAI-Compatible-Endpoint
OPENAI_API_BASE_URL="http://localhost:11434/v1/"
OPENAI_API_KEY="ollama" # set your openai key here
RAG_EMBEDDING_ENGINE=openai
RAG_EMBEDDING_MODEL=mxbai-embed-large:335m
# FIX: value is a plain comma-separated list, not a bracketed string
# (matches the form used in the docker-compose template above).
RAG_ALLOWED_FILE_EXTENSIONS=pdf,docx,txt,md
RAG_FILE_MAX_SIZE=32 # 32 MB
ENABLE_RETRIEVAL_QUERY_GENERATION=False
WHISPER_MODEL=None # disable "base"
ENABLE_IMAGE_GENERATION=False
# DB environment values
DATABASE_TYPE=postgresql
DATABASE_URL="postgresql://postgres:postgres@localhost:5432/postgres"
REDIS_URL="redis://:password@localhost:6379/0"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment