supabase
#!/bin/bash
# This just provides you with a new docker-compose.yaml file.
# 1. You then have to edit your Supabase stack's docker compose and paste it in
# 2. Then you have to re-deploy to get the new versions installed
# TODO: Be able to run this script on your VPS and have it auto-update your compose yaml

# Local Docker Compose file
LOCAL_FILE="docker-compose.yaml"

# Check if sed, yq, and curl are installed
if ! command -v sed &>/dev/null; then
  echo "sed is required but not installed. Aborting."
  exit 1
fi
if ! command -v yq &>/dev/null; then
  echo "yq is required but not installed. Aborting."
  exit 1
fi
if ! command -v curl &>/dev/null; then
  echo "curl is required but not installed. Aborting."
  exit 1
fi

# Flag for dry run
DRY_RUN=false

# Parse command line arguments
while getopts "d" opt; do
  case ${opt} in
    d)
      DRY_RUN=true
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
  esac
done
# Check if the local Docker Compose file exists
if [ ! -f "$LOCAL_FILE" ]; then
  echo "File not found: $LOCAL_FILE"
  echo "Downloading from coollabsio/coolify/main/templates/compose/supabase.yaml"
  curl -s -L https://raw.githubusercontent.com/coollabsio/coolify/main/templates/compose/supabase.yaml >"$LOCAL_FILE"
fi
# Check if the file is not empty and has valid YAML content
if [ ! -s "$LOCAL_FILE" ] || ! yq e . "$LOCAL_FILE" &>/dev/null; then
  echo "Invalid file: $LOCAL_FILE"
  exit 1
fi

echo "Running on Docker Compose file: $LOCAL_FILE"
# Fetch the remote Docker Compose file
REMOTE_YML=$(curl -s https://raw.githubusercontent.com/supabase/supabase/master/docker/docker-compose.yml)
# Parse the remote Docker Compose file to get the images and their versions
images=$(echo "$REMOTE_YML" | yq e '.services.*.image' -)

# Initialize summary
summary=""

echo "LOCAL_FILE: $LOCAL_FILE"

# Loop over the images
for image in $images; do
  # Extract the image name and version
  name=$(echo "$image" | cut -d: -f1)
  version=$(echo "$image" | cut -d: -f2-)
  # Get the current version
  current_version=$(yq e ".services.*.image | select(test(\"^$name:\"))" "$LOCAL_FILE" | cut -d: -f2-)
  if [ "$DRY_RUN" = false ]; then
    # Replace the image version in the local Docker Compose file.
    # -i.bak works with both GNU and BSD/macOS sed (bare -i "" is macOS-only
    # and breaks on a Linux VPS); the backup file is removed afterwards.
    if ! sed -i.bak "s#$name:$current_version#$name:$version#g" "$LOCAL_FILE"; then
      echo "Failed to update the image version in the local Docker Compose file. Aborting."
      exit 1
    fi
    rm -f "$LOCAL_FILE.bak"
  fi
  # Update the summary
  if [ "$current_version" = "$version" ]; then
    summary="${summary}${name}: not updated\n"
  else
    summary="${summary}${name}: ${current_version} -> ${version}\n"
  fi
done

# Print the summary
echo -e "\nSummary:\n$summary"
if [ "$DRY_RUN" = true ]; then
  echo "Dry run completed. No changes were made."
else
  echo "Update completed. The local Docker Compose file has been updated."
fi
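
Usage sketch (the script name update-supabase.sh is just an assumption; -d is the dry-run flag parsed by getopts above):

  chmod +x update-supabase.sh
  ./update-supabase.sh -d    # dry run: only print the old -> new version summary
  ./update-supabase.sh       # rewrite docker-compose.yaml in place

The Coolify template below (templates/compose/supabase.yaml) is the compose file this script rewrites.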
# documentation: https://supabase.io
# slogan: The open source Firebase alternative.
# tags: firebase, alternative, open-source
# minversion: 4.0.0-beta.228
# logo: svgs/supabase.svg
# port: 8000

services:
  supabase-kong:
    image: kong:2.8.1
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    depends_on:
      supabase-analytics:
        condition: service_healthy
    environment:
      - SERVICE_FQDN_SUPABASEKONG
      - JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - KONG_DATABASE=off
      - KONG_DECLARATIVE_CONFIG=/home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      - KONG_DNS_ORDER=LAST,A,CNAME
      - KONG_PLUGINS=request-transformer,cors,key-auth,acl,basic-auth
      - KONG_NGINX_PROXY_PROXY_BUFFER_SIZE=160k
      - KONG_NGINX_PROXY_PROXY_BUFFERS=64 160k
      - SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
      - SUPABASE_SERVICE_KEY=${SERVICE_SUPABASESERVICE_KEY}
      - DASHBOARD_USERNAME=${SERVICE_USER_ADMIN}
      - DASHBOARD_PASSWORD=${SERVICE_PASSWORD_ADMIN}
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - type: bind
        source: ./volumes/api/kong.yml
        target: /home/kong/temp.yml
        content: |
          _format_version: '2.1'
          _transform: true
          ###
          ### Consumers / Users
          ###
          consumers:
            - username: DASHBOARD
            - username: anon
              keyauth_credentials:
                - key: $SUPABASE_ANON_KEY
            - username: service_role
              keyauth_credentials:
                - key: $SUPABASE_SERVICE_KEY
          ###
          ### Access Control List
          ###
          acls:
            - consumer: anon
              group: anon
            - consumer: service_role
              group: admin
          ###
          ### Dashboard credentials
          ###
          basicauth_credentials:
            - consumer: DASHBOARD
              username: $DASHBOARD_USERNAME
              password: $DASHBOARD_PASSWORD
          ###
          ### API Routes
          ###
          services:
            ## Open Auth routes
            - name: auth-v1-open
              url: http://supabase-auth:9999/verify
              routes:
                - name: auth-v1-open
                  strip_path: true
                  paths:
                    - /auth/v1/verify
              plugins:
                - name: cors
            - name: auth-v1-open-callback
              url: http://supabase-auth:9999/callback
              routes:
                - name: auth-v1-open-callback
                  strip_path: true
                  paths:
                    - /auth/v1/callback
              plugins:
                - name: cors
            - name: auth-v1-open-authorize
              url: http://supabase-auth:9999/authorize
              routes:
                - name: auth-v1-open-authorize
                  strip_path: true
                  paths:
                    - /auth/v1/authorize
              plugins:
                - name: cors
            ## Secure Auth routes
            - name: auth-v1
              _comment: 'GoTrue: /auth/v1/* -> http://supabase-auth:9999/*'
              url: http://supabase-auth:9999/
              routes:
                - name: auth-v1-all
                  strip_path: true
                  paths:
                    - /auth/v1/
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon
            ## Secure REST routes
            - name: rest-v1
              _comment: 'PostgREST: /rest/v1/* -> http://supabase-rest:3000/*'
              url: http://supabase-rest:3000/
              routes:
                - name: rest-v1-all
                  strip_path: true
                  paths:
                    - /rest/v1/
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: true
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon
            ## Secure GraphQL routes
            - name: graphql-v1
              _comment: 'PostgREST: /graphql/v1/* -> http://supabase-rest:3000/rpc/graphql'
              url: http://supabase-rest:3000/rpc/graphql
              routes:
                - name: graphql-v1-all
                  strip_path: true
                  paths:
                    - /graphql/v1
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: true
                - name: request-transformer
                  config:
                    add:
                      headers:
                        - Content-Profile:graphql_public
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon
            ## Secure Realtime routes
            - name: realtime-v1-ws
              _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
              url: http://realtime-dev:4000/socket
              protocol: ws
              routes:
                - name: realtime-v1-ws
                  strip_path: true
                  paths:
                    - /realtime/v1/
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon
            - name: realtime-v1-rest
              _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
              url: http://realtime-dev:4000/api
              protocol: http
              routes:
                - name: realtime-v1-rest
                  strip_path: true
                  paths:
                    - /realtime/v1/api
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon
            ## Storage routes: the storage server manages its own auth
            - name: storage-v1
              _comment: 'Storage: /storage/v1/* -> http://supabase-storage:5000/*'
              url: http://supabase-storage:5000/
              routes:
                - name: storage-v1-all
                  strip_path: true
                  paths:
                    - /storage/v1/
              plugins:
                - name: cors
            ## Edge Functions routes
            - name: functions-v1
              _comment: 'Edge Functions: /functions/v1/* -> http://supabase-edge-functions:9000/*'
              url: http://supabase-edge-functions:9000/
              routes:
                - name: functions-v1-all
                  strip_path: true
                  paths:
                    - /functions/v1/
              plugins:
                - name: cors
            ## Analytics routes
            - name: analytics-v1
              _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
              url: http://supabase-analytics:4000/
              routes:
                - name: analytics-v1-all
                  strip_path: true
                  paths:
                    - /analytics/v1/
            ## Secure Database routes
            - name: meta
              _comment: 'pg-meta: /pg/* -> http://supabase-meta:8080/*'
              url: http://supabase-meta:8080/
              routes:
                - name: meta-all
                  strip_path: true
                  paths:
                    - /pg/
              plugins:
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
            ## Protected Dashboard - catch all remaining routes
            - name: dashboard
              _comment: 'Studio: /* -> http://studio:3000/*'
              url: http://supabase-studio:3000/
              routes:
                - name: dashboard-all
                  strip_path: true
                  paths:
                    - /
              plugins:
                - name: cors
                - name: basic-auth
                  config:
                    hide_credentials: true
  supabase-studio:
    image: supabase/studio:20240422-5cf8f30
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://127.0.0.1:3000/api/profile', (r) => {if (r.statusCode !== 200) process.exit(1); else process.exit(0); }).on('error', () => process.exit(1))",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      supabase-analytics:
        condition: service_healthy
    environment:
      - HOSTNAME=0.0.0.0
      - STUDIO_PG_META_URL=http://supabase-meta:8080
      - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - DEFAULT_ORGANIZATION_NAME=${STUDIO_DEFAULT_ORGANIZATION:-Default Organization}
      - DEFAULT_PROJECT_NAME=${STUDIO_DEFAULT_PROJECT:-Default Project}
      - SUPABASE_URL=http://supabase-kong:8000
      - SUPABASE_PUBLIC_URL=${SERVICE_FQDN_SUPABASEKONG}
      - SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
      - SUPABASE_SERVICE_KEY=${SERVICE_SUPABASESERVICE_KEY}
      - AUTH_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
      - LOGFLARE_URL=http://supabase-analytics:4000
      - NEXT_PUBLIC_ENABLE_LOGS=true
      # Comment to use Big Query backend for analytics
      - NEXT_ANALYTICS_BACKEND_PROVIDER=postgres
      # Uncomment to use Big Query backend for analytics
      # - NEXT_ANALYTICS_BACKEND_PROVIDER=bigquery
  supabase-db:
    image: supabase/postgres:15.1.1.61
    healthcheck:
      test: pg_isready -U postgres -h 127.0.0.1
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      supabase-vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal
    restart: unless-stopped
    environment:
      - POSTGRES_HOST=/var/run/postgresql
      - PGPORT=${POSTGRES_PORT:-5432}
      - POSTGRES_PORT=${POSTGRES_PORT:-5432}
      - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - PGDATABASE=${POSTGRES_DB:-postgres}
      - POSTGRES_DB=${POSTGRES_DB:-postgres}
      - JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - JWT_EXP=${JWT_EXPIRY:-3600}
    volumes:
      - supabase-db-data:/var/lib/postgresql/data
      - type: bind
        source: ./volumes/db/realtime.sql
        target: /docker-entrypoint-initdb.d/migrations/99-realtime.sql
        content: |
          \set pguser `echo "supabase_admin"`
          create schema if not exists _realtime;
          alter schema _realtime owner to :pguser;
      - type: bind
        source: ./volumes/db/webhooks.sql
        target: /docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql
        content: |
          BEGIN;
          -- Create pg_net extension
          CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
          -- Create supabase_functions schema
          CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
          GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
          -- supabase_functions.migrations definition
          CREATE TABLE supabase_functions.migrations (
            version text PRIMARY KEY,
            inserted_at timestamptz NOT NULL DEFAULT NOW()
          );
          -- Initial supabase_functions migration
          INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
          -- supabase_functions.hooks definition
          CREATE TABLE supabase_functions.hooks (
            id bigserial PRIMARY KEY,
            hook_table_id integer NOT NULL,
            hook_name text NOT NULL,
            created_at timestamptz NOT NULL DEFAULT NOW(),
            request_id bigint
          );
          CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
          CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
          COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
          CREATE FUNCTION supabase_functions.http_request()
            RETURNS trigger
            LANGUAGE plpgsql
          AS $function$
            DECLARE
              request_id bigint;
              payload jsonb;
              url text := TG_ARGV[0]::text;
              method text := TG_ARGV[1]::text;
              headers jsonb DEFAULT '{}'::jsonb;
              params jsonb DEFAULT '{}'::jsonb;
              timeout_ms integer DEFAULT 1000;
            BEGIN
              IF url IS NULL OR url = 'null' THEN
                RAISE EXCEPTION 'url argument is missing';
              END IF;
              IF method IS NULL OR method = 'null' THEN
                RAISE EXCEPTION 'method argument is missing';
              END IF;
              IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
                headers = '{"Content-Type": "application/json"}'::jsonb;
              ELSE
                headers = TG_ARGV[2]::jsonb;
              END IF;
              IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
                params = '{}'::jsonb;
              ELSE
                params = TG_ARGV[3]::jsonb;
              END IF;
              IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
                timeout_ms = 1000;
              ELSE
                timeout_ms = TG_ARGV[4]::integer;
              END IF;
              CASE
                WHEN method = 'GET' THEN
                  SELECT http_get INTO request_id FROM net.http_get(
                    url,
                    params,
                    headers,
                    timeout_ms
                  );
                WHEN method = 'POST' THEN
                  payload = jsonb_build_object(
                    'old_record', OLD,
                    'record', NEW,
                    'type', TG_OP,
                    'table', TG_TABLE_NAME,
                    'schema', TG_TABLE_SCHEMA
                  );
                  SELECT http_post INTO request_id FROM net.http_post(
                    url,
                    payload,
                    params,
                    headers,
                    timeout_ms
                  );
                ELSE
                  RAISE EXCEPTION 'method argument % is invalid', method;
              END CASE;
              INSERT INTO supabase_functions.hooks
                (hook_table_id, hook_name, request_id)
              VALUES
                (TG_RELID, TG_NAME, request_id);
              RETURN NEW;
            END
          $function$;
          -- Supabase super admin
          DO
          $$
          BEGIN
            IF NOT EXISTS (
              SELECT 1
              FROM pg_roles
              WHERE rolname = 'supabase_functions_admin'
            )
            THEN
              CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
            END IF;
          END
          $$;
          GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
          GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
          GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
          ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
          ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
          ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
          ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
          GRANT supabase_functions_admin TO postgres;
          -- Remove unused supabase_pg_net_admin role
          DO
          $$
          BEGIN
            IF EXISTS (
              SELECT 1
              FROM pg_roles
              WHERE rolname = 'supabase_pg_net_admin'
            )
            THEN
              REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
              DROP OWNED BY supabase_pg_net_admin;
              DROP ROLE supabase_pg_net_admin;
            END IF;
          END
          $$;
          -- pg_net grants when extension is already enabled
          DO
          $$
          BEGIN
            IF EXISTS (
              SELECT 1
              FROM pg_extension
              WHERE extname = 'pg_net'
            )
            THEN
              GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
            END IF;
          END
          $$;
          -- Event trigger for pg_net
          CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
            RETURNS event_trigger
            LANGUAGE plpgsql
          AS $$
          BEGIN
            IF EXISTS (
              SELECT 1
              FROM pg_event_trigger_ddl_commands() AS ev
              JOIN pg_extension AS ext
                ON ev.objid = ext.oid
              WHERE ext.extname = 'pg_net'
            )
            THEN
              GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
            END IF;
          END;
          $$;
          COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
          DO
          $$
          BEGIN
            IF NOT EXISTS (
              SELECT 1
              FROM pg_event_trigger
              WHERE evtname = 'issue_pg_net_access'
            ) THEN
              CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
                EXECUTE PROCEDURE extensions.grant_pg_net_access();
            END IF;
          END
          $$;
          INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
          ALTER function supabase_functions.http_request() SECURITY DEFINER;
          ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
          REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
          GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
          COMMIT;
      - type: bind
        source: ./volumes/db/roles.sql
        target: /docker-entrypoint-initdb.d/init-scripts/99-roles.sql
        content: |
          -- NOTE: change to your own passwords for production environments
          \set pgpass `echo "$POSTGRES_PASSWORD"`
          ALTER USER authenticator WITH PASSWORD :'pgpass';
          ALTER USER pgbouncer WITH PASSWORD :'pgpass';
          ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
          ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
          ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
      - type: bind
        source: ./volumes/db/jwt.sql
        target: /docker-entrypoint-initdb.d/init-scripts/99-jwt.sql
        content: |
          \set jwt_secret `echo "$JWT_SECRET"`
          \set jwt_exp `echo "$JWT_EXP"`
          \set db_name `echo "${POSTGRES_DB:-postgres}"`
          ALTER DATABASE :db_name SET "app.settings.jwt_secret" TO :'jwt_secret';
          ALTER DATABASE :db_name SET "app.settings.jwt_exp" TO :'jwt_exp';
      - type: bind
        source: ./volumes/db/logs.sql
        target: /docker-entrypoint-initdb.d/migrations/99-logs.sql
        content: |
          \set pguser `echo "supabase_admin"`
          create schema if not exists _analytics;
          alter schema _analytics owner to :pguser;
      # Use named volume to persist pgsodium decryption key between restarts
      - supabase-db-config:/etc/postgresql-custom
  supabase-analytics:
    image: supabase/logflare:1.4.0
    healthcheck:
      test: ["CMD", "curl", "http://127.0.0.1:4000/health"]
      timeout: 5s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      supabase-db:
        condition: service_healthy
    # volumes:
    #   - type: bind
    #     source: ./volumes/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    environment:
      - LOGFLARE_NODE_HOST=127.0.0.1
      - DB_USERNAME=supabase_admin
      - DB_DATABASE=${POSTGRES_DB:-postgres}
      - DB_HOSTNAME=${POSTGRES_HOST:-supabase-db}
      - DB_PORT=${POSTGRES_PORT:-5432}
      - DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - DB_SCHEMA=_analytics
      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
      - LOGFLARE_SINGLE_TENANT=true
      - LOGFLARE_SINGLE_TENANT_MODE=true
      - LOGFLARE_SUPABASE_MODE=true
      - LOGFLARE_MIN_CLUSTER_SIZE=1
      # Comment variables to use Big Query backend for analytics
      - POSTGRES_BACKEND_URL=postgresql://supabase_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
      - POSTGRES_BACKEND_SCHEMA=_analytics
      - LOGFLARE_FEATURE_FLAG_OVERRIDE=multibackend=true
      # Uncomment to use Big Query backend for analytics
      # - GOOGLE_PROJECT_ID=${GOOGLE_PROJECT_ID}
      # - GOOGLE_PROJECT_NUMBER=${GOOGLE_PROJECT_NUMBER}
  supabase-vector:
    image: timberio/vector:0.28.1-alpine
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://supabase-vector:9001/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    volumes:
      - type: bind
        source: ./volumes/logs/vector.yml
        target: /etc/vector/vector.yml
        read_only: true
        content: |
          api:
            enabled: true
            address: 0.0.0.0:9001
          sources:
            docker_host:
              type: docker_logs
              exclude_containers:
                - supabase-vector
          transforms:
            project_logs:
              type: remap
              inputs:
                - docker_host
              source: |-
                .project = "default"
                .event_message = del(.message)
                .appname = del(.container_name)
                del(.container_created_at)
                del(.container_id)
                del(.source_type)
                del(.stream)
                del(.label)
                del(.image)
                del(.host)
                del(.stream)
            router:
              type: route
              inputs:
                - project_logs
              route:
                kong: 'starts_with(string!(.appname), "supabase-kong")'
                auth: 'starts_with(string!(.appname), "supabase-auth")'
                rest: 'starts_with(string!(.appname), "supabase-rest")'
                realtime: 'starts_with(string!(.appname), "realtime-dev")'
                storage: 'starts_with(string!(.appname), "supabase-storage")'
                functions: 'starts_with(string!(.appname), "supabase-functions")'
                db: 'starts_with(string!(.appname), "supabase-db")'
            # Ignores non nginx errors since they are related with kong booting up
            kong_logs:
              type: remap
              inputs:
                - router.kong
              source: |-
                req, err = parse_nginx_log(.event_message, "combined")
                if err == null {
                  .timestamp = req.timestamp
                  .metadata.request.headers.referer = req.referer
                  .metadata.request.headers.user_agent = req.agent
                  .metadata.request.headers.cf_connecting_ip = req.client
                  .metadata.request.method = req.method
                  .metadata.request.path = req.path
                  .metadata.request.protocol = req.protocol
                  .metadata.response.status_code = req.status
                }
                if err != null {
                  abort
                }
            # Ignores non nginx errors since they are related with kong booting up
            kong_err:
              type: remap
              inputs:
                - router.kong
              source: |-
                .metadata.request.method = "GET"
                .metadata.response.status_code = 200
                parsed, err = parse_nginx_log(.event_message, "error")
                if err == null {
                  .timestamp = parsed.timestamp
                  .severity = parsed.severity
                  .metadata.request.host = parsed.host
                  .metadata.request.headers.cf_connecting_ip = parsed.client
                  url, err = split(parsed.request, " ")
                  if err == null {
                    .metadata.request.method = url[0]
                    .metadata.request.path = url[1]
                    .metadata.request.protocol = url[2]
                  }
                }
                if err != null {
                  abort
                }
            # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
            auth_logs:
              type: remap
              inputs:
                - router.auth
              source: |-
                parsed, err = parse_json(.event_message)
                if err == null {
                  .metadata.timestamp = parsed.time
                  .metadata = merge!(.metadata, parsed)
                }
            # PostgREST logs are structured so we separate timestamp from message using regex
            rest_logs:
              type: remap
              inputs:
                - router.rest
              source: |-
                parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
                if err == null {
                  .event_message = parsed.msg
                  .timestamp = to_timestamp!(parsed.time)
                  .metadata.host = .project
                }
            # Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
            realtime_logs:
              type: remap
              inputs:
                - router.realtime
              source: |-
                .metadata.project = del(.project)
                .metadata.external_id = .metadata.project
                parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
                if err == null {
                  .event_message = parsed.msg
                  .metadata.level = parsed.level
                }
            # Storage logs may contain json objects so we parse them for completeness
            storage_logs:
              type: remap
              inputs:
                - router.storage
              source: |-
                .metadata.project = del(.project)
                .metadata.tenantId = .metadata.project
                parsed, err = parse_json(.event_message)
                if err == null {
                  .event_message = parsed.msg
                  .metadata.level = parsed.level
                  .metadata.timestamp = parsed.time
                  .metadata.context[0].host = parsed.hostname
                  .metadata.context[0].pid = parsed.pid
                }
            # Postgres logs some messages to stderr which we map to warning severity level
            db_logs:
              type: remap
              inputs:
                - router.db
              source: |-
                .metadata.host = "db-default"
                .metadata.parsed.timestamp = .timestamp
                parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)
                if err != null || parsed == null {
                  .metadata.parsed.error_severity = "info"
                }
                if parsed != null {
                  .metadata.parsed.error_severity = parsed.level
                }
                if .metadata.parsed.error_severity == "info" {
                  .metadata.parsed.error_severity = "log"
                }
                .metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
          sinks:
            logflare_auth:
              type: 'http'
              inputs:
                - auth_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
            logflare_realtime:
              type: 'http'
              inputs:
                - realtime_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
            logflare_rest:
              type: 'http'
              inputs:
                - rest_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
            logflare_db:
              type: 'http'
              inputs:
                - db_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              # We must route the sink through kong because ingesting logs before logflare is fully initialised will
              # lead to broken queries from studio. This works by the assumption that containers are started in the
              # following order: vector > db > logflare > kong
              uri: 'http://supabase-kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
            logflare_functions:
              type: 'http'
              inputs:
                - router.functions
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
            logflare_storage:
              type: 'http'
              inputs:
                - storage_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
            logflare_kong:
              type: 'http'
              inputs:
                - kong_logs
                - kong_err
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
    command: ["--config", "etc/vector/vector.yml"]
  supabase-rest:
    image: postgrest/postgrest:v12.0.1
    depends_on:
      supabase-db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      supabase-analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      - PGRST_DB_URI=postgres://authenticator:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
      - PGRST_DB_SCHEMAS=${PGRST_DB_SCHEMAS:-public}
      - PGRST_DB_ANON_ROLE=anon
      - PGRST_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - PGRST_DB_USE_LEGACY_GUCS=false
      - PGRST_APP_SETTINGS_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - PGRST_APP_SETTINGS_JWT_EXP=${JWT_EXPIRY:-3600}
    command: "postgrest"
    exclude_from_hc: true
  supabase-auth:
    image: supabase/gotrue:v2.151.0
    depends_on:
      supabase-db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      supabase-analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://127.0.0.1:9999/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      - GOTRUE_API_HOST=0.0.0.0
      - GOTRUE_API_PORT=9999
      - API_EXTERNAL_URL=${API_EXTERNAL_URL:-http://supabase-kong:8000}
      - GOTRUE_DB_DRIVER=postgres
      - GOTRUE_DB_DATABASE_URL=postgres://supabase_auth_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
      - GOTRUE_SITE_URL=${SERVICE_FQDN_SUPABASEKONG}
      - GOTRUE_URI_ALLOW_LIST=${ADDITIONAL_REDIRECT_URLS}
      - GOTRUE_DISABLE_SIGNUP=${DISABLE_SIGNUP:-false}
      - GOTRUE_JWT_ADMIN_ROLES=service_role
      - GOTRUE_JWT_AUD=authenticated
      - GOTRUE_JWT_DEFAULT_GROUP_NAME=authenticated
      - GOTRUE_JWT_EXP=${JWT_EXPIRY:-3600}
      - GOTRUE_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - GOTRUE_EXTERNAL_EMAIL_ENABLED=${ENABLE_EMAIL_SIGNUP:-true}
      - GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED=${ENABLE_ANONYMOUS_USERS:-false}
      - GOTRUE_MAILER_AUTOCONFIRM=${ENABLE_EMAIL_AUTOCONFIRM:-false}
      # - GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED=true
      # - GOTRUE_SMTP_MAX_FREQUENCY=1s
      - GOTRUE_SMTP_ADMIN_EMAIL=${SMTP_ADMIN_EMAIL}
      - GOTRUE_SMTP_HOST=${SMTP_HOST}
      - GOTRUE_SMTP_PORT=${SMTP_PORT:-587}
      - GOTRUE_SMTP_USER=${SMTP_USER}
      - GOTRUE_SMTP_PASS=${SMTP_PASS}
      - GOTRUE_SMTP_SENDER_NAME=${SMTP_SENDER_NAME}
      - GOTRUE_MAILER_URLPATHS_INVITE=${MAILER_URLPATHS_INVITE:-/auth/v1/verify}
      - GOTRUE_MAILER_URLPATHS_CONFIRMATION=${MAILER_URLPATHS_CONFIRMATION:-/auth/v1/verify}
      - GOTRUE_MAILER_URLPATHS_RECOVERY=${MAILER_URLPATHS_RECOVERY:-/auth/v1/verify}
      - GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE=${MAILER_URLPATHS_EMAIL_CHANGE:-/auth/v1/verify}
      - GOTRUE_MAILER_TEMPLATES_INVITE=${MAILER_TEMPLATES_INVITE}
      - GOTRUE_MAILER_TEMPLATES_CONFIRMATION=${MAILER_TEMPLATES_CONFIRMATION}
      - GOTRUE_MAILER_TEMPLATES_RECOVERY=${MAILER_TEMPLATES_RECOVERY}
      - GOTRUE_MAILER_TEMPLATES_MAGIC_LINK=${MAILER_TEMPLATES_MAGIC_LINK}
      - GOTRUE_MAILER_TEMPLATES_EMAIL_CHANGE=${MAILER_TEMPLATES_EMAIL_CHANGE}
      - GOTRUE_MAILER_SUBJECTS_CONFIRMATION=${MAILER_SUBJECTS_CONFIRMATION}
      - GOTRUE_MAILER_SUBJECTS_RECOVERY=${MAILER_SUBJECTS_RECOVERY}
      - GOTRUE_MAILER_SUBJECTS_MAGIC_LINK=${MAILER_SUBJECTS_MAGIC_LINK}
      - GOTRUE_MAILER_SUBJECTS_EMAIL_CHANGE=${MAILER_SUBJECTS_EMAIL_CHANGE}
      - GOTRUE_MAILER_SUBJECTS_INVITE=${MAILER_SUBJECTS_INVITE}
      - GOTRUE_EXTERNAL_PHONE_ENABLED=${ENABLE_PHONE_SIGNUP:-true}
      - GOTRUE_SMS_AUTOCONFIRM=${ENABLE_PHONE_AUTOCONFIRM:-true}
      # Uncomment to enable custom access token hook. You'll need to create a public.custom_access_token_hook function and grant necessary permissions.
      # See: https://supabase.com/docs/guides/auth/auth-hooks#hook-custom-access-token for details
      # - GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED="true"
      # - GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="pg-functions://postgres/public/custom_access_token_hook"
      # - GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED="true"
      # - GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/mfa_verification_attempt"
      # - GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED="true"
      # - GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/password_verification_attempt"
  realtime-dev:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    image: supabase/realtime:v2.28.32
    container_name: realtime-dev.supabase-realtime
    depends_on:
      supabase-db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      supabase-analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "-H",
          "Authorization: Bearer ${ANON_KEY}",
          "http://127.0.0.1:4000/api/tenants/realtime-dev/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      - PORT=4000
      - DB_HOST=${POSTGRES_HOST:-supabase-db}
      - DB_PORT=${POSTGRES_PORT:-5432}
      - DB_USER=supabase_admin
      - DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - DB_NAME=${POSTGRES_DB:-postgres}
      - DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime
      - DB_ENC_KEY=supabaserealtime
      - API_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - FLY_ALLOC_ID=fly123
      - FLY_APP_NAME=realtime
      - SECRET_KEY_BASE=${SECRET_PASSWORD_REALTIME}
      - ERL_AFLAGS=-proto_dist inet_tcp
      - ENABLE_TAILSCALE=false
      - DNS_NODES=''
    command: >
      sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"
  supabase-minio:
    image: minio/minio
    environment:
      - MINIO_ROOT_USER=${SERVICE_USER_MINIO}
      - MINIO_ROOT_PASSWORD=${SERVICE_PASSWORD_MINIO}
    command: server --console-address ":9001" /data
    healthcheck:
      test: sleep 5 && exit 0
      interval: 2s
      timeout: 10s
      retries: 5
    volumes:
      - ./volumes/storage:/data
  minio-createbucket:
    image: minio/mc
    restart: "no"
    environment:
      - MINIO_ROOT_USER=${SERVICE_USER_MINIO}
      - MINIO_ROOT_PASSWORD=${SERVICE_PASSWORD_MINIO}
    depends_on:
      supabase-minio:
        condition: service_healthy
    entrypoint: ["/entrypoint.sh"]
    volumes:
      - type: bind
        source: ./entrypoint.sh
        target: /entrypoint.sh
        content: |
          #!/bin/sh
          /usr/bin/mc alias set supabase-minio http://supabase-minio:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD};
          /usr/bin/mc mb --ignore-existing supabase-minio/stub;
          exit 0
  supabase-storage:
    image: supabase/storage-api:v1.0.6
    depends_on:
      supabase-db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      supabase-rest:
        condition: service_started
      imgproxy:
        condition: service_started
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://127.0.0.1:5000/status",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      - SERVER_PORT=5000
      - SERVER_REGION=local
      - MULTI_TENANT=false
      - AUTH_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - DATABASE_URL=postgres://supabase_storage_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
      - DB_INSTALL_ROLES=false
      - STORAGE_BACKEND=s3
      - STORAGE_S3_BUCKET=stub
      - STORAGE_S3_ENDPOINT=http://supabase-minio:9000
      - STORAGE_S3_FORCE_PATH_STYLE=true
      - STORAGE_S3_REGION=us-east-1
      - AWS_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
      - AWS_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
      - UPLOAD_FILE_SIZE_LIMIT=524288000
      - UPLOAD_FILE_SIZE_LIMIT_STANDARD=524288000
      - UPLOAD_SIGNED_URL_EXPIRATION_TIME=120
      - TUS_URL_PATH=/upload/resumable
      - TUS_MAX_SIZE=3600000
      - IMAGE_TRANSFORMATION_ENABLED=true
      - IMGPROXY_URL=http://imgproxy:8080
      - IMGPROXY_REQUEST_TIMEOUT=15
      - DATABASE_SEARCH_PATH=storage
      # - ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzA4OTg4NDAwLAogICJleHAiOiAxODY2ODQxMjAwCn0.jCDqsoXGT58JnAjf27KOowNQsokkk0aR7rdbGG18P-8
      # - SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3MDg5ODg0MDAsCiAgImV4cCI6IDE4NjY4NDEyMDAKfQ.GA7yF2BmqTzqGkP_oqDdJAQVt0djjIxGYuhE0zFDJV4
      # - POSTGREST_URL=http://supabase-rest:3000
      # - PGRST_JWT_SECRET=${SERVICE_PASSWORD_JWT}
      # - DATABASE_URL=postgres://supabase_storage_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
      # - FILE_SIZE_LIMIT=52428800
      # - STORAGE_BACKEND=s3
      # - STORAGE_S3_BUCKET=stub
      # - STORAGE_S3_ENDPOINT=http://supabase-minio:9000
      # - STORAGE_S3_PROTOCOL=http
      # - STORAGE_S3_REGION=stub
      # - STORAGE_S3_FORCE_PATH_STYLE=true
      # - AWS_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
      # - AWS_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
      # - AWS_DEFAULT_REGION=stub
      # - FILE_STORAGE_BACKEND_PATH=/var/lib/storage
      # - TENANT_ID=stub
      # # TODO: https://github.com/supabase/storage-api/issues/55
      # - REGION=stub
      # - ENABLE_IMAGE_TRANSFORMATION=true
      # - IMGPROXY_URL=http://imgproxy:8080
    volumes:
      - ./volumes/storage:/var/lib/storage
  imgproxy:
    image: darthsim/imgproxy:v3.8.0
    healthcheck:
      test: ["CMD", "imgproxy", "health"]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      - IMGPROXY_LOCAL_FILESYSTEM_ROOT=/
      - IMGPROXY_USE_ETAG=true
      - IMGPROXY_ENABLE_WEBP_DETECTION=${IMGPROXY_ENABLE_WEBP_DETECTION:-true}
    volumes:
      - ./volumes/storage:/var/lib/storage
  supabase-meta:
    image: supabase/postgres-meta:v0.80.0
    depends_on:
      supabase-db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      supabase-analytics:
        condition: service_healthy
    environment:
      - PG_META_PORT=8080
      - PG_META_DB_HOST=${POSTGRES_HOST:-supabase-db}
      - PG_META_DB_PORT=${POSTGRES_PORT:-5432}
      - PG_META_DB_NAME=${POSTGRES_DB:-postgres}
      - PG_META_DB_USER=supabase_admin
      - PG_META_DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
  supabase-edge-functions:
    image: supabase/edge-runtime:v1.54.2
    depends_on:
      supabase-analytics:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "echo", "Edge Functions is healthy"]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      - JWT_SECRET=${SERVICE_PASSWORD_JWT}
      - SUPABASE_URL=http://supabase-kong:8000
      - SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
      - SUPABASE_SERVICE_ROLE_KEY=${SERVICE_SUPABASESERVICE_KEY}
      - SUPABASE_DB_URL=postgresql://postgres:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      - VERIFY_JWT=${FUNCTIONS_VERIFY_JWT:-false}
    volumes:
      - ./volumes/functions:/home/deno/functions
      - type: bind
        source: ./volumes/functions/main/index.ts
        target: /home/deno/functions/main/index.ts
        content: |
          import { serve } from 'https://deno.land/[email protected]/http/server.ts'
          import * as jose from 'https://deno.land/x/[email protected]/index.ts'
          console.log('main function started')
          const JWT_SECRET = Deno.env.get('JWT_SECRET')
          const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
          function getAuthToken(req: Request) {
            const authHeader = req.headers.get('authorization')
            if (!authHeader) {
              throw new Error('Missing authorization header')
            }
            const [bearer, token] = authHeader.split(' ')
            if (bearer !== 'Bearer') {
              throw new Error(`Auth header is not 'Bearer {token}'`)
            }
            return token
          }
          async function verifyJWT(jwt: string): Promise<boolean> {
            const encoder = new TextEncoder()
            const secretKey = encoder.encode(JWT_SECRET)
            try {
              await jose.jwtVerify(jwt, secretKey)
            } catch (err) {
              console.error(err)
              return false
            }
            return true
          }
          serve(async (req: Request) => {
            if (req.method !== 'OPTIONS' && VERIFY_JWT) {
              try {
                const token = getAuthToken(req)
                const isValidJWT = await verifyJWT(token)
                if (!isValidJWT) {
                  return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
                    status: 401,
                    headers: { 'Content-Type': 'application/json' },
                  })
                }
              } catch (e) {
                console.error(e)
                return new Response(JSON.stringify({ msg: e.toString() }), {
                  status: 401,
                  headers: { 'Content-Type': 'application/json' },
                })
              }
            }
            const url = new URL(req.url)
            const { pathname } = url
            const path_parts = pathname.split('/')
            const service_name = path_parts[1]
            if (!service_name || service_name === '') {
              const error = { msg: 'missing function name in request' }
              return new Response(JSON.stringify(error), {
                status: 400,
                headers: { 'Content-Type': 'application/json' },
              })
            }
            const servicePath = `/home/deno/functions/${service_name}`
            console.error(`serving the request with ${servicePath}`)
            const memoryLimitMb = 150
            const workerTimeoutMs = 1 * 60 * 1000
            const noModuleCache = false
            const importMapPath = null
            const envVarsObj = Deno.env.toObject()
            const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
            try {
              const worker = await EdgeRuntime.userWorkers.create({
                servicePath,
                memoryLimitMb,
                workerTimeoutMs,
                noModuleCache,
                importMapPath,
                envVars,
              })
              return await worker.fetch(req)
            } catch (e) {
              const error = { msg: e.toString() }
              return new Response(JSON.stringify(error), {
                status: 500,
                headers: { 'Content-Type': 'application/json' },
              })
            }
          })
      - type: bind
        source: ./volumes/functions/hello/index.ts
        target: /home/deno/functions/hello/index.ts
        content: |
          // Follow this setup guide to integrate the Deno language server with your editor:
          // https://deno.land/manual/getting_started/setup_your_environment
          // This enables autocomplete, go to definition, etc.
          import { serve } from "https://deno.land/[email protected]/http/server.ts"
          serve(async () => {
            return new Response(
              `"Hello from Edge Functions!"`,
              { headers: { "Content-Type": "application/json" } },
            )
          })
          // To invoke:
          // curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
          //   --header 'Authorization: Bearer <anon/service_role API key>'
    command:
      - start
      - --main-service
      - /home/deno/functions/main
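
After pasting the updated compose into your Supabase stack, a minimal redeploy sketch for a plain docker compose host (assumption: outside Coolify you must supply the SERVICE_* variables yourself, e.g. via an .env file, since Coolify normally generates them):

  docker compose -f docker-compose.yaml pull
  docker compose -f docker-compose.yaml up -d --remove-orphans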