Redesign deployment: only .env + docker-compose.yml needed on server
Build and Push Docker Image / build (push) Successful in 2m17s Details

Custom Docker images embed all config so production servers no longer
need SQL files, kong.yml, or shell scripts. Kong generates config from
env vars at startup. Migrate container auto-detects fresh vs existing
DB and runs appropriate scripts.

New images: monacousa-db, monacousa-kong, monacousa-migrate
New commands: deploy.sh build-images, deploy.sh push-images

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Matt 2026-02-06 10:33:25 +01:00
parent 1a6bce9c67
commit 2ed04cd9f7
13 changed files with 2197 additions and 307 deletions

332
deploy.sh
View File

@ -21,8 +21,9 @@ YELLOW='\033[1;33m'
NC='\033[0m' # No Color NC='\033[0m' # No Color
# Configuration # Configuration
COMPOSE_FILE="docker-compose.nginx.yml" COMPOSE_FILE="docker-compose.yml"
PROJECT_NAME="monacousa" PROJECT_NAME="monacousa"
REGISTRY="code.letsbe.solutions/letsbe"
log_info() { log_info() {
echo -e "${GREEN}[INFO]${NC} $1" echo -e "${GREEN}[INFO]${NC} $1"
@ -141,265 +142,32 @@ generate_secrets() {
log_info "Copy these values to your .env file" log_info "Copy these values to your .env file"
} }
# Generate Kong configuration with API keys from .env # Build custom Docker images (run from dev machine)
generate_kong_config() { build_images() {
log_info "Generating Kong configuration..." log_info "Building custom Docker images..."
# Load environment variables # Sync latest SQL files into build contexts
if [ -f .env ]; then cp supabase/docker/00-init-schemas.sql docker/db/00-init-schemas.sql
export $(grep -v '^#' .env | grep -E '^(ANON_KEY|SERVICE_ROLE_KEY)=' | xargs) cp supabase/docker/migrate.sh docker/db/migrate.sh
fi cp deploy/init.sql docker/migrate/init.sql
cp deploy/post-deploy.sql docker/migrate/post-deploy.sql
if [ -z "$ANON_KEY" ] || [ -z "$SERVICE_ROLE_KEY" ]; then docker build -t ${REGISTRY}/monacousa-db:latest docker/db/
log_error "ANON_KEY and SERVICE_ROLE_KEY must be set in .env" docker build -t ${REGISTRY}/monacousa-kong:latest docker/kong/
exit 1 docker build -t ${REGISTRY}/monacousa-migrate:latest docker/migrate/
fi
cat > supabase/docker/kong.yml << KONG_EOF log_info "All images built successfully"
_format_version: "2.1"
_transform: true
consumers:
- username: ANON
keyauth_credentials:
- key: ${ANON_KEY}
- username: SERVICE_ROLE
keyauth_credentials:
- key: ${SERVICE_ROLE_KEY}
acls:
- consumer: ANON
group: anon
- consumer: SERVICE_ROLE
group: admin
services:
- name: auth-verify-redirect
url: http://portal:3000/auth/verify
routes:
- name: auth-verify-redirect
strip_path: false
paths:
- /auth/verify
preserve_host: false
plugins:
- name: cors
- name: auth-v1-open
url: http://auth:9999/verify
routes:
- name: auth-v1-open
strip_path: true
paths:
- /auth/v1/verify
plugins:
- name: cors
- name: auth-v1-open-callback
url: http://auth:9999/callback
routes:
- name: auth-v1-open-callback
strip_path: true
paths:
- /auth/v1/callback
plugins:
- name: cors
- name: auth-v1-open-authorize
url: http://auth:9999/authorize
routes:
- name: auth-v1-open-authorize
strip_path: true
paths:
- /auth/v1/authorize
plugins:
- name: cors
- name: auth-v1
url: http://auth:9999/
routes:
- name: auth-v1
strip_path: true
paths:
- /auth/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: rest-v1
url: http://rest:3000/
routes:
- name: rest-v1
strip_path: true
paths:
- /rest/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: realtime-v1-ws
url: http://realtime:4000/socket
routes:
- name: realtime-v1-ws
strip_path: true
paths:
- /realtime/v1/websocket
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: realtime-v1
url: http://realtime:4000/
routes:
- name: realtime-v1
strip_path: true
paths:
- /realtime/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: storage-v1-public
url: http://storage:5000/object/public
routes:
- name: storage-v1-public
strip_path: true
paths:
- /storage/v1/object/public
plugins:
- name: cors
- name: storage-v1
url: http://storage:5000/
routes:
- name: storage-v1
strip_path: true
paths:
- /storage/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: meta
url: http://meta:8080/
routes:
- name: meta
strip_path: true
paths:
- /pg/
plugins:
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
KONG_EOF
log_info "Kong configuration generated with production API keys"
} }
# Run post-deploy database migrations and fixes # Push custom Docker images to registry
migrate() { push_images() {
log_info "Running post-deploy database migrations..." log_info "Pushing images to ${REGISTRY}..."
# Determine the script path (check both deploy/ subdir and current dir) docker push ${REGISTRY}/monacousa-db:latest
local sql_file="" docker push ${REGISTRY}/monacousa-kong:latest
if [ -f "deploy/post-deploy.sql" ]; then docker push ${REGISTRY}/monacousa-migrate:latest
sql_file="deploy/post-deploy.sql"
elif [ -f "post-deploy.sql" ]; then
sql_file="post-deploy.sql"
else
log_error "post-deploy.sql not found in deploy/ or current directory"
return 1
fi
# Wait for the database to be ready log_info "All images pushed successfully"
log_info "Waiting for database to be ready..."
local retries=30
while [ $retries -gt 0 ]; do
if docker compose -f $COMPOSE_FILE -p $PROJECT_NAME exec -T db pg_isready -U postgres > /dev/null 2>&1; then
break
fi
retries=$((retries - 1))
sleep 2
done
if [ $retries -eq 0 ]; then
log_error "Database did not become ready in time"
return 1
fi
# Wait a bit more for storage-api to create its tables
log_info "Waiting for storage service to initialize..."
local storage_retries=15
while [ $storage_retries -gt 0 ]; do
local has_tables=$(docker compose -f $COMPOSE_FILE -p $PROJECT_NAME exec -T db \
psql -U postgres -tAc "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'storage' AND table_name = 'objects'" 2>/dev/null)
if [ "$has_tables" = "1" ]; then
break
fi
storage_retries=$((storage_retries - 1))
sleep 2
done
if [ $storage_retries -eq 0 ]; then
log_warn "storage.objects table not found - storage policies will be skipped"
log_warn "Run './deploy.sh migrate' again after storage service is healthy"
fi
# Run the post-deploy SQL
docker compose -f $COMPOSE_FILE -p $PROJECT_NAME exec -T db \
psql -U postgres -f - < "$sql_file"
if [ $? -eq 0 ]; then
log_info "Post-deploy migrations completed successfully"
else
log_error "Post-deploy migrations failed - check output above"
return 1
fi
} }
# Deploy/start services # Deploy/start services
@ -408,43 +176,33 @@ deploy() {
# Check for .env file # Check for .env file
if [ ! -f .env ]; then if [ ! -f .env ]; then
log_error ".env file not found! Copy .env.production.example to .env first." log_error ".env file not found! Create .env with required variables first."
exit 1 exit 1
fi fi
# Generate Kong config with production API keys # Pull latest images and start all services
generate_kong_config # Kong config is generated at startup from env vars
# Migrations run automatically via the migrate container
# Pull latest portal image and start all services docker compose -f $COMPOSE_FILE -p $PROJECT_NAME pull
docker compose -f $COMPOSE_FILE -p $PROJECT_NAME pull portal
docker compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d docker compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d
log_info "Waiting for services to be healthy..." log_info "Waiting for services to start..."
sleep 15 sleep 10
# Run post-deploy migrations
migrate
# Show status # Show status
docker compose -f $COMPOSE_FILE -p $PROJECT_NAME ps docker compose -f $COMPOSE_FILE -p $PROJECT_NAME ps
log_info "Migrations run automatically via the migrate container."
log_info "Check migrate logs: docker compose -f $COMPOSE_FILE -p $PROJECT_NAME logs migrate"
log_info "Portal should be available at https://\$(grep DOMAIN .env | cut -d '=' -f2)" log_info "Portal should be available at https://\$(grep DOMAIN .env | cut -d '=' -f2)"
} }
# Update and rebuild # Update all services (pull latest images and restart)
update() { update() {
log_info "Updating Monaco USA Portal..." log_info "Updating Monaco USA Portal..."
# Pull latest code (if git repo) docker compose -f $COMPOSE_FILE -p $PROJECT_NAME pull
if [ -d .git ]; then docker compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d
git pull origin main
fi
# Rebuild only the portal service
docker compose -f $COMPOSE_FILE -p $PROJECT_NAME build --no-cache portal
# Restart portal with zero downtime
docker compose -f $COMPOSE_FILE -p $PROJECT_NAME up -d --no-deps portal
log_info "Update complete!" log_info "Update complete!"
} }
@ -538,12 +296,11 @@ help() {
echo "" echo ""
echo "Usage: ./deploy.sh [command]" echo "Usage: ./deploy.sh [command]"
echo "" echo ""
echo "Commands:" echo "Server Commands (only need .env + docker-compose.yml):"
echo " setup First-time server setup (install Docker, firewall)" echo " setup First-time server setup (install Docker, firewall)"
echo " generate-secrets Generate random secrets for .env" echo " generate-secrets Generate random secrets for .env"
echo " deploy Build and start all services (includes migrate)" echo " deploy Pull images and start all services"
echo " update Pull latest code and rebuild portal" echo " update Pull latest images and restart"
echo " migrate Run post-deploy database migrations and fixes"
echo " stop Stop all services" echo " stop Stop all services"
echo " restart Restart all services" echo " restart Restart all services"
echo " status Show service status and resource usage" echo " status Show service status and resource usage"
@ -551,13 +308,17 @@ help() {
echo " backup Backup database to file" echo " backup Backup database to file"
echo " restore <file> Restore database from backup" echo " restore <file> Restore database from backup"
echo " cleanup Remove unused Docker resources" echo " cleanup Remove unused Docker resources"
echo " help Show this help message" echo ""
echo "Dev Commands (run from the repo with docker/ directory):"
echo " build-images Build custom Docker images (db, kong, migrate)"
echo " push-images Push custom images to registry"
echo "" echo ""
echo "Examples:" echo "Examples:"
echo " sudo ./deploy.sh setup # First-time setup" echo " sudo ./deploy.sh setup # First-time setup"
echo " ./deploy.sh deploy # Deploy the portal" echo " ./deploy.sh deploy # Deploy the portal"
echo " ./deploy.sh logs portal # View portal logs" echo " ./deploy.sh logs portal # View portal logs"
echo " ./deploy.sh backup # Backup database" echo " ./deploy.sh build-images # Build custom images (dev)"
echo " ./deploy.sh push-images # Push images to registry (dev)"
} }
# Main command handler # Main command handler
@ -568,12 +329,15 @@ case "${1:-help}" in
generate-secrets) generate-secrets)
generate_secrets generate_secrets
;; ;;
build-images)
build_images
;;
push-images)
push_images
;;
deploy) deploy)
deploy deploy
;; ;;
migrate)
migrate
;;
update) update)
update update
;; ;;

View File

@ -1,10 +1,12 @@
# Monaco USA Portal - Production Docker Compose (with Nginx on host) # Monaco USA Portal - Production Docker Compose (with Nginx on host)
# For deployment on Debian/Linux servers using Nginx as reverse proxy # For deployment on Debian/Linux servers using Nginx as reverse proxy
# #
# PRODUCTION DEPLOYMENT: Only needs this file + .env
# All config is embedded in custom Docker images.
#
# Usage: # Usage:
# 1. Copy .env.production.example to .env # 1. Create .env with required variables
# 2. Configure all environment variables # 2. Run: docker compose -f docker-compose.nginx.yml up -d
# 3. Run: docker compose -f docker-compose.nginx.yml up -d
# #
# Ports exposed to localhost (nginx proxies to these): # Ports exposed to localhost (nginx proxies to these):
# - 7453: Portal (SvelteKit) # - 7453: Portal (SvelteKit)
@ -16,7 +18,7 @@ services:
# PostgreSQL Database # PostgreSQL Database
# ============================================ # ============================================
db: db:
image: supabase/postgres:15.8.1.060 image: code.letsbe.solutions/letsbe/monacousa-db:latest
container_name: monacousa-db container_name: monacousa-db
restart: unless-stopped restart: unless-stopped
environment: environment:
@ -27,9 +29,6 @@ services:
JWT_EXP: ${JWT_EXPIRY} JWT_EXP: ${JWT_EXPIRY}
volumes: volumes:
- db-data:/var/lib/postgresql/data - db-data:/var/lib/postgresql/data
# Migrations are mounted separately - run them manually after DB is initialized
# DO NOT mount to /docker-entrypoint-initdb.d as it overwrites Supabase's init scripts
- ./supabase/migrations:/migrations:ro
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"] test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s interval: 10s

View File

@ -1,12 +1,13 @@
# Monaco USA Portal - Full Stack Docker Compose # Monaco USA Portal - Docker Compose
# Includes: PostgreSQL, Supabase Services, and SvelteKit App # PRODUCTION: Only needs this file + .env — all config embedded in custom images.
# Includes: PostgreSQL, Supabase Services, SvelteKit App, and Migration Runner
services: services:
# ============================================ # ============================================
# PostgreSQL Database # PostgreSQL Database
# ============================================ # ============================================
db: db:
image: supabase/postgres:15.8.1.060 image: code.letsbe.solutions/letsbe/monacousa-db:latest
container_name: monacousa-db container_name: monacousa-db
restart: unless-stopped restart: unless-stopped
ports: ports:
@ -19,12 +20,6 @@ services:
JWT_EXP: ${JWT_EXPIRY:-3600} JWT_EXP: ${JWT_EXPIRY:-3600}
volumes: volumes:
- db-data:/var/lib/postgresql/data - db-data:/var/lib/postgresql/data
# Init script to create schemas (runs first due to 00- prefix)
- ./supabase/docker/00-init-schemas.sql:/docker-entrypoint-initdb.d/00-init-schemas.sql:ro
# Override built-in migrate.sh to prevent supabase_admin auth failures
- ./supabase/docker/migrate.sh:/docker-entrypoint-initdb.d/migrate.sh:ro
# Migrations mounted separately for manual execution
- ./supabase/migrations:/migrations:ro
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"] test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s interval: 10s
@ -67,7 +62,7 @@ services:
# Kong API Gateway # Kong API Gateway
# ============================================ # ============================================
kong: kong:
image: kong:2.8.1 image: code.letsbe.solutions/letsbe/monacousa-kong:latest
container_name: monacousa-kong container_name: monacousa-kong
restart: unless-stopped restart: unless-stopped
ports: ports:
@ -80,8 +75,8 @@ services:
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
volumes: ANON_KEY: ${ANON_KEY}
- ./supabase/docker/kong.yml:/var/lib/kong/kong.yml:ro SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
depends_on: depends_on:
auth: auth:
condition: service_healthy condition: service_healthy
@ -323,21 +318,18 @@ services:
# Idempotent - safe to run on every `docker compose up`. # Idempotent - safe to run on every `docker compose up`.
# ============================================ # ============================================
migrate: migrate:
image: supabase/postgres:15.8.1.060 image: code.letsbe.solutions/letsbe/monacousa-migrate:latest
container_name: monacousa-migrate container_name: monacousa-migrate
depends_on: depends_on:
db: db:
condition: service_healthy condition: service_healthy
storage: storage:
condition: service_started condition: service_started
volumes:
- ./deploy/post-deploy.sql:/post-deploy.sql:ro
environment: environment:
PGHOST: db PGHOST: db
PGUSER: ${POSTGRES_USER:-postgres} PGUSER: ${POSTGRES_USER:-postgres}
PGPASSWORD: ${POSTGRES_PASSWORD:-postgres} PGPASSWORD: ${POSTGRES_PASSWORD:-postgres}
PGDATABASE: ${POSTGRES_DB:-postgres} PGDATABASE: ${POSTGRES_DB:-postgres}
entrypoint: ["sh", "-c", "echo 'Waiting for storage tables...' && sleep 10 && psql -f /post-deploy.sql && echo 'Migrations complete.'"]
networks: networks:
- monacousa-network - monacousa-network
restart: "no" restart: "no"

View File

@ -0,0 +1,93 @@
-- Initialize required schemas and roles for Supabase services
-- This runs FIRST (00- prefix) before other init scripts.
-- Baked into /docker-entrypoint-initdb.d, so it only executes when the data
-- volume is empty (i.e. on a fresh database).

-- Create roles if they don't exist
DO $$
BEGIN
  -- Create anon role (unauthenticated API access)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'anon') THEN
    CREATE ROLE anon NOLOGIN NOINHERIT;
  END IF;
  -- Create authenticated role (logged-in API access)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticated') THEN
    CREATE ROLE authenticated NOLOGIN NOINHERIT;
  END IF;
  -- Create service_role (backend access; BYPASSRLS skips row-level security)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'service_role') THEN
    CREATE ROLE service_role NOLOGIN NOINHERIT BYPASSRLS;
  END IF;
  -- Create supabase_admin role (superuser login used by Supabase services)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_admin') THEN
    CREATE ROLE supabase_admin LOGIN SUPERUSER CREATEDB CREATEROLE REPLICATION BYPASSRLS;
  END IF;
  -- Create authenticator role (login role that the API gateway/PostgREST
  -- connects as; it is granted the anon/authenticated/service_role roles below)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticator') THEN
    CREATE ROLE authenticator NOINHERIT LOGIN;
  END IF;
  -- Create supabase_auth_admin role (owns the auth schema)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_auth_admin') THEN
    CREATE ROLE supabase_auth_admin NOLOGIN NOINHERIT;
  END IF;
  -- Create supabase_storage_admin role (owns the storage schema)
  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'supabase_storage_admin') THEN
    CREATE ROLE supabase_storage_admin NOLOGIN NOINHERIT;
  END IF;
END
$$;

-- Grant roles
GRANT anon TO authenticator;
GRANT authenticated TO authenticator;
GRANT service_role TO authenticator;
GRANT supabase_admin TO postgres;

-- Set passwords (use the same as postgres password from env)
-- Note: These are set via ALTER ROLE since we can't use variables in CREATE ROLE
-- NOTE(review): contrary to the comment above, the passwords are hardcoded to
-- 'postgres' -- a plain .sql init script cannot expand POSTGRES_PASSWORD from
-- the environment. Confirm services connect with this value, or reset these
-- roles' passwords after initialization.
ALTER ROLE supabase_admin WITH PASSWORD 'postgres';
ALTER ROLE authenticator WITH PASSWORD 'postgres';

-- Create schemas
CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_auth_admin;
CREATE SCHEMA IF NOT EXISTS storage AUTHORIZATION supabase_storage_admin;
CREATE SCHEMA IF NOT EXISTS extensions;
CREATE SCHEMA IF NOT EXISTS _realtime;
CREATE SCHEMA IF NOT EXISTS graphql;
CREATE SCHEMA IF NOT EXISTS graphql_public;

-- Grant schema usage
GRANT USAGE ON SCHEMA public TO anon, authenticated, service_role;
GRANT USAGE ON SCHEMA auth TO anon, authenticated, service_role, supabase_auth_admin;
GRANT USAGE ON SCHEMA storage TO anon, authenticated, service_role, supabase_storage_admin;
GRANT USAGE ON SCHEMA extensions TO anon, authenticated, service_role;
GRANT USAGE ON SCHEMA graphql_public TO anon, authenticated, service_role;

-- Grant auth schema to supabase_auth_admin
GRANT ALL ON SCHEMA auth TO supabase_auth_admin;
GRANT ALL ON ALL TABLES IN SCHEMA auth TO supabase_auth_admin;
GRANT ALL ON ALL SEQUENCES IN SCHEMA auth TO supabase_auth_admin;
GRANT ALL ON ALL ROUTINES IN SCHEMA auth TO supabase_auth_admin;

-- Grant storage schema to supabase_storage_admin
GRANT ALL ON SCHEMA storage TO supabase_storage_admin;
GRANT ALL ON ALL TABLES IN SCHEMA storage TO supabase_storage_admin;
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO supabase_storage_admin;
GRANT ALL ON ALL ROUTINES IN SCHEMA storage TO supabase_storage_admin;

-- Set default privileges (covers objects the services create later)
ALTER DEFAULT PRIVILEGES IN SCHEMA auth GRANT ALL ON TABLES TO supabase_auth_admin;
ALTER DEFAULT PRIVILEGES IN SCHEMA auth GRANT ALL ON SEQUENCES TO supabase_auth_admin;
ALTER DEFAULT PRIVILEGES IN SCHEMA storage GRANT ALL ON TABLES TO supabase_storage_admin;
ALTER DEFAULT PRIVILEGES IN SCHEMA storage GRANT ALL ON SEQUENCES TO supabase_storage_admin;

-- Set search path
ALTER DATABASE postgres SET search_path TO public, extensions;

-- Create extensions in extensions schema
CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA extensions;
CREATE EXTENSION IF NOT EXISTS "pgcrypto" WITH SCHEMA extensions;

5
docker/db/Dockerfile Normal file
View File

@ -0,0 +1,5 @@
# Custom DB image: supabase/postgres with init scripts baked in.
FROM supabase/postgres:15.8.1.060
# Embed init scripts into the image so no volume mounts are needed.
# Note: /docker-entrypoint-initdb.d scripts only run on a fresh (empty) data volume.
COPY 00-init-schemas.sql /docker-entrypoint-initdb.d/00-init-schemas.sql
# Shadows the image's built-in migrate.sh (see docker/db/migrate.sh).
COPY migrate.sh /docker-entrypoint-initdb.d/migrate.sh

5
docker/db/migrate.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/bash
# Override the default Supabase migrate.sh (copied over it in the image; see
# docker/db/Dockerfile) to prevent supabase_admin auth failures during init.
# Our migrations are handled via init.sql which runs as postgres user.
echo "Skipping built-in migrate.sh - migrations handled by init.sql"
exit 0

9
docker/kong/Dockerfile Normal file
View File

@ -0,0 +1,9 @@
# Custom Kong image: config template + startup wrapper baked in, so no
# kong.yml volume mount is needed on the server.
FROM kong:2.8.1
# Embed the config template and wrapper script
COPY kong.yml.template /var/lib/kong/kong.yml.template
COPY docker-entrypoint-wrapper.sh /docker-entrypoint-wrapper.sh
RUN chmod +x /docker-entrypoint-wrapper.sh
# Wrapper renders /var/lib/kong/kong.yml from the template (using ANON_KEY /
# SERVICE_ROLE_KEY env vars) and then execs the stock Kong entrypoint.
ENTRYPOINT ["/docker-entrypoint-wrapper.sh"]
CMD ["kong", "docker-start"]

View File

@ -0,0 +1,18 @@
#!/bin/sh
# Kong entrypoint wrapper: render /var/lib/kong/kong.yml from the embedded
# template by substituting API keys from the environment, then hand off to
# the stock Kong entrypoint.
#
# Required env vars: ANON_KEY, SERVICE_ROLE_KEY
set -e

# Generate kong.yml from template by substituting env vars
if [ -z "$ANON_KEY" ] || [ -z "$SERVICE_ROLE_KEY" ]; then
  echo "ERROR: ANON_KEY and SERVICE_ROLE_KEY must be set"
  exit 1
fi

# Escape characters that are special in a sed replacement ('\', '&') and the
# '|' delimiter used below, so a key containing them cannot corrupt the
# generated YAML or inject sed syntax.
escape_repl() {
  printf '%s' "$1" | sed -e 's/[\\&|]/\\&/g'
}

anon_key=$(escape_repl "$ANON_KEY")
service_role_key=$(escape_repl "$SERVICE_ROLE_KEY")

sed \
  -e "s|__ANON_KEY__|${anon_key}|g" \
  -e "s|__SERVICE_ROLE_KEY__|${service_role_key}|g" \
  /var/lib/kong/kong.yml.template > /var/lib/kong/kong.yml

echo "Kong config generated from template with production API keys"

# Hand off to the original Kong entrypoint
exec /docker-entrypoint.sh "$@"

View File

@ -0,0 +1,180 @@
# Kong declarative configuration template (DB-less mode).
# __ANON_KEY__ / __SERVICE_ROLE_KEY__ are substituted at container startup by
# /docker-entrypoint-wrapper.sh from the ANON_KEY / SERVICE_ROLE_KEY env vars.
_format_version: "2.1"
_transform: true

# API-key consumers: requests authenticate via the key-auth plugin and are
# authorized per-route through ACL groups (anon / admin).
consumers:
  - username: ANON
    keyauth_credentials:
      - key: __ANON_KEY__
  - username: SERVICE_ROLE
    keyauth_credentials:
      - key: __SERVICE_ROLE_KEY__

acls:
  - consumer: ANON
    group: anon
  - consumer: SERVICE_ROLE
    group: admin

services:
  # Email verification links land on the portal app rather than the auth service.
  - name: auth-verify-redirect
    url: http://portal:3000/auth/verify
    routes:
      - name: auth-verify-redirect
        strip_path: false
        paths:
          - /auth/verify
        preserve_host: false
    plugins:
      - name: cors
  # Open (keyless) auth endpoints: verify / callback / authorize.
  - name: auth-v1-open
    url: http://auth:9999/verify
    routes:
      - name: auth-v1-open
        strip_path: true
        paths:
          - /auth/v1/verify
    plugins:
      - name: cors
  - name: auth-v1-open-callback
    url: http://auth:9999/callback
    routes:
      - name: auth-v1-open-callback
        strip_path: true
        paths:
          - /auth/v1/callback
    plugins:
      - name: cors
  - name: auth-v1-open-authorize
    url: http://auth:9999/authorize
    routes:
      - name: auth-v1-open-authorize
        strip_path: true
        paths:
          - /auth/v1/authorize
    plugins:
      - name: cors
  # All remaining auth endpoints require an API key.
  - name: auth-v1
    url: http://auth:9999/
    routes:
      - name: auth-v1
        strip_path: true
        paths:
          - /auth/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  # PostgREST API (key required).
  - name: rest-v1
    url: http://rest:3000/
    routes:
      - name: rest-v1
        strip_path: true
        paths:
          - /rest/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  # Realtime websocket endpoint (key required).
  - name: realtime-v1-ws
    url: http://realtime:4000/socket
    routes:
      - name: realtime-v1-ws
        strip_path: true
        paths:
          - /realtime/v1/websocket
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  - name: realtime-v1
    url: http://realtime:4000/
    routes:
      - name: realtime-v1
        strip_path: true
        paths:
          - /realtime/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  # Public storage objects are served without a key.
  - name: storage-v1-public
    url: http://storage:5000/object/public
    routes:
      - name: storage-v1-public
        strip_path: true
        paths:
          - /storage/v1/object/public
    plugins:
      - name: cors
  # All other storage operations require an API key.
  - name: storage-v1
    url: http://storage:5000/
    routes:
      - name: storage-v1
        strip_path: true
        paths:
          - /storage/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  # pg-meta is admin-only (service key required; no anon in the allow list).
  - name: meta
    url: http://meta:8080/
    routes:
      - name: meta
        strip_path: true
        paths:
          - /pg/
    plugins:
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin

View File

@ -0,0 +1,9 @@
# Migration-runner image: reuses the supabase/postgres image for its psql /
# pg_isready binaries; the Postgres server itself is never started here.
FROM supabase/postgres:15.8.1.060
# Embed SQL scripts and entrypoint
COPY init.sql /sql/init.sql
COPY post-deploy.sql /sql/post-deploy.sql
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# entrypoint.sh waits for the db service, runs init.sql on a fresh database,
# then always runs the idempotent post-deploy.sql.
ENTRYPOINT ["/entrypoint.sh"]

View File

@ -0,0 +1,58 @@
#!/bin/bash
# Migration runner for the monacousa-migrate container.
#
# Waits for Postgres (and, best-effort, for storage-api's tables), then:
#   - fresh database (no public.members table): runs /sql/init.sql
#   - existing database: skips init.sql
# and always runs the idempotent /sql/post-deploy.sql.
#
# Connection comes from the standard psql env vars: PGHOST, PGUSER,
# PGPASSWORD, PGDATABASE (set in docker-compose).
set -e

echo "=== Monaco USA Portal - Migration Runner ==="

# Wait for database to be ready
echo "Waiting for database..."
retries=30
while [ "$retries" -gt 0 ]; do
  if pg_isready -h "$PGHOST" -U "$PGUSER" > /dev/null 2>&1; then
    echo "Database is ready."
    break
  fi
  retries=$((retries - 1))
  sleep 2
done
if [ "$retries" -eq 0 ]; then
  echo "ERROR: Database did not become ready in time"
  exit 1
fi

# Wait for storage-api to create its tables (it starts after db).
# Best-effort only: post-deploy.sql skips its storage sections when the
# tables are absent, so a timeout here is a warning, not a failure.
echo "Waiting for storage-api to create tables..."
storage_retries=30
while [ "$storage_retries" -gt 0 ]; do
  has_table=$(psql -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -tAc \
    "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'storage' AND table_name = 'objects'" 2>/dev/null || echo "0")
  if [ "$has_table" = "1" ]; then
    echo "Storage tables found."
    break
  fi
  storage_retries=$((storage_retries - 1))
  sleep 2
done
if [ "$storage_retries" -eq 0 ]; then
  echo "WARNING: storage.objects table not found after 60s - storage policies will be skipped in post-deploy"
fi

# Detect fresh vs existing database via the public.members marker table
has_members=$(psql -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -tAc \
  "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'public' AND table_name = 'members'" 2>/dev/null || echo "0")
if [ "$has_members" = "0" ]; then
  echo "=== Fresh database detected - running init.sql ==="
  # ON_ERROR_STOP is required here: without it psql exits 0 even when
  # statements fail, leaving a half-initialized DB that the next run would
  # misclassify as "existing" and never repair.
  psql -v ON_ERROR_STOP=1 -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -f /sql/init.sql
  echo "=== init.sql completed ==="
else
  echo "=== Existing database detected - skipping init.sql ==="
fi

# Always run post-deploy (idempotent)
# NOTE(review): deliberately run WITHOUT ON_ERROR_STOP so one failed
# incremental statement does not block the remaining idempotent fixes.
echo "=== Running post-deploy.sql ==="
psql -h "$PGHOST" -U "$PGUSER" -d "$PGDATABASE" -f /sql/post-deploy.sql
echo "=== post-deploy.sql completed ==="

echo "=== Migration runner finished successfully ==="

1532
docker/migrate/init.sql Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,226 @@
-- Monaco USA Portal - Post-Deployment Database Fixes
-- This script is IDEMPOTENT - safe to run multiple times.
-- Run after `docker compose up` once all containers are healthy.
--
-- Handles:
-- 1. Storage RLS policies (storage-api creates tables AFTER db init)
-- 2. Service role access grants
-- 3. Incremental migrations for existing databases
-- 4. Notifications table (if missing)
-- ============================================================================
-- ============================================
-- 1. STORAGE POLICIES
-- storage-api creates storage.objects and storage.buckets with RLS enabled
-- but no policies. We need to add service_role policies so the portal
-- can upload/delete files via supabaseAdmin.
-- ============================================
DO $$
BEGIN
-- Ensure service_role has BYPASSRLS
IF EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'service_role' AND NOT rolbypassrls) THEN
ALTER ROLE service_role BYPASSRLS;
END IF;
EXCEPTION
WHEN insufficient_privilege THEN
RAISE NOTICE 'Could not grant BYPASSRLS to service_role - will rely on explicit policies';
END $$;
-- storage.objects policies
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = 'storage' AND table_name = 'objects') THEN
-- Drop and recreate to ensure clean state
DROP POLICY IF EXISTS "service_role_all_select" ON storage.objects;
DROP POLICY IF EXISTS "service_role_all_insert" ON storage.objects;
DROP POLICY IF EXISTS "service_role_all_update" ON storage.objects;
DROP POLICY IF EXISTS "service_role_all_delete" ON storage.objects;
CREATE POLICY "service_role_all_select" ON storage.objects
FOR SELECT TO service_role USING (true);
CREATE POLICY "service_role_all_insert" ON storage.objects
FOR INSERT TO service_role WITH CHECK (true);
CREATE POLICY "service_role_all_update" ON storage.objects
FOR UPDATE TO service_role USING (true);
CREATE POLICY "service_role_all_delete" ON storage.objects
FOR DELETE TO service_role USING (true);
-- Public read access for avatars and event images
DROP POLICY IF EXISTS "public_read_avatars" ON storage.objects;
CREATE POLICY "public_read_avatars" ON storage.objects
FOR SELECT USING (bucket_id IN ('avatars', 'event-images'));
-- Authenticated users can read documents
DROP POLICY IF EXISTS "authenticated_read_documents" ON storage.objects;
CREATE POLICY "authenticated_read_documents" ON storage.objects
FOR SELECT TO authenticated USING (bucket_id = 'documents');
GRANT ALL ON storage.objects TO service_role;
RAISE NOTICE 'storage.objects policies applied';
ELSE
RAISE NOTICE 'storage.objects table not found - storage-api may not have started yet';
END IF;
END $$;
-- storage.buckets policies
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = 'storage' AND table_name = 'buckets') THEN
DROP POLICY IF EXISTS "service_role_all_buckets_select" ON storage.buckets;
DROP POLICY IF EXISTS "service_role_all_buckets_insert" ON storage.buckets;
DROP POLICY IF EXISTS "service_role_all_buckets_update" ON storage.buckets;
DROP POLICY IF EXISTS "service_role_all_buckets_delete" ON storage.buckets;
CREATE POLICY "service_role_all_buckets_select" ON storage.buckets
FOR SELECT TO service_role USING (true);
CREATE POLICY "service_role_all_buckets_insert" ON storage.buckets
FOR INSERT TO service_role WITH CHECK (true);
CREATE POLICY "service_role_all_buckets_update" ON storage.buckets
FOR UPDATE TO service_role USING (true);
CREATE POLICY "service_role_all_buckets_delete" ON storage.buckets
FOR DELETE TO service_role USING (true);
-- Allow authenticated users to read bucket info (needed for uploads)
DROP POLICY IF EXISTS "authenticated_read_buckets" ON storage.buckets;
CREATE POLICY "authenticated_read_buckets" ON storage.buckets
FOR SELECT TO authenticated USING (true);
GRANT ALL ON storage.buckets TO service_role;
RAISE NOTICE 'storage.buckets policies applied';
ELSE
RAISE NOTICE 'storage.buckets table not found - storage-api may not have started yet';
END IF;
END $$;
-- Ensure schema and general grants
GRANT USAGE ON SCHEMA storage TO service_role;
GRANT USAGE ON SCHEMA storage TO authenticated;
-- ============================================
-- 2. STORAGE BUCKETS
-- Ensure required buckets exist
-- ============================================
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = 'storage' AND table_name = 'buckets') THEN
INSERT INTO storage.buckets (id, name, public, file_size_limit, allowed_mime_types)
VALUES (
'avatars', 'avatars', true, 5242880,
ARRAY['image/jpeg', 'image/png', 'image/webp', 'image/gif']
) ON CONFLICT (id) DO UPDATE SET public = true, file_size_limit = EXCLUDED.file_size_limit, allowed_mime_types = EXCLUDED.allowed_mime_types;
INSERT INTO storage.buckets (id, name, public, file_size_limit, allowed_mime_types)
VALUES (
'documents', 'documents', true, 52428800,
ARRAY['application/pdf', 'application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', 'application/vnd.ms-excel', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', 'application/vnd.ms-powerpoint', 'application/vnd.openxmlformats-officedocument.presentationml.presentation', 'text/plain', 'text/csv', 'application/json', 'image/jpeg', 'image/png', 'image/webp', 'image/gif']
) ON CONFLICT (id) DO UPDATE SET public = true, file_size_limit = EXCLUDED.file_size_limit, allowed_mime_types = EXCLUDED.allowed_mime_types;
INSERT INTO storage.buckets (id, name, public, file_size_limit, allowed_mime_types)
VALUES (
'event-images', 'event-images', true, 10485760,
ARRAY['image/jpeg', 'image/png', 'image/webp']
) ON CONFLICT (id) DO UPDATE SET public = true, file_size_limit = EXCLUDED.file_size_limit, allowed_mime_types = EXCLUDED.allowed_mime_types;
RAISE NOTICE 'Storage buckets ensured';
END IF;
END $$;
-- ============================================
-- 3. NOTIFICATIONS TABLE (added post-migration-016)
-- In-app notification inbox, one row per message per member.
-- Rows are removed automatically when the owning member is deleted
-- (ON DELETE CASCADE). read_at stays NULL until the member opens it.
-- ============================================
CREATE TABLE IF NOT EXISTS public.notifications (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  member_id UUID NOT NULL REFERENCES public.members(id) ON DELETE CASCADE,
  -- Allowed notification categories; extend the CHECK list when adding types.
  type TEXT NOT NULL CHECK (type IN ('welcome', 'event', 'payment', 'membership', 'system', 'announcement')),
  title TEXT NOT NULL,
  message TEXT NOT NULL,
  link TEXT,                          -- optional in-app route, e.g. '/profile'
  read_at TIMESTAMPTZ,                -- NULL = unread
  created_at TIMESTAMPTZ DEFAULT NOW()
);
-- Lookup by owner, fast unread-count (partial index), and newest-first feeds.
CREATE INDEX IF NOT EXISTS idx_notifications_member ON public.notifications(member_id);
CREATE INDEX IF NOT EXISTS idx_notifications_unread ON public.notifications(member_id) WHERE read_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_notifications_created ON public.notifications(created_at DESC);
ALTER TABLE public.notifications ENABLE ROW LEVEL SECURITY;
-- Idempotent policy creation: pg_policy is checked first so re-running this
-- script never fails with "policy already exists" and never drops/recreates
-- (which would momentarily widen access).
DO $$
BEGIN
  -- Members may read only their own notifications.
  IF NOT EXISTS (SELECT 1 FROM pg_policy WHERE polrelid = 'public.notifications'::regclass AND polname = 'Members can view own notifications') THEN
    CREATE POLICY "Members can view own notifications"
      ON public.notifications FOR SELECT TO authenticated
      USING (member_id = auth.uid());
  END IF;
  -- Members may update their own rows (e.g. set read_at). No explicit
  -- WITH CHECK, so Postgres reuses the USING clause — the updated row must
  -- still satisfy member_id = auth.uid(), preventing reassignment to others.
  IF NOT EXISTS (SELECT 1 FROM pg_policy WHERE polrelid = 'public.notifications'::regclass AND polname = 'Members can update own notifications') THEN
    CREATE POLICY "Members can update own notifications"
      ON public.notifications FOR UPDATE TO authenticated
      USING (member_id = auth.uid());
  END IF;
  -- Admins (members.role = 'admin') get full CRUD on every notification.
  IF NOT EXISTS (SELECT 1 FROM pg_policy WHERE polrelid = 'public.notifications'::regclass AND polname = 'Admin can manage all notifications') THEN
    CREATE POLICY "Admin can manage all notifications"
      ON public.notifications FOR ALL TO authenticated
      USING (EXISTS (SELECT 1 FROM public.members WHERE id = auth.uid() AND role = 'admin'))
      WITH CHECK (EXISTS (SELECT 1 FROM public.members WHERE id = auth.uid() AND role = 'admin'));
  END IF;
END $$;
-- Table-level grants; RLS policies above further restrict row visibility.
GRANT SELECT, UPDATE ON public.notifications TO authenticated;
GRANT ALL ON public.notifications TO service_role;
-- Welcome notification trigger
-- Seeds a new member's inbox with a single 'welcome' notification linking
-- to /profile. Runs AFTER INSERT on public.members, once per row.
-- SECURITY DEFINER lets it insert regardless of the caller's RLS context;
-- search_path is pinned (PostgreSQL-documented hardening for SECURITY
-- DEFINER functions) so a malicious schema earlier on the caller's path
-- cannot hijack name resolution. All references are schema-qualified, so
-- behavior is unchanged.
CREATE OR REPLACE FUNCTION create_welcome_notification()
RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.notifications (member_id, type, title, message, link)
  VALUES (
    NEW.id, 'welcome',
    'Welcome to Monaco USA!',
    'Thank you for joining our community. Complete your profile and explore upcoming events.',
    '/profile'
  );
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER SET search_path = public;
-- Recreate idempotently so redeploys never fail on an existing trigger.
DROP TRIGGER IF EXISTS on_member_created_notification ON public.members;
CREATE TRIGGER on_member_created_notification
  AFTER INSERT ON public.members
  FOR EACH ROW
  EXECUTE FUNCTION create_welcome_notification();
-- ============================================
-- 4. MIGRATION 017: Fix RLS role escalation
-- Replaces the self-update policy so a member can edit their own profile
-- but cannot change their own `role` column: the WITH CHECK requires the
-- new role to equal the role currently stored for that member.
-- NOTE(review): the WITH CHECK subquery reads public.members from inside a
-- policy on public.members. Whether this is safe depends on the table's
-- SELECT policies (a SELECT policy that itself queries members would trip
-- Postgres's infinite-recursion detection) — confirm against migration 017.
-- ============================================
DROP POLICY IF EXISTS "Users can update own profile" ON public.members;
CREATE POLICY "Users can update own profile"
ON public.members FOR UPDATE
TO authenticated
USING (auth.uid() = id)
WITH CHECK (
  auth.uid() = id
  -- If the subquery yields NULL (row not visible), the comparison is NULL
  -- and the check fails closed — the update is rejected.
  AND role = (SELECT m.role FROM public.members m WHERE m.id = auth.uid())
);
-- ============================================
-- 5. ENSURE SERVICE_ROLE ACCESS TO ALL TABLES
-- Backend (service_role key) bypasses RLS but still needs table-level
-- privileges. Cover everything that already exists...
-- ============================================
GRANT ALL ON ALL TABLES IN SCHEMA public TO service_role;
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO service_role;
-- ...and everything created later in this schema by the current role.
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO service_role;
-- ============================================
-- DONE
-- ============================================
-- Emit a marker line so container logs show the script ran to completion.
DO $$
BEGIN
  RAISE NOTICE '=== Post-deploy script completed successfully ===';
END $$;