add supabase cluster

This commit is contained in:
jigoong
2026-03-02 22:11:43 +07:00
parent 6f6009d63e
commit ac304e465d
11 changed files with 3314 additions and 0 deletions

View File

@@ -0,0 +1,94 @@
############
# Supabase Configuration
#
# Every placeholder secret in this file MUST be replaced with a real,
# randomly generated value before any non-local deployment.
############
# PostgreSQL Database (service name + port inside the compose network)
POSTGRES_HOST=supabase-db
POSTGRES_PORT=5432
# NOTE(review): the README documents host port 5434 for PostgreSQL — confirm
# the compose file maps 5434 -> 5432; this variable is the in-network port.
POSTGRES_DB=postgres
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
# API Gateway (Kong) — host ports chosen to avoid clashing with other stacks
KONG_HTTP_PORT=8100
KONG_HTTPS_PORT=8443
# Studio (dashboard UI)
STUDIO_PORT=3010
STUDIO_DEFAULT_ORGANIZATION=Sriphat Data Platform
STUDIO_DEFAULT_PROJECT=Default Project
# JWT Secret (generate with: openssl rand -base64 32)
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
# Access-token lifetime in seconds
JWT_EXPIRY=3600
# Anonymous Key (generate with supabase CLI or use default for development)
# Public, client-side key — role "anon".
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0
# Service Role Key (generate with supabase CLI or use default for development)
# WARNING: bypasses Row Level Security — server-side use only, never expose.
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU
# Dashboard Credentials (basic auth in front of Studio)
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
# API URLs
SUPABASE_PUBLIC_URL=http://localhost:8100
API_EXTERNAL_URL=http://localhost:8100
SITE_URL=http://localhost:3010
# Email Configuration (Optional)
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@sriphat.local
SMTP_HOST=mail.sriphat.local
SMTP_PORT=587
SMTP_USER=
SMTP_PASS=
SMTP_SENDER_NAME=Sriphat Supabase
# Phone Configuration (Optional)
ENABLE_PHONE_SIGNUP=false
ENABLE_PHONE_AUTOCONFIRM=false
# Anonymous Users
ENABLE_ANONYMOUS_USERS=false
DISABLE_SIGNUP=false
# Additional Settings
ADDITIONAL_REDIRECT_URLS=
PGRST_DB_SCHEMAS=public,storage,graphql_public
SECRET_KEY_BASE=your-secret-key-base-change-this-to-a-random-string
VAULT_ENC_KEY=your-vault-encryption-key-change-this
PG_META_CRYPTO_KEY=your-pg-meta-crypto-key-change-this
# Storage
STORAGE_TENANT_ID=stub
REGION=us-east-1
GLOBAL_S3_BUCKET=stub
S3_PROTOCOL_ACCESS_KEY_ID=
S3_PROTOCOL_ACCESS_KEY_SECRET=
# Image Proxy
IMGPROXY_ENABLE_WEBP_DETECTION=true
# Analytics (Logflare)
# FIX: the public and private tokens were identical; they must NOT share the
# same value (see the companion .env.example: "these cannot be the same value").
LOGFLARE_PUBLIC_ACCESS_TOKEN=your-super-secret-logflare-token-public
LOGFLARE_PRIVATE_ACCESS_TOKEN=your-super-secret-logflare-token-private
# Edge Functions
FUNCTIONS_VERIFY_JWT=false
# Pooler
POOLER_PROXY_PORT_TRANSACTION=6544
# Docker Socket (for vector logs)
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
# Mailer URL Paths
MAILER_URLPATHS_INVITE=/auth/v1/verify
MAILER_URLPATHS_CONFIRMATION=/auth/v1/verify
MAILER_URLPATHS_RECOVERY=/auth/v1/verify
MAILER_URLPATHS_EMAIL_CHANGE=/auth/v1/verify
# OpenAI (Optional - for AI features in Studio)
OPENAI_API_KEY=

View File

@@ -0,0 +1,144 @@
############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############
POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
# Secret used to sign/verify all Supabase JWTs (at least 32 characters).
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
# Public, client-side API key (role "anon") — demo key, regenerate for production.
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
# Private, server-side API key (role "service_role") — keep secret; never ship to clients.
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
# Basic-auth credentials protecting the Studio dashboard.
DASHBOARD_USERNAME=supabase
DASHBOARD_PASSWORD=this_password_is_insecure_and_should_be_updated
# NOTE(review): presumably the Elixir secret_key_base for Realtime/Supavisor — confirm.
SECRET_KEY_BASE=UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
# Encryption key for Supabase Vault (placeholder value is exactly 32 characters).
VAULT_ENC_KEY=your-32-character-encryption-key
# Encryption key for postgres-meta (32 characters minimum, per the placeholder).
PG_META_CRYPTO_KEY=your-encryption-key-32-chars-min
############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############
POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres
############
# Supavisor -- Database pooler
############
# Port Supavisor listens on for transaction pooling connections
POOLER_PROXY_PORT_TRANSACTION=6543
# Maximum number of PostgreSQL connections Supavisor opens per pool
POOLER_DEFAULT_POOL_SIZE=20
# Maximum number of client connections Supavisor accepts per pool
POOLER_MAX_CLIENT_CONN=100
# Unique tenant identifier
POOLER_TENANT_ID=your-tenant-id
# Pool size for internal metadata storage used by Supavisor
# This is separate from client connections and used only by Supavisor itself
POOLER_DB_POOL_SIZE=5
############
# API Proxy - Configuration for the Kong Reverse proxy.
############
KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443
############
# API - Configuration for PostgREST.
############
# Comma-separated list of schemas exposed through the REST API.
PGRST_DB_SCHEMAS=public,storage,graphql_public
############
# Auth - Configuration for the GoTrue authentication server.
############
## General
SITE_URL=http://localhost:3000
ADDITIONAL_REDIRECT_URLS=
# Access-token lifetime in seconds.
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000
## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"
## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender
ENABLE_ANONYMOUS_USERS=false
## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true
############
# Studio - Configuration for the Dashboard
############
STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000
# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true
# Add your OpenAI API key to enable SQL Editor Assistant
OPENAI_API_KEY=
############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false
############
# Logs - Configuration for Analytics
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############
# Change vector.toml sinks to reflect this change
# these cannot be the same value
LOGFLARE_PUBLIC_ACCESS_TOKEN=your-super-secret-and-long-logflare-key-public
LOGFLARE_PRIVATE_ACCESS_TOKEN=your-super-secret-and-long-logflare-key-private
# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock
# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
############
# Storage - Configuration for the storage
############
STORAGE_TENANT_ID=stub
GLOBAL_S3_BUCKET=stub
REGION=stub
# Default S3-protocol credentials for the storage API — change before production.
S3_PROTOCOL_ACCESS_KEY_ID=625729a08b95bf1b7ff351a663f3a23c
S3_PROTOCOL_ACCESS_KEY_SECRET=850181e4652dd023b7a98c58ae0d2d34bd487ee0cc3254aed6eda37307425907
# Used in docker-compose.s3.yml for minio
MINIO_ROOT_USER=supa-storage
MINIO_ROOT_PASSWORD=secret1234

View File

@@ -0,0 +1,464 @@
# Supabase Setup - Official Method (setup2.sh)
## 📖 Overview
`setup2.sh` เป็น setup script ที่ทำตามขั้นตอนจาก [Official Supabase Self-Hosting Guide](https://supabase.com/docs/guides/self-hosting/docker) อย่างเคร่งครัด
## 🔄 ความแตกต่างระหว่าง setup.sh และ setup2.sh
| Feature | setup.sh (Custom) | setup2.sh (Official) |
|---------|-------------------|----------------------|
| **แหล่งที่มา** | ดาวน์โหลด config files แยก | Clone official Supabase repo |
| **docker-compose.yml** | Custom version | Official version (latest) |
| **การ generate secrets** | Manual instructions | ใช้ official `generate-keys.sh` |
| **การอัปเดต** | Manual | ตาม official releases |
| **Port configuration** | Pre-configured | Auto-adjusted |
| **แนะนำสำหรับ** | Quick setup, custom config | Production, official support |
## ✅ Prerequisites
ต้องติดตั้งก่อนรัน script:
1. **Git** - สำหรับ clone repository
```bash
git --version
```
2. **Docker & Docker Compose**
```bash
docker --version
docker compose version
```
3. **OpenSSL** - สำหรับ generate secrets (มักติดตั้งมาแล้ว)
```bash
openssl version
```
## 🚀 วิธีใช้งาน
### 1. รัน Setup Script
```bash
cd 02-supabase
bash setup2.sh
```
Script จะทำสิ่งต่อไปนี้อัตโนมัติ:
1. ✅ ตรวจสอบ prerequisites (git, docker)
2. 📥 Clone Supabase repository (depth=1 เพื่อความเร็ว)
3. 📁 Copy official docker-compose.yml และ config files
4. 🔧 ปรับ ports ให้ไม่ชนกับ services อื่น:
- Kong HTTP: `8100` (แทน 8000)
- PostgreSQL: `5434` (แทน 5432)
- Pooler: `6544` (แทน 6543)
5. 🌐 เพิ่ม `shared_data_network` configuration
6. 🔐 รัน official `generate-keys.sh` เพื่อสร้าง JWT secrets และ API keys
7. 📦 Pull Docker images ทั้งหมด
8. 🧹 ลบ temporary files
### 2. Review และแก้ไข .env
หลังจากรัน script แล้ว ต้องแก้ไข `.env`:
```bash
nano .env
```
**สิ่งที่ต้องแก้ไข:**
```bash
# 1. Database Password (REQUIRED)
POSTGRES_PASSWORD=your-super-secret-postgres-password
# 2. Dashboard Credentials (REQUIRED)
DASHBOARD_USERNAME=admin
DASHBOARD_PASSWORD=your-secure-password-with-letters
# ⚠️ Password ต้องมีตัวอักษรอย่างน้อย 1 ตัว (ห้ามใช้แต่ตัวเลข)
# 3. Public URLs (ถ้าใช้ domain name)
SUPABASE_PUBLIC_URL=http://your-domain:8100
API_EXTERNAL_URL=http://your-domain:8100
SITE_URL=http://your-domain:3000
# หรือใช้ localhost สำหรับ development
SUPABASE_PUBLIC_URL=http://localhost:8100
API_EXTERNAL_URL=http://localhost:8100
SITE_URL=http://localhost:3000
```
**ไม่ต้องแก้ (auto-generated แล้ว):**
- `JWT_SECRET` ✅
- `ANON_KEY` ✅
- `SERVICE_ROLE_KEY` ✅
- `SECRET_KEY_BASE` ✅
- `VAULT_ENC_KEY` ✅
- `PG_META_CRYPTO_KEY` ✅
- `LOGFLARE_PUBLIC_ACCESS_TOKEN` ✅
- `LOGFLARE_PRIVATE_ACCESS_TOKEN` ✅
- `S3_PROTOCOL_ACCESS_KEY_ID` ✅
- `S3_PROTOCOL_ACCESS_KEY_SECRET` ✅
### 3. สร้าง Network (ถ้ายังไม่มี)
```bash
cd ../00-network
bash create-network.sh
```
### 4. Start Supabase
```bash
cd ../02-supabase
docker compose up -d
```
### 5. ตรวจสอบ Services
```bash
# ดู status ทั้งหมด
docker compose ps
# ดู logs
docker compose logs -f
# ดู logs ของ service เฉพาะ
docker compose logs -f studio
docker compose logs -f auth
docker compose logs -f db
```
รอประมาณ 1-2 นาที จนทุก service มี status `Up (healthy)`
## 🌐 Access Points
หลังจาก services ทั้งหมดรันแล้ว:
| Service | URL | Credentials |
|---------|-----|-------------|
| **Studio Dashboard** | http://localhost:8100 | DASHBOARD_USERNAME / DASHBOARD_PASSWORD |
| **REST API** | http://localhost:8100/rest/v1/ | ANON_KEY or SERVICE_ROLE_KEY |
| **Auth API** | http://localhost:8100/auth/v1/ | ANON_KEY or SERVICE_ROLE_KEY |
| **Storage API** | http://localhost:8100/storage/v1/ | ANON_KEY or SERVICE_ROLE_KEY |
| **Realtime** | http://localhost:8100/realtime/v1/ | ANON_KEY |
| **PostgreSQL** | localhost:5434 | postgres / POSTGRES_PASSWORD |
| **Pooler (Session)** | localhost:5434 | postgres.your-tenant-id / POSTGRES_PASSWORD |
| **Pooler (Transaction)** | localhost:6544 | postgres.your-tenant-id / POSTGRES_PASSWORD |
## 📝 ไฟล์และ Directories ที่สร้างขึ้น
หลังจากรัน `setup2.sh`:
```
02-supabase/
├── docker-compose.yml # Official Supabase compose file
├── .env # Environment variables (with generated secrets)
├── .env.example.new # Backup of official .env.example
├── volumes/ # Configuration and data
│ ├── api/
│ │ └── kong.yml # Kong API Gateway config
│ ├── db/ # Database init scripts
│ │ ├── realtime.sql
│ │ ├── webhooks.sql
│ │ ├── roles.sql
│ │ ├── jwt.sql
│ │ ├── _supabase.sql
│ │ ├── logs.sql
│ │ ├── pooler.sql
│ │ └── data/ # PostgreSQL data (created on first run)
│ ├── functions/ # Edge Functions
│ │ └── main/
│ ├── logs/
│ │ └── vector.yml # Log collection config
│ ├── pooler/
│ │ └── pooler.exs # Connection pooler config
│ ├── storage/ # File storage (created on first run)
│ └── snippets/ # SQL snippets
└── utils/
└── generate-keys.sh # Official key generation script
```
## 🔐 API Keys และ Secrets
### JWT Secret และ API Keys
Script จะ auto-generate ให้:
- **JWT_SECRET**: ใช้สำหรับ sign และ verify JWT tokens
- **ANON_KEY**: Public API key (ใช้ใน client-side)
- Role: `anon`
- Expires: 5 years
- **SERVICE_ROLE_KEY**: Private API key (ใช้ใน server-side)
- Role: `service_role`
- Bypass Row Level Security (RLS)
- **ห้ามเปิดเผยใน client code!**
### ดู API Keys
```bash
# ดู ANON_KEY
grep ANON_KEY .env
# ดู SERVICE_ROLE_KEY
grep SERVICE_ROLE_KEY .env
```
### Verify JWT Tokens
ไปที่ [jwt.io](https://jwt.io) แล้ว:
1. Paste `ANON_KEY` หรือ `SERVICE_ROLE_KEY`
2. ใส่ `JWT_SECRET` ใน "Verify Signature" section
3. ตรวจสอบ payload และ expiration
## 🔄 การอัปเดต Supabase
### วิธีที่ 1: รัน setup2.sh ใหม่
```bash
cd 02-supabase
# Backup .env ปัจจุบัน
cp .env .env.backup
# รัน setup2.sh ใหม่
bash setup2.sh
# Restore .env settings ที่คุณแก้ไขไว้
# (JWT secrets จะถูก generate ใหม่)
# Restart services
docker compose down
docker compose up -d
```
### วิธีที่ 2: Update แบบ Manual
```bash
# Pull latest images
docker compose pull
# Restart services
docker compose down
docker compose up -d
```
### วิธีที่ 3: Update Service เฉพาะ
```bash
# 1. ดู version ล่าสุดที่ Docker Hub
# https://hub.docker.com/u/supabase
# 2. แก้ไข docker-compose.yml
# เช่น: image: supabase/studio:2026.02.16-sha-26c615c
# 3. Pull และ restart
docker compose pull studio
docker compose up -d studio
```
## 🛠️ Maintenance
### Backup Database
```bash
# Backup
docker exec supabase-db pg_dump -U postgres postgres > backup_$(date +%Y%m%d).sql
# Restore
docker exec -i supabase-db psql -U postgres postgres < backup_20260218.sql
```
### Backup Storage
```bash
# Backup storage files
tar -czf storage_backup_$(date +%Y%m%d).tar.gz volumes/storage/
# Restore
tar -xzf storage_backup_20260218.tar.gz
```
### View Logs
```bash
# All services
docker compose logs -f
# Specific service
docker compose logs -f studio
docker compose logs -f auth
docker compose logs -f db
docker compose logs -f kong
```
### Restart Services
```bash
# Restart all
docker compose restart
# Restart specific service
docker compose restart studio
docker compose restart auth
```
## 🐛 Troubleshooting
### Services ไม่ start
```bash
# ดู logs
docker compose logs
# ตรวจสอบ network
docker network inspect shared_data_network
# ตรวจสอบ .env
grep -v '^#' .env | grep -v '^$'
```
### Database connection error
```bash
# ตรวจสอบ database พร้อมหรือยัง
docker exec supabase-db pg_isready -U postgres
# ดู database logs
docker compose logs db
```
### Port conflicts
ถ้า port ชน แก้ไขใน `.env`:
```bash
KONG_HTTP_PORT=8101
POSTGRES_PORT=5435
POOLER_PROXY_PORT_TRANSACTION=6545
```
แล้วแก้ใน `docker-compose.yml` ตรง ports mapping ด้วย
### "container supabase-vector exited (0)"
ถ้าใช้ rootless Docker ให้แก้ไข `.env`:
```bash
DOCKER_SOCKET_LOCATION=/run/user/1000/docker.sock
```
## 🔒 Security Best Practices
1. **เปลี่ยน default passwords ทั้งหมด**
- `POSTGRES_PASSWORD`
- `DASHBOARD_PASSWORD`
2. **ใช้ HTTPS ใน production**
- Setup Nginx Proxy Manager
- ติดตั้ง SSL certificate
3. **Enable Row Level Security (RLS)**
- สำหรับทุก table ที่มี sensitive data
4. **เก็บ SERVICE_ROLE_KEY ปลอดภัย**
- ใช้เฉพาะ server-side
- ห้ามเปิดเผยใน client code
5. **Backup เป็นประจำ**
- Database
- Storage files
- .env file
6. **Monitor logs**
- ตรวจสอบ suspicious activities
- ใช้ Logflare analytics
7. **Update เป็นประจำ**
- ติดตาม [Supabase Changelog](https://github.com/supabase/supabase/blob/master/docker/CHANGELOG.md)
- Update images สำหรับ security patches
## 🗑️ Uninstall
**⚠️ คำเตือน: จะลบข้อมูลทั้งหมด!**
```bash
# Stop และลบ containers + volumes
docker compose down -v
# ลบ database data
rm -rf volumes/db/data
# ลบ storage data
rm -rf volumes/storage
# ลบทุกอย่าง (optional)
cd ..
rm -rf 02-supabase
```
## 📚 เอกสารเพิ่มเติม
- [Official Self-Hosting Guide](https://supabase.com/docs/guides/self-hosting/docker)
- [Supabase Docker Changelog](https://github.com/supabase/supabase/blob/master/docker/CHANGELOG.md)
- [Supabase GitHub](https://github.com/supabase/supabase)
- [Docker Hub - Supabase Images](https://hub.docker.com/u/supabase)
- [PostgREST Documentation](https://postgrest.org/)
- [GoTrue (Auth) Documentation](https://github.com/supabase/gotrue)
## 💡 Tips
1. **ใช้ generate-keys.sh ใหม่เมื่อต้องการ**
```bash
bash utils/generate-keys.sh
```
2. **Test API ด้วย curl**
```bash
# Get ANON_KEY from .env
ANON_KEY=$(grep ANON_KEY .env | cut -d '=' -f2)
# Test REST API
curl http://localhost:8100/rest/v1/ \
-H "apikey: $ANON_KEY" \
-H "Authorization: Bearer $ANON_KEY"
```
3. **Connect จาก client application**
```javascript
import { createClient } from '@supabase/supabase-js'
const supabase = createClient(
'http://localhost:8100',
'YOUR_ANON_KEY'
)
```
4. **ใช้ Supabase CLI สำหรับ development**
```bash
npm install -g supabase
supabase link --project-ref your-project
```
## 🆚 เมื่อไหร่ควรใช้ setup2.sh
ใช้ **setup2.sh** เมื่อ:
- ✅ ต้องการ official configuration ล่าสุด
- ✅ ต้องการ update ง่ายตาม official releases
- ✅ ต้องการใช้ official tools (generate-keys.sh)
- ✅ Production deployment
- ✅ ต้องการ official support
ใช้ **setup.sh** เมื่อ:
- ✅ ต้องการ quick setup
- ✅ ต้องการ custom configuration
- ✅ ต้องการ minimal setup
- ✅ Development/testing เท่านั้น
## 📞 Support
หากมีปัญหา:
1. ตรวจสอบ logs: `docker compose logs`
2. อ่าน [Troubleshooting Guide](https://supabase.com/docs/guides/self-hosting/docker#troubleshooting)
3. ดู [GitHub Discussions](https://github.com/supabase/supabase/discussions)
4. ตรวจสอบ [GitHub Issues](https://github.com/supabase/supabase/issues)

View File

@@ -0,0 +1,358 @@
# Supabase - Backend as a Service
Supabase เป็น open-source Firebase alternative ที่ให้บริการ:
- PostgreSQL Database พร้อม Realtime subscriptions
- Authentication & Authorization
- RESTful API (PostgREST)
- Storage สำหรับไฟล์
- Edge Functions
- Studio UI สำหรับจัดการ
## 🚀 Quick Start
### 1. Setup Configuration Files
```bash
cd 02-supabase
bash setup.sh
```
Script นี้จะ:
- สร้าง directories ที่จำเป็น
- ดาวน์โหลด config files จาก Supabase repository
- สร้าง `.env` file จาก template
### 2. Configure Environment Variables
แก้ไขไฟล์ `.env`:
```bash
nano .env
```
**สิ่งที่ต้องแก้ไข:**
```bash
# Generate secure passwords and secrets
POSTGRES_PASSWORD=<your-secure-password>
JWT_SECRET=$(openssl rand -base64 32)
SECRET_KEY_BASE=$(openssl rand -base64 32)
VAULT_ENC_KEY=$(openssl rand -base64 32)
PG_META_CRYPTO_KEY=$(openssl rand -base64 32)
LOGFLARE_PUBLIC_ACCESS_TOKEN=$(openssl rand -base64 32)
LOGFLARE_PRIVATE_ACCESS_TOKEN=$(openssl rand -base64 32)
# Dashboard credentials
DASHBOARD_USERNAME=admin
DASHBOARD_PASSWORD=<your-secure-password>
# Update URLs if needed
SUPABASE_PUBLIC_URL=http://localhost:8100
SITE_URL=http://localhost:3010
```
### 3. Start Supabase
```bash
# Ensure network exists
cd ../00-network
bash create-network.sh
# Start Supabase
cd ../02-supabase
docker compose up -d
```
### 4. Verify Services
```bash
docker compose ps
```
คุณควรเห็น containers:
- `supabase-studio` - Web UI
- `supabase-kong` - API Gateway
- `supabase-auth` - Authentication service
- `supabase-rest` - PostgREST API
- `supabase-realtime` - Realtime subscriptions
- `supabase-storage` - File storage
- `supabase-db` - PostgreSQL database
- `supabase-meta` - Database metadata
- `supabase-analytics` - Logflare analytics
- และอื่นๆ
## 🔑 Access Points
| Service | URL | Port | Description |
|---------|-----|------|-------------|
| **Studio** | http://localhost:3010 | 3010 | Web UI สำหรับจัดการ |
| **API Gateway** | http://localhost:8100 | 8100 | REST API endpoint |
| **PostgreSQL** | localhost:5434 | 5434 | Database (internal) |
| **Pooler** | localhost:6544 | 6544 | Connection pooler |
## 📝 Port Configuration
Supabase ใช้ port ที่ไม่ชนกับ services อื่นใน stack:
- **3010**: Studio (แทน default 3000)
- **8100**: Kong HTTP (แทน default 8000)
- **8443**: Kong HTTPS
- **5434**: PostgreSQL (แทน default 5432 เพื่อไม่ให้ชนกับ PostgreSQL instance อื่นใน stack)
- **6544**: Pooler (แทน default 6543)
## 🔐 Authentication
### Default API Keys
ใน `.env` จะมี API keys 2 ประเภท:
1. **ANON_KEY** - ใช้สำหรับ client-side (public)
2. **SERVICE_ROLE_KEY** - ใช้สำหรับ server-side (private, bypass RLS)
**⚠️ สำคัญ:** ใน production ต้อง generate JWT secret และ API keys ใหม่:
```bash
# Generate a new JWT secret
openssl rand -base64 32
# จากนั้นสร้าง ANON_KEY / SERVICE_ROLE_KEY ใหม่ด้วย official generate-keys.sh
# หรือเครื่องมือสร้าง API keys ใน official self-hosting guide:
# https://supabase.com/docs/guides/self-hosting/docker#securing-your-services
```
### Dashboard Access
เข้า Studio ที่ http://localhost:3010 แล้วใช้:
- Username: `DASHBOARD_USERNAME` จาก `.env`
- Password: `DASHBOARD_PASSWORD` จาก `.env`
## 📊 Database Management
### Connect to PostgreSQL
```bash
# Using psql
psql -h localhost -p 5434 -U postgres -d postgres
# Using connection string
postgresql://postgres:<POSTGRES_PASSWORD>@localhost:5434/postgres
```
### Database Roles
Supabase สร้าง roles หลายตัวอัตโนมัติ:
- `postgres` - Superuser
- `authenticator` - สำหรับ PostgREST
- `anon` - Anonymous access
- `authenticated` - Authenticated users
- `service_role` - Service role (bypass RLS)
- `supabase_admin` - Admin operations
- `supabase_auth_admin` - Auth service
- `supabase_storage_admin` - Storage service
## 🔄 Realtime Subscriptions
Supabase Realtime ให้ subscribe การเปลี่ยนแปลงใน database:
```javascript
import { createClient } from '@supabase/supabase-js'
const supabase = createClient(
'http://localhost:8100',
'YOUR_ANON_KEY'
)
// Subscribe to changes
const channel = supabase
.channel('table-changes')
.on('postgres_changes',
{ event: '*', schema: 'public', table: 'your_table' },
(payload) => console.log(payload)
)
.subscribe()
```
## 📦 Storage
### Upload Files
```javascript
const { data, error } = await supabase
.storage
.from('bucket-name')
.upload('file-path', file)
```
### Download Files
```javascript
const { data, error } = await supabase
.storage
.from('bucket-name')
.download('file-path')
```
## 🔧 Edge Functions
Edge Functions อยู่ใน `volumes/functions/`:
```bash
# Create new function
mkdir -p volumes/functions/hello
cat > volumes/functions/hello/index.ts << 'EOF'
import { serve } from "https://deno.land/std@0.168.0/http/server.ts"
serve(async (req) => {
return new Response(
JSON.stringify({ message: "Hello from Supabase Edge Functions!" }),
{ headers: { "Content-Type": "application/json" } },
)
})
EOF
# Restart functions service
docker compose restart functions
```
## 🔍 Monitoring & Logs
### View Logs
```bash
# All services
docker compose logs -f
# Specific service
docker compose logs -f studio
docker compose logs -f auth
docker compose logs -f db
```
### Analytics
Supabase Analytics (Logflare) รวบรวม logs และ metrics ที่:
- http://localhost:3010 (ใน Studio UI)
## 🛠️ Maintenance
### Backup Database
```bash
# Backup
docker exec supabase-db pg_dump -U postgres postgres > backup_$(date +%Y%m%d).sql
# Restore
docker exec -i supabase-db psql -U postgres postgres < backup_20260218.sql
```
### Update Supabase
```bash
# Pull latest images
docker compose pull
# Restart services
docker compose up -d
```
### Reset Everything
```bash
# Stop and remove containers
docker compose down
# Remove volumes (⚠️ This deletes all data!)
docker compose down -v
rm -rf volumes/db/data
# Start fresh
bash setup.sh
docker compose up -d
```
## 🔗 Integration with Other Services
### Connect from API Service (FastAPI)
```python
from supabase import create_client, Client
supabase: Client = create_client(
"http://supabase-kong:8000", # Internal network
"YOUR_SERVICE_ROLE_KEY"
)
# Query data
response = supabase.table('users').select("*").execute()
```
### Connect via Nginx Proxy Manager
เพิ่ม Proxy Host:
- Domain: `supabase.sriphat.local`
- Forward Hostname: `supabase-kong`
- Forward Port: `8000`
## 📚 Documentation
- [Supabase Official Docs](https://supabase.com/docs)
- [PostgREST API Reference](https://postgrest.org/)
- [Supabase JavaScript Client](https://supabase.com/docs/reference/javascript/introduction)
- [Self-Hosting Guide](https://supabase.com/docs/guides/self-hosting)
## 🐛 Troubleshooting
### Services not starting
```bash
# Check logs
docker compose logs
# Check network
docker network inspect shared_data_network
# Verify .env file
grep -v '^#' .env | grep -v '^$'
```
### Database connection issues
```bash
# Check database is ready
docker exec supabase-db pg_isready -U postgres
# Check database logs
docker compose logs db
```
### Port conflicts
ถ้า port ชน ให้แก้ไขใน `.env`:
```bash
STUDIO_PORT=3011
KONG_HTTP_PORT=8101
```
แล้ว restart:
```bash
docker compose down
docker compose up -d
```
## 🔒 Security Best Practices
1. **เปลี่ยน default passwords** ทั้งหมดใน `.env`
2. **Generate JWT secrets ใหม่** สำหรับ production
3. **Enable Row Level Security (RLS)** สำหรับทุก table
4. **ใช้ HTTPS** ใน production (ผ่าน Nginx Proxy Manager)
5. **Backup database** เป็นประจำ
6. **Monitor logs** สำหรับ suspicious activities
7. **Update images** เป็นประจำเพื่อ security patches
## 📞 Support
หากมีปัญหาหรือคำถาม:
1. ตรวจสอบ logs: `docker compose logs`
2. ดู [Supabase Discussions](https://github.com/supabase/supabase/discussions)
3. อ่าน [Self-Hosting Troubleshooting](https://supabase.com/docs/guides/self-hosting/docker#troubleshooting)

View File

@@ -0,0 +1,551 @@
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
# Reset everything: ./reset.sh
name: supabase
services:
sdp-studio:
container_name: sdp-supabase-studio
image: supabase/studio:2026.02.16-sha-26c615c
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"node",
"-e",
"fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
]
timeout: 10s
interval: 5s
retries: 3
depends_on:
sdp-analytics:
condition: service_healthy
environment:
# Binds nestjs listener to both IPv4 and IPv6 network interfaces
HOSTNAME: "::"
STUDIO_PG_META_URL: http://sdp-meta:8080
POSTGRES_PORT: ${POSTGRES_PORT}
POSTGRES_HOST: ${POSTGRES_HOST}
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PG_META_CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
OPENAI_API_KEY: ${OPENAI_API_KEY:-}
SUPABASE_URL: http://sdp-kong:8000
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
AUTH_JWT_SECRET: ${JWT_SECRET}
# LOGFLARE_API_KEY is deprecated
LOGFLARE_API_KEY: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_URL: http://sdp-analytics:4000
NEXT_PUBLIC_ENABLE_LOGS: true
# Comment to use Big Query backend for analytics
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
# Uncomment to use Big Query backend for analytics
# NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
SNIPPETS_MANAGEMENT_FOLDER: /app/snippets
EDGE_FUNCTIONS_MANAGEMENT_FOLDER: /app/edge-functions
volumes:
- ./volumes/snippets:/app/snippets:Z
- ./volumes/functions:/app/edge-functions:Z
sdp-kong:
container_name: sdp-supabase-kong
image: kong:2.8.1
restart: unless-stopped
ports:
- 8100:8000/tcp
- 8443:8443/tcp
volumes:
# https://github.com/supabase/supabase/issues/12661
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z
depends_on:
sdp-analytics:
condition: service_healthy
environment:
KONG_DATABASE: "off"
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
# https://github.com/supabase/cli/issues/14
KONG_DNS_ORDER: LAST,A,CNAME
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth,request-termination,ip-restriction
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
# https://unix.stackexchange.com/a/294837
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
sdp-auth:
container_name: sdp-supabase-auth
image: supabase/gotrue:v2.186.0
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9999/health"
]
timeout: 5s
interval: 5s
retries: 3
depends_on:
sdp-db:
# Disable this if you are using an external Postgres database
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
GOTRUE_API_HOST: 0.0.0.0
GOTRUE_API_PORT: 9999
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
GOTRUE_DB_DRIVER: postgres
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
GOTRUE_SITE_URL: ${SITE_URL}
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
GOTRUE_JWT_ADMIN_ROLES: service_role
GOTRUE_JWT_AUD: authenticated
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
GOTRUE_JWT_SECRET: ${JWT_SECRET}
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
# Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
# GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
GOTRUE_SMTP_HOST: ${SMTP_HOST}
GOTRUE_SMTP_PORT: ${SMTP_PORT}
GOTRUE_SMTP_USER: ${SMTP_USER}
GOTRUE_SMTP_PASS: ${SMTP_PASS}
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
# Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
# GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
# GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
# GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"
# GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
# GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
# GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
# GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
# GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
# GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
sdp-rest:
container_name: sdp-supabase-rest
image: postgrest/postgrest:v14.5
restart: unless-stopped
depends_on:
sdp-db:
# Disable this if you are using an external Postgres database
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
PGRST_DB_ANON_ROLE: anon
PGRST_JWT_SECRET: ${JWT_SECRET}
PGRST_DB_USE_LEGACY_GUCS: "false"
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
command:
[
"postgrest"
]
sdp-realtime:
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
container_name: sdp-realtime-dev.supabase-realtime
image: supabase/realtime:v2.76.5
restart: unless-stopped
depends_on:
sdp-db:
# Disable this if you are using an external Postgres database
condition: service_healthy
sdp-analytics:
condition: service_healthy
healthcheck:
test:
[
"CMD-SHELL",
"curl -sSfL --head -o /dev/null -H \"Authorization: Bearer ${ANON_KEY}\" http://localhost:4000/api/tenants/realtime-dev/health"
]
timeout: 5s
interval: 30s
retries: 3
start_period: 10s
environment:
PORT: 4000
DB_HOST: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_USER: supabase_admin
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_NAME: ${POSTGRES_DB}
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
DB_ENC_KEY: supabaserealtime
API_JWT_SECRET: ${JWT_SECRET}
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
ERL_AFLAGS: -proto_dist inet_tcp
DNS_NODES: "''"
RLIMIT_NOFILE: "10000"
APP_NAME: realtime
SEED_SELF_HOST: "true"
RUN_JANITOR: "true"
DISABLE_HEALTHCHECK_LOGGING: "true"
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
sdp-storage:
container_name: sdp-supabase-storage
image: supabase/storage-api:v1.37.8
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://storage:5000/status"
]
timeout: 5s
interval: 5s
retries: 3
depends_on:
sdp-db:
# Disable this if you are using an external Postgres database
condition: service_healthy
sdp-rest:
condition: service_started
sdp-imgproxy:
condition: service_started
environment:
ANON_KEY: ${ANON_KEY}
SERVICE_KEY: ${SERVICE_ROLE_KEY}
POSTGREST_URL: http://sdp-rest:3000
PGRST_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
REQUEST_ALLOW_X_FORWARDED_PATH: "true"
FILE_SIZE_LIMIT: 52428800
STORAGE_BACKEND: file
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
TENANT_ID: ${STORAGE_TENANT_ID}
# TODO: https://github.com/supabase/storage-api/issues/55
REGION: ${REGION}
GLOBAL_S3_BUCKET: ${GLOBAL_S3_BUCKET}
ENABLE_IMAGE_TRANSFORMATION: "true"
IMGPROXY_URL: http://sdp-imgproxy:5001
S3_PROTOCOL_ACCESS_KEY_ID: ${S3_PROTOCOL_ACCESS_KEY_ID}
S3_PROTOCOL_ACCESS_KEY_SECRET: ${S3_PROTOCOL_ACCESS_KEY_SECRET}
sdp-imgproxy:
container_name: sdp-supabase-imgproxy
image: darthsim/imgproxy:v3.30.1
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test:
[
"CMD",
"imgproxy",
"health"
]
timeout: 5s
interval: 5s
retries: 3
environment:
IMGPROXY_BIND: ":5001"
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
IMGPROXY_USE_ETAG: "true"
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
IMGPROXY_MAX_SRC_RESOLUTION: 16.8
sdp-meta:
container_name: sdp-supabase-meta
image: supabase/postgres-meta:v0.95.2
restart: unless-stopped
depends_on:
sdp-db:
# Disable this if you are using an external Postgres database
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
PG_META_PORT: 8080
PG_META_DB_HOST: ${POSTGRES_HOST}
PG_META_DB_PORT: ${POSTGRES_PORT}
PG_META_DB_NAME: ${POSTGRES_DB}
PG_META_DB_USER: supabase_admin
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
sdp-functions:
container_name: sdp-supabase-edge-functions
image: supabase/edge-runtime:v1.70.3
restart: unless-stopped
volumes:
- ./volumes/functions:/home/deno/functions:Z
depends_on:
sdp-analytics:
condition: service_healthy
environment:
JWT_SECRET: ${JWT_SECRET}
SUPABASE_URL: http://sdp-kong:8000
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
command:
[
"start",
"--main-service",
"/home/deno/functions/main"
]
sdp-analytics:
container_name: sdp-supabase-analytics
image: supabase/logflare:1.31.2
restart: unless-stopped
# ports:
# - 4000:4000
# Uncomment to use Big Query backend for analytics
# volumes:
# - type: bind
# source: ${PWD}/gcloud.json
# target: /opt/app/rel/logflare/bin/gcloud.json
# read_only: true
healthcheck:
test:
[
"CMD",
"curl",
"http://localhost:4000/health"
]
timeout: 5s
interval: 5s
retries: 10
depends_on:
sdp-db:
# Disable this if you are using an external Postgres database
condition: service_healthy
environment:
LOGFLARE_NODE_HOST: 127.0.0.1
DB_USERNAME: supabase_admin
DB_DATABASE: _supabase
DB_HOSTNAME: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_SCHEMA: _analytics
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_SINGLE_TENANT: true
LOGFLARE_SUPABASE_MODE: true
# Comment variables to use Big Query backend for analytics
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
POSTGRES_BACKEND_SCHEMA: _analytics
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
# Uncomment to use Big Query backend for analytics
# GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
# GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
# Comment out everything below this point if you are using an external Postgres database
sdp-db:
container_name: sdp-supabase-db
image: supabase/postgres:15.8.1.085
restart: unless-stopped
volumes:
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
# Must be superuser to create event trigger
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
# Must be superuser to alter reserved role
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
# Initialize the database settings with JWT_SECRET and JWT_EXP
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
# PGDATA directory is persisted between restarts
- ./volumes/db/data:/var/lib/postgresql/data:Z
# Changes required for internal supabase data such as _analytics
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
# Changes required for Analytics support
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
# Changes required for Pooler support
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
# Use named volume to persist pgsodium decryption key between restarts
- db-config:/etc/postgresql-custom
healthcheck:
test:
[
"CMD",
"pg_isready",
"-U",
"postgres",
"-h",
"localhost"
]
interval: 5s
timeout: 5s
retries: 10
depends_on:
sdp-vector:
condition: service_healthy
environment:
POSTGRES_HOST: /var/run/postgresql
PGPORT: ${POSTGRES_PORT}
POSTGRES_PORT: ${POSTGRES_PORT}
PGPASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PGDATABASE: ${POSTGRES_DB}
POSTGRES_DB: ${POSTGRES_DB}
JWT_SECRET: ${JWT_SECRET}
JWT_EXP: ${JWT_EXPIRY}
command:
[
"postgres",
"-c",
"config_file=/etc/postgresql/postgresql.conf",
"-c",
"log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
]
sdp-vector:
container_name: sdp-supabase-vector
image: timberio/vector:0.53.0-alpine
restart: unless-stopped
volumes:
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://vector:9001/health"
]
timeout: 5s
interval: 5s
retries: 3
environment:
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
command:
[
"--config",
"/etc/vector/vector.yml"
]
security_opt:
- "label=disable"
# Update the DATABASE_URL if you are using an external Postgres database
sdp-supavisor:
container_name: sdp-supabase-pooler
image: supabase/supavisor:2.7.4
restart: unless-stopped
ports:
- 5434:5432
- 6544:6543
volumes:
- ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
healthcheck:
test:
[
"CMD",
"curl",
"-sSfL",
"--head",
"-o",
"/dev/null",
"http://127.0.0.1:4000/api/health"
]
interval: 10s
timeout: 5s
retries: 5
depends_on:
sdp-db:
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
PORT: 4000
POSTGRES_PORT: ${POSTGRES_PORT}
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
CLUSTER_POSTGRES: true
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
VAULT_ENC_KEY: ${VAULT_ENC_KEY}
API_JWT_SECRET: ${JWT_SECRET}
METRICS_JWT_SECRET: ${JWT_SECRET}
REGION: local
ERL_AFLAGS: -proto_dist inet_tcp
POOLER_TENANT_ID: ${POOLER_TENANT_ID}
POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
POOLER_POOL_MODE: transaction
DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE}
command:
[
"/bin/sh",
"-c",
"/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
]
volumes:
db-config:
# External network configuration for integration with other services
networks:
default:
name: shared_data_network
external: true

View File

@@ -0,0 +1,448 @@
# NOTE: the top-level "version" key is obsolete in the Compose specification;
# Docker Compose v2 ignores it and prints a warning, so it has been removed.
services:
sdp-studio:
container_name: sdp-supabase-studio
image: supabase/studio:2026.02.16-sha-26c615c
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"node",
"-e",
"fetch('http://localhost:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
]
timeout: 10s
interval: 5s
retries: 3
depends_on:
sdp-analytics:
condition: service_healthy
environment:
HOSTNAME: "::"
STUDIO_PG_META_URL: http://sdp-meta:8080
POSTGRES_PORT: ${POSTGRES_PORT}
POSTGRES_HOST: ${POSTGRES_HOST}
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PG_META_CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
SUPABASE_URL: http://sdp-kong:8000
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
AUTH_JWT_SECRET: ${JWT_SECRET}
LOGFLARE_API_KEY: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_URL: http://sdp-analytics:4000
NEXT_PUBLIC_ENABLE_LOGS: true
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
volumes:
- ./volumes/snippets:/app/snippets:Z
- ./volumes/functions:/app/supabase/functions:Z
ports:
- "${STUDIO_PORT:-3010}:3000"
networks:
- shared_data_network
  sdp-kong:
    container_name: sdp-supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    ports:
      # NOTE(review): the HTTPS fallback default here is 8444, while the
      # sample .env documents KONG_HTTPS_PORT=8443 — confirm which is intended
      # (8444 avoids clashing with the other compose file's 8443 mapping).
      - "${KONG_HTTP_PORT:-8100}:8000"
      - "${KONG_HTTPS_PORT:-8444}:8443"
    volumes:
      # Mounted as a template; the entrypoint expands env vars into it.
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z
    depends_on:
      sdp-analytics:
        condition: service_healthy
    environment:
      # Declarative (DB-less) mode: routes/consumers come from kong.yml.
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth,request-termination,ip-restriction
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    # Expand env vars into the declarative config, then run the stock startup.
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    networks:
      - shared_data_network
sdp-auth:
container_name: sdp-supabase-auth
image: supabase/gotrue:v2.186.0
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9999/health"
]
timeout: 5s
interval: 5s
retries: 3
depends_on:
sdp-db:
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
GOTRUE_API_HOST: 0.0.0.0
GOTRUE_API_PORT: 9999
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
GOTRUE_DB_DRIVER: postgres
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
GOTRUE_SITE_URL: ${SITE_URL}
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
GOTRUE_JWT_ADMIN_ROLES: service_role
GOTRUE_JWT_AUD: authenticated
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
GOTRUE_JWT_SECRET: ${JWT_SECRET}
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
GOTRUE_SMTP_HOST: ${SMTP_HOST}
GOTRUE_SMTP_PORT: ${SMTP_PORT}
GOTRUE_SMTP_USER: ${SMTP_USER}
GOTRUE_SMTP_PASS: ${SMTP_PASS}
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
networks:
- shared_data_network
sdp-rest:
container_name: sdp-supabase-rest
image: postgrest/postgrest:v12.2.3
restart: unless-stopped
depends_on:
sdp-db:
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
PGRST_DB_ANON_ROLE: anon
PGRST_JWT_SECRET: ${JWT_SECRET}
PGRST_DB_USE_LEGACY_GUCS: "false"
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
command: ["postgrest"]
networks:
- shared_data_network
sdp-realtime:
container_name: sdp-realtime-dev.supabase-realtime
image: supabase/realtime:v2.76.5
restart: unless-stopped
depends_on:
sdp-db:
condition: service_healthy
sdp-analytics:
condition: service_healthy
healthcheck:
test:
[
"CMD-SHELL",
"curl -sSfL --head -o /dev/null -H \"Authorization: Bearer ${ANON_KEY}\" http://localhost:4000/api/tenants/realtime-dev/health"
]
timeout: 5s
interval: 30s
retries: 3
start_period: 10s
environment:
PORT: 4000
DB_HOST: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_USER: supabase_admin
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_NAME: ${POSTGRES_DB}
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
DB_ENC_KEY: supabaserealtime
API_JWT_SECRET: ${JWT_SECRET}
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
ERL_AFLAGS: -proto_dist inet_tcp
DNS_NODES: "''"
RLIMIT_NOFILE: "10000"
APP_NAME: realtime
SEED_SELF_HOST: "true"
RUN_JANITOR: "true"
DISABLE_HEALTHCHECK_LOGGING: "true"
networks:
- shared_data_network
sdp-storage:
container_name: sdp-supabase-storage
image: supabase/storage-api:v1.37.8
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:5000/status"
]
timeout: 5s
interval: 5s
retries: 3
depends_on:
sdp-db:
condition: service_healthy
sdp-rest:
condition: service_started
sdp-imgproxy:
condition: service_started
environment:
ANON_KEY: ${ANON_KEY}
SERVICE_KEY: ${SERVICE_ROLE_KEY}
POSTGREST_URL: http://sdp-rest:3000
PGRST_JWT_SECRET: ${JWT_SECRET}
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
REQUEST_ALLOW_X_FORWARDED_PATH: "true"
FILE_SIZE_LIMIT: 52428800
STORAGE_BACKEND: file
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
TENANT_ID: ${STORAGE_TENANT_ID}
REGION: ${REGION}
GLOBAL_S3_BUCKET: ${GLOBAL_S3_BUCKET}
ENABLE_IMAGE_TRANSFORMATION: "true"
IMGPROXY_URL: http://sdp-imgproxy:5001
S3_PROTOCOL_ACCESS_KEY_ID: ${S3_PROTOCOL_ACCESS_KEY_ID}
S3_PROTOCOL_ACCESS_KEY_SECRET: ${S3_PROTOCOL_ACCESS_KEY_SECRET}
networks:
- shared_data_network
sdp-imgproxy:
container_name: sdp-supabase-imgproxy
image: darthsim/imgproxy:v3.30.1
restart: unless-stopped
volumes:
- ./volumes/storage:/var/lib/storage:z
healthcheck:
test: ["CMD", "imgproxy", "health"]
timeout: 5s
interval: 5s
retries: 3
environment:
IMGPROXY_BIND: ":5001"
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
IMGPROXY_USE_ETAG: "true"
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
IMGPROXY_MAX_SRC_RESOLUTION: 16.8
networks:
- shared_data_network
sdp-meta:
container_name: sdp-supabase-meta
image: supabase/postgres-meta:v0.95.2
restart: unless-stopped
depends_on:
sdp-db:
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
PG_META_PORT: 8080
PG_META_DB_HOST: ${POSTGRES_HOST}
PG_META_DB_PORT: ${POSTGRES_PORT}
PG_META_DB_NAME: ${POSTGRES_DB}
PG_META_DB_USER: supabase_admin
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
networks:
- shared_data_network
sdp-functions:
container_name: sdp-supabase-edge-functions
image: supabase/edge-runtime:v1.70.3
restart: unless-stopped
volumes:
- ./volumes/functions:/home/deno/functions:Z
depends_on:
sdp-analytics:
condition: service_healthy
environment:
JWT_SECRET: ${JWT_SECRET}
SUPABASE_URL: http://sdp-kong:8000
SUPABASE_ANON_KEY: ${ANON_KEY}
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
command: ["start", "--main-service", "/home/deno/functions/main"]
networks:
- shared_data_network
sdp-analytics:
container_name: sdp-supabase-analytics
image: supabase/logflare:1.31.2
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "http://localhost:4000/health"]
timeout: 5s
interval: 5s
retries: 10
depends_on:
sdp-db:
condition: service_healthy
environment:
LOGFLARE_NODE_HOST: 127.0.0.1
DB_USERNAME: supabase_admin
DB_DATABASE: _supabase
DB_HOSTNAME: ${POSTGRES_HOST}
DB_PORT: ${POSTGRES_PORT}
DB_PASSWORD: ${POSTGRES_PASSWORD}
DB_SCHEMA: _analytics
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
LOGFLARE_SINGLE_TENANT: true
LOGFLARE_SUPABASE_MODE: true
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
POSTGRES_BACKEND_SCHEMA: _analytics
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
networks:
- shared_data_network
sdp-db:
container_name: sdp-supabase-db
image: supabase/postgres:15.8.1.085
restart: unless-stopped
volumes:
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
- ./volumes/db/data:/var/lib/postgresql/data:Z
- ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
- ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
- db-config:/etc/postgresql-custom
healthcheck:
test: ["CMD", "pg_isready", "-U", "postgres", "-h", "localhost"]
interval: 5s
timeout: 5s
retries: 10
# depends_on:
# sdp-vector:
# condition: service_healthy
environment:
POSTGRES_HOST: /var/run/postgresql
PGPORT: ${POSTGRES_PORT}
POSTGRES_PORT: ${POSTGRES_PORT}
PGPASSWORD: ${POSTGRES_PASSWORD}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PGDATABASE: ${POSTGRES_DB}
POSTGRES_DB: ${POSTGRES_DB}
JWT_SECRET: ${JWT_SECRET}
JWT_EXP: ${JWT_EXPIRY}
command:
[
"postgres",
"-c",
"config_file=/etc/postgresql/postgresql.conf",
"-c",
"log_min_messages=fatal"
]
ports:
- "5434:5432"
networks:
- shared_data_network
sdp-vector:
container_name: sdp-supabase-vector
image: timberio/vector:0.53.0-alpine
restart: unless-stopped
volumes:
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z
healthcheck:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9001/health"
]
timeout: 5s
interval: 5s
retries: 3
environment:
LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
command: ["--config", "/etc/vector/vector.yml"]
security_opt:
- "label=disable"
networks:
- shared_data_network
sdp-supavisor:
container_name: sdp-supabase-pooler
image: supabase/supavisor:2.7.4
restart: unless-stopped
ports:
- "${POOLER_PROXY_PORT_TRANSACTION:-6544}:6543"
volumes:
- ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
healthcheck:
test:
[
"CMD",
"curl",
"-sSfL",
"--head",
"-o",
"/dev/null",
"http://127.0.0.1:4000/api/health"
]
interval: 10s
timeout: 5s
retries: 5
depends_on:
sdp-db:
condition: service_healthy
sdp-analytics:
condition: service_healthy
environment:
PORT: 4000
POSTGRES_PORT: ${POSTGRES_PORT}
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
CLUSTER_POSTGRES: true
SECRET_KEY_BASE: ${SECRET_KEY_BASE}
VAULT_ENC_KEY: ${VAULT_ENC_KEY}
API_JWT_SECRET: ${JWT_SECRET}
METRICS_JWT_SECRET: ${JWT_SECRET}
networks:
- shared_data_network
volumes:
db-config:
networks:
shared_data_network:
external: true

View File

@@ -0,0 +1,551 @@
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ./dev/docker-compose.dev.yml down -v --remove-orphans
# Reset everything: ./reset.sh
name: supabase
services:
# Supabase Studio web dashboard.
sdp-studio:
  container_name: sdp-supabase-studio
  image: supabase/studio:2026.02.16-sha-26c615c
  restart: unless-stopped
  healthcheck:
    # Probe the profile API. Uses the sdp-studio service alias: the upstream
    # hostname "studio" no longer resolves after the sdp- service rename, so
    # the previous URL (http://studio:3000/...) made this check always fail.
    test:
      [
        "CMD",
        "node",
        "-e",
        "fetch('http://sdp-studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})"
      ]
    timeout: 10s
    interval: 5s
    retries: 3
  depends_on:
    sdp-analytics:
      condition: service_healthy
  environment:
    # Binds nestjs listener to both IPv4 and IPv6 network interfaces
    HOSTNAME: "::"
    STUDIO_PG_META_URL: http://sdp-meta:8080
    POSTGRES_PORT: ${POSTGRES_PORT}
    POSTGRES_HOST: ${POSTGRES_HOST}
    POSTGRES_DB: ${POSTGRES_DB}
    POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    PG_META_CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
    DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
    DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
    OPENAI_API_KEY: ${OPENAI_API_KEY:-}
    # Internal API access goes through the Kong gateway.
    SUPABASE_URL: http://sdp-kong:8000
    SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
    SUPABASE_ANON_KEY: ${ANON_KEY}
    SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
    AUTH_JWT_SECRET: ${JWT_SECRET}
    # LOGFLARE_API_KEY is deprecated
    LOGFLARE_API_KEY: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
    LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
    LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
    LOGFLARE_URL: http://sdp-analytics:4000
    NEXT_PUBLIC_ENABLE_LOGS: true
    # Comment to use Big Query backend for analytics
    NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
    # Uncomment to use Big Query backend for analytics
    # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
    SNIPPETS_MANAGEMENT_FOLDER: /app/snippets
    EDGE_FUNCTIONS_MANAGEMENT_FOLDER: /app/edge-functions
  volumes:
    - ./volumes/snippets:/app/snippets:Z
    - ./volumes/functions:/app/edge-functions:Z
# Kong API gateway: single entry point that routes to auth, rest, realtime,
# storage, functions, analytics and meta (routes defined in kong.yml).
sdp-kong:
  container_name: sdp-supabase-kong
  image: kong:2.8.1
  restart: unless-stopped
  ports:
    # Host 8100 -> Kong HTTP proxy (8000 in-container).
    - 8100:8000/tcp
    - 8443:8443/tcp
  volumes:
    # Mounted as a template; rendered to the real config by the entrypoint.
    # https://github.com/supabase/supabase/issues/12661
    - ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z
  depends_on:
    sdp-analytics:
      condition: service_healthy
  environment:
    # DB-less mode: all routes/services come from the declarative config.
    KONG_DATABASE: "off"
    KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
    # https://github.com/supabase/cli/issues/14
    KONG_DNS_ORDER: LAST,A,CNAME
    KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth,request-termination,ip-restriction
    KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
    KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
    SUPABASE_ANON_KEY: ${ANON_KEY}
    SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
    DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
    DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
  # Substitutes env vars into temp.yml to produce kong.yml, then starts Kong.
  # https://unix.stackexchange.com/a/294837
  entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
# GoTrue authentication service (signups, logins, JWT issuance, mailing).
sdp-auth:
  container_name: sdp-supabase-auth
  image: supabase/gotrue:v2.186.0
  restart: unless-stopped
  healthcheck:
    test:
      [
        "CMD",
        "wget",
        "--no-verbose",
        "--tries=1",
        "--spider",
        "http://localhost:9999/health"
      ]
    timeout: 5s
    interval: 5s
    retries: 3
  depends_on:
    sdp-db:
      # Disable this if you are using an external Postgres database
      condition: service_healthy
    sdp-analytics:
      condition: service_healthy
  environment:
    GOTRUE_API_HOST: 0.0.0.0
    GOTRUE_API_PORT: 9999
    API_EXTERNAL_URL: ${API_EXTERNAL_URL}
    GOTRUE_DB_DRIVER: postgres
    # Dedicated supabase_auth_admin role for the auth schema.
    GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
    GOTRUE_SITE_URL: ${SITE_URL}
    GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
    GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
    # JWT claim configuration shared with PostgREST / Storage.
    GOTRUE_JWT_ADMIN_ROLES: service_role
    GOTRUE_JWT_AUD: authenticated
    GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
    GOTRUE_JWT_EXP: ${JWT_EXPIRY}
    GOTRUE_JWT_SECRET: ${JWT_SECRET}
    GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
    GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
    GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
    # Uncomment to bypass nonce check in ID Token flow. Commonly set to true when using Google Sign In on mobile.
    # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
    # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
    # GOTRUE_SMTP_MAX_FREQUENCY: 1s
    # SMTP settings for outgoing auth mail (confirmation, recovery, invite).
    GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
    GOTRUE_SMTP_HOST: ${SMTP_HOST}
    GOTRUE_SMTP_PORT: ${SMTP_PORT}
    GOTRUE_SMTP_USER: ${SMTP_USER}
    GOTRUE_SMTP_PASS: ${SMTP_PASS}
    GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
    # NOTE(review): MAILER_URLPATHS_* are not defined in the .env template
    # shipped with this stack — confirm they are set or intentionally empty.
    GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
    GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
    GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
    GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
    GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
    GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
    # Uncomment to enable custom access token hook. Please see: https://supabase.com/docs/guides/auth/auth-hooks for full list of hooks and additional details about custom_access_token_hook
    # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
    # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
    # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: "<standard-base64-secret>"
    # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
    # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"
    # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
    # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"
    # GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
    # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
    # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
    # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
    # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
    # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"
# PostgREST: auto-generated REST API over the exposed Postgres schemas.
sdp-rest:
  container_name: sdp-supabase-rest
  image: postgrest/postgrest:v14.5
  restart: unless-stopped
  depends_on:
    sdp-db:
      # Disable this if you are using an external Postgres database
      condition: service_healthy
    sdp-analytics:
      condition: service_healthy
  environment:
    # Connects as the dedicated "authenticator" role; anonymous requests
    # are executed as the "anon" role.
    PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
    PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
    PGRST_DB_ANON_ROLE: anon
    PGRST_JWT_SECRET: ${JWT_SECRET}
    PGRST_DB_USE_LEGACY_GUCS: "false"
    PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
    PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
  command:
    [
      "postgrest"
    ]
# Realtime: websocket broadcasts, presence and Postgres change streams.
sdp-realtime:
  # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
  container_name: sdp-realtime-dev.supabase-realtime
  image: supabase/realtime:v2.76.5
  restart: unless-stopped
  depends_on:
    sdp-db:
      # Disable this if you are using an external Postgres database
      condition: service_healthy
    sdp-analytics:
      condition: service_healthy
  healthcheck:
    # Authenticated tenant health probe from inside the container.
    # NOTE(review): the path still targets tenant "realtime-dev" while the
    # container name gained an sdp- prefix — confirm the seeded tenant id
    # is still "realtime-dev".
    test:
      [
        "CMD-SHELL",
        "curl -sSfL --head -o /dev/null -H \"Authorization: Bearer ${ANON_KEY}\" http://localhost:4000/api/tenants/realtime-dev/health"
      ]
    timeout: 5s
    interval: 30s
    retries: 3
    start_period: 10s
  environment:
    PORT: 4000
    DB_HOST: ${POSTGRES_HOST}
    DB_PORT: ${POSTGRES_PORT}
    DB_USER: supabase_admin
    DB_PASSWORD: ${POSTGRES_PASSWORD}
    DB_NAME: ${POSTGRES_DB}
    # Realtime keeps its tables in the _realtime schema.
    DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
    DB_ENC_KEY: supabaserealtime
    API_JWT_SECRET: ${JWT_SECRET}
    SECRET_KEY_BASE: ${SECRET_KEY_BASE}
    ERL_AFLAGS: -proto_dist inet_tcp
    DNS_NODES: "''"
    RLIMIT_NOFILE: "10000"
    APP_NAME: realtime
    # Seed the self-hosted tenant on first boot and run periodic cleanup.
    SEED_SELF_HOST: "true"
    RUN_JANITOR: "true"
    DISABLE_HEALTHCHECK_LOGGING: "true"
# To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
# Storage API: file uploads/downloads backed by the local filesystem.
sdp-storage:
  container_name: sdp-supabase-storage
  image: supabase/storage-api:v1.37.8
  restart: unless-stopped
  volumes:
    - ./volumes/storage:/var/lib/storage:z
  healthcheck:
    # Status probe via the sdp-storage service alias: the upstream hostname
    # "storage" no longer resolves after the sdp- service rename, so the
    # previous URL (http://storage:5000/status) made this check always fail.
    test:
      [
        "CMD",
        "wget",
        "--no-verbose",
        "--tries=1",
        "--spider",
        "http://sdp-storage:5000/status"
      ]
    timeout: 5s
    interval: 5s
    retries: 3
  depends_on:
    sdp-db:
      # Disable this if you are using an external Postgres database
      condition: service_healthy
    sdp-rest:
      condition: service_started
    sdp-imgproxy:
      condition: service_started
  environment:
    ANON_KEY: ${ANON_KEY}
    SERVICE_KEY: ${SERVICE_ROLE_KEY}
    POSTGREST_URL: http://sdp-rest:3000
    PGRST_JWT_SECRET: ${JWT_SECRET}
    DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
    REQUEST_ALLOW_X_FORWARDED_PATH: "true"
    # Upload limit in bytes (50 MiB).
    FILE_SIZE_LIMIT: 52428800
    STORAGE_BACKEND: file
    FILE_STORAGE_BACKEND_PATH: /var/lib/storage
    TENANT_ID: ${STORAGE_TENANT_ID}
    # TODO: https://github.com/supabase/storage-api/issues/55
    REGION: ${REGION}
    GLOBAL_S3_BUCKET: ${GLOBAL_S3_BUCKET}
    # Image resizing is delegated to the imgproxy sidecar.
    ENABLE_IMAGE_TRANSFORMATION: "true"
    IMGPROXY_URL: http://sdp-imgproxy:5001
    S3_PROTOCOL_ACCESS_KEY_ID: ${S3_PROTOCOL_ACCESS_KEY_ID}
    S3_PROTOCOL_ACCESS_KEY_SECRET: ${S3_PROTOCOL_ACCESS_KEY_SECRET}
# imgproxy: on-the-fly image transformations for the Storage API.
sdp-imgproxy:
  container_name: sdp-supabase-imgproxy
  image: darthsim/imgproxy:v3.30.1
  restart: unless-stopped
  volumes:
    # Shares the storage volume so source images are read from local disk.
    - ./volumes/storage:/var/lib/storage:z
  healthcheck:
    test:
      [
        "CMD",
        "imgproxy",
        "health"
      ]
    timeout: 5s
    interval: 5s
    retries: 3
  environment:
    IMGPROXY_BIND: ":5001"
    IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
    IMGPROXY_USE_ETAG: "true"
    IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
    # Maximum source image resolution, in megapixels.
    IMGPROXY_MAX_SRC_RESOLUTION: 16.8
# postgres-meta: database introspection API consumed by Studio.
sdp-meta:
  container_name: sdp-supabase-meta
  image: supabase/postgres-meta:v0.95.2
  restart: unless-stopped
  depends_on:
    sdp-db:
      # Disable this if you are using an external Postgres database
      condition: service_healthy
    sdp-analytics:
      condition: service_healthy
  environment:
    PG_META_PORT: 8080
    PG_META_DB_HOST: ${POSTGRES_HOST}
    PG_META_DB_PORT: ${POSTGRES_PORT}
    PG_META_DB_NAME: ${POSTGRES_DB}
    PG_META_DB_USER: supabase_admin
    PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
    CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
# Deno edge-function runtime serving functions from ./volumes/functions.
sdp-functions:
  container_name: sdp-supabase-edge-functions
  image: supabase/edge-runtime:v1.70.3
  restart: unless-stopped
  volumes:
    - ./volumes/functions:/home/deno/functions:Z
  depends_on:
    sdp-analytics:
      condition: service_healthy
  environment:
    JWT_SECRET: ${JWT_SECRET}
    # Functions reach the platform APIs through the Kong gateway.
    SUPABASE_URL: http://sdp-kong:8000
    SUPABASE_ANON_KEY: ${ANON_KEY}
    SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
    SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
    # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
    VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
  command:
    [
      "start",
      "--main-service",
      "/home/deno/functions/main"
    ]
# Logflare log ingestion/analytics (Postgres backend by default).
sdp-analytics:
  container_name: sdp-supabase-analytics
  image: supabase/logflare:1.31.2
  restart: unless-stopped
  # ports:
  #   - 4000:4000
  # Uncomment to use Big Query backend for analytics
  # volumes:
  #   - type: bind
  #     source: ${PWD}/gcloud.json
  #     target: /opt/app/rel/logflare/bin/gcloud.json
  #     read_only: true
  healthcheck:
    test:
      [
        "CMD",
        "curl",
        "http://localhost:4000/health"
      ]
    timeout: 5s
    interval: 5s
    # Generous retry budget: Logflare runs migrations on first boot.
    retries: 10
  depends_on:
    sdp-db:
      # Disable this if you are using an external Postgres database
      condition: service_healthy
  environment:
    LOGFLARE_NODE_HOST: 127.0.0.1
    DB_USERNAME: supabase_admin
    # Logflare state lives in the internal _supabase DB, _analytics schema.
    DB_DATABASE: _supabase
    DB_HOSTNAME: ${POSTGRES_HOST}
    DB_PORT: ${POSTGRES_PORT}
    DB_PASSWORD: ${POSTGRES_PASSWORD}
    DB_SCHEMA: _analytics
    LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
    LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
    LOGFLARE_SINGLE_TENANT: true
    LOGFLARE_SUPABASE_MODE: true
    # Comment variables to use Big Query backend for analytics
    POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
    POSTGRES_BACKEND_SCHEMA: _analytics
    LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
    # Uncomment to use Big Query backend for analytics
    # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
    # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
# Comment out everything below this point if you are using an external Postgres database
# The Supabase Postgres image (with extensions and init SQL baked in).
sdp-db:
  container_name: sdp-supabase-db
  image: supabase/postgres:15.8.1.085
  restart: unless-stopped
  volumes:
    - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
    # Must be superuser to create event trigger
    - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
    # Must be superuser to alter reserved role
    - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
    # Initialize the database settings with JWT_SECRET and JWT_EXP
    - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
    # PGDATA directory is persisted between restarts
    - ./volumes/db/data:/var/lib/postgresql/data:Z
    # Changes required for internal supabase data such as _analytics
    - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
    # Changes required for Analytics support
    - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
    # Changes required for Pooler support
    - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
    # Use named volume to persist pgsodium decryption key between restarts
    - db-config:/etc/postgresql-custom
  healthcheck:
    test:
      [
        "CMD",
        "pg_isready",
        "-U",
        "postgres",
        "-h",
        "localhost"
      ]
    interval: 5s
    timeout: 5s
    retries: 10
  depends_on:
    # Start Vector first so database logs are captured from the beginning.
    sdp-vector:
      condition: service_healthy
  environment:
    # In-container clients connect over the Unix socket directory.
    POSTGRES_HOST: /var/run/postgresql
    PGPORT: ${POSTGRES_PORT}
    POSTGRES_PORT: ${POSTGRES_PORT}
    PGPASSWORD: ${POSTGRES_PASSWORD}
    POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    PGDATABASE: ${POSTGRES_DB}
    POSTGRES_DB: ${POSTGRES_DB}
    JWT_SECRET: ${JWT_SECRET}
    JWT_EXP: ${JWT_EXPIRY}
  command:
    [
      "postgres",
      "-c",
      "config_file=/etc/postgresql/postgresql.conf",
      "-c",
      "log_min_messages=fatal" # prevents Realtime polling queries from appearing in logs
    ]
# Vector ships container logs (read from the Docker socket) to Logflare.
sdp-vector:
  container_name: sdp-supabase-vector
  image: timberio/vector:0.53.0-alpine
  restart: unless-stopped
  volumes:
    - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
    # Read-only Docker socket so Vector can collect container logs.
    - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z
  healthcheck:
    # Probe via the sdp-vector service alias: the upstream hostname "vector"
    # no longer resolves after the sdp- service rename, so the previous URL
    # (http://vector:9001/health) made this check always fail — and sdp-db
    # waits on this healthcheck, blocking the whole stack.
    test:
      [
        "CMD",
        "wget",
        "--no-verbose",
        "--tries=1",
        "--spider",
        "http://sdp-vector:9001/health"
      ]
    timeout: 5s
    interval: 5s
    retries: 3
  environment:
    # Token used to push log events into Logflare.
    LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
  command:
    [
      "--config",
      "/etc/vector/vector.yml"
    ]
  security_opt:
    # Disable SELinux labeling so the socket bind-mount works on enforcing hosts.
    - "label=disable"
# Update the DATABASE_URL if you are using an external Postgres database
# Supavisor connection pooler (session + transaction modes).
sdp-supavisor:
  container_name: sdp-supabase-pooler
  image: supabase/supavisor:2.7.4
  restart: unless-stopped
  ports:
    # Session-mode Postgres on host 5434, transaction-mode pooling on 6544.
    - 5434:5432
    - 6544:6543
  volumes:
    - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
  healthcheck:
    test:
      [
        "CMD",
        "curl",
        "-sSfL",
        "--head",
        "-o",
        "/dev/null",
        "http://127.0.0.1:4000/api/health"
      ]
    interval: 10s
    timeout: 5s
    retries: 5
  depends_on:
    sdp-db:
      condition: service_healthy
    sdp-analytics:
      condition: service_healthy
  environment:
    PORT: 4000
    POSTGRES_PORT: ${POSTGRES_PORT}
    POSTGRES_DB: ${POSTGRES_DB}
    POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    # Supavisor keeps its own state in the internal _supabase database.
    DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
    CLUSTER_POSTGRES: true
    SECRET_KEY_BASE: ${SECRET_KEY_BASE}
    VAULT_ENC_KEY: ${VAULT_ENC_KEY}
    API_JWT_SECRET: ${JWT_SECRET}
    METRICS_JWT_SECRET: ${JWT_SECRET}
    REGION: local
    ERL_AFLAGS: -proto_dist inet_tcp
    POOLER_TENANT_ID: ${POOLER_TENANT_ID}
    POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
    POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
    POOLER_POOL_MODE: transaction
    DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE}
  # Run migrations, load the tenant config, then start the server.
  command:
    [
      "/bin/sh",
      "-c",
      "/app/bin/migrate && /app/bin/supavisor eval \"$$(cat /etc/pooler/pooler.exs)\" && /app/bin/server"
    ]
volumes:
  # Persists /etc/postgresql-custom (pgsodium key) across restarts.
  db-config:
# External network configuration for integration with other services
networks:
  default:
    name: shared_data_network
    external: true

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Supabase Setup Script
#
# Downloads the upstream Supabase configuration files (Kong gateway config,
# database init SQL, Vector log config, pooler config) into ./volumes and
# creates a .env from .env.example when one does not exist yet.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

echo "🚀 Setting up Supabase..."

# Create the directory layout expected by docker-compose.yml.
echo "📁 Creating directories..."
mkdir -p \
  volumes/api \
  volumes/db \
  volumes/functions \
  volumes/logs \
  volumes/pooler \
  volumes/storage \
  volumes/snippets

# All configuration files come from the official Supabase docker tree.
SUPABASE_REPO="https://raw.githubusercontent.com/supabase/supabase/master/docker"

# fetch <relative-path>: download one repo file into the same local path.
fetch() {
  curl -fsSL "$SUPABASE_REPO/$1" -o "$1"
}

echo "📥 Downloading configuration files..."
# Kong API Gateway configuration
fetch volumes/api/kong.yml
# Database initialization scripts
for name in realtime webhooks roles jwt _supabase logs pooler; do
  fetch "volumes/db/$name.sql"
done
# Vector logs configuration
fetch volumes/logs/vector.yml
# Pooler configuration
fetch volumes/pooler/pooler.exs

# Create .env file if it doesn't exist (never overwrite an existing one).
if [ ! -f .env ]; then
  echo "📝 Creating .env file from .env.example..."
  cp .env.example .env
  echo ""
  echo "⚠️ IMPORTANT: Please edit .env file and update the following:"
  echo "   - POSTGRES_PASSWORD"
  echo "   - JWT_SECRET (generate with: openssl rand -base64 32)"
  echo "   - SECRET_KEY_BASE (generate with: openssl rand -base64 32)"
  echo "   - VAULT_ENC_KEY (generate with: openssl rand -base64 32)"
  echo "   - PG_META_CRYPTO_KEY (generate with: openssl rand -base64 32)"
  echo "   - LOGFLARE_PUBLIC_ACCESS_TOKEN (generate with: openssl rand -base64 32)"
  echo "   - LOGFLARE_PRIVATE_ACCESS_TOKEN (generate with: openssl rand -base64 32)"
  echo "   - DASHBOARD_USERNAME and DASHBOARD_PASSWORD"
  echo ""
fi

echo "✅ Supabase setup completed!"
echo ""
echo "Next steps:"
echo "1. Edit .env file with your configuration"
echo "2. Ensure shared_data_network exists: cd ../00-network && bash create-network.sh"
echo "3. Start Supabase: docker compose up -d"
echo "4. Access Supabase Studio at: http://localhost:3010"
echo "5. Access Supabase API at: http://localhost:8100"

View File

@@ -0,0 +1,354 @@
#!/bin/bash
# Supabase Setup Script (Official Method)
# Based on: https://supabase.com/docs/guides/self-hosting/docker
#
# Clones the official Supabase repo, copies its docker files, renames every
# service/container with an sdp- prefix, moves published ports off their
# defaults, attaches the stack to the external shared_data_network, generates
# secrets and pulls images. Safe to re-run (network block append is guarded).
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TEMP_DIR="$SCRIPT_DIR/temp"
PROJECT_DIR="$SCRIPT_DIR"
COMPOSE_FILE="$PROJECT_DIR/docker-compose.yml"

# sedi <args...>: in-place sed that works with both GNU sed (Linux) and BSD
# sed (macOS); BSD requires an explicit empty backup suffix after -i.
# Centralizing this removes the duplicated macOS/Linux branches that had
# already drifted apart (the vector.yml rewrites were Linux-only before).
sedi() {
  if [[ "$OSTYPE" == "darwin"* ]]; then
    sed -i '' "$@"
  else
    sed -i "$@"
  fi
}

echo "🚀 Setting up Supabase (Official Method)..."
echo ""

# Check prerequisites
echo "📋 Checking prerequisites..."
if ! command -v git &> /dev/null; then
  echo "❌ Error: git is not installed. Please install git first."
  exit 1
fi
if ! command -v docker &> /dev/null; then
  echo "❌ Error: docker is not installed. Please install Docker first."
  exit 1
fi
if ! docker compose version &> /dev/null; then
  echo "❌ Error: docker compose is not available. Please install Docker Compose."
  exit 1
fi
echo "✅ Prerequisites check passed"
echo ""

# Step 1: Clone Supabase repository
echo "📥 Step 1: Cloning Supabase repository..."
if [ -d "$TEMP_DIR" ]; then
  echo "   Removing existing temp directory..."
  rm -rf "$TEMP_DIR"
fi
git clone --depth 1 https://github.com/supabase/supabase "$TEMP_DIR"
echo "✅ Repository cloned"
echo ""

# Step 2: Copy docker files
echo "📁 Step 2: Copying Docker configuration files..."
cp -f "$TEMP_DIR/docker/docker-compose.yml" "$PROJECT_DIR/"
echo "   ✓ docker-compose.yml"
# Keep a timestamped backup of any previous volumes directory.
if [ -d "$PROJECT_DIR/volumes" ]; then
  echo "   Backing up existing volumes directory..."
  mv "$PROJECT_DIR/volumes" "$PROJECT_DIR/volumes.backup.$(date +%Y%m%d_%H%M%S)"
fi
mkdir -p "$PROJECT_DIR/volumes"
cp -rf "$TEMP_DIR/docker/volumes" "$PROJECT_DIR/"
echo "   ✓ volumes/ (config files)"
# utils/ contains the official generate-keys.sh used in step 5.
if [ -d "$PROJECT_DIR/utils" ]; then
  rm -rf "$PROJECT_DIR/utils"
fi
cp -rf "$TEMP_DIR/docker/utils" "$PROJECT_DIR/"
chmod +x "$PROJECT_DIR/utils/generate-keys.sh"
echo "   ✓ utils/ (including generate-keys.sh)"
# Never overwrite an existing .env; ship the fresh template alongside instead.
if [ ! -f "$PROJECT_DIR/.env" ]; then
  echo "   Creating .env from official .env.example..."
  cp "$TEMP_DIR/docker/.env.example" "$PROJECT_DIR/.env"
  echo "   ✓ .env (from official template)"
else
  echo "   ⚠️  .env already exists, skipping. Backup created as .env.example.new"
  cp "$TEMP_DIR/docker/.env.example" "$PROJECT_DIR/.env.example.new"
fi
echo "✅ Files copied"
echo ""

# Step 3: Update ports to avoid conflicts with existing services
echo "🔧 Step 3: Updating ports to avoid conflicts with existing services..."
sedi 's/\${KONG_HTTP_PORT}:8000/8100:8000/g' "$COMPOSE_FILE"
sedi 's/\${KONG_HTTPS_PORT}:8443/8443:8443/g' "$COMPOSE_FILE"
sedi 's/\${POSTGRES_PORT}:5432/5434:5432/g' "$COMPOSE_FILE"
sedi 's/\${POOLER_PROXY_PORT_TRANSACTION}:6543/6544:6543/g' "$COMPOSE_FILE"
# Keep .env in sync with the remapped ports.
sedi 's/KONG_HTTP_PORT=8000/KONG_HTTP_PORT=8100/g' "$PROJECT_DIR/.env"
sedi 's/POSTGRES_PORT=5432/POSTGRES_PORT=5434/g' "$PROJECT_DIR/.env"
sedi 's/POOLER_PROXY_PORT_TRANSACTION=6543/POOLER_PROXY_PORT_TRANSACTION=6544/g' "$PROJECT_DIR/.env"
echo "   ✓ Kong HTTP port: 8100 (instead of 8000)"
echo "   ✓ PostgreSQL port: 5434 (instead of 5432)"
echo "   ✓ Pooler port: 6544 (instead of 6543)"
echo "✅ Ports updated"
echo ""

# Step 3.5: Update service names to use sdp- prefix
echo "🔧 Step 3.5: Updating service names to use sdp- prefix..."
# NOTE(review): the patterns below assume the upstream docker-compose.yml
# layout (service keys at 2-space indent, depends_on entries at 6-space
# indent) — re-check after pulling a new upstream revision.
for svc in studio kong auth rest realtime storage imgproxy meta functions analytics db vector supavisor; do
  sedi "s/^  $svc:/  sdp-$svc:/" "$COMPOSE_FILE"
done
# depends_on references.
for svc in analytics db rest imgproxy vector; do
  sedi "s/^      $svc:/      sdp-$svc:/" "$COMPOSE_FILE"
done
# Container names.
for name in studio kong auth rest storage imgproxy meta edge-functions analytics db vector pooler; do
  sedi "s/container_name: supabase-$name/container_name: sdp-supabase-$name/" "$COMPOSE_FILE"
done
sedi 's/container_name: realtime-dev.supabase-realtime/container_name: sdp-realtime-dev.supabase-realtime/' "$COMPOSE_FILE"
# Inter-service URLs in environment variables AND healthchecks. The
# healthcheck hostnames (studio:3000, storage:5000, vector:9001) must be
# rewritten too — previously they were missed, leaving those healthchecks
# pointing at hostnames that no longer resolve (services stuck unhealthy).
for endpoint in meta:8080 kong:8000 analytics:4000 rest:3000 imgproxy:5001 studio:3000 storage:5000 vector:9001; do
  sedi "s|http://$endpoint|http://sdp-$endpoint|g" "$COMPOSE_FILE"
done
echo "   ✓ docker-compose.yml updated"

# kong.yml routes traffic by service hostname; rewrite to the sdp- names.
for svc in auth rest storage functions analytics meta studio; do
  sedi "s|http://$svc:|http://sdp-$svc:|g" "$PROJECT_DIR/volumes/api/kong.yml"
done
sedi 's|http://realtime-dev\.supabase-realtime:|http://sdp-realtime-dev.supabase-realtime:|g' "$PROJECT_DIR/volumes/api/kong.yml"
echo "   ✓ kong.yml updated"

# vector.yml filters logs by container name; rewrite those names too.
# (Previously this ran only on Linux — now applied on macOS as well.)
sedi 's|http://analytics:|http://sdp-analytics:|g' "$PROJECT_DIR/volumes/logs/vector.yml"
# Map upstream container names (supabase-db, supabase-kong, ...) onto the
# sdp-supabase-* names assigned above. The earlier substitution here was
# 's|supabase|sdp-|g', which produced names like "sdp--kong" that match no
# running container.
# NOTE(review): verify the rewritten vector.yml names against `docker ps`
# after setup.
sedi 's|supabase-|sdp-supabase-|g' "$PROJECT_DIR/volumes/logs/vector.yml"
# The realtime container keeps its special subdomain-style name.
sedi 's|realtime-dev.sdp-supabase-realtime|sdp-realtime-dev.supabase-realtime|g' "$PROJECT_DIR/volumes/logs/vector.yml"
echo "   ✓ vector.yml updated"
echo "✅ Service names updated in config files"
echo ""

# Step 4: Add external network configuration (idempotent: skipped when a
# previous run already appended the block, so re-running cannot corrupt the
# compose file with duplicate top-level "networks:" keys).
echo "🌐 Step 4: Configuring external network..."
if ! grep -q 'name: shared_data_network' "$COMPOSE_FILE"; then
  cat >> "$COMPOSE_FILE" << 'EOF'

# External network configuration for integration with other services
networks:
  default:
    name: shared_data_network
    external: true
EOF
fi
echo "✅ Network configured"
echo ""

# Step 5: Generate secrets
echo "🔐 Step 5: Generating secrets and API keys..."
echo ""
echo "   Running official generate-keys.sh script..."
echo "   (This will update your .env file with secure random keys)"
echo ""
cd "$PROJECT_DIR"
bash ./utils/generate-keys.sh
echo ""
echo "✅ Secrets generated"
echo ""

# Step 6: Pull Docker images
echo "📦 Step 6: Pulling Docker images..."
docker compose pull
echo "✅ Images pulled"
echo ""

# Cleanup
echo "🧹 Cleaning up temporary files..."
rm -rf "$TEMP_DIR"
echo "✅ Cleanup complete"
echo ""

# Final instructions (static text: emitted as one heredoc instead of ~60 echos).
cat << 'EOF'
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✅ Supabase setup completed successfully!
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

📝 IMPORTANT: Review and update the following in .env file:

   1. Database Password:
      POSTGRES_PASSWORD=<your-secure-password>

   2. Dashboard Credentials:
      DASHBOARD_USERNAME=<your-username>
      DASHBOARD_PASSWORD=<your-secure-password>
      (Password must include at least one letter)

   3. Public URLs (if using domain):
      SUPABASE_PUBLIC_URL=http://your-domain:8100
      API_EXTERNAL_URL=http://your-domain:8100
      SITE_URL=http://your-domain:3000

   Note: JWT secrets and API keys have been auto-generated

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🚀 Next steps:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

   1. Review .env file:
      nano .env

   2. Ensure shared_data_network exists:
      cd ../00-network && bash create-network.sh

   3. Start Supabase:
      cd ../02-supabase
      docker compose up -d

   4. Check service status:
      docker compose ps

   5. View logs:
      docker compose logs -f

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🌐 Access Points (after starting):
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

   Studio:       http://localhost:8100
   API Gateway:  http://localhost:8100
   PostgreSQL:   localhost:5434
   Pooler:       localhost:6544

   REST API:     http://localhost:8100/rest/v1/
   Auth API:     http://localhost:8100/auth/v1/
   Storage API:  http://localhost:8100/storage/v1/
   Realtime:     http://localhost:8100/realtime/v1/

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

📚 Documentation: https://supabase.com/docs/guides/self-hosting/docker

EOF
View File

@@ -0,0 +1,157 @@
#!/bin/sh
#
# Portions of this code are derived from Inder Singh's update-db-pass.sh
# Copyright 2025 Inder Singh. Licensed under Apache License 2.0.
# Original source:
# https://github.com/singh-inder/supabase-automated-self-host/blob/main/docker/update-db-pass.sh
#
# GitHub discussion here:
# https://github.com/supabase/supabase/issues/22605#issuecomment-3323382144
#
# Changed:
# - POSIX shell compatibility
# - No hardcoded values for database service and admin user
# - Use .env for the admin user and database service port
# - Does _not_ set password for supabase_read_only_user (this role is not
# supposed to have a password)
# - Print all values and confirm before updating
# - Stop on any errors
#
# Heads up:
# - Updating _analytics.source_backends is not needed after PR logflare#2069
# - Newer Logflare versions use a different table and update connection string
#
set -e

# Require Docker Compose v2 and a local .env before doing anything.
if ! docker compose version > /dev/null 2>&1; then
  echo "Docker Compose not found."
  exit 1
fi

if [ ! -f .env ]; then
  echo "Missing .env file. Exiting."
  exit 1
fi

# Generate random hex-only password to avoid issues with SQL/shell
new_passwd="$(openssl rand -hex 16)"
# If replacing with a custom password, avoid using @/?#:&
# https://supabase.com/docs/guides/database/postgres/roles#passwords
# new_passwd="d0notUseSpecialSymbolsForPq123-"

# Locate the running Postgres service by its image name.  The prefix is used
# as an anchored basic regex, so the separator must be a literal '/'
# (a '.' would match any character, e.g. "supabaseXpostgres:").
db_image_prefix="supabase/postgres:"
compose_output=$(docker compose ps \
  --format '{{.Image}}\t{{.Service}}\t{{.Status}}' 2>/dev/null | \
  grep -m1 "^$db_image_prefix" || true)

if [ -z "$compose_output" ]; then
  echo "Postgres container not found. Exiting."
  exit 1
fi

# Split the tab-separated "image<TAB>service<TAB>status" record.
db_image=$(echo "$compose_output" | cut -f1)
db_srv_name=$(echo "$compose_output" | cut -f2)
db_srv_status=$(echo "$compose_output" | cut -f3)

# Only proceed when the container reports an "Up..." status.
case "$db_srv_status" in
  Up*)
    ;;
  *)
    echo "Postgres container status: $db_srv_status"
    echo "Exiting."
    exit 1
    ;;
esac
# Resolve the Postgres port: prefer the .env value, otherwise default 5432.
# Track where the value came from so the summary below can say so.
db_srv_port=$(grep "^POSTGRES_PORT=" .env | cut -d '=' -f 2)
if [ -n "$db_srv_port" ]; then
  port_source=" (.env):"
else
  db_srv_port="5432"
  port_source=" (default):"
fi

db_admin_user="supabase_admin"

# Show every value we are about to use and require explicit confirmation.
echo ""
echo "*** Check configuration below before updating database passwords! ***"
echo ""
echo "Service name: $db_srv_name"
echo "Service status: $db_srv_status"
echo "Service port${port_source} $db_srv_port"
echo "Image: $db_image"
echo ""
echo "Admin user: $db_admin_user"

# Without a terminal on stdin we cannot confirm — bail out safely.
if [ ! -t 0 ]; then
  echo ""
  echo "Running non-interactively. Not updating passwords."
  exit 0
fi

echo "New database password: $new_passwd"
echo ""
printf "Update database passwords? (y/N) "
read -r REPLY
case "$REPLY" in
  [Yy])
    ;;
  *)
    echo "Canceled. Not updating passwords."
    exit 0
    ;;
esac
echo "Updating passwords..."
echo "Connecting to the database service container..."
docker compose exec -T "$db_srv_name" psql -U "$db_admin_user" -d "_supabase" -v ON_ERROR_STOP=1 <<EOF
alter user anon with password '${new_passwd}';
alter user authenticated with password '${new_passwd}';
alter user authenticator with password '${new_passwd}';
alter user dashboard_user with password '${new_passwd}';
alter user pgbouncer with password '${new_passwd}';
alter user postgres with password '${new_passwd}';
alter user service_role with password '${new_passwd}';
alter user supabase_admin with password '${new_passwd}';
alter user supabase_auth_admin with password '${new_passwd}';
alter user supabase_functions_admin with password '${new_passwd}';
alter user supabase_replication_admin with password '${new_passwd}';
alter user supabase_storage_admin with password '${new_passwd}';
DROP SCHEMA _supavisor CASCADE;
create schema if not exists _supavisor;
alter schema _supavisor owner to supabase_admin;
DO \$\$
BEGIN
IF EXISTS (
SELECT 1
FROM information_schema.tables
WHERE table_schema = '_analytics'
AND table_name = 'source_backends'
) THEN
UPDATE _analytics.source_backends
SET config = jsonb_set(
config,
'{url}',
'"postgresql://${db_admin_user}:${new_passwd}@${db_srv_name}:${db_srv_port}/postgres"',
false
)
WHERE type = 'postgres';
END IF;
END
\$\$;
EOF
echo "Updating POSTGRES_PASSWORD in .env..."
sed -i.old "s|^POSTGRES_PASSWORD=.*$|POSTGRES_PASSWORD=$new_passwd|" .env
echo ""
echo "Success. To update and restart containers use:"
echo ""
echo "docker compose up -d --force-recreate"
echo ""

View File

@@ -0,0 +1,123 @@
#!/bin/sh
#
# Portions of this code are derived from Inder Singh's setup.sh shell script.
# Copyright 2025 Inder Singh. Licensed under Apache License 2.0.
# Original source: https://github.com/singh-inder/supabase-automated-self-host/blob/main/setup.sh
#
set -e

# Random-value helpers, all backed by openssl.
gen_hex() { openssl rand -hex "$1"; }
gen_base64() { openssl rand -base64 "$1"; }

# Base64url-encode stdin: standard base64, '+/' mapped to '-_', '=' padding
# stripped (RFC 7515 style, as required for JWT segments).
base64_url_encode() {
  openssl enc -base64 -A | tr '+/' '-_' | tr -d '='
}

# Build an HS256 JWT from the JSON payload in $1, using the globals $header
# (JSON header) and $jwt_secret (HMAC key). Prints the token to stdout
# without a trailing newline.
gen_token() {
  enc_header=$(printf %s "$header" | base64_url_encode)
  enc_payload=$(printf %s "$1" | base64_url_encode)
  signing_input="${enc_header}.${enc_payload}"
  sig=$(printf %s "$signing_input" | openssl dgst -binary -sha256 -hmac "$jwt_secret" | base64_url_encode)
  printf '%s' "${signing_input}.${sig}"
}
# Refuse to run without openssl; every secret below depends on it.
if ! command -v openssl >/dev/null 2>&1; then
  echo "Error: openssl is required but not found."
  exit 1
fi

# JWT signing secret plus the fixed header consumed by gen_token().
jwt_secret="$(gen_base64 30)"
header='{"alg":"HS256","typ":"JWT"}'

# Token validity window: issued now, expiring in 5 years.
iat=$(date +%s)
exp=$((iat + 5 * 3600 * 24 * 365))

# Keep the JSON compact (no spaces) so tokens match https://www.jwt.io/
# output for the same claims.
anon_payload="{\"role\":\"anon\",\"iss\":\"supabase\",\"iat\":$iat,\"exp\":$exp}"
service_role_payload="{\"role\":\"service_role\",\"iss\":\"supabase\",\"iat\":$iat,\"exp\":$exp}"

anon_key=$(gen_token "$anon_payload")
service_role_key=$(gen_token "$service_role_payload")

# Independent random secrets for the remaining services.
secret_key_base=$(gen_base64 48)
vault_enc_key=$(gen_hex 16)
pg_meta_crypto_key=$(gen_base64 24)
logflare_public_access_token=$(gen_base64 24)
logflare_private_access_token=$(gen_base64 24)
s3_protocol_access_key_id=$(gen_hex 16)
s3_protocol_access_key_secret=$(gen_hex 32)
minio_root_password=$(gen_hex 16)
echo ""
echo "JWT_SECRET=${jwt_secret}"
echo ""
#echo "Issued at: $iat"
#echo "Expire: $exp"
echo "ANON_KEY=${anon_key}"
echo "SERVICE_ROLE_KEY=${service_role_key}"
echo ""
echo "SECRET_KEY_BASE=${secret_key_base}"
echo "VAULT_ENC_KEY=${vault_enc_key}"
echo "PG_META_CRYPTO_KEY=${pg_meta_crypto_key}"
echo "LOGFLARE_PUBLIC_ACCESS_TOKEN=${logflare_public_access_token}"
echo "LOGFLARE_PRIVATE_ACCESS_TOKEN=${logflare_private_access_token}"
echo "S3_PROTOCOL_ACCESS_KEY_ID=${s3_protocol_access_key_id}"
echo "S3_PROTOCOL_ACCESS_KEY_SECRET=${s3_protocol_access_key_secret}"
echo "MINIO_ROOT_PASSWORD=${minio_root_password}"
echo ""
postgres_password=$(gen_hex 16)
dashboard_password=$(gen_hex 16)
echo "POSTGRES_PASSWORD=${postgres_password}"
echo "DASHBOARD_PASSWORD=${dashboard_password}"
echo ""
if ! test -t 0; then
echo "Running non-interactively. Skipping .env update."
exit 0
fi
printf "Update .env file? (y/N) "
read -r REPLY
case "$REPLY" in
[Yy])
;;
*)
echo "Not updating .env"
exit 0
;;
esac
echo "Updating .env..."
sed \
-i.old \
-e "s|^JWT_SECRET=.*$|JWT_SECRET=${jwt_secret}|" \
-e "s|^ANON_KEY=.*$|ANON_KEY=${anon_key}|" \
-e "s|^SERVICE_ROLE_KEY=.*$|SERVICE_ROLE_KEY=${service_role_key}|" \
-e "s|^SECRET_KEY_BASE=.*$|SECRET_KEY_BASE=${secret_key_base}|" \
-e "s|^VAULT_ENC_KEY=.*$|VAULT_ENC_KEY=${vault_enc_key}|" \
-e "s|^PG_META_CRYPTO_KEY=.*$|PG_META_CRYPTO_KEY=${pg_meta_crypto_key}|" \
-e "s|^LOGFLARE_PUBLIC_ACCESS_TOKEN=.*$|LOGFLARE_PUBLIC_ACCESS_TOKEN=${logflare_public_access_token}|" \
-e "s|^LOGFLARE_PRIVATE_ACCESS_TOKEN=.*$|LOGFLARE_PRIVATE_ACCESS_TOKEN=${logflare_private_access_token}|" \
-e "s|^S3_PROTOCOL_ACCESS_KEY_ID=.*$|S3_PROTOCOL_ACCESS_KEY_ID=${s3_protocol_access_key_id}|" \
-e "s|^S3_PROTOCOL_ACCESS_KEY_SECRET=.*$|S3_PROTOCOL_ACCESS_KEY_SECRET=${s3_protocol_access_key_secret}|" \
-e "s|^MINIO_ROOT_PASSWORD=.*$|MINIO_ROOT_PASSWORD=${minio_root_password}|" \
-e "s|^POSTGRES_PASSWORD=.*$|POSTGRES_PASSWORD=${postgres_password}|" \
-e "s|^DASHBOARD_PASSWORD=.*$|DASHBOARD_PASSWORD=${dashboard_password}|" \
.env