add files infra docker service for data platform
This commit is contained in:
25
.env.global
Normal file
25
.env.global
Normal file
@@ -0,0 +1,25 @@
|
||||
PROJECT_NAME=sriphat-data
|
||||
DOMAIN=sriphat.local
|
||||
TZ=Asia/Bangkok
|
||||
|
||||
DB_HOST=postgres
|
||||
DB_PORT=5432
|
||||
DB_USER=postgres
|
||||
# NOTE(review): real credentials are committed here — rotate these values and
# move secrets to an untracked env file or a secret store before production use.
DB_PASSWORD=Secure_Hospital_Pass_2026
|
||||
DB_NAME=postgres
|
||||
DB_SSLMODE=prefer
|
||||
|
||||
POSTGRES_PASSWORD=Secure_Hospital_Pass_2026
|
||||
|
||||
KEYCLOAK_ADMIN=admin
|
||||
KEYCLOAK_ADMIN_PASSWORD=admin_secret_pass_2026
|
||||
|
||||
SUPERSET_SECRET_KEY=superset_random_secret_key_change_me_2026
|
||||
SUPERSET_ADMIN_USERNAME=admin
|
||||
SUPERSET_ADMIN_PASSWORD=admin
|
||||
|
||||
ROOT_PATH=/apiservice
|
||||
APP_NAME=APIsService
|
||||
ADMIN_SECRET_KEY=apiservice_admin_secret_2026
|
||||
ADMIN_USERNAME=admin
|
||||
ADMIN_PASSWORD=change_me_2026
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
.env
|
||||
.env.local
|
||||
__pycache__/
|
||||
*.pyc
|
||||
.venv/
|
||||
@@ -7,4 +8,7 @@ venv/
|
||||
.pytest_cache/
|
||||
.mypy_cache/
|
||||
ruff_cache/
|
||||
|
||||
*/data/
|
||||
01-infra/letsencrypt/
|
||||
.windsurf/
|
||||
|
||||
18
00-network/README.md
Normal file
18
00-network/README.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# 00-network: Shared Network Setup
|
||||
|
||||
## Purpose
|
||||
Creates the `shared_data_network` Docker network that all services use to communicate.
|
||||
|
||||
## Run
|
||||
```bash
|
||||
bash create-network.sh
|
||||
```
|
||||
|
||||
## Verify
|
||||
```bash
|
||||
docker network ls | grep shared_data_network
|
||||
docker network inspect shared_data_network
|
||||
```
|
||||
|
||||
## Note
|
||||
This network must be created before starting any other services.
|
||||
2
00-network/create-network.sh
Normal file
2
00-network/create-network.sh
Normal file
@@ -0,0 +1,2 @@
|
||||
#!/bin/bash
# Idempotently create the Docker network shared by all compose stacks.
# If it already exists, the error from `docker network create` is suppressed
# and a notice is printed instead, so the script is safe to re-run.
docker network create shared_data_network 2>/dev/null || echo "Network shared_data_network already exists"
|
||||
17
01-infra/README.md
Normal file
17
01-infra/README.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# 01-infra: Infrastructure Layer
|
||||
|
||||
## Services
|
||||
- **Nginx Proxy Manager** (port 80, 443, 81)
|
||||
- **Keycloak** (port 8080)
|
||||
- **PostgreSQL** (internal only)
|
||||
|
||||
## Start
|
||||
```bash
|
||||
docker compose --env-file ../.env.global up -d
|
||||
```
|
||||
|
||||
## Access
|
||||
- Nginx Proxy Manager: http://localhost:81
|
||||
- Default: admin@example.com / changeme
|
||||
- Keycloak: http://localhost:8080
|
||||
- Admin: see KEYCLOAK_ADMIN in .env.global
|
||||
62
01-infra/docker-compose.yml
Normal file
62
01-infra/docker-compose.yml
Normal file
@@ -0,0 +1,62 @@
|
||||
services:
  nginx-proxy:
    image: jc21/nginx-proxy-manager:latest
    container_name: nginx-proxy-manager
    ports:
      - "80:80"
      - "443:443"
      - "81:81"  # admin UI
    volumes:
      - ./data:/data
      - ./letsencrypt:/etc/letsencrypt
    environment:
      - TZ=${TZ:-Asia/Bangkok}
    networks:
      - shared_data_network
    restart: unless-stopped

  keycloak:
    image: quay.io/keycloak/keycloak:23.0
    container_name: keycloak
    command: start-dev
    environment:
      # Keycloak 23.x bootstraps the initial admin account via
      # KEYCLOAK_ADMIN / KEYCLOAK_ADMIN_PASSWORD. The KC_BOOTSTRAP_ADMIN_*
      # names only exist from Keycloak 26 onward and are silently ignored
      # by 23.x, which left this deployment without an admin user.
      KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN}
      KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
      KC_DB: postgres
      KC_DB_URL: jdbc:postgresql://postgres:5432/${DB_NAME}
      KC_DB_USERNAME: ${DB_USER}
      KC_DB_PASSWORD: ${DB_PASSWORD}
      KC_HOSTNAME_STRICT: "false"
      KC_HTTP_ENABLED: "true"
      # Trust X-Forwarded-* headers set by Nginx Proxy Manager.
      KC_PROXY: edge
    ports:
      - "8080:8080"
    networks:
      - shared_data_network
    restart: unless-stopped
    depends_on:
      # Wait for the database healthcheck instead of mere container start,
      # so Keycloak does not race its first connection attempt.
      postgres:
        condition: service_healthy

  postgres:
    image: postgres:15-alpine
    container_name: postgres
    environment:
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_USER: ${DB_USER}
      POSTGRES_DB: ${DB_NAME}
      TZ: ${TZ:-Asia/Bangkok}
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
      # Scripts here run once, on first initialization of the data dir.
      - ./init:/docker-entrypoint-initdb.d
    networks:
      - shared_data_network
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER}"]
      interval: 10s
      timeout: 5s
      retries: 5

networks:
  shared_data_network:
    external: true
|
||||
9
01-infra/init/01-create-schemas.sql
Normal file
9
01-infra/init/01-create-schemas.sql
Normal file
@@ -0,0 +1,9 @@
|
||||
-- Bootstrap schemas for the platform services. Runs once via
-- /docker-entrypoint-initdb.d on first start of the postgres container;
-- all statements are idempotent.
CREATE SCHEMA IF NOT EXISTS fastapi;
CREATE SCHEMA IF NOT EXISTS operationbi;
CREATE SCHEMA IF NOT EXISTS raw_data;
CREATE SCHEMA IF NOT EXISTS analytics;

-- The postgres superuser already owns these; the explicit grants document
-- intent and keep behavior stable if ownership is changed later.
GRANT ALL ON SCHEMA fastapi TO postgres;
GRANT ALL ON SCHEMA operationbi TO postgres;
GRANT ALL ON SCHEMA raw_data TO postgres;
GRANT ALL ON SCHEMA analytics TO postgres;
|
||||
10
03-apiservice/.gitignore
vendored
Normal file
10
03-apiservice/.gitignore
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
.env
|
||||
__pycache__/
|
||||
*.pyc
|
||||
.venv/
|
||||
venv/
|
||||
.python-version
|
||||
.pytest_cache/
|
||||
.mypy_cache/
|
||||
ruff_cache/
|
||||
.windsurf/
|
||||
17
03-apiservice/Dockerfile
Normal file
17
03-apiservice/Dockerfile
Normal file
@@ -0,0 +1,17 @@
|
||||
FROM python:3.12-slim

# No .pyc files in the image; unbuffered stdout so logs reach `docker logs`.
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

WORKDIR /app

# Install dependencies before copying source so this layer is cached
# across code-only changes.
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt

COPY ./app /app/app

ENV TZ=Asia/Bangkok

EXPOSE 8000

# Gunicorn managing uvicorn workers; access/error logs to stdout/stderr.
CMD ["gunicorn","-k","uvicorn.workers.UvicornWorker","app.main:app","--bind","0.0.0.0:8000","--workers","2","--access-logfile","-","--error-logfile","-"]
|
||||
13
03-apiservice/README.md
Normal file
13
03-apiservice/README.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# 03-apiservice: Custom FastAPI Service
|
||||
|
||||
## Build & Start
|
||||
```bash
|
||||
docker compose --env-file ../.env.global up --build -d
|
||||
```
|
||||
|
||||
## Access
|
||||
Internal only - access via Nginx Proxy Manager at `/apiservice`
|
||||
|
||||
## Admin UI
|
||||
- Login: http://<domain>/apiservice/admin/
|
||||
- Generate API Key: POST /apiservice/admin/api-keys/generate
|
||||
0
03-apiservice/app/__init__.py
Normal file
0
03-apiservice/app/__init__.py
Normal file
107
03-apiservice/app/admin.py
Normal file
107
03-apiservice/app/admin.py
Normal file
@@ -0,0 +1,107 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from fastapi import HTTPException, Request, status
|
||||
from sqladmin import Admin, ModelView
|
||||
from sqladmin.authentication import AuthenticationBackend
|
||||
from starlette.responses import RedirectResponse
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from wtforms import StringField
|
||||
from wtforms.validators import Optional
|
||||
|
||||
from app.core.config import settings
|
||||
from app.db.engine import engine
|
||||
from app.db.models import ApiClient, ApiKey
|
||||
from app.security.api_key import generate_api_key, get_prefix, hash_api_key
|
||||
|
||||
|
||||
class AdminAuth(AuthenticationBackend):
    """Session-based authentication for the sqladmin UI.

    Credentials are checked against ADMIN_USERNAME / ADMIN_PASSWORD from
    settings; a successful login stores ``admin: True`` in the session.
    """

    async def login(self, request: Request) -> bool:
        """Validate the posted login form and open an admin session."""
        form = await request.form()
        username = form.get("username")
        password = form.get("password")

        # Constant-time comparison so a failed check does not leak how many
        # leading characters matched (timing side channel on plain ==).
        import secrets

        if (
            isinstance(username, str)
            and isinstance(password, str)
            and secrets.compare_digest(username, settings.ADMIN_USERNAME)
            and secrets.compare_digest(password, settings.ADMIN_PASSWORD)
        ):
            request.session.update({"admin": True})
            return True
        return False

    async def logout(self, request: Request) -> bool:
        """Drop the entire session, ending the admin login."""
        request.session.clear()
        return True

    async def authenticate(self, request: Request) -> bool:
        """Return True when the current session was opened via ``login``."""
        return bool(request.session.get("admin"))
|
||||
|
||||
|
||||
class ApiClientAdmin(ModelView, model=ApiClient):
    """Admin list/edit view for API clients."""

    # Columns shown in the list view: id, display name, active flag.
    column_list = [ApiClient.id, ApiClient.name, ApiClient.is_active]
|
||||
|
||||
|
||||
class ApiKeyAdmin(ModelView, model=ApiKey):
    """Admin view for API keys.

    The stored hash/prefix columns are never edited directly; the form
    instead exposes a ``plain_key`` field whose value is hashed on save.
    """

    # Derived/managed columns are kept out of the edit form.
    form_excluded_columns = [ApiKey.key_hash, ApiKey.key_prefix, ApiKey.created_at]

    form_extra_fields = {
        # Optional plaintext key; when provided it is hashed into key_hash.
        "plain_key": StringField("Plain Key", validators=[Optional()]),
        # Comma-separated permission names, parsed into a list on save.
        "permissions_csv": StringField("Permissions (comma)", validators=[Optional()]),
    }

    async def on_model_change(self, data: dict, model: ApiKey, is_created: bool, request: Request) -> None:
        """Translate the extra form fields into stored columns before commit."""
        plain_key = data.get("plain_key")
        if plain_key:
            model.key_prefix = get_prefix(plain_key)
            model.key_hash = hash_api_key(plain_key)

        permissions_csv = data.get("permissions_csv")
        if permissions_csv is not None:
            # Items emptied by stray commas/whitespace are dropped.
            perms = [p.strip() for p in permissions_csv.split(",") if p.strip()]
            model.permissions = perms
|
||||
|
||||
|
||||
def mount_admin(app):
    """Attach the sqladmin UI and admin-only API-key endpoints to ``app``.

    Registers the model views, a redirect from ``/admin`` to ``/admin/``,
    and a POST endpoint that creates a new API key for an existing client
    and returns the plaintext key exactly once.
    """
    auth_backend = AdminAuth(secret_key=settings.ADMIN_SECRET_KEY)
    # sqladmin mounts its own sub-application under /admin on ``app``.
    admin = Admin(app=app, engine=engine, authentication_backend=auth_backend)

    # Session factory shared (via closure) by the endpoints defined below.
    SessionLocal = sessionmaker(bind=engine, autoflush=False, autocommit=False)

    admin.add_view(ApiClientAdmin)
    admin.add_view(ApiKeyAdmin)

    # NOTE(review): this route is registered after the sqladmin mount at
    # /admin; depending on Starlette's route-matching order it may never be
    # reached — confirm the redirect is actually hit in practice.
    @app.get("/admin")
    async def _admin_redirect(request: Request):
        # Preserve any reverse-proxy prefix (ROOT_PATH) in the redirect.
        root_path = request.scope.get("root_path") or ""
        return RedirectResponse(url=f"{root_path}/admin/")

    @app.post("/admin/api-keys/generate")
    async def _admin_generate_api_key(
        request: Request,
        client_id: int,
        permissions: str = "",
        name: str | None = None,
    ):
        """Create an API key for ``client_id``; requires an admin session."""
        if not request.session.get("admin"):
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated")

        # Comma-separated permission names -> list, dropping empty items.
        perms = [p.strip() for p in permissions.split(",") if p.strip()]
        plain_key = generate_api_key()

        db = SessionLocal()
        try:
            client = db.get(ApiClient, client_id)
            if not client:
                raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Client not found")

            # Only the prefix and bcrypt hash are persisted, never the key.
            api_key = ApiKey(
                client_id=client_id,
                name=name,
                key_prefix=get_prefix(plain_key),
                key_hash=hash_api_key(plain_key),
                permissions=perms,
                is_active=True,
            )
            db.add(api_key)
            db.commit()
            db.refresh(api_key)

            # The plaintext key is only available in this response.
            return {"key_id": api_key.id, "api_key": plain_key, "permissions": perms}
        finally:
            db.close()
|
||||
0
03-apiservice/app/api/__init__.py
Normal file
0
03-apiservice/app/api/__init__.py
Normal file
0
03-apiservice/app/api/v1/__init__.py
Normal file
0
03-apiservice/app/api/v1/__init__.py
Normal file
67
03-apiservice/app/api/v1/routes.py
Normal file
67
03-apiservice/app/api/v1/routes.py
Normal file
@@ -0,0 +1,67 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Annotated
|
||||
from zoneinfo import ZoneInfo
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from sqlalchemy.dialects.postgresql import insert
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.v1.schemas import FeedCheckpointIn
|
||||
from app.core.config import settings
|
||||
from app.db.models import RawOpdCheckpoint
|
||||
from app.security.dependencies import get_db, require_permission
|
||||
|
||||
|
||||
router = APIRouter(prefix="/api/v1")
|
||||
|
||||
PERM_FEED_CHECKPOINT_WRITE = "feed.checkpoint:write"
|
||||
|
||||
|
||||
def _to_tz(dt):
    """Normalize a datetime to the configured timezone.

    ``None`` passes through unchanged. Naive datetimes are assumed to
    already be local time and merely get the zone attached; aware ones
    are converted.
    """
    if dt is None:
        return None
    zone = ZoneInfo(settings.TIMEZONE)
    return dt.replace(tzinfo=zone) if dt.tzinfo is None else dt.astimezone(zone)
|
||||
|
||||
|
||||
@router.post("/feed/checkpoint")
|
||||
def upsert_feed_checkpoint(
|
||||
payload: list[FeedCheckpointIn],
|
||||
_: Annotated[object, Depends(require_permission(PERM_FEED_CHECKPOINT_WRITE))],
|
||||
db: Annotated[Session, Depends(get_db)],
|
||||
):
|
||||
rows = []
|
||||
for item in payload:
|
||||
rows.append(
|
||||
{
|
||||
"id": item.id,
|
||||
"hn": item.hn,
|
||||
"vn": item.vn,
|
||||
"location": item.location,
|
||||
"type": item.type,
|
||||
"timestamp_in": _to_tz(item.timestamp_in),
|
||||
"timestamp_out": _to_tz(item.timestamp_out),
|
||||
"waiting_time": item.waiting_time,
|
||||
"bu": item.bu,
|
||||
}
|
||||
)
|
||||
|
||||
stmt = insert(RawOpdCheckpoint).values(rows)
|
||||
update_cols = {
|
||||
"hn": stmt.excluded.hn,
|
||||
"vn": stmt.excluded.vn,
|
||||
"location": stmt.excluded.location,
|
||||
"type": stmt.excluded.type,
|
||||
"timestamp_in": stmt.excluded.timestamp_in,
|
||||
"timestamp_out": stmt.excluded.timestamp_out,
|
||||
"waiting_time": stmt.excluded.waiting_time,
|
||||
"bu": stmt.excluded.bu,
|
||||
}
|
||||
|
||||
stmt = stmt.on_conflict_do_update(index_elements=[RawOpdCheckpoint.id], set_=update_cols)
|
||||
result = db.execute(stmt)
|
||||
db.commit()
|
||||
|
||||
return {"upserted": len(rows), "rowcount": result.rowcount}
|
||||
15
03-apiservice/app/api/v1/schemas.py
Normal file
15
03-apiservice/app/api/v1/schemas.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from datetime import datetime
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class FeedCheckpointIn(BaseModel):
    """One OPD checkpoint event, upserted by ``id`` via /feed/checkpoint."""

    id: int
    # Patient (hn) and visit (vn) numbers.
    hn: int
    vn: int
    location: str
    type: str
    timestamp_in: datetime
    timestamp_out: datetime | None = None
    # NOTE(review): presumably the wait duration between in/out — the unit
    # (minutes vs seconds) is not visible here; confirm with the producer.
    waiting_time: int | None = None
    # Business unit, optional.
    bu: str | None = None
|
||||
0
03-apiservice/app/core/__init__.py
Normal file
0
03-apiservice/app/core/__init__.py
Normal file
25
03-apiservice/app/core/config.py
Normal file
25
03-apiservice/app/core/config.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """Runtime configuration, loaded from the environment (and .env)."""

    # Unknown environment variables are ignored rather than rejected.
    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    # Title shown in the FastAPI / OpenAPI UI.
    APP_NAME: str = "APIsService"

    # PostgreSQL connection parameters (no defaults for required secrets).
    DB_HOST: str
    DB_PORT: int = 5432
    DB_USER: str
    DB_PASSWORD: str
    DB_NAME: str
    DB_SSLMODE: str = "prefer"

    # Path prefix when served behind a reverse proxy (e.g. /apiservice).
    ROOT_PATH: str = ""

    # Timezone applied to naive timestamps in the feed endpoints.
    TIMEZONE: str = "Asia/Bangkok"

    # Credentials and signing secret for the sqladmin UI / session cookies.
    ADMIN_SECRET_KEY: str
    ADMIN_USERNAME: str
    ADMIN_PASSWORD: str


# Single shared settings instance imported throughout the app.
settings = Settings()
|
||||
0
03-apiservice/app/db/__init__.py
Normal file
0
03-apiservice/app/db/__init__.py
Normal file
5
03-apiservice/app/db/base.py
Normal file
5
03-apiservice/app/db/base.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from sqlalchemy.orm import DeclarativeBase
|
||||
|
||||
|
||||
class Base(DeclarativeBase):
    """Declarative base shared by all ORM models in this service."""

    pass
|
||||
21
03-apiservice/app/db/engine.py
Normal file
21
03-apiservice/app/db/engine.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from urllib.parse import quote_plus
|
||||
|
||||
from sqlalchemy import create_engine
|
||||
|
||||
from app.core.config import settings
|
||||
|
||||
|
||||
def build_db_url() -> str:
    """Assemble the SQLAlchemy connection URL from settings.

    User, password and database name are percent-encoded so special
    characters in credentials cannot corrupt the URL.
    """
    encoded = {
        "user": quote_plus(settings.DB_USER),
        "password": quote_plus(settings.DB_PASSWORD),
        "db": quote_plus(settings.DB_NAME),
        "ssl": quote_plus(settings.DB_SSLMODE),
    }
    template = "postgresql+psycopg://{user}:{password}@{host}:{port}/{db}?sslmode={ssl}"
    return template.format(host=settings.DB_HOST, port=settings.DB_PORT, **encoded)
|
||||
|
||||
|
||||
engine = create_engine(build_db_url(), pool_pre_ping=True)
|
||||
12
03-apiservice/app/db/init_db.py
Normal file
12
03-apiservice/app/db/init_db.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from sqlalchemy import text
|
||||
|
||||
from app.db.base import Base
|
||||
from app.db.engine import engine
|
||||
|
||||
|
||||
def init_db() -> None:
    """Create the schemas and all ORM tables this service needs.

    Runs at application startup. Both schema statements are idempotent,
    and ``create_all`` skips tables that already exist, so repeated
    startups are safe.
    """
    with engine.begin() as conn:
        conn.execute(text("CREATE SCHEMA IF NOT EXISTS fastapi"))
        conn.execute(text("CREATE SCHEMA IF NOT EXISTS operationbi"))

        Base.metadata.create_all(bind=conn)
|
||||
62
03-apiservice/app/db/models.py
Normal file
62
03-apiservice/app/db/models.py
Normal file
@@ -0,0 +1,62 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
from sqlalchemy import BigInteger, Boolean, DateTime, ForeignKey, Integer, String, Text, func
|
||||
from sqlalchemy.dialects.postgresql import JSONB
|
||||
from sqlalchemy.orm import Mapped, mapped_column, relationship
|
||||
|
||||
from app.db.base import Base
|
||||
|
||||
|
||||
class RawOpdCheckpoint(Base):
    """Raw OPD checkpoint event, upserted from the /feed/checkpoint API."""

    __tablename__ = "raw_opd_checkpoint"
    __table_args__ = {"schema": "operationbi"}

    # Primary key supplied by the upstream feed (not auto-generated).
    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
    # Patient (hn) and visit (vn) numbers.
    hn: Mapped[int] = mapped_column(BigInteger, nullable=False)
    vn: Mapped[int] = mapped_column(BigInteger, nullable=False)
    location: Mapped[str] = mapped_column(Text, nullable=False)
    type: Mapped[str] = mapped_column(String(64), nullable=False)
    # Timezone-aware entry/exit timestamps; exit may be open-ended.
    timestamp_in: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False)
    timestamp_out: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True)
    waiting_time: Mapped[int | None] = mapped_column(Integer, nullable=True)
    # Business unit, optional.
    bu: Mapped[str | None] = mapped_column(String(128), nullable=True)
|
||||
|
||||
|
||||
class ApiClient(Base):
    """A consumer of the API, owning zero or more API keys."""

    __tablename__ = "api_client"
    __table_args__ = {"schema": "fastapi"}

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    name: Mapped[str] = mapped_column(String(128), unique=True, nullable=False)
    is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True)

    # Deleting a client removes its keys (ORM cascade plus the DB-level
    # ON DELETE CASCADE on ApiKey.client_id via passive_deletes).
    api_keys: Mapped[list[ApiKey]] = relationship(
        back_populates="client",
        cascade="all, delete-orphan",
        passive_deletes=True,
    )
|
||||
|
||||
|
||||
class ApiKey(Base):
    """A bearer API key; only the prefix and bcrypt hash are stored."""

    __tablename__ = "api_key"
    __table_args__ = {"schema": "fastapi"}

    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
    client_id: Mapped[int] = mapped_column(
        ForeignKey("fastapi.api_client.id", ondelete="CASCADE"), nullable=False
    )
    name: Mapped[str | None] = mapped_column(String(128), nullable=True)

    # key_prefix allows a fast indexed lookup before the bcrypt comparison
    # against key_hash; the plaintext key itself is never persisted.
    key_prefix: Mapped[str] = mapped_column(String(12), nullable=False)
    key_hash: Mapped[str] = mapped_column(Text, nullable=False)

    # Permission names stored as a JSONB array of strings.
    permissions: Mapped[list[str]] = mapped_column(JSONB, nullable=False, default=list)
    is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True)

    # Set by the database at insert time.
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True), nullable=False, server_default=func.now()
    )

    client: Mapped[ApiClient] = relationship(back_populates="api_keys")
|
||||
21
03-apiservice/app/main.py
Normal file
21
03-apiservice/app/main.py
Normal file
@@ -0,0 +1,21 @@
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
from fastapi import FastAPI
|
||||
from starlette.middleware.sessions import SessionMiddleware
|
||||
|
||||
from app.admin import mount_admin
|
||||
from app.api.v1.routes import router as v1_router
|
||||
from app.core.config import settings
|
||||
from app.db.init_db import init_db
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(_: FastAPI):
    """Create schemas/tables once at startup; nothing to clean up on exit."""
    init_db()
    yield


app = FastAPI(title=settings.APP_NAME, root_path=settings.ROOT_PATH, lifespan=lifespan)
# Session cookies back the sqladmin login (see app.admin.AdminAuth).
app.add_middleware(SessionMiddleware, secret_key=settings.ADMIN_SECRET_KEY)
app.include_router(v1_router)
mount_admin(app)
|
||||
0
03-apiservice/app/security/__init__.py
Normal file
0
03-apiservice/app/security/__init__.py
Normal file
22
03-apiservice/app/security/api_key.py
Normal file
22
03-apiservice/app/security/api_key.py
Normal file
@@ -0,0 +1,22 @@
|
||||
import secrets
|
||||
|
||||
import bcrypt
|
||||
|
||||
|
||||
def generate_api_key(prefix_len: int = 8, token_bytes: int = 32) -> str:
    """Create a new API key of the form ``<prefix>.<secret>``.

    The prefix (exactly ``prefix_len`` characters) is stored for indexed
    lookup; the secret part carries ``token_bytes`` bytes of randomness.
    """
    head = secrets.token_urlsafe(prefix_len)[:prefix_len]
    return ".".join((head, secrets.token_urlsafe(token_bytes)))
|
||||
|
||||
|
||||
def get_prefix(api_key: str) -> str:
    """Return the lookup prefix: everything before the first dot.

    A key with no dot is returned unchanged.
    """
    head, _, _ = api_key.partition(".")
    return head
|
||||
|
||||
|
||||
def hash_api_key(api_key: str) -> str:
    """Hash a plaintext key with a fresh bcrypt salt for storage.

    NOTE(review): bcrypt only considers the first 72 bytes of input; the
    keys generated here are well under that, but longer inputs would be
    silently truncated.
    """
    hashed = bcrypt.hashpw(api_key.encode("utf-8"), bcrypt.gensalt())
    return hashed.decode("utf-8")
|
||||
|
||||
|
||||
def verify_api_key(api_key: str, api_key_hash: str) -> bool:
    """Check a plaintext key against a stored bcrypt hash."""
    return bcrypt.checkpw(api_key.encode("utf-8"), api_key_hash.encode("utf-8"))
|
||||
54
03-apiservice/app/security/dependencies.py
Normal file
54
03-apiservice/app/security/dependencies.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from typing import Annotated
|
||||
|
||||
from fastapi import Depends, HTTPException, Request, status
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.orm import Session, sessionmaker
|
||||
|
||||
from app.db.engine import engine
|
||||
from app.db.models import ApiKey
|
||||
from app.security.api_key import get_prefix, verify_api_key
|
||||
|
||||
|
||||
SessionLocal = sessionmaker(bind=engine, autoflush=False, autocommit=False)
|
||||
|
||||
|
||||
def get_db():
    """FastAPI dependency: yield a DB session, closed after the request."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
|
||||
|
||||
|
||||
def get_bearer_token(request: Request) -> str:
    """Extract the token from an ``Authorization: Bearer <token>`` header.

    Raises:
        HTTPException: 401 when the header is absent, or when the scheme
            is not ``Bearer`` / the token part is empty.
    """
    header = request.headers.get("authorization")
    if not header:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Missing Authorization")

    scheme, _, credentials = header.partition(" ")
    credentials = credentials.strip()
    if scheme.lower() != "bearer" or not credentials:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid Authorization")

    return credentials
|
||||
|
||||
|
||||
def require_permission(permission: str):
    """Build a dependency that authenticates an API key and checks a permission.

    The bearer token's prefix selects the candidate key row, the full
    token is then verified against the stored bcrypt hash, and finally
    the required ``permission`` must appear in the key's permission list.
    """
    def _dep(
        token: Annotated[str, Depends(get_bearer_token)],
        db: Annotated[Session, Depends(get_db)],
    ) -> ApiKey:
        # Indexed prefix lookup avoids bcrypt-checking every stored key.
        prefix = get_prefix(token)
        stmt = select(ApiKey).where(ApiKey.key_prefix == prefix, ApiKey.is_active.is_(True))
        api_key = db.execute(stmt).scalar_one_or_none()
        if not api_key:
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key")

        if not verify_api_key(token, api_key.key_hash):
            raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API key")

        if permission not in (api_key.permissions or []):
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Permission denied")

        return api_key

    return _dep
|
||||
30
03-apiservice/docker-compose.yml
Normal file
30
03-apiservice/docker-compose.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
services:
  apiservice:
    build: .
    container_name: apiservice
    environment:
      - TZ=${TZ:-Asia/Bangkok}
      - DB_HOST=${DB_HOST}
      - DB_PORT=${DB_PORT}
      - DB_USER=${DB_USER}
      - DB_PASSWORD=${DB_PASSWORD}
      - DB_NAME=${DB_NAME}
      - DB_SSLMODE=${DB_SSLMODE}
      - ROOT_PATH=${ROOT_PATH}
      - APP_NAME=${APP_NAME}
      - ADMIN_SECRET_KEY=${ADMIN_SECRET_KEY}
      - ADMIN_USERNAME=${ADMIN_USERNAME}
      - ADMIN_PASSWORD=${ADMIN_PASSWORD}
    networks:
      - shared_data_network
    restart: unless-stopped
    healthcheck:
      # python:3.12-slim ships without curl, so the previous curl-based
      # probe always failed. Probe with the Python stdlib instead, and hit
      # /docs: inside the container the app serves routes unprefixed —
      # ROOT_PATH only affects URL generation behind the reverse proxy.
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/docs', timeout=5)"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

networks:
  shared_data_network:
    external: true
|
||||
12
03-apiservice/requirements.txt
Normal file
12
03-apiservice/requirements.txt
Normal file
@@ -0,0 +1,12 @@
|
||||
fastapi==0.115.8
|
||||
uvicorn==0.34.0
|
||||
gunicorn==23.0.0
|
||||
SQLAlchemy==2.0.38
|
||||
psycopg==3.2.5
|
||||
pydantic==2.10.6
|
||||
pydantic-settings==2.7.1
|
||||
sqladmin==0.20.1
|
||||
itsdangerous==2.2.0
|
||||
bcrypt==4.3.0
|
||||
python-multipart==0.0.20
|
||||
WTForms==3.2.1
|
||||
19
04-ingestion/README.md
Normal file
19
04-ingestion/README.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# 04-ingestion: Airbyte Data Ingestion
|
||||
|
||||
## Services
|
||||
- Airbyte Webapp
|
||||
- Airbyte Server
|
||||
- Airbyte Worker
|
||||
- Temporal (workflow engine)
|
||||
|
||||
## Start
|
||||
```bash
|
||||
docker compose --env-file ../.env.global up -d
|
||||
```
|
||||
|
||||
## Access
|
||||
Internal - configure Nginx Proxy Manager to expose at `/airbyte`
|
||||
|
||||
## First Time Setup
|
||||
1. Create database: `docker exec postgres psql -U postgres -c "CREATE DATABASE airbyte;"`
|
||||
2. Access webapp and configure sources/destinations
|
||||
70
04-ingestion/docker-compose.yml
Normal file
70
04-ingestion/docker-compose.yml
Normal file
@@ -0,0 +1,70 @@
|
||||
services:
  # NOTE(review): the docker-compose distribution of Airbyte is legacy and
  # all images here use :latest — pinning explicit version tags would make
  # upgrades reproducible; confirm this stack still matches a supported
  # Airbyte deployment method.
  airbyte-webapp:
    image: airbyte/webapp:latest
    container_name: airbyte-webapp
    environment:
      - AIRBYTE_VERSION=latest
      - API_URL=/api/v1/
      # "segment" enables Airbyte usage telemetry.
      - TRACKING_STRATEGY=segment
    networks:
      - shared_data_network
    restart: unless-stopped
    depends_on:
      - airbyte-server

  airbyte-server:
    image: airbyte/server:latest
    container_name: airbyte-server
    environment:
      # Points at the shared postgres from 01-infra; the "airbyte" database
      # must be created manually first (see README).
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=${DB_USER}
      - DATABASE_PASSWORD=${DB_PASSWORD}
      - DATABASE_DB=airbyte
      - CONFIG_DATABASE_USER=${DB_USER}
      - CONFIG_DATABASE_PASSWORD=${DB_PASSWORD}
      - WORKSPACE_ROOT=/tmp/workspace
      - TRACKING_STRATEGY=segment
      - TZ=${TZ:-Asia/Bangkok}
    volumes:
      - ./data/workspace:/tmp/workspace
      - ./data/airbyte:/data
    networks:
      - shared_data_network
    restart: unless-stopped

  airbyte-worker:
    image: airbyte/worker:latest
    container_name: airbyte-worker
    environment:
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_USER=${DB_USER}
      - DATABASE_PASSWORD=${DB_PASSWORD}
      - DATABASE_DB=airbyte
      - WORKSPACE_ROOT=/tmp/workspace
      - TZ=${TZ:-Asia/Bangkok}
    volumes:
      - ./data/workspace:/tmp/workspace
      # Grants the worker control of the host Docker daemon (effectively
      # root on the host) — required for launching connector containers,
      # but keep this host locked down accordingly.
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - shared_data_network
    restart: unless-stopped

  airbyte-temporal:
    # Temporal workflow engine backing Airbyte job scheduling.
    image: temporalio/auto-setup:1.20.0
    container_name: airbyte-temporal
    environment:
      - DB=postgresql
      - DB_PORT=5432
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PWD=${DB_PASSWORD}
      # Hostname of the postgres server Temporal auto-setup connects to.
      - POSTGRES_SEEDS=postgres
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development.yaml
    networks:
      - shared_data_network
    restart: unless-stopped

networks:
  shared_data_network:
    external: true
|
||||
20
06-analytics/README.md
Normal file
20
06-analytics/README.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# 06-analytics: Apache Superset BI
|
||||
|
||||
## Start
|
||||
```bash
|
||||
# Create superset database first
|
||||
docker exec postgres psql -U postgres -c "CREATE DATABASE superset;"
|
||||
|
||||
# Start superset
|
||||
docker compose --env-file ../.env.global up -d
|
||||
```
|
||||
|
||||
## Access
|
||||
Internal - configure Nginx Proxy Manager to expose at `/superset`
|
||||
|
||||
## Default Login
|
||||
- Username: see SUPERSET_ADMIN_USERNAME in .env.global
|
||||
- Password: see SUPERSET_ADMIN_PASSWORD in .env.global
|
||||
|
||||
## Keycloak Integration
|
||||
Configure OAuth in superset_config.py after Keycloak setup
|
||||
31
06-analytics/docker-compose.yml
Normal file
31
06-analytics/docker-compose.yml
Normal file
@@ -0,0 +1,31 @@
|
||||
services:
  superset:
    image: apache/superset:latest
    container_name: superset
    environment:
      - SUPERSET_SECRET_KEY=${SUPERSET_SECRET_KEY}
      # Metadata database (Superset's own state, not the analytics sources);
      # the "superset" database must be created manually first (see README).
      - DATABASE_DIALECT=postgresql
      - DATABASE_HOST=postgres
      - DATABASE_PORT=5432
      - DATABASE_DB=superset
      - DATABASE_USER=${DB_USER}
      - DATABASE_PASSWORD=${DB_PASSWORD}
      - SUPERSET_LOAD_EXAMPLES=no
      - TZ=${TZ:-Asia/Bangkok}
    volumes:
      - ./data/superset_home:/app/superset_home
      # Custom config picked up via PYTHONPATH (see superset_config.py).
      - ./superset_config.py:/app/pythonpath/superset_config.py
    networks:
      - shared_data_network
    restart: unless-stopped
    # Startup sequence: migrate metadata DB, idempotently create the admin
    # user (|| true tolerates "already exists" on restarts), initialize
    # roles/permissions, then serve via gunicorn on 8088.
    command: >
      sh -c "
      superset db upgrade &&
      superset fab create-admin --username ${SUPERSET_ADMIN_USERNAME} --firstname Admin --lastname User --email admin@sriphat.local --password ${SUPERSET_ADMIN_PASSWORD} || true &&
      superset init &&
      gunicorn --bind 0.0.0.0:8088 --workers 4 --timeout 120 --limit-request-line 0 --limit-request-field_size 0 'superset.app:create_app()'
      "

networks:
  shared_data_network:
    external: true
|
||||
10
06-analytics/superset_config.py
Normal file
10
06-analytics/superset_config.py
Normal file
@@ -0,0 +1,10 @@
|
||||
import os

# Session/cookie signing key — supplied via SUPERSET_SECRET_KEY in the env.
SECRET_KEY = os.environ.get('SUPERSET_SECRET_KEY')
# Superset's metadata database (its own state, not the analytics sources).
SQLALCHEMY_DATABASE_URI = f"postgresql://{os.environ.get('DATABASE_USER')}:{os.environ.get('DATABASE_PASSWORD')}@{os.environ.get('DATABASE_HOST')}:{os.environ.get('DATABASE_PORT')}/{os.environ.get('DATABASE_DB')}"

# Honor X-Forwarded-* headers from the reverse proxy (Nginx Proxy Manager).
ENABLE_PROXY_FIX = True
# Anonymous visitors get the permissions of the Gamma role.
PUBLIC_ROLE_LIKE = "Gamma"

WTF_CSRF_ENABLED = True
# No expiry on CSRF tokens (avoids failures on long-lived dashboard tabs).
WTF_CSRF_TIME_LIMIT = None
||||
277
BACKUP.md
Normal file
277
BACKUP.md
Normal file
@@ -0,0 +1,277 @@
|
||||
# Backup & Restore Guide
|
||||
|
||||
## 📂 โครงสร้าง Data Folders
|
||||
|
||||
ทุก service ใช้ bind mount (./data) เพื่อให้ backup ง่าย:
|
||||
|
||||
```
|
||||
sriphat-dataplatform/
|
||||
├── 01-infra/
|
||||
│ ├── data/
|
||||
│ │ └── postgres/ # PostgreSQL database files
|
||||
│ ├── data/ # Nginx Proxy Manager config
|
||||
│ └── letsencrypt/ # SSL certificates
|
||||
├── 04-ingestion/
|
||||
│ └── data/
|
||||
│ ├── workspace/ # Airbyte workspace
|
||||
│ └── airbyte/ # Airbyte metadata
|
||||
└── 06-analytics/
|
||||
└── data/
|
||||
└── superset_home/ # Superset config & metadata
|
||||
```
|
||||
|
||||
## 🔄 Backup Strategy
|
||||
|
||||
### Option 1: Full Backup (Recommended)
|
||||
|
||||
สำรองทั้งโปรเจกต์ (รวม config + data):
|
||||
|
||||
```bash
|
||||
# หยุด services ก่อน (เพื่อความสมบูรณ์ของข้อมูล)
|
||||
bash stop-all.sh
|
||||
|
||||
# Backup ทั้งโฟลเดอร์
|
||||
cd /e/git3   # Git Bash path form of E:\git3
|
||||
tar -czf sriphat-dataplatform-backup-$(date +%Y%m%d-%H%M%S).tar.gz sriphat-dataplatform/
|
||||
|
||||
# หรือใช้ robocopy บน Windows
|
||||
# (PowerShell) robocopy sriphat-dataplatform "E:\backups\sriphat-dataplatform-$(Get-Date -Format 'yyyyMMdd-HHmmss')" /MIR /R:3 /W:5
|
||||
|
||||
# รัน services ต่อ
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
### Option 2: Backup เฉพาะ Data Folders
|
||||
|
||||
```bash
|
||||
# สร้างโฟลเดอร์ backup
|
||||
mkdir -p "/e/backups/data-$(date +%Y%m%d)"   # Git Bash path form of E:\backups
|
||||
|
||||
# Backup PostgreSQL
|
||||
docker exec postgres pg_dumpall -U postgres > "/e/backups/data-$(date +%Y%m%d)/postgres-dump.sql"
|
||||
|
||||
# Backup data folders
|
||||
robocopy 01-infra\data E:\backups\data-$(date +%Y%m%d)\01-infra-data /MIR
|
||||
robocopy 01-infra\letsencrypt E:\backups\data-$(date +%Y%m%d)\01-infra-letsencrypt /MIR
|
||||
robocopy 04-ingestion\data E:\backups\data-$(date +%Y%m%d)\04-ingestion-data /MIR
|
||||
robocopy 06-analytics\data E:\backups\data-$(date +%Y%m%d)\06-analytics-data /MIR
|
||||
```
|
||||
|
||||
### Option 3: Hot Backup (ไม่ต้องหยุด service)
|
||||
|
||||
```bash
|
||||
# Backup PostgreSQL (แบบ online)
|
||||
docker exec postgres pg_dumpall -U postgres | gzip > postgres-backup-$(date +%Y%m%d).sql.gz
|
||||
|
||||
# Backup Nginx config
|
||||
tar -czf nginx-backup-$(date +%Y%m%d).tar.gz 01-infra/data 01-infra/letsencrypt
|
||||
|
||||
# Backup Airbyte
|
||||
tar -czf airbyte-backup-$(date +%Y%m%d).tar.gz 04-ingestion/data
|
||||
|
||||
# Backup Superset
|
||||
tar -czf superset-backup-$(date +%Y%m%d).tar.gz 06-analytics/data
|
||||
```
|
||||
|
||||
## 📥 Restore
|
||||
|
||||
### Full Restore
|
||||
|
||||
```bash
|
||||
# หยุด services ทั้งหมด
|
||||
bash stop-all.sh
|
||||
|
||||
# ลบข้อมูลเก่า (ระวัง!)
|
||||
rm -rf 01-infra/data 04-ingestion/data 06-analytics/data
|
||||
|
||||
# แตกไฟล์ backup
|
||||
tar -xzf sriphat-dataplatform-backup-YYYYMMDD-HHMMSS.tar.gz
|
||||
|
||||
# รัน services
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
### Restore PostgreSQL Only
|
||||
|
||||
```bash
|
||||
# หยุด services ที่ใช้ database
|
||||
cd 03-apiservice && docker compose down
|
||||
cd ../04-ingestion && docker compose down
|
||||
cd ../06-analytics && docker compose down
|
||||
|
||||
# Restore database
|
||||
docker exec -i postgres psql -U postgres < postgres-backup-20260216.sql
|
||||
|
||||
# หรือ restore จาก dump file
|
||||
gunzip < postgres-backup-20260216.sql.gz | docker exec -i postgres psql -U postgres
|
||||
|
||||
# รัน services ต่อ
|
||||
cd ../03-apiservice && docker compose --env-file ../.env.global up -d
|
||||
cd ../04-ingestion && docker compose --env-file ../.env.global up -d
|
||||
cd ../06-analytics && docker compose --env-file ../.env.global up -d
|
||||
```
|
||||
|
||||
### Restore Specific Service
|
||||
|
||||
```bash
|
||||
# หยุด service
|
||||
cd 06-analytics
|
||||
docker compose down
|
||||
|
||||
# Restore data
|
||||
rm -rf data/superset_home
|
||||
tar -xzf ../backups/superset-backup-20260216.tar.gz
|
||||
|
||||
# รัน service
|
||||
docker compose --env-file ../.env.global up -d
|
||||
```
|
||||
|
||||
## ⏰ Automated Backup (Scheduled)
|
||||
|
||||
### Windows Task Scheduler
|
||||
|
||||
สร้าง script `backup-daily.ps1`:
|
||||
|
||||
```powershell
|
||||
# backup-daily.ps1
|
||||
$BackupPath = "E:\backups\sriphat-data"
|
||||
$Date = Get-Date -Format "yyyyMMdd-HHmmss"
|
||||
$BackupFolder = "$BackupPath\$Date"
|
||||
|
||||
# สร้างโฟลเดอร์
|
||||
New-Item -ItemType Directory -Path $BackupFolder -Force
|
||||
|
||||
# Backup PostgreSQL
|
||||
docker exec postgres pg_dumpall -U postgres | Out-File "$BackupFolder\postgres.sql"
|
||||
|
||||
# Backup data folders
|
||||
robocopy "E:\git3\sriphat-dataplatform\01-infra\data" "$BackupFolder\01-infra-data" /MIR /R:3 /W:5
|
||||
robocopy "E:\git3\sriphat-dataplatform\01-infra\letsencrypt" "$BackupFolder\01-infra-letsencrypt" /MIR /R:3 /W:5
|
||||
robocopy "E:\git3\sriphat-dataplatform\04-ingestion\data" "$BackupFolder\04-ingestion-data" /MIR /R:3 /W:5
|
||||
robocopy "E:\git3\sriphat-dataplatform\06-analytics\data" "$BackupFolder\06-analytics-data" /MIR /R:3 /W:5
|
||||
|
||||
# Compress
|
||||
Compress-Archive -Path $BackupFolder -DestinationPath "$BackupPath\backup-$Date.zip"
|
||||
|
||||
# ลบ backup เก่า (เก็บไว้ 30 วัน)
|
||||
Get-ChildItem $BackupPath -Filter "backup-*.zip" | Where-Object {$_.LastWriteTime -lt (Get-Date).AddDays(-30)} | Remove-Item
|
||||
```
|
||||
|
||||
ตั้งเวลารัน Task Scheduler:
|
||||
1. เปิด Task Scheduler
|
||||
2. Create Basic Task
|
||||
3. Trigger: Daily เวลา 02:00
|
||||
4. Action: Start a program
|
||||
- Program: `powershell.exe`
|
||||
- Arguments: `-ExecutionPolicy Bypass -File "E:\git3\sriphat-dataplatform\backup-daily.ps1"`
|
||||
|
||||
### Linux Cron Job
|
||||
|
||||
```bash
|
||||
# เพิ่มใน crontab
|
||||
crontab -e
|
||||
|
||||
# Backup ทุกวันเวลา 02:00
|
||||
0 2 * * * /path/to/sriphat-dataplatform/backup-daily.sh
|
||||
```
|
||||
|
||||
สร้าง `backup-daily.sh`:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
BACKUP_DIR="/backups/sriphat-data"
|
||||
DATE=$(date +%Y%m%d-%H%M%S)
|
||||
|
||||
mkdir -p $BACKUP_DIR/$DATE
|
||||
|
||||
# Backup PostgreSQL
|
||||
docker exec postgres pg_dumpall -U postgres | gzip > $BACKUP_DIR/$DATE/postgres.sql.gz
|
||||
|
||||
# Backup data folders
|
||||
tar -czf $BACKUP_DIR/$DATE/01-infra-data.tar.gz 01-infra/data 01-infra/letsencrypt
|
||||
tar -czf $BACKUP_DIR/$DATE/04-ingestion-data.tar.gz 04-ingestion/data
|
||||
tar -czf $BACKUP_DIR/$DATE/06-analytics-data.tar.gz 06-analytics/data
|
||||
|
||||
# ลบ backup เก่า (เก็บไว้ 30 วัน)
|
||||
find $BACKUP_DIR -name "*.tar.gz" -mtime +30 -delete
|
||||
```
|
||||
|
||||
## 🔐 Backup Security
|
||||
|
||||
### Encrypt Backup
|
||||
|
||||
```bash
|
||||
# Backup และ encrypt ด้วย GPG
|
||||
tar -czf - 01-infra/data | gpg --symmetric --cipher-algo AES256 -o backup-encrypted-$(date +%Y%m%d).tar.gz.gpg
|
||||
|
||||
# Decrypt และ restore
|
||||
gpg --decrypt backup-encrypted-20260216.tar.gz.gpg | tar -xzf -
|
||||
```
|
||||
|
||||
### Remote Backup
|
||||
|
||||
```bash
|
||||
# Sync ไปยัง remote server (rsync)
|
||||
rsync -avz --delete /e/git3/sriphat-dataplatform/01-infra/data/ user@backup-server:/backups/sriphat/01-infra-data/
|
||||
|
||||
# หรือใช้ rclone (Google Drive, OneDrive, S3)
|
||||
rclone sync E:\git3\sriphat-dataplatform\01-infra\data remote:sriphat-backup/01-infra-data
|
||||
```
|
||||
|
||||
## 📊 Backup Checklist
|
||||
|
||||
- [ ] PostgreSQL database (pg_dumpall)
|
||||
- [ ] Nginx Proxy Manager config (01-infra/data)
|
||||
- [ ] SSL certificates (01-infra/letsencrypt)
|
||||
- [ ] Airbyte connections (04-ingestion/data)
|
||||
- [ ] Superset dashboards (06-analytics/data)
|
||||
- [ ] Environment files (.env.global)
|
||||
- [ ] Custom configs (superset_config.py, etc.)
|
||||
|
||||
## 🚨 Disaster Recovery
|
||||
|
||||
### Scenario 1: PostgreSQL Corruption
|
||||
|
||||
```bash
|
||||
# หยุด services
|
||||
bash stop-all.sh
|
||||
|
||||
# ลบ data folder
|
||||
rm -rf 01-infra/data/postgres
|
||||
|
||||
# Restore จาก backup
|
||||
docker compose -f 01-infra/docker-compose.yml --env-file .env.global up -d postgres
|
||||
sleep 10
|
||||
gunzip < postgres-backup-latest.sql.gz | docker exec -i postgres psql -U postgres
|
||||
|
||||
# รัน services ทั้งหมด
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
### Scenario 2: Complete System Failure
|
||||
|
||||
```bash
|
||||
# ติดตั้ง Docker ใหม่
|
||||
# Clone repository
|
||||
git clone <repo-url> sriphat-dataplatform
|
||||
cd sriphat-dataplatform
|
||||
|
||||
# Restore backup
|
||||
tar -xzf /path/to/backup.tar.gz
|
||||
|
||||
# Start
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
## 📝 Best Practices
|
||||
|
||||
1. **Backup ทุกวัน** - ตั้ง automated backup
|
||||
2. **Test restore** - ทดสอบ restore อย่างน้อยเดือนละครั้ง
|
||||
3. **3-2-1 Rule**:
|
||||
- 3 copies ของข้อมูล
|
||||
- 2 media types ที่แตกต่างกัน
|
||||
- 1 offsite backup
|
||||
4. **Monitor backup** - ตรวจสอบว่า backup สำเร็จทุกวัน
|
||||
5. **Document** - บันทึกขั้นตอน restore ไว้ชัดเจน
|
||||
6. **Encrypt** - เข้ารหัส backup ที่มีข้อมูลสำคัญ
|
||||
7. **Version control** - เก็บ backup หลายเวอร์ชัน (อย่างน้อย 30 วัน)
|
||||
298
DEPLOYMENT.md
Normal file
298
DEPLOYMENT.md
Normal file
@@ -0,0 +1,298 @@
|
||||
# Sriphat Data Platform - Deployment Guide
|
||||
|
||||
## 📋 Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Nginx Proxy Manager │
|
||||
│ (Gateway + SSL + Domain Routing) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
┌─────────────────────┼─────────────────────┐
|
||||
│ │ │
|
||||
┌───────▼────────┐ ┌────────▼────────┐ ┌───────▼────────┐
|
||||
│ Keycloak │ │ API Service │ │ Superset │
|
||||
│ (SSO) │ │ (FastAPI) │ │ (BI) │
|
||||
└────────────────┘ └─────────────────┘ └────────────────┘
|
||||
│ │ │
|
||||
└─────────────────────┼─────────────────────┘
|
||||
│
|
||||
┌─────────▼─────────┐
|
||||
│ PostgreSQL │
|
||||
│ (Data Warehouse) │
|
||||
└───────────────────┘
|
||||
│
|
||||
┌─────────▼─────────┐
|
||||
│ Airbyte │
|
||||
│ (Data Ingestion) │
|
||||
└───────────────────┘
|
||||
```
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Prerequisites
|
||||
- Docker & Docker Compose installed
|
||||
- Minimum 8GB RAM
|
||||
- 50GB disk space
|
||||
|
||||
### Step 1: Clone & Configure
|
||||
```bash
|
||||
cd e:\git3\sriphat-dataplatform
|
||||
|
||||
# Review and update credentials in .env.global
|
||||
notepad .env.global
|
||||
```
|
||||
|
||||
### Step 2: Start All Services
|
||||
```bash
|
||||
# On Linux/Mac
|
||||
bash start-all.sh
|
||||
|
||||
# On Windows (PowerShell)
|
||||
bash start-all.sh
|
||||
# OR manually:
|
||||
# 1. cd 00-network && bash create-network.sh
|
||||
# 2. cd ../01-infra && docker compose --env-file ../.env.global up -d
|
||||
# 3. Wait 30 seconds for PostgreSQL
|
||||
# 4. cd ../03-apiservice && docker compose --env-file ../.env.global up --build -d
|
||||
# 5. cd ../04-ingestion && docker compose --env-file ../.env.global up -d
|
||||
# 6. cd ../06-analytics && docker compose --env-file ../.env.global up -d
|
||||
```
|
||||
|
||||
### Step 3: Verify Services
|
||||
```bash
|
||||
docker ps
|
||||
```
|
||||
|
||||
You should see:
|
||||
- nginx-proxy-manager
|
||||
- keycloak
|
||||
- postgres
|
||||
- apiservice
|
||||
- airbyte-webapp, airbyte-server, airbyte-worker, airbyte-temporal
|
||||
- superset
|
||||
|
||||
## 🔑 Access Points
|
||||
|
||||
| Service | URL | Default Credentials |
|
||||
|---------|-----|---------------------|
|
||||
| **Nginx Proxy Manager** | http://localhost:81 | admin@example.com / changeme |
|
||||
| **Keycloak Admin** | http://localhost:8080 | See KEYCLOAK_ADMIN in .env.global |
|
||||
| **API Service** | http://localhost/apiservice | See ADMIN_USERNAME in .env.global |
|
||||
| **Airbyte** | http://localhost/airbyte | Configure via Nginx first |
|
||||
| **Superset** | http://localhost/superset | See SUPERSET_ADMIN_USERNAME in .env.global |
|
||||
|
||||
## 📝 Post-Installation Setup
|
||||
|
||||
### 1. Configure Nginx Proxy Manager
|
||||
|
||||
1. Access http://localhost:81
|
||||
2. Login with default credentials (change on first login)
|
||||
3. Add Proxy Hosts:
|
||||
|
||||
**API Service:**
|
||||
- Domain: `api.sriphat.local` (or your domain)
|
||||
- Forward Hostname: `apiservice`
|
||||
- Forward Port: `8000`
|
||||
- Custom locations:
|
||||
- Location: `/apiservice`
|
||||
- Forward Hostname: `apiservice`
|
||||
- Forward Port: `8000`
|
||||
|
||||
**Keycloak:**
|
||||
- Domain: `auth.sriphat.local`
|
||||
- Forward Hostname: `keycloak`
|
||||
- Forward Port: `8080`
|
||||
|
||||
**Superset:**
|
||||
- Domain: `bi.sriphat.local`
|
||||
- Forward Hostname: `superset`
|
||||
- Forward Port: `8088`
|
||||
|
||||
**Airbyte:**
|
||||
- Domain: `etl.sriphat.local`
|
||||
- Forward Hostname: `airbyte-webapp`
|
||||
- Forward Port: `8000`
|
||||
|
||||
### 2. Setup Keycloak SSO
|
||||
|
||||
1. Access Keycloak admin console
|
||||
2. Create new Realm: `sriphat`
|
||||
3. Create Clients:
|
||||
- **superset-client** (for Superset OAuth)
|
||||
- **apiservice-client** (for API Service)
|
||||
4. Configure OIDC settings
|
||||
5. Create Users and assign roles
|
||||
|
||||
### 3. Initialize API Service
|
||||
|
||||
```bash
|
||||
# Access admin UI
|
||||
# http://api.sriphat.local/apiservice/admin/
|
||||
|
||||
# Create API Client
|
||||
# 1. Go to ApiClient menu
|
||||
# 2. Create new client (e.g., "mobile-app")
|
||||
|
||||
# Generate API Key
|
||||
curl -X POST "http://api.sriphat.local/apiservice/admin/api-keys/generate?client_id=1&permissions=feed.checkpoint:write&name=production-key" \
|
||||
-H "Cookie: session=<your-admin-session>"
|
||||
|
||||
# Test API
|
||||
curl -X POST "http://api.sriphat.local/apiservice/api/v1/feed/checkpoint" \
|
||||
-H "Authorization: Bearer <api-key>" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '[{"id":1,"hn":123,"vn":456,"location":"OPD","type":"Scan","timestamp_in":"2026-02-16T10:00:00","timestamp_out":null,"waiting_time":null,"bu":"SRIPHAT"}]'
|
||||
```
|
||||
|
||||
### 4. Configure Airbyte Sources
|
||||
|
||||
1. Access Airbyte UI
|
||||
2. Setup Sources:
|
||||
- SQL Server (HIS Database)
|
||||
- Oracle (Lab System)
|
||||
- REST API endpoints
|
||||
3. Setup Destination:
|
||||
- PostgreSQL (host: `postgres`, database: `postgres`, schemas: `raw_data`)
|
||||
4. Create Connections and schedule syncs
|
||||
|
||||
### 5. Setup Superset Dashboards
|
||||
|
||||
1. Access Superset
|
||||
2. Add Database Connection:
|
||||
- PostgreSQL: `postgresql://postgres:password@postgres:5432/postgres`
|
||||
3. Create Datasets from `analytics` schema
|
||||
4. Build Dashboards
|
||||
|
||||
## 🔒 Security Checklist
|
||||
|
||||
- [ ] Change all default passwords in `.env.global`
|
||||
- [ ] Enable SSL in Nginx Proxy Manager (Let's Encrypt)
|
||||
- [ ] Configure Keycloak with hospital LDAP/AD
|
||||
- [ ] Enable Row-Level Security (RLS) in PostgreSQL
|
||||
- [ ] Restrict network access (firewall rules)
|
||||
- [ ] Setup backup strategy for PostgreSQL data
|
||||
- [ ] Enable audit logging in all services
|
||||
- [ ] Configure session timeouts
|
||||
|
||||
## 🛠️ Maintenance
|
||||
|
||||
### View Logs
|
||||
```bash
|
||||
# All services
|
||||
docker compose -f 01-infra/docker-compose.yml logs -f
|
||||
|
||||
# Specific service
|
||||
docker logs -f apiservice
|
||||
docker logs -f keycloak
|
||||
docker logs -f superset
|
||||
```
|
||||
|
||||
### Backup Database
|
||||
```bash
|
||||
docker exec postgres pg_dump -U postgres postgres > backup_$(date +%Y%m%d).sql
|
||||
```
|
||||
|
||||
### Restore Database
|
||||
```bash
|
||||
docker exec -i postgres psql -U postgres postgres < backup_20260216.sql
|
||||
```
|
||||
|
||||
### Update Services
|
||||
```bash
|
||||
# Stop all
|
||||
bash stop-all.sh
|
||||
|
||||
# Pull latest images
|
||||
docker compose -f 01-infra/docker-compose.yml pull
|
||||
docker compose -f 04-ingestion/docker-compose.yml pull
|
||||
docker compose -f 06-analytics/docker-compose.yml pull
|
||||
|
||||
# Rebuild API service
|
||||
cd 03-apiservice
|
||||
docker compose --env-file ../.env.global build
|
||||
|
||||
# Start all
|
||||
cd ..
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
## 🐛 Troubleshooting
|
||||
|
||||
### PostgreSQL connection issues
|
||||
```bash
|
||||
# Check if PostgreSQL is ready
|
||||
docker exec postgres pg_isready -U postgres
|
||||
|
||||
# Check schemas
|
||||
docker exec postgres psql -U postgres -c "\dn"
|
||||
```
|
||||
|
||||
### Keycloak not starting
|
||||
```bash
|
||||
# Check logs
|
||||
docker logs keycloak
|
||||
|
||||
# Ensure PostgreSQL is ready first
|
||||
docker restart keycloak
|
||||
```
|
||||
|
||||
### API Service can't connect to DB
|
||||
```bash
|
||||
# Verify network
|
||||
docker network inspect shared_data_network
|
||||
|
||||
# Check environment variables
|
||||
docker exec apiservice env | grep DB_
|
||||
```
|
||||
|
||||
### Airbyte worker issues
|
||||
```bash
|
||||
# Ensure Docker socket is mounted
|
||||
docker exec airbyte-worker ls -la /var/run/docker.sock
|
||||
|
||||
# Check Temporal
|
||||
docker logs airbyte-temporal
|
||||
```
|
||||
|
||||
## 📊 Monitoring
|
||||
|
||||
### Resource Usage
|
||||
```bash
|
||||
docker stats
|
||||
```
|
||||
|
||||
### Health Checks
|
||||
```bash
|
||||
# PostgreSQL
|
||||
docker exec postgres pg_isready -U postgres   # curl can't health-check Postgres (not HTTP)
|
||||
|
||||
# Nginx Proxy Manager
|
||||
curl -I http://localhost:81
|
||||
|
||||
# Keycloak
|
||||
curl -I http://localhost:8080
|
||||
|
||||
# API Service (via network)
|
||||
docker exec nginx-proxy-manager curl -I http://apiservice:8000/apiservice/docs
|
||||
```
|
||||
|
||||
## 🔄 Scaling
|
||||
|
||||
### Increase API Service Workers
|
||||
Edit `03-apiservice/Dockerfile`:
|
||||
```dockerfile
|
||||
CMD ["gunicorn","-k","uvicorn.workers.UvicornWorker","app.main:app","--bind","0.0.0.0:8000","--workers","4"]
|
||||
```
|
||||
|
||||
### Add Read Replicas (PostgreSQL)
|
||||
- Configure streaming replication
|
||||
- Update connection strings for read-only queries
|
||||
|
||||
## 📞 Support
|
||||
|
||||
For issues:
|
||||
1. Check logs: `docker logs <container-name>`
|
||||
2. Verify network: `docker network inspect shared_data_network`
|
||||
3. Review configuration: `.env.global`
|
||||
4. Restart specific service: `docker restart <container-name>`
|
||||
384
README-UBUNTU.md
Normal file
384
README-UBUNTU.md
Normal file
@@ -0,0 +1,384 @@
|
||||
# Sriphat Data Platform - Ubuntu Server Installation Guide
|
||||
|
||||
## 📋 System Requirements
|
||||
|
||||
- **OS**: Ubuntu Server 20.04 LTS or 22.04 LTS
|
||||
- **RAM**: Minimum 8GB (16GB recommended)
|
||||
- **Disk**: 50GB free space
|
||||
- **CPU**: 4 cores (8 cores recommended)
|
||||
- **Network**: Static IP recommended
|
||||
|
||||
## 🚀 Quick Install (Recommended)
|
||||
|
||||
### Option 1: Automated Installation
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone <repository-url> /opt/sriphat-dataplatform
|
||||
cd /opt/sriphat-dataplatform
|
||||
|
||||
# Run install script
|
||||
bash install.sh
|
||||
```
|
||||
|
||||
Script จะทำให้อัตโนมัติ:
|
||||
- ติดตั้ง Docker และ Docker Compose
|
||||
- สร้าง .env.global พร้อม random passwords
|
||||
- สร้าง backup directory
|
||||
- รัน services ทั้งหมด
|
||||
|
||||
### Option 2: Manual Installation
|
||||
|
||||
#### Step 1: Setup Ubuntu Server
|
||||
|
||||
```bash
|
||||
# Update system
|
||||
sudo apt-get update
|
||||
sudo apt-get upgrade -y
|
||||
|
||||
# Run setup script
|
||||
sudo bash setup-ubuntu.sh
|
||||
```
|
||||
|
||||
#### Step 2: Logout and Login
|
||||
|
||||
```bash
|
||||
# Logout to apply docker group permissions
|
||||
exit
|
||||
|
||||
# Login again via SSH
|
||||
ssh user@server
|
||||
```
|
||||
|
||||
#### Step 3: Configure Environment
|
||||
|
||||
```bash
|
||||
cd /opt/sriphat-dataplatform
|
||||
|
||||
# Copy and edit .env.global
|
||||
cp .env.global.example .env.global
|
||||
nano .env.global
|
||||
|
||||
# Update these values:
|
||||
# - DB_PASSWORD (strong password)
|
||||
# - KEYCLOAK_ADMIN_PASSWORD
|
||||
# - SUPERSET_SECRET_KEY
|
||||
# - ADMIN_SECRET_KEY
|
||||
# - ADMIN_PASSWORD
|
||||
```
|
||||
|
||||
#### Step 4: Start Services
|
||||
|
||||
```bash
|
||||
# Make scripts executable
|
||||
chmod +x *.sh
|
||||
chmod +x 00-network/*.sh
|
||||
|
||||
# Start all services
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
## 🔧 Post-Installation
|
||||
|
||||
### 1. Check Services Status
|
||||
|
||||
```bash
|
||||
# View running containers
|
||||
docker ps
|
||||
|
||||
# Check logs
|
||||
docker logs nginx-proxy-manager
|
||||
docker logs keycloak
|
||||
docker logs postgres
|
||||
docker logs apiservice
|
||||
```
|
||||
|
||||
### 2. Access Services
|
||||
|
||||
```bash
|
||||
# Get server IP
|
||||
hostname -I
|
||||
|
||||
# Access points:
|
||||
# - Nginx Proxy Manager: http://<server-ip>:81
|
||||
# - Keycloak: http://<server-ip>:8080
|
||||
```
|
||||
|
||||
### 3. Configure Firewall (if needed)
|
||||
|
||||
```bash
|
||||
# Check firewall status
|
||||
sudo ufw status
|
||||
|
||||
# Allow additional ports if needed
|
||||
sudo ufw allow 8088/tcp # Superset (if direct access needed)
|
||||
```
|
||||
|
||||
### 4. Setup Domain Names
|
||||
|
||||
In Nginx Proxy Manager (port 81):
|
||||
1. Add Proxy Hosts for each service
|
||||
2. Configure SSL with Let's Encrypt
|
||||
3. Point your domain DNS to server IP
|
||||
|
||||
## 📦 Directory Structure
|
||||
|
||||
```bash
|
||||
/opt/sriphat-dataplatform/ # Main directory
|
||||
├── 01-infra/
|
||||
│ └── data/postgres/ # PostgreSQL data
|
||||
├── 04-ingestion/
|
||||
│ └── data/ # Airbyte data
|
||||
├── 06-analytics/
|
||||
│ └── data/ # Superset data
|
||||
└── /backups/sriphat-data/ # Backup location
|
||||
```
|
||||
|
||||
## 🔄 Backup Setup
|
||||
|
||||
### Automatic Daily Backup
|
||||
|
||||
```bash
|
||||
# Edit crontab
|
||||
crontab -e
|
||||
|
||||
# Add this line (backup at 2 AM daily)
|
||||
0 2 * * * /opt/sriphat-dataplatform/backup-daily.sh
|
||||
|
||||
# Verify cron job
|
||||
crontab -l
|
||||
```
|
||||
|
||||
### Manual Backup
|
||||
|
||||
```bash
|
||||
# Run backup script
|
||||
bash backup-daily.sh
|
||||
|
||||
# Or backup manually
|
||||
bash stop-all.sh
|
||||
sudo tar -czf /backups/sriphat-backup-$(date +%Y%m%d).tar.gz /opt/sriphat-dataplatform
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
## 🛠️ Maintenance Commands
|
||||
|
||||
### Start/Stop Services
|
||||
|
||||
```bash
|
||||
# Start all
|
||||
bash start-all.sh
|
||||
|
||||
# Stop all
|
||||
bash stop-all.sh
|
||||
|
||||
# Restart specific service
|
||||
cd 03-apiservice
|
||||
docker compose --env-file ../.env.global restart
|
||||
```
|
||||
|
||||
### View Logs
|
||||
|
||||
```bash
|
||||
# All services
|
||||
docker compose -f 01-infra/docker-compose.yml logs -f
|
||||
|
||||
# Specific service
|
||||
docker logs -f apiservice
|
||||
docker logs -f postgres
|
||||
```
|
||||
|
||||
### Update Services
|
||||
|
||||
```bash
|
||||
# Stop services
|
||||
bash stop-all.sh
|
||||
|
||||
# Pull latest images
|
||||
docker compose -f 01-infra/docker-compose.yml pull
|
||||
docker compose -f 04-ingestion/docker-compose.yml pull
|
||||
docker compose -f 06-analytics/docker-compose.yml pull
|
||||
|
||||
# Rebuild API service
|
||||
cd 03-apiservice
|
||||
docker compose --env-file ../.env.global build --no-cache
|
||||
|
||||
# Start services
|
||||
cd ..
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
### Clean Up
|
||||
|
||||
```bash
|
||||
# Remove unused images
|
||||
docker image prune -a
|
||||
|
||||
# Remove unused volumes (careful!)
|
||||
docker volume prune
|
||||
|
||||
# Clean build cache
|
||||
docker builder prune
|
||||
```
|
||||
|
||||
## 🐛 Troubleshooting
|
||||
|
||||
### Docker Permission Denied
|
||||
|
||||
```bash
|
||||
# Add user to docker group
|
||||
sudo usermod -aG docker $USER
|
||||
|
||||
# Logout and login again
|
||||
exit
|
||||
```
|
||||
|
||||
### Port Already in Use
|
||||
|
||||
```bash
|
||||
# Check what's using the port
|
||||
sudo netstat -tulpn | grep :80
|
||||
sudo netstat -tulpn | grep :8080
|
||||
|
||||
# Kill process or change port in docker-compose.yml
|
||||
```
|
||||
|
||||
### PostgreSQL Won't Start
|
||||
|
||||
```bash
|
||||
# Check logs
|
||||
docker logs postgres
|
||||
|
||||
# Check permissions
|
||||
sudo chown -R 999:999 01-infra/data/postgres
|
||||
|
||||
# Restart
|
||||
docker restart postgres
|
||||
```
|
||||
|
||||
### Services Can't Connect to PostgreSQL
|
||||
|
||||
```bash
|
||||
# Check network
|
||||
docker network inspect shared_data_network
|
||||
|
||||
# Verify PostgreSQL is ready
|
||||
docker exec postgres pg_isready -U postgres
|
||||
|
||||
# Restart dependent services
|
||||
cd 03-apiservice
|
||||
docker compose --env-file ../.env.global restart
|
||||
```
|
||||
|
||||
### Disk Space Issues
|
||||
|
||||
```bash
|
||||
# Check disk usage
|
||||
df -h
|
||||
|
||||
# Check Docker disk usage
|
||||
docker system df
|
||||
|
||||
# Clean up
|
||||
docker system prune -a --volumes
|
||||
```
|
||||
|
||||
## 🔒 Security Hardening
|
||||
|
||||
### 1. Change Default Passwords
|
||||
|
||||
```bash
|
||||
# Edit .env.global
|
||||
nano .env.global
|
||||
|
||||
# Update all passwords
|
||||
# Restart services
|
||||
bash stop-all.sh
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
### 2. Setup SSL
|
||||
|
||||
In Nginx Proxy Manager:
|
||||
1. Add domain
|
||||
2. Request SSL certificate (Let's Encrypt)
|
||||
3. Force SSL redirect
|
||||
|
||||
### 3. Restrict Firewall
|
||||
|
||||
```bash
|
||||
# Close unnecessary ports after Nginx setup
|
||||
sudo ufw delete allow 8080/tcp # Keycloak (access via Nginx only)
|
||||
|
||||
# Allow only from specific IPs
|
||||
sudo ufw allow from 192.168.1.0/24 to any port 81
|
||||
```
|
||||
|
||||
### 4. Enable Fail2ban
|
||||
|
||||
```bash
|
||||
# Install fail2ban
|
||||
sudo apt-get install fail2ban
|
||||
|
||||
# Configure for SSH
|
||||
sudo systemctl enable fail2ban
|
||||
sudo systemctl start fail2ban
|
||||
```
|
||||
|
||||
## 📊 Monitoring
|
||||
|
||||
### System Resources
|
||||
|
||||
```bash
|
||||
# Real-time monitoring
|
||||
htop
|
||||
|
||||
# Docker stats
|
||||
docker stats
|
||||
|
||||
# Disk usage
|
||||
df -h
|
||||
du -sh /opt/sriphat-dataplatform/*
|
||||
```
|
||||
|
||||
### Service Health
|
||||
|
||||
```bash
|
||||
# Check all containers
|
||||
docker ps -a
|
||||
|
||||
# Check specific service health
|
||||
docker inspect --format='{{.State.Health.Status}}' postgres
|
||||
```
|
||||
|
||||
## 🔄 Migration from Windows
|
||||
|
||||
If migrating from Windows development:
|
||||
|
||||
```bash
|
||||
# 1. Backup data on Windows
|
||||
# (use backup-daily.ps1)
|
||||
|
||||
# 2. Copy backup to Ubuntu
|
||||
scp backup-*.zip user@ubuntu-server:/tmp/
|
||||
|
||||
# 3. Extract on Ubuntu
|
||||
cd /opt/sriphat-dataplatform
|
||||
unzip /tmp/backup-*.zip
|
||||
|
||||
# 4. Fix permissions
|
||||
sudo chown -R $USER:$USER .
|
||||
sudo chown -R 999:999 01-infra/data/postgres
|
||||
|
||||
# 5. Start services
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
## 📞 Support
|
||||
|
||||
For issues:
|
||||
1. Check logs: `docker logs <container-name>`
|
||||
2. Verify network: `docker network inspect shared_data_network`
|
||||
3. Check disk space: `df -h`
|
||||
4. Review firewall: `sudo ufw status`
|
||||
5. Consult DEPLOYMENT.md for detailed troubleshooting
|
||||
75
README.md
75
README.md
@@ -1,27 +1,78 @@
|
||||
# apiservice
|
||||
# Sriphat Hospital Data Platform
|
||||
|
||||
## Run
|
||||
Modern Data Stack สำหรับโรงพยาบาลศรีพัฒน์ ประกอบด้วย:
|
||||
|
||||
1. Copy env
|
||||
- **Nginx Proxy Manager** - Gateway + SSL
|
||||
- **Keycloak** - Single Sign-On (SSO)
|
||||
- **PostgreSQL** - Data Warehouse
|
||||
- **API Service** - Custom FastAPI endpoints
|
||||
- **Airbyte** - Data Ingestion
|
||||
- **Apache Superset** - Business Intelligence
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Ubuntu Server (Production)
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# Quick install (recommended)
|
||||
bash install.sh
|
||||
|
||||
# Or manual setup
|
||||
sudo bash setup-ubuntu.sh
|
||||
# (logout/login, then continue)
|
||||
bash start-all.sh
|
||||
```
|
||||
|
||||
2. Update DB connection env values
|
||||
See **[README-UBUNTU.md](README-UBUNTU.md)** for detailed Ubuntu installation guide.
|
||||
|
||||
3. Start
|
||||
### Development/Windows
|
||||
|
||||
```bash
|
||||
docker compose up --build
|
||||
# 1. Configure environment
|
||||
notepad .env.global
|
||||
|
||||
# 2. Start all services
|
||||
bash start-all.sh
|
||||
|
||||
# 3. Access services
|
||||
# - Nginx Proxy Manager: http://localhost:81
|
||||
# - Keycloak: http://localhost:8080
|
||||
# - API Service: http://localhost/apiservice
|
||||
```
|
||||
|
||||
## Base path
|
||||
## 📁 Project Structure
|
||||
|
||||
Set `ROOT_PATH=/apiservice` when running behind reverse proxy.
|
||||
```
|
||||
├── 00-network/ # Shared Docker network
|
||||
├── 01-infra/ # Nginx + Keycloak + PostgreSQL
|
||||
├── 03-apiservice/ # Custom FastAPI service
|
||||
├── 04-ingestion/ # Airbyte ETL
|
||||
├── 06-analytics/ # Apache Superset
|
||||
├── .env.global # Global configuration
|
||||
├── start-all.sh # Start all services
|
||||
├── stop-all.sh # Stop all services
|
||||
└── DEPLOYMENT.md # Full deployment guide
|
||||
```
|
||||
|
||||
## Permissions
|
||||
## 📖 Documentation
|
||||
|
||||
The checkpoint endpoint requires permission:
|
||||
- **[DEPLOYMENT.md](DEPLOYMENT.md)** - Complete deployment guide
|
||||
- **[tech_stack.md](tech_stack.md)** - Architecture blueprint
|
||||
- **[01-infra/README.md](01-infra/README.md)** - Infrastructure layer
|
||||
- **[03-apiservice/README.md](03-apiservice/README.md)** - API service details
|
||||
- **[04-ingestion/README.md](04-ingestion/README.md)** - Airbyte setup
|
||||
- **[06-analytics/README.md](06-analytics/README.md)** - Superset configuration
|
||||
|
||||
- `feed.checkpoint:write`
|
||||
## 🔒 Security
|
||||
|
||||
All services communicate via `shared_data_network` and are exposed through Nginx Proxy Manager only. Keycloak provides centralized authentication (SSO) for all components.
|
||||
|
||||
## 📊 API Service
|
||||
|
||||
Custom FastAPI service with:
|
||||
- Admin UI for managing API keys
|
||||
- Permission-based access control
|
||||
- Integration with PostgreSQL schemas (fastapi, operationbi)
|
||||
- Endpoint: `POST /api/v1/feed/checkpoint`
|
||||
|
||||
Required permission: `feed.checkpoint:write`
|
||||
|
||||
@@ -32,7 +32,11 @@ class ApiClient(Base):
|
||||
name: Mapped[str] = mapped_column(String(128), unique=True, nullable=False)
|
||||
is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True)
|
||||
|
||||
api_keys: Mapped[list[ApiKey]] = relationship(back_populates="client")
|
||||
api_keys: Mapped[list[ApiKey]] = relationship(
|
||||
back_populates="client",
|
||||
cascade="all, delete-orphan",
|
||||
passive_deletes=True,
|
||||
)
|
||||
|
||||
|
||||
class ApiKey(Base):
|
||||
@@ -40,7 +44,9 @@ class ApiKey(Base):
|
||||
__table_args__ = {"schema": "fastapi"}
|
||||
|
||||
id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
|
||||
client_id: Mapped[int] = mapped_column(ForeignKey("fastapi.api_client.id"), nullable=False)
|
||||
client_id: Mapped[int] = mapped_column(
|
||||
ForeignKey("fastapi.api_client.id", ondelete="CASCADE"), nullable=False
|
||||
)
|
||||
name: Mapped[str | None] = mapped_column(String(128), nullable=True)
|
||||
|
||||
key_prefix: Mapped[str] = mapped_column(String(12), nullable=False)
|
||||
|
||||
82
backup-daily.ps1
Normal file
82
backup-daily.ps1
Normal file
@@ -0,0 +1,82 @@
|
||||
# Sriphat Data Platform - Daily Backup Script (Windows)
|
||||
# ตั้งเวลารันใน Task Scheduler ทุกวันเวลา 02:00
|
||||
|
||||
$ProjectPath = "E:\git3\sriphat-dataplatform"
|
||||
$BackupPath = "E:\backups\sriphat-data"
|
||||
$Date = Get-Date -Format "yyyyMMdd-HHmmss"
|
||||
$BackupFolder = "$BackupPath\$Date"
|
||||
|
||||
Write-Host "=== Sriphat Data Platform Backup Started ===" -ForegroundColor Green
|
||||
Write-Host "Date: $Date"
|
||||
Write-Host "Backup Location: $BackupFolder"
|
||||
Write-Host ""
|
||||
|
||||
# สร้างโฟลเดอร์ backup
|
||||
New-Item -ItemType Directory -Path $BackupFolder -Force | Out-Null
|
||||
|
||||
# Backup PostgreSQL
|
||||
Write-Host "[1/5] Backing up PostgreSQL database..." -ForegroundColor Yellow
|
||||
docker exec postgres pg_dumpall -U postgres | Out-File "$BackupFolder\postgres.sql" -Encoding UTF8
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
Write-Host "✓ PostgreSQL backup completed" -ForegroundColor Green
|
||||
} else {
|
||||
Write-Host "✗ PostgreSQL backup failed" -ForegroundColor Red
|
||||
}
|
||||
|
||||
# Backup 01-infra data
|
||||
Write-Host "[2/5] Backing up Infrastructure data..." -ForegroundColor Yellow
|
||||
robocopy "$ProjectPath\01-infra\data" "$BackupFolder\01-infra-data" /MIR /R:3 /W:5 /NFL /NDL /NJH /NJS | Out-Null
|
||||
robocopy "$ProjectPath\01-infra\letsencrypt" "$BackupFolder\01-infra-letsencrypt" /MIR /R:3 /W:5 /NFL /NDL /NJH /NJS | Out-Null
|
||||
Write-Host "✓ Infrastructure backup completed" -ForegroundColor Green
|
||||
|
||||
# Backup 04-ingestion data
|
||||
Write-Host "[3/5] Backing up Airbyte data..." -ForegroundColor Yellow
|
||||
robocopy "$ProjectPath\04-ingestion\data" "$BackupFolder\04-ingestion-data" /MIR /R:3 /W:5 /NFL /NDL /NJH /NJS | Out-Null
|
||||
Write-Host "✓ Airbyte backup completed" -ForegroundColor Green
|
||||
|
||||
# Backup 06-analytics data
|
||||
Write-Host "[4/5] Backing up Superset data..." -ForegroundColor Yellow
|
||||
robocopy "$ProjectPath\06-analytics\data" "$BackupFolder\06-analytics-data" /MIR /R:3 /W:5 /NFL /NDL /NJH /NJS | Out-Null
|
||||
Write-Host "✓ Superset backup completed" -ForegroundColor Green
|
||||
|
||||
# Backup config files
|
||||
Write-Host "[5/5] Backing up configuration files..." -ForegroundColor Yellow
|
||||
Copy-Item "$ProjectPath\.env.global" "$BackupFolder\.env.global" -Force
|
||||
Copy-Item "$ProjectPath\06-analytics\superset_config.py" "$BackupFolder\superset_config.py" -Force
|
||||
Write-Host "✓ Configuration backup completed" -ForegroundColor Green
|
||||
|
||||
# Compress backup
|
||||
Write-Host ""
|
||||
Write-Host "Compressing backup..." -ForegroundColor Yellow
|
||||
Compress-Archive -Path $BackupFolder -DestinationPath "$BackupPath\backup-$Date.zip" -Force
|
||||
$BackupSize = (Get-Item "$BackupPath\backup-$Date.zip").Length / 1MB
|
||||
Write-Host "✓ Backup compressed: backup-$Date.zip ($([math]::Round($BackupSize, 2)) MB)" -ForegroundColor Green
|
||||
|
||||
# ลบโฟลเดอร์ที่ยังไม่ compress
|
||||
Remove-Item -Path $BackupFolder -Recurse -Force
|
||||
|
||||
# ลบ backup เก่า (เก็บไว้ 30 วัน)
|
||||
Write-Host ""
|
||||
Write-Host "Cleaning old backups (keeping last 30 days)..." -ForegroundColor Yellow
|
||||
$OldBackups = Get-ChildItem $BackupPath -Filter "backup-*.zip" | Where-Object {$_.LastWriteTime -lt (Get-Date).AddDays(-30)}
|
||||
if ($OldBackups) {
|
||||
$OldBackups | ForEach-Object {
|
||||
Write-Host " Removing: $($_.Name)" -ForegroundColor Gray
|
||||
Remove-Item $_.FullName -Force
|
||||
}
|
||||
Write-Host "✓ Removed $($OldBackups.Count) old backup(s)" -ForegroundColor Green
|
||||
} else {
|
||||
Write-Host "✓ No old backups to remove" -ForegroundColor Green
|
||||
}
|
||||
|
||||
# Summary
|
||||
Write-Host ""
|
||||
Write-Host "=== Backup Completed Successfully ===" -ForegroundColor Green
|
||||
Write-Host "Backup file: backup-$Date.zip"
|
||||
Write-Host "Size: $([math]::Round($BackupSize, 2)) MB"
|
||||
Write-Host "Location: $BackupPath"
|
||||
Write-Host ""
|
||||
|
||||
# Log to file
|
||||
$LogFile = "$BackupPath\backup.log"
|
||||
"$Date - Backup completed successfully - Size: $([math]::Round($BackupSize, 2)) MB" | Out-File $LogFile -Append
|
||||
74
backup-daily.sh
Normal file
74
backup-daily.sh
Normal file
@@ -0,0 +1,74 @@
|
||||
#!/bin/bash
|
||||
# Sriphat Data Platform - Daily Backup Script (Linux/Ubuntu)
|
||||
# Add to crontab: 0 2 * * * /opt/sriphat-dataplatform/backup-daily.sh
|
||||
|
||||
set -e
|
||||
|
||||
# Get script directory
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_PATH="$SCRIPT_DIR"
|
||||
BACKUP_DIR="/backups/sriphat-data"
|
||||
DATE=$(date +%Y%m%d-%H%M%S)
|
||||
BACKUP_FOLDER="$BACKUP_DIR/$DATE"
|
||||
|
||||
echo "=== Sriphat Data Platform Backup Started ==="
|
||||
echo "Date: $DATE"
|
||||
echo "Backup Location: $BACKUP_FOLDER"
|
||||
echo ""
|
||||
|
||||
# Create backup directory
|
||||
mkdir -p "$BACKUP_FOLDER"
|
||||
|
||||
# Backup PostgreSQL
|
||||
echo "[1/5] Backing up PostgreSQL database..."
|
||||
docker exec postgres pg_dumpall -U postgres | gzip > "$BACKUP_FOLDER/postgres.sql.gz"
|
||||
echo "✓ PostgreSQL backup completed"
|
||||
|
||||
# Backup 01-infra data
|
||||
echo "[2/5] Backing up Infrastructure data..."
|
||||
tar -czf "$BACKUP_FOLDER/01-infra-data.tar.gz" -C "$PROJECT_PATH" 01-infra/data 01-infra/letsencrypt
|
||||
echo "✓ Infrastructure backup completed"
|
||||
|
||||
# Backup 04-ingestion data
|
||||
echo "[3/5] Backing up Airbyte data..."
|
||||
tar -czf "$BACKUP_FOLDER/04-ingestion-data.tar.gz" -C "$PROJECT_PATH" 04-ingestion/data
|
||||
echo "✓ Airbyte backup completed"
|
||||
|
||||
# Backup 06-analytics data
|
||||
echo "[4/5] Backing up Superset data..."
|
||||
tar -czf "$BACKUP_FOLDER/06-analytics-data.tar.gz" -C "$PROJECT_PATH" 06-analytics/data
|
||||
echo "✓ Superset backup completed"
|
||||
|
||||
# Backup config files
|
||||
echo "[5/5] Backing up configuration files..."
|
||||
cp "$PROJECT_PATH/.env.global" "$BACKUP_FOLDER/.env.global"
|
||||
cp "$PROJECT_PATH/06-analytics/superset_config.py" "$BACKUP_FOLDER/superset_config.py"
|
||||
echo "✓ Configuration backup completed"
|
||||
|
||||
# Create final archive
|
||||
echo ""
|
||||
echo "Creating final backup archive..."
|
||||
cd "$BACKUP_DIR"
|
||||
tar -czf "backup-$DATE.tar.gz" "$DATE"
|
||||
BACKUP_SIZE=$(du -h "backup-$DATE.tar.gz" | cut -f1)
|
||||
echo "✓ Backup compressed: backup-$DATE.tar.gz ($BACKUP_SIZE)"
|
||||
|
||||
# Remove uncompressed folder
|
||||
rm -rf "$BACKUP_FOLDER"
|
||||
|
||||
# Clean old backups (keep 30 days)
|
||||
echo ""
|
||||
echo "Cleaning old backups (keeping last 30 days)..."
|
||||
find "$BACKUP_DIR" -name "backup-*.tar.gz" -mtime +30 -delete
|
||||
echo "✓ Old backups cleaned"
|
||||
|
||||
# Summary
|
||||
echo ""
|
||||
echo "=== Backup Completed Successfully ==="
|
||||
echo "Backup file: backup-$DATE.tar.gz"
|
||||
echo "Size: $BACKUP_SIZE"
|
||||
echo "Location: $BACKUP_DIR"
|
||||
echo ""
|
||||
|
||||
# Log
|
||||
echo "$DATE - Backup completed successfully - Size: $BACKUP_SIZE" >> "$BACKUP_DIR/backup.log"
|
||||
113
install.sh
Normal file
113
install.sh
Normal file
@@ -0,0 +1,113 @@
|
||||
#!/bin/bash
|
||||
# Sriphat Data Platform - Quick Install Script for Ubuntu Server
|
||||
|
||||
set -e
|
||||
|
||||
echo "=== Sriphat Data Platform - Quick Install ==="
|
||||
echo ""
|
||||
|
||||
# Check if running as root
|
||||
if [ "$EUID" -eq 0 ]; then
|
||||
echo "Please run as normal user (not root)"
|
||||
echo "The script will ask for sudo password when needed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get script directory
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
cd "$SCRIPT_DIR"
|
||||
|
||||
# Check if Docker is installed
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "Docker is not installed. Running setup script..."
|
||||
sudo bash setup-ubuntu.sh
|
||||
echo ""
|
||||
echo "Please logout and login again, then run this script again."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check if user is in docker group
|
||||
if ! groups | grep -q docker; then
|
||||
echo "Your user is not in the docker group."
|
||||
echo "Adding you to docker group..."
|
||||
sudo usermod -aG docker $USER
|
||||
echo ""
|
||||
echo "Please logout and login again, then run this script again."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Create .env.global if not exists
|
||||
if [ ! -f .env.global ]; then
|
||||
echo "Creating .env.global from template..."
|
||||
cp .env.global .env.global.backup 2>/dev/null || true
|
||||
|
||||
# Generate random secrets
|
||||
POSTGRES_PASS=$(openssl rand -base64 32)
|
||||
KEYCLOAK_PASS=$(openssl rand -base64 32)
|
||||
SUPERSET_SECRET=$(openssl rand -base64 32)
|
||||
ADMIN_SECRET=$(openssl rand -base64 32)
|
||||
|
||||
cat > .env.global << EOF
|
||||
PROJECT_NAME=sriphat-data
|
||||
DOMAIN=sriphat.local
|
||||
TZ=Asia/Bangkok
|
||||
|
||||
DB_HOST=postgres
|
||||
DB_PORT=5432
|
||||
DB_USER=postgres
|
||||
DB_PASSWORD=$POSTGRES_PASS
|
||||
DB_NAME=postgres
|
||||
DB_SSLMODE=prefer
|
||||
|
||||
POSTGRES_PASSWORD=$POSTGRES_PASS
|
||||
|
||||
KEYCLOAK_ADMIN=admin
|
||||
KEYCLOAK_ADMIN_PASSWORD=$KEYCLOAK_PASS
|
||||
|
||||
SUPERSET_SECRET_KEY=$SUPERSET_SECRET
|
||||
SUPERSET_ADMIN_USERNAME=admin
|
||||
SUPERSET_ADMIN_PASSWORD=admin
|
||||
|
||||
ROOT_PATH=/apiservice
|
||||
APP_NAME=APIsService
|
||||
ADMIN_SECRET_KEY=$ADMIN_SECRET
|
||||
ADMIN_USERNAME=admin
|
||||
ADMIN_PASSWORD=admin
|
||||
EOF
|
||||
|
||||
echo "✓ Created .env.global with random passwords"
|
||||
echo ""
|
||||
echo "IMPORTANT: Save these credentials!"
|
||||
echo "Keycloak Admin Password: $KEYCLOAK_PASS"
|
||||
echo "PostgreSQL Password: $POSTGRES_PASS"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Create backup directory
|
||||
echo "Creating backup directory..."
|
||||
sudo mkdir -p /backups/sriphat-data
|
||||
sudo chown $USER:$USER /backups/sriphat-data
|
||||
|
||||
# Make scripts executable
|
||||
echo "Making scripts executable..."
|
||||
chmod +x *.sh
|
||||
chmod +x 00-network/*.sh
|
||||
|
||||
# Start services
|
||||
echo ""
|
||||
echo "Starting all services..."
|
||||
bash start-all.sh
|
||||
|
||||
echo ""
|
||||
echo "=== Installation Completed! ==="
|
||||
echo ""
|
||||
echo "Services are starting up. Wait 30-60 seconds, then access:"
|
||||
echo "- Nginx Proxy Manager: http://$(hostname -I | awk '{print $1}'):81"
|
||||
echo "- Keycloak: http://$(hostname -I | awk '{print $1}'):8080"
|
||||
echo ""
|
||||
echo "Default credentials are in .env.global"
|
||||
echo ""
|
||||
echo "To setup automatic backup:"
|
||||
echo " crontab -e"
|
||||
echo " Add: 0 2 * * * $SCRIPT_DIR/backup-daily.sh"
|
||||
echo ""
|
||||
111
setup-ubuntu.sh
Normal file
111
setup-ubuntu.sh
Normal file
@@ -0,0 +1,111 @@
|
||||
#!/bin/bash
|
||||
# Sriphat Data Platform - Ubuntu Server Setup Script
|
||||
# Run as root or with sudo
|
||||
|
||||
set -e
|
||||
|
||||
echo "=== Sriphat Data Platform - Ubuntu Server Setup ==="
|
||||
echo ""
|
||||
|
||||
# Check if running as root
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
echo "Please run as root or with sudo"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Update system
|
||||
echo "[1/6] Updating system packages..."
|
||||
apt-get update
|
||||
apt-get upgrade -y
|
||||
|
||||
# Install Docker
|
||||
echo "[2/6] Installing Docker..."
|
||||
if ! command -v docker &> /dev/null; then
|
||||
# Remove old versions
|
||||
apt-get remove -y docker docker-engine docker.io containerd runc 2>/dev/null || true
|
||||
|
||||
# Install dependencies
|
||||
apt-get install -y \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg \
|
||||
lsb-release
|
||||
|
||||
# Add Docker's official GPG key
|
||||
install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
|
||||
chmod a+r /etc/apt/keyrings/docker.gpg
|
||||
|
||||
# Set up repository
|
||||
echo \
|
||||
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
|
||||
# Install Docker Engine
|
||||
apt-get update
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
|
||||
# Start and enable Docker
|
||||
systemctl start docker
|
||||
systemctl enable docker
|
||||
|
||||
echo "✓ Docker installed successfully"
|
||||
else
|
||||
echo "✓ Docker already installed"
|
||||
fi
|
||||
|
||||
# Install Docker Compose (standalone - backup)
|
||||
echo "[3/6] Installing Docker Compose standalone..."
|
||||
if ! command -v docker-compose &> /dev/null; then
|
||||
DOCKER_COMPOSE_VERSION=$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep 'tag_name' | cut -d\" -f4)
|
||||
curl -L "https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
|
||||
chmod +x /usr/local/bin/docker-compose
|
||||
echo "✓ Docker Compose installed: $DOCKER_COMPOSE_VERSION"
|
||||
else
|
||||
echo "✓ Docker Compose already installed"
|
||||
fi
|
||||
|
||||
# Add current user to docker group (if not root)
|
||||
echo "[4/6] Configuring Docker permissions..."
|
||||
if [ -n "$SUDO_USER" ]; then
|
||||
usermod -aG docker $SUDO_USER
|
||||
echo "✓ Added $SUDO_USER to docker group (logout and login to apply)"
|
||||
fi
|
||||
|
||||
# Install additional tools
|
||||
echo "[5/6] Installing additional tools..."
|
||||
apt-get install -y \
|
||||
git \
|
||||
curl \
|
||||
wget \
|
||||
vim \
|
||||
htop \
|
||||
net-tools \
|
||||
ufw
|
||||
|
||||
# Configure firewall
|
||||
echo "[6/6] Configuring firewall..."
|
||||
ufw --force enable
|
||||
ufw allow 22/tcp # SSH
|
||||
ufw allow 80/tcp # HTTP
|
||||
ufw allow 443/tcp # HTTPS
|
||||
ufw allow 81/tcp # Nginx Proxy Manager Admin
|
||||
ufw allow 8080/tcp # Keycloak (optional - can be removed after Nginx setup)
|
||||
ufw status
|
||||
|
||||
echo ""
|
||||
echo "=== Setup Completed Successfully ==="
|
||||
echo ""
|
||||
echo "Docker version: $(docker --version)"
|
||||
echo "Docker Compose version: $(docker compose version)"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo "1. Logout and login again (to apply docker group permissions)"
|
||||
echo "2. Clone/copy the sriphat-dataplatform project"
|
||||
echo "3. Configure .env.global"
|
||||
echo "4. Run: bash start-all.sh"
|
||||
echo ""
|
||||
echo "Optional: Setup automatic backup"
|
||||
echo " sudo crontab -e"
|
||||
echo " Add: 0 2 * * * /opt/sriphat-dataplatform/backup-daily.sh"
|
||||
echo ""
|
||||
56
start-all.sh
Normal file
56
start-all.sh
Normal file
@@ -0,0 +1,56 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
echo "=== Sriphat Data Platform Startup ==="
|
||||
echo ""
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
echo "[1/7] Creating shared network..."
|
||||
cd 00-network
|
||||
bash create-network.sh
|
||||
cd ..
|
||||
|
||||
echo "[2/7] Starting Infrastructure (Nginx + Keycloak + PostgreSQL)..."
|
||||
cd 01-infra
|
||||
docker compose --env-file ../.env.global up -d
|
||||
cd ..
|
||||
|
||||
echo "Waiting for PostgreSQL to be ready..."
|
||||
sleep 10
|
||||
|
||||
echo "[3/7] Creating databases for Airbyte and Superset..."
|
||||
docker exec postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'airbyte'" | grep -q 1 || docker exec postgres psql -U postgres -c "CREATE DATABASE airbyte;"
|
||||
docker exec postgres psql -U postgres -tc "SELECT 1 FROM pg_database WHERE datname = 'superset'" | grep -q 1 || docker exec postgres psql -U postgres -c "CREATE DATABASE superset;"
|
||||
|
||||
echo "[4/7] Starting API Service..."
|
||||
cd 03-apiservice
|
||||
docker compose --env-file ../.env.global up --build -d
|
||||
cd ..
|
||||
|
||||
echo "[5/7] Starting Airbyte (Data Ingestion)..."
|
||||
cd 04-ingestion
|
||||
docker compose --env-file ../.env.global up -d
|
||||
cd ..
|
||||
|
||||
echo "[6/7] Starting Superset (Analytics)..."
|
||||
cd 06-analytics
|
||||
docker compose --env-file ../.env.global up -d
|
||||
cd ..
|
||||
|
||||
echo ""
|
||||
echo "=== All services started! ==="
|
||||
echo ""
|
||||
echo "Access points:"
|
||||
echo "- Nginx Proxy Manager: http://localhost:81"
|
||||
echo "- Keycloak Admin: http://localhost:8080"
|
||||
echo "- API Service: http://localhost/apiservice (via Nginx)"
|
||||
echo "- Airbyte: http://localhost/airbyte (configure in Nginx)"
|
||||
echo "- Superset: http://localhost/superset (configure in Nginx)"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo "1. Configure domains in Nginx Proxy Manager (port 81)"
|
||||
echo "2. Setup Keycloak realm and clients"
|
||||
echo "3. Configure Airbyte sources/destinations"
|
||||
echo "4. Setup Superset dashboards"
|
||||
31
stop-all.sh
Normal file
31
stop-all.sh
Normal file
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
echo "=== Stopping Sriphat Data Platform ==="
|
||||
echo ""
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
echo "[1/4] Stopping Analytics..."
|
||||
cd 06-analytics
|
||||
docker compose down
|
||||
cd ..
|
||||
|
||||
echo "[2/4] Stopping Ingestion..."
|
||||
cd 04-ingestion
|
||||
docker compose down
|
||||
cd ..
|
||||
|
||||
echo "[3/4] Stopping API Service..."
|
||||
cd 03-apiservice
|
||||
docker compose down
|
||||
cd ..
|
||||
|
||||
echo "[4/4] Stopping Infrastructure..."
|
||||
cd 01-infra
|
||||
docker compose down
|
||||
cd ..
|
||||
|
||||
echo ""
|
||||
echo "=== All services stopped ==="
|
||||
165
tech_stack.md
Normal file
165
tech_stack.md
Normal file
@@ -0,0 +1,165 @@
|
||||
Sriphat Hospital Data Platform Blueprint
|
||||
|
||||
พิมพ์เขียวชุดนี้ออกแบบมาเพื่อสร้างระบบ Data Platform ที่ทันสมัย (Modern Data Stack) โดยเน้นความปลอดภัย (Security), การรองรับข้อมูลหลายรูปแบบ (Versatility), และการเชื่อมต่อแบบ Single Sign-On (SSO)
|
||||
|
||||
🏗️ 1. Architecture Overview (Tech Stack)
|
||||
|
||||
เราใช้แนวคิดแบบ "Modular Architecture" ผ่าน Docker Compose เพื่อให้ระบบยืดหยุ่นและดูแลรักษาง่าย
|
||||
|
||||
Layer
|
||||
|
||||
Tools
|
||||
|
||||
Functionality
|
||||
|
||||
Gateway
|
||||
|
||||
Nginx Proxy Manager
|
||||
|
||||
จัดการ Domain, SSL (HTTPS) และทางเข้า Service ทั้งหมด
|
||||
|
||||
Identity (SSO)
|
||||
|
||||
Keycloak
|
||||
|
||||
ระบบยืนยันตัวตนกลาง (OIDC/OAuth2) รองรับ LDAP/AD โรงพยาบาล
|
||||
|
||||
Ingestion
|
||||
|
||||
Airbyte
|
||||
|
||||
ดึงข้อมูลจาก SQL Server, Oracle, REST API, Excel, CSV
|
||||
|
||||
Warehouse
|
||||
|
||||
Supabase (PostgreSQL)
|
||||
|
||||
จัดเก็บข้อมูล ประมวลผล และสร้าง API อัตโนมัติ (PostgREST)
|
||||
|
||||
Transformation
|
||||
|
||||
dbt (data build tool)
|
||||
|
||||
จัดการ Logic การแปลงข้อมูลดิบให้เป็นข้อมูลพร้อมใช้ด้วย SQL
|
||||
|
||||
BI Layer
|
||||
|
||||
Apache Superset
|
||||
|
||||
สร้าง Dashboard และ Visualization เชื่อมต่อ SSO กับ Keycloak
|
||||
|
||||
📂 2. Project Folder Structure
|
||||
|
||||
การแยกโฟลเดอร์ช่วยให้การ Update และจัดการ Resource ทำได้ง่าย (Isolation)
|
||||
|
||||
sriphat-data-stack/
|
||||
├── .env # ไฟล์รวมรหัสผ่านและค่า Config ทั้งหมด (สำคัญมาก)
|
||||
├── start-all.sh # สคริปต์สำหรับสั่งรันทุกโฟลเดอร์พร้อมกัน
|
||||
├── 01-infra/ # Nginx Proxy Manager และ Keycloak
|
||||
│ └── docker-compose.yml
|
||||
├── 02-storage/ # Supabase (Postgres, Studio, PostgREST)
|
||||
│ └── docker-compose.yml
|
||||
├── 03-ingestion/ # Airbyte
|
||||
│ └── docker-compose.yml
|
||||
└── 04-analytics/ # Apache Superset
|
||||
└── docker-compose.yml
|
||||
|
||||
|
||||
🔑 3. Global Environment Variables (.env)
|
||||
|
||||
ใช้ไฟล์นี้ไฟล์เดียวเพื่อคุมความลับทั้งระบบ (Single Source of Truth)
|
||||
|
||||
# --- GENERAL ---
|
||||
PROJECT_NAME=sriphat-data
|
||||
DOMAIN=sriphat.local
|
||||
|
||||
# --- DATABASE (Supabase) ---
|
||||
DB_PASSWORD=Secure_Hospital_Pass_2026
|
||||
JWT_SECRET=long-random-string-for-supabase-security
|
||||
# กำหนด Schema ที่จะให้ API เข้าถึงได้
|
||||
PGRST_DBSCHEMAS=public,raw_data,analytics
|
||||
|
||||
# --- AUTH (Keycloak) ---
|
||||
KEYCLOAK_ADMIN=admin
|
||||
KEYCLOAK_ADMIN_PASSWORD=admin_secret_pass
|
||||
|
||||
# --- BI (Superset) ---
|
||||
SUPERSET_SECRET_KEY=another-random-string
|
||||
|
||||
|
||||
🛠️ 4. Docker Compose Samples
|
||||
|
||||
01-Infra: Authentication & Gateway
|
||||
|
||||
# 01-infra/docker-compose.yml
|
||||
services:
|
||||
nginx-proxy:
|
||||
image: jc21/nginx-proxy-manager:latest
|
||||
ports: ['80:80', '443:443', '81:81']
|
||||
volumes: ['./data:/data', './letsencrypt:/etc/letsencrypt']
|
||||
networks: ['shared_data_network']
|
||||
|
||||
keycloak:
|
||||
image: quay.io/keycloak/keycloak:latest
|
||||
command: start-dev
|
||||
environment:
|
||||
KC_BOOTSTRAP_ADMIN_USERNAME: ${KEYCLOAK_ADMIN}
|
||||
KC_BOOTSTRAP_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
|
||||
networks: ['shared_data_network']
|
||||
|
||||
networks:
|
||||
shared_data_network:
|
||||
external: true
|
||||
|
||||
|
||||
02-Storage: Supabase Layer (Core)
|
||||
|
||||
# 02-storage/docker-compose.yml
|
||||
services:
|
||||
db:
|
||||
image: supabase/postgres:15.1.0.117
|
||||
environment:
|
||||
POSTGRES_PASSWORD: ${DB_PASSWORD}
|
||||
volumes: ['./data:/var/lib/postgresql/data']
|
||||
networks: ['shared_data_network']
|
||||
|
||||
rest-api:
|
||||
image: postgrest/postgrest:v10.1.1
|
||||
environment:
|
||||
PGRST_DB_URI: postgres://postgres:${DB_PASSWORD}@db:5432/postgres
|
||||
PGRST_DB_SCHEMAS: ${PGRST_DBSCHEMAS}
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
networks: ['shared_data_network']
|
||||
|
||||
networks:
|
||||
shared_data_network:
|
||||
external: true
|
||||
|
||||
|
||||
🔒 5. Security Strategy (Hospital Standard)
|
||||
|
||||
Centralized Auth (SSO): ผู้ใช้ล็อกอินผ่าน Keycloak เพียงที่เดียว เพื่อเข้าถึง Superset และดูข้อมูลใน Supabase
|
||||
|
||||
Schema Separation:
|
||||
|
||||
raw_data: เก็บข้อมูลดิบจาก Airbyte (จำกัดสิทธิ์สูงสุด)
|
||||
|
||||
analytics: เก็บข้อมูลที่คลีนแล้วสำหรับ Superset (Read-only for BI)
|
||||
|
||||
Row-Level Security (RLS): ใช้ฟีเจอร์ของ PostgreSQL ใน Supabase เพื่อกำหนดให้ "แพทย์แผนก A เห็นได้เฉพาะคนไข้แผนก A" แม้จะอยู่ในตารางเดียวกัน
|
||||
|
||||
Network Isolation: ทุก Service ทำงานใน shared_data_network และเปิดออกภายนอกผ่าน Nginx Proxy Manager เท่านั้น
|
||||
|
||||
🚀 6. Steps to Launch
|
||||
|
||||
เตรียม Network: docker network create shared_data_network
|
||||
|
||||
เตรียม Folder: สร้างโฟลเดอร์และไฟล์ตามโครงสร้างด้านบน
|
||||
|
||||
รัน Infra: เข้าไปที่ 01-infra แล้วสั่ง docker-compose up -d
|
||||
|
||||
ตั้งค่า Keycloak: สร้าง Realm และ Client สำหรับ Superset/Supabase
|
||||
|
||||
รัน Storage & Analytics: รันโฟลเดอร์ 02, 03 และ 04 ตามลำดับ
|
||||
|
||||
Config Proxy: ใน Nginx Proxy Manager ให้ชี้ Domain ไปที่ IP/Port ของแต่ละ Service
|
||||
Reference in New Issue
Block a user