diff --git a/.gitignore b/.gitignore index b45eb4e2..2eabf49c 100644 --- a/.gitignore +++ b/.gitignore @@ -24,5 +24,10 @@ www/.env.production .secrets opencode.json +certs/ +docker-compose.ca.yml +docker-compose.gpu-ca.yml +Caddyfile.gpu-host +.env.gpu-host vibedocs/ server/tests/integration/logs/ diff --git a/CLAUDE.md b/CLAUDE.md index 64bf5a3b..3d534efd 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -193,6 +193,11 @@ Modal.com integration for scalable ML processing: If you need to do any worker/pipeline related work, search for "Pipeline" classes and their "create" or "build" methods to find the main processor sequence. Look for task orchestration patterns (like "chord", "group", or "chain") to identify the post-processing flow with parallel execution chains. This will give you abstract vision on how processing pipeling is organized. +## Documentation + +- New documentation files go in `docsv2/`, not in `docs/docs/`. +- Existing `docs/` directory contains legacy Docusaurus docs. + ## Code Style - Always put imports at the top of the file. Let ruff/pre-commit handle sorting and formatting of imports. diff --git a/docker-compose.gpu-host.yml b/docker-compose.gpu-host.yml new file mode 100644 index 00000000..be08ba03 --- /dev/null +++ b/docker-compose.gpu-host.yml @@ -0,0 +1,106 @@ +# Standalone GPU host for Reflector — transcription, diarization, translation. +# +# Usage: ./scripts/setup-gpu-host.sh [--domain DOMAIN] [--custom-ca PATH] [--api-key KEY] [--cpu] +# or: docker compose -f docker-compose.gpu-host.yml --profile gpu [--profile caddy] up -d +# +# Processing mode (pick ONE — mutually exclusive, both bind port 8000): +# --profile gpu NVIDIA GPU container (requires nvidia-container-toolkit) +# --profile cpu CPU-only container (no GPU required, slower) +# +# Optional: +# --profile caddy Caddy reverse proxy with HTTPS +# +# This file is checked into the repo. 
The setup script generates: +# - .env.gpu-host (HF_TOKEN, API key, port config) +# - Caddyfile.gpu-host (Caddy config, only with --domain) +# - docker-compose.gpu-ca.yml (CA cert mounts, only with --custom-ca) + +services: + # =========================================================== + # GPU service — NVIDIA GPU accelerated + # Activated with: --profile gpu + # =========================================================== + + gpu: + build: + context: ./gpu/self_hosted + dockerfile: Dockerfile + profiles: [gpu] + restart: unless-stopped + ports: + - "${GPU_HOST_PORT:-8000}:8000" + environment: + HF_TOKEN: ${HF_TOKEN:-} + REFLECTOR_GPU_APIKEY: ${REFLECTOR_GPU_APIKEY:-} + volumes: + - gpu_cache:/root/.cache + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: [gpu] + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/docs"] + interval: 15s + timeout: 5s + retries: 10 + start_period: 120s + networks: + default: + aliases: + - transcription + + # =========================================================== + # CPU service — no GPU required, uses Dockerfile.cpu + # Activated with: --profile cpu + # Mutually exclusive with gpu (both bind port 8000) + # =========================================================== + + cpu: + build: + context: ./gpu/self_hosted + dockerfile: Dockerfile.cpu + profiles: [cpu] + restart: unless-stopped + ports: + - "${GPU_HOST_PORT:-8000}:8000" + environment: + HF_TOKEN: ${HF_TOKEN:-} + REFLECTOR_GPU_APIKEY: ${REFLECTOR_GPU_APIKEY:-} + volumes: + - gpu_cache:/root/.cache + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8000/docs"] + interval: 15s + timeout: 5s + retries: 10 + start_period: 120s + networks: + default: + aliases: + - transcription + + # =========================================================== + # Caddy — reverse proxy with HTTPS (optional) + # Activated with: --profile caddy + # Proxies to "transcription" network alias (works for both gpu and cpu) + # 
=========================================================== + + caddy: + image: caddy:2-alpine + profiles: [caddy] + restart: unless-stopped + ports: + - "80:80" + - "${CADDY_HTTPS_PORT:-443}:443" + volumes: + - ./Caddyfile.gpu-host:/etc/caddy/Caddyfile:ro + - caddy_data:/data + - caddy_config:/config + +volumes: + gpu_cache: + caddy_data: + caddy_config: diff --git a/docsv2/custom-ca-setup.md b/docsv2/custom-ca-setup.md new file mode 100644 index 00000000..142717bc --- /dev/null +++ b/docsv2/custom-ca-setup.md @@ -0,0 +1,337 @@ +# Custom CA Certificate Setup + +Use a private Certificate Authority (CA) with Reflector self-hosted deployments. This covers two scenarios: + +1. **Custom local domain** — Serve Reflector over HTTPS on an internal domain (e.g., `reflector.local`) using certs signed by your own CA +2. **Backend CA trust** — Let Reflector's backend services (server, workers, GPU) make HTTPS calls to GPU, LLM, or other internal services behind your private CA + +Both can be used independently or together. 
+ +## Quick Start + +### Generate test certificates + +```bash +./scripts/generate-certs.sh reflector.local +``` + +This creates `certs/` with: +- `ca.key` + `ca.crt` — Root CA (10-year validity) +- `server-key.pem` + `server.pem` — Server certificate (1-year, SAN: domain + localhost + 127.0.0.1) + +### Deploy with custom CA + domain + +```bash +# Add domain to /etc/hosts on the server (use 127.0.0.1 for local, or server LAN IP for network access) +echo "127.0.0.1 reflector.local" | sudo tee -a /etc/hosts + +# Run setup — pass the certs directory +./scripts/setup-selfhosted.sh --gpu --caddy --domain reflector.local --custom-ca certs/ + +# Trust the CA on your machine (see "Trust the CA" section below) +``` + +### Deploy with CA trust only (GPU/LLM behind private CA) + +```bash +# Only need the CA cert file — no Caddy TLS certs needed +./scripts/setup-selfhosted.sh --hosted --custom-ca /path/to/corporate-ca.crt +``` + +## How `--custom-ca` Works + +The flag accepts a **directory** or a **single file**: + +### Directory mode + +```bash +--custom-ca certs/ +``` + +Looks for these files by convention: +- `ca.crt` (required) — CA certificate to trust +- `server.pem` + `server-key.pem` (optional) — TLS certificate/key for Caddy + +If `server.pem` + `server-key.pem` are found AND `--domain` is provided: +- Caddy serves HTTPS using those certs +- Backend containers trust the CA for outbound calls + +If only `ca.crt` is found: +- Backend containers trust the CA for outbound calls +- Caddy is unaffected (uses Let's Encrypt, self-signed, or no Caddy) + +### Single file mode + +```bash +--custom-ca /path/to/corporate-ca.crt +``` + +Only injects CA trust into backend containers. No Caddy TLS changes. + +## Scenarios + +### Scenario 1: Custom local domain + +Your Reflector instance runs on an internal network. You want `https://reflector.local` with proper TLS (no browser warnings). + +```bash +# 1. Generate certs +./scripts/generate-certs.sh reflector.local + +# 2. 
Add to /etc/hosts on the server +echo "127.0.0.1 reflector.local" | sudo tee -a /etc/hosts + +# 3. Deploy +./scripts/setup-selfhosted.sh --gpu --garage --caddy --domain reflector.local --custom-ca certs/ + +# 4. Trust the CA on your machine (see "Trust the CA" section below) +``` + +If other machines on the network need to access it, add the server's LAN IP to `/etc/hosts` on those machines instead: +```bash +echo "192.168.1.100 reflector.local" | sudo tee -a /etc/hosts +``` + +And include that IP as an extra SAN when generating certs: +```bash +./scripts/generate-certs.sh reflector.local "IP:192.168.1.100" +``` + +### Scenario 2: GPU/LLM behind corporate CA + +Your GPU or LLM server (e.g., `https://gpu.internal.corp`) uses certificates signed by your corporate CA. Reflector's backend needs to trust that CA for outbound HTTPS calls. + +```bash +# Get the CA certificate from your IT team (PEM format) +# Then deploy — Caddy can still use Let's Encrypt or self-signed +./scripts/setup-selfhosted.sh --hosted --garage --caddy --custom-ca /path/to/corporate-ca.crt +``` + +This works because: +- **TLS cert/key** = "this is my identity" — for Caddy to serve HTTPS to browsers +- **CA cert** = "I trust this authority" — for backend containers to verify outbound connections + +Your Reflector frontend can use Let's Encrypt (public domain) or self-signed certs, while the backend trusts a completely different CA for GPU/LLM calls. 
+ +### Scenario 3: Both combined (same CA) + +Custom domain + GPU/LLM all behind the same CA: + +```bash +./scripts/generate-certs.sh reflector.local "DNS:gpu.local" +./scripts/setup-selfhosted.sh --gpu --garage --caddy --domain reflector.local --custom-ca certs/ +``` + +### Scenario 4: Multiple CAs (local domain + remote GPU on different CA) + +Your Reflector uses one CA for `reflector.local`, but the GPU host uses a different CA: + +```bash +# Your local domain setup +./scripts/generate-certs.sh reflector.local + +# Deploy with your CA + trust the GPU host's CA too +./scripts/setup-selfhosted.sh --hosted --garage --caddy \ + --domain reflector.local \ + --custom-ca certs/ \ + --extra-ca /path/to/gpu-machine-ca.crt +``` + +`--extra-ca` appends additional CA certs to the trust bundle. Backend containers trust ALL CAs — your local domain AND the GPU host's certs both work. + +You can repeat `--extra-ca` for multiple remote services: +```bash +--extra-ca /path/to/gpu-ca.crt --extra-ca /path/to/llm-ca.crt +``` + +For setting up a dedicated GPU host, see [Standalone GPU Host Setup](gpu-host-setup.md). + +## Trust the CA on Client Machines + +After deploying, clients need to trust the CA to avoid browser warnings. + +### macOS + +```bash +sudo security add-trusted-cert -d -r trustRoot \ + -k /Library/Keychains/System.keychain certs/ca.crt +``` + +### Linux (Ubuntu/Debian) + +```bash +sudo cp certs/ca.crt /usr/local/share/ca-certificates/reflector-ca.crt +sudo update-ca-certificates +``` + +### Linux (RHEL/Fedora) + +```bash +sudo cp certs/ca.crt /etc/pki/ca-trust/source/anchors/reflector-ca.crt +sudo update-ca-trust +``` + +### Windows (PowerShell as admin) + +```powershell +Import-Certificate -FilePath .\certs\ca.crt -CertStoreLocation Cert:\LocalMachine\Root +``` + +### Firefox (all platforms) + +Firefox uses its own certificate store: +1. Settings > Privacy & Security > View Certificates +2. Authorities tab > Import +3. 
Select `ca.crt` and check "Trust this CA to identify websites" + +## How It Works Internally + +### Docker entrypoint CA injection + +Each backend container (server, worker, beat, hatchet workers, GPU) has an entrypoint script (`docker-entrypoint.sh`) that: + +1. Checks if a CA cert is mounted at `/usr/local/share/ca-certificates/custom-ca.crt` +2. If present, runs `update-ca-certificates` to create a **combined bundle** (system CAs + custom CA) +3. Sets environment variables so all Python/gRPC libraries use the combined bundle: + +| Env var | Covers | +|---------|--------| +| `SSL_CERT_FILE` | httpx, OpenAI SDK, llama-index, Python ssl module | +| `REQUESTS_CA_BUNDLE` | requests library (transitive dependencies) | +| `CURL_CA_BUNDLE` | curl CLI (container healthchecks) | +| `GRPC_DEFAULT_SSL_ROOTS_FILE_PATH` | grpcio (Hatchet gRPC client) | + +When no CA cert is mounted, the entrypoint is a no-op — containers behave exactly as before. + +### Why this replaces manual certifi patching + +Previously, the workaround for trusting a private CA in Python was to patch certifi's bundle directly: + +```bash +# OLD approach — fragile, do NOT use +cat custom-ca.crt >> $(python -c "import certifi; print(certifi.where())") +``` + +This breaks whenever certifi is updated (any `pip install`/`uv sync` overwrites the bundle and the CA is lost). + +Our entrypoint approach is permanent because: + +1. `SSL_CERT_FILE` is checked by Python's `ssl.create_default_context()` **before** falling back to `certifi.where()`. When set, certifi's bundle is never read. +2. `REQUESTS_CA_BUNDLE` similarly overrides certifi for the `requests` library. +3. The CA is injected at container startup (runtime), not baked into the Python environment. It survives image rebuilds, dependency updates, and `uv sync`. + +``` +Python SSL lookup chain: + ssl.create_default_context() + → SSL_CERT_FILE env var? 
→ YES → use combined bundle (system + custom CA) ✓ + → (certifi.where() is never reached) +``` + +This covers all outbound HTTPS calls: httpx (transcription, diarization, translation, webhooks), OpenAI SDK (transcription), llama-index (LLM/summarization), and requests (transitive dependencies). + +### Compose override + +The setup script generates `docker-compose.ca.yml` which mounts the CA cert into every backend container as a read-only bind mount. This file is: +- Only generated when `--custom-ca` is passed +- Deleted on re-runs without `--custom-ca` (prevents stale overrides) +- Added to `.gitignore` + +### Node.js (frontend) + +The web container uses `NODE_EXTRA_CA_CERTS` which **adds** to Node's trust store (unlike Python's `SSL_CERT_FILE` which replaces it). This is set via the compose override. + +## Generate Your Own CA (Manual) + +If you prefer not to use `generate-certs.sh`: + +```bash +# 1. Create CA +openssl genrsa -out ca.key 4096 +openssl req -x509 -new -nodes -key ca.key -sha256 -days 3650 \ + -out ca.crt -subj "/CN=My CA/O=My Organization" + +# 2. Create server key +openssl genrsa -out server-key.pem 2048 + +# 3. Create CSR with SANs +openssl req -new -key server-key.pem -out server.csr \ + -subj "/CN=reflector.local" \ + -addext "subjectAltName=DNS:reflector.local,DNS:localhost,IP:127.0.0.1" + +# 4. Sign with CA +openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \ + -CAcreateserial -out server.pem -days 365 -sha256 \ + -copy_extensions copyall + +# 5. Clean up +rm server.csr ca.srl +``` + +## Using Existing Corporate Certificates + +If your organization already has a CA: + +1. Get the CA certificate in PEM format from your IT team +2. If you have a PKCS#12 (.p12/.pfx) bundle, extract the CA cert: + ```bash + openssl pkcs12 -in bundle.p12 -cacerts -nokeys -out ca.crt + ``` +3. 
If you have multiple intermediate CAs, concatenate them into one PEM file: + ```bash + cat intermediate-ca.crt root-ca.crt > ca.crt + ``` + +## Troubleshooting + +### Browser: "Your connection is not private" + +The CA is not trusted on the client machine. See "Trust the CA" section above. + +Check certificate expiry: +```bash +openssl x509 -noout -dates -in certs/server.pem +``` + +### Backend: `SSL: CERTIFICATE_VERIFY_FAILED` + +CA cert not mounted or not loaded. Check inside the container: +```bash +docker compose exec server env | grep SSL_CERT_FILE +docker compose exec server python -c " +import ssl, os +print('SSL_CERT_FILE:', os.environ.get('SSL_CERT_FILE', 'not set')) +ctx = ssl.create_default_context() +print('CA certs loaded:', ctx.cert_store_stats()) +" +``` + +### Caddy: "certificate is not valid for any names" + +Domain in Caddyfile doesn't match the certificate's SAN/CN. Check: +```bash +openssl x509 -noout -text -in certs/server.pem | grep -A1 "Subject Alternative Name" +``` + +### Certificate chain issues + +If you have intermediate CAs, concatenate them into `server.pem`: +```bash +cat server-cert.pem intermediate-ca.pem > certs/server.pem +``` + +Verify the chain: +```bash +openssl verify -CAfile certs/ca.crt certs/server.pem +``` + +### Certificate renewal + +Custom CA certs are NOT auto-renewed (unlike Let's Encrypt). Replace cert files and restart: +```bash +# Replace certs +cp new-server.pem certs/server.pem +cp new-server-key.pem certs/server-key.pem + +# Restart Caddy to pick up new certs +docker compose restart caddy +``` diff --git a/docsv2/gpu-host-setup.md b/docsv2/gpu-host-setup.md new file mode 100644 index 00000000..20d5d158 --- /dev/null +++ b/docsv2/gpu-host-setup.md @@ -0,0 +1,294 @@ +# Standalone GPU Host Setup + +Deploy Reflector's GPU transcription/diarization/translation service on a dedicated machine, separate from the main Reflector instance. 
Useful when: + +- Your GPU machine is on a different network than the Reflector server +- You want to share one GPU service across multiple Reflector instances +- The GPU machine has special hardware/drivers that can't run the full stack +- You need to scale GPU processing independently + +## Architecture + +``` +┌─────────────────────┐ HTTPS ┌────────────────────┐ +│ Reflector Server │ ────────────────────── │ GPU Host │ +│ (server, worker, │ TRANSCRIPT_URL │ (transcription, │ +│ web, postgres, │ DIARIZATION_URL │ diarization, │ +│ redis, hatchet) │ TRANSLATE_URL │ translation) │ +│ │ │ │ +│ setup-selfhosted.sh │ │ setup-gpu-host.sh │ +│ --hosted │ │ │ +└─────────────────────┘ └────────────────────┘ +``` + +The GPU service is a standalone FastAPI app that exposes transcription, diarization, translation, and audio padding endpoints. It has **no dependencies** on PostgreSQL, Redis, Hatchet, or any other Reflector service. + +## Quick Start + +### On the GPU machine + +```bash +git clone +cd reflector + +# Set HuggingFace token (required for diarization models) +export HF_TOKEN=your-huggingface-token + +# Deploy with HTTPS (Let's Encrypt) +./scripts/setup-gpu-host.sh --domain gpu.example.com --api-key my-secret-key + +# Or deploy with custom CA +./scripts/generate-certs.sh gpu.local +./scripts/setup-gpu-host.sh --domain gpu.local --custom-ca certs/ --api-key my-secret-key +``` + +### On the Reflector machine + +```bash +# If the GPU host uses a custom CA, trust it +./scripts/setup-selfhosted.sh --hosted --garage --caddy \ + --extra-ca /path/to/gpu-machine-ca.crt + +# Or if you already have --custom-ca for your local domain +./scripts/setup-selfhosted.sh --hosted --garage --caddy \ + --domain reflector.local --custom-ca certs/ \ + --extra-ca /path/to/gpu-machine-ca.crt +``` + +Then configure `server/.env` to point to the GPU host: + +```bash +TRANSCRIPT_BACKEND=modal +TRANSCRIPT_URL=https://gpu.example.com +TRANSCRIPT_MODAL_API_KEY=my-secret-key + 
+DIARIZATION_BACKEND=modal +DIARIZATION_URL=https://gpu.example.com +DIARIZATION_MODAL_API_KEY=my-secret-key + +TRANSLATION_BACKEND=modal +TRANSLATE_URL=https://gpu.example.com +TRANSLATION_MODAL_API_KEY=my-secret-key +``` + +## Script Options + +``` +./scripts/setup-gpu-host.sh [OPTIONS] + +Options: + --domain DOMAIN Domain name for HTTPS (Let's Encrypt or custom cert) + --custom-ca PATH Custom CA (directory or single PEM file) + --extra-ca FILE Additional CA cert to trust (repeatable) + --api-key KEY API key to protect the service (strongly recommended) + --cpu CPU-only mode (no NVIDIA GPU required) + --port PORT Host port (default: 443 with Caddy, 8000 without) +``` + +## Deployment Scenarios + +### Public internet with Let's Encrypt + +GPU machine has a public IP and domain: + +```bash +./scripts/setup-gpu-host.sh --domain gpu.example.com --api-key my-secret-key +``` + +Requirements: +- DNS A record: `gpu.example.com` → GPU machine's public IP +- Ports 80 and 443 open +- Caddy auto-provisions Let's Encrypt certificate + +### Internal network with custom CA + +GPU machine on a private network: + +```bash +# Generate certs on the GPU machine +./scripts/generate-certs.sh gpu.internal "IP:192.168.1.200" + +# Deploy +./scripts/setup-gpu-host.sh --domain gpu.internal --custom-ca certs/ --api-key my-secret-key +``` + +On each machine that connects (including the Reflector server), add DNS: +```bash +echo "192.168.1.200 gpu.internal" | sudo tee -a /etc/hosts +``` + +### IP-only (no domain) + +No domain needed — just use the machine's IP: + +```bash +./scripts/setup-gpu-host.sh --api-key my-secret-key +``` + +Caddy is not used; the GPU service runs directly on port 8000 (HTTP). For HTTPS without a domain, the Reflector machine connects via `http://:8000`. 
+ +### CPU-only (no NVIDIA GPU) + +Works on any machine — transcription will be slower: + +```bash +./scripts/setup-gpu-host.sh --cpu --domain gpu.example.com --api-key my-secret-key +``` + +## DNS Resolution + +The Reflector server must be able to reach the GPU host by name or IP. + +| Setup | DNS Method | TRANSCRIPT_URL example | +|-------|------------|----------------------| +| Public domain | DNS A record | `https://gpu.example.com` | +| Internal domain | `/etc/hosts` on both machines | `https://gpu.internal` | +| IP only | No DNS needed | `http://192.168.1.200:8000` | + +For internal domains, add the GPU machine's IP to `/etc/hosts` on the Reflector machine: +```bash +echo "192.168.1.200 gpu.internal" | sudo tee -a /etc/hosts +``` + +If the Reflector server runs in Docker, the containers resolve DNS from the host (Docker's default DNS behavior). So adding to the host's `/etc/hosts` is sufficient. + +## Multi-CA Setup + +When your Reflector instance has its own CA (for `reflector.local`) and the GPU host has a different CA: + +**On the GPU machine:** +```bash +./scripts/generate-certs.sh gpu.local +./scripts/setup-gpu-host.sh --domain gpu.local --custom-ca certs/ --api-key my-key +``` + +**On the Reflector machine:** +```bash +# Your local CA for reflector.local + the GPU host's CA +./scripts/setup-selfhosted.sh --hosted --garage --caddy \ + --domain reflector.local \ + --custom-ca certs/ \ + --extra-ca /path/to/gpu-machine-ca.crt +``` + +The `--extra-ca` flag appends the GPU host's CA to the trust bundle. Backend containers trust both CAs — your local domain works AND outbound calls to the GPU host succeed. 
+ +You can repeat `--extra-ca` for multiple remote services: +```bash +--extra-ca /path/to/gpu-ca.crt --extra-ca /path/to/llm-ca.crt +``` + +## API Key Authentication + +The GPU service uses Bearer token authentication via `REFLECTOR_GPU_APIKEY`: + +```bash +# Test from the Reflector machine +curl -s https://gpu.example.com/docs # No auth needed for docs +curl -s -X POST https://gpu.example.com/v1/audio/transcriptions \ + -H "Authorization: Bearer " \ #gitleaks:allow + -F "file=@audio.wav" +``` + +If `REFLECTOR_GPU_APIKEY` is not set, the service accepts all requests (open access). Always use `--api-key` for internet-facing deployments. + +The same key goes in Reflector's `server/.env` as `TRANSCRIPT_MODAL_API_KEY` and `DIARIZATION_MODAL_API_KEY`. + +## Files + +| File | Checked in? | Purpose | +|------|-------------|---------| +| `docker-compose.gpu-host.yml` | Yes | Static compose file with profiles (`gpu`, `cpu`, `caddy`) | +| `.env.gpu-host` | No (generated) | Environment variables (HF_TOKEN, API key, ports) | +| `Caddyfile.gpu-host` | No (generated) | Caddy config (only when using HTTPS) | +| `docker-compose.gpu-ca.yml` | No (generated) | CA cert mounts override (only with --custom-ca) | +| `certs/` | No (generated) | Staged certificates (when using --custom-ca) | + +The compose file is checked into the repo — you can read it to understand exactly what runs. The script only generates env vars, Caddyfile, and CA overrides. Profiles control which service starts: + +```bash +# What the script does under the hood: +docker compose -f docker-compose.gpu-host.yml --profile gpu --profile caddy \ + --env-file .env.gpu-host up -d + +# CPU mode: +docker compose -f docker-compose.gpu-host.yml --profile cpu --profile caddy \ + --env-file .env.gpu-host up -d +``` + +Both `gpu` and `cpu` services get the network alias `transcription`, so Caddy's config works with either. 
+ +## Management + +```bash +# View logs +docker compose -f docker-compose.gpu-host.yml --profile gpu logs -f gpu + +# Restart +docker compose -f docker-compose.gpu-host.yml --profile gpu restart gpu + +# Stop +docker compose -f docker-compose.gpu-host.yml --profile gpu --profile caddy down + +# Re-run setup +./scripts/setup-gpu-host.sh [same flags] + +# Rebuild after code changes +docker compose -f docker-compose.gpu-host.yml --profile gpu build gpu +docker compose -f docker-compose.gpu-host.yml --profile gpu up -d gpu +``` + +If you deployed with `--custom-ca`, include the CA override in manual commands: +```bash +docker compose -f docker-compose.gpu-host.yml -f docker-compose.gpu-ca.yml \ + --profile gpu logs -f gpu +``` + +## Troubleshooting + +### GPU service won't start + +Check logs: +```bash +docker compose -f docker-compose.gpu-host.yml logs gpu +``` + +Common causes: +- NVIDIA driver not installed or `nvidia-container-toolkit` missing +- `HF_TOKEN` not set (diarization model download fails) +- Port already in use + +### Reflector can't connect to GPU host + +From the Reflector machine: +```bash +# Test HTTPS connectivity +curl -v https://gpu.example.com/docs + +# If using custom CA, test with explicit CA +curl --cacert /path/to/gpu-ca.crt https://gpu.internal/docs +``` + +From inside the Reflector container: +```bash +docker compose exec server python -c " +import httpx +r = httpx.get('https://gpu.internal/docs') +print(r.status_code) +" +``` + +### SSL: CERTIFICATE_VERIFY_FAILED + +The Reflector backend doesn't trust the GPU host's CA. 
Fix: +```bash +# Re-run Reflector setup with the GPU host's CA +./scripts/setup-selfhosted.sh --hosted --extra-ca /path/to/gpu-ca.crt +``` + +### Diarization returns errors + +- Accept pyannote model licenses on HuggingFace: + - https://huggingface.co/pyannote/speaker-diarization-3.1 + - https://huggingface.co/pyannote/segmentation-3.0 +- Verify `HF_TOKEN` is set in `.env.gpu-host` diff --git a/gpu/self_hosted/Dockerfile b/gpu/self_hosted/Dockerfile index 8fd56b66..f39a33d4 100644 --- a/gpu/self_hosted/Dockerfile +++ b/gpu/self_hosted/Dockerfile @@ -42,6 +42,7 @@ COPY pyproject.toml uv.lock /app/ COPY ./app /app/app COPY ./main.py /app/ COPY ./runserver.sh /app/ +COPY ./docker-entrypoint.sh /app/ # prevent uv failing with too many open files on big cpus ENV UV_CONCURRENT_INSTALLS=16 @@ -52,6 +53,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \ EXPOSE 8000 -CMD ["sh", "/app/runserver.sh"] +RUN chmod +x /app/docker-entrypoint.sh + +CMD ["sh", "/app/docker-entrypoint.sh"] diff --git a/gpu/self_hosted/Dockerfile.cpu b/gpu/self_hosted/Dockerfile.cpu index 7e02ac5c..798f7c3f 100644 --- a/gpu/self_hosted/Dockerfile.cpu +++ b/gpu/self_hosted/Dockerfile.cpu @@ -26,6 +26,7 @@ COPY pyproject.toml uv.lock /app/ COPY ./app /app/app COPY ./main.py /app/ COPY ./runserver.sh /app/ +COPY ./docker-entrypoint.sh /app/ # prevent uv failing with too many open files on big cpus ENV UV_CONCURRENT_INSTALLS=16 @@ -36,4 +37,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \ EXPOSE 8000 -CMD ["sh", "/app/runserver.sh"] +RUN chmod +x /app/docker-entrypoint.sh + +CMD ["sh", "/app/docker-entrypoint.sh"] diff --git a/gpu/self_hosted/docker-entrypoint.sh b/gpu/self_hosted/docker-entrypoint.sh new file mode 100644 index 00000000..aab14ad6 --- /dev/null +++ b/gpu/self_hosted/docker-entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -e + +# Custom CA certificate injection +# If a CA cert is mounted at this path (via docker-compose.ca.yml), +# add it to the system trust store and configure all 
Python SSL libraries. +CUSTOM_CA_PATH="/usr/local/share/ca-certificates/custom-ca.crt" + +if [ -s "$CUSTOM_CA_PATH" ]; then + echo "[entrypoint] Custom CA certificate detected, updating trust store..." + update-ca-certificates 2>/dev/null + + # update-ca-certificates creates a combined bundle (system + custom CAs) + COMBINED_BUNDLE="/etc/ssl/certs/ca-certificates.crt" + export SSL_CERT_FILE="$COMBINED_BUNDLE" + export REQUESTS_CA_BUNDLE="$COMBINED_BUNDLE" + export CURL_CA_BUNDLE="$COMBINED_BUNDLE" + export GRPC_DEFAULT_SSL_ROOTS_FILE_PATH="$COMBINED_BUNDLE" + echo "[entrypoint] CA trust store updated (SSL_CERT_FILE=$COMBINED_BUNDLE)" +fi + +exec sh /app/runserver.sh diff --git a/scripts/generate-certs.sh b/scripts/generate-certs.sh new file mode 100755 index 00000000..16daf4d2 --- /dev/null +++ b/scripts/generate-certs.sh @@ -0,0 +1,130 @@ +#!/usr/bin/env bash +# +# Generate a local CA and server certificate for Reflector self-hosted deployments. +# +# Usage: +# ./scripts/generate-certs.sh DOMAIN [EXTRA_SANS...] +# +# Examples: +# ./scripts/generate-certs.sh reflector.local +# ./scripts/generate-certs.sh reflector.local "DNS:gpu.local,IP:192.168.1.100" +# +# Generates in certs/: +# ca.key — CA private key (keep secret) +# ca.crt — CA certificate (distribute to clients) +# server-key.pem — Server private key +# server.pem — Server certificate (signed by CA) +# +# Then use with setup-selfhosted.sh: +# ./scripts/setup-selfhosted.sh --gpu --caddy --domain DOMAIN --custom-ca certs/ +# +set -euo pipefail + +DOMAIN="${1:?Usage: $0 DOMAIN [EXTRA_SANS...]}" +EXTRA_SANS="${2:-}" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CERTS_DIR="$(cd "$SCRIPT_DIR/.." && pwd)/certs" + +# Colors +GREEN='\033[0;32m' +CYAN='\033[0;36m' +NC='\033[0m' +info() { echo -e "${CYAN}==>${NC} $*"; } +ok() { echo -e "${GREEN} ✓${NC} $*"; } + +# Check for openssl +if ! command -v openssl &>/dev/null; then + echo "Error: openssl is required but not found. Install it first." 
>&2 + exit 1 +fi + +mkdir -p "$CERTS_DIR" + +# Build SAN list +SAN_LIST="DNS:$DOMAIN,DNS:localhost,IP:127.0.0.1" +if [[ -n "$EXTRA_SANS" ]]; then + SAN_LIST="$SAN_LIST,$EXTRA_SANS" +fi + +info "Generating CA and server certificate for: $DOMAIN" +echo " SANs: $SAN_LIST" +echo "" + +# --- Step 1: Generate CA --- +if [[ -f "$CERTS_DIR/ca.key" ]] && [[ -f "$CERTS_DIR/ca.crt" ]]; then + ok "CA already exists at certs/ca.key + certs/ca.crt — reusing" +else + info "Generating CA key and certificate..." + openssl genrsa -out "$CERTS_DIR/ca.key" 4096 2>/dev/null + openssl req -x509 -new -nodes \ + -key "$CERTS_DIR/ca.key" \ + -sha256 -days 3650 \ + -out "$CERTS_DIR/ca.crt" \ + -subj "/CN=Reflector Local CA/O=Reflector Self-Hosted" + ok "CA certificate generated (valid for 10 years)" +fi + +# --- Step 2: Generate server key --- +info "Generating server key..." +openssl genrsa -out "$CERTS_DIR/server-key.pem" 2048 2>/dev/null +ok "Server key generated" + +# --- Step 3: Create CSR with SANs --- +info "Creating certificate signing request..." +openssl req -new \ + -key "$CERTS_DIR/server-key.pem" \ + -out "$CERTS_DIR/server.csr" \ + -subj "/CN=$DOMAIN" \ + -addext "subjectAltName=$SAN_LIST" +ok "CSR created" + +# --- Step 4: Sign with CA --- +info "Signing server certificate with CA..." 
+openssl x509 -req \ + -in "$CERTS_DIR/server.csr" \ + -CA "$CERTS_DIR/ca.crt" \ + -CAkey "$CERTS_DIR/ca.key" \ + -CAcreateserial \ + -out "$CERTS_DIR/server.pem" \ + -days 365 -sha256 \ + -copy_extensions copyall \ + 2>/dev/null +ok "Server certificate signed (valid for 1 year)" + +# --- Cleanup --- +rm -f "$CERTS_DIR/server.csr" "$CERTS_DIR/ca.srl" + +# --- Set permissions --- +chmod 644 "$CERTS_DIR/ca.crt" "$CERTS_DIR/server.pem" +chmod 600 "$CERTS_DIR/ca.key" "$CERTS_DIR/server-key.pem" + +echo "" +echo "==========================================" +echo -e " ${GREEN}Certificates generated in certs/${NC}" +echo "==========================================" +echo "" +echo " certs/ca.key CA private key (keep secret)" +echo " certs/ca.crt CA certificate (distribute to clients)" +echo " certs/server-key.pem Server private key" +echo " certs/server.pem Server certificate for $DOMAIN" +echo "" +echo " SANs: $SAN_LIST" +echo "" +echo "Use with setup-selfhosted.sh:" +echo " ./scripts/setup-selfhosted.sh --gpu --caddy --domain $DOMAIN --custom-ca certs/" +echo "" +echo "Trust the CA on your machine:" +case "$(uname -s)" in + Darwin) + echo " sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain certs/ca.crt" + ;; + Linux) + echo " sudo cp certs/ca.crt /usr/local/share/ca-certificates/reflector-ca.crt" + echo " sudo update-ca-certificates" + ;; + *) + echo " See docsv2/custom-ca-setup.md for your platform" + ;; +esac +echo "" diff --git a/scripts/setup-gpu-host.sh b/scripts/setup-gpu-host.sh new file mode 100755 index 00000000..0c41e915 --- /dev/null +++ b/scripts/setup-gpu-host.sh @@ -0,0 +1,496 @@ +#!/usr/bin/env bash +# +# Standalone GPU service setup for Reflector. +# Deploys ONLY the GPU transcription/diarization/translation service on a dedicated machine. +# The main Reflector instance connects to this machine over HTTPS. 
+# +# Usage: +# ./scripts/setup-gpu-host.sh [--domain DOMAIN] [--custom-ca PATH] [--extra-ca FILE] [--api-key KEY] [--cpu] [--build] +# +# Options: +# --domain DOMAIN Domain name for this GPU host (e.g., gpu.example.com) +# With --custom-ca: uses custom TLS cert. Without: uses Let's Encrypt. +# --custom-ca PATH Custom CA certificate (dir with ca.crt + server.pem + server-key.pem, or single PEM file) +# --extra-ca FILE Additional CA cert to trust (repeatable) +# --api-key KEY API key to protect the GPU service (recommended for internet-facing deployments) +# --cpu Use CPU-only Dockerfile (no NVIDIA GPU required) +# --build Build image from source (default: build, since no pre-built GPU image is published) +# --port PORT Host port to expose (default: 443 with Caddy, 8000 without) +# +# Examples: +# # GPU on LAN with custom CA +# ./scripts/generate-certs.sh gpu.local +# ./scripts/setup-gpu-host.sh --domain gpu.local --custom-ca certs/ --api-key my-secret-key +# +# # GPU on public internet with Let's Encrypt +# ./scripts/setup-gpu-host.sh --domain gpu.example.com --api-key my-secret-key +# +# # GPU on LAN, IP access only (self-signed cert) +# ./scripts/setup-gpu-host.sh --api-key my-secret-key +# +# # CPU-only mode (no NVIDIA GPU) +# ./scripts/setup-gpu-host.sh --cpu --api-key my-secret-key +# +# After setup, configure the main Reflector instance to use this GPU: +# In server/.env on the Reflector machine: +# TRANSCRIPT_BACKEND=modal +# TRANSCRIPT_URL=https://gpu.example.com +# TRANSCRIPT_MODAL_API_KEY=my-secret-key +# DIARIZATION_BACKEND=modal +# DIARIZATION_URL=https://gpu.example.com +# DIARIZATION_MODAL_API_KEY=my-secret-key +# TRANSLATION_BACKEND=modal +# TRANSLATE_URL=https://gpu.example.com +# TRANSLATION_MODAL_API_KEY=my-secret-key +# +# DNS Resolution: +# - Public domain: Create a DNS A record pointing to this machine's public IP. 
+# - Internal domain (e.g., gpu.local): Add to /etc/hosts on both machines: +# gpu.local +# - IP-only: Use the machine's IP directly in TRANSCRIPT_URL/DIARIZATION_URL. +# The Reflector backend must trust the CA or accept self-signed certs. +# +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" + +GPU_DIR="$ROOT_DIR/gpu/self_hosted" +OS="$(uname -s)" + +# --- Colors --- +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +NC='\033[0m' + +info() { echo -e "${CYAN}==>${NC} $*"; } +ok() { echo -e "${GREEN} ✓${NC} $*"; } +warn() { echo -e "${YELLOW} !${NC} $*"; } +err() { echo -e "${RED} ✗${NC} $*" >&2; } + +# --- Parse arguments --- +CUSTOM_DOMAIN="" +CUSTOM_CA="" +EXTRA_CA_FILES=() +API_KEY="" +USE_CPU=false +HOST_PORT="" + +SKIP_NEXT=false +ARGS=("$@") +for i in "${!ARGS[@]}"; do + if [[ "$SKIP_NEXT" == "true" ]]; then + SKIP_NEXT=false + continue + fi + arg="${ARGS[$i]}" + case "$arg" in + --domain) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--domain requires a domain name" + exit 1 + fi + CUSTOM_DOMAIN="${ARGS[$next_i]}" + SKIP_NEXT=true ;; + --custom-ca) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--custom-ca requires a path to a directory or PEM certificate file" + exit 1 + fi + CUSTOM_CA="${ARGS[$next_i]}" + SKIP_NEXT=true ;; + --extra-ca) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--extra-ca requires a path to a PEM certificate file" + exit 1 + fi + if [[ ! 
-f "${ARGS[$next_i]}" ]]; then + err "--extra-ca file not found: ${ARGS[$next_i]}" + exit 1 + fi + EXTRA_CA_FILES+=("${ARGS[$next_i]}") + SKIP_NEXT=true ;; + --api-key) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--api-key requires a key value" + exit 1 + fi + API_KEY="${ARGS[$next_i]}" + SKIP_NEXT=true ;; + --cpu) + USE_CPU=true ;; + --port) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--port requires a port number" + exit 1 + fi + HOST_PORT="${ARGS[$next_i]}" + SKIP_NEXT=true ;; + --build) + ;; # Always build from source for GPU, flag accepted for compatibility + *) + err "Unknown argument: $arg" + err "Usage: $0 [--domain DOMAIN] [--custom-ca PATH] [--extra-ca FILE] [--api-key KEY] [--cpu] [--port PORT]" + exit 1 + ;; + esac +done + +# --- Resolve CA paths --- +CA_CERT_PATH="" +TLS_CERT_PATH="" +TLS_KEY_PATH="" +USE_CUSTOM_CA=false +USE_CADDY=false + +if [[ -n "$CUSTOM_CA" ]] || [[ -n "${EXTRA_CA_FILES[0]+x}" ]]; then + USE_CUSTOM_CA=true +fi + +if [[ -n "$CUSTOM_CA" ]]; then + CUSTOM_CA="${CUSTOM_CA%/}" + if [[ -d "$CUSTOM_CA" ]]; then + [[ -f "$CUSTOM_CA/ca.crt" ]] || { err "$CUSTOM_CA/ca.crt not found"; exit 1; } + CA_CERT_PATH="$CUSTOM_CA/ca.crt" + if [[ -f "$CUSTOM_CA/server.pem" ]] && [[ -f "$CUSTOM_CA/server-key.pem" ]]; then + TLS_CERT_PATH="$CUSTOM_CA/server.pem" + TLS_KEY_PATH="$CUSTOM_CA/server-key.pem" + elif [[ -f "$CUSTOM_CA/server.pem" ]] || [[ -f "$CUSTOM_CA/server-key.pem" ]]; then + warn "Found only one of server.pem/server-key.pem — both needed for TLS. Skipping." 
+ fi + elif [[ -f "$CUSTOM_CA" ]]; then + CA_CERT_PATH="$CUSTOM_CA" + else + err "--custom-ca path not found: $CUSTOM_CA" + exit 1 + fi +elif [[ -n "${EXTRA_CA_FILES[0]+x}" ]]; then + CA_CERT_PATH="${EXTRA_CA_FILES[0]}" + unset 'EXTRA_CA_FILES[0]' + EXTRA_CA_FILES=("${EXTRA_CA_FILES[@]+"${EXTRA_CA_FILES[@]}"}") +fi + +# Caddy if we have a domain or TLS certs +if [[ -n "$CUSTOM_DOMAIN" ]] || [[ -n "$TLS_CERT_PATH" ]]; then + USE_CADDY=true +fi + +# Default port +if [[ -z "$HOST_PORT" ]]; then + if [[ "$USE_CADDY" == "true" ]]; then + HOST_PORT="443" + else + HOST_PORT="8000" + fi +fi + +# Detect primary IP +PRIMARY_IP="" +if [[ "$OS" == "Linux" ]]; then + PRIMARY_IP=$(hostname -I 2>/dev/null | awk '{print $1}' || true) + if [[ "$PRIMARY_IP" == "127."* ]] || [[ -z "$PRIMARY_IP" ]]; then + PRIMARY_IP=$(ip -4 route get 1 2>/dev/null | sed -n 's/.*src \([0-9.]*\).*/\1/p' || true) + fi +fi + +# --- Display config --- +echo "" +echo "==========================================" +echo " Reflector — Standalone GPU Host Setup" +echo "==========================================" +echo "" +echo " Mode: $(if [[ "$USE_CPU" == "true" ]]; then echo "CPU-only"; else echo "NVIDIA GPU"; fi)" +echo " Caddy: $USE_CADDY" +[[ -n "$CUSTOM_DOMAIN" ]] && echo " Domain: $CUSTOM_DOMAIN" +[[ "$USE_CUSTOM_CA" == "true" ]] && echo " CA: Custom" +[[ -n "$TLS_CERT_PATH" ]] && echo " TLS: Custom cert" +[[ -n "$API_KEY" ]] && echo " Auth: API key protected" +[[ -z "$API_KEY" ]] && echo " Auth: NONE (open access — use --api-key for production!)" +echo " Port: $HOST_PORT" +echo "" + +# --- Prerequisites --- +info "Checking prerequisites" + +if ! command -v docker &>/dev/null; then + err "Docker not found. Install Docker first." + exit 1 +fi +ok "Docker available" + +if ! docker compose version &>/dev/null; then + err "Docker Compose V2 not found." + exit 1 +fi +ok "Docker Compose V2 available" + +if [[ "$USE_CPU" != "true" ]]; then + if ! 
docker info 2>/dev/null | grep -qi nvidia; then + warn "NVIDIA runtime not detected in Docker. GPU mode may fail." + warn "Install nvidia-container-toolkit if you have an NVIDIA GPU." + else + ok "NVIDIA Docker runtime available" + fi +fi + +# --- Stage certificates --- +CERTS_DIR="$ROOT_DIR/certs" +if [[ "$USE_CUSTOM_CA" == "true" ]]; then + info "Staging certificates" + mkdir -p "$CERTS_DIR" + + if [[ -n "$CA_CERT_PATH" ]]; then + local_ca_dest="$CERTS_DIR/ca.crt" + src_id=$(ls -i "$CA_CERT_PATH" 2>/dev/null | awk '{print $1}') + dst_id=$(ls -i "$local_ca_dest" 2>/dev/null | awk '{print $1}') + if [[ "$src_id" != "$dst_id" ]] || [[ -z "$dst_id" ]]; then + cp "$CA_CERT_PATH" "$local_ca_dest" + fi + chmod 644 "$local_ca_dest" + ok "CA certificate staged" + + # Append extra CAs + for extra_ca in "${EXTRA_CA_FILES[@]+"${EXTRA_CA_FILES[@]}"}"; do + echo "" >> "$local_ca_dest" + cat "$extra_ca" >> "$local_ca_dest" + ok "Appended extra CA: $extra_ca" + done + fi + + if [[ -n "$TLS_CERT_PATH" ]]; then + cert_dest="$CERTS_DIR/server.pem" + key_dest="$CERTS_DIR/server-key.pem" + src_id=$(ls -i "$TLS_CERT_PATH" 2>/dev/null | awk '{print $1}') + dst_id=$(ls -i "$cert_dest" 2>/dev/null | awk '{print $1}') + if [[ "$src_id" != "$dst_id" ]] || [[ -z "$dst_id" ]]; then + cp "$TLS_CERT_PATH" "$cert_dest" + cp "$TLS_KEY_PATH" "$key_dest" + fi + chmod 644 "$cert_dest" + chmod 600 "$key_dest" + ok "TLS cert/key staged" + fi +fi + +# --- Build profiles and compose command --- +COMPOSE_FILE="$ROOT_DIR/docker-compose.gpu-host.yml" +COMPOSE_PROFILES=() +GPU_SERVICE="gpu" + +if [[ "$USE_CPU" == "true" ]]; then + COMPOSE_PROFILES+=("cpu") + GPU_SERVICE="cpu" +else + COMPOSE_PROFILES+=("gpu") +fi +if [[ "$USE_CADDY" == "true" ]]; then + COMPOSE_PROFILES+=("caddy") +fi + +# Compose command helper +compose_cmd() { + local profiles="" files="-f $COMPOSE_FILE" + if [[ "$USE_CUSTOM_CA" == "true" ]] && [[ -f "$ROOT_DIR/docker-compose.gpu-ca.yml" ]]; then + files="$files -f 
$ROOT_DIR/docker-compose.gpu-ca.yml" + fi + for p in "${COMPOSE_PROFILES[@]}"; do + profiles="$profiles --profile $p" + done + docker compose $files $profiles "$@" +} + +# Generate CA compose override if needed (mounts certs into containers) +if [[ "$USE_CUSTOM_CA" == "true" ]]; then + info "Generating docker-compose.gpu-ca.yml override" + ca_override="$ROOT_DIR/docker-compose.gpu-ca.yml" + cat > "$ca_override" << 'CAEOF' +# Generated by setup-gpu-host.sh — custom CA trust. +# Do not edit manually; re-run setup-gpu-host.sh with --custom-ca to regenerate. +services: + gpu: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + cpu: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro +CAEOF + + if [[ -n "$TLS_CERT_PATH" ]]; then + cat >> "$ca_override" << 'CADDYCAEOF' + caddy: + volumes: + - ./certs:/etc/caddy/certs:ro +CADDYCAEOF + fi + ok "Generated docker-compose.gpu-ca.yml" +else + rm -f "$ROOT_DIR/docker-compose.gpu-ca.yml" +fi + +# --- Generate Caddyfile --- +if [[ "$USE_CADDY" == "true" ]]; then + info "Generating Caddyfile.gpu-host" + + CADDYFILE="$ROOT_DIR/Caddyfile.gpu-host" + + if [[ -n "$TLS_CERT_PATH" ]] && [[ -n "$CUSTOM_DOMAIN" ]]; then + cat > "$CADDYFILE" << CADDYEOF +# Generated by setup-gpu-host.sh — Custom TLS cert for $CUSTOM_DOMAIN +$CUSTOM_DOMAIN { + tls /etc/caddy/certs/server.pem /etc/caddy/certs/server-key.pem + reverse_proxy transcription:8000 +} +CADDYEOF + ok "Caddyfile: custom TLS for $CUSTOM_DOMAIN" + elif [[ -n "$CUSTOM_DOMAIN" ]]; then + cat > "$CADDYFILE" << CADDYEOF +# Generated by setup-gpu-host.sh — Let's Encrypt for $CUSTOM_DOMAIN +$CUSTOM_DOMAIN { + reverse_proxy transcription:8000 +} +CADDYEOF + ok "Caddyfile: Let's Encrypt for $CUSTOM_DOMAIN" + else + cat > "$CADDYFILE" << 'CADDYEOF' +# Generated by setup-gpu-host.sh — self-signed cert for IP access +:443 { + tls internal + reverse_proxy transcription:8000 +} +CADDYEOF + ok "Caddyfile: self-signed cert for IP access" + fi 
+fi + +# --- Generate .env --- +info "Generating GPU service .env" + +GPU_ENV="$ROOT_DIR/.env.gpu-host" +cat > "$GPU_ENV" << EOF +# Generated by setup-gpu-host.sh +# HuggingFace token for pyannote diarization models +HF_TOKEN=${HF_TOKEN:-} +# API key to protect the GPU service (set via --api-key) +REFLECTOR_GPU_APIKEY=${API_KEY:-} +# Port configuration +GPU_HOST_PORT=${HOST_PORT} +CADDY_HTTPS_PORT=${HOST_PORT} +EOF + +if [[ -z "${HF_TOKEN:-}" ]]; then + warn "HF_TOKEN not set. Diarization requires a HuggingFace token." + warn "Set it: export HF_TOKEN=your-token-here and re-run, or edit .env.gpu-host" +fi + +ok "Generated .env.gpu-host" + +# --- Build and start --- +info "Building $GPU_SERVICE image (first build downloads ML models — may take a while)..." +compose_cmd --env-file "$GPU_ENV" build "$GPU_SERVICE" +ok "$GPU_SERVICE image built" + +info "Starting services..." +compose_cmd --env-file "$GPU_ENV" up -d +ok "Services started" + +# --- Wait for health --- +info "Waiting for GPU service to be healthy (model loading takes 1-2 minutes)..." +local_url="http://localhost:8000" +for i in $(seq 1 40); do + if curl -sf "$local_url/docs" >/dev/null 2>&1; then + ok "GPU service is healthy!" + break + fi + if [[ $i -eq 40 ]]; then + err "GPU service did not become healthy after 5 minutes." 
+        err "Check logs: docker compose -f docker-compose.gpu-host.yml logs gpu"
+        exit 1
+    fi
+    sleep 8
+done
+
+# --- Summary ---
+echo ""
+echo "=========================================="
+echo -e "  ${GREEN}GPU service is running!${NC}"
+echo "=========================================="
+echo ""
+
+if [[ "$USE_CADDY" == "true" ]]; then
+    if [[ -n "$CUSTOM_DOMAIN" ]]; then
+        echo "  URL: https://$CUSTOM_DOMAIN"
+    elif [[ -n "$PRIMARY_IP" ]]; then
+        echo "  URL: https://$PRIMARY_IP"
+    else
+        echo "  URL: https://localhost"
+    fi
+else
+    if [[ -n "$PRIMARY_IP" ]]; then
+        echo "  URL: http://$PRIMARY_IP:$HOST_PORT"
+    else
+        echo "  URL: http://localhost:$HOST_PORT"
+    fi
+fi
+
+echo "  Health: curl <URL>/docs"
+[[ -n "$API_KEY" ]] && echo "  API key: $API_KEY"
+echo ""
+echo "  Configure the main Reflector instance (in server/.env):"
+echo ""
+
+local_gpu_url=""
+if [[ "$USE_CADDY" == "true" ]]; then
+    if [[ -n "$CUSTOM_DOMAIN" ]]; then
+        local_gpu_url="https://$CUSTOM_DOMAIN"
+    elif [[ -n "$PRIMARY_IP" ]]; then
+        local_gpu_url="https://$PRIMARY_IP"
+    else
+        local_gpu_url="https://localhost"
+    fi
+else
+    if [[ -n "$PRIMARY_IP" ]]; then
+        local_gpu_url="http://$PRIMARY_IP:$HOST_PORT"
+    else
+        local_gpu_url="http://localhost:$HOST_PORT"
+    fi
+fi
+
+echo "  TRANSCRIPT_BACKEND=modal"
+echo "  TRANSCRIPT_URL=$local_gpu_url"
+[[ -n "$API_KEY" ]] && echo "  TRANSCRIPT_MODAL_API_KEY=$API_KEY"
+echo "  DIARIZATION_BACKEND=modal"
+echo "  DIARIZATION_URL=$local_gpu_url"
+[[ -n "$API_KEY" ]] && echo "  DIARIZATION_MODAL_API_KEY=$API_KEY"
+echo "  TRANSLATION_BACKEND=modal"
+echo "  TRANSLATE_URL=$local_gpu_url"
+[[ -n "$API_KEY" ]] && echo "  TRANSLATION_MODAL_API_KEY=$API_KEY"
+echo ""
+
+if [[ "$USE_CUSTOM_CA" == "true" ]]; then
+    echo "  The Reflector instance must also trust this CA."
+ echo " On the Reflector machine, run setup-selfhosted.sh with:" + echo " --extra-ca /path/to/this-machines-ca.crt" + echo "" +fi + +echo " DNS Resolution:" +if [[ -n "$CUSTOM_DOMAIN" ]]; then + echo " Ensure '$CUSTOM_DOMAIN' resolves to this machine's IP." + echo " Public: Create a DNS A record." + echo " Internal: Add to /etc/hosts on the Reflector machine:" + echo " ${PRIMARY_IP:-} $CUSTOM_DOMAIN" +else + echo " Use this machine's IP directly in TRANSCRIPT_URL/DIARIZATION_URL." +fi +echo "" +echo " To stop: docker compose -f docker-compose.gpu-host.yml down" +echo " To re-run: ./scripts/setup-gpu-host.sh $*" +echo " Logs: docker compose -f docker-compose.gpu-host.yml logs -f gpu" +echo "" diff --git a/scripts/setup-selfhosted.sh b/scripts/setup-selfhosted.sh index 63a4f454..ac519f93 100755 --- a/scripts/setup-selfhosted.sh +++ b/scripts/setup-selfhosted.sh @@ -4,7 +4,7 @@ # Single script to configure and launch everything on one server. # # Usage: -# ./scripts/setup-selfhosted.sh <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASSWORD] [--build] +# ./scripts/setup-selfhosted.sh <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--custom-ca PATH] [--password PASSWORD] [--build] # # ML processing modes (pick ONE — required): # --gpu NVIDIA GPU container for transcription/diarization/translation @@ -23,6 +23,13 @@ # --domain DOMAIN Use a real domain for Caddy (enables Let's Encrypt auto-HTTPS) # Requires: DNS pointing to this server + ports 80/443 open # Without --domain: Caddy uses self-signed cert for IP access +# --custom-ca PATH Custom CA certificate for private HTTPS services +# PATH can be a directory (containing ca.crt, optionally server.pem + server-key.pem) +# or a single PEM file (CA trust only, no Caddy TLS) +# With server.pem+server-key.pem: Caddy serves HTTPS using those certs (requires --domain) +# Without: only 
injects CA trust into backend containers for outbound calls +# --extra-ca FILE Additional CA cert to trust (can be repeated for multiple CAs) +# Appended to the CA bundle so backends trust multiple authorities # --password PASS Enable password auth with admin@localhost user # --build Build backend and frontend images from source instead of pulling # @@ -35,6 +42,8 @@ # ./scripts/setup-selfhosted.sh --gpu --garage --caddy --password mysecretpass # ./scripts/setup-selfhosted.sh --gpu --garage --caddy # ./scripts/setup-selfhosted.sh --cpu +# ./scripts/setup-selfhosted.sh --gpu --caddy --domain reflector.local --custom-ca certs/ +# ./scripts/setup-selfhosted.sh --hosted --custom-ca /path/to/corporate-ca.crt # # The script auto-detects Daily.co (DAILY_API_KEY) and Whereby (WHEREBY_API_KEY) # from server/.env. If Daily.co is configured, Hatchet workflow services are @@ -154,16 +163,19 @@ env_set() { } compose_cmd() { - local profiles="" + local profiles="" files="-f $COMPOSE_FILE" + [[ "$USE_CUSTOM_CA" == "true" ]] && files="$files -f $ROOT_DIR/docker-compose.ca.yml" for p in "${COMPOSE_PROFILES[@]}"; do profiles="$profiles --profile $p" done - docker compose -f "$COMPOSE_FILE" $profiles "$@" + docker compose $files $profiles "$@" } # Compose command with only garage profile (for garage-only operations before full stack start) compose_garage_cmd() { - docker compose -f "$COMPOSE_FILE" --profile garage "$@" + local files="-f $COMPOSE_FILE" + [[ "$USE_CUSTOM_CA" == "true" ]] && files="$files -f $ROOT_DIR/docker-compose.ca.yml" + docker compose $files --profile garage "$@" } # --- Parse arguments --- @@ -174,6 +186,9 @@ USE_CADDY=false CUSTOM_DOMAIN="" # optional domain for Let's Encrypt HTTPS BUILD_IMAGES=false # build backend/frontend from source ADMIN_PASSWORD="" # optional admin password for password auth +CUSTOM_CA="" # --custom-ca: path to dir or CA cert file +USE_CUSTOM_CA=false # derived flag: true when --custom-ca is provided +EXTRA_CA_FILES=() # --extra-ca: 
additional CA certs to trust (can be repeated) SKIP_NEXT=false ARGS=("$@") @@ -227,18 +242,95 @@ for i in "${!ARGS[@]}"; do CUSTOM_DOMAIN="${ARGS[$next_i]}" USE_CADDY=true # --domain implies --caddy SKIP_NEXT=true ;; + --custom-ca) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--custom-ca requires a path to a directory or PEM certificate file" + exit 1 + fi + CUSTOM_CA="${ARGS[$next_i]}" + USE_CUSTOM_CA=true + SKIP_NEXT=true ;; + --extra-ca) + next_i=$((i + 1)) + if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then + err "--extra-ca requires a path to a PEM certificate file" + exit 1 + fi + extra_ca_file="${ARGS[$next_i]}" + if [[ ! -f "$extra_ca_file" ]]; then + err "--extra-ca file not found: $extra_ca_file" + exit 1 + fi + EXTRA_CA_FILES+=("$extra_ca_file") + USE_CUSTOM_CA=true + SKIP_NEXT=true ;; *) err "Unknown argument: $arg" - err "Usage: $0 <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASS] [--build]" + err "Usage: $0 <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--custom-ca PATH] [--password PASS] [--build]" exit 1 ;; esac done +# --- Resolve --custom-ca flag --- +CA_CERT_PATH="" # resolved path to CA certificate +TLS_CERT_PATH="" # resolved path to server cert (optional, for Caddy TLS) +TLS_KEY_PATH="" # resolved path to server key (optional, for Caddy TLS) + +if [[ "$USE_CUSTOM_CA" == "true" ]]; then + # Strip trailing slashes to avoid double-slash paths + CUSTOM_CA="${CUSTOM_CA%/}" + + if [[ -z "$CUSTOM_CA" ]] && [[ -n "${EXTRA_CA_FILES[0]+x}" ]]; then + # --extra-ca only (no --custom-ca): use first extra CA as the base + CA_CERT_PATH="${EXTRA_CA_FILES[0]}" + unset 'EXTRA_CA_FILES[0]' + EXTRA_CA_FILES=("${EXTRA_CA_FILES[@]+"${EXTRA_CA_FILES[@]}"}") + elif [[ -d "$CUSTOM_CA" ]]; then + # Directory mode: look for convention files + if [[ ! 
-f "$CUSTOM_CA/ca.crt" ]]; then + err "CA certificate not found: $CUSTOM_CA/ca.crt" + err "Directory must contain ca.crt (and optionally server.pem + server-key.pem)" + exit 1 + fi + CA_CERT_PATH="$CUSTOM_CA/ca.crt" + # Server cert/key are optional — if both present, use for Caddy TLS + if [[ -f "$CUSTOM_CA/server.pem" ]] && [[ -f "$CUSTOM_CA/server-key.pem" ]]; then + TLS_CERT_PATH="$CUSTOM_CA/server.pem" + TLS_KEY_PATH="$CUSTOM_CA/server-key.pem" + elif [[ -f "$CUSTOM_CA/server.pem" ]] || [[ -f "$CUSTOM_CA/server-key.pem" ]]; then + warn "Found only one of server.pem/server-key.pem in $CUSTOM_CA — both are needed for Caddy TLS. Skipping." + fi + elif [[ -f "$CUSTOM_CA" ]]; then + # Single file mode: CA trust only (no Caddy TLS certs) + CA_CERT_PATH="$CUSTOM_CA" + else + err "--custom-ca path not found: $CUSTOM_CA" + exit 1 + fi + + # Validate PEM format + if ! head -1 "$CA_CERT_PATH" | grep -q "BEGIN"; then + err "CA certificate does not appear to be PEM format: $CA_CERT_PATH" + exit 1 + fi + + # If server cert/key found, require --domain and imply --caddy + if [[ -n "$TLS_CERT_PATH" ]]; then + if [[ -z "$CUSTOM_DOMAIN" ]]; then + err "Server cert/key found in $CUSTOM_CA but --domain not set." + err "Provide --domain to specify the domain name matching the certificate." + exit 1 + fi + USE_CADDY=true # custom TLS certs imply --caddy + fi +fi + if [[ -z "$MODEL_MODE" ]]; then err "No model mode specified. You must choose --gpu, --cpu, or --hosted." 
err "" - err "Usage: $0 <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASS] [--build]" + err "Usage: $0 <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--custom-ca PATH] [--password PASS] [--build]" err "" err "ML processing modes (required):" err " --gpu NVIDIA GPU container for transcription/diarization/translation" @@ -255,6 +347,8 @@ if [[ -z "$MODEL_MODE" ]]; then err " --garage Local S3-compatible storage (Garage)" err " --caddy Caddy reverse proxy with self-signed cert" err " --domain DOMAIN Use a real domain with Let's Encrypt HTTPS (implies --caddy)" + err " --custom-ca PATH Custom CA cert (dir with ca.crt[+server.pem+server-key.pem] or single PEM file)" + err " --extra-ca FILE Additional CA cert to trust (repeatable for multiple CAs)" err " --password PASS Enable password auth (admin@localhost) instead of public mode" err " --build Build backend/frontend images from source instead of pulling" exit 1 @@ -366,6 +460,103 @@ print(f'pbkdf2:sha256:100000\$\$' + salt + '\$\$' + dk.hex()) ok "Secrets ready" } +# ========================================================= +# Step 1b: Custom CA certificate setup +# ========================================================= +step_custom_ca() { + if [[ "$USE_CUSTOM_CA" != "true" ]]; then + # Clean up stale override from previous runs + rm -f "$ROOT_DIR/docker-compose.ca.yml" + return + fi + + info "Configuring custom CA certificate" + local certs_dir="$ROOT_DIR/certs" + mkdir -p "$certs_dir" + + # Stage CA certificate (skip copy if source and dest are the same file) + local ca_dest="$certs_dir/ca.crt" + local src_id dst_id + src_id=$(ls -i "$CA_CERT_PATH" 2>/dev/null | awk '{print $1}') + dst_id=$(ls -i "$ca_dest" 2>/dev/null | awk '{print $1}') + if [[ "$src_id" != "$dst_id" ]] || [[ -z "$dst_id" ]]; then + cp "$CA_CERT_PATH" "$ca_dest" + fi + chmod 644 "$ca_dest" + ok "CA 
certificate staged at certs/ca.crt" + + # Append extra CA certs (--extra-ca flags) + for extra_ca in "${EXTRA_CA_FILES[@]+"${EXTRA_CA_FILES[@]}"}"; do + if ! head -1 "$extra_ca" | grep -q "BEGIN"; then + warn "Skipping $extra_ca — does not appear to be PEM format" + continue + fi + echo "" >> "$ca_dest" + cat "$extra_ca" >> "$ca_dest" + ok "Appended extra CA: $extra_ca" + done + + # Stage TLS cert/key if present (for Caddy) + if [[ -n "$TLS_CERT_PATH" ]]; then + local cert_dest="$certs_dir/server.pem" + local key_dest="$certs_dir/server-key.pem" + src_id=$(ls -i "$TLS_CERT_PATH" 2>/dev/null | awk '{print $1}') + dst_id=$(ls -i "$cert_dest" 2>/dev/null | awk '{print $1}') + if [[ "$src_id" != "$dst_id" ]] || [[ -z "$dst_id" ]]; then + cp "$TLS_CERT_PATH" "$cert_dest" + cp "$TLS_KEY_PATH" "$key_dest" + fi + chmod 644 "$cert_dest" + chmod 600 "$key_dest" + ok "TLS cert/key staged at certs/server.pem, certs/server-key.pem" + fi + + # Generate docker-compose.ca.yml override + local ca_override="$ROOT_DIR/docker-compose.ca.yml" + cat > "$ca_override" << 'CAEOF' +# Generated by setup-selfhosted.sh — custom CA trust for backend services. +# Do not edit manually; re-run setup-selfhosted.sh with --custom-ca to regenerate. 
+services: + server: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + worker: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + beat: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + hatchet-worker-llm: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + hatchet-worker-cpu: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + gpu: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + cpu: + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro + web: + environment: + NODE_EXTRA_CA_CERTS: /usr/local/share/ca-certificates/custom-ca.crt + volumes: + - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro +CAEOF + + # If TLS cert/key present, also mount certs dir into Caddy + if [[ -n "$TLS_CERT_PATH" ]]; then + cat >> "$ca_override" << 'CADDYCAEOF' + caddy: + volumes: + - ./certs:/etc/caddy/certs:ro +CADDYCAEOF + fi + + ok "Generated docker-compose.ca.yml override" +} + # ========================================================= # Step 2: Generate server/.env # ========================================================= @@ -799,7 +990,25 @@ step_caddyfile() { rm -rf "$caddyfile" fi - if [[ -n "$CUSTOM_DOMAIN" ]]; then + if [[ -n "$TLS_CERT_PATH" ]] && [[ -n "$CUSTOM_DOMAIN" ]]; then + # Custom domain with user-provided TLS certificate (from --custom-ca directory) + cat > "$caddyfile" << CADDYEOF +# Generated by setup-selfhosted.sh — Custom TLS cert for $CUSTOM_DOMAIN +$CUSTOM_DOMAIN { + tls /etc/caddy/certs/server.pem /etc/caddy/certs/server-key.pem + handle /v1/* { + reverse_proxy server:1250 + } + handle /health { + reverse_proxy server:1250 + } + handle { + reverse_proxy web:3000 + } +} +CADDYEOF + ok "Created Caddyfile for $CUSTOM_DOMAIN (custom TLS certificate)" + elif [[ -n "$CUSTOM_DOMAIN" ]]; then # Real domain: Caddy auto-provisions Let's 
Encrypt certificate cat > "$caddyfile" << CADDYEOF # Generated by setup-selfhosted.sh — Let's Encrypt HTTPS for $CUSTOM_DOMAIN @@ -1170,6 +1379,8 @@ main() { echo " Garage: $USE_GARAGE" echo " Caddy: $USE_CADDY" [[ -n "$CUSTOM_DOMAIN" ]] && echo " Domain: $CUSTOM_DOMAIN" + [[ "$USE_CUSTOM_CA" == "true" ]] && echo " CA: Custom ($CUSTOM_CA)" + [[ -n "$TLS_CERT_PATH" ]] && echo " TLS: Custom cert (from $CUSTOM_CA)" [[ "$BUILD_IMAGES" == "true" ]] && echo " Build: from source" echo "" @@ -1200,6 +1411,8 @@ main() { echo "" step_secrets echo "" + step_custom_ca + echo "" step_server_env echo "" @@ -1282,7 +1495,17 @@ EOF [[ "$DAILY_DETECTED" == "true" ]] && echo " Video: Daily.co (live rooms + multitrack processing via Hatchet)" [[ "$WHEREBY_DETECTED" == "true" ]] && echo " Video: Whereby (live rooms)" [[ "$ANY_PLATFORM_DETECTED" != "true" ]] && echo " Video: None (rooms disabled)" + if [[ "$USE_CUSTOM_CA" == "true" ]]; then + echo " CA: Custom (certs/ca.crt)" + [[ -n "$TLS_CERT_PATH" ]] && echo " TLS: Custom cert (certs/server.pem)" + fi echo "" + if [[ "$USE_CUSTOM_CA" == "true" ]]; then + echo " NOTE: Clients must trust the CA certificate to avoid browser warnings." + echo " CA cert location: certs/ca.crt" + echo " See docsv2/custom-ca-setup.md for instructions." 
+ echo "" + fi echo " To stop: docker compose -f docker-compose.selfhosted.yml down" echo " To re-run: ./scripts/setup-selfhosted.sh $*" echo "" diff --git a/server/Dockerfile b/server/Dockerfile index b4fc8c4a..16b6b20b 100644 --- a/server/Dockerfile +++ b/server/Dockerfile @@ -6,7 +6,7 @@ ENV PYTHONUNBUFFERED=1 \ # builder install base dependencies WORKDIR /tmp -RUN apt-get update && apt-get install -y curl ffmpeg && apt-get clean +RUN apt-get update && apt-get install -y curl ffmpeg ca-certificates && apt-get clean ADD https://astral.sh/uv/install.sh /uv-installer.sh RUN sh /uv-installer.sh && rm /uv-installer.sh ENV PATH="/root/.local/bin/:$PATH" @@ -18,7 +18,7 @@ COPY pyproject.toml uv.lock README.md /app/ RUN uv sync --compile-bytecode --locked # bootstrap -COPY alembic.ini runserver.sh /app/ +COPY alembic.ini docker-entrypoint.sh runserver.sh /app/ COPY images /app/images COPY migrations /app/migrations COPY reflector /app/reflector @@ -35,4 +35,6 @@ RUN if [ "$(uname -m)" = "aarch64" ] && [ ! -f /usr/lib/libgomp.so.1 ]; then \ # Pre-check just to make sure the image will not fail RUN uv run python -c "import silero_vad.model" -CMD ["./runserver.sh"] +RUN chmod +x /app/docker-entrypoint.sh + +CMD ["./docker-entrypoint.sh"] diff --git a/server/docker-entrypoint.sh b/server/docker-entrypoint.sh new file mode 100644 index 00000000..bfdaa1ad --- /dev/null +++ b/server/docker-entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# Custom CA certificate injection +# If a CA cert is mounted at this path (via docker-compose.ca.yml), +# add it to the system trust store and configure all Python SSL libraries. +CUSTOM_CA_PATH="/usr/local/share/ca-certificates/custom-ca.crt" + +if [ -s "$CUSTOM_CA_PATH" ]; then + echo "[entrypoint] Custom CA certificate detected, updating trust store..." 
+    update-ca-certificates >/dev/null
+
+    # update-ca-certificates creates a combined bundle (system + custom CAs)
+    COMBINED_BUNDLE="/etc/ssl/certs/ca-certificates.crt"
+    export SSL_CERT_FILE="$COMBINED_BUNDLE"
+    export REQUESTS_CA_BUNDLE="$COMBINED_BUNDLE"
+    export CURL_CA_BUNDLE="$COMBINED_BUNDLE"
+    export GRPC_DEFAULT_SSL_ROOTS_FILE_PATH="$COMBINED_BUNDLE"
+    echo "[entrypoint] CA trust store updated (SSL_CERT_FILE=$COMBINED_BUNDLE)"
+fi
+
+exec ./runserver.sh