# Standalone services for fully local deployment (no external dependencies).
# Usage: docker compose -f docker-compose.yml -f docker-compose.standalone.yml up -d
#
# On Linux with NVIDIA GPU, also pass: --profile ollama-gpu
# On Linux without GPU: --profile ollama-cpu
# On Mac: Ollama runs natively (Metal GPU) — no profile needed, services here unused.

services:
  # Local S3-compatible object storage (S3 API on 3900, admin API on 3903).
  garage:
    image: dxflrs/garage:v1.1.0
    ports:
      - "3900:3900" # S3 API
      - "3903:3903" # Admin API
    volumes:
      - garage_data:/var/lib/garage/data
      - garage_meta:/var/lib/garage/meta
      - ./data/garage.toml:/etc/garage.toml:ro
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "/garage", "stats"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 5s

  # Ollama with NVIDIA GPU passthrough (Linux only; enable via --profile ollama-gpu).
  ollama:
    image: ollama/ollama:latest
    profiles: ["ollama-gpu"]
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama image — verify,
      # otherwise the healthcheck will report unhealthy even when the API is up.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
      interval: 10s
      timeout: 5s
      retries: 5

  # CPU-only Ollama (enable via --profile ollama-cpu). Shares ollama_data and the
  # 11434 host port with the GPU variant; the profiles are mutually exclusive.
  ollama-cpu:
    image: ollama/ollama:latest
    profiles: ["ollama-cpu"]
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    healthcheck:
      # NOTE(review): assumes curl is available inside the ollama image — verify.
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Override server to use standard compose networking instead of network_mode:host.
  # host mode breaks on macOS Docker Desktop and prevents Docker DNS resolution.
  server:
    network_mode: !reset null
    ports:
      - "1250:1250"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
    environment:
      # Override base compose's localhost URLs with Docker DNS names
      DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
      REDIS_HOST: redis
      CELERY_BROKER_URL: redis://redis:6379/1
      CELERY_RESULT_BACKEND: redis://redis:6379/1
      # Standalone doesn't run Hatchet — blank out localhost URLs inherited from base
      HATCHET_CLIENT_SERVER_URL: ""
      HATCHET_CLIENT_HOST_PORT: ""
      # Self-hosted transcription/diarization via CPU service
      TRANSCRIPT_BACKEND: modal
      TRANSCRIPT_URL: http://cpu:8000
      TRANSCRIPT_MODAL_API_KEY: local
      DIARIZATION_BACKEND: modal
      DIARIZATION_URL: http://cpu:8000

  # Worker gets the same self-hosted transcription/diarization endpoints as server.
  worker:
    environment:
      TRANSCRIPT_BACKEND: modal
      TRANSCRIPT_URL: http://cpu:8000
      TRANSCRIPT_MODAL_API_KEY: local
      DIARIZATION_BACKEND: modal
      DIARIZATION_URL: http://cpu:8000

  # Frontend built for standalone: production Next.js server, no external auth.
  web:
    image: reflector-frontend-standalone
    build:
      context: ./www
    command: ["node", "server.js"]
    volumes: !reset []
    environment:
      NODE_ENV: production
      # Browser-facing URLs (host-accessible ports)
      API_URL: http://localhost:1250
      WEBSOCKET_URL: ws://localhost:1250
      SITE_URL: http://localhost:3000
      # Server-side URLs (docker-network internal)
      SERVER_API_URL: http://server:1250
      KV_URL: redis://redis:6379
      KV_USE_TLS: "false"
      # Standalone: no external auth provider
      FEATURE_REQUIRE_LOGIN: "false"
      NEXTAUTH_URL: http://localhost:3000
      NEXTAUTH_SECRET: standalone-local-secret
      # Nullify partial auth vars inherited from base env_file
      AUTHENTIK_ISSUER: ""
      AUTHENTIK_REFRESH_TOKEN_URL: ""

  # Self-hosted transcription/diarization, CPU build. Reached internally as
  # http://cpu:8000; host port 8100 is for direct debugging access.
  cpu:
    build:
      context: ./gpu/self_hosted
      dockerfile: Dockerfile.cpu
    ports:
      - "8100:8000"
    volumes:
      - gpu_cache:/root/.cache
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
      interval: 15s
      timeout: 5s
      retries: 10
      start_period: 120s

  # GPU build of the same service (enable via --profile gpu-nvidia). Internal-only:
  # no host port mapping; shares the model cache volume with the cpu service.
  gpu-nvidia:
    build:
      context: ./gpu/self_hosted
    profiles: ["gpu-nvidia"]
    volumes:
      - gpu_cache:/root/.cache
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
      interval: 15s
      timeout: 5s
      retries: 10
      start_period: 120s

volumes:
  garage_data:
  garage_meta:
  ollama_data:
  gpu_cache: