mirror of
https://github.com/Monadical-SAS/reflector.git
synced 2026-03-21 22:56:47 +00:00
* fix: local processing instead of http server for cpu * add fallback token if service worker doesn't work * chore: rename processors to keep processor pattern up to date and allow other processors to be created and used with env vars
166 lines
6.6 KiB
Plaintext
# =======================================================
|
|
# Reflector Self-Hosted Production — Backend Configuration
|
|
# Generated by: ./scripts/setup-selfhosted.sh
|
|
# Reference: server/reflector/settings.py
|
|
# =======================================================
|
|
|
|
# =======================================================
|
|
# Database & Infrastructure
|
|
# Pre-filled for Docker internal networking (docker-compose.selfhosted.yml)
|
|
# =======================================================
|
|
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
|
REDIS_HOST=redis
|
|
REDIS_PORT=6379
|
|
CELERY_BROKER_URL=redis://redis:6379/1
|
|
CELERY_RESULT_BACKEND=redis://redis:6379/1
|
|
|
|
# Secret key — auto-generated by setup script
|
|
# Generate manually with: openssl rand -hex 32
|
|
SECRET_KEY=changeme-generate-a-secure-random-string
|
|
|
|
# =======================================================
|
|
# Authentication
|
|
# Disabled by default. Enable Authentik for multi-user access.
|
|
# See docsv2/selfhosted-production.md for setup instructions.
|
|
# =======================================================
|
|
AUTH_BACKEND=none
|
|
# AUTH_BACKEND=jwt
|
|
# AUTH_JWT_AUDIENCE=
|
|
# AUTH_BACKEND=password
|
|
# ADMIN_EMAIL=admin@localhost
|
|
# ADMIN_PASSWORD_HASH=pbkdf2:sha256:100000$<salt>$<hash>
|
|
|
|
# =======================================================
|
|
# Specialized Models (Transcription, Diarization, Translation)
|
|
# These do NOT use an LLM. Configured per mode by the setup script:
|
|
#
|
|
# --gpu mode: modal backends → GPU container (http://transcription:8000)
|
|
# --cpu mode: whisper/pyannote/marian/pyav → in-process ML on server/worker
|
|
# --hosted mode: modal backends → user-provided remote GPU service URL
|
|
# =======================================================
|
|
|
|
# --- --gpu mode (default) ---
|
|
TRANSCRIPT_BACKEND=modal
|
|
TRANSCRIPT_URL=http://transcription:8000
|
|
TRANSCRIPT_MODAL_API_KEY=selfhosted
|
|
DIARIZATION_ENABLED=true
|
|
DIARIZATION_BACKEND=modal
|
|
DIARIZATION_URL=http://transcription:8000
|
|
TRANSLATION_BACKEND=modal
|
|
TRANSLATE_URL=http://transcription:8000
|
|
PADDING_BACKEND=modal
|
|
PADDING_URL=http://transcription:8000
|
|
|
|
# --- --cpu mode (set by setup script) ---
|
|
# TRANSCRIPT_BACKEND=whisper
|
|
# DIARIZATION_BACKEND=pyannote
|
|
# TRANSLATION_BACKEND=marian
|
|
# PADDING_BACKEND=pyav
|
|
|
|
# --- --hosted mode (set by setup script) ---
|
|
# TRANSCRIPT_BACKEND=modal
|
|
# TRANSCRIPT_URL=https://your-gpu-service.example.com
|
|
# DIARIZATION_BACKEND=modal
|
|
# DIARIZATION_URL=https://your-gpu-service.example.com
|
|
# ... (all URLs point to one remote service)
|
|
|
|
# Whisper model sizes for local transcription (--cpu mode)
|
|
# Options: "tiny", "base", "small", "medium", "large-v2"
|
|
# WHISPER_CHUNK_MODEL=tiny
|
|
# WHISPER_FILE_MODEL=tiny
|
|
|
|
# HuggingFace token — for gated models (e.g. pyannote diarization).
|
|
# Recommended for --gpu and --cpu modes; falls back to the public S3 bundle if not set.
|
|
# Not needed for --hosted mode (remote service handles its own auth).
|
|
# HF_TOKEN=hf_xxxxx
|
|
|
|
# =======================================================
|
|
# LLM for Summarization & Topic Detection
|
|
# Only summaries and topics use an LLM. Everything else
|
|
# (transcription, diarization, translation) uses specialized models above.
|
|
#
|
|
# Supports any OpenAI-compatible endpoint.
|
|
# Auto-configured by setup script if using --ollama-gpu or --ollama-cpu.
|
|
# For --gpu or --cpu modes, you MUST configure an external LLM.
|
|
# =======================================================
|
|
|
|
# --- Option A: External OpenAI-compatible API ---
|
|
# LLM_URL=https://api.openai.com/v1
|
|
# LLM_API_KEY=sk-your-api-key
|
|
# LLM_MODEL=gpt-4o-mini
|
|
|
|
# --- Option B: Local Ollama (auto-set by --ollama-gpu/--ollama-cpu) ---
|
|
# LLM_URL=http://ollama:11435/v1
|
|
# LLM_API_KEY=not-needed
|
|
# LLM_MODEL=llama3.1
|
|
|
|
LLM_CONTEXT_WINDOW=16000
|
|
|
|
# =======================================================
|
|
# S3 Storage (REQUIRED)
|
|
# Where to store audio files and transcripts.
|
|
#
|
|
# Option A: Use --garage flag (auto-configured by setup script)
|
|
# Option B: Any S3-compatible endpoint (AWS, MinIO, etc.)
|
|
# Set TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL for non-AWS endpoints.
|
|
# =======================================================
|
|
TRANSCRIPT_STORAGE_BACKEND=aws
|
|
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=
|
|
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=
|
|
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
|
|
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
|
|
|
|
# For non-AWS S3-compatible endpoints (Garage, MinIO, etc.):
|
|
# TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL=http://garage:3900
|
|
|
|
# =======================================================
|
|
# Daily.co Live Rooms (Optional)
|
|
# Enable real-time meeting rooms with Daily.co integration.
|
|
# Configure these BEFORE running setup-selfhosted.sh and the
|
|
# script will auto-detect and start Hatchet workflow services.
|
|
#
|
|
# Prerequisites:
|
|
# 1. Daily.co account: https://www.daily.co/
|
|
# 2. API key: Dashboard → Developers → API Keys
|
|
# 3. S3 bucket for recordings: https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket
|
|
# 4. IAM role ARN for Daily.co to write recordings to your bucket
|
|
#
|
|
# After configuring, run: ./scripts/setup-selfhosted.sh <your-flags>
|
|
# The script will detect DAILY_API_KEY and automatically:
|
|
# - Start Hatchet workflow engine + CPU/LLM workers
|
|
# - Generate a Hatchet API token
|
|
# - Enable FEATURE_ROOMS in the frontend
|
|
# =======================================================
|
|
# DAILY_API_KEY=your-daily-api-key
|
|
# DAILY_SUBDOMAIN=your-subdomain
|
|
# DEFAULT_VIDEO_PLATFORM=daily
|
|
# DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
|
|
# DAILYCO_STORAGE_AWS_REGION=us-east-1
|
|
# DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::<account-id>:role/DailyCoAccess
|
|
# Worker credentials for reading/deleting from Daily's recording bucket
|
|
# Required when transcript storage is separate from Daily's bucket (e.g., selfhosted with Garage)
|
|
# DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
|
|
# DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
|
|
# DAILY_WEBHOOK_SECRET=your-daily-webhook-secret # optional, for faster recording discovery
|
|
|
|
# =======================================================
|
|
# Hatchet Workflow Engine (Auto-configured for Daily.co)
|
|
# Required for Daily.co multitrack recording processing.
|
|
# The setup script generates HATCHET_CLIENT_TOKEN automatically.
|
|
# Do not set these manually unless you know what you're doing.
|
|
# =======================================================
|
|
# HATCHET_CLIENT_TOKEN=<auto-generated-by-script>
|
|
# HATCHET_CLIENT_SERVER_URL=http://hatchet:8888
|
|
# HATCHET_CLIENT_HOST_PORT=hatchet:7077
|
|
|
|
# =======================================================
|
|
# Feature Flags
|
|
# =======================================================
|
|
PUBLIC_MODE=true
|
|
# FEATURE_ROOMS=true
|
|
|
|
# =======================================================
|
|
# Sentry (Optional)
|
|
# =======================================================
|
|
# SENTRY_DSN=
|