"""Application configuration.

All values can be overridden via environment variables or a local `.env`
file (see `model_config` below). Unknown env keys are ignored.
"""

from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    """Central settings object; values come from the environment or `.env`."""

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    # CORS
    CORS_ORIGIN: str = "*"
    CORS_ALLOW_CREDENTIALS: bool = False

    # Database
    DATABASE_URL: str = "sqlite:///./reflector.sqlite3"

    # Local data directory (audio for now)
    DATA_DIR: str = "./data"

    # Audio transcription
    # backends: whisper, modal
    TRANSCRIPT_BACKEND: str = "whisper"
    TRANSCRIPT_URL: str | None = None
    TRANSCRIPT_TIMEOUT: int = 90

    # Translate into the target language
    TRANSLATE_URL: str | None = None
    TRANSLATE_TIMEOUT: int = 90

    # Audio transcription modal.com configuration
    TRANSCRIPT_MODAL_API_KEY: str | None = None

    # Audio transcription storage
    TRANSCRIPT_STORAGE_BACKEND: str | None = None

    # Storage configuration for AWS
    TRANSCRIPT_STORAGE_AWS_BUCKET_NAME: str = "reflector-bucket"
    TRANSCRIPT_STORAGE_AWS_REGION: str = "us-east-1"
    TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
    TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None

    # LLM
    # available backends: openai, modal
    LLM_BACKEND: str = "modal"

    # LLM common configuration
    LLM_URL: str | None = None
    LLM_HOST: str = "localhost"
    LLM_PORT: int = 7860
    LLM_OPENAI_KEY: str | None = None
    LLM_OPENAI_MODEL: str = "gpt-3.5-turbo"
    LLM_OPENAI_TEMPERATURE: float = 0.7
    LLM_TIMEOUT: int = 60 * 5  # take cold start into account
    LLM_MAX_TOKENS: int = 1024
    LLM_TEMPERATURE: float = 0.7
    ZEPHYR_LLM_URL: str | None = None
    HERMES_3_8B_LLM_URL: str | None = None

    # LLM Modal configuration
    LLM_MODAL_API_KEY: str | None = None

    # Per-task cases
    SUMMARY_MODEL: str = "monadical/private/smart"
    SUMMARY_LLM_URL: str | None = None
    SUMMARY_LLM_API_KEY: str | None = None
    SUMMARY_LLM_CONTEXT_SIZE_TOKENS: int = 16000

    # Diarization
    DIARIZATION_ENABLED: bool = True
    DIARIZATION_BACKEND: str = "modal"
    DIARIZATION_URL: str | None = None

    # Sentry
    SENTRY_DSN: str | None = None

    # User authentication (none, jwt)
    AUTH_BACKEND: str = "none"

    # User authentication using JWT
    AUTH_JWT_ALGORITHM: str = "RS256"
    AUTH_JWT_PUBLIC_KEY: str | None = "authentik.monadical.com_public.pem"
    AUTH_JWT_AUDIENCE: str | None = None

    # API public mode
    # if set, all anonymous records will be public
    PUBLIC_MODE: bool = False

    # Default LLM model name
    DEFAULT_LLM: str = "lmsys/vicuna-13b-v1.5"

    # Cache directory for all model storage
    CACHE_DIR: str = "./data"

    # Min transcript length to generate topic + summary
    MIN_TRANSCRIPT_LENGTH: int = 750

    # Celery
    CELERY_BROKER_URL: str = "redis://localhost:6379/1"
    CELERY_RESULT_BACKEND: str = "redis://localhost:6379/1"

    # Redis
    REDIS_HOST: str = "localhost"
    REDIS_PORT: int = 6379
    REDIS_CACHE_DB: int = 2

    # Secret key — the default is a placeholder; override it in deployment
    SECRET_KEY: str = "changeme-f02f86fd8b3e4fd892c6043e5a298e21"

    # Current hosting/domain
    BASE_URL: str = "http://localhost:1250"

    # Profiling
    PROFILING: bool = False

    # Healthcheck
    HEALTHCHECK_URL: str | None = None

    # AWS SQS queue for recording processing
    AWS_PROCESS_RECORDING_QUEUE_URL: str | None = None
    SQS_POLLING_TIMEOUT_SECONDS: int = 60

    # Whereby API integration (plus its S3 recording storage credentials)
    WHEREBY_API_URL: str = "https://api.whereby.dev/v1"
    WHEREBY_API_KEY: str | None = None
    AWS_WHEREBY_S3_BUCKET: str | None = None
    AWS_WHEREBY_ACCESS_KEY_ID: str | None = None
    AWS_WHEREBY_ACCESS_KEY_SECRET: str | None = None

    # Zulip integration
    ZULIP_REALM: str | None = None
    ZULIP_API_KEY: str | None = None
    ZULIP_BOT_EMAIL: str | None = None

    # Frontend base URL
    UI_BASE_URL: str = "http://localhost:3000"

    # Whereby webhook signature secret
    WHEREBY_WEBHOOK_SECRET: str | None = None


# Module-level singleton used throughout the application.
settings = Settings()