#
# This file serves as an example of a possible configuration
# All the settings are described here: reflector/settings.py
#

## =======================================================
## Database
## =======================================================

#DATABASE_URL=sqlite://./reflector.db
#DATABASE_URL=postgresql://reflector:reflector@localhost:5432/reflector

## =======================================================
## User authentication
## =======================================================

## No authentication
#AUTH_BACKEND=none

## Using fief (fief.dev)
#AUTH_BACKEND=fief
#AUTH_FIEF_URL=https://your-fief-instance....
#AUTH_FIEF_CLIENT_ID=xxx
#AUTH_FIEF_CLIENT_SECRET=xxx

## =======================================================
## Public mode
## =======================================================

## If set to true, anonymous transcripts will be
## accessible to anybody.
#PUBLIC_MODE=false

## =======================================================
## Transcription backend
##
## Check reflector/processors/audio_transcript_* for the
## full list of available transcription backends
## =======================================================

## Using local whisper (default)
#TRANSCRIPT_BACKEND=whisper
#WHISPER_MODEL_SIZE=tiny

## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#TRANSCRIPT_BACKEND=modal
#TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
#TRANSLATE_URL=https://xxxxx--reflector-translator-web.modal.run
#TRANSCRIPT_MODAL_API_KEY=xxxxx

## =======================================================
## LLM backend
##
## Check reflector/llm/* for the full list of available
## LLM backend implementations
## =======================================================

## Using oobabooga (default)
#LLM_BACKEND=oobabooga
#LLM_URL=http://xxx:7860/api/generate/v1

## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#LLM_BACKEND=modal
#LLM_URL=https://xxxxxx--reflector-llm-web.modal.run
#LLM_MODAL_API_KEY=xxx

## Using OpenAI
#LLM_BACKEND=openai
#LLM_OPENAI_KEY=xxx
#LLM_OPENAI_MODEL=gpt-3.5-turbo

## Using GPT4ALL
#LLM_BACKEND=openai
#LLM_URL=http://localhost:4891/v1/completions
#LLM_OPENAI_MODEL="GPT4All Falcon"

## Default LLM model name
DEFAULT_LLM=lmsys/vicuna-13b-v1.5

## Cache directory to store models
CACHE_DIR=data

## =======================================================
## Sentry
## =======================================================

## Sentry DSN configuration
#SENTRY_DSN=
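
## =======================================================
## Example: minimal local setup
## =======================================================

## A minimal sketch of one coherent combination of the
## options above (SQLite, no auth, local whisper, local
## oobabooga). The localhost host in LLM_URL is an
## assumption for illustration; uncomment and adapt these
## values, and check reflector/settings.py for the actual
## defaults before relying on them.
#DATABASE_URL=sqlite://./reflector.db
#AUTH_BACKEND=none
#PUBLIC_MODE=false
#TRANSCRIPT_BACKEND=whisper
#WHISPER_MODEL_SIZE=tiny
#LLM_BACKEND=oobabooga
#LLM_URL=http://localhost:7860/api/generate/v1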