#
# This file serves as an example of possible configuration
# All the settings are described here: reflector/settings.py
#

## =======================================================
## Sentry
## =======================================================

## Sentry DSN configuration
#SENTRY_DSN=

## =======================================================
## Transcription backend
##
## Check reflector/processors/audio_transcript_* for the
## full list of available transcription backends
## =======================================================

## Using local whisper (default)
#TRANSCRIPT_BACKEND=whisper
#WHISPER_MODEL_SIZE=tiny

## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#TRANSCRIPT_BACKEND=modal
#TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
#TRANSCRIPT_MODAL_API_KEY=xxxxx

## Using serverless banana.dev (requires reflector-gpu-banana to be deployed)
## XXX this service is buggy; do not use at the moment
## XXX it also requires the audio to be saved to S3
#TRANSCRIPT_BACKEND=banana
#TRANSCRIPT_URL=https://reflector-gpu-banana-xxxxx.run.banana.dev
#TRANSCRIPT_BANANA_API_KEY=xxx
#TRANSCRIPT_BANANA_MODEL_KEY=xxx
#TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=xxx
#TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=xxx
#TRANSCRIPT_STORAGE_AWS_BUCKET_NAME="reflector-bucket/chunks"

## =======================================================
## LLM backend
##
## Check reflector/llm/* for the full list of available
## llm backend implementations
## =======================================================

## Using oobagooda (default)
#LLM_BACKEND=oobagooda
#LLM_URL=http://xxx:7860/api/generate/v1

## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#LLM_BACKEND=modal
#LLM_URL=https://xxxxxx--reflector-llm-web.modal.run
#LLM_MODAL_API_KEY=xxx

## Using serverless banana.dev (requires reflector-gpu-banana to be deployed)
## XXX this service is buggy; do not use at the moment
#LLM_BACKEND=banana
#LLM_URL=https://reflector-gpu-banana-xxxxx.run.banana.dev
#LLM_BANANA_API_KEY=xxxxx
#LLM_BANANA_MODEL_KEY=xxxxx

## Using OpenAI
#LLM_BACKEND=openai
#LLM_OPENAI_KEY=xxx
#LLM_OPENAI_MODEL=gpt-3.5-turbo

## Using GPT4All (through its OpenAI-compatible local API — note the openai backend)
#LLM_BACKEND=openai
#LLM_URL=http://localhost:4891/v1/completions
#LLM_OPENAI_MODEL="GPT4All Falcon"