server: add env.example

Closes #95
This commit is contained in:
Mathieu Virbel
2023-08-11 19:14:49 +02:00
committed by Mathieu Virbel
parent 01806ce037
commit 93acea4ad9
2 changed files with 74 additions and 1 deletions

72
server/env.example Normal file
View File

@@ -0,0 +1,72 @@
#
# This file serves as an example of a possible configuration
# All the settings are described here: reflector/settings.py
#
## =======================================================
## Sentry
## =======================================================
## Sentry DSN configuration
#SENTRY_DSN=
## =======================================================
## Transcription backend
##
## Check reflector/processors/audio_transcript_* for the
## full list of available transcription backends
## =======================================================
## Using local whisper (default)
#TRANSCRIPT_BACKEND=whisper
#WHISPER_MODEL_SIZE=tiny
## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#TRANSCRIPT_BACKEND=modal
#TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
#TRANSCRIPT_MODAL_API_KEY=xxxxx
## Using serverless banana.dev (requires reflector-gpu-banana to be deployed)
## XXX this service is buggy; do not use it at the moment
## XXX it also requires the audio to be saved to S3
#TRANSCRIPT_BACKEND=banana
#TRANSCRIPT_URL=https://reflector-gpu-banana-xxxxx.run.banana.dev
#TRANSCRIPT_BANANA_API_KEY=xxx
#TRANSCRIPT_BANANA_MODEL_KEY=xxx
#TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=xxx
#TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=xxx
#TRANSCRIPT_STORAGE_AWS_BUCKET_NAME="reflector-bucket/chunks"
## =======================================================
## LLM backend
##
## Check reflector/llm/* for the full list of available
## LLM backend implementations
## =======================================================
## Using oobagooda (default)
#LLM_BACKEND=oobagooda
#LLM_URL=http://xxx:7860/api/generate/v1
## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#LLM_BACKEND=modal
#LLM_URL=https://xxxxxx--reflector-llm-web.modal.run
#LLM_MODAL_API_KEY=xxx
## Using serverless banana.dev (requires reflector-gpu-banana to be deployed)
## XXX this service is buggy; do not use it at the moment
#LLM_BACKEND=banana
#LLM_URL=https://reflector-gpu-banana-xxxxx.run.banana.dev
#LLM_BANANA_API_KEY=xxxxx
#LLM_BANANA_MODEL_KEY=xxxxx
## Using OpenAI
#LLM_BACKEND=openai
#LLM_OPENAI_KEY=xxx
#LLM_OPENAI_MODEL=gpt-3.5-turbo
## Using GPT4ALL
#LLM_BACKEND=openai
#LLM_URL=http://localhost:4891/v1/completions
#LLM_OPENAI_MODEL="GPT4All Falcon"

View File

@@ -27,7 +27,7 @@ class Settings(BaseSettings):
AUDIO_BUFFER_SIZE: int = 256 * 960
# Audio Transcription
# backends: whisper, banana
# backends: whisper, banana, modal
TRANSCRIPT_BACKEND: str = "whisper"
TRANSCRIPT_URL: str | None = None
TRANSCRIPT_TIMEOUT: int = 90
@@ -49,6 +49,7 @@ class Settings(BaseSettings):
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
# LLM
# available backend: openai, banana, modal, oobagooda
LLM_BACKEND: str = "oobagooda"
# LLM common configuration