diff --git a/server_executor_cleaned.py b/server_executor_cleaned.py
index 579ea94d..2c2e54a3 100644
--- a/server_executor_cleaned.py
+++ b/server_executor_cleaned.py
@@ -14,7 +14,7 @@
 from gpt4all import GPT4All
 from loguru import logger
 from whisper_jax import FlaxWhisperPipline
-from utils.run_utils import run_in_executor
+from utils.run_utils import run_in_executor, config
 
 pcs = set()
 relay = MediaRelay()
@@ -28,7 +28,8 @@ RATE = 48000
 audio_buffer = AudioFifo()
 executor = ThreadPoolExecutor()
 transcription_text = ""
-llm = GPT4All("/Users/gokulmohanarangan/Library/Application Support/nomic.ai/GPT4All/ggml-vicuna-13b-1.1-q4_2.bin")
+# Load your locally downloaded Vicuna model and load it here. Set this path in the config.ini file
+llm = GPT4All(config["DEFAULT"]["LLM_PATH"])
 
 
 def get_title_and_summary():
diff --git a/utils/config.ini b/utils/config.ini
new file mode 100644
index 00000000..976f4a32
--- /dev/null
+++ b/utils/config.ini
@@ -0,0 +1,24 @@
+[DEFAULT]
+# Set exception rule for OpenMP error to allow duplicate lib initialization
+KMP_DUPLICATE_LIB_OK=TRUE
+# Export OpenAI API Key
+OPENAI_APIKEY=
+# Export Whisper Model Size
+WHISPER_MODEL_SIZE=tiny
+WHISPER_REAL_TIME_MODEL_SIZE=tiny
+# AWS config
+AWS_ACCESS_KEY=***REMOVED***
+AWS_SECRET_KEY=***REMOVED***
+BUCKET_NAME=reflector-bucket
+# Summarizer config
+SUMMARY_MODEL=facebook/bart-large-cnn
+INPUT_ENCODING_MAX_LENGTH=1024
+MAX_LENGTH=2048
+BEAM_SIZE=6
+MAX_CHUNK_LENGTH=1024
+SUMMARIZE_USING_CHUNKS=YES
+# Audio device
+BLACKHOLE_INPUT_AGGREGATOR_DEVICE_NAME=aggregator
+AV_FOUNDATION_DEVICE_ID=1
+# LLM PATH
+LLM_PATH=