From f26c530f9e1cb862e84284ac244ebdc4a31a9302 Mon Sep 17 00:00:00 2001 From: projects-g <63178974+projects-g@users.noreply.github.com> Date: Tue, 18 Jul 2023 22:54:38 +0530 Subject: [PATCH 1/2] Create config.ini --- utils/config.ini | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 utils/config.ini diff --git a/utils/config.ini b/utils/config.ini new file mode 100644 index 00000000..976f4a32 --- /dev/null +++ b/utils/config.ini @@ -0,0 +1,24 @@ +[DEFAULT] +# Set exception rule for OpenMP error to allow duplicate lib initialization +KMP_DUPLICATE_LIB_OK=TRUE +# Export OpenAI API Key +OPENAI_APIKEY= +# Export Whisper Model Size +WHISPER_MODEL_SIZE=tiny +WHISPER_REAL_TIME_MODEL_SIZE=tiny +# AWS config +AWS_ACCESS_KEY=***REMOVED*** +AWS_SECRET_KEY=***REMOVED*** +BUCKET_NAME=reflector-bucket +# Summarizer config +SUMMARY_MODEL=facebook/bart-large-cnn +INPUT_ENCODING_MAX_LENGTH=1024 +MAX_LENGTH=2048 +BEAM_SIZE=6 +MAX_CHUNK_LENGTH=1024 +SUMMARIZE_USING_CHUNKS=YES +# Audio device +BLACKHOLE_INPUT_AGGREGATOR_DEVICE_NAME=aggregator +AV_FOUNDATION_DEVICE_ID=1 +# LLM PATH +LLM_PATH= From 8bea715c8f5721be9a943902ad12c48c798fc184 Mon Sep 17 00:00:00 2001 From: projects-g <63178974+projects-g@users.noreply.github.com> Date: Tue, 18 Jul 2023 22:56:53 +0530 Subject: [PATCH 2/2] Update server_executor_cleaned.py --- server_executor_cleaned.py | 1 + 1 file changed, 1 insertion(+) diff --git a/server_executor_cleaned.py b/server_executor_cleaned.py index c9f61d4b..2c2e54a3 100644 --- a/server_executor_cleaned.py +++ b/server_executor_cleaned.py @@ -28,6 +28,7 @@ RATE = 48000 audio_buffer = AudioFifo() executor = ThreadPoolExecutor() transcription_text = "" +# Load your locally downloaded Vicuna model and load it here. Set this path in the config.ini file llm = GPT4All(config["DEFAULT"]["LLM_PATH"])