diff --git a/server_executor_cleaned.py b/server_executor_cleaned.py
index c9f61d4b..2c2e54a3 100644
--- a/server_executor_cleaned.py
+++ b/server_executor_cleaned.py
@@ -28,6 +28,7 @@
 RATE = 48000
 audio_buffer = AudioFifo()
 executor = ThreadPoolExecutor()
 transcription_text = ""
+# Load your locally downloaded Vicuna model here. Set this path in the config.ini file
 llm = GPT4All(config["DEFAULT"]["LLM_PATH"])
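
For context, the llm line above reads LLM_PATH from a configparser-style config.ini and passes it straight to GPT4All. A minimal sketch of that setup follows; the config.ini layout, the placeholder model path, and the assumption that GPT4All comes from the gpt4all Python bindings are illustrative guesses, not part of this diff.

    # config.ini (assumed layout; only LLM_PATH is referenced by the diff)
    # [DEFAULT]
    # LLM_PATH = /path/to/vicuna-model.bin   <-- placeholder, point at your downloaded weights

    import configparser
    from gpt4all import GPT4All  # assumed source of the GPT4All class

    # Read the config file that holds the local model path.
    config = configparser.ConfigParser()
    config.read("config.ini")

    # Load the locally downloaded Vicuna weights from the configured path,
    # mirroring the call added in the diff above.
    llm = GPT4All(config["DEFAULT"]["LLM_PATH"])

Keeping the path in config.ini rather than hard-coding it lets each machine point at its own model file without touching the server code.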