From 82ce8202bdc18326a037db717de02f317d22322c Mon Sep 17 00:00:00 2001
From: Mathieu Virbel
Date: Fri, 11 Aug 2023 19:29:48 +0200
Subject: [PATCH] server: improve llm warmup exception handling

If the LLM is stuck warming up, or an exception happens in the
pipeline, the processor responsible for the exception fails and there
is no fallback: audio continues to arrive, but no processing happens.
While this should eventually be handled properly, especially after a
disconnection, for now we ignore LLM warmup failures and keep going.

Closes #140
---
 server/reflector/llm/base.py        | 3 +--
 server/reflector/processors/base.py | 7 ++++++-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/server/reflector/llm/base.py b/server/reflector/llm/base.py
index 0f2c91c7..e528a3e6 100644
--- a/server/reflector/llm/base.py
+++ b/server/reflector/llm/base.py
@@ -39,8 +39,7 @@ class LLM:
             duration = monotonic() - start
             logger.info(f"LLM[{name}] warmup took {duration:.2f} seconds")
         except Exception:
-            logger.exception(f"LLM[{name}] warmup failed")
-            raise
+            logger.exception(f"LLM[{name}] warmup failed, ignoring")
 
     async def _warmup(self, logger: reflector_logger):
         pass

diff --git a/server/reflector/processors/base.py b/server/reflector/processors/base.py
index 85cbc3fd..4a7f2bc2 100644
--- a/server/reflector/processors/base.py
+++ b/server/reflector/processors/base.py
@@ -143,7 +143,12 @@ class ThreadedProcessor(Processor):
                     self.logger.debug(f"Warming up {self.processor.__class__.__name__}")
                     await self.processor.warmup()
                     continue
-                await self.processor.push(data)
+                try:
+                    await self.processor.push(data)
+                except Exception:
+                    self.logger.error(
+                        f"Error in push {self.processor.__class__.__name__}, continue"
+                    )
             finally:
                 self.queue.task_done()
 
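
Note for reviewers: a minimal standalone sketch of the error-isolation pattern the
second hunk applies to the queue-draining loop, so a single failing item no longer
kills the consumer. The queue setup, the `process` coroutine, and the item names
here are hypothetical illustrations, not code from the reflector codebase.

```python
import asyncio
import contextlib
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("consumer")


async def process(item: str) -> None:
    # Hypothetical per-item handler standing in for processor.push(data).
    if item == "bad":
        raise ValueError("boom")
    logger.info("processed %r", item)


async def consume(queue: asyncio.Queue) -> None:
    """Drain the queue forever; one failing item must not stop the loop."""
    while True:
        item = await queue.get()
        try:
            await process(item)
        except Exception:
            # Mirror the patch: log the failure and keep consuming instead
            # of letting the exception escape and kill the worker task.
            logger.exception("Error processing %r, continuing", item)
        finally:
            # task_done() runs even on failure, so queue.join() still returns.
            queue.task_done()


async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue()
    for item in ("ok-1", "bad", "ok-2"):
        queue.put_nowait(item)
    worker = asyncio.create_task(consume(queue))
    await queue.join()  # completes despite the "bad" item raising
    worker.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await worker


asyncio.run(main())
```

Running the sketch logs the traceback for "bad" but still processes "ok-2",
which is the behavior the patch wants from ThreadedProcessor after a push failure.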