server: fixes pipeline logger not transmitted to processors

Closes #110
This commit is contained in:
Mathieu Virbel
2023-08-04 12:02:18 +02:00
parent 6d2085ce61
commit dce92e0cf7
6 changed files with 32 additions and 9 deletions

View File

@@ -1,6 +1,6 @@
from reflector.logger import logger
from reflector.settings import settings
from reflector.utils.retry import retry
from reflector.logger import logger as reflector_logger
import importlib
import json
import re
@@ -29,15 +29,18 @@ class LLM:
importlib.import_module(module_name)
return cls._registry[name]()
async def generate(self, prompt: str, logger=reflector_logger, **kwargs) -> dict:
    """Run the backend LLM on *prompt* and return the parsed result.

    Parameters:
        prompt: text sent to the model.
        logger: structured logger used for this call. Defaults to the
            module-level reflector logger so existing callers keep working,
            while pipeline processors can pass their own contextual logger
            (the point of this change — see issue #110).
        **kwargs: extra options forwarded verbatim to the backend `_generate`.

    Returns:
        dict parsed from the model output; string results are JSON-parsed
        via `_parse_json`.

    Raises:
        Whatever the backend `_generate` raises once retries are exhausted.
    """
    logger.info("LLM generate", prompt=repr(prompt))
    try:
        # retry() wraps the backend call with the project's retry policy.
        result = await retry(self._generate)(prompt=prompt, **kwargs)
    except Exception:
        logger.exception("Failed to call llm after retrying")
        raise
    logger.debug("LLM result [raw]", result=repr(result))
    # Backends may return a raw JSON string; normalize it to a dict.
    if isinstance(result, str):
        result = self._parse_json(result)
    logger.debug("LLM result [parsed]", result=repr(result))
    return result

View File

@@ -21,7 +21,6 @@ class OpenAILLM(LLM):
"Authorization": f"Bearer {self.openai_key}",
}
logger.debug(f"LLM openai prompt: {prompt}")
async with httpx.AsyncClient(timeout=self.timeout) as client:
response = await client.post(
@@ -36,7 +35,6 @@ class OpenAILLM(LLM):
)
response.raise_for_status()
result = response.json()
logger.info(f"LLM openai result: {result}")
return result["choices"][0]["text"]