Mirror of https://github.com/Monadical-SAS/reflector.git, synced 2025-12-21 12:49:06 +00:00
server: fixes pipeline logger not transmitted to processors
Closes #110
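In practice the fix means a processor can hand its own pipeline-scoped logger to the LLM layer instead of every call falling back to the global logger. A minimal caller-side sketch, assuming structlog-style bound loggers; the TranscriptProcessor class, the pipeline_id field, and the reflector.llm import path are illustrative, not part of this commit:

    from reflector.llm import LLM  # hypothetical import path, for illustration only
    from reflector.logger import logger as reflector_logger


    class TranscriptProcessor:  # illustrative processor, not from this commit
        def __init__(self, pipeline_id: str):
            # Bind pipeline context once; every line logged through self.logger
            # now carries pipeline_id (assumes a structlog-style logger).
            self.logger = reflector_logger.bind(pipeline_id=pipeline_id)

        async def summarize(self, llm: LLM, text: str) -> dict:
            # Before this commit, LLM.generate always logged through the
            # module-level logger, so the pipeline context was lost here.
            return await llm.generate(f"Summarize:\n{text}", logger=self.logger)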
@@ -1,6 +1,6 @@
-from reflector.logger import logger
 from reflector.settings import settings
 from reflector.utils.retry import retry
+from reflector.logger import logger as reflector_logger
 import importlib
 import json
 import re
@@ -29,15 +29,18 @@ class LLM:
         importlib.import_module(module_name)
         return cls._registry[name]()
 
-    async def generate(self, prompt: str, **kwargs) -> dict:
+    async def generate(self, prompt: str, logger=reflector_logger, **kwargs) -> dict:
         logger.info("LLM generate", prompt=repr(prompt))
         try:
             result = await retry(self._generate)(prompt=prompt, **kwargs)
         except Exception:
             logger.exception("Failed to call llm after retrying")
             raise
 
         logger.debug("LLM result [raw]", result=repr(result))
         if isinstance(result, str):
             result = self._parse_json(result)
             logger.debug("LLM result [parsed]", result=repr(result))
 
         return result
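Callers that pass nothing keep the previous behaviour, since the new parameter defaults to the module-level reflector_logger. A small usage sketch; the llm and pipeline_logger names are assumed to exist in the caller's scope:

    async def run(llm, pipeline_logger):
        # Default: generate() logs through the module-level reflector_logger.
        plain = await llm.generate("Summarize the meeting")
        # Pipeline-aware: generate() logs through the caller's bound logger,
        # so its "LLM generate" / "LLM result" entries carry pipeline context.
        scoped = await llm.generate("Summarize the meeting", logger=pipeline_logger)
        return plain, scoped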
@@ -21,7 +21,6 @@ class OpenAILLM(LLM):
             "Authorization": f"Bearer {self.openai_key}",
         }
 
-        logger.debug(f"LLM openai prompt: {prompt}")
 
         async with httpx.AsyncClient(timeout=self.timeout) as client:
             response = await client.post(
@@ -36,7 +35,6 @@ class OpenAILLM(LLM):
             )
             response.raise_for_status()
             result = response.json()
-            logger.info(f"LLM openai result: {result}")
             return result["choices"][0]["text"]