Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-21 12:49:06 +00:00)
* initial
* add LLM features
* update LLM logic
* update llm functions: change control flow
* add generation config
* update return types
* update processors and tests
* update rtc_offer
* revert new title processor change
* fix unit tests
* add comments and fix HTTP 500
* adjust prompt
* test with reflector app
* revert new event for final title
* update
* move onus onto processors
* move onus onto processors
* stash
* add provision for gen config
* dynamically pack the LLM input using context length
* tune final summary params
* update consolidated class structures
* update consolidated class structures
* update precommit
* add broadcast processors
* working baseline
* Organize LLMParams
* minor fixes
* minor fixes
* minor fixes
* fix unit tests
* fix unit tests
* fix unit tests
* update tests
* update tests
* edit pipeline response events
* update summary return types
* configure tests
* alembic db migration
* change LLM response flow
* edit main llm functions
* edit main llm functions
* change llm name and gen cf
* Update transcript_topic_detector.py
* PR review comments
* checkpoint before db event migration
* update DB migration of past events
* update DB migration of past events
* edit LLM classes
* Delete unwanted file
* remove List typing
* remove List typing
* update oobabooga API call
* topic enhancements
* update UI event handling
* move ensure_casing to llm base
* update tests
* update tests
55 lines
1.5 KiB
Python
import httpx

from reflector.llm.base import LLM
from reflector.settings import settings
from reflector.utils.retry import retry


class BananaLLM(LLM):
    def __init__(self):
        super().__init__()
        self.timeout = settings.LLM_TIMEOUT
        self.headers = {
            "X-Banana-API-Key": settings.LLM_BANANA_API_KEY,
            "X-Banana-Model-Key": settings.LLM_BANANA_MODEL_KEY,
        }

    async def _generate(
        self, prompt: str, gen_schema: dict | None, gen_cfg: dict | None, **kwargs
    ):
        json_payload = {"prompt": prompt}
        if gen_schema:
            json_payload["gen_schema"] = gen_schema
        if gen_cfg:
            json_payload["gen_cfg"] = gen_cfg
        async with httpx.AsyncClient() as client:
            response = await retry(client.post)(
                settings.LLM_URL,
                headers=self.headers,
                json=json_payload,
                timeout=self.timeout,
                retry_timeout=300,  # as per their sdk
            )
            response.raise_for_status()
            text = response.json()["text"]
            return text


LLM.register("banana", BananaLLM)


if __name__ == "__main__":
    from reflector.logger import logger

    async def main():
        llm = BananaLLM()
        prompt = llm.create_prompt(
            instruct="Complete the following task",
            text="Tell me a joke about programming.",
        )
        result = await llm.generate(prompt=prompt, logger=logger)
        print(result)

    import asyncio

    asyncio.run(main())
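For reference, below is a minimal sketch of the raw HTTP exchange that _generate performs, written without the reflector package so it can run standalone. The payload keys ("prompt", "gen_schema", "gen_cfg"), the auth headers, and the "text" response key come from the module above; the endpoint URL, schema, config values, and credentials are illustrative placeholders, not taken from the repository, and the retry wrapper is omitted.

import asyncio

import httpx


async def demo():
    # Same payload shape BananaLLM._generate builds; values are illustrative.
    json_payload = {
        "prompt": "Summarize: ...",
        "gen_schema": {"type": "object", "properties": {"title": {"type": "string"}}},
        "gen_cfg": {"max_new_tokens": 256, "temperature": 0.7},
    }
    headers = {
        "X-Banana-API-Key": "<api-key>",      # settings.LLM_BANANA_API_KEY
        "X-Banana-Model-Key": "<model-key>",  # settings.LLM_BANANA_MODEL_KEY
    }
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "https://example.banana.dev/generate",  # settings.LLM_URL in the module
            headers=headers,
            json=json_payload,
            timeout=30.0,  # settings.LLM_TIMEOUT in the module
        )
        response.raise_for_status()
        # BananaLLM reads the generated completion from the "text" key
        print(response.json()["text"])


asyncio.run(demo())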