server: implement modal backend for llm and transcription

Mathieu Virbel
2023-08-11 12:34:11 +02:00
parent a822c9d482
commit 445d3c1221
4 changed files with 113 additions and 0 deletions


@@ -0,0 +1,43 @@
from reflector.llm.base import LLM
from reflector.settings import settings
from reflector.utils.retry import retry
import httpx


class ModalLLM(LLM):
    def __init__(self):
        super().__init__()
        self.timeout = settings.LLM_TIMEOUT
        self.llm_url = settings.LLM_URL + "/llm"
        self.headers = {
            "Authorization": f"Bearer {settings.LLM_MODAL_API_KEY}",
        }

    async def _generate(self, prompt: str, **kwargs):
        async with httpx.AsyncClient() as client:
            response = await retry(client.post)(
                self.llm_url,
                headers=self.headers,
                json={"prompt": prompt},
                timeout=self.timeout,
                retry_timeout=60 * 5,
            )
            response.raise_for_status()
            text = response.json()["text"]
            text = text[len(prompt) :]  # remove prompt echoed back by the model
            return text


LLM.register("modal", ModalLLM)


if __name__ == "__main__":
    from reflector.logger import logger

    async def main():
        llm = ModalLLM()
        result = await llm.generate("Hello, my name is", logger=logger)
        print(result)

    import asyncio

    asyncio.run(main())
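
The retry helper imported from reflector.utils.retry is not part of this diff. A minimal sketch of what the call retry(client.post)(..., retry_timeout=...) appears to assume is shown below: a wrapper that re-invokes the wrapped coroutine on httpx transport errors until retry_timeout seconds have elapsed. The wrapper name matches the import, but the signature, back-off interval, and exception handling are assumptions for illustration, not the project's actual implementation.

# Hypothetical sketch of the retry wrapper assumed by ModalLLM._generate.
# The real reflector.utils.retry may differ; the back-off interval and the
# keyword-only retry_timeout default are assumptions.
import asyncio
import time

import httpx


def retry(func, interval: float = 5.0):
    async def wrapper(*args, retry_timeout: float = 300.0, **kwargs):
        deadline = time.monotonic() + retry_timeout
        while True:
            try:
                # forward everything except retry_timeout to the wrapped call
                return await func(*args, **kwargs)
            except httpx.HTTPError:
                if time.monotonic() >= deadline:
                    raise  # give up once the retry window is exhausted
                await asyncio.sleep(interval)

    return wrapper

Note that response.raise_for_status() is called outside the retried call, so under this reading only transport-level failures (connection errors, timeouts) are retried, while HTTP error statuses surface immediately.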