From e4f2b785cae8493270502b14f5d6f48a7fd47b56 Mon Sep 17 00:00:00 2001
From: Mathieu Virbel
Date: Tue, 1 Aug 2023 20:16:54 +0200
Subject: [PATCH] server: update process tools and tests

---
 server/README.md                         | 7 +++++++
 server/reflector/app.py                  | 1 +
 server/reflector/tools/process.py        | 6 ++----
 server/reflector/views/rtc_offer.py      | 5 +----
 server/tests/test_processors_pipeline.py | 6 +++---
 5 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/server/README.md b/server/README.md
index a70fb7e7..b0ca34b7 100644
--- a/server/README.md
+++ b/server/README.md
@@ -48,6 +48,13 @@ $ LLM_URL=http://.../api/v1/generate python -m reflector.app
 
 - Ensure the API server is activated in GPT4all
 - Run with: `LLM_BACKEND=openai LLM_URL=http://localhost:4891/v1/completions LLM_OPENAI_MODEL="GPT4All Falcon" python -m reflector.app`
+
+### Using local files
+
+```
+poetry run python -m reflector.tools.process path/to/audio.wav
+```
+
 # Old documentation
 
 This is the code base for the Reflector demo (formerly called agenda-talk-diff) for the leads : Troy Web Consulting
diff --git a/server/reflector/app.py b/server/reflector/app.py
index 4a10b685..b07ef54e 100644
--- a/server/reflector/app.py
+++ b/server/reflector/app.py
@@ -29,4 +29,5 @@ app.include_router(rtc_offer_router)
 
 if __name__ == "__main__":
     import uvicorn
+
     uvicorn.run("reflector.app:app", host="0.0.0.0", port=1250, reload=True)
diff --git a/server/reflector/tools/process.py b/server/reflector/tools/process.py
index 0c8611d8..071907ea 100644
--- a/server/reflector/tools/process.py
+++ b/server/reflector/tools/process.py
@@ -7,7 +7,7 @@ from reflector.processors import (
     AudioTranscriptAutoProcessor,
     TranscriptLinerProcessor,
     TranscriptTopicDetectorProcessor,
-    # TranscriptSummarizerProcessor,
+    TranscriptFinalSummaryProcessor,
 )
 
 import asyncio
@@ -29,9 +29,7 @@ async def process_audio_file(filename, event_callback):
         AudioTranscriptAutoProcessor.as_threaded(),
         TranscriptLinerProcessor(callback=on_transcript),
         TranscriptTopicDetectorProcessor.as_threaded(callback=on_topic),
-        # TranscriptSummarizerProcessor.as_threaded(
-        #     callback=on_summary
-        # ),
+        TranscriptFinalSummaryProcessor.as_threaded(callback=on_summary),
     )
 
     pipeline.describe()
diff --git a/server/reflector/views/rtc_offer.py b/server/reflector/views/rtc_offer.py
index c4eaddd8..11c98009 100644
--- a/server/reflector/views/rtc_offer.py
+++ b/server/reflector/views/rtc_offer.py
@@ -2,10 +2,7 @@ import asyncio
 from fastapi import Request, APIRouter
 from reflector.events import subscribers_shutdown
 from pydantic import BaseModel
-from reflector.models import (
-    TranscriptionContext,
-    TranscriptionOutput,
-)
+from reflector.models import TranscriptionContext
 from reflector.logger import logger
 from aiortc import RTCPeerConnection, RTCSessionDescription, MediaStreamTrack
 from json import loads, dumps
diff --git a/server/tests/test_processors_pipeline.py b/server/tests/test_processors_pipeline.py
index a807b6fd..ab836550 100644
--- a/server/tests/test_processors_pipeline.py
+++ b/server/tests/test_processors_pipeline.py
@@ -26,7 +26,7 @@ async def test_basic_process(event_loop):
     marks = {
         "transcript": 0,
         "topic": 0,
-        # "summary": 0,
+        "summary": 0,
     }
 
     async def event_callback(event, data):
@@ -40,5 +40,5 @@ async def test_basic_process(event_loop):
 
     # validate the events
     assert marks["transcript"] == 5
-    assert marks["topic"] == 4
-    # assert marks["summary"] == 1
+    assert marks["topic"] == 2
+    assert marks["summary"] == 1