move client files

This commit is contained in:
Gokul Mohanarangan
2023-07-25 12:50:43 +05:30
parent b0b47cca83
commit 2d5c464d3b
5 changed files with 28 additions and 11 deletions

2
.gitignore vendored
View File

@@ -165,7 +165,7 @@ cython_debug/
transcript_*.txt
test_*.txt
wordcloud*.png
*.ini
utils/config.ini
test_samples/
*.wav
*.mp3

6
client-local/__init__.py Normal file
View File

@@ -0,0 +1,6 @@
# client-local/__init__.py — path shim added by this commit.
# Purpose: put the repository root (the parent of this package directory)
# on sys.path so modules inside client-local/ can import top-level
# packages such as `utils` when run directly.
import sys
import os
# Absolute path of the directory one level above this file's directory.
# NOTE(review): the continuation line lost its indentation in this
# diff extraction; in the actual file it is presumably indented.
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
# Side effect at import time: extends the module search path.
# NOTE(review): append (not insert at 0) means repo-root modules can be
# shadowed by earlier sys.path entries — presumably acceptable here.
sys.path.append(parent_dir)

View File

@@ -5,15 +5,15 @@ import signal
from aiortc.contrib.signaling import (add_signaling_arguments,
create_signaling)
from ..utils.log_utils import logger
from stream_client import StreamClient
from utils.log_utils import logger
async def main():
parser = argparse.ArgumentParser(description="Data channels ping/pong")
parser.add_argument(
"--url", type=str, nargs="?", default="http://127.0.0.1:1250/offer"
"--url", type=str, nargs="?", default="http://0.0.0.0:1250/offer"
)
parser.add_argument(

View File

@@ -9,15 +9,15 @@ import stamina
from aiortc import (RTCPeerConnection, RTCSessionDescription)
from aiortc.contrib.media import (MediaPlayer, MediaRelay)
from utils.log_utils import logger
from utils.run_utils import config
from ..utils.log_utils import logger
from ..utils.run_utils import config
class StreamClient:
def __init__(
self,
signaling,
url="http://127.0.0.1:1250",
url="http://0.0.0.0:1250",
play_from=None,
ping_pong=False
):

View File

@@ -1,3 +1,4 @@
import argparse
import asyncio
import datetime
import json
@@ -16,7 +17,7 @@ from faster_whisper import WhisperModel
from loguru import logger
from sortedcontainers import SortedDict
from utils.run_utils import run_in_executor
from utils.run_utils import run_in_executor, config
pcs = set()
relay = MediaRelay()
@@ -31,8 +32,8 @@ audio_buffer = AudioFifo()
executor = ThreadPoolExecutor()
transcription_text = ""
last_transcribed_time = 0.0
LLM_MACHINE_IP = "216.153.52.83"
LLM_MACHINE_PORT = "5000"
LLM_MACHINE_IP = config["DEFAULT"]["LLM_MACHINE_IP"]
LLM_MACHINE_PORT = config["DEFAULT"]["LLM_MACHINE_PORT"]
LLM_URL = f"http://{LLM_MACHINE_IP}:{LLM_MACHINE_PORT}/api/v1/generate"
incremental_responses = []
sorted_transcripts = SortedDict()
@@ -43,7 +44,7 @@ blacklisted_messages = [" Thank you.", " See you next time!",
def get_title_and_summary(llm_input_text, last_timestamp):
("Generating title and summary")
logger.info("Generating title and summary")
# output = llm.generate(prompt)
# Use monadical-ml to fire this query to an LLM and get result
@@ -306,6 +307,16 @@ async def on_shutdown(app):
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="WebRTC based server for Reflector"
)
parser.add_argument(
"--host", default="0.0.0.0", help="Server host IP (def: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=1250, help="Server port (def: 1250)"
)
args = parser.parse_args()
app = web.Application()
cors = aiohttp_cors.setup(
app,
@@ -321,4 +332,4 @@ if __name__ == "__main__":
offer_resource = cors.add(app.router.add_resource("/offer"))
cors.add(offer_resource.add_route("POST", offer))
app.on_shutdown.append(on_shutdown)
web.run_app(app, access_log=None, host="127.0.0.1", port=1250)
web.run_app(app, access_log=None, host=args.host, port=args.port)