Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-21 04:39:06 +00:00)
Merge branch 'main' of github.com:Monadical-SAS/reflector into sara/recorder-memory
.github/workflows/deploy.yml (vendored, 2 lines changed)

@@ -43,3 +43,5 @@ jobs:
           platforms: linux/amd64,linux/arm64
           push: true
           tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
.github/workflows/test_server.yml (vendored, 21 lines changed)

@@ -13,23 +13,14 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+      - name: Install poetry
+        run: pipx install poetry
       - name: Set up Python 3.x
         uses: actions/setup-python@v4
         with:
-          python-version: 3.11
-      - uses: Gr1N/setup-poetry@v8
-      - name: Cache Python requirements
-        uses: actions/cache@v2
-        id: cache-pip
-        with:
-          path: ~/.cache/pypoetry/virtualenvs
-          key: ${{ runner.os }}-poetry-${{ hashFiles('poetry.lock') }}
-          restore-keys: |
-            - ${{ runner.os }}-poetry-
-      - name: Install tests dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y portaudio19-dev build-essential
+          python-version: '3.11'
+          cache: 'poetry'
+          cache-dependency-path: 'server/poetry.lock'
       - name: Install requirements
         run: |
           cd server

@@ -81,3 +72,5 @@ jobs:
         with:
           context: server
           platforms: linux/amd64,linux/arm64
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
@@ -48,6 +48,7 @@
 ## Using serverless modal.com (require reflector-gpu-modal deployed)
 #TRANSCRIPT_BACKEND=modal
 #TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
+#TRANSLATE_URL=https://xxxxx--reflector-translator-web.modal.run
 #TRANSCRIPT_MODAL_API_KEY=xxxxx

 ## Using serverless banana.dev (require reflector-gpu-banana deployed)
@@ -14,34 +14,12 @@ WHISPER_MODEL: str = "large-v2"
 WHISPER_COMPUTE_TYPE: str = "float16"
 WHISPER_NUM_WORKERS: int = 1
-
-# Seamless M4T
-SEAMLESSM4T_MODEL_SIZE: str = "medium"
-SEAMLESSM4T_MODEL_CARD_NAME: str = f"seamlessM4T_{SEAMLESSM4T_MODEL_SIZE}"
-SEAMLESSM4T_VOCODER_CARD_NAME: str = "vocoder_36langs"
-
-HF_SEAMLESS_M4TEPO: str = f"facebook/seamless-m4t-{SEAMLESSM4T_MODEL_SIZE}"
-HF_SEAMLESS_M4T_VOCODEREPO: str = "facebook/seamless-m4t-vocoder"
-
-SEAMLESS_GITEPO: str = "https://github.com/facebookresearch/seamless_communication.git"
-SEAMLESS_MODEL_DIR: str = "m4t"
-
 WHISPER_MODEL_DIR = "/root/transcription_models"

 stub = Stub(name="reflector-transcriber")
-
-
-def install_seamless_communication():
-    import os
-    import subprocess
-
-    initial_dir = os.getcwd()
-    subprocess.run(["ssh-keyscan", "-t", "rsa", "github.com", ">>", "~/.ssh/known_hosts"])
-    subprocess.run(["rm", "-rf", "seamless_communication"])
-    subprocess.run(["git", "clone", SEAMLESS_GITEPO, "." + "/seamless_communication"])
-    os.chdir("seamless_communication")
-    subprocess.run(["pip", "install", "-e", "."])
-    os.chdir(initial_dir)


 def download_whisper():
     from faster_whisper.utils import download_model
@@ -50,18 +28,6 @@ def download_whisper():
     print("Whisper model downloaded")


-def download_seamlessm4t_model():
-    from huggingface_hub import snapshot_download
-
-    print("Downloading Transcriber model & tokenizer")
-    snapshot_download(HF_SEAMLESS_M4TEPO, cache_dir=SEAMLESS_MODEL_DIR)
-    print("Transcriber model & tokenizer downloaded")
-
-    print("Downloading vocoder weights")
-    snapshot_download(HF_SEAMLESS_M4T_VOCODEREPO, cache_dir=SEAMLESS_MODEL_DIR)
-    print("Vocoder weights downloaded")
-
-
 def migrate_cache_llm():
     """
     XXX The cache for model files in Transformers v4.22.0 has been updated.
@@ -76,52 +42,6 @@ def migrate_cache_llm():
     print("LLM cache moved")


-def configure_seamless_m4t():
-    import os
-
-    import yaml
-
-    ASSETS_DIR: str = "./seamless_communication/src/seamless_communication/assets/cards"
-
-    with open(f'{ASSETS_DIR}/seamlessM4T_{SEAMLESSM4T_MODEL_SIZE}.yaml', 'r') as file:
-        model_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
-    with open(f'{ASSETS_DIR}/vocoder_36langs.yaml', 'r') as file:
-        vocoder_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
-    with open(f'{ASSETS_DIR}/unity_nllb-100.yaml', 'r') as file:
-        unity_100_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
-    with open(f'{ASSETS_DIR}/unity_nllb-200.yaml', 'r') as file:
-        unity_200_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
-
-    model_dir = f"{SEAMLESS_MODEL_DIR}/models--facebook--seamless-m4t-{SEAMLESSM4T_MODEL_SIZE}/snapshots"
-    available_model_versions = os.listdir(model_dir)
-    latest_model_version = sorted(available_model_versions)[-1]
-    model_name = f"multitask_unity_{SEAMLESSM4T_MODEL_SIZE}.pt"
-    model_path = os.path.join(os.getcwd(), model_dir, latest_model_version, model_name)
-
-    vocoder_dir = f"{SEAMLESS_MODEL_DIR}/models--facebook--seamless-m4t-vocoder/snapshots"
-    available_vocoder_versions = os.listdir(vocoder_dir)
-    latest_vocoder_version = sorted(available_vocoder_versions)[-1]
-    vocoder_name = "vocoder_36langs.pt"
-    vocoder_path = os.path.join(os.getcwd(), vocoder_dir, latest_vocoder_version, vocoder_name)
-
-    tokenizer_name = "tokenizer.model"
-    tokenizer_path = os.path.join(os.getcwd(), model_dir, latest_model_version, tokenizer_name)
-
-    model_yaml_data['checkpoint'] = f"file:/{model_path}"
-    vocoder_yaml_data['checkpoint'] = f"file:/{vocoder_path}"
-    unity_100_yaml_data['tokenizer'] = f"file:/{tokenizer_path}"
-    unity_200_yaml_data['tokenizer'] = f"file:/{tokenizer_path}"
-
-    with open(f'{ASSETS_DIR}/seamlessM4T_{SEAMLESSM4T_MODEL_SIZE}.yaml', 'w') as file:
-        yaml.dump(model_yaml_data, file)
-    with open(f'{ASSETS_DIR}/vocoder_36langs.yaml', 'w') as file:
-        yaml.dump(vocoder_yaml_data, file)
-    with open(f'{ASSETS_DIR}/unity_nllb-100.yaml', 'w') as file:
-        yaml.dump(unity_100_yaml_data, file)
-    with open(f'{ASSETS_DIR}/unity_nllb-200.yaml', 'w') as file:
-        yaml.dump(unity_200_yaml_data, file)
-
-
 transcriber_image = (
     Image.debian_slim(python_version="3.10.8")
     .apt_install("git")
@@ -131,7 +51,7 @@ transcriber_image = (
         "faster-whisper",
         "requests",
         "torch",
-        "transformers",
+        "transformers==4.34.0",
         "sentencepiece",
         "protobuf",
         "huggingface_hub==0.16.4",
@@ -141,9 +61,6 @@ transcriber_image = (
         "pyyaml",
         "hf-transfer~=0.1"
     )
-    .run_function(install_seamless_communication)
-    .run_function(download_seamlessm4t_model)
-    .run_function(configure_seamless_m4t)
     .run_function(download_whisper)
     .run_function(migrate_cache_llm)
     .env(
@@ -167,7 +84,6 @@ class Transcriber:
     def __enter__(self):
         import faster_whisper
         import torch
-        from seamless_communication.models.inference.translator import Translator

         self.use_gpu = torch.cuda.is_available()
         self.device = "cuda" if self.use_gpu else "cpu"
@@ -178,12 +94,6 @@ class Transcriber:
             num_workers=WHISPER_NUM_WORKERS,
             download_root=WHISPER_MODEL_DIR
         )
-        self.translator = Translator(
-            SEAMLESSM4T_MODEL_CARD_NAME,
-            SEAMLESSM4T_VOCODER_CARD_NAME,
-            torch.device(self.device),
-            dtype=torch.float32
-        )

     @method()
     def transcribe_segment(
@@ -229,38 +139,6 @@ class Transcriber:
             "words": words
         }

-    def get_seamless_lang_code(self, lang_code: str):
-        """
-        The codes for SeamlessM4T is different from regular standards.
-        For ex, French is "fra" and not "fr".
-        """
-        # TODO: Enhance with complete list of lang codes
-        seamless_lang_code = {
-            "en": "eng",
-            "fr": "fra"
-        }
-        return seamless_lang_code.get(lang_code, "eng")
-
-    @method()
-    def translate_text(
-        self,
-        text: str,
-        source_language: str,
-        target_language: str
-    ):
-        translated_text, _, _ = self.translator.predict(
-            text,
-            "t2tt",
-            src_lang=self.get_seamless_lang_code(source_language),
-            tgt_lang=self.get_seamless_lang_code(target_language),
-            ngram_filtering=True
-        )
-        return {
-            "text": {
-                source_language: text,
-                target_language: str(translated_text)
-            }
-        }

 # -------------------------------------------------------------------
 # Web API
 # -------------------------------------------------------------------
@@ -316,18 +194,4 @@ def web():
         result = func.get()
         return result

-    @app.post("/translate", dependencies=[Depends(apikey_auth)])
-    async def translate(
-        text: str,
-        source_language: Annotated[str, Body(...)] = "en",
-        target_language: Annotated[str, Body(...)] = "fr",
-    ) -> TranscriptResponse:
-        func = transcriberstub.translate_text.spawn(
-            text=text,
-            source_language=source_language,
-            target_language=target_language,
-        )
-        result = func.get()
-        return result
-
     return app
server/gpu/modal/reflector_translator.py (new file, 237 lines)

@@ -0,0 +1,237 @@
"""
Reflector GPU backend - transcriber
===================================
"""

import os
import tempfile

from modal import Image, Secret, Stub, asgi_app, method
from pydantic import BaseModel

# Seamless M4T
SEAMLESSM4T_MODEL_SIZE: str = "medium"
SEAMLESSM4T_MODEL_CARD_NAME: str = f"seamlessM4T_{SEAMLESSM4T_MODEL_SIZE}"
SEAMLESSM4T_VOCODER_CARD_NAME: str = "vocoder_36langs"

HF_SEAMLESS_M4TEPO: str = f"facebook/seamless-m4t-{SEAMLESSM4T_MODEL_SIZE}"
HF_SEAMLESS_M4T_VOCODEREPO: str = "facebook/seamless-m4t-vocoder"

SEAMLESS_GITEPO: str = "https://github.com/facebookresearch/seamless_communication.git"
SEAMLESS_MODEL_DIR: str = "m4t"

stub = Stub(name="reflector-translator")


def install_seamless_communication():
    import os
    import subprocess

    initial_dir = os.getcwd()
    subprocess.run(["ssh-keyscan", "-t", "rsa", "github.com", ">>", "~/.ssh/known_hosts"])
    subprocess.run(["rm", "-rf", "seamless_communication"])
    subprocess.run(["git", "clone", SEAMLESS_GITEPO, "." + "/seamless_communication"])
    os.chdir("seamless_communication")
    subprocess.run(["pip", "install", "-e", "."])
    os.chdir(initial_dir)


def download_seamlessm4t_model():
    from huggingface_hub import snapshot_download

    print("Downloading Transcriber model & tokenizer")
    snapshot_download(HF_SEAMLESS_M4TEPO, cache_dir=SEAMLESS_MODEL_DIR)
    print("Transcriber model & tokenizer downloaded")

    print("Downloading vocoder weights")
    snapshot_download(HF_SEAMLESS_M4T_VOCODEREPO, cache_dir=SEAMLESS_MODEL_DIR)
    print("Vocoder weights downloaded")


def configure_seamless_m4t():
    import os

    import yaml

    ASSETS_DIR: str = "./seamless_communication/src/seamless_communication/assets/cards"

    with open(f'{ASSETS_DIR}/seamlessM4T_{SEAMLESSM4T_MODEL_SIZE}.yaml', 'r') as file:
        model_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
    with open(f'{ASSETS_DIR}/vocoder_36langs.yaml', 'r') as file:
        vocoder_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
    with open(f'{ASSETS_DIR}/unity_nllb-100.yaml', 'r') as file:
        unity_100_yaml_data = yaml.load(file, Loader=yaml.FullLoader)
    with open(f'{ASSETS_DIR}/unity_nllb-200.yaml', 'r') as file:
        unity_200_yaml_data = yaml.load(file, Loader=yaml.FullLoader)

    model_dir = f"{SEAMLESS_MODEL_DIR}/models--facebook--seamless-m4t-{SEAMLESSM4T_MODEL_SIZE}/snapshots"
    available_model_versions = os.listdir(model_dir)
    latest_model_version = sorted(available_model_versions)[-1]
    model_name = f"multitask_unity_{SEAMLESSM4T_MODEL_SIZE}.pt"
    model_path = os.path.join(os.getcwd(), model_dir, latest_model_version, model_name)

    vocoder_dir = f"{SEAMLESS_MODEL_DIR}/models--facebook--seamless-m4t-vocoder/snapshots"
    available_vocoder_versions = os.listdir(vocoder_dir)
    latest_vocoder_version = sorted(available_vocoder_versions)[-1]
    vocoder_name = "vocoder_36langs.pt"
    vocoder_path = os.path.join(os.getcwd(), vocoder_dir, latest_vocoder_version, vocoder_name)

    tokenizer_name = "tokenizer.model"
    tokenizer_path = os.path.join(os.getcwd(), model_dir, latest_model_version, tokenizer_name)

    model_yaml_data['checkpoint'] = f"file:/{model_path}"
    vocoder_yaml_data['checkpoint'] = f"file:/{vocoder_path}"
    unity_100_yaml_data['tokenizer'] = f"file:/{tokenizer_path}"
    unity_200_yaml_data['tokenizer'] = f"file:/{tokenizer_path}"

    with open(f'{ASSETS_DIR}/seamlessM4T_{SEAMLESSM4T_MODEL_SIZE}.yaml', 'w') as file:
        yaml.dump(model_yaml_data, file)
    with open(f'{ASSETS_DIR}/vocoder_36langs.yaml', 'w') as file:
        yaml.dump(vocoder_yaml_data, file)
    with open(f'{ASSETS_DIR}/unity_nllb-100.yaml', 'w') as file:
        yaml.dump(unity_100_yaml_data, file)
    with open(f'{ASSETS_DIR}/unity_nllb-200.yaml', 'w') as file:
        yaml.dump(unity_200_yaml_data, file)


transcriber_image = (
    Image.debian_slim(python_version="3.10.8")
    .apt_install("git")
    .apt_install("wget")
    .apt_install("libsndfile-dev")
    .pip_install(
        "requests",
        "torch",
        "transformers==4.34.0",
        "sentencepiece",
        "protobuf",
        "huggingface_hub==0.16.4",
        "gitpython",
        "torchaudio",
        "fairseq2",
        "pyyaml",
        "hf-transfer~=0.1"
    )
    .run_function(install_seamless_communication)
    .run_function(download_seamlessm4t_model)
    .run_function(configure_seamless_m4t)
    .env(
        {
            "LD_LIBRARY_PATH": (
                "/usr/local/lib/python3.10/site-packages/nvidia/cudnn/lib/:"
                "/opt/conda/lib/python3.10/site-packages/nvidia/cublas/lib/"
            )
        }
    )
)


@stub.cls(
    gpu="A10G",
    timeout=60 * 5,
    container_idle_timeout=60 * 5,
    image=transcriber_image,
)
class Translator:
    def __enter__(self):
        import torch
        from seamless_communication.models.inference.translator import Translator

        self.use_gpu = torch.cuda.is_available()
        self.device = "cuda" if self.use_gpu else "cpu"
        self.translator = Translator(
            SEAMLESSM4T_MODEL_CARD_NAME,
            SEAMLESSM4T_VOCODER_CARD_NAME,
            torch.device(self.device),
            dtype=torch.float32
        )

    @method()
    def warmup(self):
        return {"status": "ok"}

    def get_seamless_lang_code(self, lang_code: str):
        """
        The codes for SeamlessM4T is different from regular standards.
        For ex, French is "fra" and not "fr".
        """
        # TODO: Enhance with complete list of lang codes
        seamless_lang_code = {
            "en": "eng",
            "fr": "fra"
        }
        return seamless_lang_code.get(lang_code, "eng")

    @method()
    def translate_text(
        self,
        text: str,
        source_language: str,
        target_language: str
    ):
        translated_text, _, _ = self.translator.predict(
            text,
            "t2tt",
            src_lang=self.get_seamless_lang_code(source_language),
            tgt_lang=self.get_seamless_lang_code(target_language),
            ngram_filtering=True
        )
        return {
            "text": {
                source_language: text,
                target_language: str(translated_text)
            }
        }

# -------------------------------------------------------------------
# Web API
# -------------------------------------------------------------------


@stub.function(
    container_idle_timeout=60,
    timeout=60,
    secrets=[
        Secret.from_name("reflector-gpu"),
    ],
)
@asgi_app()
def web():
    from fastapi import Body, Depends, FastAPI, HTTPException, status
    from fastapi.security import OAuth2PasswordBearer
    from typing_extensions import Annotated

    translatorstub = Translator()

    app = FastAPI()

    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid API key",
                headers={"WWW-Authenticate": "Bearer"},
            )

    class TranslateResponse(BaseModel):
        result: dict

    @app.post("/translate", dependencies=[Depends(apikey_auth)])
    async def translate(
        text: str,
        source_language: Annotated[str, Body(...)] = "en",
        target_language: Annotated[str, Body(...)] = "fr",
    ) -> TranslateResponse:
        func = translatorstub.translate_text.spawn(
            text=text,
            source_language=source_language,
            target_language=target_language,
        )
        result = func.get()
        return result

    @app.post("/warmup", dependencies=[Depends(apikey_auth)])
    async def warmup():
        return translatorstub.warmup.spawn().get()

    return app
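Note (not part of the diff): a minimal sketch of how a client could call the deployed translator app, mirroring the server-side processor change further down (Bearer auth, query parameters, POST to /translate). The URL is a placeholder and REFLECTOR_GPU_APIKEY is assumed to be set in the environment; the response shape follows translate_text above.

import os

import httpx

# Placeholder Modal URL, matching the commented example in the .env hunk above
TRANSLATE_URL = "https://xxxxx--reflector-translator-web.modal.run"


async def translate_once(text: str) -> dict:
    # Same auth scheme the web() app checks: Bearer <REFLECTOR_GPU_APIKEY>
    headers = {"Authorization": f"Bearer {os.environ['REFLECTOR_GPU_APIKEY']}"}
    params = {"text": text, "source_language": "en", "target_language": "fr"}
    async with httpx.AsyncClient() as client:
        response = await client.post(
            TRANSLATE_URL + "/translate",
            headers=headers,
            params=params,
            timeout=90,
        )
        response.raise_for_status()
        # Expected shape: {"text": {"en": <original>, "fr": <translation>}}
        return response.json()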
server/poetry.lock (generated, 25 lines changed)

@@ -2173,29 +2173,6 @@ files = [
     {file = "protobuf-4.24.4.tar.gz", hash = "sha256:5a70731910cd9104762161719c3d883c960151eea077134458503723b60e3667"},
 ]

-[[package]]
-name = "pyaudio"
-version = "0.2.13"
-description = "Cross-platform audio I/O with PortAudio"
-optional = false
-python-versions = "*"
-files = [
-    {file = "PyAudio-0.2.13-cp310-cp310-win32.whl", hash = "sha256:48e29537ea22ae2ae323eebe297bfb2683831cee4f20d96964e131f65ab2161d"},
-    {file = "PyAudio-0.2.13-cp310-cp310-win_amd64.whl", hash = "sha256:87137cfd0ef8608a2a383be3f6996f59505e322dab9d16531f14cf542fa294f1"},
-    {file = "PyAudio-0.2.13-cp311-cp311-win32.whl", hash = "sha256:13915faaa780e6bbbb6d745ef0e761674fd461b1b1b3f9c1f57042a534bfc0c3"},
-    {file = "PyAudio-0.2.13-cp311-cp311-win_amd64.whl", hash = "sha256:59cc3cc5211b729c7854e3989058a145872cc58b1a7b46c6d4d88448a343d890"},
-    {file = "PyAudio-0.2.13-cp37-cp37m-win32.whl", hash = "sha256:d294e3f85b2238649b1ff49ce3412459a8a312569975a89d14646536362d7576"},
-    {file = "PyAudio-0.2.13-cp37-cp37m-win_amd64.whl", hash = "sha256:ff7f5e44ef51fe61da1e09c6f632f0b5808198edd61b363855cc7dd03bf4a8ac"},
-    {file = "PyAudio-0.2.13-cp38-cp38-win32.whl", hash = "sha256:c6b302b048c054b7463936d8ba884b73877dc47012f3c94665dba92dd658ae04"},
-    {file = "PyAudio-0.2.13-cp38-cp38-win_amd64.whl", hash = "sha256:1505d766ee718df6f5a18b73ac42307ba1cb4d2c0397873159254a34f67515d6"},
-    {file = "PyAudio-0.2.13-cp39-cp39-win32.whl", hash = "sha256:eb128e4a6ea9b98d9a31f33c44978885af27dbe8ae53d665f8790cbfe045517e"},
-    {file = "PyAudio-0.2.13-cp39-cp39-win_amd64.whl", hash = "sha256:910ef09225cce227adbba92622d4a3e3c8375117f7dd64039f287d9ffc0e02a1"},
-    {file = "PyAudio-0.2.13.tar.gz", hash = "sha256:26bccc81e4243d1c0ff5487e6b481de6329fcd65c79365c267cef38f363a2b56"},
-]
-
-[package.extras]
-test = ["numpy"]
-
 [[package]]
 name = "pycparser"
 version = "2.21"

@@ -3861,4 +3838,4 @@ multidict = ">=4.0"
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.11"
-content-hash = "a85cb09a0e4b68b29c4272d550e618d2e24ace5f16b707f29e8ac4ce915c1fae"
+content-hash = "61578467a70980ff9c2dc0cd787b6410b91d7c5fd2bb4c46b6951ec82690ef67"
@@ -40,10 +40,6 @@ black = "^23.7.0"
 stamina = "^23.1.0"


-[tool.poetry.group.client.dependencies]
-pyaudio = "^0.2.13"
-
-
 [tool.poetry.group.tests.dependencies]
 pytest-cov = "^4.1.0"
 pytest-aiohttp = "^1.0.4"
@@ -16,8 +16,8 @@ class TranscriptTranslatorProcessor(Processor):

     def __init__(self, **kwargs):
         super().__init__(**kwargs)
-        self.transcript_url = settings.TRANSCRIPT_URL
-        self.timeout = settings.TRANSCRIPT_TIMEOUT
+        self.translate_url = settings.TRANSLATE_URL
+        self.timeout = settings.TRANSLATE_TIMEOUT
         self.headers = {"Authorization": f"Bearer {settings.LLM_MODAL_API_KEY}"}

     async def _push(self, data: Transcript):

@@ -46,7 +46,7 @@ class TranscriptTranslatorProcessor(Processor):

         async with httpx.AsyncClient() as client:
             response = await retry(client.post)(
-                settings.TRANSCRIPT_URL + "/translate",
+                self.translate_url + "/translate",
                 headers=self.headers,
                 params=json_payload,
                 timeout=self.timeout,
@@ -38,6 +38,10 @@ class Settings(BaseSettings):
     TRANSCRIPT_URL: str | None = None
     TRANSCRIPT_TIMEOUT: int = 90

+    # Translate into the target language
+    TRANSLATE_URL: str | None = None
+    TRANSLATE_TIMEOUT: int = 90
+
     # Audio transcription banana.dev configuration
     TRANSCRIPT_BANANA_API_KEY: str | None = None
     TRANSCRIPT_BANANA_MODEL_KEY: str | None = None
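Note (not part of the diff): a sketch of how the new settings are expected to be supplied, assuming the usual pydantic BaseSettings behaviour of reading values from environment variables (or the .env entry shown earlier). The import path is taken from the test file below; the Modal URL is a placeholder.

import os

# Values would normally come from the deployment environment or .env
os.environ["TRANSLATE_URL"] = "https://xxxxx--reflector-translator-web.modal.run"
os.environ["TRANSLATE_TIMEOUT"] = "90"

from reflector.settings import Settings  # assumed export, matching the hunk above

settings = Settings()
assert settings.TRANSLATE_TIMEOUT == 90           # coerced to int by pydantic
assert settings.TRANSLATE_URL.endswith(".modal.run")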
@@ -38,7 +38,9 @@ def _get_range_header(range_header: str, file_size: int) -> tuple[int, int]:
     return start, end


-def range_requests_response(request: Request, file_path: str, content_type: str):
+def range_requests_response(
+    request: Request, file_path: str, content_type: str, content_disposition: str
+):
     """Returns StreamingResponse using Range Requests of a given file"""

     file_size = os.stat(file_path).st_size

@@ -54,6 +56,10 @@ def range_requests_response(request: Request, file_path: str, content_type: str)
             "content-range, content-encoding"
         ),
     }
+
+    if content_disposition:
+        headers["Content-Disposition"] = content_disposition
+
     start = 0
     end = file_size - 1
     status_code = status.HTTP_200_OK

@@ -356,10 +356,14 @@ async def transcript_get_audio_mp3(
     if not transcript.audio_mp3_filename.exists():
         raise HTTPException(status_code=404, detail="Audio not found")

+    truncated_id = str(transcript.id).split("-")[0]
+    filename = f"recording_{truncated_id}.mp3"
+
     return range_requests_response(
         request,
         transcript.audio_mp3_filename,
-        content_type="audio/mp3",
+        content_type="audio/mpeg",
+        content_disposition=f"attachment; filename={filename}",
     )
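Note (not part of the diff): a sketch of what a client should now see when downloading the MP3, given the audio/mpeg and Content-Disposition changes above. Host, port and transcript id are placeholders; the 206 response for a Range request is assumed from range_requests_response and the range tests below.

import httpx

transcript_id = "0123abcd-0000-0000-0000-000000000000"  # placeholder id
url = f"http://127.0.0.1:1255/v1/transcripts/{transcript_id}/audio/mp3"

resp = httpx.get(url)
assert resp.status_code == 200
assert resp.headers["Content-Type"] == "audio/mpeg"
# Filename is built from the first segment of the transcript id
assert resp.headers["Content-Disposition"] == "attachment; filename=recording_0123abcd.mp3"

# Range requests are still served; 206 Partial Content is the expected status
partial = httpx.get(url, headers={"Range": "bytes=0-99"})
assert partial.status_code == 206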
@@ -35,7 +35,7 @@ async def fake_transcript(tmpdir):
 @pytest.mark.parametrize(
     "url_suffix,content_type",
     [
-        ["/mp3", "audio/mp3"],
+        ["/mp3", "audio/mpeg"],
     ],
 )
 async def test_transcript_audio_download(fake_transcript, url_suffix, content_type):

@@ -51,7 +51,7 @@ async def test_transcript_audio_download(fake_transcript, url_suffix, content_type):
 @pytest.mark.parametrize(
     "url_suffix,content_type",
     [
-        ["/mp3", "audio/mp3"],
+        ["/mp3", "audio/mpeg"],
     ],
 )
 async def test_transcript_audio_download_range(

@@ -74,7 +74,7 @@ async def test_transcript_audio_download_range(
 @pytest.mark.parametrize(
     "url_suffix,content_type",
     [
-        ["/mp3", "audio/mp3"],
+        ["/mp3", "audio/mpeg"],
     ],
 )
 async def test_transcript_audio_download_range_with_seek(
@@ -31,28 +31,43 @@ class ThreadedUvicorn:
             continue


-@pytest.mark.asyncio
-async def test_transcript_rtc_and_websocket(
-    tmpdir, dummy_llm, dummy_transcript, dummy_processors, ensure_casing
-):
-    # goal: start the server, exchange RTC, receive websocket events
-    # because of that, we need to start the server in a thread
-    # to be able to connect with aiortc
+@pytest.fixture
+async def appserver(tmpdir):
     from reflector.settings import settings
     from reflector.app import app

+    DATA_DIR = settings.DATA_DIR
     settings.DATA_DIR = Path(tmpdir)

     # start server
     host = "127.0.0.1"
     port = 1255
-    base_url = f"http://{host}:{port}/v1"
     config = Config(app=app, host=host, port=port)
     server = ThreadedUvicorn(config)
     await server.start()

+    yield (server, host, port)
+
+    server.stop()
+    settings.DATA_DIR = DATA_DIR
+
+
+@pytest.mark.asyncio
+async def test_transcript_rtc_and_websocket(
+    tmpdir,
+    dummy_llm,
+    dummy_transcript,
+    dummy_processors,
+    ensure_casing,
+    appserver,
+):
+    # goal: start the server, exchange RTC, receive websocket events
+    # because of that, we need to start the server in a thread
+    # to be able to connect with aiortc
+    server, host, port = appserver
+
     # create a transcript
+    base_url = f"http://{host}:{port}/v1"
     ac = AsyncClient(base_url=base_url)
     response = await ac.post("/transcripts", json={"name": "Test RTC"})
     assert response.status_code == 200

@@ -167,35 +182,26 @@ async def test_transcript_rtc_and_websocket(
     # check that audio/mp3 is available
     resp = await ac.get(f"/transcripts/{tid}/audio/mp3")
     assert resp.status_code == 200
-    assert resp.headers["Content-Type"] == "audio/mp3"
-
-    # stop server
-    server.stop()
+    assert resp.headers["Content-Type"] == "audio/mpeg"


 @pytest.mark.asyncio
 async def test_transcript_rtc_and_websocket_and_fr(
-    tmpdir, dummy_llm, dummy_transcript, dummy_processors, ensure_casing
+    tmpdir,
+    dummy_llm,
+    dummy_transcript,
+    dummy_processors,
+    ensure_casing,
+    appserver,
 ):
     # goal: start the server, exchange RTC, receive websocket events
     # because of that, we need to start the server in a thread
     # to be able to connect with aiortc
     # with target french language
-    from reflector.settings import settings
-    from reflector.app import app
-
-    settings.DATA_DIR = Path(tmpdir)
-
-    # start server
-    host = "127.0.0.1"
-    port = 1255
-    base_url = f"http://{host}:{port}/v1"
-    config = Config(app=app, host=host, port=port)
-    server = ThreadedUvicorn(config)
-    await server.start()
+    server, host, port = appserver

     # create a transcript
+    base_url = f"http://{host}:{port}/v1"
     ac = AsyncClient(base_url=base_url)
     response = await ac.post(
         "/transcripts", json={"name": "Test RTC", "target_language": "fr"}

@@ -303,6 +309,3 @@ async def test_transcript_rtc_and_websocket_and_fr(
     # ensure the last event received is ended
     assert events[-1]["event"] == "STATUS"
     assert events[-1]["data"]["value"] == "ended"
-
-    # stop server
-    server.stop()
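Note (not part of the diff): any further test can now reuse the appserver fixture instead of starting ThreadedUvicorn by hand; the fixture yields (server, host, port) and stops the server and restores DATA_DIR during teardown. A minimal sketch with a hypothetical test name:

import pytest
from httpx import AsyncClient


@pytest.mark.asyncio
async def test_create_transcript_example(appserver):  # hypothetical test
    server, host, port = appserver
    async with AsyncClient(base_url=f"http://{host}:{port}/v1") as ac:
        response = await ac.post("/transcripts", json={"name": "Example"})
        assert response.status_code == 200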
@@ -54,10 +54,6 @@ export interface V1TranscriptGetRequest {
   transcriptId: any;
 }

-export interface V1TranscriptGetAudioRequest {
-  transcriptId: any;
-}
-
 export interface V1TranscriptGetAudioMp3Request {
   transcriptId: any;
 }

@@ -310,69 +306,6 @@ export class DefaultApi extends runtime.BaseAPI {
     return await response.value();
   }

-  /**
-   * Transcript Get Audio
-   */
-  async v1TranscriptGetAudioRaw(
-    requestParameters: V1TranscriptGetAudioRequest,
-    initOverrides?: RequestInit | runtime.InitOverrideFunction,
-  ): Promise<runtime.ApiResponse<any>> {
-    if (
-      requestParameters.transcriptId === null ||
-      requestParameters.transcriptId === undefined
-    ) {
-      throw new runtime.RequiredError(
-        "transcriptId",
-        "Required parameter requestParameters.transcriptId was null or undefined when calling v1TranscriptGetAudio.",
-      );
-    }
-
-    const queryParameters: any = {};
-
-    const headerParameters: runtime.HTTPHeaders = {};
-
-    if (this.configuration && this.configuration.accessToken) {
-      // oauth required
-      headerParameters["Authorization"] = await this.configuration.accessToken(
-        "OAuth2AuthorizationCodeBearer",
-        [],
-      );
-    }
-
-    const response = await this.request(
-      {
-        path: `/v1/transcripts/{transcript_id}/audio`.replace(
-          `{${"transcript_id"}}`,
-          encodeURIComponent(String(requestParameters.transcriptId)),
-        ),
-        method: "GET",
-        headers: headerParameters,
-        query: queryParameters,
-      },
-      initOverrides,
-    );
-
-    if (this.isJsonMime(response.headers.get("content-type"))) {
-      return new runtime.JSONApiResponse<any>(response);
-    } else {
-      return new runtime.TextApiResponse(response) as any;
-    }
-  }
-
-  /**
-   * Transcript Get Audio
-   */
-  async v1TranscriptGetAudio(
-    requestParameters: V1TranscriptGetAudioRequest,
-    initOverrides?: RequestInit | runtime.InitOverrideFunction,
-  ): Promise<any> {
-    const response = await this.v1TranscriptGetAudioRaw(
-      requestParameters,
-      initOverrides,
-    );
-    return await response.value();
-  }
-
   /**
    * Transcript Get Audio Mp3
    */
@@ -12,6 +12,7 @@ import "../../styles/button.css";
 import FinalSummary from "../finalSummary";
 import ShareLink from "../shareLink";
 import QRCode from "react-qr-code";
+import TranscriptTitle from "../transcriptTitle";

 type TranscriptDetails = {
   params: {

@@ -50,13 +51,18 @@ export default function TranscriptDetails(details: TranscriptDetails) {
         <Modal title="Loading" text={"Loading transcript..."} />
       ) : (
         <>
-          <Recorder
-            topics={topics?.topics || []}
-            useActiveTopic={useActiveTopic}
-            waveform={waveform?.waveform}
-            isPastMeeting={true}
-            transcriptId={transcript?.response?.id}
-          />
+          <div className="flex flex-col">
+            {transcript?.response?.title && (
+              <TranscriptTitle title={transcript.response.title} />
+            )}
+            <Recorder
+              topics={topics?.topics || []}
+              useActiveTopic={useActiveTopic}
+              waveform={waveform?.waveform}
+              isPastMeeting={true}
+              transcriptId={transcript?.response?.id}
+            />
+          </div>
           <div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-2 lg:grid-rows-1 gap-2 lg:gap-4 h-full">
             <TopicList
               topics={topics?.topics || []}
@@ -301,6 +301,9 @@ export default function Recorder(props: RecorderProps) {
             <a
               title="Download recording"
               className="text-center cursor-pointer text-blue-400 hover:text-blue-700 ml-2 md:ml:4 p-2 rounded-lg outline-blue-400"
+              download={`recording-${
+                props.transcriptId?.split("-")[0] || "0000"
+              }`}
               href={`${process.env.NEXT_PUBLIC_API_URL}/v1/transcripts/${props.transcriptId}/audio/mp3`}
             >
               <FontAwesomeIcon icon={faDownload} className="h-5 w-auto" />
www/app/transcripts/transcriptTitle.tsx (new file, 13 lines)

@@ -0,0 +1,13 @@
type TranscriptTitle = {
  title: string;
};

const TranscriptTitle = (props: TranscriptTitle) => {
  return (
    <h2 className="text-2xl lg:text-4xl font-extrabold text-center mb-4">
      {props.title}
    </h2>
  );
};

export default TranscriptTitle;