Files
reflector/server/gpu/modal/reflector_transcriber.py
projects-g 628c69f81c Separate out transcription and translation into own Modal deployments (#268)
* abstract transcript/translate into separate GPU apps

* update app names

* update transformers library version

* update env.example file
2023-10-13 22:01:21 +05:30

198 lines
5.5 KiB
Python

"""
Reflector GPU backend - transcriber
===================================
"""
import os
import tempfile
from modal import Image, Secret, Stub, asgi_app, method
from pydantic import BaseModel
# Whisper
# Model configuration baked into the container image at build time.
WHISPER_MODEL: str = "large-v2"          # faster-whisper model identifier
WHISPER_COMPUTE_TYPE: str = "float16"    # fp16 inference (matches the A10G GPU below)
WHISPER_NUM_WORKERS: int = 1             # single worker per container
WHISPER_MODEL_DIR = "/root/transcription_models"  # cache dir inside the image
# Modal application handle; all functions/classes below attach to this stub.
stub = Stub(name="reflector-transcriber")
def download_whisper():
    """Pre-download the Whisper model weights into the image's cache dir.

    Runs once at image-build time (via ``.run_function`` below) so containers
    start without a network fetch. Import is local because faster_whisper is
    only installed inside the Modal image.
    """
    from faster_whisper.utils import download_model
    print("Downloading Whisper model")
    download_model(WHISPER_MODEL, cache_dir=WHISPER_MODEL_DIR)
    print("Whisper model downloaded")
def migrate_cache_llm():
    """Migrate the Transformers model cache to the v4.22.0+ layout.

    XXX The cache for model files in Transformers v4.22.0 has been updated.
    Migrating your old cache. This is a one-time only operation. You can
    interrupt this and resume the migration later on by calling
    `transformers.utils.move_cache()`.

    Run at image-build time so containers never pay the migration cost.
    """
    from transformers.utils.hub import move_cache
    print("Moving LLM cache")
    # Source and destination are the same dir: move_cache rewrites the
    # layout in place rather than relocating files.
    move_cache(cache_dir=WHISPER_MODEL_DIR, new_cache_dir=WHISPER_MODEL_DIR)
    print("LLM cache moved")
# Container image for the transcriber. NOTE: each chained call is a cached
# image layer — keep the order stable to avoid needless rebuilds.
transcriber_image = (
    Image.debian_slim(python_version="3.10.8")
    .apt_install("git")
    .apt_install("wget")
    .apt_install("libsndfile-dev")  # audio decoding backend for soundfile/torchaudio
    .pip_install(
        "faster-whisper",
        "requests",
        "torch",
        "transformers==4.34.0",
        "sentencepiece",
        "protobuf",
        "huggingface_hub==0.16.4",
        "gitpython",
        "torchaudio",
        "fairseq2",
        "pyyaml",
        "hf-transfer~=0.1"
    )
    # Bake model weights and the migrated HF cache into the image so
    # cold-started containers do no network fetches.
    .run_function(download_whisper)
    .run_function(migrate_cache_llm)
    .env(
        {
            # Make the pip-installed NVIDIA cuDNN/cuBLAS shared libs visible
            # to ctranslate2 (faster-whisper's backend) at runtime.
            "LD_LIBRARY_PATH": (
                "/usr/local/lib/python3.10/site-packages/nvidia/cudnn/lib/:"
                "/opt/conda/lib/python3.10/site-packages/nvidia/cublas/lib/"
            )
        }
    )
)
@stub.cls(
    gpu="A10G",
    timeout=60 * 5,
    container_idle_timeout=60 * 5,
    image=transcriber_image,
)
class Transcriber:
    """Modal container class hosting a faster-whisper model.

    The model is loaded once per container start (``__enter__``) and reused
    across ``transcribe_segment`` calls while the container stays warm.
    """

    def __enter__(self):
        # Imports are local: torch/faster_whisper only exist inside the image.
        import faster_whisper
        import torch
        self.use_gpu = torch.cuda.is_available()
        self.device = "cuda" if self.use_gpu else "cpu"
        self.model = faster_whisper.WhisperModel(
            WHISPER_MODEL,
            device=self.device,
            compute_type=WHISPER_COMPUTE_TYPE,
            num_workers=WHISPER_NUM_WORKERS,
            download_root=WHISPER_MODEL_DIR
        )

    @method()
    def transcribe_segment(
        self,
        audio_data: bytes,  # raw audio file content (was mis-annotated `str`)
        audio_suffix: str,
        source_language: str,
        timestamp: float = 0
    ):
        """Transcribe one audio segment.

        Parameters:
            audio_data: raw bytes of the audio file.
            audio_suffix: file extension (e.g. "wav") used for the temp file.
            source_language: language code passed to Whisper.
            timestamp: offset (seconds) added to every word timing.

        Returns:
            dict with "text" ({language: transcript}) and "words"
            (list of {"text", "start", "end"} with timings offset by
            ``timestamp`` and rounded to milliseconds).
        """
        with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
            fp.write(audio_data)
            # BUGFIX: flush buffered bytes to disk. transcribe() reopens the
            # file by *name*, so unflushed data would silently truncate the
            # audio for segments larger than the stdio buffer.
            fp.flush()
            segments, _ = self.model.transcribe(
                fp.name,
                language=source_language,
                beam_size=5,
                word_timestamps=True,
                vad_filter=True,
                vad_parameters={"min_silence_duration_ms": 500},
            )
            multilingual_transcript = {}
            transcript_source_lang = ""
            words = []
            # `segments` is a lazy generator; iterating it here performs the
            # actual decoding while the temp file still exists. (The old
            # `if segments:` guard was a no-op — a generator is always truthy.)
            for segment in segments:
                transcript_source_lang += segment.text
                for word in segment.words:
                    words.append(
                        {
                            "text": word.word,
                            "start": round(timestamp + word.start, 3),
                            "end": round(timestamp + word.end, 3),
                        }
                    )
            multilingual_transcript[source_language] = transcript_source_lang
            return {
                "text": multilingual_transcript,
                "words": words
            }
# -------------------------------------------------------------------
# Web API
# -------------------------------------------------------------------
@stub.function(
    container_idle_timeout=60,
    timeout=60,
    secrets=[
        Secret.from_name("reflector-gpu"),
    ],
)
@asgi_app()
def web():
    """Build the FastAPI app exposing the transcriber over HTTP.

    Single endpoint ``POST /transcribe`` guarded by a bearer API key that
    must match the ``REFLECTOR_GPU_APIKEY`` env var (from the
    "reflector-gpu" Modal secret).
    """
    from fastapi import Body, Depends, FastAPI, HTTPException, UploadFile, status
    from fastapi.security import OAuth2PasswordBearer
    from typing_extensions import Annotated

    transcriberstub = Transcriber()
    app = FastAPI()
    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
    supported_audio_file_types = ["wav", "mp3", "ogg", "flac"]

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        # Bearer token must equal the key injected by the Modal secret.
        if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid API key",
                headers={"WWW-Authenticate": "Bearer"},
            )

    class TranscriptResponse(BaseModel):
        # Transcription payload: {"text": {...}, "words": [...]}
        result: dict

    @app.post("/transcribe", dependencies=[Depends(apikey_auth)])
    async def transcribe(
        file: UploadFile,
        source_language: Annotated[str, Body(...)] = "en",
        timestamp: Annotated[float, Body()] = 0.0
    ) -> TranscriptResponse:
        audio_data = await file.read()
        audio_suffix = file.filename.split(".")[-1]
        # BUGFIX: was a bare `assert`, which is stripped under `python -O`
        # and otherwise surfaces to clients as an opaque 500. Reject
        # unsupported containers with an explicit 415 instead.
        if audio_suffix not in supported_audio_file_types:
            raise HTTPException(
                status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                detail=f"Unsupported audio file type: {audio_suffix}",
            )
        func = transcriberstub.transcribe_segment.spawn(
            audio_data=audio_data,
            audio_suffix=audio_suffix,
            source_language=source_language,
            timestamp=timestamp
        )
        result = func.get()
        # BUGFIX: the handler declares `-> TranscriptResponse`, so FastAPI
        # validates the return value against that model; the raw dict from
        # transcribe_segment has no "result" key and would fail response
        # validation. Wrap it to match the declared schema.
        return TranscriptResponse(result=result)

    return app