Merge branch 'main' of github.com:Monadical-SAS/reflector into jose/markers

This commit is contained in:
Jose B
2023-08-16 15:29:00 -05:00
39 changed files with 1214 additions and 888 deletions

44
.github/workflows/deploy.yml vendored Normal file

@@ -0,0 +1,44 @@
name: Deploy to Amazon ECS
on: [deployment, workflow_dispatch]
env:
# 384658522150.dkr.ecr.us-east-1.amazonaws.com/reflector
AWS_REGION: us-east-1
ECR_REPOSITORY: reflector
jobs:
deploy:
runs-on: ubuntu-latest
permissions:
deployments: write
contents: read
steps:
- uses: actions/checkout@v3
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build and push
id: docker_build
uses: docker/build-push-action@v4
with:
context: server
push: true
tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
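The workflow above only runs on `deployment` and `workflow_dispatch` events, so a deploy has to be triggered explicitly, either manually from the Actions UI or by creating a deployment through the GitHub API. A minimal sketch of the latter; the token, repository and ref are placeholders/assumptions:

```
# Sketch: create the "deployment" event this workflow listens for.
# GITHUB_TOKEN, the repository and the ref are placeholders, not part of this commit.
import os
import httpx

token = os.environ["GITHUB_TOKEN"]  # a PAT with repo scope (assumption)
repo = "Monadical-SAS/reflector"

resp = httpx.post(
    f"https://api.github.com/repos/{repo}/deployments",
    headers={
        "Authorization": f"Bearer {token}",
        "Accept": "application/vnd.github+json",
    },
    json={"ref": "main", "auto_merge": False, "required_contexts": []},
)
resp.raise_for_status()
print("deployment created:", resp.json().get("id"))
```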

3
server/.gitignore vendored

@@ -175,3 +175,6 @@ test_samples/
.vscode/
artefacts/
audio_*.wav
# ignore local database
reflector.sqlite3

80
server/env.example Normal file

@@ -0,0 +1,80 @@
#
# This file serves as an example of a possible configuration
# All the settings are described here: reflector/settings.py
#
## =======================================================
## Database
## =======================================================
#DATABASE_URL=sqlite://./reflector.db
#DATABASE_URL=postgresql://reflector:reflector@localhost:5432/reflector
## =======================================================
## Transcription backend
##
## Check reflector/processors/audio_transcript_* for the
## full list of available transcription backends
## =======================================================
## Using local whisper (default)
#TRANSCRIPT_BACKEND=whisper
#WHISPER_MODEL_SIZE=tiny
## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#TRANSCRIPT_BACKEND=modal
#TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
#TRANSCRIPT_MODAL_API_KEY=xxxxx
## Using serverless banana.dev (requires reflector-gpu-banana to be deployed)
## XXX this service is buggy, do not use it at the moment
## XXX it also requires the audio to be saved to S3
#TRANSCRIPT_BACKEND=banana
#TRANSCRIPT_URL=https://reflector-gpu-banana-xxxxx.run.banana.dev
#TRANSCRIPT_BANANA_API_KEY=xxx
#TRANSCRIPT_BANANA_MODEL_KEY=xxx
#TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=xxx
#TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=xxx
#TRANSCRIPT_STORAGE_AWS_BUCKET_NAME="reflector-bucket/chunks"
## =======================================================
## LLM backend
##
## Check reflector/llm/* for the full list of available
## LLM backend implementations
## =======================================================
## Using oobagooda (default)
#LLM_BACKEND=oobagooda
#LLM_URL=http://xxx:7860/api/generate/v1
## Using serverless modal.com (requires reflector-gpu-modal to be deployed)
#LLM_BACKEND=modal
#LLM_URL=https://xxxxxx--reflector-llm-web.modal.run
#LLM_MODAL_API_KEY=xxx
## Using serverless banana.dev (requires reflector-gpu-banana to be deployed)
## XXX this service is buggy, do not use it at the moment
#LLM_BACKEND=banana
#LLM_URL=https://reflector-gpu-banana-xxxxx.run.banana.dev
#LLM_BANANA_API_KEY=xxxxx
#LLM_BANANA_MODEL_KEY=xxxxx
## Using OpenAI
#LLM_BACKEND=openai
#LLM_OPENAI_KEY=xxx
#LLM_OPENAI_MODEL=gpt-3.5-turbo
## Using GPT4All
#LLM_BACKEND=openai
#LLM_URL=http://localhost:4891/v1/completions
#LLM_OPENAI_MODEL="GPT4All Falcon"
## =======================================================
## Sentry
## =======================================================
## Sentry DSN configuration
#SENTRY_DSN=
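
These variables are consumed by the pydantic settings object in `reflector/settings.py`, which also reads them from a local `.env` file. A minimal sketch of that pattern with an illustrative subset of fields (the authoritative list lives in `reflector/settings.py`; the import path depends on the installed pydantic major version):

```
# Sketch of the settings pattern; the import location is an assumption
# depending on the pydantic major version in use.
try:
    from pydantic_settings import BaseSettings  # pydantic v2
except ImportError:
    from pydantic import BaseSettings  # pydantic v1

class ExampleSettings(BaseSettings):
    # illustrative subset of fields, see reflector/settings.py for the full list
    DATABASE_URL: str = "sqlite:///./reflector.sqlite3"
    TRANSCRIPT_BACKEND: str = "whisper"
    TRANSCRIPT_URL: str | None = None
    LLM_BACKEND: str = "oobagooda"
    LLM_URL: str | None = None

# Environment variables override the defaults (e.g. export TRANSCRIPT_BACKEND=modal)
settings = ExampleSettings()
print(settings.DATABASE_URL, settings.TRANSCRIPT_BACKEND)
```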


@@ -0,0 +1,92 @@
# Reflector GPU implementation - Transcription and LLM
This repository holds an API for the GPU implementation of the Reflector API service,
and uses [Modal.com](https://modal.com):
- `reflector_llm.py` - LLM API
- `reflector_transcriber.py` - Transcription API
## Modal.com deployment
Create a Modal secret and name it `reflector-gpu`.
It should contain a `REFLECTOR_APIKEY` environment variable with a value.
The deployment is done using the [Modal.com](https://modal.com) service.
```
$ modal deploy reflector_transcriber.py
...
└── 🔨 Created web => https://xxxx--reflector-transcriber-web.modal.run
$ modal deploy reflector_llm.py
...
└── 🔨 Created web => https://xxxx--reflector-llm-web.modal.run
```
Then, in your Reflector API configuration `.env`, you can set these keys:
```
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://xxxx--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=REFLECTOR_APIKEY
LLM_BACKEND=modal
LLM_URL=https://xxxx--reflector-llm-web.modal.run
LLM_MODAL_API_KEY=REFLECTOR_APIKEY
```
## API
Authentication must be passed with the `Authorization` header, using the `bearer` scheme.
```
Authorization: bearer <REFLECTOR_APIKEY>
```
### Warmup (both)
`POST /warmup`
**response**
```
{
"status": "ok"
}
```
### LLM
`POST /llm`
**request**
```
{
"prompt": "xxx"
}
```
**response**
```
{
"text": "xxx completed"
}
```
### Transcription
`POST /transcribe`
**request** (multipart/form-data)
- `file` - audio file
- `language` - language code (e.g. `en`)
- `timestamp` - optional offset in seconds added to the returned word timings (default `0`)
**response**
```
{
"text": "xxx",
"words": [
{"text": "xxx", "start": 0.0, "end": 1.0}
]
}
```
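
A small client sketch for exercising these endpoints once both apps are deployed; the URLs and API key below are placeholders taken from the `modal deploy` output and the `reflector-gpu` secret:

```
# Sketch: calling the deployed endpoints with httpx (URLs and key are placeholders).
import httpx

API_KEY = "xxxxx"  # value of REFLECTOR_APIKEY in the reflector-gpu secret
LLM_URL = "https://xxxx--reflector-llm-web.modal.run"
TRANSCRIBER_URL = "https://xxxx--reflector-transcriber-web.modal.run"

with httpx.Client(headers={"Authorization": f"bearer {API_KEY}"}, timeout=300) as client:
    # warm both services up so the first real call avoids a cold start
    client.post(f"{LLM_URL}/warmup").raise_for_status()
    client.post(f"{TRANSCRIBER_URL}/warmup").raise_for_status()

    # LLM completion
    r = client.post(f"{LLM_URL}/llm", json={"prompt": "Hello"})
    print(r.json()["text"])

    # transcription: multipart upload with optional form fields
    with open("sample.wav", "rb") as f:
        r = client.post(
            f"{TRANSCRIBER_URL}/transcribe",
            files={"file": ("sample.wav", f)},
            data={"language": "en", "timestamp": "0"},
        )
    print(r.json()["words"])
```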


@@ -0,0 +1,170 @@
"""
Reflector GPU backend - LLM
===========================
"""
import os
from modal import Image, method, Stub, asgi_app, Secret
# LLM
LLM_MODEL: str = "lmsys/vicuna-13b-v1.5"
LLM_LOW_CPU_MEM_USAGE: bool = False
LLM_TORCH_DTYPE: str = "bfloat16"
LLM_MAX_NEW_TOKENS: int = 300
IMAGE_MODEL_DIR = "/model"
stub = Stub(name="reflector-llm")
def download_llm():
from huggingface_hub import snapshot_download
print("Downloading LLM model")
snapshot_download(LLM_MODEL, local_dir=IMAGE_MODEL_DIR)
print("LLM model downloaded")
def migrate_cache_llm():
"""
XXX The cache for model files in Transformers v4.22.0 has been updated.
Migrating your old cache. This is a one-time only operation. You can
interrupt this and resume the migration later on by calling
`transformers.utils.move_cache()`.
"""
from transformers.utils.hub import move_cache
print("Moving LLM cache")
move_cache()
print("LLM cache moved")
llm_image = (
Image.debian_slim(python_version="3.10.8")
.apt_install("git")
.pip_install(
"transformers",
"torch",
"sentencepiece",
"protobuf",
"einops==0.6.1",
"hf-transfer~=0.1",
"huggingface_hub==0.16.4",
)
.env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
.run_function(download_llm)
.run_function(migrate_cache_llm)
)
@stub.cls(
gpu="A100",
timeout=60 * 5,
container_idle_timeout=60 * 5,
concurrency_limit=2,
image=llm_image,
)
class LLM:
def __enter__(self):
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
print("Instance llm model")
model = AutoModelForCausalLM.from_pretrained(
IMAGE_MODEL_DIR,
torch_dtype=getattr(torch, LLM_TORCH_DTYPE),
low_cpu_mem_usage=LLM_LOW_CPU_MEM_USAGE,
)
# generation configuration
print("Instance llm generation config")
model.config.max_new_tokens = LLM_MAX_NEW_TOKENS
gen_cfg = GenerationConfig.from_model_config(model.config)
gen_cfg.max_new_tokens = LLM_MAX_NEW_TOKENS
# load tokenizer
print("Instance llm tokenizer")
tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL)
# move model to gpu
print("Move llm model to GPU")
model = model.cuda()
print("Warmup llm done")
self.model = model
self.tokenizer = tokenizer
self.gen_cfg = gen_cfg
def __exit__(self, *args):
print("Exit llm")
@method()
def warmup(self):
print("Warmup ok")
return {"status": "ok"}
@method()
def generate(self, prompt: str):
print(f"Generate {prompt=}")
# tokenize prompt
input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(
self.model.device
)
output = self.model.generate(input_ids, generation_config=self.gen_cfg)
# decode output
response = self.tokenizer.decode(output[0].cpu(), skip_special_tokens=True)
print(f"Generated {response=}")
return {"text": response}
# -------------------------------------------------------------------
# Web API
# -------------------------------------------------------------------
@stub.function(
container_idle_timeout=60 * 10,
timeout=60 * 5,
secrets=[
Secret.from_name("reflector-gpu"),
],
)
@asgi_app()
def web():
from fastapi import FastAPI, HTTPException, status, Depends
from fastapi.security import OAuth2PasswordBearer
from pydantic import BaseModel
llmstub = LLM()
app = FastAPI()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API key",
headers={"WWW-Authenticate": "Bearer"},
)
class LLMRequest(BaseModel):
prompt: str
@app.post("/llm", dependencies=[Depends(apikey_auth)])
async def llm(
req: LLMRequest,
):
func = llmstub.generate.spawn(prompt=req.prompt)
result = func.get()
return result
@app.post("/warmup", dependencies=[Depends(apikey_auth)])
async def warmup():
return llmstub.warmup.spawn().get()
return app


@@ -0,0 +1,173 @@
"""
Reflector GPU backend - transcriber
===================================
"""
import tempfile
import os
from modal import Image, method, Stub, asgi_app, Secret
from pydantic import BaseModel
# Whisper
WHISPER_MODEL: str = "large-v2"
WHISPER_COMPUTE_TYPE: str = "float16"
WHISPER_NUM_WORKERS: int = 1
WHISPER_CACHE_DIR: str = "/cache/whisper"
stub = Stub(name="reflector-transcriber")
def download_whisper():
from faster_whisper.utils import download_model
download_model(WHISPER_MODEL, local_files_only=False)
whisper_image = (
Image.debian_slim(python_version="3.10.8")
.apt_install("git")
.pip_install(
"faster-whisper",
"requests",
"torch",
)
.run_function(download_whisper)
.env(
{
"LD_LIBRARY_PATH": (
"/usr/local/lib/python3.10/site-packages/nvidia/cudnn/lib/:"
"/opt/conda/lib/python3.10/site-packages/nvidia/cublas/lib/"
)
}
)
)
@stub.cls(
gpu="A10G",
container_idle_timeout=60,
image=whisper_image,
)
class Whisper:
def __enter__(self):
import torch
import faster_whisper
self.use_gpu = torch.cuda.is_available()
device = "cuda" if self.use_gpu else "cpu"
self.model = faster_whisper.WhisperModel(
WHISPER_MODEL,
device=device,
compute_type=WHISPER_COMPUTE_TYPE,
num_workers=WHISPER_NUM_WORKERS,
)
@method()
def warmup(self):
return {"status": "ok"}
@method()
def transcribe_segment(
self,
audio_data: str,
audio_suffix: str,
timestamp: float = 0,
language: str = "en",
):
with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
fp.write(audio_data)
segments, _ = self.model.transcribe(
fp.name,
language=language,
beam_size=5,
word_timestamps=True,
vad_filter=True,
vad_parameters={"min_silence_duration_ms": 500},
)
transcript = ""
words = []
if segments:
segments = list(segments)
for segment in segments:
transcript += segment.text
for word in segment.words:
words.append(
{
"text": word.word,
"start": round(timestamp + word.start, 3),
"end": round(timestamp + word.end, 3),
}
)
return {
"text": transcript,
"words": words,
}
# -------------------------------------------------------------------
# Web API
# -------------------------------------------------------------------
@stub.function(
container_idle_timeout=60,
timeout=60,
secrets=[
Secret.from_name("reflector-gpu"),
],
)
@asgi_app()
def web():
from fastapi import FastAPI, UploadFile, Form, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from typing_extensions import Annotated
transcriberstub = Whisper()
app = FastAPI()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API key",
headers={"WWW-Authenticate": "Bearer"},
)
class TranscriptionRequest(BaseModel):
timestamp: float = 0
language: str = "en"
class TranscriptResponse(BaseModel):
result: str
@app.post("/transcribe", dependencies=[Depends(apikey_auth)])
async def transcribe(
file: UploadFile,
timestamp: Annotated[float, Form()] = 0,
language: Annotated[str, Form()] = "en",
):
audio_data = await file.read()
audio_suffix = file.filename.split(".")[-1]
assert audio_suffix in ["wav", "mp3", "ogg", "flac"]
func = transcriberstub.transcribe_segment.spawn(
audio_data=audio_data,
audio_suffix=audio_suffix,
language=language,
timestamp=timestamp,
)
result = func.get()
return result
@app.post("/warmup", dependencies=[Depends(apikey_auth)])
async def warmup():
return transcriberstub.warmup.spawn().get()
return app

240
server/poetry.lock generated

@@ -274,6 +274,21 @@ files = [
[package.dependencies]
frozenlist = ">=1.1.0"
[[package]]
name = "aiosqlite"
version = "0.19.0"
description = "asyncio bridge to the standard sqlite3 module"
optional = false
python-versions = ">=3.7"
files = [
{file = "aiosqlite-0.19.0-py3-none-any.whl", hash = "sha256:edba222e03453e094a3ce605db1b970c4b3376264e56f32e2a4959f948d66a96"},
{file = "aiosqlite-0.19.0.tar.gz", hash = "sha256:95ee77b91c8d2808bd08a59fbebf66270e9090c3d92ffbf260dc0db0b979577d"},
]
[package.extras]
dev = ["aiounittest (==1.4.1)", "attribution (==1.6.2)", "black (==23.3.0)", "coverage[toml] (==7.2.3)", "flake8 (==5.0.4)", "flake8-bugbear (==23.3.12)", "flit (==3.7.1)", "mypy (==1.2.0)", "ufmt (==2.1.0)", "usort (==1.0.6)"]
docs = ["sphinx (==6.1.3)", "sphinx-mdinclude (==0.5.3)"]
[[package]]
name = "annotated-types"
version = "0.5.0"
@@ -316,6 +331,59 @@ files = [
{file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"},
]
[[package]]
name = "asyncpg"
version = "0.28.0"
description = "An asyncio PostgreSQL driver"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "asyncpg-0.28.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a6d1b954d2b296292ddff4e0060f494bb4270d87fb3655dd23c5c6096d16d83"},
{file = "asyncpg-0.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0740f836985fd2bd73dca42c50c6074d1d61376e134d7ad3ad7566c4f79f8184"},
{file = "asyncpg-0.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e907cf620a819fab1737f2dd90c0f185e2a796f139ac7de6aa3212a8af96c050"},
{file = "asyncpg-0.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b339984d55e8202e0c4b252e9573e26e5afa05617ed02252544f7b3e6de3e9"},
{file = "asyncpg-0.28.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0c402745185414e4c204a02daca3d22d732b37359db4d2e705172324e2d94e85"},
{file = "asyncpg-0.28.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c88eef5e096296626e9688f00ab627231f709d0e7e3fb84bb4413dff81d996d7"},
{file = "asyncpg-0.28.0-cp310-cp310-win32.whl", hash = "sha256:90a7bae882a9e65a9e448fdad3e090c2609bb4637d2a9c90bfdcebbfc334bf89"},
{file = "asyncpg-0.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:76aacdcd5e2e9999e83c8fbcb748208b60925cc714a578925adcb446d709016c"},
{file = "asyncpg-0.28.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a0e08fe2c9b3618459caaef35979d45f4e4f8d4f79490c9fa3367251366af207"},
{file = "asyncpg-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b24e521f6060ff5d35f761a623b0042c84b9c9b9fb82786aadca95a9cb4a893b"},
{file = "asyncpg-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99417210461a41891c4ff301490a8713d1ca99b694fef05dabd7139f9d64bd6c"},
{file = "asyncpg-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f029c5adf08c47b10bcdc857001bbef551ae51c57b3110964844a9d79ca0f267"},
{file = "asyncpg-0.28.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ad1d6abf6c2f5152f46fff06b0e74f25800ce8ec6c80967f0bc789974de3c652"},
{file = "asyncpg-0.28.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d7fa81ada2807bc50fea1dc741b26a4e99258825ba55913b0ddbf199a10d69d8"},
{file = "asyncpg-0.28.0-cp311-cp311-win32.whl", hash = "sha256:f33c5685e97821533df3ada9384e7784bd1e7865d2b22f153f2e4bd4a083e102"},
{file = "asyncpg-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:5e7337c98fb493079d686a4a6965e8bcb059b8e1b8ec42106322fc6c1c889bb0"},
{file = "asyncpg-0.28.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1c56092465e718a9fdcc726cc3d9dcf3a692e4834031c9a9f871d92a75d20d48"},
{file = "asyncpg-0.28.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4acd6830a7da0eb4426249d71353e8895b350daae2380cb26d11e0d4a01c5472"},
{file = "asyncpg-0.28.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63861bb4a540fa033a56db3bb58b0c128c56fad5d24e6d0a8c37cb29b17c1c7d"},
{file = "asyncpg-0.28.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a93a94ae777c70772073d0512f21c74ac82a8a49be3a1d982e3f259ab5f27307"},
{file = "asyncpg-0.28.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d14681110e51a9bc9c065c4e7944e8139076a778e56d6f6a306a26e740ed86d2"},
{file = "asyncpg-0.28.0-cp37-cp37m-win32.whl", hash = "sha256:8aec08e7310f9ab322925ae5c768532e1d78cfb6440f63c078b8392a38aa636a"},
{file = "asyncpg-0.28.0-cp37-cp37m-win_amd64.whl", hash = "sha256:319f5fa1ab0432bc91fb39b3960b0d591e6b5c7844dafc92c79e3f1bff96abef"},
{file = "asyncpg-0.28.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b337ededaabc91c26bf577bfcd19b5508d879c0ad009722be5bb0a9dd30b85a0"},
{file = "asyncpg-0.28.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d32b680a9b16d2957a0a3cc6b7fa39068baba8e6b728f2e0a148a67644578f4"},
{file = "asyncpg-0.28.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4f62f04cdf38441a70f279505ef3b4eadf64479b17e707c950515846a2df197"},
{file = "asyncpg-0.28.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f20cac332c2576c79c2e8e6464791c1f1628416d1115935a34ddd7121bfc6a4"},
{file = "asyncpg-0.28.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:59f9712ce01e146ff71d95d561fb68bd2d588a35a187116ef05028675462d5ed"},
{file = "asyncpg-0.28.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc9e9f9ff1aa0eddcc3247a180ac9e9b51a62311e988809ac6152e8fb8097756"},
{file = "asyncpg-0.28.0-cp38-cp38-win32.whl", hash = "sha256:9e721dccd3838fcff66da98709ed884df1e30a95f6ba19f595a3706b4bc757e3"},
{file = "asyncpg-0.28.0-cp38-cp38-win_amd64.whl", hash = "sha256:8ba7d06a0bea539e0487234511d4adf81dc8762249858ed2a580534e1720db00"},
{file = "asyncpg-0.28.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d009b08602b8b18edef3a731f2ce6d3f57d8dac2a0a4140367e194eabd3de457"},
{file = "asyncpg-0.28.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ec46a58d81446d580fb21b376ec6baecab7288ce5a578943e2fc7ab73bf7eb39"},
{file = "asyncpg-0.28.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b48ceed606cce9e64fd5480a9b0b9a95cea2b798bb95129687abd8599c8b019"},
{file = "asyncpg-0.28.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8858f713810f4fe67876728680f42e93b7e7d5c7b61cf2118ef9153ec16b9423"},
{file = "asyncpg-0.28.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5e18438a0730d1c0c1715016eacda6e9a505fc5aa931b37c97d928d44941b4bf"},
{file = "asyncpg-0.28.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e9c433f6fcdd61c21a715ee9128a3ca48be8ac16fa07be69262f016bb0f4dbd2"},
{file = "asyncpg-0.28.0-cp39-cp39-win32.whl", hash = "sha256:41e97248d9076bc8e4849da9e33e051be7ba37cd507cbd51dfe4b2d99c70e3dc"},
{file = "asyncpg-0.28.0-cp39-cp39-win_amd64.whl", hash = "sha256:3ed77f00c6aacfe9d79e9eff9e21729ce92a4b38e80ea99a58ed382f42ebd55b"},
{file = "asyncpg-0.28.0.tar.gz", hash = "sha256:7252cdc3acb2f52feaa3664280d3bcd78a46bd6c10bfd681acfffefa1120e278"},
]
[package.extras]
docs = ["Sphinx (>=5.3.0,<5.4.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"]
test = ["flake8 (>=5.0,<6.0)", "uvloop (>=0.15.3)"]
[[package]]
name = "attrs"
version = "23.1.0"
@@ -836,6 +904,32 @@ files = [
numpy = "*"
pyyaml = ">=5.3,<7"
[[package]]
name = "databases"
version = "0.7.0"
description = "Async database support for Python."
optional = false
python-versions = ">=3.7"
files = [
{file = "databases-0.7.0-py3-none-any.whl", hash = "sha256:cf5da4b8a3e3cd038c459529725ebb64931cbbb7a091102664f20ef8f6cefd0d"},
{file = "databases-0.7.0.tar.gz", hash = "sha256:ea2d419d3d2eb80595b7ceb8f282056f080af62efe2fb9bcd83562f93ec4b674"},
]
[package.dependencies]
aiosqlite = {version = "*", optional = true, markers = "extra == \"aiosqlite\""}
asyncpg = {version = "*", optional = true, markers = "extra == \"asyncpg\""}
sqlalchemy = ">=1.4.42,<1.5"
[package.extras]
aiomysql = ["aiomysql"]
aiopg = ["aiopg"]
aiosqlite = ["aiosqlite"]
asyncmy = ["asyncmy"]
asyncpg = ["asyncpg"]
mysql = ["aiomysql"]
postgresql = ["asyncpg"]
sqlite = ["aiosqlite"]
[[package]]
name = "dnspython"
version = "2.4.1"
@@ -1139,6 +1233,79 @@ files = [
[package.extras]
testing = ["pytest"]
[[package]]
name = "greenlet"
version = "2.0.2"
description = "Lightweight in-process concurrent programming"
optional = false
python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*"
files = [
{file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"},
{file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"},
{file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"},
{file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"},
{file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"},
{file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"},
{file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"},
{file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"},
{file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"},
{file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"},
{file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"},
{file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"},
{file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"},
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"},
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"},
{file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"},
{file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"},
{file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"},
{file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"},
{file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"},
{file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"},
{file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"},
{file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"},
{file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"},
{file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"},
{file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"},
{file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"},
{file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"},
{file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"},
{file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"},
{file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"},
{file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"},
{file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"},
{file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"},
{file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"},
{file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"},
{file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"},
{file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"},
{file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"},
{file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"},
{file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"},
{file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"},
{file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"},
{file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"},
{file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"},
{file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"},
{file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"},
{file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"},
{file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"},
{file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"},
{file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"},
{file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"},
{file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"},
{file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"},
{file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"},
{file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"},
{file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"},
]
[package.extras]
docs = ["Sphinx", "docutils (<0.18)"]
test = ["objgraph", "psutil"]
[[package]]
name = "h11"
version = "0.14.0"
@@ -2429,6 +2596,77 @@ files = [
{file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
]
[[package]]
name = "sqlalchemy"
version = "1.4.49"
description = "Database Abstraction Library"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "SQLAlchemy-1.4.49-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e126cf98b7fd38f1e33c64484406b78e937b1a280e078ef558b95bf5b6895f6"},
{file = "SQLAlchemy-1.4.49-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03db81b89fe7ef3857b4a00b63dedd632d6183d4ea5a31c5d8a92e000a41fc71"},
{file = "SQLAlchemy-1.4.49-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:95b9df9afd680b7a3b13b38adf6e3a38995da5e162cc7524ef08e3be4e5ed3e1"},
{file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a63e43bf3f668c11bb0444ce6e809c1227b8f067ca1068898f3008a273f52b09"},
{file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f835c050ebaa4e48b18403bed2c0fda986525896efd76c245bdd4db995e51a4c"},
{file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c21b172dfb22e0db303ff6419451f0cac891d2e911bb9fbf8003d717f1bcf91"},
{file = "SQLAlchemy-1.4.49-cp310-cp310-win32.whl", hash = "sha256:5fb1ebdfc8373b5a291485757bd6431de8d7ed42c27439f543c81f6c8febd729"},
{file = "SQLAlchemy-1.4.49-cp310-cp310-win_amd64.whl", hash = "sha256:f8a65990c9c490f4651b5c02abccc9f113a7f56fa482031ac8cb88b70bc8ccaa"},
{file = "SQLAlchemy-1.4.49-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8923dfdf24d5aa8a3adb59723f54118dd4fe62cf59ed0d0d65d940579c1170a4"},
{file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9ab2c507a7a439f13ca4499db6d3f50423d1d65dc9b5ed897e70941d9e135b0"},
{file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5debe7d49b8acf1f3035317e63d9ec8d5e4d904c6e75a2a9246a119f5f2fdf3d"},
{file = "SQLAlchemy-1.4.49-cp311-cp311-win32.whl", hash = "sha256:82b08e82da3756765c2e75f327b9bf6b0f043c9c3925fb95fb51e1567fa4ee87"},
{file = "SQLAlchemy-1.4.49-cp311-cp311-win_amd64.whl", hash = "sha256:171e04eeb5d1c0d96a544caf982621a1711d078dbc5c96f11d6469169bd003f1"},
{file = "SQLAlchemy-1.4.49-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:36e58f8c4fe43984384e3fbe6341ac99b6b4e083de2fe838f0fdb91cebe9e9cb"},
{file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b31e67ff419013f99ad6f8fc73ee19ea31585e1e9fe773744c0f3ce58c039c30"},
{file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c14b29d9e1529f99efd550cd04dbb6db6ba5d690abb96d52de2bff4ed518bc95"},
{file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c40f3470e084d31247aea228aa1c39bbc0904c2b9ccbf5d3cfa2ea2dac06f26d"},
{file = "SQLAlchemy-1.4.49-cp36-cp36m-win32.whl", hash = "sha256:706bfa02157b97c136547c406f263e4c6274a7b061b3eb9742915dd774bbc264"},
{file = "SQLAlchemy-1.4.49-cp36-cp36m-win_amd64.whl", hash = "sha256:a7f7b5c07ae5c0cfd24c2db86071fb2a3d947da7bd487e359cc91e67ac1c6d2e"},
{file = "SQLAlchemy-1.4.49-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:4afbbf5ef41ac18e02c8dc1f86c04b22b7a2125f2a030e25bbb4aff31abb224b"},
{file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24e300c0c2147484a002b175f4e1361f102e82c345bf263242f0449672a4bccf"},
{file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:201de072b818f8ad55c80d18d1a788729cccf9be6d9dc3b9d8613b053cd4836d"},
{file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653ed6817c710d0c95558232aba799307d14ae084cc9b1f4c389157ec50df5c"},
{file = "SQLAlchemy-1.4.49-cp37-cp37m-win32.whl", hash = "sha256:647e0b309cb4512b1f1b78471fdaf72921b6fa6e750b9f891e09c6e2f0e5326f"},
{file = "SQLAlchemy-1.4.49-cp37-cp37m-win_amd64.whl", hash = "sha256:ab73ed1a05ff539afc4a7f8cf371764cdf79768ecb7d2ec691e3ff89abbc541e"},
{file = "SQLAlchemy-1.4.49-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:37ce517c011560d68f1ffb28af65d7e06f873f191eb3a73af5671e9c3fada08a"},
{file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1878ce508edea4a879015ab5215546c444233881301e97ca16fe251e89f1c55"},
{file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e8e608983e6f85d0852ca61f97e521b62e67969e6e640fe6c6b575d4db68557"},
{file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccf956da45290df6e809ea12c54c02ace7f8ff4d765d6d3dfb3655ee876ce58d"},
{file = "SQLAlchemy-1.4.49-cp38-cp38-win32.whl", hash = "sha256:f167c8175ab908ce48bd6550679cc6ea20ae169379e73c7720a28f89e53aa532"},
{file = "SQLAlchemy-1.4.49-cp38-cp38-win_amd64.whl", hash = "sha256:45806315aae81a0c202752558f0df52b42d11dd7ba0097bf71e253b4215f34f4"},
{file = "SQLAlchemy-1.4.49-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b6d0c4b15d65087738a6e22e0ff461b407533ff65a73b818089efc8eb2b3e1de"},
{file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a843e34abfd4c797018fd8d00ffffa99fd5184c421f190b6ca99def4087689bd"},
{file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1c890421651b45a681181301b3497e4d57c0d01dc001e10438a40e9a9c25ee77"},
{file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d26f280b8f0a8f497bc10573849ad6dc62e671d2468826e5c748d04ed9e670d5"},
{file = "SQLAlchemy-1.4.49-cp39-cp39-win32.whl", hash = "sha256:ec2268de67f73b43320383947e74700e95c6770d0c68c4e615e9897e46296294"},
{file = "SQLAlchemy-1.4.49-cp39-cp39-win_amd64.whl", hash = "sha256:bbdf16372859b8ed3f4d05f925a984771cd2abd18bd187042f24be4886c2a15f"},
{file = "SQLAlchemy-1.4.49.tar.gz", hash = "sha256:06ff25cbae30c396c4b7737464f2a7fc37a67b7da409993b182b024cec80aed9"},
]
[package.dependencies]
greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\")"}
[package.extras]
aiomysql = ["aiomysql", "greenlet (!=0.4.17)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"]
asyncio = ["greenlet (!=0.4.17)"]
asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"]
mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"]
mssql = ["pyodbc"]
mssql-pymssql = ["pymssql"]
mssql-pyodbc = ["pyodbc"]
mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"]
mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"]
mysql-connector = ["mysql-connector-python"]
oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"]
postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
pymysql = ["pymysql", "pymysql (<1)"]
sqlcipher = ["sqlcipher3-binary"]
[[package]]
name = "stamina"
version = "23.1.0"
@@ -2996,4 +3234,4 @@ multidict = ">=4.0"
[metadata]
lock-version = "2.0"
python-versions = "^3.11"
content-hash = "c9924049dacf7310590416f096f5b20f6ed905d8a50edf5e8afcf2c28b70799f"
content-hash = "ea523f9b74581a7867097a6249d416d8836f4daaf33fde65ea343e4d3502c71c"


@@ -23,6 +23,8 @@ fastapi = "^0.100.1"
sentry-sdk = {extras = ["fastapi"], version = "^1.29.2"}
httpx = "^0.24.1"
fastapi-pagination = "^0.12.6"
databases = {extras = ["aiosqlite", "asyncpg"], version = "^0.7.0"}
sqlalchemy = "<1.5"
[tool.poetry.group.dev.dependencies]


@@ -2,6 +2,7 @@ from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi_pagination import add_pagination
from fastapi.routing import APIRoute
import reflector.db # noqa
from reflector.views.rtc_offer import router as rtc_offer_router
from reflector.views.transcripts import router as transcripts_router
from reflector.events import subscribers_startup, subscribers_shutdown


@@ -0,0 +1,42 @@
import databases
import sqlalchemy
from reflector.events import subscribers_startup, subscribers_shutdown
from reflector.settings import settings
database = databases.Database(settings.DATABASE_URL)
metadata = sqlalchemy.MetaData()
transcripts = sqlalchemy.Table(
"transcript",
metadata,
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
sqlalchemy.Column("name", sqlalchemy.String),
sqlalchemy.Column("status", sqlalchemy.String),
sqlalchemy.Column("locked", sqlalchemy.Boolean),
sqlalchemy.Column("duration", sqlalchemy.Integer),
sqlalchemy.Column("created_at", sqlalchemy.DateTime),
sqlalchemy.Column("summary", sqlalchemy.String, nullable=True),
sqlalchemy.Column("topics", sqlalchemy.JSON),
sqlalchemy.Column("events", sqlalchemy.JSON),
# with user attached, optional
sqlalchemy.Column("user_id", sqlalchemy.String),
)
engine = sqlalchemy.create_engine(
settings.DATABASE_URL, connect_args={"check_same_thread": False}
)
metadata.create_all(engine)
async def database_connect():
await database.connect()
async def database_disconnect():
await database.disconnect()
subscribers_startup.append(database_connect)
subscribers_shutdown.append(database_disconnect)
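
A sketch of how the new `transcripts` table can be used elsewhere in the server through the async `databases` API; the column values are illustrative, and `database.connect()` is assumed to have run via the startup subscriber above:

```
# Sketch: insert and fetch a row through the async databases API.
from datetime import datetime
from uuid import uuid4
from reflector.db import database, transcripts

async def create_and_fetch():
    transcript_id = uuid4().hex
    await database.execute(
        transcripts.insert().values(
            id=transcript_id,
            name="Weekly sync",
            status="idle",
            locked=False,
            duration=0,
            created_at=datetime.utcnow(),
            summary=None,
            topics=[],
            events=[],
            user_id=None,
        )
    )
    return await database.fetch_one(
        transcripts.select().where(transcripts.c.id == transcript_id)
    )
```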


@@ -1,211 +0,0 @@
"""
Collection of data classes for streamlining and rigidly structuring
the input and output parameters of functions
"""
import datetime
from dataclasses import dataclass
from typing import List
from sortedcontainers import SortedDict
import av
@dataclass
class TitleSummaryInput:
"""
Data class for the input to generate title and summaries.
The outcome will be used to send query to the LLM for processing.
"""
input_text = str
transcribed_time = float
prompt = str
data = dict
def __init__(self, transcribed_time, input_text=""):
self.input_text = input_text
self.transcribed_time = transcribed_time
self.prompt = f"""
### Human:
Create a JSON object as response.The JSON object must have 2 fields:
i) title and ii) summary.For the title field,generate a short title
for the given text. For the summary field, summarize the given text
in three sentences.
{self.input_text}
### Assistant:
"""
self.data = {"prompt": self.prompt}
self.headers = {"Content-Type": "application/json"}
@dataclass
class IncrementalResult:
"""
Data class for the result of generating one title and summaries.
Defines how a single "topic" looks like.
"""
title = str
description = str
transcript = str
timestamp = str
def __init__(self, title, desc, transcript, timestamp):
self.title = title
self.description = desc
self.transcript = transcript
self.timestamp = timestamp
@dataclass
class TitleSummaryOutput:
"""
Data class for the result of all generated titles and summaries.
The result will be sent back to the client
"""
cmd = str
topics = List[IncrementalResult]
def __init__(self, inc_responses):
self.topics = inc_responses
self.cmd = "UPDATE_TOPICS"
def get_result(self) -> dict:
"""
Return the result dict for displaying the transcription
:return:
"""
return {"cmd": self.cmd, "topics": self.topics}
@dataclass
class ParseLLMResult:
"""
Data class to parse the result returned by the LLM while generating title
and summaries. The result will be sent back to the client.
"""
title = str
description = str
transcript = str
timestamp = str
def __init__(self, param: TitleSummaryInput, output: dict):
self.title = output["title"]
self.transcript = param.input_text
self.description = output.pop("summary")
self.timestamp = str(datetime.timedelta(seconds=round(param.transcribed_time)))
def get_result(self) -> dict:
"""
Return the result dict after parsing the response from LLM
:return:
"""
return {
"title": self.title,
"description": self.description,
"transcript": self.transcript,
"timestamp": self.timestamp,
}
@dataclass
class TranscriptionInput:
"""
Data class to define the input to the transcription function
AudioFrames -> input
"""
frames = List[av.audio.frame.AudioFrame]
def __init__(self, frames):
self.frames = frames
@dataclass
class TranscriptionOutput:
"""
Dataclass to define the result of the transcription function.
The result will be sent back to the client
"""
cmd = str
result_text = str
def __init__(self, result_text):
self.cmd = "SHOW_TRANSCRIPTION"
self.result_text = result_text
def get_result(self) -> dict:
"""
Return the result dict for displaying the transcription
:return:
"""
return {"cmd": self.cmd, "text": self.result_text}
@dataclass
class FinalSummaryResult:
"""
Dataclass to define the result of the final summary function.
The result will be sent back to the client.
"""
cmd = str
final_summary = str
duration = str
def __init__(self, final_summary, time):
self.duration = str(datetime.timedelta(seconds=round(time)))
self.final_summary = final_summary
self.cmd = "DISPLAY_FINAL_SUMMARY"
def get_result(self) -> dict:
"""
Return the result dict for displaying the final summary
:return:
"""
return {
"cmd": self.cmd,
"duration": self.duration,
"summary": self.final_summary,
}
class BlackListedMessages:
"""
Class to hold the blacklisted messages. These messages should be filtered
out and not sent back to the client as part of the transcription.
"""
messages = [
" Thank you.",
" See you next time!",
" Thank you for watching!",
" Bye!",
" And that's what I'm talking about.",
]
@dataclass
class TranscriptionContext:
transcription_text: str
last_transcribed_time: float
incremental_responses: List[IncrementalResult]
sorted_transcripts: dict
data_channel: None # FIXME
logger: None
status: str
def __init__(self, logger):
self.transcription_text = ""
self.last_transcribed_time = 0.0
self.incremental_responses = []
self.data_channel = None
self.sorted_transcripts = SortedDict()
self.status = "idle"
self.logger = logger


@@ -1,5 +1,6 @@
from .base import Processor, ThreadedProcessor, Pipeline # noqa: F401
from .types import AudioFile, Transcript, Word, TitleSummary, FinalSummary # noqa: F401
from .audio_file_writer import AudioFileWriterProcessor # noqa: F401
from .audio_chunker import AudioChunkerProcessor # noqa: F401
from .audio_merge import AudioMergeProcessor # noqa: F401
from .audio_transcript import AudioTranscriptProcessor # noqa: F401


@@ -0,0 +1,39 @@
from reflector.processors.base import Processor
import av
from pathlib import Path
class AudioFileWriterProcessor(Processor):
"""
Write audio frames to a file.
"""
INPUT_TYPE = av.AudioFrame
OUTPUT_TYPE = av.AudioFrame
def __init__(self, path: Path | str):
super().__init__()
if isinstance(path, str):
path = Path(path)
self.path = path
self.out_container = None
self.out_stream = None
async def _push(self, data: av.AudioFrame):
if not self.out_container:
self.path.parent.mkdir(parents=True, exist_ok=True)
self.out_container = av.open(self.path.as_posix(), "w", format="wav")
self.out_stream = self.out_container.add_stream(
"pcm_s16le", rate=data.sample_rate
)
for packet in self.out_stream.encode(data):
self.out_container.mux(packet)
await self.emit(data)
async def _flush(self):
if self.out_container:
for packet in self.out_stream.encode(None):
self.out_container.mux(packet)
self.out_container.close()
self.out_container = None
self.out_stream = None


@@ -1,7 +1,8 @@
from reflector.processors.base import Processor
from reflector.processors.types import AudioFile
from pathlib import Path
import wave
from time import monotonic_ns
from uuid import uuid4
import io
import av
@@ -24,24 +25,27 @@ class AudioMergeProcessor(Processor):
sample_width = frame.format.bytes
# create audio file
from time import monotonic_ns
from uuid import uuid4
uu = uuid4().hex
path = Path(f"audio_{monotonic_ns()}_{uu}.wav")
with wave.open(path.as_posix(), "wb") as wf:
wf.setnchannels(channels)
wf.setsampwidth(sample_width)
wf.setframerate(sample_rate)
fd = io.BytesIO()
out_container = av.open(fd, "w", format="wav")
out_stream = out_container.add_stream("pcm_s16le", rate=sample_rate)
for frame in data:
wf.writeframes(frame.to_ndarray().tobytes())
for packet in out_stream.encode(frame):
out_container.mux(packet)
for packet in out_stream.encode(None):
out_container.mux(packet)
out_container.close()
fd.seek(0)
# emit audio file
audiofile = AudioFile(
path=path,
name=f"{monotonic_ns()}-{uu}.wav",
fd=fd,
sample_rate=sample_rate,
channels=channels,
sample_width=sample_width,
timestamp=data[0].pts * data[0].time_base,
)
await self.emit(audiofile)


@@ -48,9 +48,9 @@ class AudioTranscriptModalProcessor(AudioTranscriptProcessor):
async def _transcript(self, data: AudioFile):
async with httpx.AsyncClient() as client:
self.logger.debug(f"Try to transcribe audio {data.path.name}")
self.logger.debug(f"Try to transcribe audio {data.name}")
files = {
"file": (data.path.name, data.path.open("rb")),
"file": (data.name, data.fd),
}
response = await retry(client.post)(
self.transcript_url,


@@ -1,16 +1,41 @@
from pydantic import BaseModel
from pydantic import BaseModel, PrivateAttr
from pathlib import Path
import tempfile
import io
class AudioFile(BaseModel):
path: Path
name: str
sample_rate: int
channels: int
sample_width: int
timestamp: float = 0.0
_fd: io.BytesIO = PrivateAttr(None)
_path: Path = PrivateAttr(None)
def __init__(self, fd, **kwargs):
super().__init__(**kwargs)
self._fd = fd
@property
def fd(self):
self._fd.seek(0)
return self._fd
@property
def path(self):
if self._path is None:
# write down to disk
filename = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
self._path = Path(filename)
with self._path.open("wb") as f:
f.write(self._fd.getbuffer())
return self._path
def release(self):
self.path.unlink()
if self._path:
self._path.unlink()
class Word(BaseModel):

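With this change `AudioFile` wraps an in-memory buffer and only materializes a file on disk when `.path` is first accessed; a short usage sketch (the source of the WAV bytes is illustrative):

```
# Sketch: AudioFile now carries a BytesIO; .path lazily writes a temporary .wav.
import io
from reflector.processors.types import AudioFile

wav_bytes = open("sample.wav", "rb").read()  # illustrative WAV source
audio = AudioFile(
    fd=io.BytesIO(wav_bytes),
    name="sample.wav",
    sample_rate=16000,
    channels=1,
    sample_width=2,
    timestamp=0.0,
)

audio.fd.read()    # in-memory access; the buffer is rewound on each access
print(audio.path)  # first access writes the buffer to a NamedTemporaryFile
audio.release()    # deletes the temporary file if one was created
```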

@@ -1,381 +0,0 @@
import argparse
import asyncio
import datetime
import json
import os
import wave
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import NoReturn, Union
import aiohttp_cors
import av
import requests
from aiohttp import web
from aiortc import MediaStreamTrack, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaRelay
from faster_whisper import WhisperModel
from reflector.models import (
BlackListedMessages,
FinalSummaryResult,
ParseLLMResult,
TitleSummaryInput,
TitleSummaryOutput,
TranscriptionInput,
TranscriptionOutput,
TranscriptionContext,
)
from reflector.logger import logger
from reflector.utils.run_utils import run_in_executor
from reflector.settings import settings
# WebRTC components
pcs = set()
relay = MediaRelay()
executor = ThreadPoolExecutor()
# Transcription model
model = WhisperModel("tiny", device="cpu", compute_type="float32", num_workers=12)
# LLM
LLM_URL = settings.LLM_URL
if not LLM_URL:
assert settings.LLM_BACKEND == "oobagooda"
LLM_URL = f"http://{settings.LLM_HOST}:{settings.LLM_PORT}/api/v1/generate"
logger.info(f"Using LLM [{settings.LLM_BACKEND}]: {LLM_URL}")
def parse_llm_output(
param: TitleSummaryInput, response: requests.Response
) -> Union[None, ParseLLMResult]:
"""
Function to parse the LLM response
:param param:
:param response:
:return:
"""
try:
output = json.loads(response.json()["results"][0]["text"])
return ParseLLMResult(param, output)
except Exception:
logger.exception("Exception while parsing LLM output")
return None
def get_title_and_summary(
ctx: TranscriptionContext, param: TitleSummaryInput
) -> Union[None, TitleSummaryOutput]:
"""
From the input provided (transcript), query the LLM to generate
topics and summaries
:param param:
:return:
"""
logger.info("Generating title and summary")
# TODO : Handle unexpected output formats from the model
try:
response = requests.post(LLM_URL, headers=param.headers, json=param.data)
output = parse_llm_output(param, response)
if output:
result = output.get_result()
ctx.incremental_responses.append(result)
return TitleSummaryOutput(ctx.incremental_responses)
except Exception:
logger.exception("Exception while generating title and summary")
return None
def channel_send(channel, message: str) -> NoReturn:
"""
Send text messages via the data channel
:param channel:
:param message:
:return:
"""
if channel:
channel.send(message)
def channel_send_increment(
channel, param: Union[FinalSummaryResult, TitleSummaryOutput]
) -> NoReturn:
"""
Send the incremental topics and summaries via the data channel
:param channel:
:param param:
:return:
"""
if channel and param:
message = param.get_result()
channel.send(json.dumps(message))
def channel_send_transcript(ctx: TranscriptionContext) -> NoReturn:
"""
Send the transcription result via the data channel
:param channel:
:return:
"""
if not ctx.data_channel:
return
try:
least_time = next(iter(ctx.sorted_transcripts))
message = ctx.sorted_transcripts[least_time].get_result()
if message:
del ctx.sorted_transcripts[least_time]
if message["text"] not in BlackListedMessages.messages:
ctx.data_channel.send(json.dumps(message))
# Due to exceptions if one of the earlier batches can't return
# a transcript, we don't want to be stuck waiting for the result
# With the threshold size of 3, we pop the first(lost) element
else:
if len(ctx.sorted_transcripts) >= 3:
del ctx.sorted_transcripts[least_time]
except Exception:
logger.exception("Exception while sending transcript")
def get_transcription(
ctx: TranscriptionContext, input_frames: TranscriptionInput
) -> Union[None, TranscriptionOutput]:
"""
From the collected audio frames create transcription by inferring from
the chosen transcription model
:param input_frames:
:return:
"""
ctx.logger.info("Transcribing..")
ctx.sorted_transcripts[input_frames.frames[0].time] = None
# TODO: Find cleaner way, watch "no transcription" issue below
# Passing IO objects instead of temporary files throws an error
# Passing ndarray (type casted with float) does not give any
# transcription. Refer issue,
# https://github.com/guillaumekln/faster-whisper/issues/369
audio_file = "test" + str(datetime.datetime.now())
wf = wave.open(audio_file, "wb")
wf.setnchannels(settings.AUDIO_CHANNELS)
wf.setframerate(settings.AUDIO_SAMPLING_RATE)
wf.setsampwidth(settings.AUDIO_SAMPLING_WIDTH)
for frame in input_frames.frames:
wf.writeframes(b"".join(frame.to_ndarray()))
wf.close()
result_text = ""
try:
segments, _ = model.transcribe(
audio_file,
language="en",
beam_size=5,
vad_filter=True,
vad_parameters={"min_silence_duration_ms": 500},
)
os.remove(audio_file)
segments = list(segments)
result_text = ""
duration = 0.0
for segment in segments:
result_text += segment.text
start_time = segment.start
end_time = segment.end
if not segment.start:
start_time = 0.0
if not segment.end:
end_time = 5.5
duration += end_time - start_time
ctx.last_transcribed_time += duration
ctx.transcription_text += result_text
except Exception:
logger.exception("Exception while transcribing")
result = TranscriptionOutput(result_text)
ctx.sorted_transcripts[input_frames.frames[0].time] = result
return result
def get_final_summary_response(ctx: TranscriptionContext) -> FinalSummaryResult:
"""
Collate the incremental summaries generated so far and return as the final
summary
:return:
"""
final_summary = ""
# Collate inc summaries
for topic in ctx.incremental_responses:
final_summary += topic["description"]
response = FinalSummaryResult(final_summary, ctx.last_transcribed_time)
with open(
"./artefacts/meeting_titles_and_summaries.txt", "a", encoding="utf-8"
) as file:
file.write(json.dumps(ctx.incremental_responses))
return response
class AudioStreamTrack(MediaStreamTrack):
"""
An audio stream track.
"""
kind = "audio"
def __init__(self, ctx: TranscriptionContext, track):
super().__init__()
self.ctx = ctx
self.track = track
self.audio_buffer = av.AudioFifo()
async def recv(self) -> av.audio.frame.AudioFrame:
ctx = self.ctx
frame = await self.track.recv()
self.audio_buffer.write(frame)
if local_frames := self.audio_buffer.read_many(
settings.AUDIO_BUFFER_SIZE, partial=False
):
whisper_result = run_in_executor(
get_transcription,
ctx,
TranscriptionInput(local_frames),
executor=executor,
)
whisper_result.add_done_callback(
lambda f: channel_send_transcript(ctx) if f.result() else None
)
if len(ctx.transcription_text) > 25:
llm_input_text = ctx.transcription_text
ctx.transcription_text = ""
param = TitleSummaryInput(
input_text=llm_input_text, transcribed_time=ctx.last_transcribed_time
)
llm_result = run_in_executor(
get_title_and_summary, ctx, param, executor=executor
)
llm_result.add_done_callback(
lambda f: channel_send_increment(ctx.data_channel, llm_result.result())
if f.result()
else None
)
return frame
async def offer(request: requests.Request) -> web.Response:
"""
Establish the WebRTC connection with the client
:param request:
:return:
"""
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
# client identification
peername = request.transport.get_extra_info("peername")
if peername is not None:
clientid = f"{peername[0]}:{peername[1]}"
else:
clientid = uuid.uuid4()
# create a context for the whole rtc transaction
# add a customised logger to the context
ctx = TranscriptionContext(logger=logger.bind(client=clientid))
# handle RTC peer connection
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("datachannel")
def on_datachannel(channel) -> NoReturn:
ctx.data_channel = channel
ctx.logger = ctx.logger.bind(channel=channel.label)
ctx.logger.info("Channel created by remote party")
@channel.on("message")
def on_message(message: str) -> NoReturn:
ctx.logger.info(f"Message: {message}")
if json.loads(message)["cmd"] == "STOP":
# Placeholder final summary
response = get_final_summary_response()
channel_send_increment(channel, response)
# To-do Add code to stop connection from server side here
# But have to handshake with client once
if isinstance(message, str) and message.startswith("ping"):
channel_send(channel, "pong" + message[4:])
@pc.on("connectionstatechange")
async def on_connectionstatechange() -> None:
ctx.logger.info(f"Connection state changed: {pc.connectionState}")
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
@pc.on("track")
def on_track(track) -> None:
ctx.logger.info(f"Track {track.kind} received")
pc.addTrack(AudioStreamTrack(ctx, relay.subscribe(track)))
await pc.setRemoteDescription(offer)
answer = await pc.createAnswer()
await pc.setLocalDescription(answer)
return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
),
)
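# A client completes the handshake by POSTing its SDP offer as JSON to /offer and
# applying the returned {"sdp", "type"} answer to its own peer connection, e.g.
# pc.setRemoteDescription(answer) in a browser.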
async def on_shutdown(application: web.Application) -> None:
"""
On shutdown, the coroutines that shutdown client connections are
executed
:param application:
:return:
"""
coroutines = [pc.close() for pc in pcs]
await asyncio.gather(*coroutines)
pcs.clear()
def create_app() -> web.Application:
"""
Create the web application
"""
app = web.Application()
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True, expose_headers="*", allow_headers="*"
)
},
)
offer_resource = cors.add(app.router.add_resource("/offer"))
cors.add(offer_resource.add_route("POST", offer))
app.on_shutdown.append(on_shutdown)
return app
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="WebRTC based server for Reflector")
parser.add_argument(
"--host", default="0.0.0.0", help="Server host IP (def: 0.0.0.0)"
)
parser.add_argument(
"--port", type=int, default=1250, help="Server port (def: 1250)"
)
args = parser.parse_args()
app = create_app()
web.run_app(app, access_log=None, host=args.host, port=args.port)


@@ -6,6 +6,12 @@ class Settings(BaseSettings):
OPENMP_KMP_DUPLICATE_LIB_OK: bool = False
# Database
DATABASE_URL: str = "sqlite:///./reflector.sqlite3"
# local data directory (audio for now)
DATA_DIR: str = "./data"
# Whisper
WHISPER_MODEL_SIZE: str = "tiny"
WHISPER_REAL_TIME_MODEL_SIZE: str = "tiny"
@@ -27,7 +33,7 @@ class Settings(BaseSettings):
AUDIO_BUFFER_SIZE: int = 256 * 960
# Audio Transcription
# backends: whisper, banana
# backends: whisper, banana, modal
TRANSCRIPT_BACKEND: str = "whisper"
TRANSCRIPT_URL: str | None = None
TRANSCRIPT_TIMEOUT: int = 90
@@ -49,6 +55,7 @@ class Settings(BaseSettings):
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
# LLM
# available backend: openai, banana, modal, oobagooda
LLM_BACKEND: str = "oobagooda"
# LLM common configuration


@@ -2,17 +2,18 @@ import asyncio
from fastapi import Request, APIRouter
from reflector.events import subscribers_shutdown
from pydantic import BaseModel
from reflector.models import TranscriptionContext
from reflector.logger import logger
from aiortc import RTCPeerConnection, RTCSessionDescription, MediaStreamTrack
from json import loads, dumps
from enum import StrEnum
from pathlib import Path
import av
from reflector.processors import (
Pipeline,
AudioChunkerProcessor,
AudioMergeProcessor,
AudioTranscriptAutoProcessor,
AudioFileWriterProcessor,
TranscriptLinerProcessor,
TranscriptTopicDetectorProcessor,
TranscriptFinalSummaryProcessor,
@@ -25,6 +26,15 @@ sessions = []
router = APIRouter()
class TranscriptionContext(object):
def __init__(self, logger):
self.logger = logger
self.pipeline = None
self.data_channel = None
self.status = "idle"
self.topics = []
class AudioStreamTrack(MediaStreamTrack):
"""
An audio stream track.
@@ -64,7 +74,11 @@ class PipelineEvent(StrEnum):
async def rtc_offer_base(
params: RtcOffer, request: Request, event_callback=None, event_callback_args=None
params: RtcOffer,
request: Request,
event_callback=None,
event_callback_args=None,
audio_filename: Path | None = None,
):
# build an rtc session
offer = RTCSessionDescription(sdp=params.sdp, type=params.type)
@@ -73,7 +87,6 @@ async def rtc_offer_base(
peername = request.client
clientid = f"{peername[0]}:{peername[1]}"
ctx = TranscriptionContext(logger=logger.bind(client=clientid))
ctx.topics = []
async def update_status(status: str):
changed = ctx.status != status
@@ -151,14 +164,18 @@ async def rtc_offer_base(
# create a context for the whole rtc transaction
# add a customised logger to the context
ctx.pipeline = Pipeline(
processors = []
if audio_filename is not None:
processors += [AudioFileWriterProcessor(path=audio_filename)]
processors += [
AudioChunkerProcessor(),
AudioMergeProcessor(),
AudioTranscriptAutoProcessor.as_threaded(callback=on_transcript),
TranscriptLinerProcessor(),
TranscriptTopicDetectorProcessor.as_threaded(callback=on_topic),
TranscriptFinalSummaryProcessor.as_threaded(callback=on_final_summary),
)
]
ctx.pipeline = Pipeline(*processors)
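# When an audio_filename is provided, AudioFileWriterProcessor sits at the front of
# the pipeline and writes the raw stream to disk before chunking; this is what later
# lets the /transcripts/{id}/audio endpoints serve (and convert) the recording.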
# FIXME: warmup is not working well yet
# await ctx.pipeline.warmup()


@@ -5,13 +5,20 @@ from fastapi import (
WebSocket,
WebSocketDisconnect,
)
from fastapi.responses import FileResponse
from starlette.concurrency import run_in_threadpool
from pydantic import BaseModel, Field
from uuid import UUID, uuid4
from uuid import uuid4
from datetime import datetime
from fastapi_pagination import Page, paginate
from reflector.logger import logger
from reflector.db import database, transcripts
from reflector.settings import settings
from .rtc_offer import rtc_offer_base, RtcOffer, PipelineEvent
from typing import Optional
from pathlib import Path
from tempfile import NamedTemporaryFile
import av
router = APIRouter()
@@ -21,6 +28,10 @@ router = APIRouter()
# ==============================================================
def generate_uuid4():
return str(uuid4())
def generate_transcript_name():
now = datetime.utcnow()
return f"Transcript {now.strftime('%Y-%m-%d %H:%M:%S')}"
@@ -31,7 +42,7 @@ class TranscriptText(BaseModel):
class TranscriptTopic(BaseModel):
id: UUID = Field(default_factory=uuid4)
id: str = Field(default_factory=generate_uuid4)
title: str
summary: str
transcript: str
@@ -48,7 +59,7 @@ class TranscriptEvent(BaseModel):
class Transcript(BaseModel):
id: UUID = Field(default_factory=uuid4)
id: str = Field(default_factory=generate_uuid4)
name: str = Field(default_factory=generate_transcript_name)
status: str = "idle"
locked: bool = False
@@ -70,21 +81,87 @@ class Transcript(BaseModel):
else:
self.topics.append(topic)
def events_dump(self, mode="json"):
return [event.model_dump(mode=mode) for event in self.events]
def topics_dump(self, mode="json"):
return [topic.model_dump(mode=mode) for topic in self.topics]
def convert_audio_to_mp3(self):
fn = self.audio_mp3_filename
if fn.exists():
return
logger.info(f"Converting audio to mp3: {self.audio_filename}")
inp = av.open(self.audio_filename.as_posix(), "r")
# create temporary file for mp3
with NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
out = av.open(tmp.name, "w")
stream = out.add_stream("mp3")
for frame in inp.decode(audio=0):
frame.pts = None
for packet in stream.encode(frame):
out.mux(packet)
for packet in stream.encode(None):
out.mux(packet)
out.close()
# move temporary file to final location
Path(tmp.name).rename(fn)
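# The conversion above encodes into a NamedTemporaryFile and only renames it into
# place at the end, so a partially converted file never appears at audio_mp3_filename;
# frame.pts is cleared, presumably so the mp3 encoder assigns fresh timestamps rather
# than reusing the wav ones.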
def unlink(self):
self.data_path.unlink(missing_ok=True)
@property
def data_path(self):
return Path(settings.DATA_DIR) / self.id
@property
def audio_filename(self):
return self.data_path / "audio.wav"
@property
def audio_mp3_filename(self):
return self.data_path / "audio.mp3"
class TranscriptController:
transcripts: list[Transcript] = []
async def get_all(self) -> list[Transcript]:
query = transcripts.select()
results = await database.fetch_all(query)
return results
def get_all(self) -> list[Transcript]:
return self.transcripts
async def get_by_id(self, transcript_id: str) -> Transcript | None:
query = transcripts.select().where(transcripts.c.id == transcript_id)
result = await database.fetch_one(query)
if not result:
return None
return Transcript(**result)
def get_by_id(self, transcript_id: UUID) -> Transcript | None:
return next((t for t in self.transcripts if t.id == transcript_id), None)
async def add(self, name: str):
transcript = Transcript(name=name)
query = transcripts.insert().values(**transcript.model_dump())
await database.execute(query)
return transcript
def add(self, transcript: Transcript):
self.transcripts.append(transcript)
async def update(self, transcript: Transcript, values: dict):
query = (
transcripts.update()
.where(transcripts.c.id == transcript.id)
.values(**values)
)
await database.execute(query)
for key, value in values.items():
setattr(transcript, key, value)
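# update() persists the new values to the database first and then mirrors them on the
# in-memory model, so callers keep a fresh Transcript object without re-fetching it.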
def remove(self, transcript: Transcript):
self.transcripts.remove(transcript)
async def remove_by_id(self, transcript_id: str) -> None:
transcript = await self.get_by_id(transcript_id)
if not transcript:
return
transcript.unlink()
query = transcripts.delete().where(transcripts.c.id == transcript_id)
await database.execute(query)
transcripts_controller = TranscriptController()
@@ -96,7 +173,7 @@ transcripts_controller = TranscriptController()
class GetTranscript(BaseModel):
id: UUID
id: str
name: str
status: str
locked: bool
@@ -123,15 +200,12 @@ class DeletionStatus(BaseModel):
@router.get("/transcripts", response_model=Page[GetTranscript])
async def transcripts_list():
return paginate(transcripts_controller.get_all())
return paginate(await transcripts_controller.get_all())
@router.post("/transcripts", response_model=GetTranscript)
async def transcripts_create(info: CreateTranscript):
transcript = Transcript()
transcript.name = info.name
transcripts_controller.add(transcript)
return transcript
return await transcripts_controller.add(info.name)
# ==============================================================
@@ -140,54 +214,72 @@ async def transcripts_create(info: CreateTranscript):
@router.get("/transcripts/{transcript_id}", response_model=GetTranscript)
async def transcript_get(transcript_id: UUID):
transcript = transcripts_controller.get_by_id(transcript_id)
async def transcript_get(transcript_id: str):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
return transcript
@router.patch("/transcripts/{transcript_id}", response_model=GetTranscript)
async def transcript_update(transcript_id: UUID, info: UpdateTranscript):
transcript = transcripts_controller.get_by_id(transcript_id)
async def transcript_update(transcript_id: str, info: UpdateTranscript):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
values = {}
if info.name is not None:
transcript.name = info.name
values["name"] = info.name
if info.locked is not None:
transcript.locked = info.locked
values["locked"] = info.locked
await transcripts_controller.update(transcript, values)
return transcript
@router.delete("/transcripts/{transcript_id}", response_model=DeletionStatus)
async def transcript_delete(transcript_id: UUID):
transcript = transcripts_controller.get_by_id(transcript_id)
async def transcript_delete(transcript_id: str):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
transcripts_controller.remove(transcript)
await transcripts_controller.remove_by_id(transcript.id)
return DeletionStatus(status="ok")
@router.get("/transcripts/{transcript_id}/audio")
async def transcript_get_audio(transcript_id: UUID):
transcript = transcripts_controller.get_by_id(transcript_id)
async def transcript_get_audio(transcript_id: str):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
# TODO: Implement audio generation
return HTTPException(status_code=500, detail="Not implemented")
if not transcript.audio_filename.exists():
raise HTTPException(status_code=404, detail="Audio not found")
return FileResponse(transcript.audio_filename, media_type="audio/wav")
@router.get("/transcripts/{transcript_id}/audio/mp3")
async def transcript_get_audio_mp3(transcript_id: str):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
if not transcript.audio_filename.exists():
raise HTTPException(status_code=404, detail="Audio not found")
await run_in_threadpool(transcript.convert_audio_to_mp3)
return FileResponse(transcript.audio_mp3_filename, media_type="audio/mp3")
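# The mp3 conversion runs in a threadpool so it does not block the event loop, and
# convert_audio_to_mp3 returns early when the file already exists, so repeated
# requests reuse the cached mp3.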
@router.get("/transcripts/{transcript_id}/topics", response_model=list[TranscriptTopic])
async def transcript_get_topics(transcript_id: UUID):
transcript = transcripts_controller.get_by_id(transcript_id)
async def transcript_get_topics(transcript_id: str):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
return transcript.topics
@router.get("/transcripts/{transcript_id}/events")
async def transcript_get_websocket_events(transcript_id: UUID):
async def transcript_get_websocket_events(transcript_id: str):
pass
@@ -200,20 +292,20 @@ class WebsocketManager:
def __init__(self):
self.active_connections = {}
async def connect(self, transcript_id: UUID, websocket: WebSocket):
async def connect(self, transcript_id: str, websocket: WebSocket):
await websocket.accept()
if transcript_id not in self.active_connections:
self.active_connections[transcript_id] = []
self.active_connections[transcript_id].append(websocket)
def disconnect(self, transcript_id: UUID, websocket: WebSocket):
def disconnect(self, transcript_id: str, websocket: WebSocket):
if transcript_id not in self.active_connections:
return
self.active_connections[transcript_id].remove(websocket)
if not self.active_connections[transcript_id]:
del self.active_connections[transcript_id]
async def send_json(self, transcript_id: UUID, message):
async def send_json(self, transcript_id: str, message):
if transcript_id not in self.active_connections:
return
for connection in self.active_connections[transcript_id][:]:
@@ -227,8 +319,8 @@ ws_manager = WebsocketManager()
@router.websocket("/transcripts/{transcript_id}/events")
async def transcript_events_websocket(transcript_id: UUID, websocket: WebSocket):
transcript = transcripts_controller.get_by_id(transcript_id)
async def transcript_events_websocket(transcript_id: str, websocket: WebSocket):
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
@@ -260,7 +352,7 @@ async def handle_rtc_event(event: PipelineEvent, args, data):
# transcript from the database for each event.
# print(f"Event: {event}", args, data)
transcript_id = args
transcript = transcripts_controller.get_by_id(transcript_id)
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
return
@@ -272,6 +364,12 @@ async def handle_rtc_event(event: PipelineEvent, args, data):
# FIXME don't do copy
if event == PipelineEvent.TRANSCRIPT:
resp = transcript.add_event(event=event, data=TranscriptText(text=data.text))
await transcripts_controller.update(
transcript,
{
"events": transcript.events_dump(),
},
)
elif event == PipelineEvent.TOPIC:
topic = TranscriptTopic(
@@ -283,14 +381,34 @@ async def handle_rtc_event(event: PipelineEvent, args, data):
resp = transcript.add_event(event=event, data=topic)
transcript.upsert_topic(topic)
await transcripts_controller.update(
transcript,
{
"events": transcript.events_dump(),
"topics": transcript.topics_dump(),
},
)
elif event == PipelineEvent.FINAL_SUMMARY:
final_summary = TranscriptFinalSummary(summary=data.summary)
resp = transcript.add_event(event=event, data=final_summary)
transcript.summary = final_summary
await transcripts_controller.update(
transcript,
{
"events": transcript.events_dump(),
"summary": final_summary.summary,
},
)
elif event == PipelineEvent.STATUS:
resp = transcript.add_event(event=event, data=data)
transcript.status = data.value
await transcripts_controller.update(
transcript,
{
"events": transcript.events_dump(),
"status": data.value,
},
)
else:
logger.warning(f"Unknown event: {event}")
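# Note: every branch above rewrites the transcript's full events (and topics) JSON
# on each pipeline event; simple and fine for short meetings, though an append-style
# update may scale better for long sessions.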
@@ -302,9 +420,9 @@ async def handle_rtc_event(event: PipelineEvent, args, data):
@router.post("/transcripts/{transcript_id}/record/webrtc")
async def transcript_record_webrtc(
transcript_id: UUID, params: RtcOffer, request: Request
transcript_id: str, params: RtcOffer, request: Request
):
transcript = transcripts_controller.get_by_id(transcript_id)
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise HTTPException(status_code=404, detail="Transcript not found")
@@ -317,4 +435,5 @@ async def transcript_record_webrtc(
request,
event_callback=handle_rtc_event,
event_callback_args=transcript_id,
audio_filename=transcript.audio_filename,
)

BIN
server/test.db Normal file

Binary file not shown.


@@ -1,63 +0,0 @@
import pytest
from unittest.mock import patch
@pytest.mark.asyncio
async def test_basic_rtc_server(aiohttp_server, event_loop):
# goal is to start the server, and send rtc audio to it
# validate the events received
import argparse
import json
from pathlib import Path
from reflector.server import create_app
from reflector.stream_client import StreamClient
from reflector.models import TitleSummaryOutput
from aiortc.contrib.signaling import add_signaling_arguments, create_signaling
# customize settings to have a mock LLM server
with patch("reflector.server.get_title_and_summary") as mock_llm:
# any response from mock_llm will be test topic
mock_llm.return_value = TitleSummaryOutput(["topic_test"])
# create the server
app = create_app()
server = await aiohttp_server(app)
url = f"http://{server.host}:{server.port}/offer"
# create signaling
parser = argparse.ArgumentParser()
add_signaling_arguments(parser)
args = parser.parse_args(["-s", "tcp-socket"])
signaling = create_signaling(args)
# create the client
path = Path(__file__).parent / "records" / "test_mathieu_hello.wav"
client = StreamClient(signaling, url=url, play_from=path.as_posix())
await client.start()
# we just want the first transcription
# and topic update messages
marks = {
"SHOW_TRANSCRIPTION": False,
"UPDATE_TOPICS": False,
}
async for rawmsg in client.get_reader():
msg = json.loads(rawmsg)
cmd = msg["cmd"]
if cmd == "SHOW_TRANSCRIPTION":
assert "text" in msg
assert "want to share my incredible experience" in msg["text"]
elif cmd == "UPDATE_TOPICS":
assert "topics" in msg
assert "topic_test" in msg["topics"]
marks[cmd] = True
# break if we have all the events we need
if all(marks.values()):
break
# stop the server
await server.close()
await client.stop()


@@ -70,11 +70,15 @@ async def dummy_llm():
@pytest.mark.asyncio
async def test_transcript_rtc_and_websocket(dummy_transcript, dummy_llm):
async def test_transcript_rtc_and_websocket(tmpdir, dummy_transcript, dummy_llm):
# goal: start the server, exchange RTC, receive websocket events
# because of that, we need to start the server in a thread
# to be able to connect with aiortc
from reflector.settings import settings
settings.DATA_DIR = Path(tmpdir)
# start server
host = "127.0.0.1"
port = 1255
@@ -188,3 +192,13 @@ async def test_transcript_rtc_and_websocket(dummy_transcript, dummy_llm):
resp = await ac.get(f"/transcripts/{tid}")
assert resp.status_code == 200
assert resp.json()["status"] == "ended"
# check that audio is available
resp = await ac.get(f"/transcripts/{tid}/audio")
assert resp.status_code == 200
assert resp.headers["Content-Type"] == "audio/wav"
# check that audio/mp3 is available
resp = await ac.get(f"/transcripts/{tid}/audio/mp3")
assert resp.status_code == 200
assert resp.headers["Content-Type"] == "audio/mp3"


@@ -1,4 +1,4 @@
import "./globals.scss";
import "./styles/globals.scss";
import { Roboto } from "next/font/google";
import Head from "next/head";


@@ -17,15 +17,3 @@ export function Mulberry32(seed) {
return ((t ^ (t >>> 14)) >>> 0) / 4294967296;
};
}
export const formatTime = (seconds) => {
let hours = Math.floor(seconds / 3600);
let minutes = Math.floor((seconds % 3600) / 60);
let secs = Math.floor(seconds % 60);
let timeString = `${hours > 0 ? hours + ":" : ""}${minutes
.toString()
.padStart(2, "0")}:${secs.toString().padStart(2, "0")}`;
return timeString;
};

11
www/app/lib/time.js Normal file

@@ -0,0 +1,11 @@
export const formatTime = (seconds) => {
let hours = Math.floor(seconds / 3600);
let minutes = Math.floor((seconds % 3600) / 60);
let secs = Math.floor(seconds % 60);
let timeString = `${hours > 0 ? hours + ":" : ""}${minutes
.toString()
.padStart(2, "0")}:${secs.toString().padStart(2, "0")}`;
return timeString;
};
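// Example outputs (derived from the implementation above):
// formatTime(65)   -> "01:05"
// formatTime(3725) -> "1:02:05"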


@@ -1,57 +1,4 @@
"use client";
import React, { useEffect, useState } from "react";
import Recorder from "./components/record.js";
import { Dashboard } from "./components/dashboard.js";
import useWebRTC from "./components/webrtc.js";
import useTranscript from "./components/transcript.js";
import { useWebSockets } from "./components/websocket.js";
import "../public/button.css";
const App = () => {
const [stream, setStream] = useState(null);
const [disconnected, setDisconnected] = useState(false);
const useActiveTopic = useState(null);
useEffect(() => {
if (process.env.NEXT_PUBLIC_ENV === "development") {
document.onkeyup = (e) => {
if (e.key === "d") {
setDisconnected((prev) => !prev);
}
};
}
}, []);
const transcript = useTranscript();
const webRTC = useWebRTC(stream, transcript.response?.id);
const webSockets = useWebSockets(transcript.response?.id);
return (
<div className="flex flex-col items-center h-[100svh] bg-gradient-to-r from-[#8ec5fc30] to-[#e0c3fc42]">
<div className="h-[13svh] flex flex-col justify-center items-center">
<h1 className="text-5xl font-bold text-blue-500">Reflector</h1>
<p className="text-gray-500">Capture The Signal, Not The Noise</p>
</div>
<Recorder
setStream={setStream}
onStop={() => {
webRTC?.peer?.send(JSON.stringify({ cmd: "STOP" }));
setStream(null);
}}
topics={webSockets.topics}
useActiveTopic={useActiveTopic}
/>
<Dashboard
transcriptionText={webSockets.transcriptText}
finalSummary={webSockets.finalSummary}
topics={webSockets.topics}
stream={stream}
disconnected={disconnected}
useActiveTopic={useActiveTopic}
/>
</div>
);
};
export default App;
import { redirect } from "next/navigation";
export default async function Index({ params }) {
redirect("/transcripts/new");
}

Binary file not shown (image removed; previously 84 KiB).


@@ -0,0 +1,56 @@
"use client";
import React, { useEffect, useState } from "react";
import Recorder from "../recorder";
import { Dashboard } from "../dashboard";
import useWebRTC from "../useWebRTC";
import useTranscript from "../useTranscript";
import { useWebSockets } from "../useWebSockets";
import "../../styles/button.css";
const App = () => {
const [stream, setStream] = useState(null);
const [disconnected, setDisconnected] = useState(false);
useEffect(() => {
if (process.env.NEXT_PUBLIC_ENV === "development") {
document.onkeyup = (e) => {
if (e.key === "d") {
setDisconnected((prev) => !prev);
}
};
}
}, []);
const transcript = useTranscript();
const webRTC = useWebRTC(stream, transcript.response?.id);
const webSockets = useWebSockets(transcript.response?.id);
return (
<div className="flex flex-col items-center h-[100svh] bg-gradient-to-r from-[#8ec5fc30] to-[#e0c3fc42]">
<div className="h-[13svh] flex flex-col justify-center items-center">
<h1 className="text-5xl font-bold text-blue-500">Reflector</h1>
<p className="text-gray-500">Capture The Signal, Not The Noise</p>
</div>
<Recorder
setStream={setStream}
onStop={() => {
webRTC?.peer?.send(JSON.stringify({ cmd: "STOP" }));
setStream(null);
}}
/>
<hr />
<Dashboard
transcriptionText={webSockets.transcriptText}
finalSummary={webSockets.finalSummary}
topics={webSockets.topics}
stream={stream}
disconnected={disconnected}
/>
</div>
);
};
export default App;


@@ -10,7 +10,7 @@ import Dropdown from "react-dropdown";
import "react-dropdown/style.css";
import CustomRecordPlugin from "./CustomRecordPlugin";
import { formatTime } from "../utils";
import { formatTime } from "../lib/time";
const AudioInputsDropdown = (props) => {
const [ddOptions, setDdOptions] = useState([]);


@@ -1,5 +0,0 @@
// A faulty API route to test Sentry's error monitoring
export default function handler(_req, res) {
throw new Error("Sentry Example API Route Error");
res.status(200).json({ name: "John Doe" });
}


@@ -1,87 +0,0 @@
import Head from "next/head";
import * as Sentry from "@sentry/nextjs";
export default function Home() {
return (
<div>
<Head>
<title>Sentry Onboarding</title>
<meta name="description" content="Test Sentry for your Next.js app!" />
</Head>
<main
style={{
minHeight: "100vh",
display: "flex",
flexDirection: "column",
justifyContent: "center",
alignItems: "center",
}}
>
<h1 style={{ fontSize: "4rem", margin: "14px 0" }}>
<svg
style={{
height: "1em",
}}
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 200 44"
>
<path
fill="currentColor"
d="M124.32,28.28,109.56,9.22h-3.68V34.77h3.73V15.19l15.18,19.58h3.26V9.22h-3.73ZM87.15,23.54h13.23V20.22H87.14V12.53h14.93V9.21H83.34V34.77h18.92V31.45H87.14ZM71.59,20.3h0C66.44,19.06,65,18.08,65,15.7c0-2.14,1.89-3.59,4.71-3.59a12.06,12.06,0,0,1,7.07,2.55l2-2.83a14.1,14.1,0,0,0-9-3c-5.06,0-8.59,3-8.59,7.27,0,4.6,3,6.19,8.46,7.52C74.51,24.74,76,25.78,76,28.11s-2,3.77-5.09,3.77a12.34,12.34,0,0,1-8.3-3.26l-2.25,2.69a15.94,15.94,0,0,0,10.42,3.85c5.48,0,9-2.95,9-7.51C79.75,23.79,77.47,21.72,71.59,20.3ZM195.7,9.22l-7.69,12-7.64-12h-4.46L186,24.67V34.78h3.84V24.55L200,9.22Zm-64.63,3.46h8.37v22.1h3.84V12.68h8.37V9.22H131.08ZM169.41,24.8c3.86-1.07,6-3.77,6-7.63,0-4.91-3.59-8-9.38-8H154.67V34.76h3.8V25.58h6.45l6.48,9.2h4.44l-7-9.82Zm-10.95-2.5V12.6h7.17c3.74,0,5.88,1.77,5.88,4.84s-2.29,4.86-5.84,4.86Z M29,2.26a4.67,4.67,0,0,0-8,0L14.42,13.53A32.21,32.21,0,0,1,32.17,40.19H27.55A27.68,27.68,0,0,0,12.09,17.47L6,28a15.92,15.92,0,0,1,9.23,12.17H4.62A.76.76,0,0,1,4,39.06l2.94-5a10.74,10.74,0,0,0-3.36-1.9l-2.91,5a4.54,4.54,0,0,0,1.69,6.24A4.66,4.66,0,0,0,4.62,44H19.15a19.4,19.4,0,0,0-8-17.31l2.31-4A23.87,23.87,0,0,1,23.76,44H36.07a35.88,35.88,0,0,0-16.41-31.8l4.67-8a.77.77,0,0,1,1.05-.27c.53.29,20.29,34.77,20.66,35.17a.76.76,0,0,1-.68,1.13H40.6q.09,1.91,0,3.81h4.78A4.59,4.59,0,0,0,50,39.43a4.49,4.49,0,0,0-.62-2.28Z"
></path>
</svg>
</h1>
<p>Get started by sending us a sample error:</p>
<button
type="button"
style={{
padding: "12px",
cursor: "pointer",
backgroundColor: "#AD6CAA",
borderRadius: "4px",
border: "none",
color: "white",
fontSize: "14px",
margin: "18px",
}}
onClick={async () => {
const transaction = Sentry.startTransaction({
name: "Example Frontend Transaction",
});
Sentry.configureScope((scope) => {
scope.setSpan(transaction);
});
try {
const res = await fetch("/api/sentry-example-api");
if (!res.ok) {
throw new Error("Sentry Example Frontend Error");
}
} finally {
transaction.finish();
}
}}
>
Throw error!
</button>
<p>
Next, look for the error on the{" "}
<a href="https://monadical.sentry.io/issues/?project=4505634666577920">
Issues Page
</a>
.
</p>
<p style={{ marginTop: "24px" }}>
For more information, see{" "}
<a href="https://docs.sentry.io/platforms/javascript/guides/nextjs/">
https://docs.sentry.io/platforms/javascript/guides/nextjs/
</a>
</p>
</main>
</div>
);
}