Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-20 20:29:06 +00:00)

Compare commits: mathieu/ca ... dependabot (54 commits)
Commit SHA1s:

080895be74, b42f7cfc60, c546e69739, 3f1fe8c9bf, 5f143fe364, 79f161436e,
5cba5d310d, 43ea9349f5, b3a8e9739d, 369ecdff13, fc363bd49b, 962038ee3f,
3b85ff3bdf, cde99ca271, f81fe9948a, 5a5b323382, 02a3938822, 7f5a4c9ddc,
08d88ec349, c4d2825c81, 0663700a61, dc82f8bb3b, 457823e1c1, 695d1a957d,
ccffdba75b, 84a381220b, 5f2f0e9317, 88ed7cfa78, 6f0c7c1a5e, 9dfd76996f,
55cc8637c6, f5331a2107, 124ce03bf8, 7030e0f236, 37f0110892, cf2896a7f4,
aabf2c2572, 6a7b08f016, e2736563d9, 0f54b7782d, 359280dd34, 9265d201b5,
52f9f533d7, 0c3878ac3c, d70beee51b, bc5b351d2b, 07981e8090, 7e366f6338,
7592679a35, af16178f86, 3ea7f6b7b6, 009590c080, fe5d344cff, 86455ce573
5 .github/workflows/db_migrations.yml (vendored)
@@ -2,6 +2,8 @@ name: Test Database Migrations

 on:
   push:
+    branches:
+      - main
     paths:
       - "server/migrations/**"
       - "server/reflector/db/**"

@@ -17,6 +19,9 @@ on:

 jobs:
   test-migrations:
     runs-on: ubuntu-latest
+    concurrency:
+      group: db-ubuntu-latest-${{ github.ref }}
+      cancel-in-progress: true
     services:
       postgres:
         image: postgres:17
77 .github/workflows/deploy.yml (vendored)
@@ -8,18 +8,30 @@ env:
   ECR_REPOSITORY: reflector

 jobs:
-  deploy:
-    runs-on: ubuntu-latest
+  build:
+    strategy:
+      matrix:
+        include:
+          - platform: linux/amd64
+            runner: linux-amd64
+            arch: amd64
+          - platform: linux/arm64
+            runner: linux-arm64
+            arch: arm64
+
+    runs-on: ${{ matrix.runner }}

     permissions:
       deployments: write
       contents: read

+    outputs:
+      registry: ${{ steps.login-ecr.outputs.registry }}
+
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
+        uses: aws-actions/configure-aws-credentials@v4
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

@@ -27,21 +39,52 @@ jobs:

       - name: Login to Amazon ECR
         id: login-ecr
-        uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: aws-actions/amazon-ecr-login@v2

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

-      - name: Build and push
-        id: docker_build
-        uses: docker/build-push-action@v4
+      - name: Build and push ${{ matrix.arch }}
+        uses: docker/build-push-action@v5
         with:
           context: server
-          platforms: linux/amd64,linux/arm64
+          platforms: ${{ matrix.platform }}
           push: true
-          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
+          cache-from: type=gha,scope=${{ matrix.arch }}
+          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
+          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
+          provenance: false
+
+  create-manifest:
+    runs-on: ubuntu-latest
+    needs: [build]
+
+    permissions:
+      deployments: write
+      contents: read
+
+    steps:
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+
+      - name: Login to Amazon ECR
+        uses: aws-actions/amazon-ecr-login@v2
+
+      - name: Create and push multi-arch manifest
+        run: |
+          # Get the registry URL (since we can't easily access job outputs in matrix)
+          ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com
+
+          docker manifest create \
+            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
+            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
+            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64
+
+          docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest
+
+          echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"
45 .github/workflows/test_next_server.yml (vendored, new file)
@@ -0,0 +1,45 @@
name: Test Next Server

on:
  pull_request:
    paths:
      - "www/**"
  push:
    branches:
      - main
    paths:
      - "www/**"

jobs:
  test-next-server:
    runs-on: ubuntu-latest

    defaults:
      run:
        working-directory: ./www

    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 8

      - name: Setup Node.js cache
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
          cache-dependency-path: './www/pnpm-lock.yaml'

      - name: Install dependencies
        run: pnpm install

      - name: Run tests
        run: pnpm test
49 .github/workflows/test_server.yml (vendored)
@@ -5,12 +5,17 @@ on:
     paths:
       - "server/**"
   push:
     branches:
       - main
+    paths:
+      - "server/**"

 jobs:
   pytest:
     runs-on: ubuntu-latest
+    concurrency:
+      group: pytest-${{ github.ref }}
+      cancel-in-progress: true
     services:
       redis:
         image: redis:6

@@ -19,29 +24,47 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Install uv
-        uses: astral-sh/setup-uv@v3
+        uses: astral-sh/setup-uv@v6
         with:
           enable-cache: true
+          working-directory: server

       - name: Tests
         run: |
           cd server
           uv run -m pytest -v tests

-  docker:
-    runs-on: ubuntu-latest
+  docker-amd64:
+    runs-on: linux-amd64
+    concurrency:
+      group: docker-amd64-${{ github.ref }}
+      cancel-in-progress: true
     steps:
       - uses: actions/checkout@v4
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Build and push
-        id: docker_build
-        uses: docker/build-push-action@v4
+        uses: docker/setup-buildx-action@v3
+      - name: Build AMD64
+        uses: docker/build-push-action@v6
         with:
           context: server
-          platforms: linux/amd64,linux/arm64
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          platforms: linux/amd64
+          cache-from: type=gha,scope=amd64
+          cache-to: type=gha,mode=max,scope=amd64
+          github-token: ${{ secrets.GHA_CACHE_TOKEN }}

+  docker-arm64:
+    runs-on: linux-arm64
+    concurrency:
+      group: docker-arm64-${{ github.ref }}
+      cancel-in-progress: true
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build ARM64
+        uses: docker/build-push-action@v6
+        with:
+          context: server
+          platforms: linux/arm64
+          cache-from: type=gha,scope=arm64
+          cache-to: type=gha,mode=max,scope=arm64
+          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
5 .gitignore (vendored)
@@ -14,4 +14,7 @@ data/
 www/REFACTOR.md
 www/reload-frontend
 server/test.sqlite
 CLAUDE.local.md
+www/.env.development
+www/.env.production
+.playwright-mcp
1 .gitleaksignore (new file)
@@ -0,0 +1 @@
b9d891d3424f371642cb032ecfd0e2564470a72c:server/tests/test_transcripts_recording_deletion.py:generic-api-key:15
@@ -27,3 +27,8 @@ repos:
         files: ^server/
       - id: ruff-format
         files: ^server/
+
+  - repo: https://github.com/gitleaks/gitleaks
+    rev: v8.28.0
+    hooks:
+      - id: gitleaks
101 CHANGELOG.md
@@ -1,5 +1,106 @@
 # Changelog

+## [0.10.0](https://github.com/Monadical-SAS/reflector/compare/v0.9.0...v0.10.0) (2025-09-11)
+
+### Features
+
+* replace nextjs-config with environment variables ([#632](https://github.com/Monadical-SAS/reflector/issues/632)) ([369ecdf](https://github.com/Monadical-SAS/reflector/commit/369ecdff13f3862d926a9c0b87df52c9d94c4dde))
+
+### Bug Fixes
+
+* anonymous users transcript permissions ([#621](https://github.com/Monadical-SAS/reflector/issues/621)) ([f81fe99](https://github.com/Monadical-SAS/reflector/commit/f81fe9948a9237b3e0001b2d8ca84f54d76878f9))
+* auth post ([#624](https://github.com/Monadical-SAS/reflector/issues/624)) ([cde99ca](https://github.com/Monadical-SAS/reflector/commit/cde99ca2716f84ba26798f289047732f0448742e))
+* auth post ([#626](https://github.com/Monadical-SAS/reflector/issues/626)) ([3b85ff3](https://github.com/Monadical-SAS/reflector/commit/3b85ff3bdf4fb053b103070646811bc990c0e70a))
+* auth post ([#627](https://github.com/Monadical-SAS/reflector/issues/627)) ([962038e](https://github.com/Monadical-SAS/reflector/commit/962038ee3f2a555dc3c03856be0e4409456e0996))
+* missing follow_redirects=True on modal endpoint ([#630](https://github.com/Monadical-SAS/reflector/issues/630)) ([fc363bd](https://github.com/Monadical-SAS/reflector/commit/fc363bd49b17b075e64f9186e5e0185abc325ea7))
+* sync backend and frontend token refresh logic ([#614](https://github.com/Monadical-SAS/reflector/issues/614)) ([5a5b323](https://github.com/Monadical-SAS/reflector/commit/5a5b3233820df9536da75e87ce6184a983d4713a))
+
+## [0.9.0](https://github.com/Monadical-SAS/reflector/compare/v0.8.2...v0.9.0) (2025-09-06)
+
+### Features
+
+* frontend openapi react query ([#606](https://github.com/Monadical-SAS/reflector/issues/606)) ([c4d2825](https://github.com/Monadical-SAS/reflector/commit/c4d2825c81f81ad8835629fbf6ea8c7383f8c31b))
+
+### Bug Fixes
+
+* align whisper transcriber api with parakeet ([#602](https://github.com/Monadical-SAS/reflector/issues/602)) ([0663700](https://github.com/Monadical-SAS/reflector/commit/0663700a615a4af69a03c96c410f049e23ec9443))
+* kv use tls explicit ([#610](https://github.com/Monadical-SAS/reflector/issues/610)) ([08d88ec](https://github.com/Monadical-SAS/reflector/commit/08d88ec349f38b0d13e0fa4cb73486c8dfd31836))
+* source kind for file processing ([#601](https://github.com/Monadical-SAS/reflector/issues/601)) ([dc82f8b](https://github.com/Monadical-SAS/reflector/commit/dc82f8bb3bdf3ab3d4088e592a30fd63907319e1))
+* token refresh locking ([#613](https://github.com/Monadical-SAS/reflector/issues/613)) ([7f5a4c9](https://github.com/Monadical-SAS/reflector/commit/7f5a4c9ddc7fd098860c8bdda2ca3b57f63ded2f))
+
+## [0.8.2](https://github.com/Monadical-SAS/reflector/compare/v0.8.1...v0.8.2) (2025-08-29)
+
+### Bug Fixes
+
+* search-logspam ([#593](https://github.com/Monadical-SAS/reflector/issues/593)) ([695d1a9](https://github.com/Monadical-SAS/reflector/commit/695d1a957d4cd862753049f9beed88836cabd5ab))
+
+## [0.8.1](https://github.com/Monadical-SAS/reflector/compare/v0.8.0...v0.8.1) (2025-08-29)
+
+### Bug Fixes
+
+* make webhook secret/url allowing null ([#590](https://github.com/Monadical-SAS/reflector/issues/590)) ([84a3812](https://github.com/Monadical-SAS/reflector/commit/84a381220bc606231d08d6f71d4babc818fa3c75))
+
+## [0.8.0](https://github.com/Monadical-SAS/reflector/compare/v0.7.3...v0.8.0) (2025-08-29)
+
+### Features
+
+* **cleanup:** add automatic data retention for public instances ([#574](https://github.com/Monadical-SAS/reflector/issues/574)) ([6f0c7c1](https://github.com/Monadical-SAS/reflector/commit/6f0c7c1a5e751713366886c8e764c2009e12ba72))
+* **rooms:** add webhook for transcript completion ([#578](https://github.com/Monadical-SAS/reflector/issues/578)) ([88ed7cf](https://github.com/Monadical-SAS/reflector/commit/88ed7cfa7804794b9b54cad4c3facc8a98cf85fd))
+
+### Bug Fixes
+
+* file pipeline status reporting and websocket updates ([#589](https://github.com/Monadical-SAS/reflector/issues/589)) ([9dfd769](https://github.com/Monadical-SAS/reflector/commit/9dfd76996f851cc52be54feea078adbc0816dc57))
+* Igor/evaluation ([#575](https://github.com/Monadical-SAS/reflector/issues/575)) ([124ce03](https://github.com/Monadical-SAS/reflector/commit/124ce03bf86044c18313d27228a25da4bc20c9c5))
+* optimize parakeet transcription batching algorithm ([#577](https://github.com/Monadical-SAS/reflector/issues/577)) ([7030e0f](https://github.com/Monadical-SAS/reflector/commit/7030e0f23649a8cf6c1eb6d5889684a41ce849ec))
+
+## [0.7.3](https://github.com/Monadical-SAS/reflector/compare/v0.7.2...v0.7.3) (2025-08-22)
+
+### Bug Fixes
+
+* cleaned repo, and get git-leaks clean ([359280d](https://github.com/Monadical-SAS/reflector/commit/359280dd340433ba4402ed69034094884c825e67))
+* restore previous behavior on live pipeline + audio downscaler ([#561](https://github.com/Monadical-SAS/reflector/issues/561)) ([9265d20](https://github.com/Monadical-SAS/reflector/commit/9265d201b590d23c628c5f19251b70f473859043))
+
+## [0.7.2](https://github.com/Monadical-SAS/reflector/compare/v0.7.1...v0.7.2) (2025-08-21)
+
+### Bug Fixes
+
+* docker image not loading libgomp.so.1 for torch ([#560](https://github.com/Monadical-SAS/reflector/issues/560)) ([773fccd](https://github.com/Monadical-SAS/reflector/commit/773fccd93e887c3493abc2e4a4864dddce610177))
+* include shared rooms to search ([#558](https://github.com/Monadical-SAS/reflector/issues/558)) ([499eced](https://github.com/Monadical-SAS/reflector/commit/499eced3360b84fb3a90e1c8a3b554290d21adc2))
+
+## [0.7.1](https://github.com/Monadical-SAS/reflector/compare/v0.7.0...v0.7.1) (2025-08-21)
+
+### Bug Fixes
+
+* webvtt db null expectation mismatch ([#556](https://github.com/Monadical-SAS/reflector/issues/556)) ([e67ad1a](https://github.com/Monadical-SAS/reflector/commit/e67ad1a4a2054467bfeb1e0258fbac5868aaaf21))
+
+## [0.7.0](https://github.com/Monadical-SAS/reflector/compare/v0.6.1...v0.7.0) (2025-08-21)
+
+### Features
+
+* delete recording with transcript ([#547](https://github.com/Monadical-SAS/reflector/issues/547)) ([99cc984](https://github.com/Monadical-SAS/reflector/commit/99cc9840b3f5de01e0adfbfae93234042d706d13))
+* pipeline improvement with file processing, parakeet, silero-vad ([#540](https://github.com/Monadical-SAS/reflector/issues/540)) ([bcc29c9](https://github.com/Monadical-SAS/reflector/commit/bcc29c9e0050ae215f89d460e9d645aaf6a5e486))
+* postgresql migration and removal of sqlite in pytest ([#546](https://github.com/Monadical-SAS/reflector/issues/546)) ([cd1990f](https://github.com/Monadical-SAS/reflector/commit/cd1990f8f0fe1503ef5069512f33777a73a93d7f))
+* search backend ([#537](https://github.com/Monadical-SAS/reflector/issues/537)) ([5f9b892](https://github.com/Monadical-SAS/reflector/commit/5f9b89260c9ef7f3c921319719467df22830453f))
+* search frontend ([#551](https://github.com/Monadical-SAS/reflector/issues/551)) ([3657242](https://github.com/Monadical-SAS/reflector/commit/365724271ca6e615e3425125a69ae2b46ce39285))
+
+### Bug Fixes
+
+* evaluation cli event wrap ([#536](https://github.com/Monadical-SAS/reflector/issues/536)) ([941c3db](https://github.com/Monadical-SAS/reflector/commit/941c3db0bdacc7b61fea412f3746cc5a7cb67836))
+* use structlog not logging ([#550](https://github.com/Monadical-SAS/reflector/issues/550)) ([27e2f81](https://github.com/Monadical-SAS/reflector/commit/27e2f81fda5232e53edc729d3e99c5ef03adbfe9))
+
+## [0.6.1](https://github.com/Monadical-SAS/reflector/compare/v0.6.0...v0.6.1) (2025-08-06)
@@ -66,7 +66,6 @@ pnpm install

 # Copy configuration templates
 cp .env_template .env
-cp config-template.ts config.ts
 ```

 **Development:**
81 README.md
@@ -1,43 +1,60 @@
 <div align="center">
   <img width="100" alt="image" src="https://github.com/user-attachments/assets/66fb367b-2c89-4516-9912-f47ac59c6a7f"/>

 # Reflector

-Reflector Audio Management and Analysis is a cutting-edge web application under development by Monadical. It utilizes AI to record meetings, providing a permanent record with transcripts, translations, and automated summaries.
+Reflector is an AI-powered audio transcription and meeting analysis platform that provides real-time transcription, speaker diarization, translation, and summarization for audio content and live meetings. It works 100% with local models (whisper/parakeet, pyannote, seamless-m4t, and your local LLM such as phi-4).

-[](https://github.com/monadical-sas/reflector/actions/workflows/pytests.yml)
+[](https://github.com/monadical-sas/reflector/actions/workflows/test_server.yml)
 [](https://opensource.org/licenses/MIT)
 </div>

-## Screenshots
-
 <table>
   <tr>
     <td>
-      <a href="https://github.com/user-attachments/assets/3a976930-56c1-47ef-8c76-55d3864309e3">
-        <img width="700" alt="image" src="https://github.com/user-attachments/assets/3a976930-56c1-47ef-8c76-55d3864309e3" />
+      <a href="https://github.com/user-attachments/assets/21f5597c-2930-4899-a154-f7bd61a59e97">
+        <img width="700" alt="image" src="https://github.com/user-attachments/assets/21f5597c-2930-4899-a154-f7bd61a59e97" />
       </a>
     </td>
     <td>
-      <a href="https://github.com/user-attachments/assets/bfe3bde3-08af-4426-a9a1-11ad5cd63b33">
-        <img width="700" alt="image" src="https://github.com/user-attachments/assets/bfe3bde3-08af-4426-a9a1-11ad5cd63b33" />
+      <a href="https://github.com/user-attachments/assets/f6b9399a-5e51-4bae-b807-59128d0a940c">
+        <img width="700" alt="image" src="https://github.com/user-attachments/assets/f6b9399a-5e51-4bae-b807-59128d0a940c" />
       </a>
     </td>
     <td>
-      <a href="https://github.com/user-attachments/assets/7b60c9d0-efe4-474f-a27b-ea13bd0fabdc">
-        <img width="700" alt="image" src="https://github.com/user-attachments/assets/7b60c9d0-efe4-474f-a27b-ea13bd0fabdc" />
+      <a href="https://github.com/user-attachments/assets/a42ce460-c1fd-4489-a995-270516193897">
+        <img width="700" alt="image" src="https://github.com/user-attachments/assets/a42ce460-c1fd-4489-a995-270516193897" />
       </a>
     </td>
+    <td>
+      <a href="https://github.com/user-attachments/assets/21929f6d-c309-42fe-9c11-f1299e50fbd4">
+        <img width="700" alt="image" src="https://github.com/user-attachments/assets/21929f6d-c309-42fe-9c11-f1299e50fbd4" />
+      </a>
+    </td>
   </tr>
 </table>

+## What is Reflector?
+
+Reflector is a web application that utilizes local models to process audio content, providing:
+
+- **Real-time Transcription**: Convert speech to text using [Whisper](https://github.com/openai/whisper) (multi-language) or [Parakeet](https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2) (English) models
+- **Speaker Diarization**: Identify and label different speakers using [Pyannote](https://github.com/pyannote/pyannote-audio) 3.1
+- **Live Translation**: Translate audio content in real time to many languages with [Facebook Seamless-M4T](https://github.com/facebookresearch/seamless_communication)
+- **Topic Detection & Summarization**: Extract key topics and generate concise summaries using LLMs
+- **Meeting Recording**: Create permanent records of meetings with searchable transcripts
+
+Currently we provide a [modal.com](https://modal.com/) GPU template for deployment.

 ## Background

 The project architecture consists of three primary components:

-- **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
 - **Back-End**: Python server that offers an API and data persistence, found in `server/`.
-- **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations. Most reliable option is Modal deployment.
+- **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
+- **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations.

-It also uses authentik for authentication if activated, and Vercel for deployment and configuration of the front-end.
+It also uses authentik for authentication if activated.

 ## Contribution Guidelines

@@ -72,6 +89,8 @@ Note: We currently do not have instructions for Windows users.

 ## Installation

+*Note: we're working toward a better installation process; these instructions are not accurate for now*
+
 ### Frontend

 Start with `cd www`.

@@ -80,11 +99,10 @@ Start with `cd www`.

 ```bash
 pnpm install
-cp .env_template .env
-cp config-template.ts config.ts
+cp .env.example .env
 ```

-Then, fill in the environment variables in `.env` and the configuration in `config.ts` as needed. If you are unsure how to proceed, ask in Zulip.
+Then, fill in the environment variables in `.env` as needed. If you are unsure how to proceed, ask in Zulip.

 **Run in development mode**

@@ -149,3 +167,34 @@ You can manually process an audio file by calling the process tool:

 ```bash
 uv run python -m reflector.tools.process path/to/audio.wav
 ```
+
+## Feature Flags
+
+Reflector uses environment-variable-based feature flags to control application functionality. These flags allow you to enable or disable features without code changes.
+
+### Available Feature Flags
+
+| Feature Flag | Environment Variable |
+|--------------|----------------------|
+| `requireLogin` | `NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN` |
+| `privacy` | `NEXT_PUBLIC_FEATURE_PRIVACY` |
+| `browse` | `NEXT_PUBLIC_FEATURE_BROWSE` |
+| `sendToZulip` | `NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP` |
+| `rooms` | `NEXT_PUBLIC_FEATURE_ROOMS` |
+
+### Setting Feature Flags
+
+Feature flags are controlled via environment variables using the pattern `NEXT_PUBLIC_FEATURE_{FEATURE_NAME}`, where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.
+
+**Examples:**
+```bash
+# Enable user authentication requirement
+NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN=true
+
+# Disable browse functionality
+NEXT_PUBLIC_FEATURE_BROWSE=false
+
+# Enable Zulip integration
+NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP=true
+```
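For illustration only, the camelCase-to-SCREAMING_SNAKE_CASE mapping this section describes can be sketched in Python as follows; this is a hypothetical helper (the real mapping lives in the Next.js frontend, in TypeScript), not code from the repository:

```python
# Hypothetical sketch of the documented naming convention, not reflector code.
import re

def feature_env_var(flag: str) -> str:
    """Convert a camelCase flag name to its NEXT_PUBLIC_FEATURE_* variable."""
    # Insert an underscore before each interior capital, then uppercase.
    snake = re.sub(r"(?<!^)(?=[A-Z])", "_", flag).upper()
    return f"NEXT_PUBLIC_FEATURE_{snake}"

assert feature_env_var("requireLogin") == "NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN"
assert feature_env_var("sendToZulip") == "NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP"
```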
@@ -6,6 +6,7 @@ services:
       - 1250:1250
     volumes:
       - ./server/:/app/
+      - /app/.venv
     env_file:
       - ./server/.env
     environment:

@@ -16,6 +17,7 @@ services:
       context: server
     volumes:
       - ./server/:/app/
+      - /app/.venv
     env_file:
       - ./server/.env
     environment:

@@ -26,6 +28,7 @@ services:
       context: server
     volumes:
       - ./server/:/app/
+      - /app/.venv
     env_file:
       - ./server/.env
     environment:
3 server/.gitignore (vendored)
@@ -176,7 +176,8 @@ artefacts/
 audio_*.wav

 # ignore local database
-reflector.sqlite3
+*.sqlite3
+*.db
 data/

 dump.rdb
@@ -1,7 +1,8 @@
 FROM python:3.12-slim

 ENV PYTHONUNBUFFERED=1 \
-    UV_LINK_MODE=copy
+    UV_LINK_MODE=copy \
+    UV_NO_CACHE=1

 # builder install base dependencies
 WORKDIR /tmp

@@ -13,8 +14,8 @@ ENV PATH="/root/.local/bin/:$PATH"
 # install application dependencies
 RUN mkdir -p /app
 WORKDIR /app
-COPY pyproject.toml uv.lock /app/
-RUN touch README.md && env uv sync --compile-bytecode --locked
+COPY pyproject.toml uv.lock README.md /app/
+RUN uv sync --compile-bytecode --locked

 # pre-download nltk packages
 RUN uv run python -c "import nltk; nltk.download('punkt_tab'); nltk.download('averaged_perceptron_tagger_eng')"

@@ -26,4 +27,15 @@ COPY migrations /app/migrations
 COPY reflector /app/reflector
 WORKDIR /app

+# Create symlink for libgomp if it doesn't exist (for ARM64 compatibility)
+RUN if [ "$(uname -m)" = "aarch64" ] && [ ! -f /usr/lib/libgomp.so.1 ]; then \
+        LIBGOMP_PATH=$(find /app/.venv/lib -path "*/torch.libs/libgomp*.so.*" 2>/dev/null | head -n1); \
+        if [ -n "$LIBGOMP_PATH" ]; then \
+            ln -sf "$LIBGOMP_PATH" /usr/lib/libgomp.so.1; \
+        fi \
+    fi
+
+# Pre-check just to make sure the image will not fail
+RUN uv run python -c "import silero_vad.model"
+
 CMD ["./runserver.sh"]
@@ -40,3 +40,5 @@ uv run python -c "from reflector.pipelines.main_live_pipeline import task_pipeli

 ```bash
 uv run python -c "from reflector.pipelines.main_live_pipeline import pipeline_post; pipeline_post(transcript_id='TRANSCRIPT_ID')"
 ```
95 server/docs/data_retention.md (new file)
@@ -0,0 +1,95 @@
# Data Retention and Cleanup

## Overview

For public instances of Reflector, a data retention policy is automatically enforced to delete anonymous user data after a configurable period (default: 7 days). This ensures compliance with privacy expectations and prevents unbounded storage growth.

## Configuration

### Environment Variables

- `PUBLIC_MODE` (bool): Must be set to `true` to enable automatic cleanup
- `PUBLIC_DATA_RETENTION_DAYS` (int): Number of days to retain anonymous data (default: 7)

### What Gets Deleted

When data reaches the retention period, the following items are automatically removed:

1. **Transcripts** from anonymous users (where `user_id` is NULL):
   - Database records
   - Local files (audio.wav, audio.mp3, audio.json waveform)
   - Storage files (cloud storage if configured)

## Automatic Cleanup

### Celery Beat Schedule

When `PUBLIC_MODE=true`, a Celery beat task runs daily at 3 AM to clean up old data:

```python
# Automatically scheduled when PUBLIC_MODE=true
"cleanup_old_public_data": {
    "task": "reflector.worker.cleanup.cleanup_old_public_data",
    "schedule": crontab(hour=3, minute=0),  # Daily at 3 AM
}
```

### Running the Worker

Ensure both the Celery worker and beat scheduler are running:

```bash
# Start Celery worker
uv run celery -A reflector.worker.app worker --loglevel=info

# Start Celery beat scheduler (in another terminal)
uv run celery -A reflector.worker.app beat
```

## Manual Cleanup

For testing or manual intervention, use the cleanup tool:

```bash
# Delete data older than 7 days (default)
uv run python -m reflector.tools.cleanup_old_data

# Delete data older than 30 days
uv run python -m reflector.tools.cleanup_old_data --days 30
```

Note: The manual tool uses the same implementation as the Celery worker task to ensure consistency.
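For orientation, a minimal sketch of what such a retention pass can look like; the table and column names (`transcripts`, `user_id`, `created_at`) follow the description above, but the actual implementation lives in `reflector.worker.cleanup` and may differ:

```python
# Hypothetical sketch of the retention cutoff described above,
# not the actual reflector.worker.cleanup code.
from datetime import datetime, timedelta, timezone

RETENTION_DAYS = 7  # mirrors the PUBLIC_DATA_RETENTION_DAYS default

def build_cleanup_query(now: datetime | None = None) -> tuple[str, dict]:
    """Select anonymous transcripts older than the retention cutoff."""
    now = now or datetime.now(timezone.utc)
    cutoff = now - timedelta(days=RETENTION_DAYS)
    query = (
        "SELECT id FROM transcripts "
        "WHERE user_id IS NULL AND created_at < :cutoff"
    )
    return query, {"cutoff": cutoff}

query, params = build_cleanup_query()
# Each returned id would then have its DB row, local files, and
# storage objects deleted, as listed under "What Gets Deleted".
print(query, params)
```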
## Important Notes

1. **User Data Deletion**: Only anonymous data (where `user_id` is NULL) is deleted. Authenticated user data is preserved.

2. **Storage Cleanup**: The system properly cleans up both local files and cloud storage when configured.

3. **Error Handling**: If individual deletions fail, the cleanup continues and logs errors. Failed deletions are reported in the task output.

4. **Public Instance Only**: The automatic cleanup task only runs when `PUBLIC_MODE=true` to prevent accidental data loss in private deployments.

## Testing

Run the cleanup tests:

```bash
uv run pytest tests/test_cleanup.py -v
```

## Monitoring

Check Celery logs for cleanup task execution:

```bash
# Look for cleanup task logs
grep "cleanup_old_public_data" celery.log
grep "Starting cleanup of old public data" celery.log
```

Task statistics are logged after each run:
- Number of transcripts deleted
- Number of meetings deleted
- Number of orphaned recordings deleted
- Any errors encountered
194 server/docs/gpu/api-transcription.md (new file)
@@ -0,0 +1,194 @@
## Reflector GPU Transcription API (Specification)

This document defines the Reflector GPU transcription API that all implementations must adhere to. Current implementations include NVIDIA Parakeet (NeMo) and Whisper (faster-whisper), both deployed on Modal.com. The API surface and response shapes are OpenAI/Whisper-compatible, so clients can switch implementations by changing only the base URL.

### Base URL and Authentication

- Example base URLs (Modal web endpoints):

  - Parakeet: `https://<account>--reflector-transcriber-parakeet-web.modal.run`
  - Whisper: `https://<account>--reflector-transcriber-web.modal.run`

- All endpoints are served under `/v1` and require a Bearer token:

```
Authorization: Bearer <REFLECTOR_GPU_APIKEY>
```

Note: To switch implementations, deploy the desired variant and point `TRANSCRIPT_URL` to its base URL. The API is identical.

### Supported file types

`mp3, mp4, mpeg, mpga, m4a, wav, webm`

### Models and languages

- Parakeet (NVIDIA NeMo): default `nvidia/parakeet-tdt-0.6b-v2`
  - Language support: only `en`. Other languages return HTTP 400.
- Whisper (faster-whisper): default `large-v2` (or deployment-specific)
  - Language support: multilingual (per Whisper model capabilities).

Note: The `model` parameter is accepted by all implementations for interface parity. Some backends may treat it as informational.

### Endpoints

#### POST /v1/audio/transcriptions

Transcribe one or more uploaded audio files.

Request: multipart/form-data

- `file` (File) — optional. Single file to transcribe.
- `files` (File[]) — optional. One or more files to transcribe.
- `model` (string) — optional. Defaults to the implementation-specific model (see above).
- `language` (string) — optional, defaults to `en`.
  - Parakeet: only `en` is accepted; other values return HTTP 400
  - Whisper: model-dependent; typically multilingual
- `batch` (boolean) — optional, defaults to `false`.

Notes:

- Provide either `file` or `files`, not both. If neither is provided, HTTP 400.
- `batch` requires `files`; using `batch=true` without `files` returns HTTP 400.
- Response shape for multiple files is the same regardless of `batch`.
- Files sent to this endpoint are processed in a single pass (no VAD/chunking). This is intended for short clips (roughly ≤ 30s; depends on GPU memory/model). For longer audio, prefer `/v1/audio/transcriptions-from-url`, which supports VAD-based chunking.

Responses

Single file response:

```json
{
  "text": "transcribed text",
  "words": [
    { "word": "hello", "start": 0.0, "end": 0.5 },
    { "word": "world", "start": 0.5, "end": 1.0 }
  ],
  "filename": "audio.mp3"
}
```

Multiple files response:

```json
{
  "results": [
    {"filename": "a1.mp3", "text": "...", "words": [...]},
    {"filename": "a2.mp3", "text": "...", "words": [...]}
  ]
}
```

Notes:

- Word objects always include the keys `word`, `start`, `end`.
- Some implementations may include a trailing space in `word` to match Whisper tokenization behavior; clients should trim if needed.

Example curl (single file):

```bash
curl -X POST \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -F "file=@/path/to/audio.mp3" \
  -F "language=en" \
  "$BASE_URL/v1/audio/transcriptions"
```

Example curl (multiple files, batch):

```bash
curl -X POST \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -F "files=@/path/a1.mp3" -F "files=@/path/a2.mp3" \
  -F "batch=true" -F "language=en" \
  "$BASE_URL/v1/audio/transcriptions"
```
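The same upload can be made from Python; a minimal sketch using the `requests` library, mirroring the curl examples above (the `BASE_URL` and `REFLECTOR_GPU_APIKEY` environment variables are assumptions carried over from those examples):

```python
# Minimal client sketch for the upload endpoint; assumes BASE_URL and
# REFLECTOR_GPU_APIKEY are set, as in the curl examples above.
import os

import requests

base_url = os.environ["BASE_URL"]
api_key = os.environ["REFLECTOR_GPU_APIKEY"]

with open("audio.mp3", "rb") as f:
    resp = requests.post(
        f"{base_url}/v1/audio/transcriptions",
        headers={"Authorization": f"Bearer {api_key}"},
        files={"file": ("audio.mp3", f, "audio/mpeg")},
        data={"language": "en"},
        timeout=300,
    )
resp.raise_for_status()
result = resp.json()
print(result["text"])
for w in result["words"]:  # each word carries start/end timestamps
    print(w["word"], w["start"], w["end"])
```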
#### POST /v1/audio/transcriptions-from-url

Transcribe a single remote audio file by URL.

Request: application/json

Body parameters:

- `audio_file_url` (string) — required. URL of the audio file to transcribe.
- `model` (string) — optional. Defaults to the implementation-specific model (see above).
- `language` (string) — optional, defaults to `en`. Parakeet only accepts `en`.
- `timestamp_offset` (number) — optional, defaults to `0.0`. Added to each word's `start`/`end` in the response.

```json
{
  "audio_file_url": "https://example.com/audio.mp3",
  "model": "nvidia/parakeet-tdt-0.6b-v2",
  "language": "en",
  "timestamp_offset": 0.0
}
```

Response:

```json
{
  "text": "transcribed text",
  "words": [
    { "word": "hello", "start": 10.0, "end": 10.5 },
    { "word": "world", "start": 10.5, "end": 11.0 }
  ]
}
```

Notes:

- `timestamp_offset` is added to each word's `start`/`end` in the response.
- Implementations may perform VAD-based chunking and batching for long-form audio; word timings are adjusted accordingly.

Example curl:

```bash
curl -X POST \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -H "Content-Type: application/json" \
  -d '{
    "audio_file_url": "https://example.com/audio.mp3",
    "language": "en",
    "timestamp_offset": 0
  }' \
  "$BASE_URL/v1/audio/transcriptions-from-url"
```
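An illustrative sketch of the timestamp arithmetic the notes above describe (an assumption about the general shape of the adjustment, not the deployed implementation): chunk-relative word timings are shifted by the chunk's start time plus the request-level `timestamp_offset`.

```python
# Illustrative only: shift chunk-relative word timings into absolute times,
# per the timestamp_offset behavior documented above.
def adjust_words(words, chunk_start: float, timestamp_offset: float = 0.0):
    shift = chunk_start + timestamp_offset
    return [
        {"word": w["word"],
         "start": round(w["start"] + shift, 3),
         "end": round(w["end"] + shift, 3)}
        for w in words
    ]

chunk_words = [{"word": "hello", "start": 0.0, "end": 0.5}]
print(adjust_words(chunk_words, chunk_start=10.0))
# [{'word': 'hello', 'start': 10.0, 'end': 10.5}] — matches the response example
```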
### Error handling

- 400 Bad Request
  - Parakeet: `language` other than `en`
  - Missing required parameters (`file`/`files` for upload; `audio_file_url` for URL endpoint)
  - Unsupported file extension
- 401 Unauthorized
  - Missing or invalid Bearer token
- 404 Not Found
  - `audio_file_url` does not exist

### Implementation details

- GPUs: A10G for small-file/live, L40S for large-file URL transcription (subject to deployment)
- VAD chunking and segment batching; word timings adjusted and overlapping ends constrained
- Pads very short segments (< 0.5s) to avoid model crashes on some backends

### Server configuration (Reflector API)

Set the Reflector server to use the Modal backend and point `TRANSCRIPT_URL` to your chosen deployment:

```
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://<account>--reflector-transcriber-parakeet-web.modal.run
TRANSCRIPT_MODAL_API_KEY=<REFLECTOR_GPU_APIKEY>
```

### Conformance tests

Use the pytest-based conformance tests to validate any new implementation (including self-hosted) against this spec:

```
TRANSCRIPT_URL=https://<your-deployment-base> \
TRANSCRIPT_MODAL_API_KEY=your-api-key \
uv run -m pytest -m gpu_modal --no-cov server/tests/test_gpu_modal_transcript.py
```
212 server/docs/webhook.md (new file)
@@ -0,0 +1,212 @@
# Reflector Webhook Documentation

## Overview

Reflector supports webhook notifications to notify external systems when transcript processing is completed. Webhooks can be configured per room and are triggered automatically after a transcript is successfully processed.

## Configuration

Webhooks are configured at the room level with two fields:
- `webhook_url`: The HTTPS endpoint to receive webhook notifications
- `webhook_secret`: Optional secret key for HMAC signature verification (auto-generated if not provided)

## Events

### `transcript.completed`

Triggered when a transcript has been fully processed, including transcription, diarization, summarization, and topic detection.

### `test`

A test event that can be triggered manually to verify webhook configuration.

## Webhook Request Format

### Headers

All webhook requests include the following headers:

| Header | Description | Example |
|--------|-------------|---------|
| `Content-Type` | Always `application/json` | `application/json` |
| `User-Agent` | Identifies Reflector as the source | `Reflector-Webhook/1.0` |
| `X-Webhook-Event` | The event type | `transcript.completed` or `test` |
| `X-Webhook-Retry` | Current retry attempt number | `0`, `1`, `2`... |
| `X-Webhook-Signature` | HMAC signature (if secret configured) | `t=1735306800,v1=abc123...` |

### Signature Verification

If a webhook secret is configured, Reflector includes an HMAC-SHA256 signature in the `X-Webhook-Signature` header so you can verify the webhook's authenticity.

The signature format is: `t={timestamp},v1={signature}`

To verify the signature:
1. Extract the timestamp and signature from the header
2. Create the signed payload: `{timestamp}.{request_body}`
3. Compute HMAC-SHA256 of the signed payload using your webhook secret
4. Compare the computed signature with the received signature

Example verification (Python):
```python
import hmac
import hashlib

def verify_webhook_signature(payload: bytes, signature_header: str, secret: str) -> bool:
    # Parse header: "t=1735306800,v1=abc123..."
    parts = dict(part.split("=") for part in signature_header.split(","))
    timestamp = parts["t"]
    received_signature = parts["v1"]

    # Create signed payload
    signed_payload = f"{timestamp}.{payload.decode('utf-8')}"

    # Compute expected signature
    expected_signature = hmac.new(
        secret.encode("utf-8"),
        signed_payload.encode("utf-8"),
        hashlib.sha256
    ).hexdigest()

    # Compare signatures
    return hmac.compare_digest(expected_signature, received_signature)
```
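To show the helper in context, here is a hypothetical receiving endpoint built with FastAPI; the route path and secret source are illustrative (not something Reflector prescribes), and `verify_webhook_signature` is the function defined above:

```python
# Hypothetical webhook receiver using the verify_webhook_signature helper
# defined above; route name and secret handling are illustrative only.
import os

from fastapi import FastAPI, Header, HTTPException, Request

app = FastAPI()
WEBHOOK_SECRET = os.environ["WEBHOOK_SECRET"]

@app.post("/reflector-webhook")
async def reflector_webhook(
    request: Request,
    x_webhook_event: str = Header(...),
    x_webhook_signature: str = Header(...),
):
    body = await request.body()
    if not verify_webhook_signature(body, x_webhook_signature, WEBHOOK_SECRET):
        raise HTTPException(status_code=400, detail="invalid signature")
    # Process the event (e.g. transcript.completed) only after verification.
    return {"ok": True}
```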
## Event Payloads

### `transcript.completed` Event

This event includes a convenient URL for accessing the transcript:
- `frontend_url`: Direct link to view the transcript in the web interface

```json
{
  "event": "transcript.completed",
  "event_id": "transcript.completed-abc-123-def-456",
  "timestamp": "2025-08-27T12:34:56.789012Z",
  "transcript": {
    "id": "abc-123-def-456",
    "room_id": "room-789",
    "created_at": "2025-08-27T12:00:00Z",
    "duration": 1800.5,
    "title": "Q3 Product Planning Meeting",
    "short_summary": "Team discussed Q3 product roadmap, prioritizing mobile app features and API improvements.",
    "long_summary": "The product team met to finalize the Q3 roadmap. Key decisions included...",
    "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v Speaker 1>Welcome everyone to today's meeting...",
    "topics": [
      {
        "title": "Introduction and Agenda",
        "summary": "Meeting kickoff with agenda review",
        "timestamp": 0.0,
        "duration": 120.0,
        "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v Speaker 1>Welcome everyone..."
      },
      {
        "title": "Mobile App Features Discussion",
        "summary": "Team reviewed proposed mobile app features for Q3",
        "timestamp": 120.0,
        "duration": 600.0,
        "webvtt": "WEBVTT\n\n00:02:00.000 --> 00:02:10.000\n<v Speaker 2>Let's talk about the mobile app..."
      }
    ],
    "participants": [
      {
        "id": "participant-1",
        "name": "John Doe",
        "speaker": "Speaker 1"
      },
      {
        "id": "participant-2",
        "name": "Jane Smith",
        "speaker": "Speaker 2"
      }
    ],
    "source_language": "en",
    "target_language": "en",
    "status": "completed",
    "frontend_url": "https://app.reflector.com/transcripts/abc-123-def-456"
  },
  "room": {
    "id": "room-789",
    "name": "Product Team Room"
  }
}
```

### `test` Event

```json
{
  "event": "test",
  "event_id": "test.2025-08-27T12:34:56.789012Z",
  "timestamp": "2025-08-27T12:34:56.789012Z",
  "message": "This is a test webhook from Reflector",
  "room": {
    "id": "room-789",
    "name": "Product Team Room"
  }
}
```

## Retry Policy

Webhooks are delivered with automatic retry logic to handle transient failures. When a webhook delivery fails due to server errors or network issues, Reflector will automatically retry the delivery multiple times over an extended period.

### Retry Mechanism

Reflector implements an exponential backoff strategy for webhook retries:

- **Initial retry delay**: 60 seconds after the first failure
- **Exponential backoff**: Each subsequent retry waits approximately twice as long as the previous one
- **Maximum retry interval**: 1 hour (backoff is capped at this duration)
- **Maximum retry attempts**: 30 attempts total
- **Total retry duration**: Retries continue for approximately 24 hours

### How Retries Work

When a webhook fails, Reflector will:
1. Wait 60 seconds, then retry (attempt #1)
2. If it fails again, wait ~2 minutes, then retry (attempt #2)
3. Continue doubling the wait time up to a maximum of 1 hour between attempts
4. Keep retrying at 1-hour intervals until successful or 30 attempts are exhausted (a sketch of the resulting schedule follows below)

The `X-Webhook-Retry` header indicates the current retry attempt number (0 for the initial attempt, 1 for the first retry, etc.), allowing your endpoint to track retry attempts.
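As a reading of the documented numbers (not the scheduler's actual code), the resulting delay schedule can be computed like this:

```python
# Sketch of the documented retry schedule: 60s initial delay, doubling each
# attempt, capped at 1 hour, for at most 30 attempts. A reading of the policy
# above, not Reflector's actual scheduler code.
MAX_ATTEMPTS = 30
INITIAL_DELAY = 60.0    # seconds
MAX_INTERVAL = 3600.0   # 1 hour cap

def retry_delays() -> list[float]:
    delays, delay = [], INITIAL_DELAY
    for _ in range(MAX_ATTEMPTS):
        delays.append(delay)
        delay = min(delay * 2, MAX_INTERVAL)
    return delays

schedule = retry_delays()
print(schedule[:6])          # [60.0, 120.0, 240.0, 480.0, 960.0, 1920.0]
print(sum(schedule) / 3600)  # ≈ 25 hours total, the doc's "approximately 24 hours"
```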
### Retry Behavior by HTTP Status Code

| Status Code | Behavior |
|-------------|----------|
| 2xx (Success) | No retry, webhook marked as delivered |
| 4xx (Client Error) | No retry, request is considered permanently failed |
| 5xx (Server Error) | Automatic retry with exponential backoff |
| Network/Timeout Error | Automatic retry with exponential backoff |

**Important Notes:**
- Webhooks time out after 30 seconds. If your endpoint takes longer to respond, the request is considered a timeout error and retried.
- During the retry period (~24 hours), you may receive the same webhook multiple times if your endpoint experiences intermittent failures.
- There is no mechanism to manually retry failed webhooks after the retry period expires.

## Testing Webhooks

You can test your webhook configuration before processing transcripts:

```http
POST /v1/rooms/{room_id}/webhook/test
```

Response:
```json
{
  "success": true,
  "status_code": 200,
  "message": "Webhook test successful",
  "response_preview": "OK"
}
```

Or in case of failure:
```json
{
  "success": false,
  "error": "Webhook request timed out (10 seconds)"
}
```
@@ -4,7 +4,8 @@ This repository holds an API for the GPU implementation of the Reflector API server
 and uses [Modal.com](https://modal.com)

 - `reflector_diarizer.py` - Diarization API
-- `reflector_transcriber.py` - Transcription API
+- `reflector_transcriber.py` - Transcription API (Whisper)
+- `reflector_transcriber_parakeet.py` - Transcription API (NVIDIA Parakeet)
 - `reflector_translator.py` - Translation API

 ## Modal.com deployment

@@ -19,6 +20,10 @@ $ modal deploy reflector_transcriber.py
 ...
 └── 🔨 Created web => https://xxxx--reflector-transcriber-web.modal.run

+$ modal deploy reflector_transcriber_parakeet.py
+...
+└── 🔨 Created web => https://xxxx--reflector-transcriber-parakeet-web.modal.run
+
 $ modal deploy reflector_llm.py
 ...
 └── 🔨 Created web => https://xxxx--reflector-llm-web.modal.run
@@ -68,6 +73,86 @@ Authorization: bearer <REFLECTOR_APIKEY>

 ### Transcription

+#### Parakeet Transcriber (`reflector_transcriber_parakeet.py`)
+
+NVIDIA Parakeet is a state-of-the-art ASR model optimized for real-time transcription with superior word-level timestamps.
+
+**GPU Configuration:**
+- **A10G GPU** - Used for the `/v1/audio/transcriptions` endpoint (small files, live transcription)
+  - Higher concurrency (max_inputs=10)
+  - Optimized for multiple small audio files
+  - Supports batch processing for efficiency
+
+- **L40S GPU** - Used for the `/v1/audio/transcriptions-from-url` endpoint (large files)
+  - Lower concurrency but more powerful processing
+  - Optimized for single large audio files
+  - VAD-based chunking for long-form audio
+
+##### `/v1/audio/transcriptions` - Small file transcription
+
+**request** (multipart/form-data)
+- `file` or `files[]` - audio file(s) to transcribe
+- `model` - model name (default: `nvidia/parakeet-tdt-0.6b-v2`)
+- `language` - language code (default: `en`)
+- `batch` - whether to use batch processing for multiple files (default: `true`)
+
+**response**
+```json
+{
+  "text": "transcribed text",
+  "words": [
+    {"word": "hello", "start": 0.0, "end": 0.5},
+    {"word": "world", "start": 0.5, "end": 1.0}
+  ],
+  "filename": "audio.mp3"
+}
+```
+
+For multiple files with batch=true:
+```json
+{
+  "results": [
+    {
+      "filename": "audio1.mp3",
+      "text": "transcribed text",
+      "words": [...]
+    },
+    {
+      "filename": "audio2.mp3",
+      "text": "transcribed text",
+      "words": [...]
+    }
+  ]
+}
+```
+
+##### `/v1/audio/transcriptions-from-url` - Large file transcription
+
+**request** (application/json)
+```json
+{
+  "audio_file_url": "https://example.com/audio.mp3",
+  "model": "nvidia/parakeet-tdt-0.6b-v2",
+  "language": "en",
+  "timestamp_offset": 0.0
+}
+```
+
+**response**
+```json
+{
+  "text": "transcribed text from large file",
+  "words": [
+    {"word": "hello", "start": 0.0, "end": 0.5},
+    {"word": "world", "start": 0.5, "end": 1.0}
+  ]
+}
+```
+
+**Supported file types:** mp3, mp4, mpeg, mpga, m4a, wav, webm
+
 #### Whisper Transcriber (`reflector_transcriber.py`)

 `POST /transcribe`

 **request** (multipart/form-data)
@@ -4,14 +4,80 @@ Reflector GPU backend - diarizer
|
||||
"""
|
||||
|
||||
import os
|
||||
import uuid
|
||||
from typing import Mapping, NewType
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import modal.gpu
|
||||
from modal import App, Image, Secret, asgi_app, enter, method
|
||||
from pydantic import BaseModel
|
||||
import modal
|
||||
|
||||
PYANNOTE_MODEL_NAME: str = "pyannote/speaker-diarization-3.1"
|
||||
MODEL_DIR = "/root/diarization_models"
|
||||
app = App(name="reflector-diarizer")
|
||||
UPLOADS_PATH = "/uploads"
|
||||
SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
|
||||
|
||||
DiarizerUniqFilename = NewType("DiarizerUniqFilename", str)
|
||||
AudioFileExtension = NewType("AudioFileExtension", str)
|
||||
|
||||
app = modal.App(name="reflector-diarizer")
|
||||
|
||||
# Volume for temporary file uploads
|
||||
upload_volume = modal.Volume.from_name("diarizer-uploads", create_if_missing=True)
|
||||
|
||||
|
||||
def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
|
||||
parsed_url = urlparse(url)
|
||||
url_path = parsed_url.path
|
||||
|
||||
for ext in SUPPORTED_FILE_EXTENSIONS:
|
||||
if url_path.lower().endswith(f".{ext}"):
|
||||
return AudioFileExtension(ext)
|
||||
|
||||
content_type = headers.get("content-type", "").lower()
|
||||
if "audio/mpeg" in content_type or "audio/mp3" in content_type:
|
||||
return AudioFileExtension("mp3")
|
||||
if "audio/wav" in content_type:
|
||||
return AudioFileExtension("wav")
|
||||
if "audio/mp4" in content_type:
|
||||
return AudioFileExtension("mp4")
|
||||
|
||||
raise ValueError(
|
||||
f"Unsupported audio format for URL: {url}. "
|
||||
f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
|
||||
)
|
||||
|
||||
|
||||
def download_audio_to_volume(
|
||||
audio_file_url: str,
|
||||
) -> tuple[DiarizerUniqFilename, AudioFileExtension]:
|
||||
import requests
|
||||
from fastapi import HTTPException
|
||||
|
||||
print(f"Checking audio file at: {audio_file_url}")
|
||||
response = requests.head(audio_file_url, allow_redirects=True)
|
||||
if response.status_code == 404:
|
||||
raise HTTPException(status_code=404, detail="Audio file not found")
|
||||
|
||||
print(f"Downloading audio file from: {audio_file_url}")
|
||||
response = requests.get(audio_file_url, allow_redirects=True)
|
||||
|
||||
if response.status_code != 200:
|
||||
print(f"Download failed with status {response.status_code}: {response.text}")
|
||||
raise HTTPException(
|
||||
status_code=response.status_code,
|
||||
detail=f"Failed to download audio file: {response.status_code}",
|
||||
)
|
||||
|
||||
audio_suffix = detect_audio_format(audio_file_url, response.headers)
|
||||
unique_filename = DiarizerUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
|
||||
file_path = f"{UPLOADS_PATH}/{unique_filename}"
|
||||
|
||||
print(f"Writing file to: {file_path} (size: {len(response.content)} bytes)")
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(response.content)
|
||||
|
||||
upload_volume.commit()
|
||||
print(f"File saved as: {unique_filename}")
|
||||
return unique_filename, audio_suffix
|
||||
|
||||
|
||||
def migrate_cache_llm():
|
||||
@@ -39,7 +105,7 @@ def download_pyannote_audio():
|
||||
|
||||
|
||||
diarizer_image = (
|
-    Image.debian_slim(python_version="3.10.8")
+    modal.Image.debian_slim(python_version="3.10.8")
     .pip_install(
         "pyannote.audio==3.1.0",
         "requests",
@@ -55,7 +121,8 @@ diarizer_image = (
         "hf-transfer",
     )
     .run_function(
-        download_pyannote_audio, secrets=[Secret.from_name("my-huggingface-secret")]
+        download_pyannote_audio,
+        secrets=[modal.Secret.from_name("hf_token")],
     )
     .run_function(migrate_cache_llm)
     .env(
@@ -70,53 +137,60 @@ diarizer_image = (


 @app.cls(
-    gpu=modal.gpu.A100(size="40GB"),
+    gpu="A100",
     timeout=60 * 30,
     scaledown_window=60,
-    allow_concurrent_inputs=1,
     image=diarizer_image,
+    volumes={UPLOADS_PATH: upload_volume},
+    enable_memory_snapshot=True,
+    experimental_options={"enable_gpu_snapshot": True},
+    secrets=[
+        modal.Secret.from_name("hf_token"),
+    ],
 )
+@modal.concurrent(max_inputs=1)
 class Diarizer:
-    @enter()
+    @modal.enter(snap=True)
     def enter(self):
         import torch
         from pyannote.audio import Pipeline

         self.use_gpu = torch.cuda.is_available()
         self.device = "cuda" if self.use_gpu else "cpu"
         print(f"Using device: {self.device}")
         self.diarization_pipeline = Pipeline.from_pretrained(
-            PYANNOTE_MODEL_NAME, cache_dir=MODEL_DIR
+            PYANNOTE_MODEL_NAME,
+            cache_dir=MODEL_DIR,
+            use_auth_token=os.environ["HF_TOKEN"],
         )
         self.diarization_pipeline.to(torch.device(self.device))

-    @method()
-    def diarize(self, audio_data: str, audio_suffix: str, timestamp: float):
-        import tempfile
-
+    @modal.method()
+    def diarize(self, filename: str, timestamp: float = 0.0):
         import torchaudio

-        with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
-            fp.write(audio_data)
-
-            print("Diarizing audio")
-            waveform, sample_rate = torchaudio.load(fp.name)
-            diarization = self.diarization_pipeline(
-                {"waveform": waveform, "sample_rate": sample_rate}
-            )
+        upload_volume.reload()
+
+        file_path = f"{UPLOADS_PATH}/{filename}"
+        if not os.path.exists(file_path):
+            raise FileNotFoundError(f"File not found: {file_path}")
+
+        print(f"Diarizing audio from: {file_path}")
+        waveform, sample_rate = torchaudio.load(file_path)
+        diarization = self.diarization_pipeline(
+            {"waveform": waveform, "sample_rate": sample_rate}
+        )

-            words = []
-            for diarization_segment, _, speaker in diarization.itertracks(yield_label=True):
-                words.append(
-                    {
-                        "start": round(timestamp + diarization_segment.start, 3),
-                        "end": round(timestamp + diarization_segment.end, 3),
-                        "speaker": int(speaker[-2:]),
-                    }
-                )
-            print("Diarization complete")
-            return {"diarization": words}
+        words = []
+        for diarization_segment, _, speaker in diarization.itertracks(
+            yield_label=True
+        ):
+            words.append(
+                {
+                    "start": round(timestamp + diarization_segment.start, 3),
+                    "end": round(timestamp + diarization_segment.end, 3),
+                    "speaker": int(speaker[-2:]),
+                }
+            )
+        print("Diarization complete")
+        return {"diarization": words}


 # -------------------------------------------------------------------
@@ -127,17 +201,18 @@ class Diarizer:
 @app.function(
     timeout=60 * 10,
     scaledown_window=60 * 3,
-    allow_concurrent_inputs=40,
     secrets=[
-        Secret.from_name("reflector-gpu"),
+        modal.Secret.from_name("reflector-gpu"),
     ],
+    volumes={UPLOADS_PATH: upload_volume},
     image=diarizer_image,
 )
-@asgi_app()
+@modal.concurrent(max_inputs=40)
+@modal.asgi_app()
 def web():
     import requests
     from fastapi import Depends, FastAPI, HTTPException, status
     from fastapi.security import OAuth2PasswordBearer
     from pydantic import BaseModel

     diarizerstub = Diarizer()

@@ -153,35 +228,26 @@ def web():
             headers={"WWW-Authenticate": "Bearer"},
         )

-    def validate_audio_file(audio_file_url: str):
-        # Check if the audio file exists
-        response = requests.head(audio_file_url, allow_redirects=True)
-        if response.status_code == 404:
-            raise HTTPException(
-                status_code=response.status_code,
-                detail="The audio file does not exist.",
-            )
-
     class DiarizationResponse(BaseModel):
         result: dict

-    @app.post(
-        "/diarize", dependencies=[Depends(apikey_auth), Depends(validate_audio_file)]
-    )
-    def diarize(
-        audio_file_url: str, timestamp: float = 0.0
-    ) -> HTTPException | DiarizationResponse:
-        # Currently the uploaded files are in mp3 format
-        audio_suffix = "mp3"
-
-        print("Downloading audio file")
-        response = requests.get(audio_file_url, allow_redirects=True)
-        print("Audio file downloaded successfully")
-
-        func = diarizerstub.diarize.spawn(
-            audio_data=response.content, audio_suffix=audio_suffix, timestamp=timestamp
-        )
-        result = func.get()
-        return result
+    @app.post("/diarize", dependencies=[Depends(apikey_auth)])
+    def diarize(audio_file_url: str, timestamp: float = 0.0) -> DiarizationResponse:
+        unique_filename, audio_suffix = download_audio_to_volume(audio_file_url)
+
+        try:
+            func = diarizerstub.diarize.spawn(
+                filename=unique_filename, timestamp=timestamp
+            )
+            result = func.get()
+            return result
+        finally:
+            try:
+                file_path = f"{UPLOADS_PATH}/{unique_filename}"
+                print(f"Deleting file: {file_path}")
+                os.remove(file_path)
+                upload_volume.commit()
+            except Exception as e:
+                print(f"Error cleaning up {unique_filename}: {e}")

     return app
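For reviewers who want to exercise the reworked diarizer endpoint, a minimal client sketch follows. The deployment host and audio URL are placeholders (nothing in this diff fixes them); the bearer auth and the audio_file_url/timestamp query parameters come from the handler above.

# Hypothetical smoke test for the /diarize endpoint above.
# MODAL_URL and the audio URL are placeholders, not values from this repo.
import os

import requests

MODAL_URL = "https://example--reflector-diarizer-web.modal.run"

response = requests.post(
    f"{MODAL_URL}/diarize",
    params={"audio_file_url": "https://example.com/meeting.mp3", "timestamp": 0.0},
    headers={"Authorization": f"Bearer {os.environ['REFLECTOR_GPU_APIKEY']}"},
    timeout=600,
)
response.raise_for_status()
for segment in response.json()["diarization"]:
    print(segment["speaker"], segment["start"], segment["end"])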
@@ -1,41 +1,78 @@
 import os
-import tempfile
+import sys
+import threading
+import uuid
+from typing import Generator, Mapping, NamedTuple, NewType, TypedDict
+from urllib.parse import urlparse

 import modal
-from pydantic import BaseModel
-
-MODELS_DIR = "/models"

 MODEL_NAME = "large-v2"
 MODEL_COMPUTE_TYPE: str = "float16"
 MODEL_NUM_WORKERS: int = 1

+MINUTES = 60  # seconds
+SAMPLERATE = 16000
+UPLOADS_PATH = "/uploads"
+CACHE_PATH = "/models"
+SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
+VAD_CONFIG = {
+    "batch_max_duration": 30.0,
+    "silence_padding": 0.5,
+    "window_size": 512,
+}

-volume = modal.Volume.from_name("models", create_if_missing=True)
+WhisperUniqFilename = NewType("WhisperUniqFilename", str)
+AudioFileExtension = NewType("AudioFileExtension", str)

 app = modal.App("reflector-transcriber")

+model_cache = modal.Volume.from_name("models", create_if_missing=True)
+upload_volume = modal.Volume.from_name("whisper-uploads", create_if_missing=True)
+
+
+class TimeSegment(NamedTuple):
+    """Represents a time segment with start and end times."""
+
+    start: float
+    end: float
+
+
+class AudioSegment(NamedTuple):
+    """Represents an audio segment with timing and audio data."""
+
+    start: float
+    end: float
+    audio: any
+
+
+class TranscriptResult(NamedTuple):
+    """Represents a transcription result with text and word timings."""
+
+    text: str
+    words: list["WordTiming"]
+
+
+class WordTiming(TypedDict):
+    """Represents a word with its timing information."""
+
+    word: str
+    start: float
+    end: float
+

 def download_model():
     from faster_whisper import download_model

-    volume.reload()
+    model_cache.reload()

-    download_model(MODEL_NAME, cache_dir=MODELS_DIR)
+    download_model(MODEL_NAME, cache_dir=CACHE_PATH)

-    volume.commit()
+    model_cache.commit()


 image = (
     modal.Image.debian_slim(python_version="3.12")
-    .pip_install(
-        "huggingface_hub==0.27.1",
-        "hf-transfer==0.1.9",
-        "torch==2.5.1",
-        "faster-whisper==1.1.1",
-    )
     .env(
         {
             "HF_HUB_ENABLE_HF_TRANSFER": "1",
@@ -45,19 +82,98 @@ image = (
             ),
         }
     )
-    .run_function(download_model, volumes={MODELS_DIR: volume})
+    .apt_install("ffmpeg")
+    .pip_install(
+        "huggingface_hub==0.27.1",
+        "hf-transfer==0.1.9",
+        "torch==2.5.1",
+        "faster-whisper==1.1.1",
+        "fastapi==0.115.12",
+        "requests",
+        "librosa==0.10.1",
+        "numpy<2",
+        "silero-vad==5.1.0",
+    )
+    .run_function(download_model, volumes={CACHE_PATH: model_cache})
 )
+
+
+def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
+    parsed_url = urlparse(url)
+    url_path = parsed_url.path
+
+    for ext in SUPPORTED_FILE_EXTENSIONS:
+        if url_path.lower().endswith(f".{ext}"):
+            return AudioFileExtension(ext)
+
+    content_type = headers.get("content-type", "").lower()
+    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
+        return AudioFileExtension("mp3")
+    if "audio/wav" in content_type:
+        return AudioFileExtension("wav")
+    if "audio/mp4" in content_type:
+        return AudioFileExtension("mp4")
+
+    raise ValueError(
+        f"Unsupported audio format for URL: {url}. "
+        f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
+    )
+
+
+def download_audio_to_volume(
+    audio_file_url: str,
+) -> tuple[WhisperUniqFilename, AudioFileExtension]:
+    import requests
+    from fastapi import HTTPException
+
+    response = requests.head(audio_file_url, allow_redirects=True)
+    if response.status_code == 404:
+        raise HTTPException(status_code=404, detail="Audio file not found")
+
+    response = requests.get(audio_file_url, allow_redirects=True)
+    response.raise_for_status()
+
+    audio_suffix = detect_audio_format(audio_file_url, response.headers)
+    unique_filename = WhisperUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
+    file_path = f"{UPLOADS_PATH}/{unique_filename}"
+
+    with open(file_path, "wb") as f:
+        f.write(response.content)
+
+    upload_volume.commit()
+    return unique_filename, audio_suffix
+
+
+def pad_audio(audio_array, sample_rate: int = SAMPLERATE):
+    """Add 0.5s of silence if audio is shorter than the silence_padding window.
+
+    Whisper does not strictly require this, but aligning behavior with Parakeet
+    avoids edge-case crashes on extremely short inputs and makes comparisons easier.
+    """
+    import numpy as np
+
+    audio_duration = len(audio_array) / sample_rate
+    if audio_duration < VAD_CONFIG["silence_padding"]:
+        silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
+        silence = np.zeros(silence_samples, dtype=np.float32)
+        return np.concatenate([audio_array, silence])
+    return audio_array


 @app.cls(
     gpu="A10G",
     timeout=5 * MINUTES,
     scaledown_window=5 * MINUTES,
-    allow_concurrent_inputs=6,
     image=image,
-    volumes={MODELS_DIR: volume},
+    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
 )
-class Transcriber:
+@modal.concurrent(max_inputs=10)
+class TranscriberWhisperLive:
+    """Live transcriber class for small audio segments (A10G).
+
+    Mirrors the Parakeet live class API but uses Faster-Whisper under the hood.
+    """
+
     @modal.enter()
     def enter(self):
         import faster_whisper
@@ -71,23 +187,200 @@ class Transcriber:
             device=self.device,
             compute_type=MODEL_COMPUTE_TYPE,
             num_workers=MODEL_NUM_WORKERS,
-            download_root=MODELS_DIR,
+            download_root=CACHE_PATH,
             local_files_only=True,
         )
         print(f"Model is on device: {self.device}")

     @modal.method()
     def transcribe_segment(
         self,
-        audio_data: str,
-        audio_suffix: str,
-        language: str,
+        filename: str,
+        language: str = "en",
     ):
-        with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
-            fp.write(audio_data)
+        """Transcribe a single uploaded audio file by filename."""
+        upload_volume.reload()
+
+        file_path = f"{UPLOADS_PATH}/{filename}"
+        if not os.path.exists(file_path):
+            raise FileNotFoundError(f"File not found: {file_path}")
+
+        with self.lock:
+            with NoStdStreams():
+                segments, _ = self.model.transcribe(
+                    file_path,
+                    language=language,
+                    beam_size=5,
+                    word_timestamps=True,
+                    vad_filter=True,
+                    vad_parameters={"min_silence_duration_ms": 500},
+                )
+
+        segments = list(segments)
+        text = "".join(segment.text for segment in segments).strip()
+        words = [
+            {
+                "word": word.word,
+                "start": round(float(word.start), 2),
+                "end": round(float(word.end), 2),
+            }
+            for segment in segments
+            for word in segment.words
+        ]
+
+        return {"text": text, "words": words}
+
+    @modal.method()
+    def transcribe_batch(
+        self,
+        filenames: list[str],
+        language: str = "en",
+    ):
+        """Transcribe multiple uploaded audio files and return per-file results."""
+        upload_volume.reload()
+
+        results = []
+        for filename in filenames:
+            file_path = f"{UPLOADS_PATH}/{filename}"
+            if not os.path.exists(file_path):
+                raise FileNotFoundError(f"Batch file not found: {file_path}")
+
+            with self.lock:
+                with NoStdStreams():
+                    segments, _ = self.model.transcribe(
+                        file_path,
+                        language=language,
+                        beam_size=5,
+                        word_timestamps=True,
+                        vad_filter=True,
+                        vad_parameters={"min_silence_duration_ms": 500},
+                    )
+
+            segments = list(segments)
+            text = "".join(seg.text for seg in segments).strip()
+            words = [
+                {
+                    "word": w.word,
+                    "start": round(float(w.start), 2),
+                    "end": round(float(w.end), 2),
+                }
+                for seg in segments
+                for w in seg.words
+            ]
+
+            results.append(
+                {
+                    "filename": filename,
+                    "text": text,
+                    "words": words,
+                }
+            )
+
+        return results
+
+
+@app.cls(
+    gpu="L40S",
+    timeout=15 * MINUTES,
+    image=image,
+    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
+)
+class TranscriberWhisperFile:
+    """File transcriber for larger/longer audio, using VAD-driven batching (L40S)."""
+
+    @modal.enter()
+    def enter(self):
+        import faster_whisper
+        import torch
+        from silero_vad import load_silero_vad
+
+        self.lock = threading.Lock()
+        self.use_gpu = torch.cuda.is_available()
+        self.device = "cuda" if self.use_gpu else "cpu"
+        self.model = faster_whisper.WhisperModel(
+            MODEL_NAME,
+            device=self.device,
+            compute_type=MODEL_COMPUTE_TYPE,
+            num_workers=MODEL_NUM_WORKERS,
+            download_root=CACHE_PATH,
+            local_files_only=True,
+        )
+        self.vad_model = load_silero_vad(onnx=False)
+
+    @modal.method()
+    def transcribe_segment(
+        self, filename: str, timestamp_offset: float = 0.0, language: str = "en"
+    ):
+        import librosa
+        import numpy as np
+        from silero_vad import VADIterator
+
+        def vad_segments(
+            audio_array,
+            sample_rate: int = SAMPLERATE,
+            window_size: int = VAD_CONFIG["window_size"],
+        ) -> Generator[TimeSegment, None, None]:
+            """Generate speech segments as TimeSegment using Silero VAD."""
+            iterator = VADIterator(self.vad_model, sampling_rate=sample_rate)
+            start = None
+            for i in range(0, len(audio_array), window_size):
+                chunk = audio_array[i : i + window_size]
+                if len(chunk) < window_size:
+                    chunk = np.pad(
+                        chunk, (0, window_size - len(chunk)), mode="constant"
+                    )
+                speech = iterator(chunk)
+                if not speech:
+                    continue
+                if "start" in speech:
+                    start = speech["start"]
+                    continue
+                if "end" in speech and start is not None:
+                    end = speech["end"]
+                    yield TimeSegment(
+                        start / float(SAMPLERATE), end / float(SAMPLERATE)
+                    )
+                    start = None
+            iterator.reset_states()
+
+        upload_volume.reload()
+        file_path = f"{UPLOADS_PATH}/{filename}"
+        if not os.path.exists(file_path):
+            raise FileNotFoundError(f"File not found: {file_path}")
+
+        audio_array, _sr = librosa.load(file_path, sr=SAMPLERATE, mono=True)
+
+        # Batch segments up to ~30s windows by merging contiguous VAD segments
+        merged_batches: list[TimeSegment] = []
+        batch_start = None
+        batch_end = None
+        max_duration = VAD_CONFIG["batch_max_duration"]
+        for segment in vad_segments(audio_array):
+            seg_start, seg_end = segment.start, segment.end
+            if batch_start is None:
+                batch_start, batch_end = seg_start, seg_end
+                continue
+            if seg_end - batch_start <= max_duration:
+                batch_end = seg_end
+            else:
+                merged_batches.append(TimeSegment(batch_start, batch_end))
+                batch_start, batch_end = seg_start, seg_end
+        if batch_start is not None and batch_end is not None:
+            merged_batches.append(TimeSegment(batch_start, batch_end))
+
+        all_text = []
+        all_words = []
+
+        for segment in merged_batches:
+            start_time, end_time = segment.start, segment.end
+            s_idx = int(start_time * SAMPLERATE)
+            e_idx = int(end_time * SAMPLERATE)
+            segment = audio_array[s_idx:e_idx]
+            segment = pad_audio(segment, SAMPLERATE)
+
             with self.lock:
                 segments, _ = self.model.transcribe(
-                    fp.name,
+                    segment,
                     language=language,
                     beam_size=5,
                     word_timestamps=True,
@@ -96,66 +389,220 @@ class Transcriber:
                 )

             segments = list(segments)
-            text = "".join(segment.text for segment in segments)
+            text = "".join(seg.text for seg in segments).strip()
             words = [
-                {"word": word.word, "start": word.start, "end": word.end}
-                for segment in segments
-                for word in segment.words
+                {
+                    "word": w.word,
+                    "start": round(float(w.start) + start_time + timestamp_offset, 2),
+                    "end": round(float(w.end) + start_time + timestamp_offset, 2),
+                }
+                for seg in segments
+                for w in seg.words
             ]
+            if text:
+                all_text.append(text)
+                all_words.extend(words)

-        return {"text": text, "words": words}
+        return {"text": " ".join(all_text), "words": all_words}
+
+
+def detect_audio_format(url: str, headers: dict) -> str:
+    from urllib.parse import urlparse
+
+    from fastapi import HTTPException
+
+    url_path = urlparse(url).path
+    for ext in SUPPORTED_FILE_EXTENSIONS:
+        if url_path.lower().endswith(f".{ext}"):
+            return ext
+
+    content_type = headers.get("content-type", "").lower()
+    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
+        return "mp3"
+    if "audio/wav" in content_type:
+        return "wav"
+    if "audio/mp4" in content_type:
+        return "mp4"
+
+    raise HTTPException(
+        status_code=400,
+        detail=(
+            f"Unsupported audio format for URL. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
+        ),
+    )
+
+
+def download_audio_to_volume(audio_file_url: str) -> tuple[str, str]:
+    import requests
+    from fastapi import HTTPException
+
+    response = requests.head(audio_file_url, allow_redirects=True)
+    if response.status_code == 404:
+        raise HTTPException(status_code=404, detail="Audio file not found")
+
+    response = requests.get(audio_file_url, allow_redirects=True)
+    response.raise_for_status()
+
+    audio_suffix = detect_audio_format(audio_file_url, response.headers)
+    unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
+    file_path = f"{UPLOADS_PATH}/{unique_filename}"
+
+    with open(file_path, "wb") as f:
+        f.write(response.content)
+
+    upload_volume.commit()
+    return unique_filename, audio_suffix


 @app.function(
     scaledown_window=60,
-    timeout=60,
-    allow_concurrent_inputs=40,
+    timeout=600,
     secrets=[
         modal.Secret.from_name("reflector-gpu"),
     ],
-    volumes={MODELS_DIR: volume},
+    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
     image=image,
 )
+@modal.concurrent(max_inputs=40)
 @modal.asgi_app()
 def web():
-    from fastapi import Body, Depends, FastAPI, HTTPException, UploadFile, status
+    from fastapi import (
+        Body,
+        Depends,
+        FastAPI,
+        Form,
+        HTTPException,
+        UploadFile,
+        status,
+    )
     from fastapi.security import OAuth2PasswordBearer
-    from typing_extensions import Annotated

-    transcriber = Transcriber()
+    transcriber_live = TranscriberWhisperLive()
+    transcriber_file = TranscriberWhisperFile()

     app = FastAPI()

     oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

-    supported_file_types = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
-
     def apikey_auth(apikey: str = Depends(oauth2_scheme)):
-        if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
-            raise HTTPException(
-                status_code=status.HTTP_401_UNAUTHORIZED,
-                detail="Invalid API key",
-                headers={"WWW-Authenticate": "Bearer"},
-            )
+        if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
+            return
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Invalid API key",
+            headers={"WWW-Authenticate": "Bearer"},
+        )

-    class TranscriptResponse(BaseModel):
-        result: dict
+    class TranscriptResponse(dict):
+        pass

     @app.post("/v1/audio/transcriptions", dependencies=[Depends(apikey_auth)])
     def transcribe(
-        file: UploadFile,
-        model: str = "whisper-1",
-        language: Annotated[str, Body(...)] = "en",
-    ) -> TranscriptResponse:
-        audio_data = file.file.read()
-        audio_suffix = file.filename.split(".")[-1]
-        assert audio_suffix in supported_file_types
-
-        func = transcriber.transcribe_segment.spawn(
-            audio_data=audio_data,
-            audio_suffix=audio_suffix,
-            language=language,
-        )
-        result = func.get()
-        return result
+        file: UploadFile = None,
+        files: list[UploadFile] | None = None,
+        model: str = Form(MODEL_NAME),
+        language: str = Form("en"),
+        batch: bool = Form(False),
+    ):
+        if not file and not files:
+            raise HTTPException(
+                status_code=400, detail="Either 'file' or 'files' parameter is required"
+            )
+        if batch and not files:
+            raise HTTPException(
+                status_code=400, detail="Batch transcription requires 'files'"
+            )
+
+        upload_files = [file] if file else files
+
+        uploaded_filenames: list[str] = []
+        for upload_file in upload_files:
+            audio_suffix = upload_file.filename.split(".")[-1]
+            if audio_suffix not in SUPPORTED_FILE_EXTENSIONS:
+                raise HTTPException(
+                    status_code=400,
+                    detail=(
+                        f"Unsupported audio format. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
+                    ),
+                )
+
+            unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
+            file_path = f"{UPLOADS_PATH}/{unique_filename}"
+            with open(file_path, "wb") as f:
+                content = upload_file.file.read()
+                f.write(content)
+            uploaded_filenames.append(unique_filename)
+
+        upload_volume.commit()
+
+        try:
+            if batch and len(upload_files) > 1:
+                func = transcriber_live.transcribe_batch.spawn(
+                    filenames=uploaded_filenames,
+                    language=language,
+                )
+                results = func.get()
+                return {"results": results}
+
+            results = []
+            for filename in uploaded_filenames:
+                func = transcriber_live.transcribe_segment.spawn(
+                    filename=filename,
+                    language=language,
+                )
+                result = func.get()
+                result["filename"] = filename
+                results.append(result)
+
+            return {"results": results} if len(results) > 1 else results[0]
+        finally:
+            for filename in uploaded_filenames:
+                try:
+                    file_path = f"{UPLOADS_PATH}/{filename}"
+                    os.remove(file_path)
+                except Exception:
+                    pass
+            upload_volume.commit()
+
+    @app.post("/v1/audio/transcriptions-from-url", dependencies=[Depends(apikey_auth)])
+    def transcribe_from_url(
+        audio_file_url: str = Body(
+            ..., description="URL of the audio file to transcribe"
+        ),
+        model: str = Body(MODEL_NAME),
+        language: str = Body("en"),
+        timestamp_offset: float = Body(0.0),
+    ):
+        unique_filename, _audio_suffix = download_audio_to_volume(audio_file_url)
+        try:
+            func = transcriber_file.transcribe_segment.spawn(
+                filename=unique_filename,
+                timestamp_offset=timestamp_offset,
+                language=language,
+            )
+            result = func.get()
+            return result
+        finally:
+            try:
+                file_path = f"{UPLOADS_PATH}/{unique_filename}"
+                os.remove(file_path)
+                upload_volume.commit()
+            except Exception:
+                pass

     return app
+
+
+class NoStdStreams:
+    def __init__(self):
+        self.devnull = open(os.devnull, "w")
+
+    def __enter__(self):
+        self._stdout, self._stderr = sys.stdout, sys.stderr
+        self._stdout.flush()
+        self._stderr.flush()
+        sys.stdout, sys.stderr = self.devnull, self.devnull
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        sys.stdout, sys.stderr = self._stdout, self._stderr
+        self.devnull.close()
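A minimal client sketch against the reworked multi-file endpoint above. The deployment URL is a placeholder and the API key is read from the environment; the multipart `files` field, the `batch` form flag, and the `{"results": [...]}` response shape all come from the handler in this diff.

# Hypothetical client for the multi-file /v1/audio/transcriptions endpoint.
import os

import requests

MODAL_URL = "https://example--reflector-transcriber-web.modal.run"
headers = {"Authorization": f"Bearer {os.environ['REFLECTOR_GPU_APIKEY']}"}

with open("a.mp3", "rb") as a, open("b.mp3", "rb") as b:
    response = requests.post(
        f"{MODAL_URL}/v1/audio/transcriptions",
        headers=headers,
        files=[("files", ("a.mp3", a)), ("files", ("b.mp3", b))],
        data={"language": "en", "batch": "true"},
        timeout=600,
    )
response.raise_for_status()
for item in response.json()["results"]:
    print(item["filename"], item["text"][:80])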
server/gpu/modal_deployments/reflector_transcriber_parakeet.py (new file, 658 lines)
@@ -0,0 +1,658 @@
import logging
import os
import sys
import threading
import uuid
from typing import Generator, Mapping, NamedTuple, NewType, TypedDict
from urllib.parse import urlparse

import modal

MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"
SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
SAMPLERATE = 16000
UPLOADS_PATH = "/uploads"
CACHE_PATH = "/cache"
VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}

ParakeetUniqFilename = NewType("ParakeetUniqFilename", str)
AudioFileExtension = NewType("AudioFileExtension", str)


class TimeSegment(NamedTuple):
    """Represents a time segment with start and end times."""

    start: float
    end: float


class AudioSegment(NamedTuple):
    """Represents an audio segment with timing and audio data."""

    start: float
    end: float
    audio: any


class TranscriptResult(NamedTuple):
    """Represents a transcription result with text and word timings."""

    text: str
    words: list["WordTiming"]


class WordTiming(TypedDict):
    """Represents a word with its timing information."""

    word: str
    start: float
    end: float


app = modal.App("reflector-transcriber-parakeet")

# Volume for caching model weights
model_cache = modal.Volume.from_name("parakeet-model-cache", create_if_missing=True)
# Volume for temporary file uploads
upload_volume = modal.Volume.from_name("parakeet-uploads", create_if_missing=True)

image = (
    modal.Image.from_registry(
        "nvidia/cuda:12.8.0-cudnn-devel-ubuntu22.04", add_python="3.12"
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1",
            "HF_HOME": "/cache",
            "DEBIAN_FRONTEND": "noninteractive",
            "CXX": "g++",
            "CC": "g++",
        }
    )
    .apt_install("ffmpeg")
    .pip_install(
        "hf_transfer==0.1.9",
        "huggingface_hub[hf-xet]==0.31.2",
        "nemo_toolkit[asr]==2.3.0",
        "cuda-python==12.8.0",
        "fastapi==0.115.12",
        "numpy<2",
        "librosa==0.10.1",
        "requests",
        "silero-vad==5.1.0",
        "torch",
    )
    .entrypoint([])  # silence chatty logs emitted by the container on start
)


def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
    parsed_url = urlparse(url)
    url_path = parsed_url.path

    for ext in SUPPORTED_FILE_EXTENSIONS:
        if url_path.lower().endswith(f".{ext}"):
            return AudioFileExtension(ext)

    content_type = headers.get("content-type", "").lower()
    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
        return AudioFileExtension("mp3")
    if "audio/wav" in content_type:
        return AudioFileExtension("wav")
    if "audio/mp4" in content_type:
        return AudioFileExtension("mp4")

    raise ValueError(
        f"Unsupported audio format for URL: {url}. "
        f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
    )


def download_audio_to_volume(
    audio_file_url: str,
) -> tuple[ParakeetUniqFilename, AudioFileExtension]:
    import requests
    from fastapi import HTTPException

    response = requests.head(audio_file_url, allow_redirects=True)
    if response.status_code == 404:
        raise HTTPException(status_code=404, detail="Audio file not found")

    response = requests.get(audio_file_url, allow_redirects=True)
    response.raise_for_status()

    audio_suffix = detect_audio_format(audio_file_url, response.headers)
    unique_filename = ParakeetUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
    file_path = f"{UPLOADS_PATH}/{unique_filename}"

    with open(file_path, "wb") as f:
        f.write(response.content)

    upload_volume.commit()
    return unique_filename, audio_suffix


def pad_audio(audio_array, sample_rate: int = SAMPLERATE):
    """Add 0.5 seconds of silence if audio is less than 500ms.

    This is a workaround for a Parakeet bug where very short audio (<500ms) causes:
    ValueError: `char_offsets`: [] and `processed_tokens`: [157, 834, 834, 841]
    have to be of the same length

    See: https://github.com/NVIDIA/NeMo/issues/8451
    """
    import numpy as np

    audio_duration = len(audio_array) / sample_rate
    if audio_duration < 0.5:
        silence_samples = int(sample_rate * 0.5)
        silence = np.zeros(silence_samples, dtype=np.float32)
        return np.concatenate([audio_array, silence])
    return audio_array


@app.cls(
    gpu="A10G",
    timeout=600,
    scaledown_window=300,
    image=image,
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    enable_memory_snapshot=True,
    experimental_options={"enable_gpu_snapshot": True},
)
@modal.concurrent(max_inputs=10)
class TranscriberParakeetLive:
    @modal.enter(snap=True)
    def enter(self):
        import nemo.collections.asr as nemo_asr

        logging.getLogger("nemo_logger").setLevel(logging.CRITICAL)

        self.lock = threading.Lock()
        self.model = nemo_asr.models.ASRModel.from_pretrained(model_name=MODEL_NAME)
        device = next(self.model.parameters()).device
        print(f"Model is on device: {device}")

    @modal.method()
    def transcribe_segment(
        self,
        filename: str,
    ):
        import librosa

        upload_volume.reload()

        file_path = f"{UPLOADS_PATH}/{filename}"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
        padded_audio = pad_audio(audio_array, sample_rate)

        with self.lock:
            with NoStdStreams():
                (output,) = self.model.transcribe([padded_audio], timestamps=True)

        text = output.text.strip()
        words: list[WordTiming] = [
            WordTiming(
                # XXX the space added here is to match the output of whisper:
                # whisper adds a space to each word, while parakeet doesn't
                word=word_info["word"] + " ",
                start=round(word_info["start"], 2),
                end=round(word_info["end"], 2),
            )
            for word_info in output.timestamp["word"]
        ]

        return {"text": text, "words": words}

    @modal.method()
    def transcribe_batch(
        self,
        filenames: list[str],
    ):
        import librosa

        upload_volume.reload()

        results = []
        audio_arrays = []

        # Load all audio files with padding
        for filename in filenames:
            file_path = f"{UPLOADS_PATH}/{filename}"
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Batch file not found: {file_path}")

            audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
            padded_audio = pad_audio(audio_array, sample_rate)
            audio_arrays.append(padded_audio)

        with self.lock:
            with NoStdStreams():
                outputs = self.model.transcribe(audio_arrays, timestamps=True)

        # Process results for each file
        for i, (filename, output) in enumerate(zip(filenames, outputs)):
            text = output.text.strip()

            words: list[WordTiming] = [
                WordTiming(
                    word=word_info["word"] + " ",
                    start=round(word_info["start"], 2),
                    end=round(word_info["end"], 2),
                )
                for word_info in output.timestamp["word"]
            ]

            results.append(
                {
                    "filename": filename,
                    "text": text,
                    "words": words,
                }
            )

        return results


# L40S class for file transcription (bigger files)
@app.cls(
    gpu="L40S",
    timeout=900,
    image=image,
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    enable_memory_snapshot=True,
    experimental_options={"enable_gpu_snapshot": True},
)
class TranscriberParakeetFile:
    @modal.enter(snap=True)
    def enter(self):
        import nemo.collections.asr as nemo_asr
        import torch
        from silero_vad import load_silero_vad

        logging.getLogger("nemo_logger").setLevel(logging.CRITICAL)

        self.model = nemo_asr.models.ASRModel.from_pretrained(model_name=MODEL_NAME)
        device = next(self.model.parameters()).device
        print(f"Model is on device: {device}")

        torch.set_num_threads(1)
        self.vad_model = load_silero_vad(onnx=False)
        print("Silero VAD initialized")

    @modal.method()
    def transcribe_segment(
        self,
        filename: str,
        timestamp_offset: float = 0.0,
    ):
        import librosa
        import numpy as np
        from silero_vad import VADIterator

        def load_and_convert_audio(file_path):
            audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
            return audio_array

        def vad_segment_generator(
            audio_array,
        ) -> Generator[TimeSegment, None, None]:
            """Generate speech segments using VAD with start/end sample indices"""
            vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
            window_size = VAD_CONFIG["window_size"]
            start = None

            for i in range(0, len(audio_array), window_size):
                chunk = audio_array[i : i + window_size]
                if len(chunk) < window_size:
                    chunk = np.pad(
                        chunk, (0, window_size - len(chunk)), mode="constant"
                    )

                speech_dict = vad_iterator(chunk)
                if not speech_dict:
                    continue

                if "start" in speech_dict:
                    start = speech_dict["start"]
                    continue

                if "end" in speech_dict and start is not None:
                    end = speech_dict["end"]
                    start_time = start / float(SAMPLERATE)
                    end_time = end / float(SAMPLERATE)

                    yield TimeSegment(start_time, end_time)
                    start = None

            vad_iterator.reset_states()

        def batch_speech_segments(
            segments: Generator[TimeSegment, None, None], max_duration: int
        ) -> Generator[TimeSegment, None, None]:
            """
            Input segments:
            [0-2] [3-5] [6-8] [10-11] [12-15] [17-19] [20-22]

            ↓ (max_duration=10)

            Output batches:
            [0-8] [10-19] [20-22]

            Note: silences are kept for better transcription; a previous
            implementation passed segments separately, but the output was
            less accurate.
            """
            batch_start_time = None
            batch_end_time = None

            for segment in segments:
                start_time, end_time = segment.start, segment.end
                if batch_start_time is None or batch_end_time is None:
                    batch_start_time = start_time
                    batch_end_time = end_time
                    continue

                total_duration = end_time - batch_start_time

                if total_duration <= max_duration:
                    batch_end_time = end_time
                    continue

                yield TimeSegment(batch_start_time, batch_end_time)
                batch_start_time = start_time
                batch_end_time = end_time

            if batch_start_time is None or batch_end_time is None:
                return

            yield TimeSegment(batch_start_time, batch_end_time)

        def batch_segment_to_audio_segment(
            segments: Generator[TimeSegment, None, None],
            audio_array,
        ) -> Generator[AudioSegment, None, None]:
            """Extract audio segments and apply padding for Parakeet compatibility.

            Uses pad_audio to ensure segments are at least 0.5s long, preventing
            Parakeet crashes. This padding may cause slight timing overlaps between
            segments, which are corrected by enforce_word_timing_constraints.
            """
            for segment in segments:
                start_time, end_time = segment.start, segment.end
                start_sample = int(start_time * SAMPLERATE)
                end_sample = int(end_time * SAMPLERATE)
                audio_segment = audio_array[start_sample:end_sample]

                padded_segment = pad_audio(audio_segment, SAMPLERATE)

                yield AudioSegment(start_time, end_time, padded_segment)

        def transcribe_batch(model, audio_segments: list) -> list:
            with NoStdStreams():
                outputs = model.transcribe(audio_segments, timestamps=True)
            return outputs

        def enforce_word_timing_constraints(
            words: list[WordTiming],
        ) -> list[WordTiming]:
            """Enforce that word end times don't exceed the start time of the next word.

            Due to silence padding added in batch_segment_to_audio_segment for better
            transcription accuracy, word timings from different segments may overlap.
            This function ensures there are no overlaps by adjusting end times.
            """
            if len(words) <= 1:
                return words

            enforced_words = []
            for i, word in enumerate(words):
                enforced_word = word.copy()

                if i < len(words) - 1:
                    next_start = words[i + 1]["start"]
                    if enforced_word["end"] > next_start:
                        enforced_word["end"] = next_start

                enforced_words.append(enforced_word)

            return enforced_words

        def emit_results(
            results: list,
            segments_info: list[AudioSegment],
        ) -> Generator[TranscriptResult, None, None]:
            """Yield transcribed text and word timings from model output, adjusting timestamps to absolute positions."""
            for i, (output, segment) in enumerate(zip(results, segments_info)):
                start_time, end_time = segment.start, segment.end
                text = output.text.strip()
                words: list[WordTiming] = [
                    WordTiming(
                        word=word_info["word"] + " ",
                        start=round(
                            word_info["start"] + start_time + timestamp_offset, 2
                        ),
                        end=round(word_info["end"] + start_time + timestamp_offset, 2),
                    )
                    for word_info in output.timestamp["word"]
                ]

                yield TranscriptResult(text, words)

        upload_volume.reload()

        file_path = f"{UPLOADS_PATH}/{filename}"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        audio_array = load_and_convert_audio(file_path)
        total_duration = len(audio_array) / float(SAMPLERATE)

        all_text_parts: list[str] = []
        all_words: list[WordTiming] = []

        raw_segments = vad_segment_generator(audio_array)
        speech_segments = batch_speech_segments(
            raw_segments,
            VAD_CONFIG["batch_max_duration"],
        )
        audio_segments = batch_segment_to_audio_segment(speech_segments, audio_array)

        for batch in audio_segments:
            audio_segment = batch.audio
            results = transcribe_batch(self.model, [audio_segment])

            for result in emit_results(
                results,
                [batch],
            ):
                if not result.text:
                    continue
                all_text_parts.append(result.text)
                all_words.extend(result.words)

        all_words = enforce_word_timing_constraints(all_words)

        combined_text = " ".join(all_text_parts)
        return {"text": combined_text, "words": all_words}


@app.function(
    scaledown_window=60,
    timeout=600,
    secrets=[
        modal.Secret.from_name("reflector-gpu"),
    ],
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    image=image,
)
@modal.concurrent(max_inputs=40)
@modal.asgi_app()
def web():
    import os
    import uuid

    from fastapi import (
        Body,
        Depends,
        FastAPI,
        Form,
        HTTPException,
        UploadFile,
        status,
    )
    from fastapi.security import OAuth2PasswordBearer
    from pydantic import BaseModel

    transcriber_live = TranscriberParakeetLive()
    transcriber_file = TranscriberParakeetFile()

    app = FastAPI()

    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
            return
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
            headers={"WWW-Authenticate": "Bearer"},
        )

    class TranscriptResponse(BaseModel):
        result: dict

    @app.post("/v1/audio/transcriptions", dependencies=[Depends(apikey_auth)])
    def transcribe(
        file: UploadFile = None,
        files: list[UploadFile] | None = None,
        model: str = Form(MODEL_NAME),
        language: str = Form("en"),
        batch: bool = Form(False),
    ):
        # Parakeet only supports English
        if language != "en":
            raise HTTPException(
                status_code=400,
                detail=f"Parakeet model only supports English. Got language='{language}'",
            )
        # Handle both single file and multiple files
        if not file and not files:
            raise HTTPException(
                status_code=400, detail="Either 'file' or 'files' parameter is required"
            )
        if batch and not files:
            raise HTTPException(
                status_code=400, detail="Batch transcription requires 'files'"
            )

        upload_files = [file] if file else files

        # Upload files to volume
        uploaded_filenames = []
        for upload_file in upload_files:
            audio_suffix = upload_file.filename.split(".")[-1]
            assert audio_suffix in SUPPORTED_FILE_EXTENSIONS

            # Generate unique filename
            unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
            file_path = f"{UPLOADS_PATH}/{unique_filename}"

            print(f"Writing file to: {file_path}")
            with open(file_path, "wb") as f:
                content = upload_file.file.read()
                f.write(content)

            uploaded_filenames.append(unique_filename)

        upload_volume.commit()

        try:
            # Use A10G live transcriber for per-file transcription
            if batch and len(upload_files) > 1:
                # Use batch transcription
                func = transcriber_live.transcribe_batch.spawn(
                    filenames=uploaded_filenames,
                )
                results = func.get()
                return {"results": results}

            # Per-file transcription
            results = []
            for filename in uploaded_filenames:
                func = transcriber_live.transcribe_segment.spawn(
                    filename=filename,
                )
                result = func.get()
                result["filename"] = filename
                results.append(result)

            return {"results": results} if len(results) > 1 else results[0]

        finally:
            for filename in uploaded_filenames:
                try:
                    file_path = f"{UPLOADS_PATH}/{filename}"
                    print(f"Deleting file: {file_path}")
                    os.remove(file_path)
                except Exception as e:
                    print(f"Error deleting {filename}: {e}")

            upload_volume.commit()

    @app.post("/v1/audio/transcriptions-from-url", dependencies=[Depends(apikey_auth)])
    def transcribe_from_url(
        audio_file_url: str = Body(
            ..., description="URL of the audio file to transcribe"
        ),
        model: str = Body(MODEL_NAME),
        language: str = Body("en", description="Language code (only 'en' supported)"),
        timestamp_offset: float = Body(0.0),
    ):
        # Parakeet only supports English
        if language != "en":
            raise HTTPException(
                status_code=400,
                detail=f"Parakeet model only supports English. Got language='{language}'",
            )
        unique_filename, audio_suffix = download_audio_to_volume(audio_file_url)

        try:
            func = transcriber_file.transcribe_segment.spawn(
                filename=unique_filename,
                timestamp_offset=timestamp_offset,
            )
            result = func.get()
            return result
        finally:
            try:
                file_path = f"{UPLOADS_PATH}/{unique_filename}"
                print(f"Deleting file: {file_path}")
                os.remove(file_path)
                upload_volume.commit()
            except Exception as e:
                print(f"Error cleaning up {unique_filename}: {e}")

    return app


class NoStdStreams:
    def __init__(self):
        self.devnull = open(os.devnull, "w")

    def __enter__(self):
        self._stdout, self._stderr = sys.stdout, sys.stderr
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout, sys.stderr = self.devnull, self.devnull

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout, sys.stderr = self._stdout, self._stderr
        self.devnull.close()
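The batch_speech_segments docstring above includes a worked example that is easy to sanity-check in isolation. A minimal standalone sketch of the same merge rule, assuming only a local TimeSegment tuple (no Modal or NeMo required):

# Standalone check of the max-duration merge logic documented above.
# TimeSegment mirrors the NamedTuple defined in the deployment file.
from typing import NamedTuple


class TimeSegment(NamedTuple):
    start: float
    end: float


def batch_speech_segments(segments, max_duration):
    # Merge consecutive VAD segments until the merged window would exceed
    # max_duration, keeping inter-segment silences inside each batch.
    batch_start, batch_end = None, None
    for seg in segments:
        if batch_start is None:
            batch_start, batch_end = seg.start, seg.end
            continue
        if seg.end - batch_start <= max_duration:
            batch_end = seg.end
            continue
        yield TimeSegment(batch_start, batch_end)
        batch_start, batch_end = seg.start, seg.end
    if batch_start is not None:
        yield TimeSegment(batch_start, batch_end)


segments = [
    TimeSegment(*p)
    for p in [(0, 2), (3, 5), (6, 8), (10, 11), (12, 15), (17, 19), (20, 22)]
]
print(list(batch_speech_segments(segments, max_duration=10)))
# -> [TimeSegment(0, 8), TimeSegment(10, 19), TimeSegment(20, 22)],
#    matching the docstring's example output.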
@@ -0,0 +1,36 @@
"""Add webhook fields to rooms

Revision ID: 0194f65cd6d3
Revises: 5a8907fd1d78
Create Date: 2025-08-27 09:03:19.610995

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0194f65cd6d3"
down_revision: Union[str, None] = "5a8907fd1d78"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.add_column(sa.Column("webhook_url", sa.String(), nullable=True))
        batch_op.add_column(sa.Column("webhook_secret", sa.String(), nullable=True))

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.drop_column("webhook_secret")
        batch_op.drop_column("webhook_url")

    # ### end Alembic commands ###
@@ -0,0 +1,64 @@
"""add_long_summary_to_search_vector

Revision ID: 0ab2d7ffaa16
Revises: b1c33bd09963
Create Date: 2025-08-15 13:27:52.680211

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0ab2d7ffaa16"
down_revision: Union[str, None] = "b1c33bd09963"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Drop the existing search vector column and index
    op.drop_index("idx_transcript_search_vector_en", table_name="transcript")
    op.drop_column("transcript", "search_vector_en")

    # Recreate the search vector column with long_summary included
    op.execute("""
        ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
        GENERATED ALWAYS AS (
            setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
            setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') ||
            setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')
        ) STORED
    """)

    # Recreate the GIN index for the search vector
    op.create_index(
        "idx_transcript_search_vector_en",
        "transcript",
        ["search_vector_en"],
        postgresql_using="gin",
    )


def downgrade() -> None:
    # Drop the updated search vector column and index
    op.drop_index("idx_transcript_search_vector_en", table_name="transcript")
    op.drop_column("transcript", "search_vector_en")

    # Recreate the original search vector column without long_summary
    op.execute("""
        ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
        GENERATED ALWAYS AS (
            setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
            setweight(to_tsvector('english', coalesce(webvtt, '')), 'B')
        ) STORED
    """)

    # Recreate the GIN index for the search vector
    op.create_index(
        "idx_transcript_search_vector_en",
        "transcript",
        ["search_vector_en"],
        postgresql_using="gin",
    )
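For context, a sketch of the kind of weighted, ranked query the regenerated column serves. The table and column names come from the migration above; the search term and the surrounding connection handling are illustrative only, not code from this diff.

# Hypothetical ranked full-text query against transcript.search_vector_en.
import sqlalchemy as sa

search_query = sa.text("""
    SELECT id, title,
           ts_rank(search_vector_en, websearch_to_tsquery('english', :q)) AS rank
    FROM transcript
    WHERE search_vector_en @@ websearch_to_tsquery('english', :q)
    ORDER BY rank DESC
    LIMIT 20
""")
# rows = connection.execute(search_query, {"q": "quarterly roadmap"}).fetchall()
# Title matches rank highest ('A'), then long_summary ('B'), then webvtt ('C').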
@@ -0,0 +1,36 @@
"""remove user_id from meeting table

Revision ID: 0ce521cda2ee
Revises: 6dec9fb5b46c
Create Date: 2025-09-10 12:40:55.688899

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0ce521cda2ee"
down_revision: Union[str, None] = "6dec9fb5b46c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_column("user_id")

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("user_id", sa.VARCHAR(), autoincrement=False, nullable=True)
        )

    # ### end Alembic commands ###
@@ -0,0 +1,32 @@
"""clean up orphaned room_id references in meeting table

Revision ID: 2ae3db106d4e
Revises: def1b5867d4c
Create Date: 2025-09-11 10:35:15.759967

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "2ae3db106d4e"
down_revision: Union[str, None] = "def1b5867d4c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Set room_id to NULL for meetings that reference non-existent rooms
    op.execute("""
        UPDATE meeting
        SET room_id = NULL
        WHERE room_id IS NOT NULL
        AND room_id NOT IN (SELECT id FROM room WHERE id IS NOT NULL)
    """)


def downgrade() -> None:
    # Cannot restore orphaned references - no operation needed
    pass
@@ -0,0 +1,50 @@
"""add cascade delete to meeting consent foreign key

Revision ID: 5a8907fd1d78
Revises: 0ab2d7ffaa16
Create Date: 2025-08-26 17:26:50.945491

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "5a8907fd1d78"
down_revision: Union[str, None] = "0ab2d7ffaa16"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
        batch_op.drop_constraint(
            batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey"
        )
        batch_op.create_foreign_key(
            batch_op.f("meeting_consent_meeting_id_fkey"),
            "meeting",
            ["meeting_id"],
            ["id"],
            ondelete="CASCADE",
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
        batch_op.drop_constraint(
            batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey"
        )
        batch_op.create_foreign_key(
            batch_op.f("meeting_consent_meeting_id_fkey"),
            "meeting",
            ["meeting_id"],
            ["id"],
        )

    # ### end Alembic commands ###
@@ -0,0 +1,28 @@
"""webhook url and secret null by default


Revision ID: 61882a919591
Revises: 0194f65cd6d3
Create Date: 2025-08-29 11:46:36.738091

"""

from typing import Sequence, Union

# revision identifiers, used by Alembic.
revision: str = "61882a919591"
down_revision: Union[str, None] = "0194f65cd6d3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
@@ -0,0 +1,38 @@
"""make meeting room_id required and add foreign key

Revision ID: 6dec9fb5b46c
Revises: 61882a919591
Create Date: 2025-09-10 10:47:06.006819

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "6dec9fb5b46c"
down_revision: Union[str, None] = "61882a919591"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=False)
        batch_op.create_foreign_key(
            None, "room", ["room_id"], ["id"], ondelete="CASCADE"
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_constraint("meeting_room_id_fkey", type_="foreignkey")
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=True)

    # ### end Alembic commands ###
@@ -0,0 +1,41 @@
"""add_search_optimization_indexes

Revision ID: b1c33bd09963
Revises: 9f5c78d352d6
Create Date: 2025-08-14 17:26:02.117408

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "b1c33bd09963"
down_revision: Union[str, None] = "9f5c78d352d6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Add indexes for actual search filtering patterns used in frontend
    # Based on /browse page filters: room_id and source_kind

    # Index for room_id + created_at (for room-specific searches with date ordering)
    op.create_index(
        "idx_transcript_room_id_created_at",
        "transcript",
        ["room_id", "created_at"],
        if_not_exists=True,
    )

    # Index for source_kind alone (actively used filter in frontend)
    op.create_index(
        "idx_transcript_source_kind", "transcript", ["source_kind"], if_not_exists=True
    )


def downgrade() -> None:
    # Remove the indexes in reverse order
    op.drop_index("idx_transcript_source_kind", "transcript", if_exists=True)
    op.drop_index("idx_transcript_room_id_created_at", "transcript", if_exists=True)
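A sketch of the query shape the composite index above is meant to serve. The column names come from the migration; the `transcripts` table object and the function wrapper are stand-ins for whatever the server code actually defines.

# Illustrative query served by idx_transcript_room_id_created_at.
import sqlalchemy as sa

def browse_query(transcripts: sa.Table, room_id: str) -> sa.Select:
    # Filter by room, newest first: matches the (room_id, created_at) index.
    return (
        sa.select(transcripts)
        .where(transcripts.c.room_id == room_id)
        .order_by(transcripts.c.created_at.desc())
    )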
@@ -0,0 +1,34 @@
"""make meeting room_id nullable but keep foreign key

Revision ID: def1b5867d4c
Revises: 0ce521cda2ee
Create Date: 2025-09-11 09:42:18.697264

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "def1b5867d4c"
down_revision: Union[str, None] = "0ce521cda2ee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=True)

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=False)

    # ### end Alembic commands ###
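The migrations above are applied in order through their down_revision chain. A minimal sketch of running them programmatically with Alembic's command API; the alembic.ini path is an assumption about this repo's layout, not something stated in the diff:

    from alembic import command
    from alembic.config import Config

    config = Config("server/alembic.ini")  # assumed location of the Alembic config
    command.upgrade(config, "head")  # applies pending migrations in revision order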
@@ -27,12 +27,10 @@ dependencies = [
    "prometheus-fastapi-instrumentator>=6.1.0",
    "sentencepiece>=0.1.99",
    "protobuf>=4.24.3",
    "profanityfilter>=2.0.6",
    "celery>=5.3.4",
    "redis>=5.0.1",
    "python-jose[cryptography]>=3.3.0",
    "python-multipart>=0.0.6",
    "faster-whisper>=0.10.0",
    "transformers>=4.36.2",
    "jsonschema>=4.23.0",
    "openai>=1.59.7",
@@ -57,6 +55,7 @@ tests = [
    "httpx-ws>=0.4.1",
    "pytest-httpx>=0.23.1",
    "pytest-celery>=0.0.0",
    "pytest-recording>=0.13.4",
    "pytest-docker>=3.2.3",
    "asgi-lifespan>=2.1.0",
]
@@ -67,6 +66,15 @@ evaluation = [
    "tqdm>=4.66.0",
    "pydantic>=2.1.1",
]
local = [
    "pyannote-audio>=3.3.2",
    "faster-whisper>=0.10.0",
]
silero-vad = [
    "silero-vad>=5.1.2",
    "torch>=2.8.0",
    "torchaudio>=2.8.0",
]

[tool.uv]
default-groups = [
@@ -74,6 +82,21 @@ default-groups = [
    "tests",
    "aws",
    "evaluation",
    "local",
    "silero-vad"
]

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[tool.uv.sources]
torch = [
    { index = "pytorch-cpu" },
]
torchaudio = [
    { index = "pytorch-cpu" },
]

[build-system]
@@ -94,6 +117,9 @@ DATABASE_URL = "postgresql://test_user:test_password@localhost:15432/reflector_t
addopts = "-ra -q --disable-pytest-warnings --cov --cov-report html -v"
testpaths = ["tests"]
asyncio_mode = "auto"
markers = [
    "gpu_modal: mark test to run only with GPU Modal endpoints (deselect with '-m \"not gpu_modal\"')",
]

[tool.ruff.lint]
select = [
27
server/reflector/asynctask.py
Normal file
@@ -0,0 +1,27 @@
import asyncio
import functools

from reflector.db import get_database


def asynctask(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        async def run_with_db():
            database = get_database()
            await database.connect()
            try:
                return await f(*args, **kwargs)
            finally:
                await database.disconnect()

        coro = run_with_db()
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None
        if loop and loop.is_running():
            return loop.run_until_complete(coro)
        return asyncio.run(coro)

    return wrapper
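A hedged usage sketch of the extracted asynctask decorator, mirroring the @shared_task + @asynctask stacking used by the Celery tasks later in this diff; the task name and body are illustrative only:

    from celery import shared_task

    from reflector.asynctask import asynctask
    from reflector.db.transcripts import transcripts_controller


    @shared_task
    @asynctask
    async def task_touch_transcript(*, transcript_id: str):  # hypothetical task
        # asynctask connects the database before the coroutine runs and
        # disconnects it afterwards, even on error.
        return await transcripts_controller.get_by_id(transcript_id)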
@@ -2,7 +2,6 @@ from datetime import datetime
from typing import Literal

import sqlalchemy as sa
from fastapi import HTTPException
from pydantic import BaseModel, Field

from reflector.db import get_database, metadata
@@ -18,8 +17,12 @@ meetings = sa.Table(
    sa.Column("host_room_url", sa.String),
    sa.Column("start_date", sa.DateTime(timezone=True)),
    sa.Column("end_date", sa.DateTime(timezone=True)),
    sa.Column("user_id", sa.String),
    sa.Column("room_id", sa.String),
    sa.Column(
        "room_id",
        sa.String,
        sa.ForeignKey("room.id", ondelete="CASCADE"),
        nullable=True,
    ),
    sa.Column("is_locked", sa.Boolean, nullable=False, server_default=sa.false()),
    sa.Column("room_mode", sa.String, nullable=False, server_default="normal"),
    sa.Column("recording_type", sa.String, nullable=False, server_default="cloud"),
@@ -54,7 +57,12 @@ meeting_consent = sa.Table(
    "meeting_consent",
    metadata,
    sa.Column("id", sa.String, primary_key=True),
    sa.Column("meeting_id", sa.String, sa.ForeignKey("meeting.id"), nullable=False),
    sa.Column(
        "meeting_id",
        sa.String,
        sa.ForeignKey("meeting.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column("user_id", sa.String),
    sa.Column("consent_given", sa.Boolean, nullable=False),
    sa.Column("consent_timestamp", sa.DateTime(timezone=True), nullable=False),
@@ -76,8 +84,7 @@ class Meeting(BaseModel):
    host_room_url: str
    start_date: datetime
    end_date: datetime
    user_id: str | None = None
    room_id: str | None = None
    room_id: str | None
    is_locked: bool = False
    room_mode: Literal["normal", "group"] = "normal"
    recording_type: Literal["none", "local", "cloud"] = "cloud"
@@ -96,12 +103,8 @@ class MeetingController:
        host_room_url: str,
        start_date: datetime,
        end_date: datetime,
        user_id: str,
        room: Room,
    ):
        """
        Create a new meeting
        """
        meeting = Meeting(
            id=id,
            room_name=room_name,
@@ -109,7 +112,6 @@ class MeetingController:
            host_room_url=host_room_url,
            start_date=start_date,
            end_date=end_date,
            user_id=user_id,
            room_id=room.id,
            is_locked=room.is_locked,
            room_mode=room.room_mode,
@@ -121,19 +123,13 @@ class MeetingController:
        return meeting

    async def get_all_active(self) -> list[Meeting]:
        """
        Get active meetings.
        """
        query = meetings.select().where(meetings.c.is_active)
        return await get_database().fetch_all(query)

    async def get_by_room_name(
        self,
        room_name: str,
    ) -> Meeting:
        """
        Get a meeting by room name.
        """
    ) -> Meeting | None:
        query = meetings.select().where(meetings.c.room_name == room_name)
        result = await get_database().fetch_one(query)
        if not result:
@@ -141,10 +137,7 @@ class MeetingController:

        return Meeting(**result)

    async def get_active(self, room: Room, current_time: datetime) -> Meeting:
        """
        Get latest active meeting for a room.
        """
    async def get_active(self, room: Room, current_time: datetime) -> Meeting | None:
        end_date = getattr(meetings.c, "end_date")
        query = (
            meetings.select()
@@ -164,32 +157,12 @@ class MeetingController:
        return Meeting(**result)

    async def get_by_id(self, meeting_id: str, **kwargs) -> Meeting | None:
        """
        Get a meeting by id
        """
        query = meetings.select().where(meetings.c.id == meeting_id)
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Meeting(**result)

    async def get_by_id_for_http(self, meeting_id: str, user_id: str | None) -> Meeting:
        """
        Get a meeting by ID for HTTP request.

        If not found, it will raise a 404 error.
        """
        query = meetings.select().where(meetings.c.id == meeting_id)
        result = await get_database().fetch_one(query)
        if not result:
            raise HTTPException(status_code=404, detail="Meeting not found")

        meeting = Meeting(**result)
        if result["user_id"] != user_id:
            meeting.host_room_url = ""

        return meeting

    async def update_meeting(self, meeting_id: str, **kwargs):
        query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
        await get_database().execute(query)
@@ -214,7 +187,7 @@ class MeetingConsentController:
        result = await get_database().fetch_one(query)
        if result is None:
            return None
        return MeetingConsent(**result) if result else None
        return MeetingConsent(**result)

    async def upsert(self, consent: MeetingConsent) -> MeetingConsent:
        """Create new consent or update existing one for authenticated users"""
@@ -1,3 +1,4 @@
import secrets
from datetime import datetime, timezone
from sqlite3 import IntegrityError
from typing import Literal
@@ -40,6 +41,8 @@ rooms = sqlalchemy.Table(
    sqlalchemy.Column(
        "is_shared", sqlalchemy.Boolean, nullable=False, server_default=false()
    ),
    sqlalchemy.Column("webhook_url", sqlalchemy.String, nullable=True),
    sqlalchemy.Column("webhook_secret", sqlalchemy.String, nullable=True),
    sqlalchemy.Index("idx_room_is_shared", "is_shared"),
)

@@ -59,6 +62,8 @@ class Room(BaseModel):
        "none", "prompt", "automatic", "automatic-2nd-participant"
    ] = "automatic-2nd-participant"
    is_shared: bool = False
    webhook_url: str | None = None
    webhook_secret: str | None = None


class RoomController:
@@ -107,10 +112,15 @@ class RoomController:
        recording_type: str,
        recording_trigger: str,
        is_shared: bool,
        webhook_url: str = "",
        webhook_secret: str = "",
    ):
        """
        Add a new room
        """
        if webhook_url and not webhook_secret:
            webhook_secret = secrets.token_urlsafe(32)

        room = Room(
            name=name,
            user_id=user_id,
@@ -122,6 +132,8 @@ class RoomController:
            recording_type=recording_type,
            recording_trigger=recording_trigger,
            is_shared=is_shared,
            webhook_url=webhook_url,
            webhook_secret=webhook_secret,
        )
        query = rooms.insert().values(**room.model_dump())
        try:
@@ -134,6 +146,9 @@ class RoomController:
        """
        Update a room's fields with the key/values in values
        """
        if values.get("webhook_url") and not values.get("webhook_secret"):
            values["webhook_secret"] = secrets.token_urlsafe(32)

        query = rooms.update().where(rooms.c.id == room.id).values(**values)
        try:
            await get_database().execute(query)
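A minimal sketch of the webhook-secret behaviour added above: a secret is generated only when a webhook URL is set without one (the payload below is illustrative):

    import secrets

    values = {"webhook_url": "https://example.com/hook"}  # hypothetical update payload
    if values.get("webhook_url") and not values.get("webhook_secret"):
        values["webhook_secret"] = secrets.token_urlsafe(32)  # same length as the diff uses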
@@ -1,22 +1,38 @@
"""Search functionality for transcripts and other entities."""

import itertools
from dataclasses import dataclass
from datetime import datetime
from io import StringIO
from typing import Annotated, Any, Dict
from typing import Annotated, Any, Dict, Iterator

import sqlalchemy
import webvtt
from pydantic import BaseModel, Field, constr, field_serializer
from databases.interfaces import Record as DbRecord
from fastapi import HTTPException
from pydantic import (
    BaseModel,
    Field,
    NonNegativeFloat,
    NonNegativeInt,
    TypeAdapter,
    ValidationError,
    constr,
    field_serializer,
)

from reflector.db import get_database
from reflector.db.transcripts import SourceKind, transcripts
from reflector.db.rooms import rooms
from reflector.db.transcripts import SourceKind, TranscriptStatus, transcripts
from reflector.db.utils import is_postgresql
from reflector.logger import logger
from reflector.utils.string import NonEmptyString, try_parse_non_empty_string

DEFAULT_SEARCH_LIMIT = 20
SNIPPET_CONTEXT_LENGTH = 50  # Characters before/after match to include
DEFAULT_SNIPPET_MAX_LENGTH = 150
DEFAULT_MAX_SNIPPETS = 3
DEFAULT_SNIPPET_MAX_LENGTH = NonNegativeInt(150)
DEFAULT_MAX_SNIPPETS = NonNegativeInt(3)
LONG_SUMMARY_MAX_SNIPPETS = 2

SearchQueryBase = constr(min_length=1, strip_whitespace=True)
SearchLimitBase = Annotated[int, Field(ge=1, le=100)]
@@ -24,6 +40,7 @@ SearchOffsetBase = Annotated[int, Field(ge=0)]
SearchTotalBase = Annotated[int, Field(ge=0)]

SearchQuery = Annotated[SearchQueryBase, Field(description="Search query text")]
search_query_adapter = TypeAdapter(SearchQuery)
SearchLimit = Annotated[SearchLimitBase, Field(description="Results per page")]
SearchOffset = Annotated[
    SearchOffsetBase, Field(description="Number of results to skip")
@@ -32,15 +49,92 @@ SearchTotal = Annotated[
    SearchTotalBase, Field(description="Total number of search results")
]
WEBVTT_SPEC_HEADER = "WEBVTT"

WebVTTContent = Annotated[
    str,
    Field(min_length=len(WEBVTT_SPEC_HEADER), description="WebVTT content"),
]


class WebVTTProcessor:
    """Stateless processor for WebVTT content operations."""

    @staticmethod
    def parse(raw_content: str) -> WebVTTContent:
        """Parse WebVTT content and return it as a string."""
        if not raw_content.startswith(WEBVTT_SPEC_HEADER):
            raise ValueError(f"Invalid WebVTT content, no header {WEBVTT_SPEC_HEADER}")
        return raw_content

    @staticmethod
    def extract_text(webvtt_content: WebVTTContent) -> str:
        """Extract plain text from WebVTT content using webvtt library."""
        try:
            buffer = StringIO(webvtt_content)
            vtt = webvtt.read_buffer(buffer)
            return " ".join(caption.text for caption in vtt if caption.text)
        except webvtt.errors.MalformedFileError as e:
            logger.warning(f"Malformed WebVTT content: {e}")
            return ""
        except (UnicodeDecodeError, ValueError) as e:
            logger.warning(f"Failed to decode WebVTT content: {e}")
            return ""
        except AttributeError as e:
            logger.error(
                f"WebVTT parsing error - unexpected format: {e}", exc_info=True
            )
            return ""
        except Exception as e:
            logger.error(f"Unexpected error parsing WebVTT: {e}", exc_info=True)
            return ""

    @staticmethod
    def generate_snippets(
        webvtt_content: WebVTTContent,
        query: SearchQuery,
        max_snippets: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
    ) -> list[str]:
        """Generate snippets from WebVTT content."""
        return SnippetGenerator.generate(
            WebVTTProcessor.extract_text(webvtt_content),
            query,
            max_snippets=max_snippets,
        )
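A usage sketch for WebVTTProcessor; the inline cue is illustrative sample data:

    vtt_raw = "WEBVTT\n\n00:00:00.000 --> 00:00:02.000\nhello world\n"  # sample content
    content = WebVTTProcessor.parse(vtt_raw)  # validates the WEBVTT header
    text = WebVTTProcessor.extract_text(content)  # -> "hello world"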
@dataclass(frozen=True)
class SnippetCandidate:
    """Represents a candidate snippet with its position."""

    _text: str
    start: NonNegativeInt
    _original_text_length: int

    @property
    def end(self) -> NonNegativeInt:
        """Calculate end position from start and raw text length."""
        return self.start + len(self._text)

    def text(self) -> str:
        """Get display text with ellipses added if needed."""
        result = self._text.strip()
        if self.start > 0:
            result = "..." + result
        if self.end < self._original_text_length:
            result = result + "..."
        return result
class SearchParameters(BaseModel):
    """Validated search parameters for full-text search."""

    query_text: SearchQuery
    query_text: SearchQuery | None = None
    limit: SearchLimit = DEFAULT_SEARCH_LIMIT
    offset: SearchOffset = 0
    user_id: str | None = None
    room_id: str | None = None
    source_kind: SourceKind | None = None


class SearchResultDB(BaseModel):
@@ -64,13 +158,18 @@ class SearchResult(BaseModel):
    title: str | None = None
    user_id: str | None = None
    room_id: str | None = None
    room_name: str | None = None
    source_kind: SourceKind
    created_at: datetime
    status: str = Field(..., min_length=1)
    status: TranscriptStatus = Field(..., min_length=1)
    rank: float = Field(..., ge=0, le=1)
    duration: float | None = Field(..., ge=0, description="Duration in seconds")
    duration: NonNegativeFloat | None = Field(..., description="Duration in seconds")
    search_snippets: list[str] = Field(
        description="Text snippets around search matches"
    )
    total_match_count: NonNegativeInt = Field(
        default=0, description="Total number of matches found in the transcript"
    )

    @field_serializer("created_at", when_used="json")
    def serialize_datetime(self, dt: datetime) -> str:
@@ -79,84 +178,157 @@ class SearchResult(BaseModel):
        return dt.isoformat()
class SearchController:
    """Controller for search operations across different entities."""
class SnippetGenerator:
    """Stateless generator for text snippets and match operations."""

    @staticmethod
    def _extract_webvtt_text(webvtt_content: str) -> str:
        """Extract plain text from WebVTT content using webvtt library."""
        if not webvtt_content:
            return ""
    def find_all_matches(text: str, query: str) -> Iterator[int]:
        """Generate all match positions for a query in text."""
        if not text:
            logger.warning("Empty text for search query in find_all_matches")
            return
        if not query:
            logger.warning("Empty query for search text in find_all_matches")
            return

        try:
            buffer = StringIO(webvtt_content)
            vtt = webvtt.read_buffer(buffer)
            return " ".join(caption.text for caption in vtt if caption.text)
        except (webvtt.errors.MalformedFileError, UnicodeDecodeError, ValueError) as e:
            logger.warning(f"Failed to parse WebVTT content: {e}", exc_info=e)
            return ""
        except AttributeError as e:
            logger.warning(f"WebVTT parsing error - unexpected format: {e}", exc_info=e)
            return ""
        text_lower = text.lower()
        query_lower = query.lower()
        start = 0
        prev_start = start
        while (pos := text_lower.find(query_lower, start)) != -1:
            yield pos
            start = pos + len(query_lower)
            if start <= prev_start:
                raise ValueError("panic! find_all_matches is not incremental")
            prev_start = start

    @staticmethod
    def _generate_snippets(
    def count_matches(text: str, query: SearchQuery) -> NonNegativeInt:
        """Count total number of matches for a query in text."""
        ZERO = NonNegativeInt(0)
        if not text:
            logger.warning("Empty text for search query in count_matches")
            return ZERO
        assert query is not None
        return NonNegativeInt(
            sum(1 for _ in SnippetGenerator.find_all_matches(text, query))
        )

    @staticmethod
    def create_snippet(
        text: str, match_pos: int, max_length: int = DEFAULT_SNIPPET_MAX_LENGTH
    ) -> SnippetCandidate:
        """Create a snippet from a match position."""
        snippet_start = NonNegativeInt(max(0, match_pos - SNIPPET_CONTEXT_LENGTH))
        snippet_end = min(len(text), match_pos + max_length - SNIPPET_CONTEXT_LENGTH)

        snippet_text = text[snippet_start:snippet_end]

        return SnippetCandidate(
            _text=snippet_text, start=snippet_start, _original_text_length=len(text)
        )

    @staticmethod
    def filter_non_overlapping(
        candidates: Iterator[SnippetCandidate],
    ) -> Iterator[str]:
        """Filter out overlapping snippets and return only display text."""
        last_end = 0
        for candidate in candidates:
            display_text = candidate.text()
            # overlapping snippets simply don't get included; that is fine as
            # simplistic logic, and users probably won't care much because they
            # already have their search results just fine
            if candidate.start >= last_end and display_text:
                yield display_text
                last_end = candidate.end

    @staticmethod
    def generate(
        text: str,
        q: SearchQuery,
        max_length: int = DEFAULT_SNIPPET_MAX_LENGTH,
        max_snippets: int = DEFAULT_MAX_SNIPPETS,
        query: SearchQuery,
        max_length: NonNegativeInt = DEFAULT_SNIPPET_MAX_LENGTH,
        max_snippets: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
    ) -> list[str]:
        """Generate multiple snippets around all occurrences of search term."""
        if not text or not q:
        """Generate snippets from text."""
        assert query is not None
        if not text:
            logger.warning("Empty text for generate_snippets")
            return []

        snippets = []
        lower_text = text.lower()
        search_lower = q.lower()
        candidates = (
            SnippetGenerator.create_snippet(text, pos, max_length)
            for pos in SnippetGenerator.find_all_matches(text, query)
        )
        filtered = SnippetGenerator.filter_non_overlapping(candidates)
        snippets = list(itertools.islice(filtered, max_snippets))

        last_snippet_end = 0
        start_pos = 0

        while len(snippets) < max_snippets:
            match_pos = lower_text.find(search_lower, start_pos)

            if match_pos == -1:
                if not snippets and search_lower.split():
                    first_word = search_lower.split()[0]
                    match_pos = lower_text.find(first_word, start_pos)
                    if match_pos == -1:
                        break
                else:
                    break

            snippet_start = max(0, match_pos - SNIPPET_CONTEXT_LENGTH)
            snippet_end = min(
                len(text), match_pos + max_length - SNIPPET_CONTEXT_LENGTH
            )

            if snippet_start < last_snippet_end:
                start_pos = match_pos + len(search_lower)
                continue

            snippet = text[snippet_start:snippet_end]

            if snippet_start > 0:
                snippet = "..." + snippet
            if snippet_end < len(text):
                snippet = snippet + "..."

            snippet = snippet.strip()

            if snippet:
                snippets.append(snippet)
                last_snippet_end = snippet_end

            start_pos = match_pos + len(search_lower)
            if start_pos >= len(text):
                break
        # Fallback to first word search if no full matches
        # another simplifying assumption: proper snippet generation is quite
        # complicated and tied to DB logic, so a simplification is used here
        if not snippets and " " in query:
            first_word = query.split()[0]
            return SnippetGenerator.generate(text, first_word, max_length, max_snippets)

        return snippets
    @staticmethod
    def from_summary(
        summary: str,
        query: SearchQuery,
        max_snippets: NonNegativeInt = LONG_SUMMARY_MAX_SNIPPETS,
    ) -> list[str]:
        """Generate snippets from summary text."""
        return SnippetGenerator.generate(summary, query, max_snippets=max_snippets)

    @staticmethod
    def combine_sources(
        summary: NonEmptyString | None,
        webvtt: WebVTTContent | None,
        query: SearchQuery,
        max_total: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
    ) -> tuple[list[str], NonNegativeInt]:
        """Combine snippets from multiple sources and return total match count.

        Returns (snippets, total_match_count) tuple.

        snippets can legitimately be empty, e.g. when only the title matches
        """

        assert (
            summary is not None or webvtt is not None
        ), "At least one source must be present"

        webvtt_matches = 0
        summary_matches = 0

        if webvtt:
            webvtt_text = WebVTTProcessor.extract_text(webvtt)
            webvtt_matches = SnippetGenerator.count_matches(webvtt_text, query)

        if summary:
            summary_matches = SnippetGenerator.count_matches(summary, query)

        total_matches = NonNegativeInt(webvtt_matches + summary_matches)

        summary_snippets = (
            SnippetGenerator.from_summary(summary, query) if summary else []
        )

        if len(summary_snippets) >= max_total:
            return summary_snippets[:max_total], total_matches

        remaining = max_total - len(summary_snippets)
        webvtt_snippets = (
            WebVTTProcessor.generate_snippets(webvtt, query, remaining)
            if webvtt
            else []
        )

        return summary_snippets + webvtt_snippets, total_matches
class SearchController:
    """Controller for search operations across different entities."""

    @classmethod
    async def search_transcripts(
        cls, params: SearchParameters
@@ -172,39 +344,72 @@ class SearchController:
            )
            return [], 0

        search_query = sqlalchemy.func.websearch_to_tsquery(
            "english", params.query_text
        base_columns = [
            transcripts.c.id,
            transcripts.c.title,
            transcripts.c.created_at,
            transcripts.c.duration,
            transcripts.c.status,
            transcripts.c.user_id,
            transcripts.c.room_id,
            transcripts.c.source_kind,
            transcripts.c.webvtt,
            transcripts.c.long_summary,
            sqlalchemy.case(
                (
                    transcripts.c.room_id.isnot(None) & rooms.c.id.is_(None),
                    "Deleted Room",
                ),
                else_=rooms.c.name,
            ).label("room_name"),
        ]
        search_query = None
        if params.query_text is not None:
            search_query = sqlalchemy.func.websearch_to_tsquery(
                "english", params.query_text
            )
            rank_column = sqlalchemy.func.ts_rank(
                transcripts.c.search_vector_en,
                search_query,
                32,  # normalization flag: rank/(rank+1) for 0-1 range
            ).label("rank")
        else:
            rank_column = sqlalchemy.cast(1.0, sqlalchemy.Float).label("rank")

        columns = base_columns + [rank_column]
        base_query = sqlalchemy.select(columns).select_from(
            transcripts.join(rooms, transcripts.c.room_id == rooms.c.id, isouter=True)
        )

        base_query = sqlalchemy.select(
            [
                transcripts.c.id,
                transcripts.c.title,
                transcripts.c.created_at,
                transcripts.c.duration,
                transcripts.c.status,
                transcripts.c.user_id,
                transcripts.c.room_id,
                transcripts.c.source_kind,
                transcripts.c.webvtt,
                sqlalchemy.func.ts_rank(
                    transcripts.c.search_vector_en,
                    search_query,
                    32,  # normalization flag: rank/(rank+1) for 0-1 range
                ).label("rank"),
            ]
        ).where(transcripts.c.search_vector_en.op("@@")(search_query))
        if params.query_text is not None:
            # because already initialized based on params.query_text presence above
            assert search_query is not None
            base_query = base_query.where(
                transcripts.c.search_vector_en.op("@@")(search_query)
            )

        if params.user_id:
            base_query = base_query.where(transcripts.c.user_id == params.user_id)
            base_query = base_query.where(
                sqlalchemy.or_(
                    transcripts.c.user_id == params.user_id, rooms.c.is_shared
                )
            )
        else:
            base_query = base_query.where(rooms.c.is_shared)
        if params.room_id:
            base_query = base_query.where(transcripts.c.room_id == params.room_id)
        if params.source_kind:
            base_query = base_query.where(
                transcripts.c.source_kind == params.source_kind
            )

        if params.query_text is not None:
            order_by = sqlalchemy.desc(sqlalchemy.text("rank"))
        else:
            order_by = sqlalchemy.desc(transcripts.c.created_at)

        query = base_query.order_by(order_by).limit(params.limit).offset(params.offset)

        query = (
            base_query.order_by(sqlalchemy.desc(sqlalchemy.text("rank")))
            .limit(params.limit)
            .offset(params.offset)
        )
        rs = await get_database().fetch_all(query)

        count_query = sqlalchemy.select([sqlalchemy.func.count()]).select_from(
@@ -212,20 +417,52 @@ class SearchController:
        )
        total = await get_database().fetch_val(count_query)

        def _process_result(r) -> SearchResult:
        def _process_result(r: DbRecord) -> SearchResult:
            r_dict: Dict[str, Any] = dict(r)
            webvtt: str | None = r_dict.pop("webvtt", None)

            webvtt_raw: str | None = r_dict.pop("webvtt", None)
            webvtt: WebVTTContent | None
            if webvtt_raw:
                webvtt = WebVTTProcessor.parse(webvtt_raw)
            else:
                webvtt = None

            long_summary_r: str | None = r_dict.pop("long_summary", None)
            long_summary: NonEmptyString = try_parse_non_empty_string(long_summary_r)
            room_name: str | None = r_dict.pop("room_name", None)
            db_result = SearchResultDB.model_validate(r_dict)

            snippets = []
            if webvtt:
                plain_text = cls._extract_webvtt_text(webvtt)
                snippets = cls._generate_snippets(plain_text, params.query_text)
            at_least_one_source = webvtt is not None or long_summary is not None
            has_query = params.query_text is not None
            snippets, total_match_count = (
                SnippetGenerator.combine_sources(
                    long_summary, webvtt, params.query_text, DEFAULT_MAX_SNIPPETS
                )
                if has_query and at_least_one_source
                else ([], 0)
            )

            return SearchResult(**db_result.model_dump(), search_snippets=snippets)
            return SearchResult(
                **db_result.model_dump(),
                room_name=room_name,
                search_snippets=snippets,
                total_match_count=total_match_count,
            )

        try:
            results = [_process_result(r) for r in rs]
        except ValidationError as e:
            logger.error(f"Invalid search result data: {e}", exc_info=True)
            raise HTTPException(
                status_code=500, detail="Internal search result data consistency error"
            )
        except Exception as e:
            logger.error(f"Error processing search results: {e}", exc_info=True)
            raise

        results = [_process_result(r) for r in rs]
        return results, total


search_controller = SearchController()
webvtt_processor = WebVTTProcessor()
snippet_generator = SnippetGenerator()
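A usage sketch tying the snippet helpers above together (strings are illustrative):

    text = "the budget review covered hiring, the budget freeze, and tooling"
    snippets = SnippetGenerator.generate(text, "budget")  # up to DEFAULT_MAX_SNIPPETS snippets
    count = SnippetGenerator.count_matches(text, "budget")  # -> 2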
@@ -88,6 +88,8 @@ transcripts = sqlalchemy.Table(
    sqlalchemy.Index("idx_transcript_created_at", "created_at"),
    sqlalchemy.Index("idx_transcript_user_id_recording_id", "user_id", "recording_id"),
    sqlalchemy.Index("idx_transcript_room_id", "room_id"),
    sqlalchemy.Index("idx_transcript_source_kind", "source_kind"),
    sqlalchemy.Index("idx_transcript_room_id_created_at", "room_id", "created_at"),
)

# Add PostgreSQL-specific full-text search column
@@ -99,7 +101,8 @@ if is_postgresql():
        TSVECTOR,
        sqlalchemy.Computed(
            "setweight(to_tsvector('english', coalesce(title, '')), 'A') || "
            "setweight(to_tsvector('english', coalesce(webvtt, '')), 'B')",
            "setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') || "
            "setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')",
            persisted=True,
        ),
    )
@@ -119,6 +122,15 @@ def generate_transcript_name() -> str:
    return f"Transcript {now.strftime('%Y-%m-%d %H:%M:%S')}"


TranscriptStatus = Literal[
    "idle", "uploaded", "recording", "processing", "error", "ended"
]


class StrValue(BaseModel):
    value: str


class AudioWaveform(BaseModel):
    data: list[float]

@@ -182,7 +194,7 @@ class Transcript(BaseModel):
    id: str = Field(default_factory=generate_uuid4)
    user_id: str | None = None
    name: str = Field(default_factory=generate_transcript_name)
    status: str = "idle"
    status: TranscriptStatus = "idle"
    duration: float = 0
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    title: str | None = None
@@ -729,5 +741,27 @@ class TranscriptController:
        transcript.delete_participant(participant_id)
        await self.update(transcript, {"participants": transcript.participants_dump()})

    async def set_status(
        self, transcript_id: str, status: TranscriptStatus
    ) -> TranscriptEvent | None:
        """
        Update the status of a transcript

        Will add an event STATUS + update the status field of transcript
        """
        async with self.transaction():
            transcript = await self.get_by_id(transcript_id)
            if not transcript:
                raise Exception(f"Transcript {transcript_id} not found")
            if transcript.status == status:
                return
            resp = await self.append_event(
                transcript=transcript,
                event="STATUS",
                data=StrValue(value=status),
            )
            await self.update(transcript, {"status": status})
            return resp


transcripts_controller = TranscriptController()
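A usage sketch of the new set_status controller method (the transcript id is a placeholder; per the code above, it returns None when the status is unchanged):

    event = await transcripts_controller.set_status("abc123", "processing")
    # event is the appended STATUS TranscriptEvent, or None if already "processing"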
439
server/reflector/pipelines/main_file_pipeline.py
Normal file
@@ -0,0 +1,439 @@
"""
File-based processing pipeline
==============================

Optimized pipeline for processing complete audio/video files.
Uses parallel processing for transcription, diarization, and waveform generation.
"""

import asyncio
import uuid
from pathlib import Path

import av
import structlog
from celery import chain, shared_task

from reflector.asynctask import asynctask
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import (
    SourceKind,
    Transcript,
    TranscriptStatus,
    transcripts_controller,
)
from reflector.logger import logger
from reflector.pipelines.main_live_pipeline import (
    PipelineMainBase,
    broadcast_to_sockets,
    task_cleanup_consent,
    task_pipeline_post_to_zulip,
)
from reflector.processors import (
    AudioFileWriterProcessor,
    TranscriptFinalSummaryProcessor,
    TranscriptFinalTitleProcessor,
    TranscriptTopicDetectorProcessor,
)
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.processors.file_diarization import FileDiarizationInput
from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor
from reflector.processors.file_transcript import FileTranscriptInput
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
from reflector.processors.transcript_diarization_assembler import (
    TranscriptDiarizationAssemblerInput,
    TranscriptDiarizationAssemblerProcessor,
)
from reflector.processors.types import (
    DiarizationSegment,
    TitleSummary,
)
from reflector.processors.types import (
    Transcript as TranscriptType,
)
from reflector.settings import settings
from reflector.storage import get_transcripts_storage
from reflector.worker.webhook import send_transcript_webhook


class EmptyPipeline:
    """Empty pipeline for processors that need a pipeline reference"""

    def __init__(self, logger: structlog.BoundLogger):
        self.logger = logger

    def get_pref(self, k, d=None):
        return d

    async def emit(self, event):
        pass


class PipelineMainFile(PipelineMainBase):
    """
    Optimized file processing pipeline.
    Processes complete audio/video files with parallel execution.
    """

    logger: structlog.BoundLogger = None
    empty_pipeline = None

    def __init__(self, transcript_id: str):
        super().__init__(transcript_id=transcript_id)
        self.logger = logger.bind(transcript_id=self.transcript_id)
        self.empty_pipeline = EmptyPipeline(logger=self.logger)

    def _handle_gather_exceptions(self, results: list, operation: str) -> None:
        """Handle exceptions from asyncio.gather with return_exceptions=True"""
        for i, result in enumerate(results):
            if not isinstance(result, Exception):
                continue
            self.logger.error(
                f"Error in {operation} (task {i}): {result}",
                transcript_id=self.transcript_id,
                exc_info=result,
            )

    @broadcast_to_sockets
    async def set_status(self, transcript_id: str, status: TranscriptStatus):
        async with self.lock_transaction():
            return await transcripts_controller.set_status(transcript_id, status)

    async def process(self, file_path: Path):
        """Main entry point for file processing"""
        self.logger.info(f"Starting file pipeline for {file_path}")

        transcript = await self.get_transcript()

        # Clear transcript as we're going to regenerate everything
        async with self.transaction():
            await transcripts_controller.update(
                transcript,
                {
                    "events": [],
                    "topics": [],
                },
            )

        # Extract audio and write to transcript location
        audio_path = await self.extract_and_write_audio(file_path, transcript)

        # Upload for processing
        audio_url = await self.upload_audio(audio_path, transcript)

        # Run parallel processing
        await self.run_parallel_processing(
            audio_path,
            audio_url,
            transcript.source_language,
            transcript.target_language,
        )

        self.logger.info("File pipeline complete")

        await transcripts_controller.set_status(transcript.id, "ended")

    async def extract_and_write_audio(
        self, file_path: Path, transcript: Transcript
    ) -> Path:
        """Extract audio from video if needed and write to transcript location as MP3"""
        self.logger.info(f"Processing audio file: {file_path}")

        # Check if it's already audio-only
        container = av.open(str(file_path))
        has_video = len(container.streams.video) > 0
        container.close()

        # Use AudioFileWriterProcessor to write MP3 to transcript location
        mp3_writer = AudioFileWriterProcessor(
            path=transcript.audio_mp3_filename,
            on_duration=self.on_duration,
        )

        # Process audio frames and write to transcript location
        input_container = av.open(str(file_path))
        for frame in input_container.decode(audio=0):
            await mp3_writer.push(frame)

        await mp3_writer.flush()
        input_container.close()

        if has_video:
            self.logger.info(
                f"Extracted audio from video and saved to {transcript.audio_mp3_filename}"
            )
        else:
            self.logger.info(
                f"Converted audio file and saved to {transcript.audio_mp3_filename}"
            )

        return transcript.audio_mp3_filename

    async def upload_audio(self, audio_path: Path, transcript: Transcript) -> str:
        """Upload audio to storage for processing"""
        storage = get_transcripts_storage()

        if not storage:
            raise Exception(
                "Storage backend required for file processing. Configure TRANSCRIPT_STORAGE_* settings."
            )

        self.logger.info("Uploading audio to storage")

        with open(audio_path, "rb") as f:
            audio_data = f.read()

        storage_path = f"file_pipeline/{transcript.id}/audio.mp3"
        await storage.put_file(storage_path, audio_data)

        audio_url = await storage.get_file_url(storage_path)

        self.logger.info(f"Audio uploaded to {audio_url}")
        return audio_url

    async def run_parallel_processing(
        self,
        audio_path: Path,
        audio_url: str,
        source_language: str,
        target_language: str,
    ):
        """Coordinate parallel processing of transcription, diarization, and waveform"""
        self.logger.info(
            "Starting parallel processing", transcript_id=self.transcript_id
        )

        # Phase 1: Parallel processing of independent tasks
        transcription_task = self.transcribe_file(audio_url, source_language)
        diarization_task = self.diarize_file(audio_url)
        waveform_task = self.generate_waveform(audio_path)

        results = await asyncio.gather(
            transcription_task, diarization_task, waveform_task, return_exceptions=True
        )

        transcript_result = results[0]
        diarization_result = results[1]

        # Handle errors - raise any exception that occurred
        self._handle_gather_exceptions(results, "parallel processing")
        for result in results:
            if isinstance(result, Exception):
                raise result

        # Phase 2: Assemble transcript with diarization
        self.logger.info(
            "Assembling transcript with diarization", transcript_id=self.transcript_id
        )
        processor = TranscriptDiarizationAssemblerProcessor()
        input_data = TranscriptDiarizationAssemblerInput(
            transcript=transcript_result, diarization=diarization_result or []
        )

        # Store result for retrieval
        diarized_transcript: Transcript | None = None

        async def capture_result(transcript):
            nonlocal diarized_transcript
            diarized_transcript = transcript

        processor.on(capture_result)
        await processor.push(input_data)
        await processor.flush()

        if not diarized_transcript:
            raise ValueError("No diarized transcript captured")

        # Phase 3: Generate topics from diarized transcript
        self.logger.info("Generating topics", transcript_id=self.transcript_id)
        topics = await self.detect_topics(diarized_transcript, target_language)

        # Phase 4: Generate title and summaries in parallel
        self.logger.info(
            "Generating title and summaries", transcript_id=self.transcript_id
        )
        results = await asyncio.gather(
            self.generate_title(topics),
            self.generate_summaries(topics),
            return_exceptions=True,
        )

        self._handle_gather_exceptions(results, "title and summary generation")

    async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
        """Transcribe complete file"""
        processor = FileTranscriptAutoProcessor()
        input_data = FileTranscriptInput(audio_url=audio_url, language=language)

        # Store result for retrieval
        result: TranscriptType | None = None

        async def capture_result(transcript):
            nonlocal result
            result = transcript

        processor.on(capture_result)
        await processor.push(input_data)
        await processor.flush()

        if not result:
            raise ValueError("No transcript captured")

        return result

    async def diarize_file(self, audio_url: str) -> list[DiarizationSegment] | None:
        """Get diarization for file"""
        if not settings.DIARIZATION_BACKEND:
            self.logger.info("Diarization disabled")
            return None

        processor = FileDiarizationAutoProcessor()
        input_data = FileDiarizationInput(audio_url=audio_url)

        # Store result for retrieval
        result = None

        async def capture_result(diarization_output):
            nonlocal result
            result = diarization_output.diarization

        try:
            processor.on(capture_result)
            await processor.push(input_data)
            await processor.flush()
            return result
        except Exception as e:
            self.logger.error(f"Diarization failed: {e}")
            return None

    async def generate_waveform(self, audio_path: Path):
        """Generate and save waveform"""
        transcript = await self.get_transcript()

        processor = AudioWaveformProcessor(
            audio_path=audio_path,
            waveform_path=transcript.audio_waveform_filename,
            on_waveform=self.on_waveform,
        )
        processor.set_pipeline(self.empty_pipeline)

        await processor.flush()

    async def detect_topics(
        self, transcript: TranscriptType, target_language: str
    ) -> list[TitleSummary]:
        """Detect topics from complete transcript"""
        chunk_size = 300
        topics: list[TitleSummary] = []

        async def on_topic(topic: TitleSummary):
            topics.append(topic)
            return await self.on_topic(topic)

        topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
        topic_detector.set_pipeline(self.empty_pipeline)

        for i in range(0, len(transcript.words), chunk_size):
            chunk_words = transcript.words[i : i + chunk_size]
            if not chunk_words:
                continue

            chunk_transcript = TranscriptType(
                words=chunk_words, translation=transcript.translation
            )

            await topic_detector.push(chunk_transcript)

        await topic_detector.flush()
        return topics

    async def generate_title(self, topics: list[TitleSummary]):
        """Generate title from topics"""
        if not topics:
            self.logger.warning("No topics for title generation")
            return

        processor = TranscriptFinalTitleProcessor(callback=self.on_title)
        processor.set_pipeline(self.empty_pipeline)

        for topic in topics:
            await processor.push(topic)

        await processor.flush()

    async def generate_summaries(self, topics: list[TitleSummary]):
        """Generate long and short summaries from topics"""
        if not topics:
            self.logger.warning("No topics for summary generation")
            return

        transcript = await self.get_transcript()
        processor = TranscriptFinalSummaryProcessor(
            transcript=transcript,
            callback=self.on_long_summary,
            on_short_summary=self.on_short_summary,
        )
        processor.set_pipeline(self.empty_pipeline)

        for topic in topics:
            await processor.push(topic)

        await processor.flush()


@shared_task
@asynctask
async def task_send_webhook_if_needed(*, transcript_id: str):
    """Send webhook if this is a room recording with webhook configured"""
    transcript = await transcripts_controller.get_by_id(transcript_id)
    if not transcript:
        return

    if transcript.source_kind == SourceKind.ROOM and transcript.room_id:
        room = await rooms_controller.get_by_id(transcript.room_id)
        if room and room.webhook_url:
            logger.info(
                "Dispatching webhook",
                transcript_id=transcript_id,
                room_id=room.id,
                webhook_url=room.webhook_url,
            )
            send_transcript_webhook.delay(
                transcript_id, room.id, event_id=uuid.uuid4().hex
            )


@shared_task
@asynctask
async def task_pipeline_file_process(*, transcript_id: str):
    """Celery task for file pipeline processing"""

    transcript = await transcripts_controller.get_by_id(transcript_id)
    if not transcript:
        raise Exception(f"Transcript {transcript_id} not found")

    pipeline = PipelineMainFile(transcript_id=transcript_id)
    try:
        await pipeline.set_status(transcript_id, "processing")

        # Find the file to process
        audio_file = next(transcript.data_path.glob("upload.*"), None)
        if not audio_file:
            audio_file = next(transcript.data_path.glob("audio.*"), None)

        if not audio_file:
            raise Exception("No audio file found to process")

        await pipeline.process(audio_file)

    except Exception:
        await pipeline.set_status(transcript_id, "error")
        raise

    # Run post-processing chain: consent cleanup -> zulip -> webhook
    post_chain = chain(
        task_cleanup_consent.si(transcript_id=transcript_id),
        task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
        task_send_webhook_if_needed.si(transcript_id=transcript_id),
    )
    post_chain.delay()
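A sketch of enqueuing the file pipeline defined above, using the same Celery .delay() pattern as elsewhere in this diff (the transcript id is a placeholder):

    task_pipeline_file_process.delay(transcript_id="abc123")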
@@ -22,7 +22,7 @@ from celery import chord, current_task, group, shared_task
from pydantic import BaseModel
from structlog import BoundLogger as Logger

from reflector.db import get_database
from reflector.asynctask import asynctask
from reflector.db.meetings import meeting_consent_controller, meetings_controller
from reflector.db.recordings import recordings_controller
from reflector.db.rooms import rooms_controller
@@ -32,6 +32,7 @@ from reflector.db.transcripts import (
    TranscriptFinalLongSummary,
    TranscriptFinalShortSummary,
    TranscriptFinalTitle,
    TranscriptStatus,
    TranscriptText,
    TranscriptTopic,
    TranscriptWaveform,
@@ -40,8 +41,9 @@ from reflector.db.transcripts import (
from reflector.logger import logger
from reflector.pipelines.runner import PipelineMessage, PipelineRunner
from reflector.processors import (
    AudioChunkerProcessor,
    AudioChunkerAutoProcessor,
    AudioDiarizationAutoProcessor,
    AudioDownscaleProcessor,
    AudioFileWriterProcessor,
    AudioMergeProcessor,
    AudioTranscriptAutoProcessor,
@@ -68,29 +70,6 @@ from reflector.zulip import (
)


def asynctask(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        async def run_with_db():
            database = get_database()
            await database.connect()
            try:
                return await f(*args, **kwargs)
            finally:
                await database.disconnect()

        coro = run_with_db()
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None
        if loop and loop.is_running():
            return loop.run_until_complete(coro)
        return asyncio.run(coro)

    return wrapper


def broadcast_to_sockets(func):
    """
    Decorator to broadcast transcript event to websockets
@@ -147,15 +126,18 @@ class StrValue(BaseModel):


class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]):
    transcript_id: str
    ws_room_id: str | None = None
    ws_manager: WebsocketManager | None = None

    def prepare(self):
        # prepare websocket
    def __init__(self, transcript_id: str):
        super().__init__()
        self._lock = asyncio.Lock()
        self.transcript_id = transcript_id
        self.ws_room_id = f"ts:{self.transcript_id}"
        self.ws_manager = get_ws_manager()
        self._ws_manager = None

    @property
    def ws_manager(self) -> WebsocketManager:
        if self._ws_manager is None:
            self._ws_manager = get_ws_manager()
        return self._ws_manager

    async def get_transcript(self) -> Transcript:
        # fetch the transcript
@@ -184,8 +166,15 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
        ]

    @asynccontextmanager
    async def transaction(self):
    async def lock_transaction(self):
        # This lock is to prevent multiple processors starting to add
        # into the event array at the same time
        async with self._lock:
            yield

    @asynccontextmanager
    async def transaction(self):
        async with self.lock_transaction():
            async with transcripts_controller.transaction():
                yield

@@ -194,14 +183,14 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
        # if it's the first part, update the status of the transcript
        # but do not set the ended status yet.
        if isinstance(self, PipelineMainLive):
            status_mapping = {
            status_mapping: dict[str, TranscriptStatus] = {
                "started": "recording",
                "push": "recording",
                "flush": "processing",
                "error": "error",
            }
        elif isinstance(self, PipelineMainFinalSummaries):
            status_mapping = {
            status_mapping: dict[str, TranscriptStatus] = {
                "push": "processing",
                "flush": "processing",
                "error": "error",
@@ -217,22 +206,8 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
            return

        # when the status of the pipeline changes, update the transcript
        async with self.transaction():
            transcript = await self.get_transcript()
            if status == transcript.status:
                return
            resp = await transcripts_controller.append_event(
                transcript=transcript,
                event="STATUS",
                data=StrValue(value=status),
            )
            await transcripts_controller.update(
                transcript,
                {
                    "status": status,
                },
            )
            return resp
        async with self._lock:
            return await transcripts_controller.set_status(self.transcript_id, status)

    @broadcast_to_sockets
    async def on_transcript(self, data):
@@ -355,7 +330,6 @@ class PipelineMainLive(PipelineMainBase):
    async def create(self) -> Pipeline:
        # create a context for the whole rtc transaction
        # add a customised logger to the context
        self.prepare()
        transcript = await self.get_transcript()

        processors = [
@@ -363,7 +337,8 @@ class PipelineMainLive(PipelineMainBase):
                path=transcript.audio_wav_filename,
                on_duration=self.on_duration,
            ),
            AudioChunkerProcessor(),
            AudioDownscaleProcessor(),
            AudioChunkerAutoProcessor(),
            AudioMergeProcessor(),
            AudioTranscriptAutoProcessor.as_threaded(),
            TranscriptLinerProcessor(),
@@ -376,6 +351,7 @@ class PipelineMainLive(PipelineMainBase):
        pipeline.set_pref("audio:target_language", transcript.target_language)
        pipeline.logger.bind(transcript_id=transcript.id)
        pipeline.logger.info("Pipeline main live created")
        pipeline.describe()

        return pipeline

@@ -394,7 +370,6 @@ class PipelineMainDiarization(PipelineMainBase[AudioDiarizationInput]):
    async def create(self) -> Pipeline:
        # create a context for the whole rtc transaction
        # add a customised logger to the context
        self.prepare()
        pipeline = Pipeline(
            AudioDiarizationAutoProcessor(callback=self.on_topic),
        )
@@ -435,8 +410,6 @@ class PipelineMainFromTopics(PipelineMainBase[TitleSummaryWithIdProcessorType]):
        raise NotImplementedError

    async def create(self) -> Pipeline:
        self.prepare()

        # get transcript
        self._transcript = transcript = await self.get_transcript()

@@ -792,7 +765,7 @@ def pipeline_post(*, transcript_id: str):
        chain_final_summaries,
    ) | task_pipeline_post_to_zulip.si(transcript_id=transcript_id)

    chain.delay()
    return chain.delay()


@get_transcript
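A sketch of how a PipelineMainBase subclass would use the new lock_transaction split above; the helper name is hypothetical, and the pattern matches PipelineMainFile.set_status earlier in this diff:

    # inside a PipelineMainBase subclass
    async def mark_processing(self):  # hypothetical helper
        # lock_transaction serializes event appends without opening a DB transaction
        async with self.lock_transaction():
            return await transcripts_controller.set_status(self.transcript_id, "processing")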
@@ -18,22 +18,14 @@ During its lifecycle, it will emit the following status:
import asyncio
from typing import Generic, TypeVar

from pydantic import BaseModel, ConfigDict

from reflector.logger import logger
from reflector.processors import Pipeline

PipelineMessage = TypeVar("PipelineMessage")


class PipelineRunner(BaseModel, Generic[PipelineMessage]):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    status: str = "idle"
    pipeline: Pipeline | None = None

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
class PipelineRunner(Generic[PipelineMessage]):
    def __init__(self):
        self._task = None
        self._q_cmd = asyncio.Queue(maxsize=4096)
        self._ev_done = asyncio.Event()
@@ -42,6 +34,8 @@ class PipelineRunner(BaseModel, Generic[PipelineMessage]):
            runner=id(self),
            runner_cls=self.__class__.__name__,
        )
        self.status = "idle"
        self.pipeline: Pipeline | None = None

    async def create(self) -> Pipeline:
        """
@@ -1,5 +1,7 @@
from .audio_chunker import AudioChunkerProcessor  # noqa: F401
from .audio_chunker_auto import AudioChunkerAutoProcessor  # noqa: F401
from .audio_diarization_auto import AudioDiarizationAutoProcessor  # noqa: F401
from .audio_downscale import AudioDownscaleProcessor  # noqa: F401
from .audio_file_writer import AudioFileWriterProcessor  # noqa: F401
from .audio_merge import AudioMergeProcessor  # noqa: F401
from .audio_transcript import AudioTranscriptProcessor  # noqa: F401
@@ -11,6 +13,13 @@ from .base import (  # noqa: F401
    Processor,
    ThreadedProcessor,
)
from .file_diarization import FileDiarizationProcessor  # noqa: F401
from .file_diarization_auto import FileDiarizationAutoProcessor  # noqa: F401
from .file_transcript import FileTranscriptProcessor  # noqa: F401
from .file_transcript_auto import FileTranscriptAutoProcessor  # noqa: F401
from .transcript_diarization_assembler import (
    TranscriptDiarizationAssemblerProcessor,  # noqa: F401
)
from .transcript_final_summary import TranscriptFinalSummaryProcessor  # noqa: F401
from .transcript_final_title import TranscriptFinalTitleProcessor  # noqa: F401
from .transcript_liner import TranscriptLinerProcessor  # noqa: F401
@@ -1,28 +1,78 @@
from typing import Optional

import av
from prometheus_client import Counter, Histogram

from reflector.processors.base import Processor


class AudioChunkerProcessor(Processor):
    """
    Assemble audio frames into chunks
    Base class for assembling audio frames into chunks
    """

    INPUT_TYPE = av.AudioFrame
    OUTPUT_TYPE = list[av.AudioFrame]

    def __init__(self, max_frames=256):
        super().__init__()
    m_chunk = Histogram(
        "audio_chunker",
        "Time spent in AudioChunker.chunk",
        ["backend"],
    )
    m_chunk_call = Counter(
        "audio_chunker_call",
        "Number of calls to AudioChunker.chunk",
        ["backend"],
    )
    m_chunk_success = Counter(
        "audio_chunker_success",
        "Number of successful calls to AudioChunker.chunk",
        ["backend"],
    )
    m_chunk_failure = Counter(
        "audio_chunker_failure",
        "Number of failed calls to AudioChunker.chunk",
        ["backend"],
    )

    def __init__(self, *args, **kwargs):
        name = self.__class__.__name__
        self.m_chunk = self.m_chunk.labels(name)
        self.m_chunk_call = self.m_chunk_call.labels(name)
        self.m_chunk_success = self.m_chunk_success.labels(name)
        self.m_chunk_failure = self.m_chunk_failure.labels(name)
        super().__init__(*args, **kwargs)
        self.frames: list[av.AudioFrame] = []
        self.max_frames = max_frames

    async def _push(self, data: av.AudioFrame):
        self.frames.append(data)
        if len(self.frames) >= self.max_frames:
            await self.flush()
        """Process incoming audio frame"""
        # Validate audio format on first frame
        if len(self.frames) == 0:
            if data.sample_rate != 16000 or len(data.layout.channels) != 1:
                raise ValueError(
                    f"AudioChunkerProcessor expects 16kHz mono audio, got {data.sample_rate}Hz "
                    f"with {len(data.layout.channels)} channel(s). "
                    f"Use AudioDownscaleProcessor before this processor."
                )

        try:
            self.m_chunk_call.inc()
            with self.m_chunk.time():
                result = await self._chunk(data)
            self.m_chunk_success.inc()
            if result:
                await self.emit(result)
        except Exception:
            self.m_chunk_failure.inc()
            raise

    async def _chunk(self, data: av.AudioFrame) -> Optional[list[av.AudioFrame]]:
        """
        Process audio frame and return chunk when ready.
        Subclasses should implement their chunking logic here.
        """
        raise NotImplementedError

    async def _flush(self):
        frames = self.frames[:]
        self.frames = []
        if frames:
            await self.emit(frames)
        """Flush any remaining frames when processing ends"""
        raise NotImplementedError

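With this refactor, concrete chunkers only implement `_chunk` (return a list of frames when a chunk is ready, else `None`) and `_flush`; the base class owns format validation, metrics, and `emit`. A minimal sketch of a custom backend under those assumptions:

```python
from typing import Optional

import av

from reflector.processors.audio_chunker import AudioChunkerProcessor


class EveryTenFramesChunker(AudioChunkerProcessor):
    """Toy example: emit a chunk every 10 frames."""

    async def _chunk(self, data: av.AudioFrame) -> Optional[list[av.AudioFrame]]:
        self.frames.append(data)
        if len(self.frames) >= 10:
            chunk, self.frames = self.frames, []
            return chunk
        return None

    async def _flush(self):
        # emit whatever is left when the stream ends
        if self.frames:
            await self.emit(self.frames)
            self.frames = []
```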
32 server/reflector/processors/audio_chunker_auto.py Normal file
@@ -0,0 +1,32 @@
import importlib

from reflector.processors.audio_chunker import AudioChunkerProcessor
from reflector.settings import settings


class AudioChunkerAutoProcessor(AudioChunkerProcessor):
    _registry = {}

    @classmethod
    def register(cls, name, kclass):
        cls._registry[name] = kclass

    def __new__(cls, name: str | None = None, **kwargs):
        if name is None:
            name = settings.AUDIO_CHUNKER_BACKEND
        if name not in cls._registry:
            module_name = f"reflector.processors.audio_chunker_{name}"
            importlib.import_module(module_name)

        # gather specific configuration for the processor
        # search `AUDIO_CHUNKER_BACKEND_XXX_YYY`, push to constructor as `backend_xxx_yyy`
        config = {}
        name_upper = name.upper()
        settings_prefix = "AUDIO_CHUNKER_"
        config_prefix = f"{settings_prefix}{name_upper}_"
        for key, value in settings:
            if key.startswith(config_prefix):
                config_name = key[len(settings_prefix) :].lower()
                config[config_name] = value

        return cls._registry[name](**config | kwargs)
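The auto processor resolves backends lazily: importing `reflector.processors.audio_chunker_<name>` triggers that module's `register` call, and any `AUDIO_CHUNKER_<NAME>_*` settings are forwarded to the constructor with only the `AUDIO_CHUNKER_` prefix stripped. A sketch of how selection would look in practice (setting names are illustrative):

```python
from reflector.processors.audio_chunker_auto import AudioChunkerAutoProcessor

# picks settings.AUDIO_CHUNKER_BACKEND ("frames" by default)
chunker = AudioChunkerAutoProcessor()

# or force a backend; a setting such as AUDIO_CHUNKER_SILERO_MIN_FRAMES=4
# would arrive in the constructor as silero_min_frames=4
chunker = AudioChunkerAutoProcessor(name="silero")
```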
34 server/reflector/processors/audio_chunker_frames.py Normal file
@@ -0,0 +1,34 @@
from typing import Optional

import av

from reflector.processors.audio_chunker import AudioChunkerProcessor
from reflector.processors.audio_chunker_auto import AudioChunkerAutoProcessor


class AudioChunkerFramesProcessor(AudioChunkerProcessor):
    """
    Simple frame-based audio chunker that emits chunks after a fixed number of frames
    """

    def __init__(self, max_frames=256, **kwargs):
        super().__init__(**kwargs)
        self.max_frames = max_frames

    async def _chunk(self, data: av.AudioFrame) -> Optional[list[av.AudioFrame]]:
        self.frames.append(data)
        if len(self.frames) >= self.max_frames:
            frames_to_emit = self.frames[:]
            self.frames = []
            return frames_to_emit

        return None

    async def _flush(self):
        frames = self.frames[:]
        self.frames = []
        if frames:
            await self.emit(frames)


AudioChunkerAutoProcessor.register("frames", AudioChunkerFramesProcessor)
298 server/reflector/processors/audio_chunker_silero.py Normal file
@@ -0,0 +1,298 @@
from typing import Optional

import av
import numpy as np
import torch
from silero_vad import VADIterator, load_silero_vad

from reflector.processors.audio_chunker import AudioChunkerProcessor
from reflector.processors.audio_chunker_auto import AudioChunkerAutoProcessor


class AudioChunkerSileroProcessor(AudioChunkerProcessor):
    """
    Assemble audio frames into chunks with VAD-based speech detection using Silero VAD
    """

    def __init__(
        self,
        block_frames=256,
        max_frames=1024,
        use_onnx=True,
        min_frames=2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.block_frames = block_frames
        self.max_frames = max_frames
        self.min_frames = min_frames

        # Initialize Silero VAD
        self._init_vad(use_onnx)

    def _init_vad(self, use_onnx=False):
        """Initialize Silero VAD model"""
        try:
            torch.set_num_threads(1)
            self.vad_model = load_silero_vad(onnx=use_onnx)
            self.vad_iterator = VADIterator(self.vad_model, sampling_rate=16000)
            self.logger.info("Silero VAD initialized successfully")

        except Exception as e:
            self.logger.error(f"Failed to initialize Silero VAD: {e}")
            self.vad_model = None
            self.vad_iterator = None

    async def _chunk(self, data: av.AudioFrame) -> Optional[list[av.AudioFrame]]:
        """Process audio frame and return chunk when ready"""
        self.frames.append(data)

        # Check for speech segments every 32 frames (~1 second)
        if len(self.frames) >= 32 and len(self.frames) % 32 == 0:
            return await self._process_block()

        # Safety fallback - emit if we hit max frames
        elif len(self.frames) >= self.max_frames:
            self.logger.warning(
                f"AudioChunkerSileroProcessor: Reached max frames ({self.max_frames}), "
                f"emitting first {self.max_frames // 2} frames"
            )
            frames_to_emit = self.frames[: self.max_frames // 2]
            self.frames = self.frames[self.max_frames // 2 :]
            if len(frames_to_emit) >= self.min_frames:
                return frames_to_emit
            else:
                self.logger.debug(
                    f"Ignoring fallback segment with {len(frames_to_emit)} frames "
                    f"(< {self.min_frames} minimum)"
                )

        return None

    async def _process_block(self) -> Optional[list[av.AudioFrame]]:
        # Need at least 32 frames for VAD detection (~1 second)
        if len(self.frames) < 32 or self.vad_iterator is None:
            return None

        # Processing block with current buffer size
        print(f"Processing block: {len(self.frames)} frames in buffer")

        try:
            # Convert frames to numpy array for VAD
            audio_array = self._frames_to_numpy(self.frames)

            if audio_array is None:
                # Fallback: emit all frames if conversion failed
                frames_to_emit = self.frames[:]
                self.frames = []
                if len(frames_to_emit) >= self.min_frames:
                    return frames_to_emit
                else:
                    self.logger.debug(
                        f"Ignoring conversion-failed segment with {len(frames_to_emit)} frames "
                        f"(< {self.min_frames} minimum)"
                    )
                return None

            # Find complete speech segments in the buffer
            speech_end_frame = self._find_speech_segment_end(audio_array)

            if speech_end_frame is None or speech_end_frame <= 0:
                # No speech found but buffer is getting large
                if len(self.frames) > 512:
                    # Check if it's all silence and can be discarded
                    # No speech segment found, buffer at {len(self.frames)} frames

                    # Could emit silence or discard old frames here
                    # For now, keep first 256 frames and discard older silence
                    if len(self.frames) > 768:
                        self.logger.debug(
                            f"Discarding {len(self.frames) - 256} old frames (likely silence)"
                        )
                        self.frames = self.frames[-256:]
                return None

            # Calculate segment timing information
            frames_to_emit = self.frames[:speech_end_frame]

            # Get timing from av.AudioFrame
            if frames_to_emit:
                first_frame = frames_to_emit[0]
                last_frame = frames_to_emit[-1]
                sample_rate = first_frame.sample_rate

                # Calculate duration
                total_samples = sum(f.samples for f in frames_to_emit)
                duration_seconds = total_samples / sample_rate if sample_rate > 0 else 0

                # Get timestamps if available
                start_time = (
                    first_frame.pts * first_frame.time_base if first_frame.pts else 0
                )
                end_time = (
                    last_frame.pts * last_frame.time_base if last_frame.pts else 0
                )

                # Convert to HH:MM:SS format for logging
                def format_time(seconds):
                    if not seconds:
                        return "00:00:00"
                    total_seconds = int(float(seconds))
                    hours = total_seconds // 3600
                    minutes = (total_seconds % 3600) // 60
                    secs = total_seconds % 60
                    return f"{hours:02d}:{minutes:02d}:{secs:02d}"

                start_formatted = format_time(start_time)
                end_formatted = format_time(end_time)

                # Keep remaining frames for next processing
                remaining_after = len(self.frames) - speech_end_frame

                # Single structured log line
                self.logger.info(
                    "Speech segment found",
                    start=start_formatted,
                    end=end_formatted,
                    frames=speech_end_frame,
                    duration=round(duration_seconds, 2),
                    buffer_before=len(self.frames),
                    remaining=remaining_after,
                )

            # Keep remaining frames for next processing
            self.frames = self.frames[speech_end_frame:]

            # Filter out segments with too few frames
            if len(frames_to_emit) >= self.min_frames:
                return frames_to_emit
            else:
                self.logger.debug(
                    f"Ignoring segment with {len(frames_to_emit)} frames "
                    f"(< {self.min_frames} minimum)"
                )

        except Exception as e:
            self.logger.error(f"Error in VAD processing: {e}")
            # Fallback to simple chunking
            if len(self.frames) >= self.block_frames:
                frames_to_emit = self.frames[: self.block_frames]
                self.frames = self.frames[self.block_frames :]
                if len(frames_to_emit) >= self.min_frames:
                    return frames_to_emit
                else:
                    self.logger.debug(
                        f"Ignoring exception-fallback segment with {len(frames_to_emit)} frames "
                        f"(< {self.min_frames} minimum)"
                    )

        return None

    def _frames_to_numpy(self, frames: list[av.AudioFrame]) -> Optional[np.ndarray]:
        """Convert av.AudioFrame list to numpy array for VAD processing"""
        if not frames:
            return None

        try:
            audio_data = []
            for frame in frames:
                frame_array = frame.to_ndarray()

                if len(frame_array.shape) == 2:
                    frame_array = frame_array.flatten()

                audio_data.append(frame_array)

            if not audio_data:
                return None

            combined_audio = np.concatenate(audio_data)

            # Ensure float32 format
            if combined_audio.dtype == np.int16:
                # Normalize int16 audio to float32 in range [-1.0, 1.0]
                combined_audio = combined_audio.astype(np.float32) / 32768.0
            elif combined_audio.dtype != np.float32:
                combined_audio = combined_audio.astype(np.float32)

            return combined_audio

        except Exception as e:
            self.logger.error(f"Error converting frames to numpy: {e}")

            return None

    def _find_speech_segment_end(self, audio_array: np.ndarray) -> Optional[int]:
        """Find complete speech segments and return frame index at segment end"""
        if self.vad_iterator is None or len(audio_array) == 0:
            return None

        try:
            # Process audio in 512-sample windows for VAD
            window_size = 512
            min_silence_windows = 3  # Require 3 windows of silence after speech

            # Track speech state
            in_speech = False
            speech_start = None
            speech_end = None
            silence_count = 0

            for i in range(0, len(audio_array), window_size):
                chunk = audio_array[i : i + window_size]
                if len(chunk) < window_size:
                    chunk = np.pad(chunk, (0, window_size - len(chunk)))

                # Detect if this window has speech
                speech_dict = self.vad_iterator(chunk, return_seconds=True)

                # VADIterator returns dict with 'start' and 'end' when speech segments are detected
                if speech_dict:
                    if not in_speech:
                        # Speech started
                        speech_start = i
                        in_speech = True
                        # Debug: print(f"Speech START at sample {i}, VAD: {speech_dict}")
                    silence_count = 0  # Reset silence counter
                    continue

                if not in_speech:
                    continue

                # We're in speech but found silence
                silence_count += 1
                if silence_count < min_silence_windows:
                    continue

                # Found end of speech segment
                speech_end = i - (min_silence_windows - 1) * window_size
                # Debug: print(f"Speech END at sample {speech_end}")

                # Convert sample position to frame index
                samples_per_frame = self.frames[0].samples if self.frames else 1024
                frame_index = speech_end // samples_per_frame

                # Ensure we don't exceed buffer
                frame_index = min(frame_index, len(self.frames))
                return frame_index

            return None

        except Exception as e:
            self.logger.error(f"Error finding speech segment: {e}")
            return None

    async def _flush(self):
        frames = self.frames[:]
        self.frames = []
        if frames:
            if len(frames) >= self.min_frames:
                await self.emit(frames)
            else:
                self.logger.debug(
                    f"Ignoring flush segment with {len(frames)} frames "
                    f"(< {self.min_frames} minimum)"
                )


AudioChunkerAutoProcessor.register("silero", AudioChunkerSileroProcessor)
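The timing constants in the Silero chunker line up as follows: Silero VAD consumes 512-sample windows, which at 16 kHz is 32 ms, and the "every 32 frames (~1 second)" check interval holds only if each av frame carries roughly 512 samples. A quick sanity check of that arithmetic:

```python
SAMPLE_RATE = 16_000
WINDOW_SAMPLES = 512          # what VADIterator expects at 16 kHz

window_ms = WINDOW_SAMPLES / SAMPLE_RATE * 1000
assert window_ms == 32.0      # 32 ms per VAD window

# the "~1 second" comment assumes ~512 samples per av frame
frames_per_check = 32
assert frames_per_check * WINDOW_SAMPLES / SAMPLE_RATE == 1.024  # seconds
```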
@@ -1,6 +1,7 @@
from reflector.processors.base import Processor
from reflector.processors.types import (
    AudioDiarizationInput,
    DiarizationSegment,
    TitleSummary,
    Word,
)
@@ -37,18 +38,21 @@ class AudioDiarizationProcessor(Processor):
    async def _diarize(self, data: AudioDiarizationInput):
        raise NotImplementedError

    def assign_speaker(self, words: list[Word], diarization: list[dict]):
        self._diarization_remove_overlap(diarization)
        self._diarization_remove_segment_without_words(words, diarization)
        self._diarization_merge_same_speaker(words, diarization)
        self._diarization_assign_speaker(words, diarization)
    @classmethod
    def assign_speaker(cls, words: list[Word], diarization: list[DiarizationSegment]):
        cls._diarization_remove_overlap(diarization)
        cls._diarization_remove_segment_without_words(words, diarization)
        cls._diarization_merge_same_speaker(diarization)
        cls._diarization_assign_speaker(words, diarization)

    def iter_words_from_topics(self, topics: TitleSummary):
    @staticmethod
    def iter_words_from_topics(topics: list[TitleSummary]):
        for topic in topics:
            for word in topic.transcript.words:
                yield word

    def is_word_continuation(self, word_prev, word):
    @staticmethod
    def is_word_continuation(word_prev, word):
        """
        Return True if the word is a continuation of the previous word
        by checking if the previous word is ending with a punctuation
@@ -61,7 +65,8 @@ class AudioDiarizationProcessor(Processor):
            return False
        return True

    def _diarization_remove_overlap(self, diarization: list[dict]):
    @staticmethod
    def _diarization_remove_overlap(diarization: list[DiarizationSegment]):
        """
        Remove overlap in diarization results

@@ -86,8 +91,9 @@ class AudioDiarizationProcessor(Processor):
            else:
                diarization_idx += 1

    @staticmethod
    def _diarization_remove_segment_without_words(
        self, words: list[Word], diarization: list[dict]
        words: list[Word], diarization: list[DiarizationSegment]
    ):
        """
        Remove diarization segments without words
@@ -116,9 +122,8 @@ class AudioDiarizationProcessor(Processor):
            else:
                diarization_idx += 1

    def _diarization_merge_same_speaker(
        self, words: list[Word], diarization: list[dict]
    ):
    @staticmethod
    def _diarization_merge_same_speaker(diarization: list[DiarizationSegment]):
        """
        Merge diarization contiguous segments with the same speaker

@@ -135,7 +140,10 @@ class AudioDiarizationProcessor(Processor):
            else:
                diarization_idx += 1

    def _diarization_assign_speaker(self, words: list[Word], diarization: list[dict]):
    @classmethod
    def _diarization_assign_speaker(
        cls, words: list[Word], diarization: list[DiarizationSegment]
    ):
        """
        Assign speaker to words based on diarization

@@ -143,7 +151,7 @@ class AudioDiarizationProcessor(Processor):
        """

        word_idx = 0
        last_speaker = None
        last_speaker = 0
        for d in diarization:
            start = d["start"]
            end = d["end"]
@@ -158,7 +166,7 @@ class AudioDiarizationProcessor(Processor):
            # If it's a continuation, assign with the last speaker
            is_continuation = False
            if word_idx > 0 and word_idx < len(words) - 1:
                is_continuation = self.is_word_continuation(
                is_continuation = cls.is_word_continuation(
                    *words[word_idx - 1 : word_idx + 1]
                )
                if is_continuation:

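Since `assign_speaker` is now a classmethod operating on `DiarizationSegment` dicts, callers no longer need an instance. A minimal sketch of the four-step cleanup it applies (overlap removal, empty-segment removal, same-speaker merge, assignment); the exact `Word` constructor arguments here are assumptions based on the fields visible in the diff:

```python
from reflector.processors.audio_diarization import AudioDiarizationProcessor
from reflector.processors.types import DiarizationSegment, Word

words = [
    Word(text="hello ", start=0.0, end=0.4),
    Word(text="world", start=0.5, end=0.9),
]
diarization: list[DiarizationSegment] = [
    {"start": 0.0, "end": 1.0, "speaker": 0},
]

# mutates `words` in place, assigning a speaker to each word
AudioDiarizationProcessor.assign_speaker(words, diarization)
```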
74 server/reflector/processors/audio_diarization_pyannote.py Normal file
@@ -0,0 +1,74 @@
import os

import torch
import torchaudio
from pyannote.audio import Pipeline

from reflector.processors.audio_diarization import AudioDiarizationProcessor
from reflector.processors.audio_diarization_auto import AudioDiarizationAutoProcessor
from reflector.processors.types import AudioDiarizationInput, DiarizationSegment


class AudioDiarizationPyannoteProcessor(AudioDiarizationProcessor):
    """Local diarization processor using pyannote.audio library"""

    def __init__(
        self,
        model_name: str = "pyannote/speaker-diarization-3.1",
        pyannote_auth_token: str | None = None,
        device: str | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.model_name = model_name
        self.auth_token = pyannote_auth_token or os.environ.get("HF_TOKEN")
        self.device = device

        if device is None:
            self.device = "cuda" if torch.cuda.is_available() else "cpu"

        self.logger.info(f"Loading pyannote diarization model: {self.model_name}")
        self.diarization_pipeline = Pipeline.from_pretrained(
            self.model_name, use_auth_token=self.auth_token
        )
        self.diarization_pipeline.to(torch.device(self.device))
        self.logger.info(f"Diarization model loaded on device: {self.device}")

    async def _diarize(self, data: AudioDiarizationInput) -> list[DiarizationSegment]:
        try:
            # Load audio file (audio_url is assumed to be a local file path)
            self.logger.info(f"Loading local audio file: {data.audio_url}")
            waveform, sample_rate = torchaudio.load(data.audio_url)
            audio_input = {"waveform": waveform, "sample_rate": sample_rate}
            self.logger.info("Running speaker diarization")
            diarization = self.diarization_pipeline(audio_input)

            # Convert pyannote diarization output to our format
            segments = []
            for segment, _, speaker in diarization.itertracks(yield_label=True):
                # Extract speaker number from label (e.g., "SPEAKER_00" -> 0)
                speaker_id = 0
                if speaker.startswith("SPEAKER_"):
                    try:
                        speaker_id = int(speaker.split("_")[-1])
                    except (ValueError, IndexError):
                        # Fallback to hash-based ID if parsing fails
                        speaker_id = hash(speaker) % 1000

                segments.append(
                    {
                        "start": round(segment.start, 3),
                        "end": round(segment.end, 3),
                        "speaker": speaker_id,
                    }
                )

            self.logger.info(f"Diarization completed with {len(segments)} segments")
            return segments

        except Exception as e:
            self.logger.exception(f"Diarization failed: {e}")
            raise


AudioDiarizationAutoProcessor.register("pyannote", AudioDiarizationPyannoteProcessor)
60 server/reflector/processors/audio_downscale.py Normal file
@@ -0,0 +1,60 @@
from typing import Optional

import av
from av.audio.resampler import AudioResampler

from reflector.processors.base import Processor


def copy_frame(frame: av.AudioFrame) -> av.AudioFrame:
    frame_copy = frame.from_ndarray(
        frame.to_ndarray(),
        format=frame.format.name,
        layout=frame.layout.name,
    )
    frame_copy.sample_rate = frame.sample_rate
    frame_copy.pts = frame.pts
    frame_copy.time_base = frame.time_base
    return frame_copy


class AudioDownscaleProcessor(Processor):
    """
    Downscale audio frames to 16kHz mono format
    """

    INPUT_TYPE = av.AudioFrame
    OUTPUT_TYPE = av.AudioFrame

    def __init__(self, target_rate: int = 16000, target_layout: str = "mono", **kwargs):
        super().__init__(**kwargs)
        self.target_rate = target_rate
        self.target_layout = target_layout
        self.resampler: Optional[AudioResampler] = None
        self.needs_resampling: Optional[bool] = None

    async def _push(self, data: av.AudioFrame):
        if self.needs_resampling is None:
            self.needs_resampling = (
                data.sample_rate != self.target_rate
                or data.layout.name != self.target_layout
            )

            if self.needs_resampling:
                self.resampler = AudioResampler(
                    format="s16", layout=self.target_layout, rate=self.target_rate
                )

        if not self.needs_resampling or not self.resampler:
            await self.emit(data)
            return

        resampled_frames = self.resampler.resample(copy_frame(data))
        for resampled_frame in resampled_frames:
            await self.emit(resampled_frame)

    async def _flush(self):
        if self.needs_resampling and self.resampler:
            final_frames = self.resampler.resample(None)
            for frame in final_frames:
                await self.emit(frame)
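AudioDownscaleProcessor decides once, on the first frame, whether resampling is needed, then reuses a single `AudioResampler`. A hedged sketch of pairing it with a chunker so the 16 kHz mono validation upstream always passes:

```python
from reflector.processors import (
    AudioChunkerAutoProcessor,
    AudioDownscaleProcessor,
    Pipeline,
)

# downscale first so AudioChunkerProcessor's 16 kHz mono check never fires
pipeline = Pipeline(
    AudioDownscaleProcessor(),    # e.g. 48 kHz stereo in -> 16 kHz mono out
    AudioChunkerAutoProcessor(),
)
```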
@@ -16,37 +16,46 @@ class AudioMergeProcessor(Processor):
    INPUT_TYPE = list[av.AudioFrame]
    OUTPUT_TYPE = AudioFile

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    async def _push(self, data: list[av.AudioFrame]):
        if not data:
            return

        # get audio information from first frame
        frame = data[0]
        channels = len(frame.layout.channels)
        sample_rate = frame.sample_rate
        sample_width = frame.format.bytes
        output_channels = len(frame.layout.channels)
        output_sample_rate = frame.sample_rate
        output_sample_width = frame.format.bytes

        # create audio file
        uu = uuid4().hex
        fd = io.BytesIO()

        # Use PyAV to write frames
        out_container = av.open(fd, "w", format="wav")
        out_stream = out_container.add_stream("pcm_s16le", rate=sample_rate)
        out_stream = out_container.add_stream("pcm_s16le", rate=output_sample_rate)
        out_stream.layout = frame.layout.name

        for frame in data:
            for packet in out_stream.encode(frame):
                out_container.mux(packet)

        # Flush the encoder
        for packet in out_stream.encode(None):
            out_container.mux(packet)
        out_container.close()

        fd.seek(0)

        # emit audio file
        audiofile = AudioFile(
            name=f"{monotonic_ns()}-{uu}.wav",
            fd=fd,
            sample_rate=sample_rate,
            channels=channels,
            sample_width=sample_width,
            sample_rate=output_sample_rate,
            channels=output_channels,
            sample_width=output_sample_width,
            timestamp=data[0].pts * data[0].time_base,
        )

@@ -21,7 +21,11 @@ from reflector.settings import settings


class AudioTranscriptModalProcessor(AudioTranscriptProcessor):
    def __init__(self, modal_api_key: str | None = None, **kwargs):
    def __init__(
        self,
        modal_api_key: str | None = None,
        **kwargs,
    ):
        super().__init__()
        if not settings.TRANSCRIPT_URL:
            raise Exception(
@@ -173,6 +173,7 @@ class Processor(Emitter):
        except Exception:
            self.m_processor_failure.inc()
            self.logger.exception("Error in push")
            raise

    async def flush(self):
        """
@@ -240,33 +241,45 @@ class ThreadedProcessor(Processor):
        self.INPUT_TYPE = processor.INPUT_TYPE
        self.OUTPUT_TYPE = processor.OUTPUT_TYPE
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        self.queue = asyncio.Queue()
        self.task = asyncio.get_running_loop().create_task(self.loop())
        self.queue = asyncio.Queue(maxsize=50)
        self.task: asyncio.Task | None = None

    def set_pipeline(self, pipeline: "Pipeline"):
        super().set_pipeline(pipeline)
        self.processor.set_pipeline(pipeline)

    async def loop(self):
        while True:
            data = await self.queue.get()
            self.m_processor_queue.set(self.queue.qsize())
            with self.m_processor_queue_in_progress.track_inprogress():
                try:
                    if data is None:
                        await self.processor.flush()
                        break
        try:
            while True:
                data = await self.queue.get()
                self.m_processor_queue.set(self.queue.qsize())
                with self.m_processor_queue_in_progress.track_inprogress():
                    try:
                        await self.processor.push(data)
                    except Exception:
                        self.logger.error(
                            f"Error in push {self.processor.__class__.__name__}"
                            ", continue"
                        )
                    finally:
                        self.queue.task_done()
                        if data is None:
                            await self.processor.flush()
                            break
                        try:
                            await self.processor.push(data)
                        except Exception:
                            self.logger.error(
                                f"Error in push {self.processor.__class__.__name__}"
                                ", continue"
                            )
                        finally:
                            self.queue.task_done()
        except Exception as e:
            logger.error(f"Crash in {self.__class__.__name__}: {e}", exc_info=e)

    async def _ensure_task(self):
        if self.task is None:
            self.task = asyncio.get_running_loop().create_task(self.loop())

        # XXX not doing a sleep here makes the whole pipeline prior to the thread
        # run without giving the task here a chance to work.
        await asyncio.sleep(0)

    async def _push(self, data):
        await self._ensure_task()
        await self.queue.put(data)

    async def _flush(self):
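Two behavioural changes hide in this hunk: the queue is now bounded (`maxsize=50`), so `_push` applies backpressure, and the consumer task is created lazily on first push rather than in `__init__`, which avoids requiring a running event loop at construction time. A sketch of the pattern in isolation:

```python
import asyncio


class LazyWorker:
    """Sketch: bounded queue plus a consumer task created on first use."""

    def __init__(self):
        self.queue: asyncio.Queue = asyncio.Queue(maxsize=50)
        self.task: asyncio.Task | None = None

    async def push(self, item):
        if self.task is None:  # safe: no event loop required in __init__
            self.task = asyncio.get_running_loop().create_task(self._loop())
        await asyncio.sleep(0)  # let the consumer get scheduled
        await self.queue.put(item)  # blocks once 50 items are pending

    async def _loop(self):
        # None acts as the flush/stop sentinel, as in ThreadedProcessor
        while (item := await self.queue.get()) is not None:
            ...  # process item
```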
33 server/reflector/processors/file_diarization.py Normal file
@@ -0,0 +1,33 @@
from pydantic import BaseModel

from reflector.processors.base import Processor
from reflector.processors.types import DiarizationSegment


class FileDiarizationInput(BaseModel):
    """Input for file diarization containing audio URL"""

    audio_url: str


class FileDiarizationOutput(BaseModel):
    """Output for file diarization containing speaker segments"""

    diarization: list[DiarizationSegment]


class FileDiarizationProcessor(Processor):
    """
    Diarize complete audio files from URL
    """

    INPUT_TYPE = FileDiarizationInput
    OUTPUT_TYPE = FileDiarizationOutput

    async def _push(self, data: FileDiarizationInput):
        result = await self._diarize(data)
        if result:
            await self.emit(result)

    async def _diarize(self, data: FileDiarizationInput):
        raise NotImplementedError
33 server/reflector/processors/file_diarization_auto.py Normal file
@@ -0,0 +1,33 @@
import importlib

from reflector.processors.file_diarization import FileDiarizationProcessor
from reflector.settings import settings


class FileDiarizationAutoProcessor(FileDiarizationProcessor):
    _registry = {}

    @classmethod
    def register(cls, name, kclass):
        cls._registry[name] = kclass

    def __new__(cls, name: str | None = None, **kwargs):
        if name is None:
            name = settings.DIARIZATION_BACKEND

        if name not in cls._registry:
            module_name = f"reflector.processors.file_diarization_{name}"
            importlib.import_module(module_name)

        # gather specific configuration for the processor
        # search `DIARIZATION_BACKEND_XXX_YYY`, push to constructor as `backend_xxx_yyy`
        config = {}
        name_upper = name.upper()
        settings_prefix = "DIARIZATION_"
        config_prefix = f"{settings_prefix}{name_upper}_"
        for key, value in settings:
            if key.startswith(config_prefix):
                config_name = key[len(settings_prefix) :].lower()
                config[config_name] = value

        return cls._registry[name](**config | kwargs)
58 server/reflector/processors/file_diarization_modal.py Normal file
@@ -0,0 +1,58 @@
"""
File diarization implementation using the GPU service from modal.com

API will be a POST request to DIARIZATION_URL:

```
POST /diarize?audio_file_url=...&timestamp=0
Authorization: Bearer <modal_api_key>
```
"""

import httpx

from reflector.processors.file_diarization import (
    FileDiarizationInput,
    FileDiarizationOutput,
    FileDiarizationProcessor,
)
from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor
from reflector.settings import settings


class FileDiarizationModalProcessor(FileDiarizationProcessor):
    def __init__(self, modal_api_key: str | None = None, **kwargs):
        super().__init__(**kwargs)
        if not settings.DIARIZATION_URL:
            raise Exception(
                "DIARIZATION_URL required to use FileDiarizationModalProcessor"
            )
        self.diarization_url = settings.DIARIZATION_URL + "/diarize"
        self.file_timeout = settings.DIARIZATION_FILE_TIMEOUT
        self.modal_api_key = modal_api_key

    async def _diarize(self, data: FileDiarizationInput):
        """Get speaker diarization for file"""
        self.logger.info(f"Starting diarization from {data.audio_url}")

        headers = {}
        if self.modal_api_key:
            headers["Authorization"] = f"Bearer {self.modal_api_key}"

        async with httpx.AsyncClient(timeout=self.file_timeout) as client:
            response = await client.post(
                self.diarization_url,
                headers=headers,
                params={
                    "audio_file_url": data.audio_url,
                    "timestamp": 0,
                },
                follow_redirects=True,
            )
            response.raise_for_status()
            diarization_data = response.json()["diarization"]

        return FileDiarizationOutput(diarization=diarization_data)


FileDiarizationAutoProcessor.register("modal", FileDiarizationModalProcessor)
65 server/reflector/processors/file_transcript.py Normal file
@@ -0,0 +1,65 @@
from prometheus_client import Counter, Histogram

from reflector.processors.base import Processor
from reflector.processors.types import Transcript


class FileTranscriptInput:
    """Input for file transcription containing audio URL and language settings"""

    def __init__(self, audio_url: str, language: str = "en"):
        self.audio_url = audio_url
        self.language = language


class FileTranscriptProcessor(Processor):
    """
    Transcribe complete audio files from URL
    """

    INPUT_TYPE = FileTranscriptInput
    OUTPUT_TYPE = Transcript

    m_transcript = Histogram(
        "file_transcript",
        "Time spent in FileTranscript.transcript",
        ["backend"],
    )
    m_transcript_call = Counter(
        "file_transcript_call",
        "Number of calls to FileTranscript.transcript",
        ["backend"],
    )
    m_transcript_success = Counter(
        "file_transcript_success",
        "Number of successful calls to FileTranscript.transcript",
        ["backend"],
    )
    m_transcript_failure = Counter(
        "file_transcript_failure",
        "Number of failed calls to FileTranscript.transcript",
        ["backend"],
    )

    def __init__(self, *args, **kwargs):
        name = self.__class__.__name__
        self.m_transcript = self.m_transcript.labels(name)
        self.m_transcript_call = self.m_transcript_call.labels(name)
        self.m_transcript_success = self.m_transcript_success.labels(name)
        self.m_transcript_failure = self.m_transcript_failure.labels(name)
        super().__init__(*args, **kwargs)

    async def _push(self, data: FileTranscriptInput):
        try:
            self.m_transcript_call.inc()
            with self.m_transcript.time():
                result = await self._transcript(data)
            self.m_transcript_success.inc()
            if result:
                await self.emit(result)
        except Exception:
            self.m_transcript_failure.inc()
            raise

    async def _transcript(self, data: FileTranscriptInput):
        raise NotImplementedError
32 server/reflector/processors/file_transcript_auto.py Normal file
@@ -0,0 +1,32 @@
import importlib

from reflector.processors.file_transcript import FileTranscriptProcessor
from reflector.settings import settings


class FileTranscriptAutoProcessor(FileTranscriptProcessor):
    _registry = {}

    @classmethod
    def register(cls, name, kclass):
        cls._registry[name] = kclass

    def __new__(cls, name: str | None = None, **kwargs):
        if name is None:
            name = settings.TRANSCRIPT_BACKEND
        if name not in cls._registry:
            module_name = f"reflector.processors.file_transcript_{name}"
            importlib.import_module(module_name)

        # gather specific configuration for the processor
        # search `TRANSCRIPT_BACKEND_XXX_YYY`, push to constructor as `backend_xxx_yyy`
        config = {}
        name_upper = name.upper()
        settings_prefix = "TRANSCRIPT_"
        config_prefix = f"{settings_prefix}{name_upper}_"
        for key, value in settings:
            if key.startswith(config_prefix):
                config_name = key[len(settings_prefix) :].lower()
                config[config_name] = value

        return cls._registry[name](**config | kwargs)
78 server/reflector/processors/file_transcript_modal.py Normal file
@@ -0,0 +1,78 @@
"""
File transcription implementation using the GPU service from modal.com

API will be a POST request to TRANSCRIPT_URL:

```json
{
    "audio_file_url": "https://...",
    "language": "en",
    "model": "parakeet-tdt-0.6b-v2",
    "batch": true
}
```
"""

import httpx

from reflector.processors.file_transcript import (
    FileTranscriptInput,
    FileTranscriptProcessor,
)
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
from reflector.processors.types import Transcript, Word
from reflector.settings import settings


class FileTranscriptModalProcessor(FileTranscriptProcessor):
    def __init__(self, modal_api_key: str | None = None, **kwargs):
        super().__init__(**kwargs)
        if not settings.TRANSCRIPT_URL:
            raise Exception(
                "TRANSCRIPT_URL required to use FileTranscriptModalProcessor"
            )
        self.transcript_url = settings.TRANSCRIPT_URL
        self.file_timeout = settings.TRANSCRIPT_FILE_TIMEOUT
        self.modal_api_key = modal_api_key

    async def _transcript(self, data: FileTranscriptInput):
        """Send full file to Modal for transcription"""
        url = f"{self.transcript_url}/v1/audio/transcriptions-from-url"

        self.logger.info(f"Starting file transcription from {data.audio_url}")

        headers = {}
        if self.modal_api_key:
            headers["Authorization"] = f"Bearer {self.modal_api_key}"

        async with httpx.AsyncClient(timeout=self.file_timeout) as client:
            response = await client.post(
                url,
                headers=headers,
                json={
                    "audio_file_url": data.audio_url,
                    "language": data.language,
                    "batch": True,
                },
                follow_redirects=True,
            )
            response.raise_for_status()
            result = response.json()

        words = [
            Word(
                text=word_info["word"],
                start=word_info["start"],
                end=word_info["end"],
            )
            for word_info in result.get("words", [])
        ]

        # words do not come back in order
        words.sort(key=lambda w: w.start)

        return Transcript(words=words)


# Register with the auto processor
FileTranscriptAutoProcessor.register("modal", FileTranscriptModalProcessor)
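The modal backend returns word timings that are not guaranteed to be ordered (hence the sort by `w.start`). A hedged sketch of driving the processor directly, inside an async context; the URL is a placeholder and `TRANSCRIPT_URL` is assumed configured:

```python
from reflector.processors.file_transcript import FileTranscriptInput
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor

# assumes TRANSCRIPT_BACKEND=modal and TRANSCRIPT_URL are set
processor = FileTranscriptAutoProcessor(name="modal")
await processor.push(
    FileTranscriptInput(audio_url="https://example.com/audio.wav", language="en")
)
```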
@@ -0,0 +1,45 @@
"""
Processor to assemble transcript with diarization results
"""

from reflector.processors.audio_diarization import AudioDiarizationProcessor
from reflector.processors.base import Processor
from reflector.processors.types import DiarizationSegment, Transcript


class TranscriptDiarizationAssemblerInput:
    """Input containing transcript and diarization data"""

    def __init__(self, transcript: Transcript, diarization: list[DiarizationSegment]):
        self.transcript = transcript
        self.diarization = diarization


class TranscriptDiarizationAssemblerProcessor(Processor):
    """
    Assemble transcript with diarization results by applying speaker assignments
    """

    INPUT_TYPE = TranscriptDiarizationAssemblerInput
    OUTPUT_TYPE = Transcript

    async def _push(self, data: TranscriptDiarizationAssemblerInput):
        result = await self._assemble(data)
        if result:
            await self.emit(result)

    async def _assemble(self, data: TranscriptDiarizationAssemblerInput):
        """Apply diarization to transcript words"""
        if not data.diarization:
            self.logger.info(
                "No diarization data provided, returning original transcript"
            )
            return data.transcript

        # Reuse logic from AudioDiarizationProcessor
        processor = AudioDiarizationProcessor()
        words = data.transcript.words
        processor.assign_speaker(words, data.diarization)

        self.logger.info(f"Applied diarization to {len(words)} words")
        return data.transcript
@@ -2,18 +2,21 @@ import io
import re
import tempfile
from pathlib import Path
from typing import Annotated
from typing import Annotated, TypedDict

from profanityfilter import ProfanityFilter
from pydantic import BaseModel, Field, PrivateAttr

from reflector.redis_cache import redis_cache


class DiarizationSegment(TypedDict):
    """Type definition for diarization segment containing speaker information"""

    start: float
    end: float
    speaker: int


PUNC_RE = re.compile(r"[.;:?!…]")

profanity_filter = ProfanityFilter()
profanity_filter.set_censor("*")


class AudioFile(BaseModel):
    name: str
@@ -115,21 +118,11 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:

class Transcript(BaseModel):
    translation: str | None = None
    words: list[Word] = None

    @property
    def raw_text(self):
        # Uncensored text
        return "".join([word.text for word in self.words])

    @redis_cache(prefix="profanity", duration=3600 * 24 * 7)
    def _get_censored_text(self, text: str):
        return profanity_filter.censor(text).strip()
    words: list[Word] = []

    @property
    def text(self):
        # Censored text
        return self._get_censored_text(self.raw_text)
        return "".join([word.text for word in self.words])

    @property
    def human_timestamp(self):
@@ -161,12 +154,6 @@ class Transcript(BaseModel):
            word.start += offset
            word.end += offset

    def clone(self):
        words = [
            Word(text=word.text, start=word.start, end=word.end) for word in self.words
        ]
        return Transcript(text=self.text, translation=self.translation, words=words)

    def as_segments(self) -> list[TranscriptSegment]:
        return words_to_segments(self.words)

|
||||
from pydantic.types import PositiveInt
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
model_config = SettingsConfigDict(
|
||||
@@ -21,11 +24,16 @@ class Settings(BaseSettings):
|
||||
# local data directory
|
||||
DATA_DIR: str = "./data"
|
||||
|
||||
# Audio Chunking
|
||||
# backends: silero, frames
|
||||
AUDIO_CHUNKER_BACKEND: str = "frames"
|
||||
|
||||
# Audio Transcription
|
||||
# backends: whisper, modal
|
||||
TRANSCRIPT_BACKEND: str = "whisper"
|
||||
TRANSCRIPT_URL: str | None = None
|
||||
TRANSCRIPT_TIMEOUT: int = 90
|
||||
TRANSCRIPT_FILE_TIMEOUT: int = 600
|
||||
|
||||
# Audio Transcription: modal backend
|
||||
TRANSCRIPT_MODAL_API_KEY: str | None = None
|
||||
@@ -66,10 +74,14 @@ class Settings(BaseSettings):
|
||||
DIARIZATION_ENABLED: bool = True
|
||||
DIARIZATION_BACKEND: str = "modal"
|
||||
DIARIZATION_URL: str | None = None
|
||||
DIARIZATION_FILE_TIMEOUT: int = 600
|
||||
|
||||
# Diarization: modal backend
|
||||
DIARIZATION_MODAL_API_KEY: str | None = None
|
||||
|
||||
# Diarization: local pyannote.audio
|
||||
DIARIZATION_PYANNOTE_AUTH_TOKEN: str | None = None
|
||||
|
||||
# Sentry
|
||||
SENTRY_DSN: str | None = None
|
||||
|
||||
@@ -81,9 +93,8 @@ class Settings(BaseSettings):
|
||||
AUTH_JWT_PUBLIC_KEY: str | None = "authentik.monadical.com_public.pem"
|
||||
AUTH_JWT_AUDIENCE: str | None = None
|
||||
|
||||
# API public mode
|
||||
# if set, all anonymous record will be public
|
||||
PUBLIC_MODE: bool = False
|
||||
PUBLIC_DATA_RETENTION_DAYS: PositiveInt = 7
|
||||
|
||||
# Min transcript length to generate topic + summary
|
||||
MIN_TRANSCRIPT_LENGTH: int = 750
|
||||
@@ -111,7 +122,7 @@ class Settings(BaseSettings):
|
||||
|
||||
# Whereby integration
|
||||
WHEREBY_API_URL: str = "https://api.whereby.dev/v1"
|
||||
WHEREBY_API_KEY: str | None = None
|
||||
WHEREBY_API_KEY: NonEmptyString | None = None
|
||||
WHEREBY_WEBHOOK_SECRET: str | None = None
|
||||
AWS_WHEREBY_ACCESS_KEY_ID: str | None = None
|
||||
AWS_WHEREBY_ACCESS_KEY_SECRET: str | None = None
|
||||
|
||||
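Since Settings is a pydantic BaseSettings, the new knobs map one-to-one to environment variables. A plausible `.env` fragment exercising the additions (values are examples only):

```
AUDIO_CHUNKER_BACKEND=silero
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_FILE_TIMEOUT=600
DIARIZATION_BACKEND=pyannote
DIARIZATION_PYANNOTE_AUTH_TOKEN=hf_xxx
PUBLIC_MODE=true
PUBLIC_DATA_RETENTION_DAYS=7
```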
72 server/reflector/tools/cleanup_old_data.py Normal file
@@ -0,0 +1,72 @@
#!/usr/bin/env python
"""
Manual cleanup tool for old public data.
Uses the same implementation as the Celery worker task.
"""

import argparse
import asyncio
import sys

import structlog

from reflector.settings import settings
from reflector.worker.cleanup import _cleanup_old_public_data

logger = structlog.get_logger(__name__)


async def cleanup_old_data(days: int = 7):
    logger.info(
        "Starting manual cleanup",
        retention_days=days,
        public_mode=settings.PUBLIC_MODE,
    )

    if not settings.PUBLIC_MODE:
        logger.critical(
            "WARNING: PUBLIC_MODE is False. "
            "This tool is intended for public instances only."
        )
        raise Exception("Tool intended for public instances only")

    result = await _cleanup_old_public_data(days=days)

    if result:
        logger.info(
            "Cleanup completed",
            transcripts_deleted=result.get("transcripts_deleted", 0),
            meetings_deleted=result.get("meetings_deleted", 0),
            recordings_deleted=result.get("recordings_deleted", 0),
            errors_count=len(result.get("errors", [])),
        )
        if result.get("errors"):
            logger.warning(
                "Errors encountered during cleanup:", errors=result["errors"][:10]
            )
    else:
        logger.info("Cleanup skipped or completed without results")


def main():
    parser = argparse.ArgumentParser(
        description="Clean up old transcripts and meetings"
    )
    parser.add_argument(
        "--days",
        type=int,
        default=7,
        help="Number of days to keep data (default: 7)",
    )

    args = parser.parse_args()

    if args.days < 1:
        logger.error("Days must be at least 1")
        sys.exit(1)

    asyncio.run(cleanup_old_data(days=args.days))


if __name__ == "__main__":
    main()
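Assuming the module is invoked with Python's `-m` switch from the server directory (the invocation path is inferred from the file location, not stated in the diff), usage would look like:

```
python -m reflector.tools.cleanup_old_data --days 7
```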
@@ -1,105 +1,220 @@
|
||||
"""
|
||||
Process audio file with diarization support
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import json
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Literal
|
||||
|
||||
import av
|
||||
|
||||
from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller
|
||||
from reflector.logger import logger
|
||||
from reflector.processors import (
|
||||
AudioChunkerProcessor,
|
||||
AudioMergeProcessor,
|
||||
AudioTranscriptAutoProcessor,
|
||||
Pipeline,
|
||||
PipelineEvent,
|
||||
TranscriptFinalSummaryProcessor,
|
||||
TranscriptFinalTitleProcessor,
|
||||
TranscriptLinerProcessor,
|
||||
TranscriptTopicDetectorProcessor,
|
||||
TranscriptTranslatorAutoProcessor,
|
||||
from reflector.pipelines.main_file_pipeline import (
|
||||
task_pipeline_file_process as task_pipeline_file_process,
|
||||
)
|
||||
from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipeline_post
|
||||
from reflector.pipelines.main_live_pipeline import (
|
||||
pipeline_process as live_pipeline_process,
|
||||
)
|
||||
from reflector.processors.base import BroadcastProcessor
|
||||
|
||||
|
||||
async def process_audio_file(
|
||||
filename,
|
||||
event_callback,
|
||||
only_transcript=False,
|
||||
source_language="en",
|
||||
target_language="en",
|
||||
def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:
|
||||
"""Convert TranscriptTopic objects to JSON-serializable dicts"""
|
||||
serialized = []
|
||||
for topic in topics:
|
||||
topic_dict = topic.model_dump()
|
||||
serialized.append(topic_dict)
|
||||
return serialized
|
||||
|
||||
|
||||
def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
|
||||
"""Print debug info about speakers found in topics"""
|
||||
all_speakers = set()
|
||||
for topic_dict in serialized_topics:
|
||||
for word in topic_dict.get("words", []):
|
||||
all_speakers.add(word.get("speaker", 0))
|
||||
|
||||
print(
|
||||
f"Found {len(serialized_topics)} topics with speakers: {all_speakers}",
|
||||
file=sys.stderr,
|
||||
)
|
||||
|
||||
|
||||
TranscriptId = str
|
||||
|
||||
|
||||
# common interface for every flow: it needs an Entry in db with specific ceremony (file path + status + actual file in file system)
|
||||
# ideally we want to get rid of it at some point
|
||||
async def prepare_entry(
|
||||
source_path: str,
|
||||
source_language: str,
|
||||
target_language: str,
|
||||
) -> TranscriptId:
|
||||
file_path = Path(source_path)
|
||||
|
||||
transcript = await transcripts_controller.add(
|
||||
file_path.name,
|
||||
# note that the real file upload has SourceKind: LIVE for the reason of it's an error
|
||||
source_kind=SourceKind.FILE,
|
||||
source_language=source_language,
|
||||
target_language=target_language,
|
||||
user_id=None,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Created empty transcript {transcript.id} for file {file_path.name} because technically we need an empty transcript before we start transcript"
|
||||
)
|
||||
|
||||
# pipelines expect files as upload.*
|
||||
|
||||
extension = file_path.suffix
|
||||
upload_path = transcript.data_path / f"upload{extension}"
|
||||
upload_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
shutil.copy2(source_path, upload_path)
|
||||
logger.info(f"Copied {source_path} to {upload_path}")
|
||||
|
||||
# pipelines expect entity status "uploaded"
|
||||
await transcripts_controller.update(transcript, {"status": "uploaded"})
|
||||
|
||||
return transcript.id
|
||||
|
||||
|
||||
# same reason as prepare_entry
|
||||
async def extract_result_from_entry(
|
||||
transcript_id: TranscriptId, output_path: str
|
||||
) -> None:
|
||||
post_final_transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
|
||||
# assert post_final_transcript.status == "ended"
|
||||
# File pipeline doesn't set status to "ended", only live pipeline does https://github.com/Monadical-SAS/reflector/issues/582
|
||||
topics = post_final_transcript.topics
|
||||
if not topics:
|
||||
raise RuntimeError(
|
||||
f"No topics found for transcript {transcript_id} after processing"
|
||||
)
|
||||
|
||||
serialized_topics = serialize_topics(topics)
|
||||
|
||||
if output_path:
|
||||
# Write to JSON file
|
||||
with open(output_path, "w") as f:
|
||||
for topic_dict in serialized_topics:
|
||||
json.dump(topic_dict, f)
|
||||
f.write("\n")
|
||||
print(f"Results written to {output_path}", file=sys.stderr)
|
||||
else:
|
||||
# Write to stdout as JSONL
|
||||
for topic_dict in serialized_topics:
|
||||
print(json.dumps(topic_dict))
|
||||
|
||||
debug_print_speakers(serialized_topics)
|
||||
|
||||
|
||||
async def process_live_pipeline(
|
||||
    transcript_id: TranscriptId,
):
    """Process transcript_id with transcription and diarization"""
    # build pipeline for audio processing
    processors = [
        AudioChunkerProcessor(),
        AudioMergeProcessor(),
        AudioTranscriptAutoProcessor.as_threaded(),
        TranscriptLinerProcessor(),
        TranscriptTranslatorAutoProcessor.as_threaded(),
    ]
    if not only_transcript:
        processors += [
            TranscriptTopicDetectorProcessor.as_threaded(),
            BroadcastProcessor(
                processors=[
                    TranscriptFinalTitleProcessor.as_threaded(),
                    TranscriptFinalSummaryProcessor.as_threaded(),
                ],
            ),
        ]

    # transcription output
    pipeline = Pipeline(*processors)
    pipeline.set_pref("audio:source_language", source_language)
    pipeline.set_pref("audio:target_language", target_language)
    pipeline.describe()
    pipeline.on(event_callback)
    print(f"Processing transcript_id {transcript_id}...", file=sys.stderr)
    await live_pipeline_process(transcript_id=transcript_id)
    print(f"Processing complete for transcript {transcript_id}", file=sys.stderr)

    pre_final_transcript = await transcripts_controller.get_by_id(transcript_id)

    # assert documented behaviour: after process, the pipeline isn't ended; this is why pipeline_post is called
    assert pre_final_transcript.status != "ended"

    # at this point, diarization is running but we have no access to it; run diarization in parallel - one will hopefully win after polling
    result = live_pipeline_post(transcript_id=transcript_id)

    # result.ready() blocks even without await; it also mutates result
    while not result.ready():
        print(f"Status: {result.state}")
        time.sleep(2)


async def process_file_pipeline(
    transcript_id: TranscriptId,
):
    """Process audio/video file using the optimized file pipeline"""

    # task_pipeline_file_process is a Celery task; use .delay() for async execution
    result = task_pipeline_file_process.delay(transcript_id=transcript_id)

    # Wait for the Celery task to complete
    while not result.ready():
        print(f"File pipeline status: {result.state}", file=sys.stderr)
        time.sleep(2)

    logger.info("File pipeline processing complete")


async def process(
    source_path: str,
    source_language: str,
    target_language: str,
    pipeline: Literal["live", "file"],
    output_path: str = None,
):
    from reflector.db import get_database

    database = get_database()
    # db connect is part of the ceremony
    await database.connect()

    # start processing audio
    logger.info(f"Opening {filename}")
    container = av.open(filename)
    try:
        logger.info("Start pushing audio into the pipeline")
        for frame in container.decode(audio=0):
            await pipeline.push(frame)
    finally:
        logger.info("Flushing the pipeline")
        await pipeline.flush()
        transcript_id = await prepare_entry(
            source_path,
            source_language,
            target_language,
        )

        logger.info("All done !")
        pipeline_handlers = {
            "live": process_live_pipeline,
            "file": process_file_pipeline,
        }

        handler = pipeline_handlers.get(pipeline)
        if not handler:
            raise ValueError(f"Unknown pipeline type: {pipeline}")

        await handler(transcript_id)

        await extract_result_from_entry(transcript_id, output_path)
    finally:
        await database.disconnect()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser = argparse.ArgumentParser(
        description="Process audio files with speaker diarization"
    )
    parser.add_argument("source", help="Source file (mp3, wav, mp4...)")
    parser.add_argument("--only-transcript", "-t", action="store_true")
    parser.add_argument("--source-language", default="en")
    parser.add_argument("--target-language", default="en")
    parser.add_argument(
        "--pipeline",
        required=True,
        choices=["live", "file"],
        help="Pipeline type to use for processing (live: streaming/incremental, file: batch/parallel)",
    )
    parser.add_argument(
        "--source-language", default="en", help="Source language code (default: en)"
    )
    parser.add_argument(
        "--target-language", default="en", help="Target language code (default: en)"
    )
    parser.add_argument("--output", "-o", help="Output file (output.jsonl)")
    args = parser.parse_args()

    output_fd = None
    if args.output:
        output_fd = open(args.output, "w")

    async def event_callback(event: PipelineEvent):
        processor = event.processor
        # ignore some processor
        if processor in ("AudioChunkerProcessor", "AudioMergeProcessor"):
            return
        logger.info(f"Event: {event}")
        if output_fd:
            output_fd.write(event.model_dump_json())
            output_fd.write("\n")

    asyncio.run(
        process_audio_file(
        process(
            args.source,
            event_callback,
            only_transcript=args.only_transcript,
            source_language=args.source_language,
            target_language=args.target_language,
            args.source_language,
            args.target_language,
            args.pipeline,
            args.output,
        )
    )

    if output_fd:
        output_fd.close()
        logger.info(f"Output written to {args.output}")
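A minimal sketch of driving the new entry point directly, assuming the database and Celery worker this script relies on are reachable; the file name and languages are examples, not values from the diff:

import asyncio

asyncio.run(
    process(
        "meeting.mp4",
        "en",
        "fr",
        pipeline="file",
        output_path="output.jsonl",
    )
)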
@@ -1,315 +0,0 @@
"""
@vibe-generated
Process audio file with diarization support
===========================================

Extended version of process.py that includes speaker diarization.
This tool processes audio files locally without requiring the full server infrastructure.
"""

import asyncio
import tempfile
import uuid
from pathlib import Path
from typing import List

import av

from reflector.logger import logger
from reflector.processors import (
    AudioChunkerProcessor,
    AudioFileWriterProcessor,
    AudioMergeProcessor,
    AudioTranscriptAutoProcessor,
    Pipeline,
    PipelineEvent,
    TranscriptFinalSummaryProcessor,
    TranscriptFinalTitleProcessor,
    TranscriptLinerProcessor,
    TranscriptTopicDetectorProcessor,
    TranscriptTranslatorAutoProcessor,
)
from reflector.processors.base import BroadcastProcessor, Processor
from reflector.processors.types import (
    AudioDiarizationInput,
    TitleSummary,
    TitleSummaryWithId,
)


class TopicCollectorProcessor(Processor):
    """Collect topics for diarization"""

    INPUT_TYPE = TitleSummary
    OUTPUT_TYPE = TitleSummary

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.topics: List[TitleSummaryWithId] = []
        self._topic_id = 0

    async def _push(self, data: TitleSummary):
        # Convert to TitleSummaryWithId and collect
        self._topic_id += 1
        topic_with_id = TitleSummaryWithId(
            id=str(self._topic_id),
            title=data.title,
            summary=data.summary,
            timestamp=data.timestamp,
            duration=data.duration,
            transcript=data.transcript,
        )
        self.topics.append(topic_with_id)

        # Pass through the original topic
        await self.emit(data)

    def get_topics(self) -> List[TitleSummaryWithId]:
        return self.topics


async def process_audio_file_with_diarization(
    filename,
    event_callback,
    only_transcript=False,
    source_language="en",
    target_language="en",
    enable_diarization=True,
    diarization_backend="modal",
):
    # Create temp file for audio if diarization is enabled
    audio_temp_path = None
    if enable_diarization:
        audio_temp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        audio_temp_path = audio_temp_file.name
        audio_temp_file.close()

    # Create processor for collecting topics
    topic_collector = TopicCollectorProcessor()

    # Build pipeline for audio processing
    processors = []

    # Add audio file writer at the beginning if diarization is enabled
    if enable_diarization:
        processors.append(AudioFileWriterProcessor(audio_temp_path))

    # Add the rest of the processors
    processors += [
        AudioChunkerProcessor(),
        AudioMergeProcessor(),
        AudioTranscriptAutoProcessor.as_threaded(),
    ]

    processors += [
        TranscriptLinerProcessor(),
        TranscriptTranslatorAutoProcessor.as_threaded(),
    ]

    if not only_transcript:
        processors += [
            TranscriptTopicDetectorProcessor.as_threaded(),
            # Collect topics for diarization
            topic_collector,
            BroadcastProcessor(
                processors=[
                    TranscriptFinalTitleProcessor.as_threaded(),
                    TranscriptFinalSummaryProcessor.as_threaded(),
                ],
            ),
        ]

    # Create main pipeline
    pipeline = Pipeline(*processors)
    pipeline.set_pref("audio:source_language", source_language)
    pipeline.set_pref("audio:target_language", target_language)
    pipeline.describe()
    pipeline.on(event_callback)

    # Start processing audio
    logger.info(f"Opening {filename}")
    container = av.open(filename)
    try:
        logger.info("Start pushing audio into the pipeline")
        for frame in container.decode(audio=0):
            await pipeline.push(frame)
    finally:
        logger.info("Flushing the pipeline")
        await pipeline.flush()

    # Run diarization if enabled and we have topics
    if enable_diarization and not only_transcript and audio_temp_path:
        topics = topic_collector.get_topics()

        if topics:
            logger.info(f"Starting diarization with {len(topics)} topics")

            try:
                from reflector.processors import AudioDiarizationAutoProcessor

                diarization_processor = AudioDiarizationAutoProcessor(
                    name=diarization_backend
                )

                diarization_processor.set_pipeline(pipeline)

                # For Modal backend, we need to upload the file to S3 first
                if diarization_backend == "modal":
                    from datetime import datetime, timezone

                    from reflector.storage import get_transcripts_storage
                    from reflector.utils.s3_temp_file import S3TemporaryFile

                    storage = get_transcripts_storage()

                    # Generate a unique filename in evaluation folder
                    timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S")
                    audio_filename = f"evaluation/diarization_temp/{timestamp}_{uuid.uuid4().hex}.wav"

                    # Use context manager for automatic cleanup
                    async with S3TemporaryFile(storage, audio_filename) as s3_file:
                        # Read and upload the audio file
                        with open(audio_temp_path, "rb") as f:
                            audio_data = f.read()

                        audio_url = await s3_file.upload(audio_data)
                        logger.info(f"Uploaded audio to S3: {audio_filename}")

                        # Create diarization input with S3 URL
                        diarization_input = AudioDiarizationInput(
                            audio_url=audio_url, topics=topics
                        )

                        # Run diarization
                        await diarization_processor.push(diarization_input)
                        await diarization_processor.flush()

                        logger.info("Diarization complete")
                        # File will be automatically cleaned up when exiting the context
                else:
                    # For local backend, use local file path
                    audio_url = audio_temp_path

                    # Create diarization input
                    diarization_input = AudioDiarizationInput(
                        audio_url=audio_url, topics=topics
                    )

                    # Run diarization
                    await diarization_processor.push(diarization_input)
                    await diarization_processor.flush()

                    logger.info("Diarization complete")

            except ImportError as e:
                logger.error(f"Failed to import diarization dependencies: {e}")
                logger.error(
                    "Install with: uv pip install pyannote.audio torch torchaudio"
                )
                logger.error(
                    "And set HF_TOKEN environment variable for pyannote models"
                )
                raise SystemExit(1)
            except Exception as e:
                logger.error(f"Diarization failed: {e}")
                raise SystemExit(1)
        else:
            logger.warning("Skipping diarization: no topics available")

    # Clean up temp file
    if audio_temp_path:
        try:
            Path(audio_temp_path).unlink()
        except Exception as e:
            logger.warning(f"Failed to clean up temp file {audio_temp_path}: {e}")

    logger.info("All done!")


if __name__ == "__main__":
    import argparse
    import os

    parser = argparse.ArgumentParser(
        description="Process audio files with optional speaker diarization"
    )
    parser.add_argument("source", help="Source file (mp3, wav, mp4...)")
    parser.add_argument(
        "--only-transcript",
        "-t",
        action="store_true",
        help="Only generate transcript without topics/summaries",
    )
    parser.add_argument(
        "--source-language", default="en", help="Source language code (default: en)"
    )
    parser.add_argument(
        "--target-language", default="en", help="Target language code (default: en)"
    )
    parser.add_argument("--output", "-o", help="Output file (output.jsonl)")
    parser.add_argument(
        "--enable-diarization",
        "-d",
        action="store_true",
        help="Enable speaker diarization",
    )
    parser.add_argument(
        "--diarization-backend",
        default="modal",
        choices=["modal"],
        help="Diarization backend to use (default: modal)",
    )
    args = parser.parse_args()

    # Set REDIS_HOST to localhost if not provided
    if "REDIS_HOST" not in os.environ:
        os.environ["REDIS_HOST"] = "localhost"
        logger.info("REDIS_HOST not set, defaulting to localhost")

    output_fd = None
    if args.output:
        output_fd = open(args.output, "w")

    async def event_callback(event: PipelineEvent):
        processor = event.processor
        data = event.data

        # Ignore internal processors
        if processor in (
            "AudioChunkerProcessor",
            "AudioMergeProcessor",
            "AudioFileWriterProcessor",
            "TopicCollectorProcessor",
            "BroadcastProcessor",
        ):
            return

        # If diarization is enabled, skip the original topic events from the pipeline
        # The diarization processor will emit the same topics but with speaker info
        if processor == "TranscriptTopicDetectorProcessor" and args.enable_diarization:
            return

        # Log all events
        logger.info(f"Event: {processor} - {type(data).__name__}")

        # Write to output
        if output_fd:
            output_fd.write(event.model_dump_json())
            output_fd.write("\n")
            output_fd.flush()

    asyncio.run(
        process_audio_file_with_diarization(
            args.source,
            event_callback,
            only_transcript=args.only_transcript,
            source_language=args.source_language,
            target_language=args.target_language,
            enable_diarization=args.enable_diarization,
            diarization_backend=args.diarization_backend,
        )
    )

    if output_fd:
        output_fd.close()
        logger.info(f"Output written to {args.output}")
@@ -53,7 +53,7 @@ async def run_single_processor(args):
async def event_callback(event: PipelineEvent):
    processor = event.processor
    # ignore some processor
    if processor in ("AudioChunkerProcessor", "AudioMergeProcessor"):
    if processor in ("AudioChunkerAutoProcessor", "AudioMergeProcessor"):
        return
    print(f"Event: {event}")
    if output_fd:
@@ -1,96 +0,0 @@
#!/usr/bin/env python3
"""
@vibe-generated
Test script for the diarization CLI tool
=========================================

This script helps test the diarization functionality with sample audio files.
"""

import asyncio
import sys
from pathlib import Path

from reflector.logger import logger


async def test_diarization(audio_file: str):
    """Test the diarization functionality"""

    # Import the processing function
    from process_with_diarization import process_audio_file_with_diarization

    # Collect events
    events = []

    async def event_callback(event):
        events.append({"processor": event.processor, "data": event.data})
        logger.info(f"Event from {event.processor}")

    # Process the audio file
    logger.info(f"Processing audio file: {audio_file}")

    try:
        await process_audio_file_with_diarization(
            audio_file,
            event_callback,
            only_transcript=False,
            source_language="en",
            target_language="en",
            enable_diarization=True,
            diarization_backend="modal",
        )

        # Analyze results
        logger.info(f"Processing complete. Received {len(events)} events")

        # Look for diarization results
        diarized_topics = []
        for event in events:
            if "TitleSummary" in event["processor"]:
                # Check if words have speaker information
                if hasattr(event["data"], "transcript") and event["data"].transcript:
                    words = event["data"].transcript.words
                    if words and hasattr(words[0], "speaker"):
                        speakers = set(
                            w.speaker for w in words if hasattr(w, "speaker")
                        )
                        logger.info(
                            f"Found {len(speakers)} speakers in topic: {event['data'].title}"
                        )
                        diarized_topics.append(event["data"])

        if diarized_topics:
            logger.info(f"Successfully diarized {len(diarized_topics)} topics")

            # Print sample output
            sample_topic = diarized_topics[0]
            logger.info("Sample diarized output:")
            for i, word in enumerate(sample_topic.transcript.words[:10]):
                logger.info(f"  Word {i}: '{word.text}' - Speaker {word.speaker}")
        else:
            logger.warning("No diarization results found in output")

        return events

    except Exception as e:
        logger.error(f"Error during processing: {e}")
        raise


def main():
    if len(sys.argv) < 2:
        print("Usage: python test_diarization.py <audio_file>")
        sys.exit(1)

    audio_file = sys.argv[1]
    if not Path(audio_file).exists():
        print(f"Error: Audio file '{audio_file}' not found")
        sys.exit(1)

    # Run the test
    asyncio.run(test_diarization(audio_file))


if __name__ == "__main__":
    main()
23
server/reflector/utils/string.py
Normal file
@@ -0,0 +1,23 @@
from typing import Annotated

from pydantic import Field, TypeAdapter, constr

NonEmptyStringBase = constr(min_length=1, strip_whitespace=False)
NonEmptyString = Annotated[
    NonEmptyStringBase,
    Field(description="A non-empty string", min_length=1),
]
non_empty_string_adapter = TypeAdapter(NonEmptyString)


def parse_non_empty_string(s: str, error: str | None = None) -> NonEmptyString:
    try:
        return non_empty_string_adapter.validate_python(s)
    except Exception as e:
        raise ValueError(f"{e}: {error}" if error else e) from e


def try_parse_non_empty_string(s: str) -> NonEmptyString | None:
    if not s:
        return None
    return parse_non_empty_string(s)
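A quick sketch of how these helpers behave; the values are illustrative, the functions are from the new module above:

from reflector.utils.string import parse_non_empty_string, try_parse_non_empty_string

parse_non_empty_string("hello")    # -> "hello"
try_parse_non_empty_string("")     # -> None instead of raising
parse_non_empty_string("", error="WHEREBY_API_KEY value is required.")
# -> ValueError whose message ends with the supplied context string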
@@ -15,6 +15,7 @@ from reflector.db.meetings import meetings_controller
from reflector.db.rooms import rooms_controller
from reflector.settings import settings
from reflector.whereby import create_meeting, upload_logo
from reflector.worker.webhook import test_webhook

logger = logging.getLogger(__name__)

@@ -44,6 +45,11 @@ class Room(BaseModel):
    is_shared: bool


class RoomDetails(Room):
    webhook_url: str | None
    webhook_secret: str | None


class Meeting(BaseModel):
    id: str
    room_name: str
@@ -64,6 +70,8 @@ class CreateRoom(BaseModel):
    recording_type: str
    recording_trigger: str
    is_shared: bool
    webhook_url: str
    webhook_secret: str


class UpdateRoom(BaseModel):
@@ -76,16 +84,26 @@ class UpdateRoom(BaseModel):
    recording_type: str
    recording_trigger: str
    is_shared: bool
    webhook_url: str
    webhook_secret: str


class DeletionStatus(BaseModel):
    status: str


@router.get("/rooms", response_model=Page[Room])
class WebhookTestResult(BaseModel):
    success: bool
    message: str = ""
    error: str = ""
    status_code: int | None = None
    response_preview: str | None = None


@router.get("/rooms", response_model=Page[RoomDetails])
async def rooms_list(
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
) -> list[Room]:
) -> list[RoomDetails]:
    if not user and not settings.PUBLIC_MODE:
        raise HTTPException(status_code=401, detail="Not authenticated")

@@ -99,6 +117,18 @@ async def rooms_list(
    )


@router.get("/rooms/{room_id}", response_model=RoomDetails)
async def rooms_get(
    room_id: str,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
):
    user_id = user["sub"] if user else None
    room = await rooms_controller.get_by_id_for_http(room_id, user_id=user_id)
    if not room:
        raise HTTPException(status_code=404, detail="Room not found")
    return room


@router.post("/rooms", response_model=Room)
async def rooms_create(
    room: CreateRoom,
@@ -117,10 +147,12 @@ async def rooms_create(
        recording_type=room.recording_type,
        recording_trigger=room.recording_trigger,
        is_shared=room.is_shared,
        webhook_url=room.webhook_url,
        webhook_secret=room.webhook_secret,
    )


@router.patch("/rooms/{room_id}", response_model=Room)
@router.patch("/rooms/{room_id}", response_model=RoomDetails)
async def rooms_update(
    room_id: str,
    info: UpdateRoom,
@@ -165,6 +197,7 @@ async def rooms_create_meeting(
    end_date = current_time + timedelta(hours=8)

    whereby_meeting = await create_meeting("", end_date=end_date, room=room)

    await upload_logo(whereby_meeting["roomName"], "./images/logo.png")

    # Now try to save to database
@@ -176,20 +209,15 @@ async def rooms_create_meeting(
            host_room_url=whereby_meeting["hostRoomUrl"],
            start_date=parse_datetime_with_timezone(whereby_meeting["startDate"]),
            end_date=parse_datetime_with_timezone(whereby_meeting["endDate"]),
            user_id=user_id,
            room=room,
        )
    except (asyncpg.exceptions.UniqueViolationError, sqlite3.IntegrityError):
        # Another request already created a meeting for this room
        # Log this race condition occurrence
        logger.info(
            "Race condition detected for room %s - fetching existing meeting",
            room.name,
        )
        logger.warning(
            "Whereby meeting %s was created but not used (resource leak) for room %s",
            whereby_meeting["meetingId"],
            "Race condition detected for room %s and meeting %s - fetching existing meeting",
            room.name,
            whereby_meeting["meetingId"],
        )

        # Fetch the meeting that was created by the other request
@@ -199,7 +227,9 @@ async def rooms_create_meeting(
        if meeting is None:
            # Edge case: meeting was created but expired/deleted between checks
            logger.error(
                "Meeting disappeared after race condition for room %s", room.name
                "Meeting disappeared after race condition for room %s",
                room.name,
                exc_info=True,
            )
            raise HTTPException(
                status_code=503, detail="Unable to join meeting - please try again"
@@ -209,3 +239,24 @@ async def rooms_create_meeting(
        meeting.host_room_url = ""

    return meeting


@router.post("/rooms/{room_id}/webhook/test", response_model=WebhookTestResult)
async def rooms_test_webhook(
    room_id: str,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
):
    """Test webhook configuration by sending a sample payload."""
    user_id = user["sub"] if user else None

    room = await rooms_controller.get_by_id(room_id)
    if not room:
        raise HTTPException(status_code=404, detail="Room not found")

    if user_id and room.user_id != user_id:
        raise HTTPException(
            status_code=403, detail="Not authorized to test this room's webhook"
        )

    result = await test_webhook(room_id)
    return WebhookTestResult(**result)
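A hedged sketch of exercising the new test endpoint from a client; the base URL reuses the uvicorn port set elsewhere in this diff, and the room id is a placeholder:

import httpx

resp = httpx.post("http://localhost:1250/rooms/ROOM_ID/webhook/test")
print(resp.status_code, resp.json())
# e.g. 200 {"success": true, "status_code": 200, "message": "Webhook test successful", ...}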
@@ -5,7 +5,7 @@ from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi_pagination import Page
from fastapi_pagination.ext.databases import apaginate
from jose import jwt
from pydantic import BaseModel, Field, field_serializer
from pydantic import BaseModel, Field, constr, field_serializer

import reflector.auth as auth
from reflector.db import get_database
@@ -19,14 +19,15 @@ from reflector.db.search import (
    SearchOffsetBase,
    SearchParameters,
    SearchQuery,
    SearchQueryBase,
    SearchResult,
    SearchTotal,
    search_controller,
    search_query_adapter,
)
from reflector.db.transcripts import (
    SourceKind,
    TranscriptParticipant,
    TranscriptStatus,
    TranscriptTopic,
    transcripts_controller,
)
@@ -63,7 +64,7 @@ class GetTranscriptMinimal(BaseModel):
    id: str
    user_id: str | None
    name: str
    status: str
    status: TranscriptStatus
    locked: bool
    duration: float
    title: str | None
@@ -96,6 +97,7 @@ class CreateTranscript(BaseModel):
    name: str
    source_language: str = Field("en")
    target_language: str = Field("en")
    source_kind: SourceKind | None = None


class UpdateTranscript(BaseModel):
@@ -114,7 +116,19 @@ class DeletionStatus(BaseModel):
    status: str


SearchQueryParam = Annotated[SearchQueryBase, Query(description="Search query text")]
SearchQueryParamBase = constr(min_length=0, strip_whitespace=True)
SearchQueryParam = Annotated[
    SearchQueryParamBase, Query(description="Search query text")
]


# HTTP and API standards accept "q="; we handle it as the absence of a query, not as an "empty string query"
def parse_search_query_param(q: SearchQueryParam) -> SearchQuery | None:
    if q == "":
        return None
    return search_query_adapter.validate_python(q)


SearchLimitParam = Annotated[SearchLimitBase, Query(description="Results per page")]
SearchOffsetParam = Annotated[
    SearchOffsetBase, Query(description="Number of results to skip")
@@ -124,7 +138,7 @@ SearchOffsetParam = Annotated[
class SearchResponse(BaseModel):
    results: list[SearchResult]
    total: SearchTotal
    query: SearchQuery
    query: SearchQuery | None = None
    limit: SearchLimit
    offset: SearchOffset

@@ -160,6 +174,7 @@ async def transcripts_search(
    limit: SearchLimitParam = DEFAULT_SEARCH_LIMIT,
    offset: SearchOffsetParam = 0,
    room_id: Optional[str] = None,
    source_kind: Optional[SourceKind] = None,
    user: Annotated[
        Optional[auth.UserInfo], Depends(auth.current_user_optional)
    ] = None,
@@ -173,7 +188,12 @@ async def transcripts_search(
    user_id = user["sub"] if user else None

    search_params = SearchParameters(
        query_text=q, limit=limit, offset=offset, user_id=user_id, room_id=room_id
        query_text=parse_search_query_param(q),
        limit=limit,
        offset=offset,
        user_id=user_id,
        room_id=room_id,
        source_kind=source_kind,
    )

    results, total = await search_controller.search_transcripts(search_params)
@@ -195,7 +215,7 @@ async def transcripts_create(
    user_id = user["sub"] if user else None
    return await transcripts_controller.add(
        info.name,
        source_kind=SourceKind.LIVE,
        source_kind=info.source_kind or SourceKind.LIVE,
        source_language=info.source_language,
        target_language=info.target_language,
        user_id=user_id,
@@ -330,8 +350,6 @@ async def transcript_update(
    transcript = await transcripts_controller.get_by_id_for_http(
        transcript_id, user_id=user_id
    )
    if not transcript:
        raise HTTPException(status_code=404, detail="Transcript not found")
    values = info.dict(exclude_unset=True)
    updated_transcript = await transcripts_controller.update(transcript, values)
    return updated_transcript
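The intent of parse_search_query_param above is that an empty q (clients commonly send "?q=") means "no query" rather than "query for the empty string". A small illustration, values made up:

parse_search_query_param("")         # -> None: no text filtering applied
parse_search_query_param("kickoff")  # -> validated SearchQuery for "kickoff"
# SearchResponse.query is therefore SearchQuery | None, defaulting to None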
@@ -6,7 +6,7 @@ from pydantic import BaseModel
import reflector.auth as auth
from reflector.db.transcripts import transcripts_controller
from reflector.pipelines.main_live_pipeline import task_pipeline_process
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process

router = APIRouter()

@@ -34,13 +34,13 @@ async def transcript_process(
    )

    if task_is_scheduled_or_active(
        "reflector.pipelines.main_live_pipeline.task_pipeline_process",
        "reflector.pipelines.main_file_pipeline.task_pipeline_file_process",
        transcript_id=transcript_id,
    ):
        return ProcessStatus(status="already running")

    # schedule a background task to process the file
    task_pipeline_process.delay(transcript_id=transcript_id)
    task_pipeline_file_process.delay(transcript_id=transcript_id)

    return ProcessStatus(status="ok")
@@ -6,7 +6,7 @@ from pydantic import BaseModel
import reflector.auth as auth
from reflector.db.transcripts import transcripts_controller
from reflector.pipelines.main_live_pipeline import task_pipeline_process
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process

router = APIRouter()

@@ -92,6 +92,6 @@ async def transcript_record_upload(
    await transcripts_controller.update(transcript, {"status": "uploaded"})

    # launch a background task to process the file
    task_pipeline_process.delay(transcript_id=transcript_id)
    task_pipeline_file_process.delay(transcript_id=transcript_id)

    return UploadStatus(status="ok")
@@ -1,18 +1,60 @@
import logging
from datetime import datetime

import httpx

from reflector.db.rooms import Room
from reflector.settings import settings
from reflector.utils.string import parse_non_empty_string

logger = logging.getLogger(__name__)


def _get_headers():
    api_key = parse_non_empty_string(
        settings.WHEREBY_API_KEY, "WHEREBY_API_KEY value is required."
    )
    return {
        "Content-Type": "application/json; charset=utf-8",
        "Authorization": f"Bearer {api_key}",
    }


HEADERS = {
    "Content-Type": "application/json; charset=utf-8",
    "Authorization": f"Bearer {settings.WHEREBY_API_KEY}",
}
TIMEOUT = 10  # seconds


def _get_whereby_s3_auth():
    errors = []
    try:
        bucket_name = parse_non_empty_string(
            settings.RECORDING_STORAGE_AWS_BUCKET_NAME,
            "RECORDING_STORAGE_AWS_BUCKET_NAME value is required.",
        )
    except Exception as e:
        errors.append(e)
    try:
        key_id = parse_non_empty_string(
            settings.AWS_WHEREBY_ACCESS_KEY_ID,
            "AWS_WHEREBY_ACCESS_KEY_ID value is required.",
        )
    except Exception as e:
        errors.append(e)
    try:
        key_secret = parse_non_empty_string(
            settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
            "AWS_WHEREBY_ACCESS_KEY_SECRET value is required.",
        )
    except Exception as e:
        errors.append(e)
    if len(errors) > 0:
        raise Exception(
            f"Failed to get Whereby auth settings: {', '.join(str(e) for e in errors)}"
        )
    return bucket_name, key_id, key_secret


async def create_meeting(room_name_prefix: str, end_date: datetime, room: Room):
    s3_bucket_name, s3_key_id, s3_key_secret = _get_whereby_s3_auth()
    data = {
        "isLocked": room.is_locked,
        "roomNamePrefix": room_name_prefix,
@@ -23,23 +65,26 @@ async def create_meeting(room_name_prefix: str, end_date: datetime, room: Room):
            "type": room.recording_type,
            "destination": {
                "provider": "s3",
                "bucket": settings.RECORDING_STORAGE_AWS_BUCKET_NAME,
                "accessKeyId": settings.AWS_WHEREBY_ACCESS_KEY_ID,
                "accessKeySecret": settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
                "bucket": s3_bucket_name,
                "accessKeyId": s3_key_id,
                "accessKeySecret": s3_key_secret,
                "fileFormat": "mp4",
            },
            "startTrigger": room.recording_trigger,
        },
        "fields": ["hostRoomUrl"],
    }

    async with httpx.AsyncClient() as client:
        response = await client.post(
            f"{settings.WHEREBY_API_URL}/meetings",
            headers=HEADERS,
            headers=_get_headers(),
            json=data,
            timeout=TIMEOUT,
        )
        if response.status_code == 403:
            logger.warning(
                f"Failed to create meeting: access denied on Whereby: {response.text}"
            )
        response.raise_for_status()
        return response.json()

@@ -48,7 +93,7 @@ async def get_room_sessions(room_name: str):
    async with httpx.AsyncClient() as client:
        response = await client.get(
            f"{settings.WHEREBY_API_URL}/insights/room-sessions?roomName={room_name}",
            headers=HEADERS,
            headers=_get_headers(),
            timeout=TIMEOUT,
        )
        response.raise_for_status()
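One consequence of replacing the module-level HEADERS constant with _get_headers() is that a missing WHEREBY_API_KEY now surfaces as a ValueError at call time instead of silently baking an invalid Authorization header at import time. A minimal sketch, assuming the module path shown in this diff:

from reflector import whereby

try:
    headers = whereby._get_headers()
except ValueError as exc:
    # parse_non_empty_string raised: "...: WHEREBY_API_KEY value is required."
    print(f"Whereby is not configured: {exc}")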
@@ -19,6 +19,7 @@ else:
        "reflector.pipelines.main_live_pipeline",
        "reflector.worker.healthcheck",
        "reflector.worker.process",
        "reflector.worker.cleanup",
    ]
)

@@ -38,6 +39,16 @@ else:
        },
    }

    if settings.PUBLIC_MODE:
        app.conf.beat_schedule["cleanup_old_public_data"] = {
            "task": "reflector.worker.cleanup.cleanup_old_public_data_task",
            "schedule": crontab(hour=3, minute=0),
        }
        logger.info(
            "Public mode cleanup enabled",
            retention_days=settings.PUBLIC_DATA_RETENTION_DAYS,
        )

    if settings.HEALTHCHECK_URL:
        app.conf.beat_schedule["healthcheck_ping"] = {
            "task": "reflector.worker.healthcheck.healthcheck_ping",
156
server/reflector/worker/cleanup.py
Normal file
@@ -0,0 +1,156 @@
"""
Main task for cleaning up old public data.

Deletes old anonymous transcripts and their associated meetings/recordings.
Transcripts are the main entry point - any associated data is also removed.
"""

import asyncio
from datetime import datetime, timedelta, timezone
from typing import TypedDict

import structlog
from celery import shared_task
from databases import Database
from pydantic.types import PositiveInt

from reflector.asynctask import asynctask
from reflector.db import get_database
from reflector.db.meetings import meetings
from reflector.db.recordings import recordings
from reflector.db.transcripts import transcripts, transcripts_controller
from reflector.settings import settings
from reflector.storage import get_recordings_storage

logger = structlog.get_logger(__name__)


class CleanupStats(TypedDict):
    """Statistics for cleanup operation."""

    transcripts_deleted: int
    meetings_deleted: int
    recordings_deleted: int
    errors: list[str]


async def delete_single_transcript(
    db: Database, transcript_data: dict, stats: CleanupStats
):
    transcript_id = transcript_data["id"]
    meeting_id = transcript_data["meeting_id"]
    recording_id = transcript_data["recording_id"]

    try:
        async with db.transaction(isolation="serializable"):
            if meeting_id:
                await db.execute(meetings.delete().where(meetings.c.id == meeting_id))
                stats["meetings_deleted"] += 1
                logger.info("Deleted associated meeting", meeting_id=meeting_id)

            if recording_id:
                recording = await db.fetch_one(
                    recordings.select().where(recordings.c.id == recording_id)
                )
                if recording:
                    try:
                        await get_recordings_storage().delete_file(
                            recording["object_key"]
                        )
                    except Exception as storage_error:
                        logger.warning(
                            "Failed to delete recording from storage",
                            recording_id=recording_id,
                            object_key=recording["object_key"],
                            error=str(storage_error),
                        )

                    await db.execute(
                        recordings.delete().where(recordings.c.id == recording_id)
                    )
                    stats["recordings_deleted"] += 1
                    logger.info(
                        "Deleted associated recording", recording_id=recording_id
                    )

            await transcripts_controller.remove_by_id(transcript_id)
            stats["transcripts_deleted"] += 1
            logger.info(
                "Deleted transcript",
                transcript_id=transcript_id,
                created_at=transcript_data["created_at"].isoformat(),
            )
    except Exception as e:
        error_msg = f"Failed to delete transcript {transcript_id}: {str(e)}"
        logger.error(error_msg, exc_info=e)
        stats["errors"].append(error_msg)


async def cleanup_old_transcripts(
    db: Database, cutoff_date: datetime, stats: CleanupStats
):
    """Delete old anonymous transcripts and their associated recordings/meetings."""
    query = transcripts.select().where(
        (transcripts.c.created_at < cutoff_date) & (transcripts.c.user_id.is_(None))
    )
    old_transcripts = await db.fetch_all(query)

    logger.info(f"Found {len(old_transcripts)} old transcripts to delete")

    for transcript_data in old_transcripts:
        await delete_single_transcript(db, transcript_data, stats)


def log_cleanup_results(stats: CleanupStats):
    logger.info(
        "Cleanup completed",
        transcripts_deleted=stats["transcripts_deleted"],
        meetings_deleted=stats["meetings_deleted"],
        recordings_deleted=stats["recordings_deleted"],
        errors_count=len(stats["errors"]),
    )

    if stats["errors"]:
        logger.warning(
            "Cleanup completed with errors",
            errors=stats["errors"][:10],
        )


async def cleanup_old_public_data(
    days: PositiveInt | None = None,
) -> CleanupStats | None:
    if days is None:
        days = settings.PUBLIC_DATA_RETENTION_DAYS

    if not settings.PUBLIC_MODE:
        logger.info("Skipping cleanup - not a public instance")
        return None

    cutoff_date = datetime.now(timezone.utc) - timedelta(days=days)
    logger.info(
        "Starting cleanup of old public data",
        cutoff_date=cutoff_date.isoformat(),
    )

    stats: CleanupStats = {
        "transcripts_deleted": 0,
        "meetings_deleted": 0,
        "recordings_deleted": 0,
        "errors": [],
    }

    db = get_database()
    await cleanup_old_transcripts(db, cutoff_date, stats)

    log_cleanup_results(stats)
    return stats


@shared_task(
    autoretry_for=(Exception,),
    retry_kwargs={"max_retries": 3, "countdown": 300},
)
@asynctask
def cleanup_old_public_data_task(days: int | None = None):
    asyncio.run(cleanup_old_public_data(days=days))
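Two ways to trigger this cleanup by hand, per the coroutine and Celery task above; the day count is an example, and PUBLIC_MODE must be enabled or the run is skipped:

from reflector.worker.cleanup import cleanup_old_public_data_task

cleanup_old_public_data_task.delay(days=7)  # enqueue on the worker
# or, inside an async context with the database available:
#     stats = await cleanup_old_public_data(days=7)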
@@ -14,7 +14,8 @@ from reflector.db.meetings import meetings_controller
from reflector.db.recordings import Recording, recordings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import SourceKind, transcripts_controller
from reflector.pipelines.main_live_pipeline import asynctask, task_pipeline_process
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_live_pipeline import asynctask
from reflector.settings import settings
from reflector.whereby import get_room_sessions

@@ -140,7 +141,7 @@ async def process_recording(bucket_name: str, object_key: str):

    await transcripts_controller.update(transcript, {"status": "uploaded"})

    task_pipeline_process.delay(transcript_id=transcript.id)
    task_pipeline_file_process.delay(transcript_id=transcript.id)


@shared_task
258
server/reflector/worker/webhook.py
Normal file
@@ -0,0 +1,258 @@
"""Webhook task for sending transcript notifications."""

import hashlib
import hmac
import json
import uuid
from datetime import datetime, timezone

import httpx
import structlog
from celery import shared_task
from celery.utils.log import get_task_logger

from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import transcripts_controller
from reflector.pipelines.main_live_pipeline import asynctask
from reflector.settings import settings
from reflector.utils.webvtt import topics_to_webvtt

logger = structlog.wrap_logger(get_task_logger(__name__))


def generate_webhook_signature(payload: bytes, secret: str, timestamp: str) -> str:
    """Generate HMAC signature for webhook payload."""
    signed_payload = f"{timestamp}.{payload.decode('utf-8')}"
    hmac_obj = hmac.new(
        secret.encode("utf-8"),
        signed_payload.encode("utf-8"),
        hashlib.sha256,
    )
    return hmac_obj.hexdigest()


@shared_task(
    bind=True,
    max_retries=30,
    default_retry_delay=60,
    retry_backoff=True,
    retry_backoff_max=3600,  # Max 1 hour between retries
)
@asynctask
async def send_transcript_webhook(
    self,
    transcript_id: str,
    room_id: str,
    event_id: str,
):
    log = logger.bind(
        transcript_id=transcript_id,
        room_id=room_id,
        retry_count=self.request.retries,
    )

    try:
        # Fetch transcript and room
        transcript = await transcripts_controller.get_by_id(transcript_id)
        if not transcript:
            log.error("Transcript not found, skipping webhook")
            return

        room = await rooms_controller.get_by_id(room_id)
        if not room:
            log.error("Room not found, skipping webhook")
            return

        if not room.webhook_url:
            log.info("No webhook URL configured for room, skipping")
            return

        # Generate WebVTT content from topics
        topics_data = []

        if transcript.topics:
            # Build topics data with diarized content per topic
            for topic in transcript.topics:
                topic_webvtt = topics_to_webvtt([topic]) if topic.words else ""
                topics_data.append(
                    {
                        "title": topic.title,
                        "summary": topic.summary,
                        "timestamp": topic.timestamp,
                        "duration": topic.duration,
                        "webvtt": topic_webvtt,
                    }
                )

        # Build webhook payload
        frontend_url = f"{settings.UI_BASE_URL}/transcripts/{transcript.id}"
        participants = [
            {"id": p.id, "name": p.name, "speaker": p.speaker}
            for p in (transcript.participants or [])
        ]
        payload_data = {
            "event": "transcript.completed",
            "event_id": event_id,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "transcript": {
                "id": transcript.id,
                "room_id": transcript.room_id,
                "created_at": transcript.created_at.isoformat(),
                "duration": transcript.duration,
                "title": transcript.title,
                "short_summary": transcript.short_summary,
                "long_summary": transcript.long_summary,
                "webvtt": transcript.webvtt,
                "topics": topics_data,
                "participants": participants,
                "source_language": transcript.source_language,
                "target_language": transcript.target_language,
                "status": transcript.status,
                "frontend_url": frontend_url,
            },
            "room": {
                "id": room.id,
                "name": room.name,
            },
        }

        # Convert to JSON
        payload_json = json.dumps(payload_data, separators=(",", ":"))
        payload_bytes = payload_json.encode("utf-8")

        # Generate signature if secret is configured
        headers = {
            "Content-Type": "application/json",
            "User-Agent": "Reflector-Webhook/1.0",
            "X-Webhook-Event": "transcript.completed",
            "X-Webhook-Retry": str(self.request.retries),
        }

        if room.webhook_secret:
            timestamp = str(int(datetime.now(timezone.utc).timestamp()))
            signature = generate_webhook_signature(
                payload_bytes, room.webhook_secret, timestamp
            )
            headers["X-Webhook-Signature"] = f"t={timestamp},v1={signature}"

        # Send webhook with timeout
        async with httpx.AsyncClient(timeout=30.0) as client:
            log.info(
                "Sending webhook",
                url=room.webhook_url,
                payload_size=len(payload_bytes),
            )

            response = await client.post(
                room.webhook_url,
                content=payload_bytes,
                headers=headers,
            )

            response.raise_for_status()

            log.info(
                "Webhook sent successfully",
                status_code=response.status_code,
                response_size=len(response.content),
            )

    except httpx.HTTPStatusError as e:
        log.error(
            "Webhook failed with HTTP error",
            status_code=e.response.status_code,
            response_text=e.response.text[:500],  # First 500 chars
        )

        # Don't retry on client errors (4xx)
        if 400 <= e.response.status_code < 500:
            log.error("Client error, not retrying")
            return

        # Retry on server errors (5xx)
        raise self.retry(exc=e)

    except (httpx.ConnectError, httpx.TimeoutException) as e:
        # Retry on network errors
        log.error("Webhook failed with connection error", error=str(e))
        raise self.retry(exc=e)

    except Exception as e:
        # Retry on unexpected errors
        log.exception("Unexpected error in webhook task", error=str(e))
        raise self.retry(exc=e)


async def test_webhook(room_id: str) -> dict:
    """
    Test webhook configuration by sending a sample payload.
    Returns immediately with success/failure status.
    This is the shared implementation used by both the API endpoint and Celery task.
    """
    try:
        room = await rooms_controller.get_by_id(room_id)
        if not room:
            return {"success": False, "error": "Room not found"}

        if not room.webhook_url:
            return {"success": False, "error": "No webhook URL configured"}

        now = datetime.now(timezone.utc).isoformat()
        payload_data = {
            "event": "test",
            "event_id": uuid.uuid4().hex,
            "timestamp": now,
            "message": "This is a test webhook from Reflector",
            "room": {
                "id": room.id,
                "name": room.name,
            },
        }

        payload_json = json.dumps(payload_data, separators=(",", ":"))
        payload_bytes = payload_json.encode("utf-8")

        # Generate headers with signature
        headers = {
            "Content-Type": "application/json",
            "User-Agent": "Reflector-Webhook/1.0",
            "X-Webhook-Event": "test",
        }

        if room.webhook_secret:
            timestamp = str(int(datetime.now(timezone.utc).timestamp()))
            signature = generate_webhook_signature(
                payload_bytes, room.webhook_secret, timestamp
            )
            headers["X-Webhook-Signature"] = f"t={timestamp},v1={signature}"

        # Send test webhook with short timeout
        async with httpx.AsyncClient(timeout=10.0) as client:
            response = await client.post(
                room.webhook_url,
                content=payload_bytes,
                headers=headers,
            )

        return {
            "success": response.is_success,
            "status_code": response.status_code,
            "message": f"Webhook test {'successful' if response.is_success else 'failed'}",
            "response_preview": response.text if response.text else None,
        }

    except httpx.TimeoutException:
        return {
            "success": False,
            "error": "Webhook request timed out (10 seconds)",
        }
    except httpx.ConnectError as e:
        return {
            "success": False,
            "error": f"Could not connect to webhook URL: {str(e)}",
        }
    except Exception as e:
        return {
            "success": False,
            "error": f"Unexpected error: {str(e)}",
        }
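For completeness, a minimal receiver-side sketch that checks the X-Webhook-Signature header produced above. The "t=...,v1=..." header format and the "{timestamp}.{body}" signed payload come from this file; the function name and the 5-minute replay window are assumptions:

import hashlib
import hmac
import time


def verify_webhook_signature(body: bytes, header: str, secret: str, tolerance: int = 300) -> bool:
    # header looks like "t=<unix timestamp>,v1=<hex hmac>"
    parts = dict(item.split("=", 1) for item in header.split(","))
    timestamp, signature = parts["t"], parts["v1"]
    # reject stale deliveries (assumed replay window)
    if abs(time.time() - int(timestamp)) > tolerance:
        return False
    expected = hmac.new(
        secret.encode("utf-8"),
        f"{timestamp}.{body.decode('utf-8')}".encode("utf-8"),
        hashlib.sha256,
    ).hexdigest()
    # constant-time comparison avoids timing side channels
    return hmac.compare_digest(expected, signature)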
@@ -2,7 +2,7 @@
if [ "${ENTRYPOINT}" = "server" ]; then
    uv run alembic upgrade head
    uv run -m reflector.app
    uv run uvicorn reflector.app:app --host 0.0.0.0 --port 1250
elif [ "${ENTRYPOINT}" = "worker" ]; then
    uv run celery -A reflector.worker.app worker --loglevel=info
elif [ "${ENTRYPOINT}" = "beat" ]; then
@@ -0,0 +1,40 @@
interactions:
- request:
    body: ''
    headers:
      accept:
      - '*/*'
      accept-encoding:
      - gzip, deflate
      authorization:
      - DUMMY_API_KEY
      connection:
      - keep-alive
      content-length:
      - '0'
      host:
      - monadical-sas--reflector-diarizer-web.modal.run
      user-agent:
      - python-httpx/0.27.2
    method: POST
    uri: https://monadical-sas--reflector-diarizer-web.modal.run/diarize?audio_file_url=https%3A%2F%2Freflector-github-pytest.s3.us-east-1.amazonaws.com%2Ftest_mathieu_hello.mp3&timestamp=0
  response:
    body:
      string: '{"diarization":[{"start":0.823,"end":1.91,"speaker":0},{"start":2.572,"end":6.409,"speaker":0},{"start":6.783,"end":10.62,"speaker":0},{"start":11.231,"end":14.168,"speaker":0},{"start":14.796,"end":19.295,"speaker":0}]}'
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000
      Content-Length:
      - '220'
      Content-Type:
      - application/json
      Date:
      - Wed, 13 Aug 2025 18:25:34 GMT
      Modal-Function-Call-Id:
      - fc-01K2JAVNEP6N7Y1Y7W3T98BCXK
      Vary:
      - accept-encoding
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,46 @@
interactions:
- request:
    body: '{"audio_file_url": "https://reflector-github-pytest.s3.us-east-1.amazonaws.com/test_mathieu_hello.mp3",
      "language": "en", "batch": true}'
    headers:
      accept:
      - '*/*'
      accept-encoding:
      - gzip, deflate
      authorization:
      - DUMMY_API_KEY
      connection:
      - keep-alive
      content-length:
      - '136'
      content-type:
      - application/json
      host:
      - monadical-sas--reflector-transcriber-parakeet-web.modal.run
      user-agent:
      - python-httpx/0.27.2
    method: POST
    uri: https://monadical-sas--reflector-transcriber-parakeet-web.modal.run/v1/audio/transcriptions-from-url
  response:
    body:
      string: '{"text":"Hi there everyone. Today I want to share my incredible experience
        with Reflector. a Q teenage product that revolutionizes audio processing.
        With reflector, I can easily convert any audio into accurate transcription.
        saving me hours of tedious manual work.","words":[{"word":"Hi","start":0.87,"end":1.19},{"word":"there","start":1.19,"end":1.35},{"word":"everyone.","start":1.51,"end":1.83},{"word":"Today","start":2.63,"end":2.87},{"word":"I","start":3.36,"end":3.52},{"word":"want","start":3.6,"end":3.76},{"word":"to","start":3.76,"end":3.92},{"word":"share","start":3.92,"end":4.16},{"word":"my","start":4.16,"end":4.4},{"word":"incredible","start":4.32,"end":4.96},{"word":"experience","start":4.96,"end":5.44},{"word":"with","start":5.44,"end":5.68},{"word":"Reflector.","start":5.68,"end":6.24},{"word":"a","start":6.93,"end":7.01},{"word":"Q","start":7.01,"end":7.17},{"word":"teenage","start":7.25,"end":7.65},{"word":"product","start":7.89,"end":8.29},{"word":"that","start":8.29,"end":8.61},{"word":"revolutionizes","start":8.61,"end":9.65},{"word":"audio","start":9.65,"end":10.05},{"word":"processing.","start":10.05,"end":10.53},{"word":"With","start":11.27,"end":11.43},{"word":"reflector,","start":11.51,"end":12.15},{"word":"I","start":12.31,"end":12.39},{"word":"can","start":12.39,"end":12.55},{"word":"easily","start":12.55,"end":12.95},{"word":"convert","start":12.95,"end":13.43},{"word":"any","start":13.43,"end":13.67},{"word":"audio","start":13.67,"end":13.99},{"word":"into","start":14.98,"end":15.06},{"word":"accurate","start":15.22,"end":15.54},{"word":"transcription.","start":15.7,"end":16.34},{"word":"saving","start":16.99,"end":17.15},{"word":"me","start":17.31,"end":17.47},{"word":"hours","start":17.47,"end":17.87},{"word":"of","start":17.87,"end":18.11},{"word":"tedious","start":18.11,"end":18.67},{"word":"manual","start":18.67,"end":19.07},{"word":"work.","start":19.07,"end":19.31}]}'
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000
      Content-Length:
      - '1933'
      Content-Type:
      - application/json
      Date:
      - Wed, 13 Aug 2025 18:26:59 GMT
      Modal-Function-Call-Id:
      - fc-01K2JAWC7GAMKX4DSJ21WV31NG
      Vary:
      - accept-encoding
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,84 @@
interactions:
- request:
    body: '{"audio_file_url": "https://reflector-github-pytest.s3.us-east-1.amazonaws.com/test_mathieu_hello.mp3",
      "language": "en", "batch": true}'
    headers:
      accept:
      - '*/*'
      accept-encoding:
      - gzip, deflate
      authorization:
      - DUMMY_API_KEY
      connection:
      - keep-alive
      content-length:
      - '136'
      content-type:
      - application/json
      host:
      - monadical-sas--reflector-transcriber-parakeet-web.modal.run
      user-agent:
      - python-httpx/0.27.2
    method: POST
    uri: https://monadical-sas--reflector-transcriber-parakeet-web.modal.run/v1/audio/transcriptions-from-url
  response:
    body:
      string: '{"text":"Hi there everyone. Today I want to share my incredible experience
        with Reflector. a Q teenage product that revolutionizes audio processing.
        With reflector, I can easily convert any audio into accurate transcription.
        saving me hours of tedious manual work.","words":[{"word":"Hi","start":0.87,"end":1.19},{"word":"there","start":1.19,"end":1.35},{"word":"everyone.","start":1.51,"end":1.83},{"word":"Today","start":2.63,"end":2.87},{"word":"I","start":3.36,"end":3.52},{"word":"want","start":3.6,"end":3.76},{"word":"to","start":3.76,"end":3.92},{"word":"share","start":3.92,"end":4.16},{"word":"my","start":4.16,"end":4.4},{"word":"incredible","start":4.32,"end":4.96},{"word":"experience","start":4.96,"end":5.44},{"word":"with","start":5.44,"end":5.68},{"word":"Reflector.","start":5.68,"end":6.24},{"word":"a","start":6.93,"end":7.01},{"word":"Q","start":7.01,"end":7.17},{"word":"teenage","start":7.25,"end":7.65},{"word":"product","start":7.89,"end":8.29},{"word":"that","start":8.29,"end":8.61},{"word":"revolutionizes","start":8.61,"end":9.65},{"word":"audio","start":9.65,"end":10.05},{"word":"processing.","start":10.05,"end":10.53},{"word":"With","start":11.27,"end":11.43},{"word":"reflector,","start":11.51,"end":12.15},{"word":"I","start":12.31,"end":12.39},{"word":"can","start":12.39,"end":12.55},{"word":"easily","start":12.55,"end":12.95},{"word":"convert","start":12.95,"end":13.43},{"word":"any","start":13.43,"end":13.67},{"word":"audio","start":13.67,"end":13.99},{"word":"into","start":14.98,"end":15.06},{"word":"accurate","start":15.22,"end":15.54},{"word":"transcription.","start":15.7,"end":16.34},{"word":"saving","start":16.99,"end":17.15},{"word":"me","start":17.31,"end":17.47},{"word":"hours","start":17.47,"end":17.87},{"word":"of","start":17.87,"end":18.11},{"word":"tedious","start":18.11,"end":18.67},{"word":"manual","start":18.67,"end":19.07},{"word":"work.","start":19.07,"end":19.31}]}'
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000
      Content-Length:
      - '1933'
      Content-Type:
      - application/json
      Date:
      - Wed, 13 Aug 2025 18:27:02 GMT
      Modal-Function-Call-Id:
      - fc-01K2JAYZ1AR2HE422VJVKBWX9Z
      Vary:
      - accept-encoding
    status:
      code: 200
      message: OK
- request:
    body: ''
    headers:
      accept:
      - '*/*'
      accept-encoding:
      - gzip, deflate
      authorization:
      - DUMMY_API_KEY
      connection:
      - keep-alive
      content-length:
      - '0'
      host:
      - monadical-sas--reflector-diarizer-web.modal.run
      user-agent:
      - python-httpx/0.27.2
    method: POST
    uri: https://monadical-sas--reflector-diarizer-web.modal.run/diarize?audio_file_url=https%3A%2F%2Freflector-github-pytest.s3.us-east-1.amazonaws.com%2Ftest_mathieu_hello.mp3&timestamp=0
  response:
    body:
      string: '{"diarization":[{"start":0.823,"end":1.91,"speaker":0},{"start":2.572,"end":6.409,"speaker":0},{"start":6.783,"end":10.62,"speaker":0},{"start":11.231,"end":14.168,"speaker":0},{"start":14.796,"end":19.295,"speaker":0}]}'
    headers:
      Alt-Svc:
      - h3=":443"; ma=2592000
      Content-Length:
      - '220'
      Content-Type:
      - application/json
      Date:
      - Wed, 13 Aug 2025 18:27:18 GMT
      Modal-Function-Call-Id:
      - fc-01K2JAZ1M34NQRJK03CCFK95D6
      Vary:
      - accept-encoding
    status:
      code: 200
      message: OK
version: 1
@@ -5,7 +5,29 @@ from unittest.mock import patch
import pytest


# Pytest-docker configuration
@pytest.fixture(scope="session", autouse=True)
def settings_configuration():
    # these settings are linked to monadical for pytest-recording
    # if a fork is made, it has to provide its own urls when cassettes need to be updated
    # modal api keys have to be defined by the user
    from reflector.settings import settings

    settings.TRANSCRIPT_BACKEND = "modal"
    settings.TRANSCRIPT_URL = (
        "https://monadical-sas--reflector-transcriber-parakeet-web.modal.run"
    )
    settings.DIARIZATION_BACKEND = "modal"
    settings.DIARIZATION_URL = "https://monadical-sas--reflector-diarizer-web.modal.run"


@pytest.fixture(scope="module")
def vcr_config():
    """VCR configuration to filter sensitive headers"""
    return {
        "filter_headers": [("authorization", "DUMMY_API_KEY")],
    }


@pytest.fixture(scope="session")
def docker_compose_file(pytestconfig):
    return os.path.join(str(pytestconfig.rootdir), "tests", "docker-compose.test.yml")
@@ -156,6 +178,63 @@ async def dummy_diarization():
    yield


@pytest.fixture
async def dummy_file_transcript():
    from reflector.processors.file_transcript import FileTranscriptProcessor
    from reflector.processors.types import Transcript, Word

    class TestFileTranscriptProcessor(FileTranscriptProcessor):
        async def _transcript(self, data):
            return Transcript(
                text="Hello world. How are you today?",
                words=[
                    Word(start=0.0, end=0.5, text="Hello", speaker=0),
                    Word(start=0.5, end=0.6, text=" ", speaker=0),
                    Word(start=0.6, end=1.0, text="world", speaker=0),
                    Word(start=1.0, end=1.1, text=".", speaker=0),
                    Word(start=1.1, end=1.2, text=" ", speaker=0),
                    Word(start=1.2, end=1.5, text="How", speaker=0),
                    Word(start=1.5, end=1.6, text=" ", speaker=0),
                    Word(start=1.6, end=1.8, text="are", speaker=0),
                    Word(start=1.8, end=1.9, text=" ", speaker=0),
                    Word(start=1.9, end=2.1, text="you", speaker=0),
                    Word(start=2.1, end=2.2, text=" ", speaker=0),
                    Word(start=2.2, end=2.5, text="today", speaker=0),
                    Word(start=2.5, end=2.6, text="?", speaker=0),
                ],
            )

    with patch(
        "reflector.processors.file_transcript_auto.FileTranscriptAutoProcessor.__new__"
    ) as mock_auto:
        mock_auto.return_value = TestFileTranscriptProcessor()
        yield


@pytest.fixture
async def dummy_file_diarization():
    from reflector.processors.file_diarization import (
        FileDiarizationOutput,
        FileDiarizationProcessor,
    )
    from reflector.processors.types import DiarizationSegment

    class TestFileDiarizationProcessor(FileDiarizationProcessor):
        async def _diarize(self, data):
            return FileDiarizationOutput(
                diarization=[
                    DiarizationSegment(start=0.0, end=1.1, speaker=0),
                    DiarizationSegment(start=1.2, end=2.6, speaker=1),
                ]
            )

    with patch(
        "reflector.processors.file_diarization_auto.FileDiarizationAutoProcessor.__new__"
    ) as mock_auto:
        mock_auto.return_value = TestFileDiarizationProcessor()
        yield


@pytest.fixture
async def dummy_transcript_translator():
    from reflector.processors.transcript_translator import TranscriptTranslatorProcessor
@@ -216,9 +295,13 @@ async def dummy_storage():
    with (
        patch("reflector.storage.base.Storage.get_instance") as mock_storage,
        patch("reflector.storage.get_transcripts_storage") as mock_get_transcripts,
        patch(
            "reflector.pipelines.main_file_pipeline.get_transcripts_storage"
        ) as mock_get_transcripts2,
    ):
        mock_storage.return_value = dummy
        mock_get_transcripts.return_value = dummy
        mock_get_transcripts2.return_value = dummy
        yield


@@ -238,7 +321,10 @@ def celery_config():

@pytest.fixture(scope="session")
def celery_includes():
    return ["reflector.pipelines.main_live_pipeline"]
    return [
        "reflector.pipelines.main_live_pipeline",
        "reflector.pipelines.main_file_pipeline",
    ]


@pytest.fixture
@@ -280,7 +366,7 @@ async def fake_transcript_with_topics(tmpdir, client):
    transcript = await transcripts_controller.get_by_id(tid)
    assert transcript is not None

    await transcripts_controller.update(transcript, {"status": "finished"})
    await transcripts_controller.update(transcript, {"status": "ended"})

    # manually copy a file at the expected location
    audio_filename = transcript.audio_mp3_filename
@@ -1,7 +1,7 @@
version: '3.8'
version: "3.8"
services:
  postgres_test:
    image: postgres:15
    image: postgres:17
    environment:
      POSTGRES_DB: reflector_test
      POSTGRES_USER: test_user
@@ -10,4 +10,4 @@ services:
      - "15432:5432"
    command: postgres -c fsync=off -c synchronous_commit=off -c full_page_writes=off
    tmpfs:
      - /var/lib/postgresql/data:rw,noexec,nosuid,size=1g
      - /var/lib/postgresql/data:rw,noexec,nosuid,size=1g

285
server/tests/test_cleanup.py
Normal file
@@ -0,0 +1,285 @@
from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, patch

import pytest

from reflector.db.recordings import Recording, recordings_controller
from reflector.db.transcripts import SourceKind, transcripts_controller
from reflector.worker.cleanup import cleanup_old_public_data


@pytest.mark.asyncio
async def test_cleanup_old_public_data_skips_when_not_public():
    """Test that cleanup is skipped when PUBLIC_MODE is False."""
    with patch("reflector.worker.cleanup.settings") as mock_settings:
        mock_settings.PUBLIC_MODE = False

        result = await cleanup_old_public_data()

        # Should return early without doing anything
        assert result is None


@pytest.mark.asyncio
async def test_cleanup_old_public_data_deletes_old_anonymous_transcripts():
    """Test that old anonymous transcripts are deleted."""
    # Create old and new anonymous transcripts
    old_date = datetime.now(timezone.utc) - timedelta(days=8)
    new_date = datetime.now(timezone.utc) - timedelta(days=2)

    # Create old anonymous transcript (should be deleted)
    old_transcript = await transcripts_controller.add(
        name="Old Anonymous Transcript",
        source_kind=SourceKind.FILE,
        user_id=None,  # Anonymous
    )
    # Manually update created_at to be old
    from reflector.db import get_database
    from reflector.db.transcripts import transcripts

    await get_database().execute(
        transcripts.update()
        .where(transcripts.c.id == old_transcript.id)
        .values(created_at=old_date)
    )

    # Create new anonymous transcript (should NOT be deleted)
    new_transcript = await transcripts_controller.add(
        name="New Anonymous Transcript",
        source_kind=SourceKind.FILE,
        user_id=None,  # Anonymous
    )

    # Create old transcript with user (should NOT be deleted)
    old_user_transcript = await transcripts_controller.add(
        name="Old User Transcript",
        source_kind=SourceKind.FILE,
        user_id="user123",
    )
    await get_database().execute(
        transcripts.update()
        .where(transcripts.c.id == old_user_transcript.id)
        .values(created_at=old_date)
    )

    with patch("reflector.worker.cleanup.settings") as mock_settings:
        mock_settings.PUBLIC_MODE = True
        mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7

        # Mock the storage deletion
        with patch("reflector.db.transcripts.get_transcripts_storage") as mock_storage:
            mock_storage.return_value.delete_file = AsyncMock()

            result = await cleanup_old_public_data()

            # Check results
            assert result["transcripts_deleted"] == 1
            assert result["errors"] == []

            # Verify old anonymous transcript was deleted
            assert await transcripts_controller.get_by_id(old_transcript.id) is None

            # Verify new anonymous transcript still exists
            assert await transcripts_controller.get_by_id(new_transcript.id) is not None

            # Verify user transcript still exists
            assert await transcripts_controller.get_by_id(old_user_transcript.id) is not None


@pytest.mark.asyncio
async def test_cleanup_deletes_associated_meeting_and_recording():
    """Test that meetings and recordings associated with old transcripts are deleted."""
    from reflector.db import get_database
    from reflector.db.meetings import meetings
    from reflector.db.transcripts import transcripts

    old_date = datetime.now(timezone.utc) - timedelta(days=8)

    # Create a meeting
    meeting_id = "test-meeting-for-transcript"
    await get_database().execute(
        meetings.insert().values(
            id=meeting_id,
            room_name="Meeting with Transcript",
            room_url="https://example.com/meeting",
            host_room_url="https://example.com/meeting-host",
            start_date=old_date,
            end_date=old_date + timedelta(hours=1),
            room_id=None,
        )
    )

    # Create a recording
    recording = await recordings_controller.create(
        Recording(
            bucket_name="test-bucket",
            object_key="test-recording.mp4",
            recorded_at=old_date,
        )
    )

    # Create an old transcript with both meeting and recording
    old_transcript = await transcripts_controller.add(
        name="Old Transcript with Meeting and Recording",
        source_kind=SourceKind.ROOM,
        user_id=None,
        meeting_id=meeting_id,
        recording_id=recording.id,
    )

    # Update created_at to be old
    await get_database().execute(
        transcripts.update()
        .where(transcripts.c.id == old_transcript.id)
        .values(created_at=old_date)
    )

    with patch("reflector.worker.cleanup.settings") as mock_settings:
        mock_settings.PUBLIC_MODE = True
        mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7

        # Mock storage deletion
        with patch("reflector.db.transcripts.get_transcripts_storage") as mock_storage:
            mock_storage.return_value.delete_file = AsyncMock()
            with patch(
                "reflector.worker.cleanup.get_recordings_storage"
            ) as mock_rec_storage:
                mock_rec_storage.return_value.delete_file = AsyncMock()

                result = await cleanup_old_public_data()

                # Check results
                assert result["transcripts_deleted"] == 1
                assert result["meetings_deleted"] == 1
                assert result["recordings_deleted"] == 1
                assert result["errors"] == []

                # Verify transcript was deleted
                assert await transcripts_controller.get_by_id(old_transcript.id) is None

                # Verify meeting was deleted
                query = meetings.select().where(meetings.c.id == meeting_id)
                meeting_result = await get_database().fetch_one(query)
                assert meeting_result is None

                # Verify recording was deleted
                assert await recordings_controller.get_by_id(recording.id) is None


@pytest.mark.asyncio
async def test_cleanup_handles_errors_gracefully():
    """Test that cleanup continues even when individual deletions fail."""
    old_date = datetime.now(timezone.utc) - timedelta(days=8)

    # Create multiple old transcripts
    transcript1 = await transcripts_controller.add(
        name="Transcript 1",
        source_kind=SourceKind.FILE,
        user_id=None,
    )
    transcript2 = await transcripts_controller.add(
        name="Transcript 2",
        source_kind=SourceKind.FILE,
        user_id=None,
    )

    # Update created_at to be old
    from reflector.db import get_database
    from reflector.db.transcripts import transcripts

    for t_id in [transcript1.id, transcript2.id]:
        await get_database().execute(
            transcripts.update()
            .where(transcripts.c.id == t_id)
            .values(created_at=old_date)
        )

    with patch("reflector.worker.cleanup.settings") as mock_settings:
        mock_settings.PUBLIC_MODE = True
        mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7

        # Mock remove_by_id to fail for the first transcript
        original_remove = transcripts_controller.remove_by_id
        call_count = 0

        async def mock_remove_by_id(transcript_id, user_id=None):
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                raise Exception("Simulated deletion error")
            return await original_remove(transcript_id, user_id)

        with patch.object(
            transcripts_controller, "remove_by_id", side_effect=mock_remove_by_id
        ):
            result = await cleanup_old_public_data()

            # Should have one successful deletion and one error
            assert result["transcripts_deleted"] == 1
            assert len(result["errors"]) == 1
            assert "Failed to delete transcript" in result["errors"][0]


@pytest.mark.asyncio
async def test_meeting_consent_cascade_delete():
    """Test that meeting_consent records are automatically deleted when meeting is deleted."""
    from reflector.db import get_database
    from reflector.db.meetings import (
        meeting_consent,
        meeting_consent_controller,
        meetings,
    )

    # Create a meeting
    meeting_id = "test-cascade-meeting"
    await get_database().execute(
        meetings.insert().values(
            id=meeting_id,
            room_name="Test Meeting for CASCADE",
            room_url="https://example.com/cascade-test",
            host_room_url="https://example.com/cascade-test-host",
            start_date=datetime.now(timezone.utc),
            end_date=datetime.now(timezone.utc) + timedelta(hours=1),
            room_id=None,
        )
    )

    # Create consent records for this meeting
    consent1_id = "consent-1"
    consent2_id = "consent-2"

    await get_database().execute(
        meeting_consent.insert().values(
            id=consent1_id,
            meeting_id=meeting_id,
            user_id="user1",
            consent_given=True,
            consent_timestamp=datetime.now(timezone.utc),
        )
    )

    await get_database().execute(
        meeting_consent.insert().values(
            id=consent2_id,
            meeting_id=meeting_id,
            user_id="user2",
            consent_given=False,
            consent_timestamp=datetime.now(timezone.utc),
        )
    )

    # Verify consent records exist
    consents = await meeting_consent_controller.get_by_meeting_id(meeting_id)
    assert len(consents) == 2

    # Delete the meeting
    await get_database().execute(meetings.delete().where(meetings.c.id == meeting_id))

    # Verify meeting is deleted
    query = meetings.select().where(meetings.c.id == meeting_id)
    result = await get_database().fetch_one(query)
    assert result is None

    # Verify consent records are automatically deleted (CASCADE DELETE)
    consents_after = await meeting_consent_controller.get_by_meeting_id(meeting_id)
    assert len(consents_after) == 0
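
The CASCADE behavior exercised by this test comes from the foreign key on
meeting_consent.meeting_id. A minimal sketch of such a declaration with
SQLAlchemy Core (illustrative only; the actual reflector.db.meetings schema may
differ in detail, but ondelete="CASCADE" is the behavior the assertions above
rely on):

import sqlalchemy as sa

metadata = sa.MetaData()

# Hypothetical declaration: deleting a row in "meetings" removes its consent
# rows in the same statement, so no application-level cleanup is needed.
meeting_consent_sketch = sa.Table(
    "meeting_consent",
    metadata,
    sa.Column("id", sa.String, primary_key=True),
    sa.Column(
        "meeting_id",
        sa.String,
        sa.ForeignKey("meetings.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column("user_id", sa.String),
    sa.Column("consent_given", sa.Boolean),
    sa.Column("consent_timestamp", sa.DateTime(timezone=True)),
)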

333
server/tests/test_gpu_modal_transcript.py
Normal file
@@ -0,0 +1,333 @@
"""
Tests for GPU Modal transcription endpoints.

These tests are marked with the "gpu_modal" marker and will not run by default.
Run them with: pytest -m gpu_modal tests/test_gpu_modal_transcript.py

Required environment variables:
- TRANSCRIPT_URL: URL to the Modal.com endpoint (required)
- TRANSCRIPT_MODAL_API_KEY: API key for authentication (optional)
- TRANSCRIPT_MODEL: Model name to use (optional, defaults to nvidia/parakeet-tdt-0.6b-v2)

Example with pytest (override default addopts to run ONLY gpu_modal tests):
    TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-parakeet-web-dev.modal.run \
    TRANSCRIPT_MODAL_API_KEY=your-api-key \
    uv run -m pytest -m gpu_modal --no-cov tests/test_gpu_modal_transcript.py

    # Or with completely clean options:
    uv run -m pytest -m gpu_modal -o addopts="" tests/

Running Modal locally for testing:
    modal serve gpu/modal_deployments/reflector_transcriber_parakeet.py
    # This will give you a local URL like https://xxxxx--reflector-transcriber-parakeet-web-dev.modal.run to test against
"""

import os
import tempfile
from pathlib import Path

import httpx
import pytest

# Test audio file URL
TEST_AUDIO_URL = (
    "https://reflector-github-pytest.s3.us-east-1.amazonaws.com/test_mathieu_hello.mp3"
)


def get_modal_transcript_url():
    """Get and validate the Modal transcript URL from environment."""
    url = os.environ.get("TRANSCRIPT_URL")
    if not url:
        pytest.skip(
            "TRANSCRIPT_URL environment variable is required for GPU Modal tests"
        )
    return url


def get_auth_headers():
    """Get authentication headers if API key is available."""
    api_key = os.environ.get("TRANSCRIPT_MODAL_API_KEY")
    if api_key:
        return {"Authorization": f"Bearer {api_key}"}
    return {}


def get_model_name():
    """Get the model name from environment or use default."""
    return os.environ.get("TRANSCRIPT_MODEL", "nvidia/parakeet-tdt-0.6b-v2")


@pytest.mark.gpu_modal
class TestGPUModalTranscript:
    """Test suite for GPU Modal transcription endpoints."""

    def test_transcriptions_from_url(self):
        """Test the /v1/audio/transcriptions-from-url endpoint."""
        url = get_modal_transcript_url()
        headers = get_auth_headers()

        with httpx.Client(timeout=60.0) as client:
            response = client.post(
                f"{url}/v1/audio/transcriptions-from-url",
                json={
                    "audio_file_url": TEST_AUDIO_URL,
                    "model": get_model_name(),
                    "language": "en",
                    "timestamp_offset": 0.0,
                },
                headers=headers,
            )

            assert response.status_code == 200, f"Request failed: {response.text}"
            result = response.json()

            # Verify response structure
            assert "text" in result
            assert "words" in result
            assert isinstance(result["text"], str)
            assert isinstance(result["words"], list)

            # Verify content is meaningful
            assert len(result["text"]) > 0, "Transcript text should not be empty"
            assert len(result["words"]) > 0, "Words list must not be empty"

            # Verify word structure
            for word in result["words"]:
                assert "word" in word
                assert "start" in word
                assert "end" in word
                assert isinstance(word["start"], (int, float))
                assert isinstance(word["end"], (int, float))
                assert word["start"] <= word["end"]

    def test_transcriptions_single_file(self):
        """Test the /v1/audio/transcriptions endpoint with a single file."""
        url = get_modal_transcript_url()
        headers = get_auth_headers()

        # Download test audio file to upload
        with httpx.Client(timeout=60.0) as client:
            audio_response = client.get(TEST_AUDIO_URL)
            audio_response.raise_for_status()

            with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp_file:
                tmp_file.write(audio_response.content)
                tmp_file_path = tmp_file.name

            try:
                # Upload the file for transcription
                with open(tmp_file_path, "rb") as f:
                    files = {"file": ("test_audio.mp3", f, "audio/mpeg")}
                    data = {
                        "model": get_model_name(),
                        "language": "en",
                        "batch": "false",
                    }

                    response = client.post(
                        f"{url}/v1/audio/transcriptions",
                        files=files,
                        data=data,
                        headers=headers,
                    )

                assert response.status_code == 200, f"Request failed: {response.text}"
                result = response.json()

                # Verify response structure for single file
                assert "text" in result
                assert "words" in result
                assert "filename" in result
                assert isinstance(result["text"], str)
                assert isinstance(result["words"], list)

                # Verify content
                assert len(result["text"]) > 0, "Transcript text should not be empty"

            finally:
                Path(tmp_file_path).unlink(missing_ok=True)

    def test_transcriptions_multiple_files(self):
        """Test the /v1/audio/transcriptions endpoint with multiple files (non-batch mode)."""
        url = get_modal_transcript_url()
        headers = get_auth_headers()

        # Create multiple test files (we'll use the same audio content for simplicity)
        with httpx.Client(timeout=60.0) as client:
            audio_response = client.get(TEST_AUDIO_URL)
            audio_response.raise_for_status()
            audio_content = audio_response.content

            temp_files = []
            try:
                # Create 3 temporary files
                for i in range(3):
                    tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
                    tmp_file.write(audio_content)
                    tmp_file.close()
                    temp_files.append(tmp_file.name)

                # Upload multiple files for transcription (non-batch)
                files = [
                    ("files", (f"test_audio_{i}.mp3", open(f, "rb"), "audio/mpeg"))
                    for i, f in enumerate(temp_files)
                ]
                data = {
                    "model": get_model_name(),
                    "language": "en",
                    "batch": "false",
                }

                response = client.post(
                    f"{url}/v1/audio/transcriptions",
                    files=files,
                    data=data,
                    headers=headers,
                )

                # Close file handles
                for _, file_tuple in files:
                    file_tuple[1].close()

                assert response.status_code == 200, f"Request failed: {response.text}"
                result = response.json()

                # Verify response structure for multiple files (non-batch)
                assert "results" in result
                assert isinstance(result["results"], list)
                assert len(result["results"]) == 3

                for idx, file_result in enumerate(result["results"]):
                    assert "text" in file_result
                    assert "words" in file_result
                    assert "filename" in file_result
                    assert isinstance(file_result["text"], str)
                    assert isinstance(file_result["words"], list)
                    assert len(file_result["text"]) > 0

            finally:
                for f in temp_files:
                    Path(f).unlink(missing_ok=True)

    def test_transcriptions_multiple_files_batch(self):
        """Test the /v1/audio/transcriptions endpoint with multiple files in batch mode."""
        url = get_modal_transcript_url()
        headers = get_auth_headers()

        # Create multiple test files
        with httpx.Client(timeout=60.0) as client:
            audio_response = client.get(TEST_AUDIO_URL)
            audio_response.raise_for_status()
            audio_content = audio_response.content

            temp_files = []
            try:
                # Create 3 temporary files
                for i in range(3):
                    tmp_file = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
                    tmp_file.write(audio_content)
                    tmp_file.close()
                    temp_files.append(tmp_file.name)

                # Upload multiple files for batch transcription
                files = [
                    ("files", (f"test_audio_{i}.mp3", open(f, "rb"), "audio/mpeg"))
                    for i, f in enumerate(temp_files)
                ]
                data = {
                    "model": get_model_name(),
                    "language": "en",
                    "batch": "true",
                }

                response = client.post(
                    f"{url}/v1/audio/transcriptions",
                    files=files,
                    data=data,
                    headers=headers,
                )

                # Close file handles
                for _, file_tuple in files:
                    file_tuple[1].close()

                assert response.status_code == 200, f"Request failed: {response.text}"
                result = response.json()

                # Verify response structure for batch mode
                assert "results" in result
                assert isinstance(result["results"], list)
                assert len(result["results"]) == 3

                for idx, batch_result in enumerate(result["results"]):
                    assert "text" in batch_result
                    assert "words" in batch_result
                    assert "filename" in batch_result
                    assert isinstance(batch_result["text"], str)
                    assert isinstance(batch_result["words"], list)
                    assert len(batch_result["text"]) > 0

            finally:
                for f in temp_files:
                    Path(f).unlink(missing_ok=True)

    @pytest.mark.skipif(
        "parakeet" not in get_model_name(), reason="Parakeet only supports English"
    )
    def test_transcriptions_error_handling(self):
        """Test error handling for invalid requests."""
        url = get_modal_transcript_url()
        headers = get_auth_headers()

        with httpx.Client(timeout=60.0) as client:
            # Test with unsupported language
            response = client.post(
                f"{url}/v1/audio/transcriptions-from-url",
                json={
                    "audio_file_url": TEST_AUDIO_URL,
                    "model": get_model_name(),
                    "language": "fr",  # Parakeet only supports English
                    "timestamp_offset": 0.0,
                },
                headers=headers,
            )

            assert response.status_code == 400
            assert "only supports English" in response.text

    def test_transcriptions_with_timestamp_offset(self):
        """Test transcription with timestamp offset parameter."""
        url = get_modal_transcript_url()
        headers = get_auth_headers()

        with httpx.Client(timeout=60.0) as client:
            # Test with timestamp offset
            response = client.post(
                f"{url}/v1/audio/transcriptions-from-url",
                json={
                    "audio_file_url": TEST_AUDIO_URL,
                    "model": get_model_name(),
                    "language": "en",
                    "timestamp_offset": 10.0,  # Add 10 second offset
                },
                headers=headers,
            )

            assert response.status_code == 200, f"Request failed: {response.text}"
            result = response.json()

            # Verify response structure
            assert "text" in result
            assert "words" in result
            assert len(result["words"]) > 0, "Words list must not be empty"

            # Verify that timestamps have been offset
            for word in result["words"]:
                # All timestamps should be >= 10.0 due to offset
                assert (
                    word["start"] >= 10.0
                ), f"Word start time {word['start']} should be >= 10.0"
                assert (
                    word["end"] >= 10.0
                ), f"Word end time {word['end']} should be >= 10.0"

633
server/tests/test_pipeline_main_file.py
Normal file
@@ -0,0 +1,633 @@
"""
Tests for PipelineMainFile - file-based processing pipeline

This module verifies the complete file processing pipeline with minimal mocking,
ensuring all processors are invoked and that the happy path completes correctly.
"""

from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
from uuid import uuid4

import pytest

from reflector.pipelines.main_file_pipeline import PipelineMainFile
from reflector.processors.file_diarization import FileDiarizationOutput
from reflector.processors.types import (
    DiarizationSegment,
    TitleSummary,
    Word,
)
from reflector.processors.types import (
    Transcript as TranscriptType,
)


@pytest.fixture
async def dummy_file_transcript():
    """Mock FileTranscriptAutoProcessor for file processing"""
    from reflector.processors.file_transcript import FileTranscriptProcessor

    class TestFileTranscriptProcessor(FileTranscriptProcessor):
        async def _transcript(self, data):
            return TranscriptType(
                text="Hello world. How are you today?",
                words=[
                    Word(start=0.0, end=0.5, text="Hello", speaker=0),
                    Word(start=0.5, end=0.6, text=" ", speaker=0),
                    Word(start=0.6, end=1.0, text="world", speaker=0),
                    Word(start=1.0, end=1.1, text=".", speaker=0),
                    Word(start=1.1, end=1.2, text=" ", speaker=0),
                    Word(start=1.2, end=1.5, text="How", speaker=0),
                    Word(start=1.5, end=1.6, text=" ", speaker=0),
                    Word(start=1.6, end=1.8, text="are", speaker=0),
                    Word(start=1.8, end=1.9, text=" ", speaker=0),
                    Word(start=1.9, end=2.1, text="you", speaker=0),
                    Word(start=2.1, end=2.2, text=" ", speaker=0),
                    Word(start=2.2, end=2.5, text="today", speaker=0),
                    Word(start=2.5, end=2.6, text="?", speaker=0),
                ],
            )

    with patch(
        "reflector.processors.file_transcript_auto.FileTranscriptAutoProcessor.__new__"
    ) as mock_auto:
        mock_auto.return_value = TestFileTranscriptProcessor()
        yield


@pytest.fixture
async def dummy_file_diarization():
    """Mock FileDiarizationAutoProcessor for file processing"""
    from reflector.processors.file_diarization import FileDiarizationProcessor

    class TestFileDiarizationProcessor(FileDiarizationProcessor):
        async def _diarize(self, data):
            return FileDiarizationOutput(
                diarization=[
                    DiarizationSegment(start=0.0, end=1.1, speaker=0),
                    DiarizationSegment(start=1.2, end=2.6, speaker=1),
                ]
            )

    with patch(
        "reflector.processors.file_diarization_auto.FileDiarizationAutoProcessor.__new__"
    ) as mock_auto:
        mock_auto.return_value = TestFileDiarizationProcessor()
        yield


@pytest.fixture
async def mock_transcript_in_db(tmpdir):
    """Create a mock transcript in the database"""
    from reflector.db.transcripts import Transcript
    from reflector.settings import settings

    # Set the DATA_DIR to our tmpdir
    original_data_dir = settings.DATA_DIR
    settings.DATA_DIR = str(tmpdir)

    transcript_id = str(uuid4())
    data_path = Path(tmpdir) / transcript_id
    data_path.mkdir(parents=True, exist_ok=True)

    # Create mock transcript object
    transcript = Transcript(
        id=transcript_id,
        name="Test Transcript",
        status="processing",
        source_kind="file",
        source_language="en",
        target_language="en",
    )

    # Mock the controller to return our transcript
    try:
        with patch(
            "reflector.pipelines.main_file_pipeline.transcripts_controller.get_by_id"
        ) as mock_get:
            mock_get.return_value = transcript
            with patch(
                "reflector.pipelines.main_live_pipeline.transcripts_controller.get_by_id"
            ) as mock_get2:
                mock_get2.return_value = transcript
                with patch(
                    "reflector.pipelines.main_live_pipeline.transcripts_controller.update"
                ) as mock_update:
                    mock_update.return_value = None
                    yield transcript
    finally:
        # Restore original DATA_DIR
        settings.DATA_DIR = original_data_dir


@pytest.fixture
async def mock_storage():
    """Mock storage for file uploads"""
    from reflector.storage.base import Storage

    class TestStorage(Storage):
        async def _put_file(self, path, data):
            return None

        async def _get_file_url(self, path):
            return f"http://test-storage/{path}"

        async def _get_file(self, path):
            return b"test_audio_data"

        async def _delete_file(self, path):
            return None

    storage = TestStorage()
    # Add mock tracking for verification
    storage._put_file = AsyncMock(side_effect=storage._put_file)
    storage._get_file_url = AsyncMock(side_effect=storage._get_file_url)

    with patch(
        "reflector.pipelines.main_file_pipeline.get_transcripts_storage"
    ) as mock_get:
        mock_get.return_value = storage
        yield storage


@pytest.fixture
async def mock_audio_file_writer():
    """Mock AudioFileWriterProcessor to avoid actual file writing"""
    with patch(
        "reflector.pipelines.main_file_pipeline.AudioFileWriterProcessor"
    ) as mock_writer_class:
        mock_writer = AsyncMock()
        mock_writer.push = AsyncMock()
        mock_writer.flush = AsyncMock()
        mock_writer_class.return_value = mock_writer
        yield mock_writer


@pytest.fixture
async def mock_waveform_processor():
    """Mock AudioWaveformProcessor"""
    with patch(
        "reflector.pipelines.main_file_pipeline.AudioWaveformProcessor"
    ) as mock_waveform_class:
        mock_waveform = AsyncMock()
        mock_waveform.set_pipeline = MagicMock()
        mock_waveform.flush = AsyncMock()
        mock_waveform_class.return_value = mock_waveform
        yield mock_waveform


@pytest.fixture
async def mock_topic_detector():
    """Mock TranscriptTopicDetectorProcessor"""
    with patch(
        "reflector.pipelines.main_file_pipeline.TranscriptTopicDetectorProcessor"
    ) as mock_topic_class:
        mock_topic = AsyncMock()
        mock_topic.set_pipeline = MagicMock()
        mock_topic.push = AsyncMock()
        mock_topic.flush_called = False

        # When flush is called, simulate topic detection by calling the callback
        async def flush_with_callback():
            mock_topic.flush_called = True
            if hasattr(mock_topic, "_callback"):
                # Create a minimal transcript for the TitleSummary
                test_transcript = TranscriptType(words=[], text="test transcript")
                await mock_topic._callback(
                    TitleSummary(
                        title="Test Topic",
                        summary="Test topic summary",
                        timestamp=0.0,
                        duration=10.0,
                        transcript=test_transcript,
                    )
                )

        mock_topic.flush = flush_with_callback

        def init_with_callback(callback=None):
            mock_topic._callback = callback
            return mock_topic

        mock_topic_class.side_effect = init_with_callback
        yield mock_topic


@pytest.fixture
async def mock_title_processor():
    """Mock TranscriptFinalTitleProcessor"""
    with patch(
        "reflector.pipelines.main_file_pipeline.TranscriptFinalTitleProcessor"
    ) as mock_title_class:
        mock_title = AsyncMock()
        mock_title.set_pipeline = MagicMock()
        mock_title.push = AsyncMock()
        mock_title.flush_called = False

        # When flush is called, simulate title generation by calling the callback
        async def flush_with_callback():
            mock_title.flush_called = True
            if hasattr(mock_title, "_callback"):
                from reflector.processors.types import FinalTitle

                await mock_title._callback(FinalTitle(title="Test Title"))

        mock_title.flush = flush_with_callback

        def init_with_callback(callback=None):
            mock_title._callback = callback
            return mock_title

        mock_title_class.side_effect = init_with_callback
        yield mock_title


@pytest.fixture
async def mock_summary_processor():
    """Mock TranscriptFinalSummaryProcessor"""
    with patch(
        "reflector.pipelines.main_file_pipeline.TranscriptFinalSummaryProcessor"
    ) as mock_summary_class:
        mock_summary = AsyncMock()
        mock_summary.set_pipeline = MagicMock()
        mock_summary.push = AsyncMock()
        mock_summary.flush_called = False

        # When flush is called, simulate summary generation by calling the callbacks
        async def flush_with_callback():
            mock_summary.flush_called = True
            from reflector.processors.types import FinalLongSummary, FinalShortSummary

            if hasattr(mock_summary, "_callback"):
                await mock_summary._callback(
                    FinalLongSummary(long_summary="Test long summary", duration=10.0)
                )
            if hasattr(mock_summary, "_on_short_summary"):
                await mock_summary._on_short_summary(
                    FinalShortSummary(short_summary="Test short summary", duration=10.0)
                )

        mock_summary.flush = flush_with_callback

        def init_with_callback(transcript=None, callback=None, on_short_summary=None):
            mock_summary._callback = callback
            mock_summary._on_short_summary = on_short_summary
            return mock_summary

        mock_summary_class.side_effect = init_with_callback
        yield mock_summary


@pytest.mark.asyncio
async def test_pipeline_main_file_process(
    tmpdir,
    mock_transcript_in_db,
    dummy_file_transcript,
    dummy_file_diarization,
    mock_storage,
    mock_audio_file_writer,
    mock_waveform_processor,
    mock_topic_detector,
    mock_title_processor,
    mock_summary_processor,
):
    """
    Test the complete PipelineMainFile processing pipeline.

    This test verifies:
    1. Audio extraction and writing
    2. Audio upload to storage
    3. Parallel processing of transcription, diarization, and waveform
    4. Assembly of transcript with diarization
    5. Topic detection
    6. Title and summary generation
    """
    # Use the checked-in test audio file
    test_audio_path = Path(__file__).parent / "records" / "test_mathieu_hello.wav"

    # Copy test audio to the transcript's data path as if it was uploaded
    upload_path = mock_transcript_in_db.data_path / "upload.wav"
    upload_path.write_bytes(test_audio_path.read_bytes())

    # Also create the audio.mp3 file that would be created by AudioFileWriterProcessor
    # Since we're mocking AudioFileWriterProcessor, we need to create this manually
    mp3_path = mock_transcript_in_db.data_path / "audio.mp3"
    mp3_path.write_bytes(b"mock_mp3_data")

    # Track callback invocations
    callback_marks = {
        "on_status": [],
        "on_duration": [],
        "on_waveform": [],
        "on_topic": [],
        "on_title": [],
        "on_long_summary": [],
        "on_short_summary": [],
    }

    # Create pipeline with mocked callbacks
    pipeline = PipelineMainFile(transcript_id=mock_transcript_in_db.id)

    # Override callbacks to track invocations
    async def track_callback(name, data):
        callback_marks[name].append(data)
        # Call the original callback
        original = getattr(PipelineMainFile, name)
        return await original(pipeline, data)

    for callback_name in callback_marks.keys():
        setattr(
            pipeline,
            callback_name,
            lambda data, n=callback_name: track_callback(n, data),
        )

    # Mock av.open for audio processing
    with patch("reflector.pipelines.main_file_pipeline.av.open") as mock_av:
        # Mock container for checking video streams
        mock_container = MagicMock()
        mock_container.streams.video = []  # No video streams (audio only)
        mock_container.close = MagicMock()

        # Mock container for decoding audio frames
        mock_decode_container = MagicMock()
        mock_decode_container.decode.return_value = iter(
            [MagicMock()]
        )  # One mock audio frame
        mock_decode_container.close = MagicMock()

        # Return different containers for different calls
        mock_av.side_effect = [mock_container, mock_decode_container]

        # Run the pipeline
        await pipeline.process(upload_path)

    # Verify audio extraction and writing
    assert mock_audio_file_writer.push.called
    assert mock_audio_file_writer.flush.called

    # Verify storage upload
    assert mock_storage._put_file.called
    assert mock_storage._get_file_url.called

    # Verify waveform generation
    assert mock_waveform_processor.flush.called
    assert mock_waveform_processor.set_pipeline.called

    # Verify topic detection
    assert mock_topic_detector.push.called
    assert mock_topic_detector.flush_called

    # Verify title generation
    assert mock_title_processor.push.called
    assert mock_title_processor.flush_called

    # Verify summary generation
    assert mock_summary_processor.push.called
    assert mock_summary_processor.flush_called

    # Verify callbacks were invoked
    assert len(callback_marks["on_topic"]) > 0, "Topic callback should be invoked"
    assert len(callback_marks["on_title"]) > 0, "Title callback should be invoked"
    assert (
        len(callback_marks["on_long_summary"]) > 0
    ), "Long summary callback should be invoked"
    assert (
        len(callback_marks["on_short_summary"]) > 0
    ), "Short summary callback should be invoked"

    print(f"Callback marks: {callback_marks}")

    # Verify the pipeline completed successfully
    assert pipeline.logger is not None
    print("PipelineMainFile test completed successfully!")


@pytest.mark.asyncio
async def test_pipeline_main_file_with_video(
    tmpdir,
    mock_transcript_in_db,
    dummy_file_transcript,
    dummy_file_diarization,
    mock_storage,
    mock_audio_file_writer,
    mock_waveform_processor,
    mock_topic_detector,
    mock_title_processor,
    mock_summary_processor,
):
    """
    Test PipelineMainFile with video input (verifies audio extraction).
    """
    # Use the checked-in test audio file
    test_audio_path = Path(__file__).parent / "records" / "test_mathieu_hello.wav"

    # Copy test audio to the transcript's data path as if it was a video upload
    upload_path = mock_transcript_in_db.data_path / "upload.mp4"
    upload_path.write_bytes(test_audio_path.read_bytes())

    # Also create the audio.mp3 file that would be created by AudioFileWriterProcessor
    mp3_path = mock_transcript_in_db.data_path / "audio.mp3"
    mp3_path.write_bytes(b"mock_mp3_data")

    # Create pipeline
    pipeline = PipelineMainFile(transcript_id=mock_transcript_in_db.id)

    # Mock av.open for video processing
    with patch("reflector.pipelines.main_file_pipeline.av.open") as mock_av:
        # Mock container for checking video streams
        mock_container = MagicMock()
        mock_container.streams.video = [MagicMock()]  # Has video streams
        mock_container.close = MagicMock()

        # Mock container for decoding audio frames
        mock_decode_container = MagicMock()
        mock_decode_container.decode.return_value = iter(
            [MagicMock()]
        )  # One mock audio frame
        mock_decode_container.close = MagicMock()

        # Return different containers for different calls
        mock_av.side_effect = [mock_container, mock_decode_container]

        # Run the pipeline
        await pipeline.process(upload_path)

    # Verify audio extraction from video
    assert mock_audio_file_writer.push.called
    assert mock_audio_file_writer.flush.called

    # Verify the rest of the pipeline completed
    assert mock_storage._put_file.called
    assert mock_waveform_processor.flush.called
    assert mock_topic_detector.push.called
    assert mock_title_processor.push.called
    assert mock_summary_processor.push.called

    print("PipelineMainFile video test completed successfully!")


@pytest.mark.asyncio
async def test_pipeline_main_file_no_diarization(
    tmpdir,
    mock_transcript_in_db,
    dummy_file_transcript,
    mock_storage,
    mock_audio_file_writer,
    mock_waveform_processor,
    mock_topic_detector,
    mock_title_processor,
    mock_summary_processor,
):
    """
    Test PipelineMainFile with diarization disabled.
    """
    from reflector.settings import settings

    # Disable diarization
    with patch.object(settings, "DIARIZATION_BACKEND", None):
        # Use the checked-in test audio file
        test_audio_path = Path(__file__).parent / "records" / "test_mathieu_hello.wav"

        # Copy test audio to the transcript's data path
        upload_path = mock_transcript_in_db.data_path / "upload.wav"
        upload_path.write_bytes(test_audio_path.read_bytes())

        # Also create the audio.mp3 file
        mp3_path = mock_transcript_in_db.data_path / "audio.mp3"
        mp3_path.write_bytes(b"mock_mp3_data")

        # Create pipeline
        pipeline = PipelineMainFile(transcript_id=mock_transcript_in_db.id)

        # Mock av.open for audio processing
        with patch("reflector.pipelines.main_file_pipeline.av.open") as mock_av:
            # Mock container for checking video streams
            mock_container = MagicMock()
            mock_container.streams.video = []  # No video streams
            mock_container.close = MagicMock()

            # Mock container for decoding audio frames
            mock_decode_container = MagicMock()
            mock_decode_container.decode.return_value = iter([MagicMock()])
            mock_decode_container.close = MagicMock()

            # Return different containers for different calls
            mock_av.side_effect = [mock_container, mock_decode_container]

            # Run the pipeline
            await pipeline.process(upload_path)

        # Verify the pipeline completed without diarization
        assert mock_storage._put_file.called
        assert mock_waveform_processor.flush.called
        assert mock_topic_detector.push.called
        assert mock_title_processor.push.called
        assert mock_summary_processor.push.called

        print("PipelineMainFile no-diarization test completed successfully!")


@pytest.mark.asyncio
async def test_task_pipeline_file_process(
    tmpdir,
    mock_transcript_in_db,
    dummy_file_transcript,
    dummy_file_diarization,
    mock_storage,
    mock_audio_file_writer,
    mock_waveform_processor,
    mock_topic_detector,
    mock_title_processor,
    mock_summary_processor,
):
    """
    Test the Celery task entry point for file pipeline processing.
    """
    # The Celery task is a thin wrapper; the pipeline is exercised directly below

    # Create a test audio file in the transcript's data path
    test_audio_path = Path(__file__).parent / "records" / "test_mathieu_hello.wav"
    upload_path = mock_transcript_in_db.data_path / "upload.wav"
    upload_path.write_bytes(test_audio_path.read_bytes())

    # Also create the audio.mp3 file
    mp3_path = mock_transcript_in_db.data_path / "audio.mp3"
    mp3_path.write_bytes(b"mock_mp3_data")

    # Mock av.open for audio processing
    with patch("reflector.pipelines.main_file_pipeline.av.open") as mock_av:
        # Mock container for checking video streams
        mock_container = MagicMock()
        mock_container.streams.video = []  # No video streams
        mock_container.close = MagicMock()

        # Mock container for decoding audio frames
        mock_decode_container = MagicMock()
        mock_decode_container.decode.return_value = iter([MagicMock()])
        mock_decode_container.close = MagicMock()

        # Return different containers for different calls
        mock_av.side_effect = [mock_container, mock_decode_container]

        # The task function is wrapped by the asynctask decorator, so we test
        # the pipeline directly since the task is just a thin wrapper
        from reflector.pipelines.main_file_pipeline import PipelineMainFile

        pipeline = PipelineMainFile(transcript_id=mock_transcript_in_db.id)
        await pipeline.process(upload_path)

    # Verify the pipeline executed end to end
    assert mock_audio_file_writer.push.called
    assert mock_audio_file_writer.flush.called
    assert mock_storage._put_file.called
    assert mock_waveform_processor.flush.called
    assert mock_topic_detector.push.called
    assert mock_title_processor.push.called
    assert mock_summary_processor.push.called

    print("task_pipeline_file_process test completed successfully!")


@pytest.mark.asyncio
async def test_pipeline_file_process_no_transcript():
    """
    Test the pipeline with a non-existent transcript.
    """
    from reflector.pipelines.main_file_pipeline import PipelineMainFile

    # Mock the controller to return None (transcript not found)
    with patch(
        "reflector.pipelines.main_file_pipeline.transcripts_controller.get_by_id"
    ) as mock_get:
        mock_get.return_value = None

        pipeline = PipelineMainFile(transcript_id=str(uuid4()))

        # Should raise an exception for missing transcript when get_transcript is called
        with pytest.raises(Exception, match="Transcript not found"):
            await pipeline.get_transcript()


@pytest.mark.asyncio
async def test_pipeline_file_process_no_audio_file(
    mock_transcript_in_db,
):
    """
    Test the pipeline when no audio file is found.
    """
    from reflector.pipelines.main_file_pipeline import PipelineMainFile

    # Don't create any audio files in the data path
    # The pipeline's process should handle missing files gracefully

    pipeline = PipelineMainFile(transcript_id=mock_transcript_in_db.id)

    # Try to process a non-existent file
    non_existent_path = mock_transcript_in_db.data_path / "nonexistent.wav"

    # This should fail when trying to open the file with av
    with pytest.raises(Exception):
        await pipeline.process(non_existent_path)

265
server/tests/test_processors_modal.py
Normal file
@@ -0,0 +1,265 @@
"""
Tests for Modal-based processors using pytest-recording for HTTP recording/playback

Note: these tests require a full Modal configuration to be able to record
VCR cassettes.

Configuration required for the first recording:
- TRANSCRIPT_BACKEND=modal
- TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-parakeet-web.modal.run
- TRANSCRIPT_MODAL_API_KEY=xxxxx
- DIARIZATION_BACKEND=modal
- DIARIZATION_URL=https://xxxxx--reflector-diarizer-web.modal.run
- DIARIZATION_MODAL_API_KEY=xxxxx
"""

from unittest.mock import patch

import pytest

from reflector.processors.file_diarization import FileDiarizationInput
from reflector.processors.file_diarization_modal import FileDiarizationModalProcessor
from reflector.processors.file_transcript import FileTranscriptInput
from reflector.processors.file_transcript_modal import FileTranscriptModalProcessor
from reflector.processors.transcript_diarization_assembler import (
    TranscriptDiarizationAssemblerInput,
    TranscriptDiarizationAssemblerProcessor,
)
from reflector.processors.types import DiarizationSegment, Transcript, Word

# Public test audio file hosted on S3 specifically for reflector pytests
TEST_AUDIO_URL = (
    "https://reflector-github-pytest.s3.us-east-1.amazonaws.com/test_mathieu_hello.mp3"
)


@pytest.mark.asyncio
async def test_file_transcript_modal_processor_missing_url():
    with patch("reflector.processors.file_transcript_modal.settings") as mock_settings:
        mock_settings.TRANSCRIPT_URL = None
        with pytest.raises(Exception, match="TRANSCRIPT_URL required"):
            FileTranscriptModalProcessor(modal_api_key="test-api-key")


@pytest.mark.asyncio
async def test_file_diarization_modal_processor_missing_url():
    with patch("reflector.processors.file_diarization_modal.settings") as mock_settings:
        mock_settings.DIARIZATION_URL = None
        with pytest.raises(Exception, match="DIARIZATION_URL required"):
            FileDiarizationModalProcessor(modal_api_key="test-api-key")


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_file_diarization_modal_processor(vcr):
    """Test FileDiarizationModalProcessor using public audio URL and Modal API"""
    from reflector.settings import settings

    processor = FileDiarizationModalProcessor(
        modal_api_key=settings.DIARIZATION_MODAL_API_KEY
    )

    test_input = FileDiarizationInput(audio_url=TEST_AUDIO_URL)
    result = await processor._diarize(test_input)

    # Verify the result structure
    assert result is not None
    assert hasattr(result, "diarization")
    assert isinstance(result.diarization, list)

    # Check structure of each diarization segment
    for segment in result.diarization:
        assert "start" in segment
        assert "end" in segment
        assert "speaker" in segment
        assert isinstance(segment["start"], (int, float))
        assert isinstance(segment["end"], (int, float))
        assert isinstance(segment["speaker"], int)
        # Basic sanity check - start should be before end
        assert segment["start"] < segment["end"]


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_file_transcript_modal_processor():
    """Test FileTranscriptModalProcessor using public audio URL and Modal API"""
    from reflector.settings import settings

    processor = FileTranscriptModalProcessor(
        modal_api_key=settings.TRANSCRIPT_MODAL_API_KEY
    )

    test_input = FileTranscriptInput(
        audio_url=TEST_AUDIO_URL,
        language="en",
    )

    # This will record the HTTP interaction on first run, replay on subsequent runs
    result = await processor._transcript(test_input)

    # Verify the result structure
    assert result is not None
    assert hasattr(result, "words")
    assert isinstance(result.words, list)

    # Check structure of each word if present
    for word in result.words:
        assert hasattr(word, "text")
        assert hasattr(word, "start")
        assert hasattr(word, "end")
        assert isinstance(word.start, (int, float))
        assert isinstance(word.end, (int, float))
        assert isinstance(word.text, str)
        # Basic sanity check - start should be before or equal to end
        assert word.start <= word.end


@pytest.mark.asyncio
async def test_transcript_diarization_assembler_processor():
    """Test TranscriptDiarizationAssemblerProcessor without VCR (no HTTP requests)"""
    # Create test transcript with words
    words = [
        Word(text="Hello", start=0.0, end=1.0, speaker=0),
        Word(text=" ", start=1.0, end=1.1, speaker=0),
        Word(text="world", start=1.1, end=2.0, speaker=0),
        Word(text=".", start=2.0, end=2.1, speaker=0),
        Word(text=" ", start=2.1, end=2.2, speaker=0),
        Word(text="How", start=2.2, end=2.8, speaker=0),
        Word(text=" ", start=2.8, end=2.9, speaker=0),
        Word(text="are", start=2.9, end=3.2, speaker=0),
        Word(text=" ", start=3.2, end=3.3, speaker=0),
        Word(text="you", start=3.3, end=3.8, speaker=0),
        Word(text="?", start=3.8, end=3.9, speaker=0),
    ]
    transcript = Transcript(words=words)

    # Create test diarization segments
    diarization = [
        DiarizationSegment(start=0.0, end=2.1, speaker=0),
        DiarizationSegment(start=2.1, end=3.9, speaker=1),
    ]

    # Create processor and test input
    processor = TranscriptDiarizationAssemblerProcessor()
    test_input = TranscriptDiarizationAssemblerInput(
        transcript=transcript, diarization=diarization
    )

    # Track emitted results
    emitted_results = []

    async def capture_result(result):
        emitted_results.append(result)

    processor.on(capture_result)

    # Process the input
    await processor.push(test_input)

    # Verify result was emitted
    assert len(emitted_results) == 1
    result = emitted_results[0]

    # Verify result structure
    assert isinstance(result, Transcript)
    assert len(result.words) == len(words)

    # Verify speaker assignments were applied
    # Words 0-3 (indices) should be speaker 0 (time 0.0-2.0)
    # Words 4-10 (indices) should be speaker 1 (time 2.1-3.9)
    for i in range(4):  # First 4 words (Hello, space, world, .)
        assert (
            result.words[i].speaker == 0
        ), f"Word {i} '{result.words[i].text}' should be speaker 0, got {result.words[i].speaker}"

    for i in range(4, 11):  # Remaining words (space, How, space, are, space, you, ?)
        assert (
            result.words[i].speaker == 1
        ), f"Word {i} '{result.words[i].text}' should be speaker 1, got {result.words[i].speaker}"


@pytest.mark.asyncio
async def test_transcript_diarization_assembler_no_diarization():
    """Test TranscriptDiarizationAssemblerProcessor with no diarization data"""
    # Create test transcript
    words = [Word(text="Hello", start=0.0, end=1.0, speaker=0)]
    transcript = Transcript(words=words)

    # Create processor and test input with empty diarization
    processor = TranscriptDiarizationAssemblerProcessor()
    test_input = TranscriptDiarizationAssemblerInput(
        transcript=transcript, diarization=[]
    )

    # Track emitted results
    emitted_results = []

    async def capture_result(result):
        emitted_results.append(result)

    processor.on(capture_result)

    # Process the input
    await processor.push(test_input)

    # Verify original transcript was returned unchanged
    assert len(emitted_results) == 1
    result = emitted_results[0]
    assert result is transcript  # Should be the same object
    assert result.words[0].speaker == 0  # Original speaker unchanged


@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_full_modal_pipeline_integration(vcr):
    """Integration test: Transcription -> Diarization -> Assembly

    This test demonstrates the full pipeline:
    1. Run transcription via Modal
    2. Run diarization via Modal
    3. Assemble transcript with diarization
    """
    from reflector.settings import settings

    # Step 1: Transcription
    transcript_processor = FileTranscriptModalProcessor(
        modal_api_key=settings.TRANSCRIPT_MODAL_API_KEY
    )
    transcript_input = FileTranscriptInput(audio_url=TEST_AUDIO_URL, language="en")
    transcript = await transcript_processor._transcript(transcript_input)

    # Step 2: Diarization
    diarization_processor = FileDiarizationModalProcessor(
        modal_api_key=settings.DIARIZATION_MODAL_API_KEY
    )
    diarization_input = FileDiarizationInput(audio_url=TEST_AUDIO_URL)
    diarization_result = await diarization_processor._diarize(diarization_input)

    # Step 3: Assembly
    assembler = TranscriptDiarizationAssemblerProcessor()
    assembly_input = TranscriptDiarizationAssemblerInput(
        transcript=transcript, diarization=diarization_result.diarization
    )

    # Track assembled result
    assembled_results = []

    async def capture_result(result):
        assembled_results.append(result)

    assembler.on(capture_result)

    await assembler.push(assembly_input)

    # Verify the full pipeline worked
    assert len(assembled_results) == 1
|
||||
final_transcript = assembled_results[0]
|
||||
|
||||
# Verify the final transcript has the original words with updated speaker info
|
||||
assert isinstance(final_transcript, Transcript)
|
||||
assert len(final_transcript.words) == len(transcript.words)
|
||||
assert len(final_transcript.words) > 0
|
||||
|
||||
# Verify some words have been assigned speakers from diarization
|
||||
speakers_found = set(word.speaker for word in final_transcript.words)
|
||||
assert len(speakers_found) > 0 # At least some speaker assignments
|
||||
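Note: the assembler tests above pin down the expected contract, each word inherits the speaker of the diarization segment it falls in, and an empty diarization list returns the transcript unchanged. A minimal standalone sketch of that assignment logic, with a hypothetical Word type and an assumed midpoint heuristic (the real processor may resolve boundary overlaps differently):

from dataclasses import dataclass


@dataclass
class Word:
    text: str
    start: float
    end: float
    speaker: int


def assign_speakers(words: list[Word], diarization: list[dict]) -> list[Word]:
    """Give each word the speaker of the segment containing its midpoint."""
    if not diarization:
        return words  # mirrors the no-diarization test: input returned as-is
    for word in words:
        midpoint = (word.start + word.end) / 2
        for segment in diarization:
            if segment["start"] <= midpoint < segment["end"]:
                word.speaker = segment["speaker"]
                break
    return words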
@@ -1,39 +0,0 @@
import pytest


@pytest.mark.asyncio
async def test_basic_process(
    dummy_transcript,
    dummy_llm,
    dummy_processors,
):
    # goal is to start the server, and send rtc audio to it
    # validate the events received
    from pathlib import Path

    from reflector.settings import settings
    from reflector.tools.process import process_audio_file

    # LLM_BACKEND no longer exists in settings
    # settings.LLM_BACKEND = "test"
    settings.TRANSCRIPT_BACKEND = "whisper"

    # event callback
    marks = {}

    async def event_callback(event):
        if event.processor not in marks:
            marks[event.processor] = 0
        marks[event.processor] += 1

    # invoke the process and capture events
    path = Path(__file__).parent / "records" / "test_mathieu_hello.wav"
    await process_audio_file(path.as_posix(), event_callback)
    print(marks)

    # validate the events
    assert marks["TranscriptLinerProcessor"] == 1
    assert marks["TranscriptTranslatorPassthroughProcessor"] == 1
    assert marks["TranscriptTopicDetectorProcessor"] == 1
    assert marks["TranscriptFinalSummaryProcessor"] == 1
    assert marks["TranscriptFinalTitleProcessor"] == 1
@@ -2,13 +2,18 @@

import json
from datetime import datetime, timezone
from unittest.mock import AsyncMock, patch

import pytest
from pydantic import ValidationError

from reflector.db import get_database
from reflector.db.search import SearchParameters, search_controller
from reflector.db.transcripts import transcripts
from reflector.db.search import (
    SearchController,
    SearchParameters,
    SearchResult,
    search_controller,
)
from reflector.db.transcripts import SourceKind, transcripts


@pytest.mark.asyncio
@@ -18,39 +23,137 @@ async def test_search_postgresql_only():
    assert results == []
    assert total == 0

    try:
        SearchParameters(query_text="")
        assert False, "Should have raised validation error"
    except ValidationError:
        pass  # Expected

    # Test that whitespace query raises validation error
    try:
        SearchParameters(query_text="   ")
        assert False, "Should have raised validation error"
    except ValidationError:
        pass  # Expected
    params_empty = SearchParameters(query_text=None)
    results_empty, total_empty = await search_controller.search_transcripts(
        params_empty
    )
    assert isinstance(results_empty, list)
    assert isinstance(total_empty, int)


@pytest.mark.asyncio
async def test_search_input_validation():
    try:
        SearchParameters(query_text="")
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass  # Expected
async def test_search_with_empty_query():
    """Test that empty query returns all transcripts."""
    params = SearchParameters(query_text=None)
    results, total = await search_controller.search_transcripts(params)

    assert isinstance(results, list)
    assert isinstance(total, int)
    if len(results) > 1:
        for i in range(len(results) - 1):
            assert results[i].created_at >= results[i + 1].created_at


@pytest.mark.asyncio
async def test_empty_transcript_title_only_match():
    """Test that transcripts with title-only matches return empty snippets."""
    test_id = "test-empty-9b3f2a8d"

    # Test that whitespace query raises validation error
    try:
        SearchParameters(query_text="  \t\n  ")
        assert False, "Should have raised ValidationError"
    except ValidationError:
        pass  # Expected
    await get_database().execute(
        transcripts.delete().where(transcripts.c.id == test_id)
    )

    test_data = {
        "id": test_id,
        "name": "Empty Transcript",
        "title": "Empty Meeting",
        "status": "ended",
        "locked": False,
        "duration": 0.0,
        "created_at": datetime.now(timezone.utc),
        "short_summary": None,
        "long_summary": None,
        "topics": json.dumps([]),
        "events": json.dumps([]),
        "participants": json.dumps([]),
        "source_language": "en",
        "target_language": "en",
        "reviewed": False,
        "audio_location": "local",
        "share_mode": "private",
        "source_kind": "room",
        "webvtt": None,
        "user_id": "test-user-1",
    }

    await get_database().execute(transcripts.insert().values(**test_data))

    params = SearchParameters(query_text="empty", user_id="test-user-1")
    results, total = await search_controller.search_transcripts(params)

    assert total >= 1
    found = next((r for r in results if r.id == test_id), None)
    assert found is not None, "Should find transcript by title match"
    assert found.search_snippets == []
    assert found.total_match_count == 0

    finally:
        await get_database().execute(
            transcripts.delete().where(transcripts.c.id == test_id)
        )
        await get_database().disconnect()


@pytest.mark.asyncio
async def test_search_with_long_summary():
    """Test that long_summary content is searchable."""
    test_id = "test-long-summary-8a9f3c2d"

    try:
        await get_database().execute(
            transcripts.delete().where(transcripts.c.id == test_id)
        )

        test_data = {
            "id": test_id,
            "name": "Test Long Summary",
            "title": "Regular Meeting",
            "status": "ended",
            "locked": False,
            "duration": 1800.0,
            "created_at": datetime.now(timezone.utc),
            "short_summary": "Brief overview",
            "long_summary": "Detailed discussion about quantum computing applications and blockchain technology integration",
            "topics": json.dumps([]),
            "events": json.dumps([]),
            "participants": json.dumps([]),
            "source_language": "en",
            "target_language": "en",
            "reviewed": False,
            "audio_location": "local",
            "share_mode": "private",
            "source_kind": "room",
            "webvtt": """WEBVTT

00:00:00.000 --> 00:00:10.000
Basic meeting content without special keywords.""",
            "user_id": "test-user-2",
        }

        await get_database().execute(transcripts.insert().values(**test_data))

        params = SearchParameters(query_text="quantum computing", user_id="test-user-2")
        results, total = await search_controller.search_transcripts(params)

        assert total >= 1
        found = any(r.id == test_id for r in results)
        assert found, "Should find transcript by long_summary content"

        test_result = next((r for r in results if r.id == test_id), None)
        assert test_result
        assert len(test_result.search_snippets) > 0
        assert "quantum computing" in test_result.search_snippets[0].lower()

    finally:
        await get_database().execute(
            transcripts.delete().where(transcripts.c.id == test_id)
        )
        await get_database().disconnect()


@pytest.mark.asyncio
async def test_postgresql_search_with_data():
    # collision is improbable
    test_id = "test-search-e2e-7f3a9b2c"

    try:
@@ -62,7 +165,7 @@ async def test_postgresql_search_with_data():
            "id": test_id,
            "name": "Test Search Transcript",
            "title": "Engineering Planning Meeting Q4 2024",
            "status": "completed",
            "status": "ended",
            "locked": False,
            "duration": 1800.0,
            "created_at": datetime.now(timezone.utc),
@@ -90,48 +193,49 @@ The search feature should support complex queries with ranking.

00:00:30.000 --> 00:00:40.000
We need to implement PostgreSQL tsvector for better performance.""",
            "user_id": "test-user-3",
        }

        await get_database().execute(transcripts.insert().values(**test_data))

        # Test 1: Search for a word in title
        params = SearchParameters(query_text="planning")
        params = SearchParameters(query_text="planning", user_id="test-user-3")
        results, total = await search_controller.search_transcripts(params)
        assert total >= 1
        found = any(r.id == test_id for r in results)
        assert found, "Should find test transcript by title word"

        # Test 2: Search for a word in webvtt content
        params = SearchParameters(query_text="tsvector")
        params = SearchParameters(query_text="tsvector", user_id="test-user-3")
        results, total = await search_controller.search_transcripts(params)
        assert total >= 1
        found = any(r.id == test_id for r in results)
        assert found, "Should find test transcript by webvtt content"

        # Test 3: Search with multiple words
        params = SearchParameters(query_text="engineering planning")
        params = SearchParameters(
            query_text="engineering planning", user_id="test-user-3"
        )
        results, total = await search_controller.search_transcripts(params)
        assert total >= 1
        found = any(r.id == test_id for r in results)
        assert found, "Should find test transcript by multiple words"

        # Test 4: Verify SearchResult structure
        test_result = next((r for r in results if r.id == test_id), None)
        if test_result:
            assert test_result.title == "Engineering Planning Meeting Q4 2024"
            assert test_result.status == "completed"
            assert test_result.status == "ended"
            assert test_result.duration == 1800.0
            assert 0 <= test_result.rank <= 1, "Rank should be normalized to 0-1"

        # Test 5: Search with OR operator
        params = SearchParameters(query_text="tsvector OR nosuchword")
        params = SearchParameters(
            query_text="tsvector OR nosuchword", user_id="test-user-3"
        )
        results, total = await search_controller.search_transcripts(params)
        assert total >= 1
        found = any(r.id == test_id for r in results)
        assert found, "Should find test transcript with OR query"

        # Test 6: Quoted phrase search
        params = SearchParameters(query_text='"full-text search"')
        params = SearchParameters(
            query_text='"full-text search"', user_id="test-user-3"
        )
        results, total = await search_controller.search_transcripts(params)
        assert total >= 1
        found = any(r.id == test_id for r in results)
@@ -142,3 +246,240 @@ We need to implement PostgreSQL tsvector for better performance.""",
            transcripts.delete().where(transcripts.c.id == test_id)
        )
        await get_database().disconnect()


@pytest.fixture
def sample_search_params():
    """Create sample search parameters for testing."""
    return SearchParameters(
        query_text="test query",
        limit=20,
        offset=0,
        user_id="test-user",
        room_id="room1",
    )


@pytest.fixture
def mock_db_result():
    """Create a mock database result."""
    return {
        "id": "test-transcript-id",
        "title": "Test Transcript",
        "created_at": datetime(2024, 6, 15, tzinfo=timezone.utc),
        "duration": 3600.0,
        "status": "ended",
        "user_id": "test-user",
        "room_id": "room1",
        "source_kind": SourceKind.LIVE,
        "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\nThis is a test transcript",
        "rank": 0.95,
    }


class TestSearchParameters:
    """Test SearchParameters model validation and functionality."""

    def test_search_parameters_with_available_filters(self):
        """Test creating SearchParameters with currently available filter options."""
        params = SearchParameters(
            query_text="search term",
            limit=50,
            offset=10,
            user_id="user123",
            room_id="room1",
        )

        assert params.query_text == "search term"
        assert params.limit == 50
        assert params.offset == 10
        assert params.user_id == "user123"
        assert params.room_id == "room1"

    def test_search_parameters_defaults(self):
        """Test SearchParameters with default values."""
        params = SearchParameters(query_text="test")

        assert params.query_text == "test"
        assert params.limit == 20
        assert params.offset == 0
        assert params.user_id is None
        assert params.room_id is None


class TestSearchControllerFilters:
    """Test SearchController functionality with various filters."""

    @pytest.mark.asyncio
    async def test_search_with_source_kind_filter(self):
        """Test search filtering by source_kind."""
        controller = SearchController()
        with (
            patch("reflector.db.search.is_postgresql", return_value=True),
            patch("reflector.db.search.get_database") as mock_db,
        ):
            mock_db.return_value.fetch_all = AsyncMock(return_value=[])
            mock_db.return_value.fetch_val = AsyncMock(return_value=0)

            params = SearchParameters(query_text="test", source_kind=SourceKind.LIVE)

            results, total = await controller.search_transcripts(params)

            assert results == []
            assert total == 0

            mock_db.return_value.fetch_all.assert_called_once()

    @pytest.mark.asyncio
    async def test_search_with_single_room_id(self):
        """Test search filtering by single room ID (currently supported)."""
        controller = SearchController()
        with (
            patch("reflector.db.search.is_postgresql", return_value=True),
            patch("reflector.db.search.get_database") as mock_db,
        ):
            mock_db.return_value.fetch_all = AsyncMock(return_value=[])
            mock_db.return_value.fetch_val = AsyncMock(return_value=0)

            params = SearchParameters(
                query_text="test",
                room_id="room1",
            )

            results, total = await controller.search_transcripts(params)

            assert results == []
            assert total == 0
            mock_db.return_value.fetch_all.assert_called_once()

    @pytest.mark.asyncio
    async def test_search_result_includes_available_fields(self, mock_db_result):
        """Test that search results include available fields like source_kind."""
        controller = SearchController()
        with (
            patch("reflector.db.search.is_postgresql", return_value=True),
            patch("reflector.db.search.get_database") as mock_db,
        ):

            class MockRow:
                def __init__(self, data):
                    self._data = data
                    self._mapping = data

                def __iter__(self):
                    return iter(self._data.items())

                def __getitem__(self, key):
                    return self._data[key]

                def keys(self):
                    return self._data.keys()

            mock_row = MockRow(mock_db_result)

            mock_db.return_value.fetch_all = AsyncMock(return_value=[mock_row])
            mock_db.return_value.fetch_val = AsyncMock(return_value=1)

            params = SearchParameters(query_text="test")

            results, total = await controller.search_transcripts(params)

            assert total == 1
            assert len(results) == 1

            result = results[0]
            assert isinstance(result, SearchResult)
            assert result.id == "test-transcript-id"
            assert result.title == "Test Transcript"
            assert result.rank == 0.95


class TestSearchEndpointParsing:
    """Test parameter parsing in the search endpoint."""

    def test_parse_comma_separated_room_ids(self):
        """Test parsing comma-separated room IDs."""
        room_ids_str = "room1,room2,room3"
        parsed = [rid.strip() for rid in room_ids_str.split(",") if rid.strip()]
        assert parsed == ["room1", "room2", "room3"]

        room_ids_str = "room1, room2 , room3"
        parsed = [rid.strip() for rid in room_ids_str.split(",") if rid.strip()]
        assert parsed == ["room1", "room2", "room3"]

        room_ids_str = "room1,,room3,"
        parsed = [rid.strip() for rid in room_ids_str.split(",") if rid.strip()]
        assert parsed == ["room1", "room3"]

    def test_parse_source_kind(self):
        """Test parsing source_kind values."""
        for kind_str in ["live", "file", "room"]:
            parsed = SourceKind(kind_str)
            assert parsed == SourceKind(kind_str)

        with pytest.raises(ValueError):
            SourceKind("invalid_kind")


class TestSearchResultModel:
    """Test SearchResult model and serialization."""

    def test_search_result_with_available_fields(self):
        """Test SearchResult model with currently available fields populated."""
        result = SearchResult(
            id="test-id",
            title="Test Title",
            user_id="user-123",
            room_id="room-456",
            source_kind=SourceKind.ROOM,
            created_at=datetime(2024, 6, 15, tzinfo=timezone.utc),
            status="ended",
            rank=0.85,
            duration=1800.5,
            search_snippets=["snippet 1", "snippet 2"],
        )

        assert result.id == "test-id"
        assert result.title == "Test Title"
        assert result.user_id == "user-123"
        assert result.room_id == "room-456"
        assert result.status == "ended"
        assert result.rank == 0.85
        assert result.duration == 1800.5
        assert len(result.search_snippets) == 2

    def test_search_result_with_optional_fields_none(self):
        """Test SearchResult model with optional fields as None."""
        result = SearchResult(
            id="test-id",
            source_kind=SourceKind.FILE,
            created_at=datetime.now(timezone.utc),
            status="processing",
            rank=0.5,
            search_snippets=[],
            title=None,
            user_id=None,
            room_id=None,
            duration=None,
        )

        assert result.title is None
        assert result.user_id is None
        assert result.room_id is None
        assert result.duration is None

    def test_search_result_datetime_field(self):
        """Test that SearchResult accepts datetime field."""
        result = SearchResult(
            id="test-id",
            source_kind=SourceKind.LIVE,
            created_at=datetime(2024, 6, 15, 12, 30, 45, tzinfo=timezone.utc),
            status="ended",
            rank=0.9,
            duration=None,
            search_snippets=[],
        )

        assert result.created_at == datetime(
            2024, 6, 15, 12, 30, 45, tzinfo=timezone.utc
        )
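Note: the tests above exercise OR operators, quoted phrases, and a rank normalized to 0-1, which is the behavior PostgreSQL's websearch-style full-text search provides. A rough sketch of the kind of query a controller like this might build; the table layout, column choices, and rank normalization here are assumptions for illustration, not the project's actual SQL:

# Hypothetical illustration of websearch-style full-text search in PostgreSQL;
# the real SearchController's SQL and schema may differ.
SEARCH_SQL = """
SELECT id, title, status, duration,
       ts_rank(
           to_tsvector('english', coalesce(title, '') || ' ' || coalesce(webvtt, '')),
           websearch_to_tsquery('english', :query),
           32  -- normalization flag: maps rank into 0..1 as rank/(rank+1)
       ) AS rank
FROM transcripts
WHERE to_tsvector('english', coalesce(title, '') || ' ' || coalesce(webvtt, ''))
      @@ websearch_to_tsquery('english', :query)
ORDER BY rank DESC
LIMIT :limit OFFSET :offset
"""
# websearch_to_tsquery understands 'OR' and quoted phrases ("full-text search"),
# matching the behavior asserted in Test 5 and Test 6 above.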
166 server/tests/test_search_long_summary.py Normal file
@@ -0,0 +1,166 @@
"""Tests for long_summary in search functionality."""
|
||||
|
||||
import json
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import pytest
|
||||
|
||||
from reflector.db import get_database
|
||||
from reflector.db.search import SearchParameters, search_controller
|
||||
from reflector.db.transcripts import transcripts
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_long_summary_snippet_prioritization():
|
||||
"""Test that snippets from long_summary are prioritized over webvtt content."""
|
||||
test_id = "test-snippet-priority-3f9a2b8c"
|
||||
|
||||
try:
|
||||
# Clean up any existing test data
|
||||
await get_database().execute(
|
||||
transcripts.delete().where(transcripts.c.id == test_id)
|
||||
)
|
||||
|
||||
test_data = {
|
||||
"id": test_id,
|
||||
"name": "Test Snippet Priority",
|
||||
"title": "Meeting About Projects",
|
||||
"status": "ended",
|
||||
"locked": False,
|
||||
"duration": 1800.0,
|
||||
"created_at": datetime.now(timezone.utc),
|
||||
"short_summary": "Project discussion",
|
||||
"long_summary": (
|
||||
"The team discussed advanced robotics applications including "
|
||||
"autonomous navigation systems and sensor fusion techniques. "
|
||||
"Robotics development will focus on real-time processing."
|
||||
),
|
||||
"topics": json.dumps([]),
|
||||
"events": json.dumps([]),
|
||||
"participants": json.dumps([]),
|
||||
"source_language": "en",
|
||||
"target_language": "en",
|
||||
"reviewed": False,
|
||||
"audio_location": "local",
|
||||
"share_mode": "private",
|
||||
"source_kind": "room",
|
||||
"webvtt": """WEBVTT
|
||||
|
||||
00:00:00.000 --> 00:00:10.000
|
||||
We talked about many different topics today.
|
||||
|
||||
00:00:10.000 --> 00:00:20.000
|
||||
The robotics project is making good progress.
|
||||
|
||||
00:00:20.000 --> 00:00:30.000
|
||||
We need to consider various implementation approaches.""",
|
||||
"user_id": "test-user-priority",
|
||||
}
|
||||
|
||||
await get_database().execute(transcripts.insert().values(**test_data))
|
||||
|
||||
# Search for "robotics" which appears in both long_summary and webvtt
|
||||
params = SearchParameters(query_text="robotics", user_id="test-user-priority")
|
||||
results, total = await search_controller.search_transcripts(params)
|
||||
|
||||
assert total >= 1
|
||||
test_result = next((r for r in results if r.id == test_id), None)
|
||||
assert test_result, "Should find the test transcript"
|
||||
|
||||
snippets = test_result.search_snippets
|
||||
assert len(snippets) > 0, "Should have at least one snippet"
|
||||
|
||||
# The first snippets should be from long_summary (more detailed content)
|
||||
first_snippet = snippets[0].lower()
|
||||
assert (
|
||||
"advanced robotics" in first_snippet or "autonomous" in first_snippet
|
||||
), f"First snippet should be from long_summary with detailed content. Got: {snippets[0]}"
|
||||
|
||||
# With max 3 snippets, we should get both from long_summary and webvtt
|
||||
assert len(snippets) <= 3, "Should respect max snippets limit"
|
||||
|
||||
# All snippets should contain the search term
|
||||
for snippet in snippets:
|
||||
assert (
|
||||
"robotics" in snippet.lower()
|
||||
), f"Snippet should contain search term: {snippet}"
|
||||
|
||||
finally:
|
||||
await get_database().execute(
|
||||
transcripts.delete().where(transcripts.c.id == test_id)
|
||||
)
|
||||
await get_database().disconnect()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_long_summary_only_search():
|
||||
"""Test searching for content that only exists in long_summary."""
|
||||
test_id = "test-long-only-8b3c9f2a"
|
||||
|
||||
try:
|
||||
await get_database().execute(
|
||||
transcripts.delete().where(transcripts.c.id == test_id)
|
||||
)
|
||||
|
||||
test_data = {
|
||||
"id": test_id,
|
||||
"name": "Test Long Only",
|
||||
"title": "Standard Meeting",
|
||||
"status": "ended",
|
||||
"locked": False,
|
||||
"duration": 1800.0,
|
||||
"created_at": datetime.now(timezone.utc),
|
||||
"short_summary": "Team sync",
|
||||
"long_summary": (
|
||||
"Detailed analysis of cryptocurrency market trends and "
|
||||
"decentralized finance protocols. Discussion included "
|
||||
"yield farming strategies and liquidity pool mechanics."
|
||||
),
|
||||
"topics": json.dumps([]),
|
||||
"events": json.dumps([]),
|
||||
"participants": json.dumps([]),
|
||||
"source_language": "en",
|
||||
"target_language": "en",
|
||||
"reviewed": False,
|
||||
"audio_location": "local",
|
||||
"share_mode": "private",
|
||||
"source_kind": "room",
|
||||
"webvtt": """WEBVTT
|
||||
|
||||
00:00:00.000 --> 00:00:10.000
|
||||
Team meeting about general project updates.
|
||||
|
||||
00:00:10.000 --> 00:00:20.000
|
||||
Discussion of timeline and deliverables.""",
|
||||
"user_id": "test-user-long",
|
||||
}
|
||||
|
||||
await get_database().execute(transcripts.insert().values(**test_data))
|
||||
|
||||
# Search for terms only in long_summary
|
||||
params = SearchParameters(query_text="cryptocurrency", user_id="test-user-long")
|
||||
results, total = await search_controller.search_transcripts(params)
|
||||
|
||||
found = any(r.id == test_id for r in results)
|
||||
assert found, "Should find transcript by long_summary-only content"
|
||||
|
||||
test_result = next((r for r in results if r.id == test_id), None)
|
||||
assert test_result
|
||||
assert len(test_result.search_snippets) > 0
|
||||
|
||||
# Verify the snippet is about cryptocurrency
|
||||
snippet = test_result.search_snippets[0].lower()
|
||||
assert "cryptocurrency" in snippet, "Snippet should contain the search term"
|
||||
|
||||
# Search for "yield farming" - a more specific term
|
||||
params2 = SearchParameters(query_text="yield farming", user_id="test-user-long")
|
||||
results2, total2 = await search_controller.search_transcripts(params2)
|
||||
|
||||
found2 = any(r.id == test_id for r in results2)
|
||||
assert found2, "Should find transcript by specific long_summary phrase"
|
||||
|
||||
finally:
|
||||
await get_database().execute(
|
||||
transcripts.delete().where(transcripts.c.id == test_id)
|
||||
)
|
||||
await get_database().disconnect()
|
||||
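Note: the prioritization these two tests assert, summary-derived snippets first and webvtt snippets filling the remainder up to a cap, can be pictured with a small helper. This is a sketch of the contract only, not the project's SnippetGenerator.combine_sources implementation:

def combine_snippets(
    summary_snippets: list[str],
    webvtt_snippets: list[str],
    max_total: int = 3,
) -> list[str]:
    """Summary snippets come first; webvtt snippets fill the remaining slots."""
    combined = summary_snippets[:max_total]
    remaining = max_total - len(combined)
    if remaining > 0:
        combined += webvtt_snippets[:remaining]
    return combined


# e.g. two summary hits and two webvtt hits, capped at three:
assert combine_snippets(["s1", "s2"], ["w1", "w2"]) == ["s1", "s2", "w1"]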
@@ -1,6 +1,12 @@
"""Unit tests for search snippet generation."""

from reflector.db.search import SearchController
import pytest

from reflector.db.search import (
    SnippetCandidate,
    SnippetGenerator,
    WebVTTProcessor,
)


class TestExtractWebVTT:
@@ -16,7 +22,7 @@ class TestExtractWebVTT:
00:00:10.000 --> 00:00:20.000
<v Speaker1>Indeed it is a test of WebVTT parsing.
"""
        result = SearchController._extract_webvtt_text(webvtt)
        result = WebVTTProcessor.extract_text(webvtt)
        assert "Hello world, this is a test" in result
        assert "Indeed it is a test" in result
        assert "<v Speaker" not in result
@@ -25,12 +31,11 @@ class TestExtractWebVTT:

    def test_extract_empty_webvtt(self):
        """Test empty WebVTT returns empty string."""
        assert SearchController._extract_webvtt_text("") == ""
        assert SearchController._extract_webvtt_text(None) == ""
        assert WebVTTProcessor.extract_text("") == ""

    def test_extract_malformed_webvtt(self):
        """Test malformed WebVTT returns empty string."""
        result = SearchController._extract_webvtt_text("Not a valid WebVTT")
        result = WebVTTProcessor.extract_text("Not a valid WebVTT")
        assert result == ""


@@ -39,8 +44,7 @@ class TestGenerateSnippets:

    def test_multiple_matches(self):
        """Test finding multiple occurrences of search term in long text."""
        # Create text with Python mentions far apart to get separate snippets
        separator = " This is filler text. " * 20  # ~400 chars of padding
        separator = " This is filler text. " * 20
        text = (
            "Python is great for machine learning."
            + separator
@@ -51,18 +55,16 @@ class TestGenerateSnippets:
            + "The Python community is very supportive."
        )

        snippets = SearchController._generate_snippets(text, "Python")
        # With enough separation, we should get multiple snippets
        assert len(snippets) >= 2  # At least 2 distinct snippets
        snippets = SnippetGenerator.generate(text, "Python")
        assert len(snippets) >= 2

        # Each snippet should contain "Python"
        for snippet in snippets:
            assert "python" in snippet.lower()

    def test_single_match(self):
        """Test single occurrence returns one snippet."""
        text = "This document discusses artificial intelligence and its applications."
        snippets = SearchController._generate_snippets(text, "artificial intelligence")
        snippets = SnippetGenerator.generate(text, "artificial intelligence")

        assert len(snippets) == 1
        assert "artificial intelligence" in snippets[0].lower()
@@ -70,24 +72,22 @@ class TestGenerateSnippets:
    def test_no_matches(self):
        """Test no matches returns empty list."""
        text = "This is some random text without the search term."
        snippets = SearchController._generate_snippets(text, "machine learning")
        snippets = SnippetGenerator.generate(text, "machine learning")

        assert snippets == []

    def test_case_insensitive_search(self):
        """Test search is case insensitive."""
        # Add enough text between matches to get separate snippets
        text = (
            "MACHINE LEARNING is important for modern applications. "
            + "It requires lots of data and computational resources. " * 5  # Padding
            + "It requires lots of data and computational resources. " * 5
            + "Machine Learning rocks and transforms industries. "
            + "Deep learning is a subset of it. " * 5  # More padding
            + "Deep learning is a subset of it. " * 5
            + "Finally, machine learning will shape our future."
        )

        snippets = SearchController._generate_snippets(text, "machine learning")
        snippets = SnippetGenerator.generate(text, "machine learning")

        # Should find at least 2 (might be 3 if text is long enough)
        assert len(snippets) >= 2
        for snippet in snippets:
            assert "machine learning" in snippet.lower()
@@ -95,61 +95,55 @@ class TestGenerateSnippets:
    def test_partial_match_fallback(self):
        """Test fallback to first word when exact phrase not found."""
        text = "We use machine intelligence for processing."
        snippets = SearchController._generate_snippets(text, "machine learning")
        snippets = SnippetGenerator.generate(text, "machine learning")

        # Should fall back to finding "machine"
        assert len(snippets) == 1
        assert "machine" in snippets[0].lower()

    def test_snippet_ellipsis(self):
        """Test ellipsis added for truncated snippets."""
        # Long text where match is in the middle
        text = "a " * 100 + "TARGET_WORD special content here" + " b" * 100
        snippets = SearchController._generate_snippets(text, "TARGET_WORD")
        snippets = SnippetGenerator.generate(text, "TARGET_WORD")

        assert len(snippets) == 1
        assert "..." in snippets[0]  # Should have ellipsis
        assert "..." in snippets[0]
        assert "TARGET_WORD" in snippets[0]

    def test_overlapping_snippets_deduplicated(self):
        """Test overlapping matches don't create duplicate snippets."""
        text = "test test test word" * 10  # Repeated pattern
        snippets = SearchController._generate_snippets(text, "test")
        text = "test test test word" * 10
        snippets = SnippetGenerator.generate(text, "test")

        # Should get unique snippets, not duplicates
        assert len(snippets) <= 3
        assert len(snippets) == len(set(snippets))  # All unique
        assert len(snippets) == len(set(snippets))

    def test_empty_inputs(self):
        """Test empty text or search term returns empty list."""
        assert SearchController._generate_snippets("", "search") == []
        assert SearchController._generate_snippets("text", "") == []
        assert SearchController._generate_snippets("", "") == []
        assert SnippetGenerator.generate("", "search") == []
        assert SnippetGenerator.generate("text", "") == []
        assert SnippetGenerator.generate("", "") == []

    def test_max_snippets_limit(self):
        """Test respects max_snippets parameter."""
        # Create text with well-separated occurrences
        separator = " filler " * 50  # Ensure snippets don't overlap
        text = ("Python is amazing" + separator) * 10  # 10 occurrences
        separator = " filler " * 50
        text = ("Python is amazing" + separator) * 10

        # Test with different limits
        snippets_1 = SearchController._generate_snippets(text, "Python", max_snippets=1)
        snippets_1 = SnippetGenerator.generate(text, "Python", max_snippets=1)
        assert len(snippets_1) == 1

        snippets_2 = SearchController._generate_snippets(text, "Python", max_snippets=2)
        snippets_2 = SnippetGenerator.generate(text, "Python", max_snippets=2)
        assert len(snippets_2) == 2

        snippets_5 = SearchController._generate_snippets(text, "Python", max_snippets=5)
        assert len(snippets_5) == 5  # Should get exactly 5 with enough separation
        snippets_5 = SnippetGenerator.generate(text, "Python", max_snippets=5)
        assert len(snippets_5) == 5

    def test_snippet_length(self):
        """Test snippet length is reasonable."""
        text = "word " * 200  # Long text
        snippets = SearchController._generate_snippets(text, "word")
        text = "word " * 200
        snippets = SnippetGenerator.generate(text, "word")

        for snippet in snippets:
            # Default max_length is 150 + some context
            assert len(snippet) <= 200  # Some buffer for ellipsis
            assert len(snippet) <= 200


class TestFullPipeline:
@@ -157,7 +151,6 @@ class TestFullPipeline:

    def test_webvtt_to_snippets_integration(self):
        """Test full pipeline from WebVTT to search snippets."""
        # Create WebVTT with well-separated content for multiple snippets
        webvtt = (
            """WEBVTT

@@ -182,17 +175,360 @@ class TestFullPipeline:
"""
        )

        # Extract and generate snippets
        plain_text = SearchController._extract_webvtt_text(webvtt)
        snippets = SearchController._generate_snippets(plain_text, "machine learning")
        plain_text = WebVTTProcessor.extract_text(webvtt)
        snippets = SnippetGenerator.generate(plain_text, "machine learning")

        # Should find at least 2 snippets (text might still be close together)
        assert len(snippets) >= 1  # At minimum one snippet containing matches
        assert len(snippets) <= 3  # At most 3 by default
        assert len(snippets) >= 1
        assert len(snippets) <= 3

        # No WebVTT artifacts in snippets
        for snippet in snippets:
            assert "machine learning" in snippet.lower()
            assert "<v Speaker" not in snippet
            assert "00:00" not in snippet
            assert "-->" not in snippet


class TestMultiWordQueryBehavior:
    """Tests for multi-word query behavior and exact phrase matching."""

    def test_multi_word_query_snippet_behavior(self):
        """Test that multi-word queries generate snippets based on exact phrase matching."""
        sample_text = """This is a sample transcript where user Alice is talking.
Later in the conversation, jordan mentions something important.
The user jordan collaboration was successful.
Another user named Bob joins the discussion."""

        user_snippets = SnippetGenerator.generate(sample_text, "user")
        assert len(user_snippets) == 2, "Should find 2 snippets for 'user'"

        jordan_snippets = SnippetGenerator.generate(sample_text, "jordan")
        assert len(jordan_snippets) >= 1, "Should find at least 1 snippet for 'jordan'"

        multi_word_snippets = SnippetGenerator.generate(sample_text, "user jordan")
        assert len(multi_word_snippets) == 1, (
            "Should return exactly 1 snippet for 'user jordan' "
            "(only the exact phrase match, not individual word occurrences)"
        )

        snippet = multi_word_snippets[0]
        assert (
            "user jordan" in snippet.lower()
        ), "The snippet should contain the exact phrase 'user jordan'"

        assert (
            "alice" not in snippet.lower()
        ), "The snippet should not include the first standalone 'user' with Alice"

    def test_multi_word_query_without_exact_match(self):
        """Test snippet generation when exact phrase is not found."""
        sample_text = """User Alice is here. Bob and jordan are talking.
Later jordan mentions something. The user is happy."""

        snippets = SnippetGenerator.generate(sample_text, "user jordan")

        assert (
            len(snippets) >= 1
        ), "Should find at least 1 snippet when falling back to first word"

        all_snippets_text = " ".join(snippets).lower()
        assert (
            "user" in all_snippets_text
        ), "Snippets should contain 'user' (the first word)"

    def test_exact_phrase_at_text_boundaries(self):
        """Test snippet generation when exact phrase appears at text boundaries."""

        text_start = "user jordan started the meeting. Other content here."
        snippets = SnippetGenerator.generate(text_start, "user jordan")
        assert len(snippets) == 1
        assert "user jordan" in snippets[0].lower()

        text_end = "Other content here. The meeting ended with user jordan"
        snippets = SnippetGenerator.generate(text_end, "user jordan")
        assert len(snippets) == 1
        assert "user jordan" in snippets[0].lower()

    def test_multi_word_query_matches_words_appearing_separately_and_together(self):
        """Test that multi-word queries prioritize exact phrase matches over individual word occurrences."""
        sample_text = """This is a sample transcript where user Alice is talking.
Later in the conversation, jordan mentions something important.
The user jordan collaboration was successful.
Another user named Bob joins the discussion."""

        search_query = "user jordan"
        snippets = SnippetGenerator.generate(sample_text, search_query)

        assert len(snippets) == 1, (
            f"Expected exactly 1 snippet for '{search_query}' when exact phrase exists, "
            f"got {len(snippets)}. Should ignore individual word occurrences."
        )

        snippet = snippets[0]

        assert (
            search_query in snippet.lower()
        ), f"Snippet should contain the exact phrase '{search_query}'. Got: {snippet}"

        assert (
            "jordan mentions" in snippet.lower()
        ), f"Snippet should include context before the exact phrase match. Got: {snippet}"

        assert (
            "alice" not in snippet.lower()
        ), f"Snippet should not include separate occurrences of individual words. Got: {snippet}"

        text_2 = """The alpha version was released.
Beta testing started yesterday.
The alpha beta integration is complete."""

        snippets_2 = SnippetGenerator.generate(text_2, "alpha beta")
        assert len(snippets_2) == 1, "Should return 1 snippet for exact phrase match"
        assert "alpha beta" in snippets_2[0].lower(), "Should contain exact phrase"
        assert (
            "version" not in snippets_2[0].lower()
        ), "Should not include first separate occurrence"


class TestSnippetGenerationEnhanced:
    """Additional snippet generation tests from test_search_enhancements.py."""

    def test_snippet_generation_from_webvtt(self):
        """Test snippet generation from WebVTT content."""
        webvtt_content = """WEBVTT

00:00:00.000 --> 00:00:05.000
This is the beginning of the transcript

00:00:05.000 --> 00:00:10.000
The search term appears here in the middle

00:00:10.000 --> 00:00:15.000
And this is the end of the content"""

        plain_text = WebVTTProcessor.extract_text(webvtt_content)
        snippets = SnippetGenerator.generate(plain_text, "search term")

        assert len(snippets) > 0
        assert any("search term" in snippet.lower() for snippet in snippets)

    def test_extract_webvtt_text_with_malformed_variations(self):
        """Test WebVTT extraction with various malformed content."""
        malformed_vtt = "This is not valid WebVTT content"
        result = WebVTTProcessor.extract_text(malformed_vtt)
        assert result == ""

        partial_vtt = "WEBVTT\nNo timestamps here"
        result = WebVTTProcessor.extract_text(partial_vtt)
        assert result == "" or "No timestamps" not in result


class TestPureFunctions:
    """Test the pure functions extracted for functional programming."""

    def test_find_all_matches(self):
        """Test finding all match positions in text."""
        text = "Python is great. Python is powerful. I love Python."
        matches = list(SnippetGenerator.find_all_matches(text, "Python"))
        assert matches == [0, 17, 44]

        matches = list(SnippetGenerator.find_all_matches(text, "python"))
        assert matches == [0, 17, 44]

        matches = list(SnippetGenerator.find_all_matches(text, "Ruby"))
        assert matches == []

        matches = list(SnippetGenerator.find_all_matches("", "test"))
        assert matches == []
        matches = list(SnippetGenerator.find_all_matches("test", ""))
        assert matches == []

    def test_create_snippet(self):
        """Test creating a snippet from a match position."""
        text = "This is a long text with the word Python in the middle and more text after."

        snippet = SnippetGenerator.create_snippet(text, 35, max_length=150)
        assert "Python" in snippet.text()
        assert snippet.start >= 0
        assert snippet.end <= len(text)
        assert isinstance(snippet, SnippetCandidate)

        assert len(snippet.text()) > 0
        assert snippet.start <= snippet.end

        long_text = "A" * 200
        snippet = SnippetGenerator.create_snippet(long_text, 100, max_length=50)
        assert snippet.text().startswith("...")
        assert snippet.text().endswith("...")

        snippet = SnippetGenerator.create_snippet("short text", 0, max_length=100)
        assert snippet.start == 0
        assert "short text" in snippet.text()

    def test_filter_non_overlapping(self):
        """Test filtering overlapping snippets."""
        candidates = [
            SnippetCandidate(_text="First snippet", start=0, _original_text_length=100),
            SnippetCandidate(_text="Overlapping", start=10, _original_text_length=100),
            SnippetCandidate(
                _text="Third snippet", start=40, _original_text_length=100
            ),
            SnippetCandidate(
                _text="Fourth snippet", start=65, _original_text_length=100
            ),
        ]

        filtered = list(SnippetGenerator.filter_non_overlapping(iter(candidates)))
        assert filtered == [
            "First snippet...",
            "...Third snippet...",
            "...Fourth snippet...",
        ]

        filtered = list(SnippetGenerator.filter_non_overlapping(iter([])))
        assert filtered == []

    def test_generate_integration(self):
        """Test the main SnippetGenerator.generate function."""
        text = "Machine learning is amazing. Machine learning transforms data. Learn machine learning today."

        snippets = SnippetGenerator.generate(text, "machine learning")
        assert len(snippets) <= 3
        assert all("machine learning" in s.lower() for s in snippets)

        snippets = SnippetGenerator.generate(text, "machine learning", max_snippets=2)
        assert len(snippets) <= 2

        snippets = SnippetGenerator.generate(text, "machine vision")
        assert len(snippets) > 0
        assert any("machine" in s.lower() for s in snippets)

    def test_extract_webvtt_text_basic(self):
        """Test WebVTT text extraction (basic test, full tests exist elsewhere)."""
        webvtt = """WEBVTT

00:00:00.000 --> 00:00:02.000
Hello world

00:00:02.000 --> 00:00:04.000
This is a test"""

        result = WebVTTProcessor.extract_text(webvtt)
        assert "Hello world" in result
        assert "This is a test" in result

        # Test empty input
        assert WebVTTProcessor.extract_text("") == ""
        assert WebVTTProcessor.extract_text(None) == ""

    def test_generate_webvtt_snippets(self):
        """Test generating snippets from WebVTT content."""
        webvtt = """WEBVTT

00:00:00.000 --> 00:00:02.000
Python programming is great

00:00:02.000 --> 00:00:04.000
Learn Python today"""

        snippets = WebVTTProcessor.generate_snippets(webvtt, "Python")
        assert len(snippets) > 0
        assert any("Python" in s for s in snippets)

        snippets = WebVTTProcessor.generate_snippets("", "Python")
        assert snippets == []

    def test_from_summary(self):
        """Test generating snippets from summary text."""
        summary = "This meeting discussed Python development and machine learning applications."

        snippets = SnippetGenerator.from_summary(summary, "Python")
        assert len(snippets) > 0
        assert any("Python" in s for s in snippets)

        long_summary = "Python " * 20
        snippets = SnippetGenerator.from_summary(long_summary, "Python")
        assert len(snippets) <= 2

    def test_combine_sources(self):
        """Test combining snippets from multiple sources."""
        summary = "Python is a great programming language."
        webvtt = """WEBVTT

00:00:00.000 --> 00:00:02.000
Learn Python programming

00:00:02.000 --> 00:00:04.000
Python is powerful"""

        snippets, total_count = SnippetGenerator.combine_sources(
            summary, webvtt, "Python", max_total=3
        )
        assert len(snippets) <= 3
        assert len(snippets) > 0
        assert total_count > 0

        snippets, total_count = SnippetGenerator.combine_sources(
            summary, None, "Python", max_total=3
        )
        assert len(snippets) > 0
        assert all("Python" in s for s in snippets)
        assert total_count == 1

        snippets, total_count = SnippetGenerator.combine_sources(
            None, webvtt, "Python", max_total=3
        )
        assert len(snippets) > 0
        assert total_count == 2

        long_summary = "Python " * 10
        snippets, total_count = SnippetGenerator.combine_sources(
            long_summary, webvtt, "Python", max_total=2
        )
        assert len(snippets) == 2
        assert total_count >= 10

    def test_match_counting_sum_logic(self):
        """Test that match counting correctly sums matches from both sources."""
        summary = "data science uses data analysis and data mining techniques"
        webvtt = """WEBVTT

00:00:00.000 --> 00:00:02.000
Big data processing

00:00:02.000 --> 00:00:04.000
data visualization and data storage"""

        snippets, total_count = SnippetGenerator.combine_sources(
            summary, webvtt, "data", max_total=3
        )
        assert total_count == 6
        assert len(snippets) <= 3

        summary_snippets, summary_count = SnippetGenerator.combine_sources(
            summary, None, "data", max_total=3
        )
        assert summary_count == 3

        webvtt_snippets, webvtt_count = SnippetGenerator.combine_sources(
            None, webvtt, "data", max_total=3
        )
        assert webvtt_count == 3

        # combine_sources requires at least one source to be present
        with pytest.raises(AssertionError, match="At least one source must be present"):
            SnippetGenerator.combine_sources(None, None, "data", max_total=3)

    def test_edge_cases(self):
        """Test edge cases for the pure functions."""
        text = "Test with special: @#$%^&*() characters"
        snippets = SnippetGenerator.generate(text, "@#$%")
        assert len(snippets) > 0

        long_query = "a" * 100
        snippets = SnippetGenerator.generate("Some text", long_query)
        assert snippets == []

        text = "Unicode test: café, naïve, 日本語"
        snippets = SnippetGenerator.generate(text, "café")
        assert len(snippets) > 0
        assert "café" in snippets[0]
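Note: the generator tests above imply a simple windowing algorithm, locate each case-insensitive match, cut a fixed-width window around it, add ellipses at cut edges, and drop windows overlapping an earlier one. A standalone sketch of that idea (an illustration, not the project's SnippetGenerator):

def window_snippets(text: str, term: str, max_snippets: int = 3, width: int = 150) -> list[str]:
    """Cut a window of roughly `width` characters around each match, skipping overlaps."""
    if not text or not term:
        return []
    snippets, last_end = [], 0
    needle, haystack = term.lower(), text.lower()
    pos = haystack.find(needle)
    while pos != -1 and len(snippets) < max_snippets:
        start = max(0, pos - width // 2)
        end = min(len(text), pos + len(needle) + width // 2)
        if start >= last_end:  # skip windows overlapping the previous one
            prefix = "..." if start > 0 else ""
            suffix = "..." if end < len(text) else ""
            snippets.append(prefix + text[start:end] + suffix)
            last_end = end
        pos = haystack.find(needle, pos + 1)
    return snippets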
@@ -19,7 +19,7 @@ async def fake_transcript(tmpdir, client):
    transcript = await transcripts_controller.get_by_id(tid)
    assert transcript is not None

    await transcripts_controller.update(transcript, {"status": "finished"})
    await transcripts_controller.update(transcript, {"status": "ended"})

    # manually copy a file at the expected location
    audio_filename = transcript.audio_mp3_filename
@@ -29,10 +29,10 @@ async def client(app_lifespan):
@pytest.mark.asyncio
async def test_transcript_process(
    tmpdir,
    whisper_transcript,
    dummy_llm,
    dummy_processors,
    dummy_diarization,
    dummy_file_transcript,
    dummy_file_diarization,
    dummy_storage,
    client,
):
@@ -56,8 +56,8 @@ async def test_transcript_process(
    assert response.status_code == 200
    assert response.json()["status"] == "ok"

    # wait for processing to finish (max 10 minutes)
    timeout_seconds = 600  # 10 minutes
    # wait for processing to finish (max 1 minute)
    timeout_seconds = 60
    start_time = time.monotonic()
    while (time.monotonic() - start_time) < timeout_seconds:
        # fetch the transcript and check if it is ended
@@ -75,9 +75,10 @@ async def test_transcript_process(
    )
    assert response.status_code == 200
    assert response.json()["status"] == "ok"
    await asyncio.sleep(2)

    # wait for processing to finish (max 10 minutes)
    timeout_seconds = 600  # 10 minutes
    # wait for processing to finish (max 1 minute)
    timeout_seconds = 60
    start_time = time.monotonic()
    while (time.monotonic() - start_time) < timeout_seconds:
        # fetch the transcript and check if it is ended
@@ -99,4 +100,4 @@ async def test_transcript_process(
    response = await client.get(f"/transcripts/{tid}/topics")
    assert response.status_code == 200
    assert len(response.json()) == 1
    assert "want to share" in response.json()[0]["transcript"]
    assert "Hello world. How are you today?" in response.json()[0]["transcript"]

@@ -12,7 +12,8 @@ async def test_transcript_upload_file(
    tmpdir,
    dummy_llm,
    dummy_processors,
    dummy_diarization,
    dummy_file_transcript,
    dummy_file_diarization,
    dummy_storage,
    client,
):
@@ -36,8 +37,8 @@ async def test_transcript_upload_file(
    assert response.status_code == 200
    assert response.json()["status"] == "ok"

    # wait the processing to finish (max 10 minutes)
    timeout_seconds = 600  # 10 minutes
    # wait the processing to finish (max 1 minute)
    timeout_seconds = 60
    start_time = time.monotonic()
    while (time.monotonic() - start_time) < timeout_seconds:
        # fetch the transcript and check if it is ended
@@ -47,7 +48,7 @@ async def test_transcript_upload_file(
            break
        await asyncio.sleep(1)
    else:
        pytest.fail(f"Processing timed out after {timeout_seconds} seconds")
        return pytest.fail(f"Processing timed out after {timeout_seconds} seconds")

    # check the transcript is ended
    transcript = resp.json()
@@ -59,4 +60,4 @@ async def test_transcript_upload_file(
    response = await client.get(f"/transcripts/{tid}/topics")
    assert response.status_code == 200
    assert len(response.json()) == 1
    assert "want to share" in response.json()[0]["transcript"]
    assert "Hello world. How are you today?" in response.json()[0]["transcript"]
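Note: both tests rely on Python's while/else, where the else branch runs only when the loop exhausts its condition without hitting break, which is exactly where the timeout failure belongs. A minimal sketch of the pattern:

import time


def poll_until(check, timeout_seconds: float = 60, interval: float = 1.0):
    """Poll `check` until it returns truthy or the timeout elapses."""
    start_time = time.monotonic()
    while (time.monotonic() - start_time) < timeout_seconds:
        if check():
            break
        time.sleep(interval)
    else:
        # runs only if the loop ended without `break`, i.e. on timeout
        raise TimeoutError(f"Condition not met after {timeout_seconds} seconds")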
886 server/uv.lock generated
File diff suppressed because it is too large
34 www/.env.example Normal file
@@ -0,0 +1,34 @@
|
||||
# Environment
|
||||
ENVIRONMENT=development
|
||||
NEXT_PUBLIC_ENV=development
|
||||
|
||||
# Site Configuration
|
||||
NEXT_PUBLIC_SITE_URL=http://localhost:3000
|
||||
|
||||
# Nextauth envs
|
||||
# not used in app code but in lib code
|
||||
NEXTAUTH_URL=http://localhost:3000
|
||||
NEXTAUTH_SECRET=your-nextauth-secret-here
|
||||
# / Nextauth envs
|
||||
|
||||
# Authentication (Authentik OAuth/OIDC)
|
||||
AUTHENTIK_ISSUER=https://authentik.example.com/application/o/reflector
|
||||
AUTHENTIK_REFRESH_TOKEN_URL=https://authentik.example.com/application/o/token/
|
||||
AUTHENTIK_CLIENT_ID=your-client-id-here
|
||||
AUTHENTIK_CLIENT_SECRET=your-client-secret-here
|
||||
|
||||
# Feature Flags
|
||||
# NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN=true
|
||||
# NEXT_PUBLIC_FEATURE_PRIVACY=false
|
||||
# NEXT_PUBLIC_FEATURE_BROWSE=true
|
||||
# NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP=true
|
||||
# NEXT_PUBLIC_FEATURE_ROOMS=true
|
||||
|
||||
# API URLs
|
||||
NEXT_PUBLIC_API_URL=http://127.0.0.1:1250
|
||||
NEXT_PUBLIC_WEBSOCKET_URL=ws://127.0.0.1:1250
|
||||
NEXT_PUBLIC_AUTH_CALLBACK_URL=http://localhost:3000/auth-callback
|
||||
|
||||
# Sentry
|
||||
# SENTRY_DSN=https://your-dsn@sentry.io/project-id
|
||||
# SENTRY_IGNORE_API_RESOLUTION_ERROR=1
|
||||
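For reference, a sketch of how such variables are typically consumed in Next.js code; the helper name and config shape are assumptions, not part of this commit. Next.js inlines process.env.NEXT_PUBLIC_* at build time, so each one must be referenced literally:

```typescript
// Hypothetical config module collecting the public env vars above.
function requireEnv(name: string, value: string | undefined): string {
  if (!value) throw new Error(`Missing required env var: ${name}`);
  return value;
}

export const publicConfig = {
  siteUrl: requireEnv("NEXT_PUBLIC_SITE_URL", process.env.NEXT_PUBLIC_SITE_URL),
  apiUrl: requireEnv("NEXT_PUBLIC_API_URL", process.env.NEXT_PUBLIC_API_URL),
  websocketUrl: requireEnv(
    "NEXT_PUBLIC_WEBSOCKET_URL",
    process.env.NEXT_PUBLIC_WEBSOCKET_URL,
  ),
  // The commented-out feature flags default to off unless set to "true".
  featureRooms: process.env.NEXT_PUBLIC_FEATURE_ROOMS === "true",
};
```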
1 www/.gitignore vendored
@@ -40,7 +40,6 @@ next-env.d.ts
# Sentry Auth Token
.sentryclirc

config.ts

# openapi logs
openapi-ts-error-*.log
30 www/app/(app)/AuthWrapper.tsx Normal file
@@ -0,0 +1,30 @@
"use client";

import { Flex, Spinner } from "@chakra-ui/react";
import { useAuth } from "../lib/AuthProvider";
import { useLoginRequiredPages } from "../lib/useLoginRequiredPages";

export default function AuthWrapper({
  children,
}: {
  children: React.ReactNode;
}) {
  const auth = useAuth();
  const redirectPath = useLoginRequiredPages();
  const redirectHappens = !!redirectPath;

  if (auth.status === "loading" || redirectHappens) {
    return (
      <Flex
        flexDir="column"
        alignItems="center"
        justifyContent="center"
        h="calc(100vh - 80px)" // Account for header height
      >
        <Spinner size="xl" color="blue.500" />
      </Flex>
    );
  }

  return <>{children}</>;
}
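AuthWrapper is a gate: it renders a spinner while auth is resolving or a login redirect is pending, and the page content otherwise. A sketch of plausible usage in the route-group layout; the layout file itself is an assumption, not part of this diff:

```tsx
// Hypothetical app/(app)/layout.tsx showing where the gate would sit.
import type { ReactNode } from "react";
import AuthWrapper from "./AuthWrapper";

export default function AppLayout({ children }: { children: ReactNode }) {
  // Everything under this layout waits on auth (or the redirect)
  // before rendering.
  return <AuthWrapper>{children}</AuthWrapper>;
}
```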
@@ -1,7 +1,10 @@
import React from "react";
import { Box, Stack, Link, Heading } from "@chakra-ui/react";
import NextLink from "next/link";
import { Room, SourceKind } from "../../../api";
import type { components } from "../../../reflector-api";

type Room = components["schemas"]["Room"];
type SourceKind = components["schemas"]["SourceKind"];

interface FilterSidebarProps {
  rooms: Room[];
@@ -72,7 +75,7 @@ export default function FilterSidebar({
            key={room.id}
            as={NextLink}
            href="#"
            onClick={() => onFilterChange("room", room.id)}
            onClick={() => onFilterChange("room" as SourceKind, room.id)}
            color={
              selectedSourceKind === "room" && selectedRoomId === room.id
                ? "blue.500"
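The import change swaps hand-written API types for types generated from the server's OpenAPI schema. A sketch of the pattern; the components map shape matches how the diff indexes it (openapi-typescript style), and the roomLabel helper is illustrative only:

```typescript
import type { components } from "../../../reflector-api";

// Aliases keep call sites short while staying in sync with the server.
type Room = components["schemas"]["Room"];
type SourceKind = components["schemas"]["SourceKind"];

// A server-side schema change now surfaces as a compile error here.
// Assumes Room exposes an id field, as key={room.id} in the sidebar suggests.
function roomLabel(room: Room): string {
  return room.id;
}
```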
@@ -1,26 +1,67 @@
import React from "react";
import React, { useEffect } from "react";
import { Pagination, IconButton, ButtonGroup } from "@chakra-ui/react";
import { LuChevronLeft, LuChevronRight } from "react-icons/lu";

// explicitly 1-based to prevent +/-1-confusion errors
export const FIRST_PAGE = 1 as PaginationPage;
export const parsePaginationPage = (
  page: number,
):
  | {
      value: PaginationPage;
    }
  | {
      error: string;
    } => {
  if (page < FIRST_PAGE)
    return {
      error: "Page must be greater than 0",
    };
  if (!Number.isInteger(page))
    return {
      error: "Page must be an integer",
    };
  return {
    value: page as PaginationPage,
  };
};
export type PaginationPage = number & { __brand: "PaginationPage" };
export const PaginationPage = (page: number): PaginationPage => {
  const v = parsePaginationPage(page);
  if ("error" in v) throw new Error(v.error);
  return v.value;
};

export const paginationPageTo0Based = (page: PaginationPage): number =>
  page - FIRST_PAGE;

type PaginationProps = {
  page: number;
  setPage: (page: number) => void;
  page: PaginationPage;
  setPage: (page: PaginationPage) => void;
  total: number;
  size: number;
};

export const totalPages = (total: number, size: number) => {
  return Math.ceil(total / size);
};

export default function PaginationComponent(props: PaginationProps) {
  const { page, setPage, total, size } = props;
  const totalPages = Math.ceil(total / size);

  if (totalPages <= 1) return null;
  useEffect(() => {
    if (page > totalPages(total, size)) {
      console.error(
        `Page number (${page}) is greater than total pages (${totalPages}) in pagination`,
      );
    }
  }, [page, totalPages(total, size)]);

  return (
    <Pagination.Root
      count={total}
      pageSize={size}
      page={page}
      onPageChange={(details) => setPage(details.page)}
      onPageChange={(details) => setPage(PaginationPage(details.page))}
      style={{ display: "flex", justifyContent: "center" }}
    >
      <ButtonGroup variant="ghost" size="xs">
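The new branded PaginationPage type makes 1-based page numbers a distinct compile-time type: parsePaginationPage validates untrusted input, while the PaginationPage constructor throws on bad values. A usage sketch; the module path is hypothetical:

```typescript
import {
  FIRST_PAGE,
  PaginationPage,
  parsePaginationPage,
} from "./Pagination"; // hypothetical path to the component above

// Untrusted input (e.g. a ?page= query param) goes through the parser:
const parsed = parsePaginationPage(Number("3"));
const page = "error" in parsed ? FIRST_PAGE : parsed.value;

// Known-good values can use the throwing constructor instead:
const second = PaginationPage(2);

// A raw number no longer type-checks where a PaginationPage is expected:
// const bad: PaginationPage = 2; // error: missing the __brand marker
```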
@@ -1,34 +0,0 @@
import React, { useState } from "react";
import { Flex, Input, Button } from "@chakra-ui/react";

interface SearchBarProps {
  onSearch: (searchTerm: string) => void;
}

export default function SearchBar({ onSearch }: SearchBarProps) {
  const [searchInputValue, setSearchInputValue] = useState("");

  const handleSearch = () => {
    onSearch(searchInputValue);
  };

  const handleKeyDown = (event: React.KeyboardEvent) => {
    if (event.key === "Enter") {
      handleSearch();
    }
  };

  return (
    <Flex alignItems="center">
      <Input
        placeholder="Search transcriptions..."
        value={searchInputValue}
        onChange={(e) => setSearchInputValue(e.target.value)}
        onKeyDown={handleKeyDown}
      />
      <Button ml={2} onClick={handleSearch}>
        Search
      </Button>
    </Flex>
  );
}
@@ -4,8 +4,8 @@ import { LuMenu, LuTrash, LuRotateCw } from "react-icons/lu";

interface TranscriptActionsMenuProps {
  transcriptId: string;
  onDelete: (transcriptId: string) => (e: any) => void;
  onReprocess: (transcriptId: string) => (e: any) => void;
  onDelete: (transcriptId: string) => void;
  onReprocess: (transcriptId: string) => void;
}

export default function TranscriptActionsMenu({
@@ -24,11 +24,17 @@ export default function TranscriptActionsMenu({
      <Menu.Content>
        <Menu.Item
          value="reprocess"
          onClick={(e) => onReprocess(transcriptId)(e)}
          onClick={() => onReprocess(transcriptId)}
        >
          <LuRotateCw /> Reprocess
        </Menu.Item>
        <Menu.Item value="delete" onClick={(e) => onDelete(transcriptId)(e)}>
        <Menu.Item
          value="delete"
          onClick={(e) => {
            e.stopPropagation();
            onDelete(transcriptId);
          }}
        >
          <LuTrash /> Delete
        </Menu.Item>
      </Menu.Content>
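The signature change drops the curried (id) => (event) => void shape: the menu now owns the event plumbing (stopPropagation) and parents only map an id to an action. A before/after sketch of the parent side; deleteTranscript is a hypothetical API call:

```typescript
import type React from "react";

// Hypothetical API call, for illustration only.
declare function deleteTranscript(id: string): Promise<void>;

// Before: parents returned an event handler, so the menu had to invoke
// onDelete(transcriptId)(e) and every caller repeated the event logic.
const onDeleteCurried =
  (transcriptId: string) =>
  (e: React.MouseEvent): void => {
    e.stopPropagation();
    void deleteTranscript(transcriptId);
  };

// After: parents map id -> action; the menu calls e.stopPropagation()
// itself before invoking onDelete(transcriptId).
const onDelete = (transcriptId: string): void => {
  void deleteTranscript(transcriptId);
};
```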
@@ -1,27 +1,293 @@
import React from "react";
import { Box, Stack, Text, Flex, Link, Spinner } from "@chakra-ui/react";
import React, { useState } from "react";
import {
  Box,
  Stack,
  Text,
  Flex,
  Link,
  Spinner,
  Badge,
  HStack,
  VStack,
} from "@chakra-ui/react";
import NextLink from "next/link";
import { GetTranscriptMinimal } from "../../../api";
import { formatTimeMs, formatLocalDate } from "../../../lib/time";
import TranscriptStatusIcon from "./TranscriptStatusIcon";
import TranscriptActionsMenu from "./TranscriptActionsMenu";
import {
  highlightMatches,
  generateTextFragment,
} from "../../../lib/textHighlight";
import type { components } from "../../../reflector-api";

type SearchResult = components["schemas"]["SearchResult"];
type SourceKind = components["schemas"]["SourceKind"];

interface TranscriptCardsProps {
  transcripts: GetTranscriptMinimal[];
  onDelete: (transcriptId: string) => (e: any) => void;
  onReprocess: (transcriptId: string) => (e: any) => void;
  loading?: boolean;
  results: SearchResult[];
  query: string;
  isLoading?: boolean;
  onDelete: (transcriptId: string) => void;
  onReprocess: (transcriptId: string) => void;
}

function highlightText(text: string, query: string): React.ReactNode {
  if (!query) return text;

  const matches = highlightMatches(text, query);

  if (matches.length === 0) return text;

  // Sort matches by index to process them in order
  const sortedMatches = [...matches].sort((a, b) => a.index - b.index);

  const parts: React.ReactNode[] = [];
  let lastIndex = 0;

  sortedMatches.forEach((match, i) => {
    // Add text before the match
    if (match.index > lastIndex) {
      parts.push(
        <Text as="span" key={`text-${i}`} display="inline">
          {text.slice(lastIndex, match.index)}
        </Text>,
      );
    }

    // Add the highlighted match
    parts.push(
      <Text
        as="mark"
        key={`match-${i}`}
        bg="yellow.200"
        px={0.5}
        display="inline"
      >
        {match.match}
      </Text>,
    );

    lastIndex = match.index + match.match.length;
  });

  // Add remaining text after last match
  if (lastIndex < text.length) {
    parts.push(
      <Text as="span" key={`text-end`} display="inline">
        {text.slice(lastIndex)}
      </Text>,
    );
  }

  return parts;
}

const transcriptHref = (
  transcriptId: string,
  mainSnippet: string,
  query: string,
): `/transcripts/${string}` => {
  const urlTextFragment = mainSnippet
    ? generateTextFragment(mainSnippet, query)
    : null;
  const urlTextFragmentWithHash = urlTextFragment
    ? `#${urlTextFragment.k}=${encodeURIComponent(urlTextFragment.v)}`
    : "";
  return `/transcripts/${transcriptId}${urlTextFragmentWithHash}`;
};

// note that it's strongly tied to search logic - in case you want to use it independently, refactor
function TranscriptCard({
  result,
  query,
  onDelete,
  onReprocess,
}: {
  result: SearchResult;
  query: string;
  onDelete: (transcriptId: string) => void;
  onReprocess: (transcriptId: string) => void;
}) {
  const [isExpanded, setIsExpanded] = useState(false);

  const mainSnippet = result.search_snippets[0];
  const additionalSnippets = result.search_snippets.slice(1);
  const totalMatches = result.total_match_count || 0;
  const snippetsShown = result.search_snippets.length;
  const remainingMatches = totalMatches - snippetsShown;
  const hasAdditionalSnippets = additionalSnippets.length > 0;
  const resultTitle = result.title || "Unnamed Transcript";

  const formattedDuration = result.duration
    ? formatTimeMs(result.duration)
    : "N/A";
  const formattedDate = formatLocalDate(result.created_at);
  const source =
    result.source_kind === ("room" as SourceKind)
      ? result.room_name || result.room_id
      : result.source_kind;

  const handleExpandClick = (e: React.MouseEvent) => {
    e.preventDefault();
    e.stopPropagation();
    setIsExpanded(!isExpanded);
  };

  return (
    <Box borderWidth={1} p={4} borderRadius="md" fontSize="sm">
      <Flex justify="space-between" alignItems="flex-start" gap="2">
        <Box>
          <TranscriptStatusIcon status={result.status} />
        </Box>
        <Box flex="1">
          {/* Title with highlighting and text fragment for deep linking */}
          <Link
            as={NextLink}
            href={transcriptHref(result.id, mainSnippet, query)}
            fontWeight="600"
            display="block"
            mb={2}
          >
            {highlightText(resultTitle, query)}
          </Link>

          {/* Metadata - Horizontal on desktop, vertical on mobile */}
          <Flex
            direction={{ base: "column", md: "row" }}
            gap={{ base: 1, md: 2 }}
            fontSize="xs"
            color="gray.600"
            flexWrap="wrap"
            align={{ base: "flex-start", md: "center" }}
          >
            <Flex align="center" gap={1}>
              <Text fontWeight="medium" color="gray.500">
                Source:
              </Text>
              <Text>{source}</Text>
            </Flex>
            <Text display={{ base: "none", md: "block" }} color="gray.400">
              •
            </Text>
            <Flex align="center" gap={1}>
              <Text fontWeight="medium" color="gray.500">
                Date:
              </Text>
              <Text>{formattedDate}</Text>
            </Flex>
            <Text display={{ base: "none", md: "block" }} color="gray.400">
              •
            </Text>
            <Flex align="center" gap={1}>
              <Text fontWeight="medium" color="gray.500">
                Duration:
              </Text>
              <Text>{formattedDuration}</Text>
            </Flex>
          </Flex>

          {/* Search Results Section - only show when searching */}
          {mainSnippet && (
            <>
              {/* Main Snippet */}
              <Box
                mt={3}
                p={2}
                bg="gray.50"
                borderLeft="2px solid"
                borderLeftColor="blue.400"
                borderRadius="sm"
                fontSize="xs"
              >
                <Text color="gray.700">
                  {highlightText(mainSnippet, query)}
                </Text>
              </Box>

              {hasAdditionalSnippets && (
                <>
                  <Flex
                    mt={2}
                    p={2}
                    bg="blue.50"
                    borderRadius="sm"
                    cursor="pointer"
                    onClick={handleExpandClick}
                    _hover={{ bg: "blue.100" }}
                    align="center"
                    justify="space-between"
                  >
                    <HStack gap={2}>
                      <Badge
                        bg="blue.500"
                        color="white"
                        fontSize="xs"
                        px={2}
                        borderRadius="full"
                      >
                        {remainingMatches > 0
                          ? `${additionalSnippets.length + remainingMatches}+`
                          : additionalSnippets.length}
                      </Badge>
                      <Text fontSize="xs" color="blue.600" fontWeight="medium">
                        more{" "}
                        {additionalSnippets.length + remainingMatches === 1
                          ? "match"
                          : "matches"}
                        {remainingMatches > 0 &&
                          ` (${additionalSnippets.length} shown)`}
                      </Text>
                    </HStack>
                    <Text fontSize="xs" color="blue.600">
                      {isExpanded ? "▲" : "▼"}
                    </Text>
                  </Flex>

                  {/* Additional Snippets */}
                  {isExpanded && (
                    <VStack align="stretch" gap={2} mt={2}>
                      {additionalSnippets.map((snippet, index) => (
                        <Box
                          key={index}
                          p={2}
                          bg="gray.50"
                          borderLeft="2px solid"
                          borderLeftColor="gray.300"
                          borderRadius="sm"
                          fontSize="xs"
                        >
                          <Text color="gray.700">
                            {highlightText(snippet, query)}
                          </Text>
                        </Box>
                      ))}
                    </VStack>
                  )}
                </>
              )}
            </>
          )}
        </Box>
        <TranscriptActionsMenu
          transcriptId={result.id}
          onDelete={onDelete}
          onReprocess={onReprocess}
        />
      </Flex>
    </Box>
  );
}

export default function TranscriptCards({
  transcripts,
  results,
  query,
  isLoading,
  onDelete,
  onReprocess,
  loading,
}: TranscriptCardsProps) {
  return (
    <Box display={{ base: "block", lg: "none" }} position="relative">
      {loading && (
    <Box position="relative">
      {isLoading && (
        <Flex
          position="absolute"
          top={0}
@@ -37,48 +303,19 @@ export default function TranscriptCards({
        </Flex>
      )}
      <Box
        opacity={loading ? 0.9 : 1}
        pointerEvents={loading ? "none" : "auto"}
        opacity={isLoading ? 0.9 : 1}
        pointerEvents={isLoading ? "none" : "auto"}
        transition="opacity 0.2s ease-in-out"
      >
        <Stack gap={2}>
          {transcripts.map((item) => (
            <Box
              key={item.id}
              borderWidth={1}
              p={4}
              borderRadius="md"
              fontSize="sm"
            >
              <Flex justify="space-between" alignItems="flex-start" gap="2">
                <Box>
                  <TranscriptStatusIcon status={item.status} />
                </Box>
                <Box flex="1">
                  <Link
                    as={NextLink}
                    href={`/transcripts/${item.id}`}
                    fontWeight="600"
                    display="block"
                  >
                    {item.title || "Unnamed Transcript"}
                  </Link>
                  <Text>
                    Source:{" "}
                    {item.source_kind === "room"
                      ? item.room_name
                      : item.source_kind}
                  </Text>
                  <Text>Date: {formatLocalDate(item.created_at)}</Text>
                  <Text>Duration: {formatTimeMs(item.duration)}</Text>
                </Box>
                <TranscriptActionsMenu
                  transcriptId={item.id}
                  onDelete={onDelete}
                  onReprocess={onReprocess}
                />
              </Flex>
            </Box>
        <Stack gap={3}>
          {results.map((result) => (
            <TranscriptCard
              key={result.id}
              result={result}
              query={query}
              onDelete={onDelete}
              onReprocess={onReprocess}
            />
          ))}
        </Stack>
      </Box>
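The transcriptHref helper above pairs each card link with a URL text fragment so supporting browsers scroll to and highlight the matched snippet on the transcript page. A sketch of the resulting URL; the { k, v } shape mirrors how transcriptHref consumes generateTextFragment, and the :~:text key is an assumption about what that helper returns:

```typescript
// Hypothetical fragment as generateTextFragment might return it.
const fragment = { k: ":~:text", v: "want to share" };

const href = `/transcripts/abc123#${fragment.k}=${encodeURIComponent(fragment.v)}`;
// -> "/transcripts/abc123#:~:text=want%20to%20share"
// Browsers that implement scroll-to-text-fragment jump to and highlight
// the first occurrence of "want to share" when the link opens.
```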
Some files were not shown because too many files have changed in this diff.