mirror of
https://github.com/Monadical-SAS/reflector.git
synced 2025-12-21 04:39:06 +00:00
Compare commits
1 Commits
v0.20.0
...
mathieu/fi
| Author | SHA1 | Date | |
|---|---|---|---|
| 5aed513c47 |
5
.github/workflows/db_migrations.yml
vendored
5
.github/workflows/db_migrations.yml
vendored
@@ -2,8 +2,6 @@ name: Test Database Migrations
|
|||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
paths:
|
||||||
- "server/migrations/**"
|
- "server/migrations/**"
|
||||||
- "server/reflector/db/**"
|
- "server/reflector/db/**"
|
||||||
@@ -19,9 +17,6 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
test-migrations:
|
test-migrations:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
concurrency:
|
|
||||||
group: db-ubuntu-latest-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
services:
|
services:
|
||||||
postgres:
|
postgres:
|
||||||
image: postgres:17
|
image: postgres:17
|
||||||
|
|||||||
2
.github/workflows/deploy.yml
vendored
2
.github/workflows/deploy.yml
vendored
@@ -1,4 +1,4 @@
|
|||||||
name: Build container/push to container registry
|
name: Deploy to Amazon ECS
|
||||||
|
|
||||||
on: [workflow_dispatch]
|
on: [workflow_dispatch]
|
||||||
|
|
||||||
|
|||||||
57
.github/workflows/docker-frontend.yml
vendored
57
.github/workflows/docker-frontend.yml
vendored
@@ -1,57 +0,0 @@
|
|||||||
name: Build and Push Frontend Docker Image
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- 'www/**'
|
|
||||||
- '.github/workflows/docker-frontend.yml'
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}-frontend
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build-and-push:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Log in to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
registry: ${{ env.REGISTRY }}
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Extract metadata
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v5
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
tags: |
|
|
||||||
type=ref,event=branch
|
|
||||||
type=sha,prefix={{branch}}-
|
|
||||||
type=raw,value=latest,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Build and push Docker image
|
|
||||||
uses: docker/build-push-action@v5
|
|
||||||
with:
|
|
||||||
context: ./www
|
|
||||||
file: ./www/Dockerfile
|
|
||||||
push: true
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
||||||
cache-from: type=gha
|
|
||||||
cache-to: type=gha,mode=max
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
45
.github/workflows/test_next_server.yml
vendored
45
.github/workflows/test_next_server.yml
vendored
@@ -1,45 +0,0 @@
|
|||||||
name: Test Next Server
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "www/**"
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
|
||||||
- "www/**"
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test-next-server:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
working-directory: ./www
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- name: Setup Node.js
|
|
||||||
uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: '20'
|
|
||||||
|
|
||||||
- name: Install pnpm
|
|
||||||
uses: pnpm/action-setup@v4
|
|
||||||
with:
|
|
||||||
version: 8
|
|
||||||
|
|
||||||
- name: Setup Node.js cache
|
|
||||||
uses: actions/setup-node@v4
|
|
||||||
with:
|
|
||||||
node-version: '20'
|
|
||||||
cache: 'pnpm'
|
|
||||||
cache-dependency-path: './www/pnpm-lock.yaml'
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: pnpm install
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: pnpm test
|
|
||||||
11
.github/workflows/test_server.yml
vendored
11
.github/workflows/test_server.yml
vendored
@@ -5,17 +5,12 @@ on:
|
|||||||
paths:
|
paths:
|
||||||
- "server/**"
|
- "server/**"
|
||||||
push:
|
push:
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
paths:
|
paths:
|
||||||
- "server/**"
|
- "server/**"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
pytest:
|
pytest:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
concurrency:
|
|
||||||
group: pytest-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
services:
|
services:
|
||||||
redis:
|
redis:
|
||||||
image: redis:6
|
image: redis:6
|
||||||
@@ -35,9 +30,6 @@ jobs:
|
|||||||
|
|
||||||
docker-amd64:
|
docker-amd64:
|
||||||
runs-on: linux-amd64
|
runs-on: linux-amd64
|
||||||
concurrency:
|
|
||||||
group: docker-amd64-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
@@ -53,9 +45,6 @@ jobs:
|
|||||||
|
|
||||||
docker-arm64:
|
docker-arm64:
|
||||||
runs-on: linux-arm64
|
runs-on: linux-arm64
|
||||||
concurrency:
|
|
||||||
group: docker-arm64-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
|
|||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -14,7 +14,4 @@ data/
|
|||||||
www/REFACTOR.md
|
www/REFACTOR.md
|
||||||
www/reload-frontend
|
www/reload-frontend
|
||||||
server/test.sqlite
|
server/test.sqlite
|
||||||
CLAUDE.local.md
|
CLAUDE.local.md
|
||||||
www/.env.development
|
|
||||||
www/.env.production
|
|
||||||
.playwright-mcp
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
b9d891d3424f371642cb032ecfd0e2564470a72c:server/tests/test_transcripts_recording_deletion.py:generic-api-key:15
|
|
||||||
@@ -27,8 +27,3 @@ repos:
|
|||||||
files: ^server/
|
files: ^server/
|
||||||
- id: ruff-format
|
- id: ruff-format
|
||||||
files: ^server/
|
files: ^server/
|
||||||
|
|
||||||
- repo: https://github.com/gitleaks/gitleaks
|
|
||||||
rev: v8.28.0
|
|
||||||
hooks:
|
|
||||||
- id: gitleaks
|
|
||||||
|
|||||||
197
CHANGELOG.md
197
CHANGELOG.md
@@ -1,202 +1,5 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
## [0.20.0](https://github.com/Monadical-SAS/reflector/compare/v0.19.0...v0.20.0) (2025-11-25)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* link transcript participants ([#737](https://github.com/Monadical-SAS/reflector/issues/737)) ([9bec398](https://github.com/Monadical-SAS/reflector/commit/9bec39808fc6322612d8b87e922a6f7901fc01c1))
|
|
||||||
* transcript restart script ([#742](https://github.com/Monadical-SAS/reflector/issues/742)) ([86d5e26](https://github.com/Monadical-SAS/reflector/commit/86d5e26224bb55a0f1cc785aeda52065bb92ee6f))
|
|
||||||
|
|
||||||
## [0.19.0](https://github.com/Monadical-SAS/reflector/compare/v0.18.0...v0.19.0) (2025-11-25)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* dailyco api module ([#725](https://github.com/Monadical-SAS/reflector/issues/725)) ([4287f8b](https://github.com/Monadical-SAS/reflector/commit/4287f8b8aeee60e51db7539f4dcbda5f6e696bd8))
|
|
||||||
* dailyco poll ([#730](https://github.com/Monadical-SAS/reflector/issues/730)) ([8e438ca](https://github.com/Monadical-SAS/reflector/commit/8e438ca285152bd48fdc42767e706fb448d3525c))
|
|
||||||
* multitrack cli ([#735](https://github.com/Monadical-SAS/reflector/issues/735)) ([11731c9](https://github.com/Monadical-SAS/reflector/commit/11731c9d38439b04e93b1c3afbd7090bad11a11f))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* default platform fix ([#736](https://github.com/Monadical-SAS/reflector/issues/736)) ([c442a62](https://github.com/Monadical-SAS/reflector/commit/c442a627873ca667656eeaefb63e54ab10b8d19e))
|
|
||||||
* parakeet vad not getting the end timestamp ([#728](https://github.com/Monadical-SAS/reflector/issues/728)) ([18ed713](https://github.com/Monadical-SAS/reflector/commit/18ed7133693653ef4ddac6c659a8c14b320d1657))
|
|
||||||
* start raw tracks recording ([#729](https://github.com/Monadical-SAS/reflector/issues/729)) ([3e47c2c](https://github.com/Monadical-SAS/reflector/commit/3e47c2c0573504858e0d2e1798b6ed31f16b4a5d))
|
|
||||||
|
|
||||||
## [0.18.0](https://github.com/Monadical-SAS/reflector/compare/v0.17.0...v0.18.0) (2025-11-14)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* daily QOL: participants dictionary ([#721](https://github.com/Monadical-SAS/reflector/issues/721)) ([b20cad7](https://github.com/Monadical-SAS/reflector/commit/b20cad76e69fb6a76405af299a005f1ddcf60eae))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* add proccessing page to file upload and reprocessing ([#650](https://github.com/Monadical-SAS/reflector/issues/650)) ([28a7258](https://github.com/Monadical-SAS/reflector/commit/28a7258e45317b78e60e6397be2bc503647eaace))
|
|
||||||
* copy transcript ([#674](https://github.com/Monadical-SAS/reflector/issues/674)) ([a9a4f32](https://github.com/Monadical-SAS/reflector/commit/a9a4f32324f66c838e081eee42bb9502f38c1db1))
|
|
||||||
|
|
||||||
## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* add API key management UI ([#716](https://github.com/Monadical-SAS/reflector/issues/716)) ([372202b](https://github.com/Monadical-SAS/reflector/commit/372202b0e1a86823900b0aa77be1bfbc2893d8a1))
|
|
||||||
* daily.co support as alternative to whereby ([#691](https://github.com/Monadical-SAS/reflector/issues/691)) ([1473fd8](https://github.com/Monadical-SAS/reflector/commit/1473fd82dc472c394cbaa2987212ad662a74bcac))
|
|
||||||
|
|
||||||
## [0.16.0](https://github.com/Monadical-SAS/reflector/compare/v0.15.0...v0.16.0) (2025-10-24)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* search date filter ([#710](https://github.com/Monadical-SAS/reflector/issues/710)) ([962c40e](https://github.com/Monadical-SAS/reflector/commit/962c40e2b6428ac42fd10aea926782d7a6f3f902))
|
|
||||||
|
|
||||||
## [0.15.0](https://github.com/Monadical-SAS/reflector/compare/v0.14.0...v0.15.0) (2025-10-20)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* api tokens ([#705](https://github.com/Monadical-SAS/reflector/issues/705)) ([9a258ab](https://github.com/Monadical-SAS/reflector/commit/9a258abc0209b0ac3799532a507ea6a9125d703a))
|
|
||||||
|
|
||||||
## [0.14.0](https://github.com/Monadical-SAS/reflector/compare/v0.13.1...v0.14.0) (2025-10-08)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* Add calendar event data to transcript webhook payload ([#689](https://github.com/Monadical-SAS/reflector/issues/689)) ([5f6910e](https://github.com/Monadical-SAS/reflector/commit/5f6910e5131b7f28f86c9ecdcc57fed8412ee3cd))
|
|
||||||
* container build for www / github ([#672](https://github.com/Monadical-SAS/reflector/issues/672)) ([969bd84](https://github.com/Monadical-SAS/reflector/commit/969bd84fcc14851d1a101412a0ba115f1b7cde82))
|
|
||||||
* docker-compose for production frontend ([#664](https://github.com/Monadical-SAS/reflector/issues/664)) ([5bf64b5](https://github.com/Monadical-SAS/reflector/commit/5bf64b5a41f64535e22849b4bb11734d4dbb4aae))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* restore feature boolean logic ([#671](https://github.com/Monadical-SAS/reflector/issues/671)) ([3660884](https://github.com/Monadical-SAS/reflector/commit/36608849ec64e953e3be456172502762e3c33df9))
|
|
||||||
* security review ([#656](https://github.com/Monadical-SAS/reflector/issues/656)) ([5d98754](https://github.com/Monadical-SAS/reflector/commit/5d98754305c6c540dd194dda268544f6d88bfaf8))
|
|
||||||
* update transcript list on reprocess ([#676](https://github.com/Monadical-SAS/reflector/issues/676)) ([9a71af1](https://github.com/Monadical-SAS/reflector/commit/9a71af145ee9b833078c78d0c684590ab12e9f0e))
|
|
||||||
* upgrade nemo toolkit ([#678](https://github.com/Monadical-SAS/reflector/issues/678)) ([eef6dc3](https://github.com/Monadical-SAS/reflector/commit/eef6dc39037329b65804297786d852dddb0557f9))
|
|
||||||
|
|
||||||
## [0.13.1](https://github.com/Monadical-SAS/reflector/compare/v0.13.0...v0.13.1) (2025-09-22)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* TypeError on not all arguments converted during string formatting in logger ([#667](https://github.com/Monadical-SAS/reflector/issues/667)) ([565a629](https://github.com/Monadical-SAS/reflector/commit/565a62900f5a02fc946b68f9269a42190ed70ab6))
|
|
||||||
|
|
||||||
## [0.13.0](https://github.com/Monadical-SAS/reflector/compare/v0.12.1...v0.13.0) (2025-09-19)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* room form edit with enter ([#662](https://github.com/Monadical-SAS/reflector/issues/662)) ([47716f6](https://github.com/Monadical-SAS/reflector/commit/47716f6e5ddee952609d2fa0ffabdfa865286796))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* invalid cleanup call ([#660](https://github.com/Monadical-SAS/reflector/issues/660)) ([0abcebf](https://github.com/Monadical-SAS/reflector/commit/0abcebfc9491f87f605f21faa3e53996fafedd9a))
|
|
||||||
|
|
||||||
## [0.12.1](https://github.com/Monadical-SAS/reflector/compare/v0.12.0...v0.12.1) (2025-09-17)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* production blocked because having existing meeting with room_id null ([#657](https://github.com/Monadical-SAS/reflector/issues/657)) ([870e860](https://github.com/Monadical-SAS/reflector/commit/870e8605171a27155a9cbee215eeccb9a8d6c0a2))
|
|
||||||
|
|
||||||
## [0.12.0](https://github.com/Monadical-SAS/reflector/compare/v0.11.0...v0.12.0) (2025-09-17)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* calendar integration ([#608](https://github.com/Monadical-SAS/reflector/issues/608)) ([6f680b5](https://github.com/Monadical-SAS/reflector/commit/6f680b57954c688882c4ed49f40f161c52a00a24))
|
|
||||||
* self-hosted gpu api ([#636](https://github.com/Monadical-SAS/reflector/issues/636)) ([ab859d6](https://github.com/Monadical-SAS/reflector/commit/ab859d65a6bded904133a163a081a651b3938d42))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* ignore player hotkeys for text inputs ([#646](https://github.com/Monadical-SAS/reflector/issues/646)) ([fa049e8](https://github.com/Monadical-SAS/reflector/commit/fa049e8d068190ce7ea015fd9fcccb8543f54a3f))
|
|
||||||
|
|
||||||
## [0.11.0](https://github.com/Monadical-SAS/reflector/compare/v0.10.0...v0.11.0) (2025-09-16)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* remove profanity filter that was there for conference ([#652](https://github.com/Monadical-SAS/reflector/issues/652)) ([b42f7cf](https://github.com/Monadical-SAS/reflector/commit/b42f7cfc606783afcee792590efcc78b507468ab))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* zulip and consent handler on the file pipeline ([#645](https://github.com/Monadical-SAS/reflector/issues/645)) ([5f143fe](https://github.com/Monadical-SAS/reflector/commit/5f143fe3640875dcb56c26694254a93189281d17))
|
|
||||||
* zulip stream and topic selection in share dialog ([#644](https://github.com/Monadical-SAS/reflector/issues/644)) ([c546e69](https://github.com/Monadical-SAS/reflector/commit/c546e69739e68bb74fbc877eb62609928e5b8de6))
|
|
||||||
|
|
||||||
## [0.10.0](https://github.com/Monadical-SAS/reflector/compare/v0.9.0...v0.10.0) (2025-09-11)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* replace nextjs-config with environment variables ([#632](https://github.com/Monadical-SAS/reflector/issues/632)) ([369ecdf](https://github.com/Monadical-SAS/reflector/commit/369ecdff13f3862d926a9c0b87df52c9d94c4dde))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* anonymous users transcript permissions ([#621](https://github.com/Monadical-SAS/reflector/issues/621)) ([f81fe99](https://github.com/Monadical-SAS/reflector/commit/f81fe9948a9237b3e0001b2d8ca84f54d76878f9))
|
|
||||||
* auth post ([#624](https://github.com/Monadical-SAS/reflector/issues/624)) ([cde99ca](https://github.com/Monadical-SAS/reflector/commit/cde99ca2716f84ba26798f289047732f0448742e))
|
|
||||||
* auth post ([#626](https://github.com/Monadical-SAS/reflector/issues/626)) ([3b85ff3](https://github.com/Monadical-SAS/reflector/commit/3b85ff3bdf4fb053b103070646811bc990c0e70a))
|
|
||||||
* auth post ([#627](https://github.com/Monadical-SAS/reflector/issues/627)) ([962038e](https://github.com/Monadical-SAS/reflector/commit/962038ee3f2a555dc3c03856be0e4409456e0996))
|
|
||||||
* missing follow_redirects=True on modal endpoint ([#630](https://github.com/Monadical-SAS/reflector/issues/630)) ([fc363bd](https://github.com/Monadical-SAS/reflector/commit/fc363bd49b17b075e64f9186e5e0185abc325ea7))
|
|
||||||
* sync backend and frontend token refresh logic ([#614](https://github.com/Monadical-SAS/reflector/issues/614)) ([5a5b323](https://github.com/Monadical-SAS/reflector/commit/5a5b3233820df9536da75e87ce6184a983d4713a))
|
|
||||||
|
|
||||||
## [0.9.0](https://github.com/Monadical-SAS/reflector/compare/v0.8.2...v0.9.0) (2025-09-06)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* frontend openapi react query ([#606](https://github.com/Monadical-SAS/reflector/issues/606)) ([c4d2825](https://github.com/Monadical-SAS/reflector/commit/c4d2825c81f81ad8835629fbf6ea8c7383f8c31b))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* align whisper transcriber api with parakeet ([#602](https://github.com/Monadical-SAS/reflector/issues/602)) ([0663700](https://github.com/Monadical-SAS/reflector/commit/0663700a615a4af69a03c96c410f049e23ec9443))
|
|
||||||
* kv use tls explicit ([#610](https://github.com/Monadical-SAS/reflector/issues/610)) ([08d88ec](https://github.com/Monadical-SAS/reflector/commit/08d88ec349f38b0d13e0fa4cb73486c8dfd31836))
|
|
||||||
* source kind for file processing ([#601](https://github.com/Monadical-SAS/reflector/issues/601)) ([dc82f8b](https://github.com/Monadical-SAS/reflector/commit/dc82f8bb3bdf3ab3d4088e592a30fd63907319e1))
|
|
||||||
* token refresh locking ([#613](https://github.com/Monadical-SAS/reflector/issues/613)) ([7f5a4c9](https://github.com/Monadical-SAS/reflector/commit/7f5a4c9ddc7fd098860c8bdda2ca3b57f63ded2f))
|
|
||||||
|
|
||||||
## [0.8.2](https://github.com/Monadical-SAS/reflector/compare/v0.8.1...v0.8.2) (2025-08-29)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* search-logspam ([#593](https://github.com/Monadical-SAS/reflector/issues/593)) ([695d1a9](https://github.com/Monadical-SAS/reflector/commit/695d1a957d4cd862753049f9beed88836cabd5ab))
|
|
||||||
|
|
||||||
## [0.8.1](https://github.com/Monadical-SAS/reflector/compare/v0.8.0...v0.8.1) (2025-08-29)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* make webhook secret/url allowing null ([#590](https://github.com/Monadical-SAS/reflector/issues/590)) ([84a3812](https://github.com/Monadical-SAS/reflector/commit/84a381220bc606231d08d6f71d4babc818fa3c75))
|
|
||||||
|
|
||||||
## [0.8.0](https://github.com/Monadical-SAS/reflector/compare/v0.7.3...v0.8.0) (2025-08-29)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **cleanup:** add automatic data retention for public instances ([#574](https://github.com/Monadical-SAS/reflector/issues/574)) ([6f0c7c1](https://github.com/Monadical-SAS/reflector/commit/6f0c7c1a5e751713366886c8e764c2009e12ba72))
|
|
||||||
* **rooms:** add webhook for transcript completion ([#578](https://github.com/Monadical-SAS/reflector/issues/578)) ([88ed7cf](https://github.com/Monadical-SAS/reflector/commit/88ed7cfa7804794b9b54cad4c3facc8a98cf85fd))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* file pipeline status reporting and websocket updates ([#589](https://github.com/Monadical-SAS/reflector/issues/589)) ([9dfd769](https://github.com/Monadical-SAS/reflector/commit/9dfd76996f851cc52be54feea078adbc0816dc57))
|
|
||||||
* Igor/evaluation ([#575](https://github.com/Monadical-SAS/reflector/issues/575)) ([124ce03](https://github.com/Monadical-SAS/reflector/commit/124ce03bf86044c18313d27228a25da4bc20c9c5))
|
|
||||||
* optimize parakeet transcription batching algorithm ([#577](https://github.com/Monadical-SAS/reflector/issues/577)) ([7030e0f](https://github.com/Monadical-SAS/reflector/commit/7030e0f23649a8cf6c1eb6d5889684a41ce849ec))
|
|
||||||
|
|
||||||
## [0.7.3](https://github.com/Monadical-SAS/reflector/compare/v0.7.2...v0.7.3) (2025-08-22)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* cleaned repo, and get git-leaks clean ([359280d](https://github.com/Monadical-SAS/reflector/commit/359280dd340433ba4402ed69034094884c825e67))
|
|
||||||
* restore previous behavior on live pipeline + audio downscaler ([#561](https://github.com/Monadical-SAS/reflector/issues/561)) ([9265d20](https://github.com/Monadical-SAS/reflector/commit/9265d201b590d23c628c5f19251b70f473859043))
|
|
||||||
|
|
||||||
## [0.7.2](https://github.com/Monadical-SAS/reflector/compare/v0.7.1...v0.7.2) (2025-08-21)
|
## [0.7.2](https://github.com/Monadical-SAS/reflector/compare/v0.7.1...v0.7.2) (2025-08-21)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -66,6 +66,7 @@ pnpm install
|
|||||||
|
|
||||||
# Copy configuration templates
|
# Copy configuration templates
|
||||||
cp .env_template .env
|
cp .env_template .env
|
||||||
|
cp config-template.ts config.ts
|
||||||
```
|
```
|
||||||
|
|
||||||
**Development:**
|
**Development:**
|
||||||
@@ -151,7 +152,7 @@ All endpoints prefixed `/v1/`:
|
|||||||
|
|
||||||
**Frontend** (`www/.env`):
|
**Frontend** (`www/.env`):
|
||||||
- `NEXTAUTH_URL`, `NEXTAUTH_SECRET` - Authentication configuration
|
- `NEXTAUTH_URL`, `NEXTAUTH_SECRET` - Authentication configuration
|
||||||
- `REFLECTOR_API_URL` - Backend API endpoint
|
- `NEXT_PUBLIC_REFLECTOR_API_URL` - Backend API endpoint
|
||||||
- `REFLECTOR_DOMAIN_CONFIG` - Feature flags and domain settings
|
- `REFLECTOR_DOMAIN_CONFIG` - Feature flags and domain settings
|
||||||
|
|
||||||
## Testing Strategy
|
## Testing Strategy
|
||||||
|
|||||||
88
README.md
88
README.md
@@ -1,60 +1,43 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
<img width="100" alt="image" src="https://github.com/user-attachments/assets/66fb367b-2c89-4516-9912-f47ac59c6a7f"/>
|
|
||||||
|
|
||||||
# Reflector
|
# Reflector
|
||||||
|
|
||||||
Reflector is an AI-powered audio transcription and meeting analysis platform that provides real-time transcription, speaker diarization, translation and summarization for audio content and live meetings. It works 100% with local models (whisper/parakeet, pyannote, seamless-m4t, and your local llm like phi-4).
|
Reflector Audio Management and Analysis is a cutting-edge web application under development by Monadical. It utilizes AI to record meetings, providing a permanent record with transcripts, translations, and automated summaries.
|
||||||
|
|
||||||
[](https://github.com/monadical-sas/reflector/actions/workflows/test_server.yml)
|
[](https://github.com/monadical-sas/reflector/actions/workflows/pytests.yml)
|
||||||
[](https://opensource.org/licenses/MIT)
|
[](https://opensource.org/licenses/MIT)
|
||||||
</div>
|
</div>
|
||||||
</div>
|
|
||||||
|
## Screenshots
|
||||||
<table>
|
<table>
|
||||||
<tr>
|
<tr>
|
||||||
<td>
|
<td>
|
||||||
<a href="https://github.com/user-attachments/assets/21f5597c-2930-4899-a154-f7bd61a59e97">
|
<a href="https://github.com/user-attachments/assets/3a976930-56c1-47ef-8c76-55d3864309e3">
|
||||||
<img width="700" alt="image" src="https://github.com/user-attachments/assets/21f5597c-2930-4899-a154-f7bd61a59e97" />
|
<img width="700" alt="image" src="https://github.com/user-attachments/assets/3a976930-56c1-47ef-8c76-55d3864309e3" />
|
||||||
</a>
|
</a>
|
||||||
</td>
|
</td>
|
||||||
<td>
|
<td>
|
||||||
<a href="https://github.com/user-attachments/assets/f6b9399a-5e51-4bae-b807-59128d0a940c">
|
<a href="https://github.com/user-attachments/assets/bfe3bde3-08af-4426-a9a1-11ad5cd63b33">
|
||||||
<img width="700" alt="image" src="https://github.com/user-attachments/assets/f6b9399a-5e51-4bae-b807-59128d0a940c" />
|
<img width="700" alt="image" src="https://github.com/user-attachments/assets/bfe3bde3-08af-4426-a9a1-11ad5cd63b33" />
|
||||||
</a>
|
</a>
|
||||||
</td>
|
</td>
|
||||||
<td>
|
<td>
|
||||||
<a href="https://github.com/user-attachments/assets/a42ce460-c1fd-4489-a995-270516193897">
|
<a href="https://github.com/user-attachments/assets/7b60c9d0-efe4-474f-a27b-ea13bd0fabdc">
|
||||||
<img width="700" alt="image" src="https://github.com/user-attachments/assets/a42ce460-c1fd-4489-a995-270516193897" />
|
<img width="700" alt="image" src="https://github.com/user-attachments/assets/7b60c9d0-efe4-474f-a27b-ea13bd0fabdc" />
|
||||||
</a>
|
|
||||||
</td>
|
|
||||||
<td>
|
|
||||||
<a href="https://github.com/user-attachments/assets/21929f6d-c309-42fe-9c11-f1299e50fbd4">
|
|
||||||
<img width="700" alt="image" src="https://github.com/user-attachments/assets/21929f6d-c309-42fe-9c11-f1299e50fbd4" />
|
|
||||||
</a>
|
</a>
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
## What is Reflector?
|
|
||||||
|
|
||||||
Reflector is a web application that utilizes local models to process audio content, providing:
|
|
||||||
|
|
||||||
- **Real-time Transcription**: Convert speech to text using [Whisper](https://github.com/openai/whisper) (multi-language) or [Parakeet](https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2) (English) models
|
|
||||||
- **Speaker Diarization**: Identify and label different speakers using [Pyannote](https://github.com/pyannote/pyannote-audio) 3.1
|
|
||||||
- **Live Translation**: Translate audio content in real-time to many languages with [Facebook Seamless-M4T](https://github.com/facebookresearch/seamless_communication)
|
|
||||||
- **Topic Detection & Summarization**: Extract key topics and generate concise summaries using LLMs
|
|
||||||
- **Meeting Recording**: Create permanent records of meetings with searchable transcripts
|
|
||||||
|
|
||||||
Currently we provide [modal.com](https://modal.com/) gpu template to deploy.
|
|
||||||
|
|
||||||
## Background
|
## Background
|
||||||
|
|
||||||
The project architecture consists of three primary components:
|
The project architecture consists of three primary components:
|
||||||
|
|
||||||
- **Back-End**: Python server that offers an API and data persistence, found in `server/`.
|
|
||||||
- **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
|
- **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
|
||||||
- **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations.
|
- **Back-End**: Python server that offers an API and data persistence, found in `server/`.
|
||||||
|
- **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations. Most reliable option is Modal deployment
|
||||||
|
|
||||||
It also uses authentik for authentication if activated.
|
It also uses authentik for authentication if activated, and Vercel for deployment and configuration of the front-end.
|
||||||
|
|
||||||
## Contribution Guidelines
|
## Contribution Guidelines
|
||||||
|
|
||||||
@@ -89,8 +72,6 @@ Note: We currently do not have instructions for Windows users.
|
|||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
*Note: we're working toward better installation, theses instructions are not accurate for now*
|
|
||||||
|
|
||||||
### Frontend
|
### Frontend
|
||||||
|
|
||||||
Start with `cd www`.
|
Start with `cd www`.
|
||||||
@@ -99,10 +80,11 @@ Start with `cd www`.
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
pnpm install
|
pnpm install
|
||||||
cp .env.example .env
|
cp .env_template .env
|
||||||
|
cp config-template.ts config.ts
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, fill in the environment variables in `.env` as needed. If you are unsure on how to proceed, ask in Zulip.
|
Then, fill in the environment variables in `.env` and the configuration in `config.ts` as needed. If you are unsure on how to proceed, ask in Zulip.
|
||||||
|
|
||||||
**Run in development mode**
|
**Run in development mode**
|
||||||
|
|
||||||
@@ -167,41 +149,3 @@ You can manually process an audio file by calling the process tool:
|
|||||||
```bash
|
```bash
|
||||||
uv run python -m reflector.tools.process path/to/audio.wav
|
uv run python -m reflector.tools.process path/to/audio.wav
|
||||||
```
|
```
|
||||||
|
|
||||||
## Build-time env variables
|
|
||||||
|
|
||||||
Next.js projects are more used to NEXT_PUBLIC_ prefixed buildtime vars. We don't have those for the reason we need to serve a ccustomizable prebuild docker container.
|
|
||||||
|
|
||||||
Instead, all the variables are runtime. Variables needed to the frontend are served to the frontend app at initial render.
|
|
||||||
|
|
||||||
It also means there's no static prebuild and no static files to serve for js/html.
|
|
||||||
|
|
||||||
## Feature Flags
|
|
||||||
|
|
||||||
Reflector uses environment variable-based feature flags to control application functionality. These flags allow you to enable or disable features without code changes.
|
|
||||||
|
|
||||||
### Available Feature Flags
|
|
||||||
|
|
||||||
| Feature Flag | Environment Variable |
|
|
||||||
|-------------|---------------------|
|
|
||||||
| `requireLogin` | `FEATURE_REQUIRE_LOGIN` |
|
|
||||||
| `privacy` | `FEATURE_PRIVACY` |
|
|
||||||
| `browse` | `FEATURE_BROWSE` |
|
|
||||||
| `sendToZulip` | `FEATURE_SEND_TO_ZULIP` |
|
|
||||||
| `rooms` | `FEATURE_ROOMS` |
|
|
||||||
|
|
||||||
### Setting Feature Flags
|
|
||||||
|
|
||||||
Feature flags are controlled via environment variables using the pattern `FEATURE_{FEATURE_NAME}` where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.
|
|
||||||
|
|
||||||
**Examples:**
|
|
||||||
```bash
|
|
||||||
# Enable user authentication requirement
|
|
||||||
FEATURE_REQUIRE_LOGIN=true
|
|
||||||
|
|
||||||
# Disable browse functionality
|
|
||||||
FEATURE_BROWSE=false
|
|
||||||
|
|
||||||
# Enable Zulip integration
|
|
||||||
FEATURE_SEND_TO_ZULIP=true
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ services:
|
|||||||
- 1250:1250
|
- 1250:1250
|
||||||
volumes:
|
volumes:
|
||||||
- ./server/:/app/
|
- ./server/:/app/
|
||||||
- /app/.venv
|
|
||||||
env_file:
|
env_file:
|
||||||
- ./server/.env
|
- ./server/.env
|
||||||
environment:
|
environment:
|
||||||
@@ -17,7 +16,6 @@ services:
|
|||||||
context: server
|
context: server
|
||||||
volumes:
|
volumes:
|
||||||
- ./server/:/app/
|
- ./server/:/app/
|
||||||
- /app/.venv
|
|
||||||
env_file:
|
env_file:
|
||||||
- ./server/.env
|
- ./server/.env
|
||||||
environment:
|
environment:
|
||||||
@@ -28,7 +26,6 @@ services:
|
|||||||
context: server
|
context: server
|
||||||
volumes:
|
volumes:
|
||||||
- ./server/:/app/
|
- ./server/:/app/
|
||||||
- /app/.venv
|
|
||||||
env_file:
|
env_file:
|
||||||
- ./server/.env
|
- ./server/.env
|
||||||
environment:
|
environment:
|
||||||
@@ -39,7 +36,7 @@ services:
|
|||||||
ports:
|
ports:
|
||||||
- 6379:6379
|
- 6379:6379
|
||||||
web:
|
web:
|
||||||
image: node:22-alpine
|
image: node:18
|
||||||
ports:
|
ports:
|
||||||
- "3000:3000"
|
- "3000:3000"
|
||||||
command: sh -c "corepack enable && pnpm install && pnpm dev"
|
command: sh -c "corepack enable && pnpm install && pnpm dev"
|
||||||
@@ -50,8 +47,6 @@ services:
|
|||||||
- /app/node_modules
|
- /app/node_modules
|
||||||
env_file:
|
env_file:
|
||||||
- ./www/.env.local
|
- ./www/.env.local
|
||||||
environment:
|
|
||||||
- NODE_ENV=development
|
|
||||||
|
|
||||||
postgres:
|
postgres:
|
||||||
image: postgres:17
|
image: postgres:17
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
# Production Docker Compose configuration for Frontend
|
|
||||||
# Usage: docker compose -f docker-compose.prod.yml up -d
|
|
||||||
|
|
||||||
services:
|
|
||||||
web:
|
|
||||||
build:
|
|
||||||
context: ./www
|
|
||||||
dockerfile: Dockerfile
|
|
||||||
image: reflector-frontend:latest
|
|
||||||
environment:
|
|
||||||
- KV_URL=${KV_URL:-redis://redis:6379}
|
|
||||||
- SITE_URL=${SITE_URL}
|
|
||||||
- API_URL=${API_URL}
|
|
||||||
- WEBSOCKET_URL=${WEBSOCKET_URL}
|
|
||||||
- NEXTAUTH_URL=${NEXTAUTH_URL:-http://localhost:3000}
|
|
||||||
- NEXTAUTH_SECRET=${NEXTAUTH_SECRET:-changeme-in-production}
|
|
||||||
- AUTHENTIK_ISSUER=${AUTHENTIK_ISSUER}
|
|
||||||
- AUTHENTIK_CLIENT_ID=${AUTHENTIK_CLIENT_ID}
|
|
||||||
- AUTHENTIK_CLIENT_SECRET=${AUTHENTIK_CLIENT_SECRET}
|
|
||||||
- AUTHENTIK_REFRESH_TOKEN_URL=${AUTHENTIK_REFRESH_TOKEN_URL}
|
|
||||||
- SENTRY_DSN=${SENTRY_DSN}
|
|
||||||
- SENTRY_IGNORE_API_RESOLUTION_ERROR=${SENTRY_IGNORE_API_RESOLUTION_ERROR:-1}
|
|
||||||
depends_on:
|
|
||||||
- redis
|
|
||||||
restart: unless-stopped
|
|
||||||
|
|
||||||
redis:
|
|
||||||
image: redis:7.2-alpine
|
|
||||||
restart: unless-stopped
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "redis-cli", "ping"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 3s
|
|
||||||
retries: 3
|
|
||||||
volumes:
|
|
||||||
- redis_data:/data
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
redis_data:
|
|
||||||
33
gpu/modal_deployments/.gitignore
vendored
33
gpu/modal_deployments/.gitignore
vendored
@@ -1,33 +0,0 @@
|
|||||||
# OS / Editor
|
|
||||||
.DS_Store
|
|
||||||
.vscode/
|
|
||||||
.idea/
|
|
||||||
|
|
||||||
# Python
|
|
||||||
__pycache__/
|
|
||||||
*.py[cod]
|
|
||||||
*$py.class
|
|
||||||
|
|
||||||
# Logs
|
|
||||||
*.log
|
|
||||||
|
|
||||||
# Env and secrets
|
|
||||||
.env
|
|
||||||
.env.*
|
|
||||||
*.env
|
|
||||||
*.secret
|
|
||||||
|
|
||||||
# Build / dist
|
|
||||||
build/
|
|
||||||
dist/
|
|
||||||
.eggs/
|
|
||||||
*.egg-info/
|
|
||||||
|
|
||||||
# Coverage / test
|
|
||||||
.pytest_cache/
|
|
||||||
.coverage*
|
|
||||||
htmlcov/
|
|
||||||
|
|
||||||
# Modal local state (if any)
|
|
||||||
modal_mounts/
|
|
||||||
.modal_cache/
|
|
||||||
@@ -1,608 +0,0 @@
|
|||||||
import os
|
|
||||||
import sys
|
|
||||||
import threading
|
|
||||||
import uuid
|
|
||||||
from typing import Generator, Mapping, NamedTuple, NewType, TypedDict
|
|
||||||
from urllib.parse import urlparse
|
|
||||||
|
|
||||||
import modal
|
|
||||||
|
|
||||||
MODEL_NAME = "large-v2"
|
|
||||||
MODEL_COMPUTE_TYPE: str = "float16"
|
|
||||||
MODEL_NUM_WORKERS: int = 1
|
|
||||||
MINUTES = 60 # seconds
|
|
||||||
SAMPLERATE = 16000
|
|
||||||
UPLOADS_PATH = "/uploads"
|
|
||||||
CACHE_PATH = "/models"
|
|
||||||
SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
|
|
||||||
VAD_CONFIG = {
|
|
||||||
"batch_max_duration": 30.0,
|
|
||||||
"silence_padding": 0.5,
|
|
||||||
"window_size": 512,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
WhisperUniqFilename = NewType("WhisperUniqFilename", str)
|
|
||||||
AudioFileExtension = NewType("AudioFileExtension", str)
|
|
||||||
|
|
||||||
app = modal.App("reflector-transcriber")
|
|
||||||
|
|
||||||
model_cache = modal.Volume.from_name("models", create_if_missing=True)
|
|
||||||
upload_volume = modal.Volume.from_name("whisper-uploads", create_if_missing=True)
|
|
||||||
|
|
||||||
|
|
||||||
class TimeSegment(NamedTuple):
|
|
||||||
"""Represents a time segment with start and end times."""
|
|
||||||
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
|
|
||||||
|
|
||||||
class AudioSegment(NamedTuple):
|
|
||||||
"""Represents an audio segment with timing and audio data."""
|
|
||||||
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
audio: any
|
|
||||||
|
|
||||||
|
|
||||||
class TranscriptResult(NamedTuple):
|
|
||||||
"""Represents a transcription result with text and word timings."""
|
|
||||||
|
|
||||||
text: str
|
|
||||||
words: list["WordTiming"]
|
|
||||||
|
|
||||||
|
|
||||||
class WordTiming(TypedDict):
|
|
||||||
"""Represents a word with its timing information."""
|
|
||||||
|
|
||||||
word: str
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
|
|
||||||
|
|
||||||
def download_model():
|
|
||||||
from faster_whisper import download_model
|
|
||||||
|
|
||||||
model_cache.reload()
|
|
||||||
|
|
||||||
download_model(MODEL_NAME, cache_dir=CACHE_PATH)
|
|
||||||
|
|
||||||
model_cache.commit()
|
|
||||||
|
|
||||||
|
|
||||||
image = (
|
|
||||||
modal.Image.debian_slim(python_version="3.12")
|
|
||||||
.env(
|
|
||||||
{
|
|
||||||
"HF_HUB_ENABLE_HF_TRANSFER": "1",
|
|
||||||
"LD_LIBRARY_PATH": (
|
|
||||||
"/usr/local/lib/python3.12/site-packages/nvidia/cudnn/lib/:"
|
|
||||||
"/opt/conda/lib/python3.12/site-packages/nvidia/cublas/lib/"
|
|
||||||
),
|
|
||||||
}
|
|
||||||
)
|
|
||||||
.apt_install("ffmpeg")
|
|
||||||
.pip_install(
|
|
||||||
"huggingface_hub==0.27.1",
|
|
||||||
"hf-transfer==0.1.9",
|
|
||||||
"torch==2.5.1",
|
|
||||||
"faster-whisper==1.1.1",
|
|
||||||
"fastapi==0.115.12",
|
|
||||||
"requests",
|
|
||||||
"librosa==0.10.1",
|
|
||||||
"numpy<2",
|
|
||||||
"silero-vad==5.1.0",
|
|
||||||
)
|
|
||||||
.run_function(download_model, volumes={CACHE_PATH: model_cache})
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
|
|
||||||
parsed_url = urlparse(url)
|
|
||||||
url_path = parsed_url.path
|
|
||||||
|
|
||||||
for ext in SUPPORTED_FILE_EXTENSIONS:
|
|
||||||
if url_path.lower().endswith(f".{ext}"):
|
|
||||||
return AudioFileExtension(ext)
|
|
||||||
|
|
||||||
content_type = headers.get("content-type", "").lower()
|
|
||||||
if "audio/mpeg" in content_type or "audio/mp3" in content_type:
|
|
||||||
return AudioFileExtension("mp3")
|
|
||||||
if "audio/wav" in content_type:
|
|
||||||
return AudioFileExtension("wav")
|
|
||||||
if "audio/mp4" in content_type:
|
|
||||||
return AudioFileExtension("mp4")
|
|
||||||
|
|
||||||
raise ValueError(
|
|
||||||
f"Unsupported audio format for URL: {url}. "
|
|
||||||
f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def download_audio_to_volume(
|
|
||||||
audio_file_url: str,
|
|
||||||
) -> tuple[WhisperUniqFilename, AudioFileExtension]:
|
|
||||||
import requests
|
|
||||||
from fastapi import HTTPException
|
|
||||||
|
|
||||||
response = requests.head(audio_file_url, allow_redirects=True)
|
|
||||||
if response.status_code == 404:
|
|
||||||
raise HTTPException(status_code=404, detail="Audio file not found")
|
|
||||||
|
|
||||||
response = requests.get(audio_file_url, allow_redirects=True)
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
audio_suffix = detect_audio_format(audio_file_url, response.headers)
|
|
||||||
unique_filename = WhisperUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
|
|
||||||
file_path = f"{UPLOADS_PATH}/{unique_filename}"
|
|
||||||
|
|
||||||
with open(file_path, "wb") as f:
|
|
||||||
f.write(response.content)
|
|
||||||
|
|
||||||
upload_volume.commit()
|
|
||||||
return unique_filename, audio_suffix
|
|
||||||
|
|
||||||
|
|
||||||
def pad_audio(audio_array, sample_rate: int = SAMPLERATE):
|
|
||||||
"""Add 0.5s of silence if audio is shorter than the silence_padding window.
|
|
||||||
|
|
||||||
Whisper does not require this strictly, but aligning behavior with Parakeet
|
|
||||||
avoids edge-case crashes on extremely short inputs and makes comparisons easier.
|
|
||||||
"""
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
audio_duration = len(audio_array) / sample_rate
|
|
||||||
if audio_duration < VAD_CONFIG["silence_padding"]:
|
|
||||||
silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
|
|
||||||
silence = np.zeros(silence_samples, dtype=np.float32)
|
|
||||||
return np.concatenate([audio_array, silence])
|
|
||||||
return audio_array
|
|
||||||
|
|
||||||
|
|
||||||
@app.cls(
|
|
||||||
gpu="A10G",
|
|
||||||
timeout=5 * MINUTES,
|
|
||||||
scaledown_window=5 * MINUTES,
|
|
||||||
image=image,
|
|
||||||
volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
|
|
||||||
)
|
|
||||||
@modal.concurrent(max_inputs=10)
|
|
||||||
class TranscriberWhisperLive:
|
|
||||||
"""Live transcriber class for small audio segments (A10G).
|
|
||||||
|
|
||||||
Mirrors the Parakeet live class API but uses Faster-Whisper under the hood.
|
|
||||||
"""
|
|
||||||
|
|
||||||
@modal.enter()
|
|
||||||
def enter(self):
|
|
||||||
import faster_whisper
|
|
||||||
import torch
|
|
||||||
|
|
||||||
self.lock = threading.Lock()
|
|
||||||
self.use_gpu = torch.cuda.is_available()
|
|
||||||
self.device = "cuda" if self.use_gpu else "cpu"
|
|
||||||
self.model = faster_whisper.WhisperModel(
|
|
||||||
MODEL_NAME,
|
|
||||||
device=self.device,
|
|
||||||
compute_type=MODEL_COMPUTE_TYPE,
|
|
||||||
num_workers=MODEL_NUM_WORKERS,
|
|
||||||
download_root=CACHE_PATH,
|
|
||||||
local_files_only=True,
|
|
||||||
)
|
|
||||||
print(f"Model is on device: {self.device}")
|
|
||||||
|
|
||||||
@modal.method()
|
|
||||||
def transcribe_segment(
|
|
||||||
self,
|
|
||||||
filename: str,
|
|
||||||
language: str = "en",
|
|
||||||
):
|
|
||||||
"""Transcribe a single uploaded audio file by filename."""
|
|
||||||
upload_volume.reload()
|
|
||||||
|
|
||||||
file_path = f"{UPLOADS_PATH}/{filename}"
|
|
||||||
if not os.path.exists(file_path):
|
|
||||||
raise FileNotFoundError(f"File not found: {file_path}")
|
|
||||||
|
|
||||||
with self.lock:
|
|
||||||
with NoStdStreams():
|
|
||||||
segments, _ = self.model.transcribe(
|
|
||||||
file_path,
|
|
||||||
language=language,
|
|
||||||
beam_size=5,
|
|
||||||
word_timestamps=True,
|
|
||||||
vad_filter=True,
|
|
||||||
vad_parameters={"min_silence_duration_ms": 500},
|
|
||||||
)
|
|
||||||
|
|
||||||
segments = list(segments)
|
|
||||||
text = "".join(segment.text for segment in segments).strip()
|
|
||||||
words = [
|
|
||||||
{
|
|
||||||
"word": word.word,
|
|
||||||
"start": round(float(word.start), 2),
|
|
||||||
"end": round(float(word.end), 2),
|
|
||||||
}
|
|
||||||
for segment in segments
|
|
||||||
for word in segment.words
|
|
||||||
]
|
|
||||||
|
|
||||||
return {"text": text, "words": words}
|
|
||||||
|
|
||||||
@modal.method()
|
|
||||||
def transcribe_batch(
|
|
||||||
self,
|
|
||||||
filenames: list[str],
|
|
||||||
language: str = "en",
|
|
||||||
):
|
|
||||||
"""Transcribe multiple uploaded audio files and return per-file results."""
|
|
||||||
upload_volume.reload()
|
|
||||||
|
|
||||||
results = []
|
|
||||||
for filename in filenames:
|
|
||||||
file_path = f"{UPLOADS_PATH}/{filename}"
|
|
||||||
if not os.path.exists(file_path):
|
|
||||||
raise FileNotFoundError(f"Batch file not found: {file_path}")
|
|
||||||
|
|
||||||
with self.lock:
|
|
||||||
with NoStdStreams():
|
|
||||||
segments, _ = self.model.transcribe(
|
|
||||||
file_path,
|
|
||||||
language=language,
|
|
||||||
beam_size=5,
|
|
||||||
word_timestamps=True,
|
|
||||||
vad_filter=True,
|
|
||||||
vad_parameters={"min_silence_duration_ms": 500},
|
|
||||||
)
|
|
||||||
|
|
||||||
segments = list(segments)
|
|
||||||
text = "".join(seg.text for seg in segments).strip()
|
|
||||||
words = [
|
|
||||||
{
|
|
||||||
"word": w.word,
|
|
||||||
"start": round(float(w.start), 2),
|
|
||||||
"end": round(float(w.end), 2),
|
|
||||||
}
|
|
||||||
for seg in segments
|
|
||||||
for w in seg.words
|
|
||||||
]
|
|
||||||
|
|
||||||
results.append(
|
|
||||||
{
|
|
||||||
"filename": filename,
|
|
||||||
"text": text,
|
|
||||||
"words": words,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
|
|
||||||
@app.cls(
|
|
||||||
gpu="L40S",
|
|
||||||
timeout=15 * MINUTES,
|
|
||||||
image=image,
|
|
||||||
volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
|
|
||||||
)
|
|
||||||
class TranscriberWhisperFile:
|
|
||||||
"""File transcriber for larger/longer audio, using VAD-driven batching (L40S)."""
|
|
||||||
|
|
||||||
@modal.enter()
|
|
||||||
def enter(self):
|
|
||||||
import faster_whisper
|
|
||||||
import torch
|
|
||||||
from silero_vad import load_silero_vad
|
|
||||||
|
|
||||||
self.lock = threading.Lock()
|
|
||||||
self.use_gpu = torch.cuda.is_available()
|
|
||||||
self.device = "cuda" if self.use_gpu else "cpu"
|
|
||||||
self.model = faster_whisper.WhisperModel(
|
|
||||||
MODEL_NAME,
|
|
||||||
device=self.device,
|
|
||||||
compute_type=MODEL_COMPUTE_TYPE,
|
|
||||||
num_workers=MODEL_NUM_WORKERS,
|
|
||||||
download_root=CACHE_PATH,
|
|
||||||
local_files_only=True,
|
|
||||||
)
|
|
||||||
self.vad_model = load_silero_vad(onnx=False)
|
|
||||||
|
|
||||||
@modal.method()
|
|
||||||
def transcribe_segment(
|
|
||||||
self, filename: str, timestamp_offset: float = 0.0, language: str = "en"
|
|
||||||
):
|
|
||||||
import librosa
|
|
||||||
import numpy as np
|
|
||||||
from silero_vad import VADIterator
|
|
||||||
|
|
||||||
def vad_segments(
|
|
||||||
audio_array,
|
|
||||||
sample_rate: int = SAMPLERATE,
|
|
||||||
window_size: int = VAD_CONFIG["window_size"],
|
|
||||||
) -> Generator[TimeSegment, None, None]:
|
|
||||||
"""Generate speech segments as TimeSegment using Silero VAD."""
|
|
||||||
iterator = VADIterator(self.vad_model, sampling_rate=sample_rate)
|
|
||||||
start = None
|
|
||||||
for i in range(0, len(audio_array), window_size):
|
|
||||||
chunk = audio_array[i : i + window_size]
|
|
||||||
if len(chunk) < window_size:
|
|
||||||
chunk = np.pad(
|
|
||||||
chunk, (0, window_size - len(chunk)), mode="constant"
|
|
||||||
)
|
|
||||||
speech = iterator(chunk)
|
|
||||||
if not speech:
|
|
||||||
continue
|
|
||||||
if "start" in speech:
|
|
||||||
start = speech["start"]
|
|
||||||
continue
|
|
||||||
if "end" in speech and start is not None:
|
|
||||||
end = speech["end"]
|
|
||||||
yield TimeSegment(
|
|
||||||
start / float(SAMPLERATE), end / float(SAMPLERATE)
|
|
||||||
)
|
|
||||||
start = None
|
|
||||||
iterator.reset_states()
|
|
||||||
|
|
||||||
upload_volume.reload()
|
|
||||||
file_path = f"{UPLOADS_PATH}/{filename}"
|
|
||||||
if not os.path.exists(file_path):
|
|
||||||
raise FileNotFoundError(f"File not found: {file_path}")
|
|
||||||
|
|
||||||
audio_array, _sr = librosa.load(file_path, sr=SAMPLERATE, mono=True)
|
|
||||||
|
|
||||||
# Batch segments up to ~30s windows by merging contiguous VAD segments
|
|
||||||
merged_batches: list[TimeSegment] = []
|
|
||||||
batch_start = None
|
|
||||||
batch_end = None
|
|
||||||
max_duration = VAD_CONFIG["batch_max_duration"]
|
|
||||||
for segment in vad_segments(audio_array):
|
|
||||||
seg_start, seg_end = segment.start, segment.end
|
|
||||||
if batch_start is None:
|
|
||||||
batch_start, batch_end = seg_start, seg_end
|
|
||||||
continue
|
|
||||||
if seg_end - batch_start <= max_duration:
|
|
||||||
batch_end = seg_end
|
|
||||||
else:
|
|
||||||
merged_batches.append(TimeSegment(batch_start, batch_end))
|
|
||||||
batch_start, batch_end = seg_start, seg_end
|
|
||||||
if batch_start is not None and batch_end is not None:
|
|
||||||
merged_batches.append(TimeSegment(batch_start, batch_end))
|
|
||||||
|
|
||||||
all_text = []
|
|
||||||
all_words = []
|
|
||||||
|
|
||||||
for segment in merged_batches:
|
|
||||||
start_time, end_time = segment.start, segment.end
|
|
||||||
s_idx = int(start_time * SAMPLERATE)
|
|
||||||
e_idx = int(end_time * SAMPLERATE)
|
|
||||||
segment = audio_array[s_idx:e_idx]
|
|
||||||
segment = pad_audio(segment, SAMPLERATE)
|
|
||||||
|
|
||||||
with self.lock:
|
|
||||||
segments, _ = self.model.transcribe(
|
|
||||||
segment,
|
|
||||||
language=language,
|
|
||||||
beam_size=5,
|
|
||||||
word_timestamps=True,
|
|
||||||
vad_filter=True,
|
|
||||||
vad_parameters={"min_silence_duration_ms": 500},
|
|
||||||
)
|
|
||||||
|
|
||||||
segments = list(segments)
|
|
||||||
text = "".join(seg.text for seg in segments).strip()
|
|
||||||
words = [
|
|
||||||
{
|
|
||||||
"word": w.word,
|
|
||||||
"start": round(float(w.start) + start_time + timestamp_offset, 2),
|
|
||||||
"end": round(float(w.end) + start_time + timestamp_offset, 2),
|
|
||||||
}
|
|
||||||
for seg in segments
|
|
||||||
for w in seg.words
|
|
||||||
]
|
|
||||||
if text:
|
|
||||||
all_text.append(text)
|
|
||||||
all_words.extend(words)
|
|
||||||
|
|
||||||
return {"text": " ".join(all_text), "words": all_words}
|
|
||||||
|
|
||||||
|
|
||||||
def detect_audio_format(url: str, headers: dict) -> str:
|
|
||||||
from urllib.parse import urlparse
|
|
||||||
|
|
||||||
from fastapi import HTTPException
|
|
||||||
|
|
||||||
url_path = urlparse(url).path
|
|
||||||
for ext in SUPPORTED_FILE_EXTENSIONS:
|
|
||||||
if url_path.lower().endswith(f".{ext}"):
|
|
||||||
return ext
|
|
||||||
|
|
||||||
content_type = headers.get("content-type", "").lower()
|
|
||||||
if "audio/mpeg" in content_type or "audio/mp3" in content_type:
|
|
||||||
return "mp3"
|
|
||||||
if "audio/wav" in content_type:
|
|
||||||
return "wav"
|
|
||||||
if "audio/mp4" in content_type:
|
|
||||||
return "mp4"
|
|
||||||
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400,
|
|
||||||
detail=(
|
|
||||||
f"Unsupported audio format for URL. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def download_audio_to_volume(audio_file_url: str) -> tuple[str, str]:
|
|
||||||
import requests
|
|
||||||
from fastapi import HTTPException
|
|
||||||
|
|
||||||
response = requests.head(audio_file_url, allow_redirects=True)
|
|
||||||
if response.status_code == 404:
|
|
||||||
raise HTTPException(status_code=404, detail="Audio file not found")
|
|
||||||
|
|
||||||
response = requests.get(audio_file_url, allow_redirects=True)
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
audio_suffix = detect_audio_format(audio_file_url, response.headers)
|
|
||||||
unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
|
|
||||||
file_path = f"{UPLOADS_PATH}/{unique_filename}"
|
|
||||||
|
|
||||||
with open(file_path, "wb") as f:
|
|
||||||
f.write(response.content)
|
|
||||||
|
|
||||||
upload_volume.commit()
|
|
||||||
return unique_filename, audio_suffix
|
|
||||||
|
|
||||||
|
|
||||||
@app.function(
|
|
||||||
scaledown_window=60,
|
|
||||||
timeout=600,
|
|
||||||
secrets=[
|
|
||||||
modal.Secret.from_name("reflector-gpu"),
|
|
||||||
],
|
|
||||||
volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
|
|
||||||
image=image,
|
|
||||||
)
|
|
||||||
@modal.concurrent(max_inputs=40)
|
|
||||||
@modal.asgi_app()
|
|
||||||
def web():
|
|
||||||
from fastapi import (
|
|
||||||
Body,
|
|
||||||
Depends,
|
|
||||||
FastAPI,
|
|
||||||
Form,
|
|
||||||
HTTPException,
|
|
||||||
UploadFile,
|
|
||||||
status,
|
|
||||||
)
|
|
||||||
from fastapi.security import OAuth2PasswordBearer
|
|
||||||
|
|
||||||
transcriber_live = TranscriberWhisperLive()
|
|
||||||
transcriber_file = TranscriberWhisperFile()
|
|
||||||
|
|
||||||
app = FastAPI()
|
|
||||||
|
|
||||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
|
|
||||||
|
|
||||||
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
|
|
||||||
if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
|
|
||||||
return
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
|
||||||
detail="Invalid API key",
|
|
||||||
headers={"WWW-Authenticate": "Bearer"},
|
|
||||||
)
|
|
||||||
|
|
||||||
class TranscriptResponse(dict):
|
|
||||||
pass
|
|
||||||
|
|
||||||
@app.post("/v1/audio/transcriptions", dependencies=[Depends(apikey_auth)])
|
|
||||||
def transcribe(
|
|
||||||
file: UploadFile = None,
|
|
||||||
files: list[UploadFile] | None = None,
|
|
||||||
model: str = Form(MODEL_NAME),
|
|
||||||
language: str = Form("en"),
|
|
||||||
batch: bool = Form(False),
|
|
||||||
):
|
|
||||||
if not file and not files:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400, detail="Either 'file' or 'files' parameter is required"
|
|
||||||
)
|
|
||||||
if batch and not files:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400, detail="Batch transcription requires 'files'"
|
|
||||||
)
|
|
||||||
|
|
||||||
upload_files = [file] if file else files
|
|
||||||
|
|
||||||
uploaded_filenames: list[str] = []
|
|
||||||
for upload_file in upload_files:
|
|
||||||
audio_suffix = upload_file.filename.split(".")[-1]
|
|
||||||
if audio_suffix not in SUPPORTED_FILE_EXTENSIONS:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400,
|
|
||||||
detail=(
|
|
||||||
f"Unsupported audio format. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
|
|
||||||
file_path = f"{UPLOADS_PATH}/{unique_filename}"
|
|
||||||
with open(file_path, "wb") as f:
|
|
||||||
content = upload_file.file.read()
|
|
||||||
f.write(content)
|
|
||||||
uploaded_filenames.append(unique_filename)
|
|
||||||
|
|
||||||
upload_volume.commit()
|
|
||||||
|
|
||||||
try:
|
|
||||||
if batch and len(upload_files) > 1:
|
|
||||||
func = transcriber_live.transcribe_batch.spawn(
|
|
||||||
filenames=uploaded_filenames,
|
|
||||||
language=language,
|
|
||||||
)
|
|
||||||
results = func.get()
|
|
||||||
return {"results": results}
|
|
||||||
|
|
||||||
results = []
|
|
||||||
for filename in uploaded_filenames:
|
|
||||||
func = transcriber_live.transcribe_segment.spawn(
|
|
||||||
filename=filename,
|
|
||||||
language=language,
|
|
||||||
)
|
|
||||||
result = func.get()
|
|
||||||
result["filename"] = filename
|
|
||||||
results.append(result)
|
|
||||||
|
|
||||||
return {"results": results} if len(results) > 1 else results[0]
|
|
||||||
finally:
|
|
||||||
for filename in uploaded_filenames:
|
|
||||||
try:
|
|
||||||
file_path = f"{UPLOADS_PATH}/{filename}"
|
|
||||||
os.remove(file_path)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
upload_volume.commit()
|
|
||||||
|
|
||||||
@app.post("/v1/audio/transcriptions-from-url", dependencies=[Depends(apikey_auth)])
|
|
||||||
def transcribe_from_url(
|
|
||||||
audio_file_url: str = Body(
|
|
||||||
..., description="URL of the audio file to transcribe"
|
|
||||||
),
|
|
||||||
model: str = Body(MODEL_NAME),
|
|
||||||
language: str = Body("en"),
|
|
||||||
timestamp_offset: float = Body(0.0),
|
|
||||||
):
|
|
||||||
unique_filename, _audio_suffix = download_audio_to_volume(audio_file_url)
|
|
||||||
try:
|
|
||||||
func = transcriber_file.transcribe_segment.spawn(
|
|
||||||
filename=unique_filename,
|
|
||||||
timestamp_offset=timestamp_offset,
|
|
||||||
language=language,
|
|
||||||
)
|
|
||||||
result = func.get()
|
|
||||||
return result
|
|
||||||
finally:
|
|
||||||
try:
|
|
||||||
file_path = f"{UPLOADS_PATH}/{unique_filename}"
|
|
||||||
os.remove(file_path)
|
|
||||||
upload_volume.commit()
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
return app
|
|
||||||
|
|
||||||
|
|
||||||
class NoStdStreams:
|
|
||||||
def __init__(self):
|
|
||||||
self.devnull = open(os.devnull, "w")
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
self._stdout, self._stderr = sys.stdout, sys.stderr
|
|
||||||
self._stdout.flush()
|
|
||||||
self._stderr.flush()
|
|
||||||
sys.stdout, sys.stderr = self.devnull, self.devnull
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, exc_value, traceback):
|
|
||||||
sys.stdout, sys.stderr = self._stdout, self._stderr
|
|
||||||
self.devnull.close()
|
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
REFLECTOR_GPU_APIKEY=
|
|
||||||
HF_TOKEN=
|
|
||||||
38 gpu/self_hosted/.gitignore vendored
@@ -1,38 +0,0 @@
cache/

# OS / Editor
.DS_Store
.vscode/
.idea/

# Python
__pycache__/
*.py[cod]
*$py.class

# Env and secrets
.env
*.env
*.secret
HF_TOKEN
REFLECTOR_GPU_APIKEY

# Virtual env / uv
.venv/
venv/
ENV/
uv/

# Build / dist
build/
dist/
.eggs/
*.egg-info/

# Coverage / test
.pytest_cache/
.coverage*
htmlcov/

# Logs
*.log
@@ -1,46 +0,0 @@
FROM python:3.12-slim

ENV PYTHONUNBUFFERED=1 \
    UV_LINK_MODE=copy \
    UV_NO_CACHE=1

WORKDIR /tmp
RUN apt-get update \
    && apt-get install -y \
    ffmpeg \
    curl \
    ca-certificates \
    gnupg \
    wget \
    && apt-get clean
# Add NVIDIA CUDA repo for Debian 12 (bookworm) and install cuDNN 9 for CUDA 12
ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /cuda-keyring.deb
RUN dpkg -i /cuda-keyring.deb \
    && rm /cuda-keyring.deb \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
    cuda-cudart-12-6 \
    libcublas-12-6 \
    libcudnn9-cuda-12 \
    libcudnn9-dev-cuda-12 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
ADD https://astral.sh/uv/install.sh /uv-installer.sh
RUN sh /uv-installer.sh && rm /uv-installer.sh
ENV PATH="/root/.local/bin/:$PATH"
ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH"

RUN mkdir -p /app
WORKDIR /app
COPY pyproject.toml uv.lock /app/

COPY ./app /app/app
COPY ./main.py /app/
COPY ./runserver.sh /app/

EXPOSE 8000

CMD ["sh", "/app/runserver.sh"]
@@ -1,73 +0,0 @@
# Self-hosted Model API

Run transcription, translation, and diarization services compatible with Reflector's GPU Model API. Works on CPU or GPU.

Environment variables

- REFLECTOR_GPU_APIKEY: Optional Bearer token. If unset, auth is disabled.
- HF_TOKEN: Optional. Required for diarization to download pyannote pipelines

Requirements

- FFmpeg must be installed and on PATH (used for URL-based and segmented transcription)
- Python 3.12+
- NVIDIA GPU optional. If available, it will be used automatically

Local run
Set env vars in self_hosted/.env file
uv sync

uv run uvicorn main:app --host 0.0.0.0 --port 8000

Authentication

- If REFLECTOR_GPU_APIKEY is set, include header: Authorization: Bearer <key>
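
For example, a client can pass the Bearer token on any of the endpoints listed below. A minimal sketch using Python requests; the localhost URL, the `dev-key` value, and the `audio.mp3` file are assumptions for a local test:

```python
# Minimal sketch: call the self-hosted API with the Bearer token.
# Assumes a local server on port 8000 and REFLECTOR_GPU_APIKEY=dev-key.
import requests

headers = {"Authorization": "Bearer dev-key"}
with open("audio.mp3", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/v1/audio/transcriptions",
        headers=headers,
        files={"file": ("audio.mp3", f, "audio/mpeg")},
        data={"language": "en"},
    )
resp.raise_for_status()
print(resp.json()["text"])
```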
Endpoints

- POST /v1/audio/transcriptions

  - multipart/form-data
  - fields: file (single file) OR files[] (multiple files), language, batch (true/false)
  - response: single { text, words, filename } or { results: [ ... ] }

- POST /v1/audio/transcriptions-from-url

  - application/json
  - body: { audio_file_url, language, timestamp_offset }
  - response: { text, words }

- POST /translate

  - text: query parameter
  - body (application/json): { source_language, target_language }
  - response: { text: { <src>: original, <tgt>: translated } }

- POST /diarize
  - query parameters: audio_file_url, timestamp (optional)
  - requires HF_TOKEN to be set (for pyannote)
  - response: { diarization: [ { start, end, speaker } ] }

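As an illustration of the /translate shape listed above (text in the query string, languages in the JSON body), a minimal sketch, again assuming a local server on port 8000 with REFLECTOR_GPU_APIKEY=dev-key:

```python
# Minimal sketch: translate a sentence via the /translate endpoint.
import requests

resp = requests.post(
    "http://localhost:8000/translate",
    headers={"Authorization": "Bearer dev-key"},
    params={"text": "Hello world"},  # text goes in the query string
    json={"source_language": "en", "target_language": "fr"},
)
resp.raise_for_status()
print(resp.json()["text"])  # {"en": "Hello world", "fr": "..."}
```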
OpenAPI docs

- Visit /docs when the server is running

Docker

- Not yet provided in this directory. A Dockerfile will be added later. For now, use Local run above

Conformance tests

# From this directory

TRANSCRIPT_URL=http://localhost:8000 \
TRANSCRIPT_API_KEY=dev-key \
uv run -m pytest -m model_api --no-cov ../../server/tests/test_model_api_transcript.py

TRANSLATION_URL=http://localhost:8000 \
TRANSLATION_API_KEY=dev-key \
uv run -m pytest -m model_api --no-cov ../../server/tests/test_model_api_translation.py

DIARIZATION_URL=http://localhost:8000 \
DIARIZATION_API_KEY=dev-key \
uv run -m pytest -m model_api --no-cov ../../server/tests/test_model_api_diarization.py
@@ -1,19 +0,0 @@
import os

from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer

oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")


def apikey_auth(apikey: str = Depends(oauth2_scheme)):
    required_key = os.environ.get("REFLECTOR_GPU_APIKEY")
    if not required_key:
        return
    if apikey == required_key:
        return
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API key",
        headers={"WWW-Authenticate": "Bearer"},
    )
@@ -1,12 +0,0 @@
from pathlib import Path

SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
SAMPLE_RATE = 16000
VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}

# App-level paths
UPLOADS_PATH = Path("/tmp/whisper-uploads")
@@ -1,30 +0,0 @@
from contextlib import asynccontextmanager

from fastapi import FastAPI

from .routers.diarization import router as diarization_router
from .routers.transcription import router as transcription_router
from .routers.translation import router as translation_router
from .services.transcriber import WhisperService
from .services.diarizer import PyannoteDiarizationService
from .utils import ensure_dirs


@asynccontextmanager
async def lifespan(app: FastAPI):
    ensure_dirs()
    whisper_service = WhisperService()
    whisper_service.load()
    app.state.whisper = whisper_service
    diarization_service = PyannoteDiarizationService()
    diarization_service.load()
    app.state.diarizer = diarization_service
    yield


def create_app() -> FastAPI:
    app = FastAPI(lifespan=lifespan)
    app.include_router(transcription_router)
    app.include_router(translation_router)
    app.include_router(diarization_router)
    return app
@@ -1,30 +0,0 @@
from typing import List

from fastapi import APIRouter, Depends, Request
from pydantic import BaseModel

from ..auth import apikey_auth
from ..services.diarizer import PyannoteDiarizationService
from ..utils import download_audio_file

router = APIRouter(tags=["diarization"])


class DiarizationSegment(BaseModel):
    start: float
    end: float
    speaker: int


class DiarizationResponse(BaseModel):
    diarization: List[DiarizationSegment]


@router.post(
    "/diarize", dependencies=[Depends(apikey_auth)], response_model=DiarizationResponse
)
def diarize(request: Request, audio_file_url: str, timestamp: float = 0.0):
    with download_audio_file(audio_file_url) as (file_path, _ext):
        file_path = str(file_path)
        diarizer: PyannoteDiarizationService = request.app.state.diarizer
        return diarizer.diarize_file(file_path, timestamp=timestamp)
@@ -1,109 +0,0 @@
|
|||||||
import uuid
|
|
||||||
from typing import Optional, Union
|
|
||||||
|
|
||||||
from fastapi import APIRouter, Body, Depends, Form, HTTPException, Request, UploadFile
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from pathlib import Path
|
|
||||||
from ..auth import apikey_auth
|
|
||||||
from ..config import SUPPORTED_FILE_EXTENSIONS, UPLOADS_PATH
|
|
||||||
from ..services.transcriber import MODEL_NAME
|
|
||||||
from ..utils import cleanup_uploaded_files, download_audio_file
|
|
||||||
|
|
||||||
router = APIRouter(prefix="/v1/audio", tags=["transcription"])
|
|
||||||
|
|
||||||
|
|
||||||
class WordTiming(BaseModel):
|
|
||||||
word: str
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
|
|
||||||
|
|
||||||
class TranscriptResult(BaseModel):
|
|
||||||
text: str
|
|
||||||
words: list[WordTiming]
|
|
||||||
filename: Optional[str] = None
|
|
||||||
|
|
||||||
|
|
||||||
class TranscriptBatchResponse(BaseModel):
|
|
||||||
results: list[TranscriptResult]
|
|
||||||
|
|
||||||
|
|
||||||
@router.post(
|
|
||||||
"/transcriptions",
|
|
||||||
dependencies=[Depends(apikey_auth)],
|
|
||||||
response_model=Union[TranscriptResult, TranscriptBatchResponse],
|
|
||||||
)
|
|
||||||
def transcribe(
|
|
||||||
request: Request,
|
|
||||||
file: UploadFile = None,
|
|
||||||
files: list[UploadFile] | None = None,
|
|
||||||
model: str = Form(MODEL_NAME),
|
|
||||||
language: str = Form("en"),
|
|
||||||
batch: bool = Form(False),
|
|
||||||
):
|
|
||||||
service = request.app.state.whisper
|
|
||||||
if not file and not files:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400, detail="Either 'file' or 'files' parameter is required"
|
|
||||||
)
|
|
||||||
if batch and not files:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400, detail="Batch transcription requires 'files'"
|
|
||||||
)
|
|
||||||
|
|
||||||
upload_files = [file] if file else files
|
|
||||||
|
|
||||||
uploaded_paths: list[Path] = []
|
|
||||||
with cleanup_uploaded_files(uploaded_paths):
|
|
||||||
for upload_file in upload_files:
|
|
||||||
audio_suffix = upload_file.filename.split(".")[-1].lower()
|
|
||||||
if audio_suffix not in SUPPORTED_FILE_EXTENSIONS:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400,
|
|
||||||
detail=(
|
|
||||||
f"Unsupported audio format. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
|
|
||||||
),
|
|
||||||
)
|
|
||||||
unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
|
|
||||||
file_path = UPLOADS_PATH / unique_filename
|
|
||||||
with open(file_path, "wb") as f:
|
|
||||||
content = upload_file.file.read()
|
|
||||||
f.write(content)
|
|
||||||
uploaded_paths.append(file_path)
|
|
||||||
|
|
||||||
if batch and len(upload_files) > 1:
|
|
||||||
results = []
|
|
||||||
for path in uploaded_paths:
|
|
||||||
result = service.transcribe_file(str(path), language=language)
|
|
||||||
result["filename"] = path.name
|
|
||||||
results.append(result)
|
|
||||||
return {"results": results}
|
|
||||||
|
|
||||||
results = []
|
|
||||||
for path in uploaded_paths:
|
|
||||||
result = service.transcribe_file(str(path), language=language)
|
|
||||||
result["filename"] = path.name
|
|
||||||
results.append(result)
|
|
||||||
|
|
||||||
return {"results": results} if len(results) > 1 else results[0]
|
|
||||||
|
|
||||||
|
|
||||||
@router.post(
|
|
||||||
"/transcriptions-from-url",
|
|
||||||
dependencies=[Depends(apikey_auth)],
|
|
||||||
response_model=TranscriptResult,
|
|
||||||
)
|
|
||||||
def transcribe_from_url(
|
|
||||||
request: Request,
|
|
||||||
audio_file_url: str = Body(..., description="URL of the audio file to transcribe"),
|
|
||||||
model: str = Body(MODEL_NAME),
|
|
||||||
language: str = Body("en"),
|
|
||||||
timestamp_offset: float = Body(0.0),
|
|
||||||
):
|
|
||||||
service = request.app.state.whisper
|
|
||||||
with download_audio_file(audio_file_url) as (file_path, _ext):
|
|
||||||
file_path = str(file_path)
|
|
||||||
result = service.transcribe_vad_url_segment(
|
|
||||||
file_path=file_path, timestamp_offset=timestamp_offset, language=language
|
|
||||||
)
|
|
||||||
return result
|
|
||||||
@@ -1,28 +0,0 @@
from typing import Dict

from fastapi import APIRouter, Body, Depends
from pydantic import BaseModel

from ..auth import apikey_auth
from ..services.translator import TextTranslatorService

router = APIRouter(tags=["translation"])

translator = TextTranslatorService()


class TranslationResponse(BaseModel):
    text: Dict[str, str]


@router.post(
    "/translate",
    dependencies=[Depends(apikey_auth)],
    response_model=TranslationResponse,
)
def translate(
    text: str,
    source_language: str = Body("en"),
    target_language: str = Body("fr"),
):
    return translator.translate(text, source_language, target_language)
@@ -1,42 +0,0 @@
import os
import threading

import torch
import torchaudio
from pyannote.audio import Pipeline


class PyannoteDiarizationService:
    def __init__(self):
        self._pipeline = None
        self._device = "cpu"
        self._lock = threading.Lock()

    def load(self):
        self._device = "cuda" if torch.cuda.is_available() else "cpu"
        self._pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            use_auth_token=os.environ.get("HF_TOKEN"),
        )
        self._pipeline.to(torch.device(self._device))

    def diarize_file(self, file_path: str, timestamp: float = 0.0) -> dict:
        if self._pipeline is None:
            self.load()
        waveform, sample_rate = torchaudio.load(file_path)
        with self._lock:
            diarization = self._pipeline(
                {"waveform": waveform, "sample_rate": sample_rate}
            )
        words = []
        for diarization_segment, _, speaker in diarization.itertracks(yield_label=True):
            words.append(
                {
                    "start": round(timestamp + diarization_segment.start, 3),
                    "end": round(timestamp + diarization_segment.end, 3),
                    "speaker": int(speaker[-2:])
                    if speaker and speaker[-2:].isdigit()
                    else 0,
                }
            )
        return {"diarization": words}
@@ -1,208 +0,0 @@
|
|||||||
import os
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import threading
|
|
||||||
from typing import Generator
|
|
||||||
|
|
||||||
import faster_whisper
|
|
||||||
import librosa
|
|
||||||
import numpy as np
|
|
||||||
import torch
|
|
||||||
from fastapi import HTTPException
|
|
||||||
from silero_vad import VADIterator, load_silero_vad
|
|
||||||
|
|
||||||
from ..config import SAMPLE_RATE, VAD_CONFIG
|
|
||||||
|
|
||||||
# Whisper configuration (service-local defaults)
|
|
||||||
MODEL_NAME = "large-v2"
|
|
||||||
# None delegates compute type to runtime: float16 on CUDA, int8 on CPU
|
|
||||||
MODEL_COMPUTE_TYPE = None
|
|
||||||
MODEL_NUM_WORKERS = 1
|
|
||||||
CACHE_PATH = os.path.join(os.path.expanduser("~"), ".cache", "reflector-whisper")
|
|
||||||
from ..utils import NoStdStreams
|
|
||||||
|
|
||||||
|
|
||||||
class WhisperService:
|
|
||||||
def __init__(self):
|
|
||||||
self.model = None
|
|
||||||
self.device = "cpu"
|
|
||||||
self.lock = threading.Lock()
|
|
||||||
|
|
||||||
def load(self):
|
|
||||||
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
|
||||||
compute_type = MODEL_COMPUTE_TYPE or (
|
|
||||||
"float16" if self.device == "cuda" else "int8"
|
|
||||||
)
|
|
||||||
self.model = faster_whisper.WhisperModel(
|
|
||||||
MODEL_NAME,
|
|
||||||
device=self.device,
|
|
||||||
compute_type=compute_type,
|
|
||||||
num_workers=MODEL_NUM_WORKERS,
|
|
||||||
download_root=CACHE_PATH,
|
|
||||||
)
|
|
||||||
|
|
||||||
def pad_audio(self, audio_array, sample_rate: int = SAMPLE_RATE):
|
|
||||||
audio_duration = len(audio_array) / sample_rate
|
|
||||||
if audio_duration < VAD_CONFIG["silence_padding"]:
|
|
||||||
silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
|
|
||||||
silence = np.zeros(silence_samples, dtype=np.float32)
|
|
||||||
return np.concatenate([audio_array, silence])
|
|
||||||
return audio_array
|
|
||||||
|
|
||||||
def enforce_word_timing_constraints(self, words: list[dict]) -> list[dict]:
|
|
||||||
if len(words) <= 1:
|
|
||||||
return words
|
|
||||||
enforced: list[dict] = []
|
|
||||||
for i, word in enumerate(words):
|
|
||||||
current = dict(word)
|
|
||||||
if i < len(words) - 1:
|
|
||||||
next_start = words[i + 1]["start"]
|
|
||||||
if current["end"] > next_start:
|
|
||||||
current["end"] = next_start
|
|
||||||
enforced.append(current)
|
|
||||||
return enforced
|
|
||||||
|
|
||||||
def transcribe_file(self, file_path: str, language: str = "en") -> dict:
|
|
||||||
input_for_model: str | "object" = file_path
|
|
||||||
try:
|
|
||||||
audio_array, _sample_rate = librosa.load(
|
|
||||||
file_path, sr=SAMPLE_RATE, mono=True
|
|
||||||
)
|
|
||||||
if len(audio_array) / float(SAMPLE_RATE) < VAD_CONFIG["silence_padding"]:
|
|
||||||
input_for_model = self.pad_audio(audio_array, SAMPLE_RATE)
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
with self.lock:
|
|
||||||
with NoStdStreams():
|
|
||||||
segments, _ = self.model.transcribe(
|
|
||||||
input_for_model,
|
|
||||||
language=language,
|
|
||||||
beam_size=5,
|
|
||||||
word_timestamps=True,
|
|
||||||
vad_filter=True,
|
|
||||||
vad_parameters={"min_silence_duration_ms": 500},
|
|
||||||
)
|
|
||||||
|
|
||||||
segments = list(segments)
|
|
||||||
text = "".join(segment.text for segment in segments).strip()
|
|
||||||
words = [
|
|
||||||
{
|
|
||||||
"word": word.word,
|
|
||||||
"start": round(float(word.start), 2),
|
|
||||||
"end": round(float(word.end), 2),
|
|
||||||
}
|
|
||||||
for segment in segments
|
|
||||||
for word in segment.words
|
|
||||||
]
|
|
||||||
words = self.enforce_word_timing_constraints(words)
|
|
||||||
return {"text": text, "words": words}
|
|
||||||
|
|
||||||
def transcribe_vad_url_segment(
|
|
||||||
self, file_path: str, timestamp_offset: float = 0.0, language: str = "en"
|
|
||||||
) -> dict:
|
|
||||||
def load_audio_via_ffmpeg(input_path: str, sample_rate: int) -> np.ndarray:
|
|
||||||
ffmpeg_bin = shutil.which("ffmpeg") or "ffmpeg"
|
|
||||||
cmd = [
|
|
||||||
ffmpeg_bin,
|
|
||||||
"-nostdin",
|
|
||||||
"-threads",
|
|
||||||
"1",
|
|
||||||
"-i",
|
|
||||||
input_path,
|
|
||||||
"-f",
|
|
||||||
"f32le",
|
|
||||||
"-acodec",
|
|
||||||
"pcm_f32le",
|
|
||||||
"-ac",
|
|
||||||
"1",
|
|
||||||
"-ar",
|
|
||||||
str(sample_rate),
|
|
||||||
"pipe:1",
|
|
||||||
]
|
|
||||||
try:
|
|
||||||
proc = subprocess.run(
|
|
||||||
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
raise HTTPException(status_code=400, detail=f"ffmpeg failed: {e}")
|
|
||||||
audio = np.frombuffer(proc.stdout, dtype=np.float32)
|
|
||||||
return audio
|
|
||||||
|
|
||||||
def vad_segments(
|
|
||||||
audio_array,
|
|
||||||
sample_rate: int = SAMPLE_RATE,
|
|
||||||
window_size: int = VAD_CONFIG["window_size"],
|
|
||||||
) -> Generator[tuple[float, float], None, None]:
|
|
||||||
vad_model = load_silero_vad(onnx=False)
|
|
||||||
iterator = VADIterator(vad_model, sampling_rate=sample_rate)
|
|
||||||
start = None
|
|
||||||
for i in range(0, len(audio_array), window_size):
|
|
||||||
chunk = audio_array[i : i + window_size]
|
|
||||||
if len(chunk) < window_size:
|
|
||||||
chunk = np.pad(
|
|
||||||
chunk, (0, window_size - len(chunk)), mode="constant"
|
|
||||||
)
|
|
||||||
speech = iterator(chunk)
|
|
||||||
if not speech:
|
|
||||||
continue
|
|
||||||
if "start" in speech:
|
|
||||||
start = speech["start"]
|
|
||||||
continue
|
|
||||||
if "end" in speech and start is not None:
|
|
||||||
end = speech["end"]
|
|
||||||
yield (start / float(SAMPLE_RATE), end / float(SAMPLE_RATE))
|
|
||||||
start = None
|
|
||||||
iterator.reset_states()
|
|
||||||
|
|
||||||
audio_array = load_audio_via_ffmpeg(file_path, SAMPLE_RATE)
|
|
||||||
|
|
||||||
merged_batches: list[tuple[float, float]] = []
|
|
||||||
batch_start = None
|
|
||||||
batch_end = None
|
|
||||||
max_duration = VAD_CONFIG["batch_max_duration"]
|
|
||||||
for seg_start, seg_end in vad_segments(audio_array):
|
|
||||||
if batch_start is None:
|
|
||||||
batch_start, batch_end = seg_start, seg_end
|
|
||||||
continue
|
|
||||||
if seg_end - batch_start <= max_duration:
|
|
||||||
batch_end = seg_end
|
|
||||||
else:
|
|
||||||
merged_batches.append((batch_start, batch_end))
|
|
||||||
batch_start, batch_end = seg_start, seg_end
|
|
||||||
if batch_start is not None and batch_end is not None:
|
|
||||||
merged_batches.append((batch_start, batch_end))
|
|
||||||
|
|
||||||
all_text = []
|
|
||||||
all_words = []
|
|
||||||
for start_time, end_time in merged_batches:
|
|
||||||
s_idx = int(start_time * SAMPLE_RATE)
|
|
||||||
e_idx = int(end_time * SAMPLE_RATE)
|
|
||||||
segment = audio_array[s_idx:e_idx]
|
|
||||||
segment = self.pad_audio(segment, SAMPLE_RATE)
|
|
||||||
with self.lock:
|
|
||||||
segments, _ = self.model.transcribe(
|
|
||||||
segment,
|
|
||||||
language=language,
|
|
||||||
beam_size=5,
|
|
||||||
word_timestamps=True,
|
|
||||||
vad_filter=True,
|
|
||||||
vad_parameters={"min_silence_duration_ms": 500},
|
|
||||||
)
|
|
||||||
segments = list(segments)
|
|
||||||
text = "".join(seg.text for seg in segments).strip()
|
|
||||||
words = [
|
|
||||||
{
|
|
||||||
"word": w.word,
|
|
||||||
"start": round(float(w.start) + start_time + timestamp_offset, 2),
|
|
||||||
"end": round(float(w.end) + start_time + timestamp_offset, 2),
|
|
||||||
}
|
|
||||||
for seg in segments
|
|
||||||
for w in seg.words
|
|
||||||
]
|
|
||||||
if text:
|
|
||||||
all_text.append(text)
|
|
||||||
all_words.extend(words)
|
|
||||||
|
|
||||||
all_words = self.enforce_word_timing_constraints(all_words)
|
|
||||||
return {"text": " ".join(all_text), "words": all_words}
|
|
||||||
@@ -1,44 +0,0 @@
import threading

from transformers import MarianMTModel, MarianTokenizer, pipeline


class TextTranslatorService:
    """Simple text-to-text translator using HuggingFace MarianMT models.

    This mirrors the modal translator API shape but uses text translation only.
    """

    def __init__(self):
        self._pipeline = None
        self._lock = threading.Lock()

    def load(self, source_language: str = "en", target_language: str = "fr"):
        # Pick a default MarianMT model pair if available; fall back to Helsinki-NLP en->fr
        model_name = self._resolve_model_name(source_language, target_language)
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        self._pipeline = pipeline("translation", model=model, tokenizer=tokenizer)

    def _resolve_model_name(self, src: str, tgt: str) -> str:
        # Minimal mapping; extend as needed
        pair = (src.lower(), tgt.lower())
        mapping = {
            ("en", "fr"): "Helsinki-NLP/opus-mt-en-fr",
            ("fr", "en"): "Helsinki-NLP/opus-mt-fr-en",
            ("en", "es"): "Helsinki-NLP/opus-mt-en-es",
            ("es", "en"): "Helsinki-NLP/opus-mt-es-en",
            ("en", "de"): "Helsinki-NLP/opus-mt-en-de",
            ("de", "en"): "Helsinki-NLP/opus-mt-de-en",
        }
        return mapping.get(pair, "Helsinki-NLP/opus-mt-en-fr")

    def translate(self, text: str, source_language: str, target_language: str) -> dict:
        if self._pipeline is None:
            self.load(source_language, target_language)
        with self._lock:
            results = self._pipeline(
                text, src_lang=source_language, tgt_lang=target_language
            )
        translated = results[0]["translation_text"] if results else ""
        return {"text": {source_language: text, target_language: translated}}
@@ -1,107 +0,0 @@
|
|||||||
import logging
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import uuid
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from typing import Mapping
|
|
||||||
from urllib.parse import urlparse
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import requests
|
|
||||||
from fastapi import HTTPException
|
|
||||||
|
|
||||||
from .config import SUPPORTED_FILE_EXTENSIONS, UPLOADS_PATH
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class NoStdStreams:
|
|
||||||
def __init__(self):
|
|
||||||
self.devnull = open(os.devnull, "w")
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
self._stdout, self._stderr = sys.stdout, sys.stderr
|
|
||||||
self._stdout.flush()
|
|
||||||
self._stderr.flush()
|
|
||||||
sys.stdout, sys.stderr = self.devnull, self.devnull
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, exc_value, traceback):
|
|
||||||
sys.stdout, sys.stderr = self._stdout, self._stderr
|
|
||||||
self.devnull.close()
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_dirs():
|
|
||||||
UPLOADS_PATH.mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
|
|
||||||
def detect_audio_format(url: str, headers: Mapping[str, str]) -> str:
|
|
||||||
url_path = urlparse(url).path
|
|
||||||
for ext in SUPPORTED_FILE_EXTENSIONS:
|
|
||||||
if url_path.lower().endswith(f".{ext}"):
|
|
||||||
return ext
|
|
||||||
|
|
||||||
content_type = headers.get("content-type", "").lower()
|
|
||||||
if "audio/mpeg" in content_type or "audio/mp3" in content_type:
|
|
||||||
return "mp3"
|
|
||||||
if "audio/wav" in content_type:
|
|
||||||
return "wav"
|
|
||||||
if "audio/mp4" in content_type:
|
|
||||||
return "mp4"
|
|
||||||
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400,
|
|
||||||
detail=(
|
|
||||||
f"Unsupported audio format for URL. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def download_audio_to_uploads(audio_file_url: str) -> tuple[Path, str]:
|
|
||||||
response = requests.head(audio_file_url, allow_redirects=True)
|
|
||||||
if response.status_code == 404:
|
|
||||||
raise HTTPException(status_code=404, detail="Audio file not found")
|
|
||||||
|
|
||||||
response = requests.get(audio_file_url, allow_redirects=True)
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
audio_suffix = detect_audio_format(audio_file_url, response.headers)
|
|
||||||
unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
|
|
||||||
file_path: Path = UPLOADS_PATH / unique_filename
|
|
||||||
|
|
||||||
with open(file_path, "wb") as f:
|
|
||||||
f.write(response.content)
|
|
||||||
|
|
||||||
return file_path, audio_suffix
|
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
|
|
||||||
def download_audio_file(audio_file_url: str):
|
|
||||||
"""Download an audio file to UPLOADS_PATH and remove it after use.
|
|
||||||
|
|
||||||
Yields (file_path: Path, audio_suffix: str).
|
|
||||||
"""
|
|
||||||
file_path, audio_suffix = download_audio_to_uploads(audio_file_url)
|
|
||||||
try:
|
|
||||||
yield file_path, audio_suffix
|
|
||||||
finally:
|
|
||||||
try:
|
|
||||||
file_path.unlink(missing_ok=True)
|
|
||||||
except Exception as e:
|
|
||||||
logger.error("Error deleting temporary file %s: %s", file_path, e)
|
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
|
|
||||||
def cleanup_uploaded_files(file_paths: list[Path]):
|
|
||||||
"""Ensure provided file paths are removed after use.
|
|
||||||
|
|
||||||
The provided list can be populated inside the context; all present entries
|
|
||||||
at exit will be deleted.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
yield file_paths
|
|
||||||
finally:
|
|
||||||
for path in list(file_paths):
|
|
||||||
try:
|
|
||||||
path.unlink(missing_ok=True)
|
|
||||||
except Exception as e:
|
|
||||||
logger.error("Error deleting temporary file %s: %s", path, e)
|
|
||||||
@@ -1,10 +0,0 @@
services:
  reflector_gpu:
    build:
      context: .
    ports:
      - "8000:8000"
    env_file:
      - .env
    volumes:
      - ./cache:/root/.cache
@@ -1,3 +0,0 @@
from app.factory import create_app

app = create_app()
@@ -1,19 +0,0 @@
[project]
name = "reflector-gpu"
version = "0.1.0"
description = "Self-hosted GPU service for speech transcription, diarization, and translation via FastAPI."
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "fastapi[standard]>=0.116.1",
    "uvicorn[standard]>=0.30.0",
    "torch>=2.3.0",
    "faster-whisper>=1.1.0",
    "librosa==0.10.1",
    "numpy<2",
    "silero-vad==5.1.0",
    "transformers>=4.35.0",
    "sentencepiece",
    "pyannote.audio==3.1.0",
    "torchaudio>=2.3.0",
]
@@ -1,17 +0,0 @@
#!/bin/sh
set -e

export PATH="/root/.local/bin:$PATH"
cd /app

# Install Python dependencies at runtime (first run or when FORCE_SYNC=1)
if [ ! -d "/app/.venv" ] || [ "$FORCE_SYNC" = "1" ]; then
    echo "[startup] Installing Python dependencies with uv..."
    uv sync --compile-bytecode --locked
else
    echo "[startup] Using existing virtual environment at /app/.venv"
fi

exec uv run uvicorn main:app --host 0.0.0.0 --port 8000
3013 gpu/self_hosted/uv.lock generated
File diff suppressed because it is too large
@@ -1,29 +1,3 @@
## API Key Management

### Finding Your User ID

```bash
# Get your OAuth sub (user ID) - requires authentication
curl -H "Authorization: Bearer <your_jwt>" http://localhost:1250/v1/me
# Returns: {"sub": "your-oauth-sub-here", "email": "...", ...}
```

### Creating API Keys

```bash
curl -X POST http://localhost:1250/v1/user/api-keys \
  -H "Authorization: Bearer <your_jwt>" \
  -H "Content-Type: application/json" \
  -d '{"name": "My API Key"}'
```

### Using API Keys

```bash
# Use X-API-Key header instead of Authorization
curl -H "X-API-Key: <your_api_key>" http://localhost:1250/v1/transcripts
```

## AWS S3/SQS usage clarification

Whereby.com uploads recordings directly to our S3 bucket when meetings end.
@@ -1,95 +0,0 @@
# Data Retention and Cleanup

## Overview

For public instances of Reflector, a data retention policy is automatically enforced to delete anonymous user data after a configurable period (default: 7 days). This ensures compliance with privacy expectations and prevents unbounded storage growth.

## Configuration

### Environment Variables

- `PUBLIC_MODE` (bool): Must be set to `true` to enable automatic cleanup
- `PUBLIC_DATA_RETENTION_DAYS` (int): Number of days to retain anonymous data (default: 7)

### What Gets Deleted

When data reaches the retention period, the following items are automatically removed:

1. **Transcripts** from anonymous users (where `user_id` is NULL):
   - Database records
   - Local files (audio.wav, audio.mp3, audio.json waveform)
   - Storage files (cloud storage if configured)

## Automatic Cleanup

### Celery Beat Schedule

When `PUBLIC_MODE=true`, a Celery beat task runs daily at 3 AM to clean up old data:

```python
# Automatically scheduled when PUBLIC_MODE=true
"cleanup_old_public_data": {
    "task": "reflector.worker.cleanup.cleanup_old_public_data",
    "schedule": crontab(hour=3, minute=0),  # Daily at 3 AM
}
```

### Running the Worker

Ensure both Celery worker and beat scheduler are running:

```bash
# Start Celery worker
uv run celery -A reflector.worker.app worker --loglevel=info

# Start Celery beat scheduler (in another terminal)
uv run celery -A reflector.worker.app beat
```

## Manual Cleanup

For testing or manual intervention, use the cleanup tool:

```bash
# Delete data older than 7 days (default)
uv run python -m reflector.tools.cleanup_old_data

# Delete data older than 30 days
uv run python -m reflector.tools.cleanup_old_data --days 30
```

Note: The manual tool uses the same implementation as the Celery worker task to ensure consistency.

## Important Notes

1. **User Data Deletion**: Only anonymous data (where `user_id` is NULL) is deleted. Authenticated user data is preserved.

2. **Storage Cleanup**: The system properly cleans up both local files and cloud storage when configured.

3. **Error Handling**: If individual deletions fail, the cleanup continues and logs errors. Failed deletions are reported in the task output.

4. **Public Instance Only**: The automatic cleanup task only runs when `PUBLIC_MODE=true` to prevent accidental data loss in private deployments.

## Testing

Run the cleanup tests:

```bash
uv run pytest tests/test_cleanup.py -v
```

## Monitoring

Check Celery logs for cleanup task execution:

```bash
# Look for cleanup task logs
grep "cleanup_old_public_data" celery.log
grep "Starting cleanup of old public data" celery.log
```

Task statistics are logged after each run:
- Number of transcripts deleted
- Number of meetings deleted
- Number of orphaned recordings deleted
- Any errors encountered
@@ -1,194 +0,0 @@
|
|||||||
## Reflector GPU Transcription API (Specification)
|
|
||||||
|
|
||||||
This document defines the Reflector GPU transcription API that all implementations must adhere to. Current implementations include NVIDIA Parakeet (NeMo) and Whisper (faster-whisper), both deployed on Modal.com. The API surface and response shapes are OpenAI/Whisper-compatible, so clients can switch implementations by changing only the base URL.
|
|
||||||
|
|
||||||
### Base URL and Authentication
|
|
||||||
|
|
||||||
- Example base URLs (Modal web endpoints):
|
|
||||||
|
|
||||||
- Parakeet: `https://<account>--reflector-transcriber-parakeet-web.modal.run`
|
|
||||||
- Whisper: `https://<account>--reflector-transcriber-web.modal.run`
|
|
||||||
|
|
||||||
- All endpoints are served under `/v1` and require a Bearer token:
|
|
||||||
|
|
||||||
```
|
|
||||||
Authorization: Bearer <REFLECTOR_GPU_APIKEY>
|
|
||||||
```
|
|
||||||
|
|
||||||
Note: To switch implementations, deploy the desired variant and point `TRANSCRIPT_URL` to its base URL. The API is identical.
|
|
||||||
|
|
||||||
### Supported file types
|
|
||||||
|
|
||||||
`mp3, mp4, mpeg, mpga, m4a, wav, webm`
|
|
||||||
|
|
||||||
### Models and languages
|
|
||||||
|
|
||||||
- Parakeet (NVIDIA NeMo): default `nvidia/parakeet-tdt-0.6b-v2`
|
|
||||||
- Language support: only `en`. Other languages return HTTP 400.
|
|
||||||
- Whisper (faster-whisper): default `large-v2` (or deployment-specific)
|
|
||||||
- Language support: multilingual (per Whisper model capabilities).
|
|
||||||
|
|
||||||
Note: The `model` parameter is accepted by all implementations for interface parity. Some backends may treat it as informational.
|
|
||||||
|
|
||||||
### Endpoints
|
|
||||||
|
|
||||||
#### POST /v1/audio/transcriptions
|
|
||||||
|
|
||||||
Transcribe one or more uploaded audio files.
|
|
||||||
|
|
||||||
Request: multipart/form-data
|
|
||||||
|
|
||||||
- `file` (File) — optional. Single file to transcribe.
|
|
||||||
- `files` (File[]) — optional. One or more files to transcribe.
|
|
||||||
- `model` (string) — optional. Defaults to the implementation-specific model (see above).
|
|
||||||
- `language` (string) — optional, defaults to `en`.
|
|
||||||
- Parakeet: only `en` is accepted; other values return HTTP 400
|
|
||||||
- Whisper: model-dependent; typically multilingual
|
|
||||||
- `batch` (boolean) — optional, defaults to `false`.
|
|
||||||
|
|
||||||
Notes:
|
|
||||||
|
|
||||||
- Provide either `file` or `files`, not both. If neither is provided, HTTP 400.
|
|
||||||
- `batch` requires `files`; using `batch=true` without `files` returns HTTP 400.
|
|
||||||
- Response shape for multiple files is the same regardless of `batch`.
|
|
||||||
- Files sent to this endpoint are processed in a single pass (no VAD/chunking). This is intended for short clips (roughly ≤ 30s; depends on GPU memory/model). For longer audio, prefer `/v1/audio/transcriptions-from-url` which supports VAD-based chunking.
|
|
||||||
|
|
||||||
Responses
|
|
||||||
|
|
||||||
Single file response:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"text": "transcribed text",
|
|
||||||
"words": [
|
|
||||||
{ "word": "hello", "start": 0.0, "end": 0.5 },
|
|
||||||
{ "word": "world", "start": 0.5, "end": 1.0 }
|
|
||||||
],
|
|
||||||
"filename": "audio.mp3"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Multiple files response:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"results": [
|
|
||||||
{"filename": "a1.mp3", "text": "...", "words": [...]},
|
|
||||||
{"filename": "a2.mp3", "text": "...", "words": [...]}]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Notes:
|
|
||||||
|
|
||||||
- Word objects always include keys: `word`, `start`, `end`.
|
|
||||||
- Some implementations may include a trailing space in `word` to match Whisper tokenization behavior; clients should trim if needed.
|
|
||||||
|
|
||||||
Example curl (single file):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
curl -X POST \
|
|
||||||
-H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
|
|
||||||
-F "file=@/path/to/audio.mp3" \
|
|
||||||
-F "language=en" \
|
|
||||||
"$BASE_URL/v1/audio/transcriptions"
|
|
||||||
```
|
|
||||||
|
|
||||||
Example curl (multiple files, batch):
|
|
||||||
|
|
||||||
```bash
|
|
||||||
curl -X POST \
|
|
||||||
-H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
|
|
||||||
-F "files=@/path/a1.mp3" -F "files=@/path/a2.mp3" \
|
|
||||||
-F "batch=true" -F "language=en" \
|
|
||||||
"$BASE_URL/v1/audio/transcriptions"
|
|
||||||
```
|
|
||||||
|
|
||||||
#### POST /v1/audio/transcriptions-from-url
|
|
||||||
|
|
||||||
Transcribe a single remote audio file by URL.
|
|
||||||
|
|
||||||
Request: application/json
|
|
||||||
|
|
||||||
Body parameters:
|
|
||||||
|
|
||||||
- `audio_file_url` (string) — required. URL of the audio file to transcribe.
|
|
||||||
- `model` (string) — optional. Defaults to the implementation-specific model (see above).
|
|
||||||
- `language` (string) — optional, defaults to `en`. Parakeet only accepts `en`.
|
|
||||||
- `timestamp_offset` (number) — optional, defaults to `0.0`. Added to each word's `start`/`end` in the response.
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"audio_file_url": "https://example.com/audio.mp3",
|
|
||||||
"model": "nvidia/parakeet-tdt-0.6b-v2",
|
|
||||||
"language": "en",
|
|
||||||
"timestamp_offset": 0.0
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Response:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"text": "transcribed text",
|
|
||||||
"words": [
|
|
||||||
{ "word": "hello", "start": 10.0, "end": 10.5 },
|
|
||||||
{ "word": "world", "start": 10.5, "end": 11.0 }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Notes:
|
|
||||||
|
|
||||||
- `timestamp_offset` is added to each word’s `start`/`end` in the response.
|
|
||||||
- Implementations may perform VAD-based chunking and batching for long-form audio; word timings are adjusted accordingly.
|
|
||||||
|
|
||||||
Example curl:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
curl -X POST \
|
|
||||||
-H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d '{
|
|
||||||
"audio_file_url": "https://example.com/audio.mp3",
|
|
||||||
"language": "en",
|
|
||||||
"timestamp_offset": 0
|
|
||||||
}' \
|
|
||||||
"$BASE_URL/v1/audio/transcriptions-from-url"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Error handling
|
|
||||||
|
|
||||||
- 400 Bad Request
|
|
||||||
- Parakeet: `language` other than `en`
|
|
||||||
- Missing required parameters (`file`/`files` for upload; `audio_file_url` for URL endpoint)
|
|
||||||
- Unsupported file extension
|
|
||||||
- 401 Unauthorized
|
|
||||||
- Missing or invalid Bearer token
|
|
||||||
- 404 Not Found
|
|
||||||
- `audio_file_url` does not exist
|
|
||||||
|
|
||||||
### Implementation details
|
|
||||||
|
|
||||||
- GPUs: A10G for small-file/live, L40S for large-file URL transcription (subject to deployment)
|
|
||||||
- VAD chunking and segment batching; word timings adjusted and overlapping ends constrained
|
|
||||||
- Pads very short segments (< 0.5s) to avoid model crashes on some backends
|
|
||||||
|
|
||||||
### Server configuration (Reflector API)
|
|
||||||
|
|
||||||
Set the Reflector server to use the Modal backend and point `TRANSCRIPT_URL` to your chosen deployment:
|
|
||||||
|
|
||||||
```
|
|
||||||
TRANSCRIPT_BACKEND=modal
|
|
||||||
TRANSCRIPT_URL=https://<account>--reflector-transcriber-parakeet-web.modal.run
|
|
||||||
TRANSCRIPT_MODAL_API_KEY=<REFLECTOR_GPU_APIKEY>
|
|
||||||
```
|
|
||||||
|
|
||||||
### Conformance tests
|
|
||||||
|
|
||||||
Use the pytest-based conformance tests to validate any new implementation (including self-hosted) against this spec:
|
|
||||||
|
|
||||||
```
|
|
||||||
TRANSCRIPT_URL=https://<your-deployment-base> \
|
|
||||||
TRANSCRIPT_MODAL_API_KEY=your-api-key \
|
|
||||||
uv run -m pytest -m model_api --no-cov server/tests/test_model_api_transcript.py
|
|
||||||
```
|
|
||||||
@@ -1,236 +0,0 @@
|
|||||||
# Reflector Architecture: Whereby + Daily.co Recording Storage
|
|
||||||
|
|
||||||
## System Overview
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
graph TB
|
|
||||||
subgraph "Actors"
|
|
||||||
APP[Our App<br/>Reflector]
|
|
||||||
WHEREBY[Whereby Service<br/>External]
|
|
||||||
DAILY[Daily.co Service<br/>External]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "AWS S3 Buckets"
|
|
||||||
TRANSCRIPT_BUCKET[Transcript Bucket<br/>reflector-transcripts<br/>Output: Processed MP3s]
|
|
||||||
WHEREBY_BUCKET[Whereby Bucket<br/>reflector-whereby-recordings<br/>Input: Raw MP4s]
|
|
||||||
DAILY_BUCKET[Daily.co Bucket<br/>reflector-dailyco-recordings<br/>Input: Raw WebM tracks]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "AWS Infrastructure"
|
|
||||||
SQS[SQS Queue<br/>Whereby notifications]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "Database"
|
|
||||||
DB[(PostgreSQL<br/>Recordings, Transcripts, Meetings)]
|
|
||||||
end
|
|
||||||
|
|
||||||
APP -->|Write processed| TRANSCRIPT_BUCKET
|
|
||||||
APP -->|Read/Delete| WHEREBY_BUCKET
|
|
||||||
APP -->|Read/Delete| DAILY_BUCKET
|
|
||||||
APP -->|Poll| SQS
|
|
||||||
APP -->|Store metadata| DB
|
|
||||||
|
|
||||||
WHEREBY -->|Write recordings| WHEREBY_BUCKET
|
|
||||||
WHEREBY_BUCKET -->|S3 Event| SQS
|
|
||||||
WHEREBY -->|Participant webhooks<br/>room.client.joined/left| APP
|
|
||||||
|
|
||||||
DAILY -->|Write recordings| DAILY_BUCKET
|
|
||||||
DAILY -->|Recording webhook<br/>recording.ready-to-download| APP
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note on Webhook vs S3 Event for Recording Processing:**
|
|
||||||
- **Whereby**: Uses S3 Events → SQS for recording availability (S3 as source of truth, no race conditions)
|
|
||||||
- **Daily.co**: Uses webhooks for recording availability (more immediate, built-in reliability)
|
|
||||||
- **Both**: Use webhooks for participant tracking (real-time updates)
|
|
||||||
|
|
||||||
## Credentials & Permissions
|
|
||||||
|
|
||||||
```mermaid
|
|
||||||
graph LR
|
|
||||||
subgraph "Master Credentials"
|
|
||||||
MASTER[TRANSCRIPT_STORAGE_AWS_*<br/>Access Key ID + Secret]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "Whereby Upload Credentials"
|
|
||||||
WHEREBY_CREDS[AWS_WHEREBY_ACCESS_KEY_*<br/>Access Key ID + Secret]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "Daily.co Upload Role"
|
|
||||||
DAILY_ROLE[DAILY_STORAGE_AWS_ROLE_ARN<br/>IAM Role ARN]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "Our App Uses"
|
|
||||||
MASTER -->|Read/Write/Delete| TRANSCRIPT_BUCKET[Transcript Bucket]
|
|
||||||
MASTER -->|Read/Delete| WHEREBY_BUCKET[Whereby Bucket]
|
|
||||||
MASTER -->|Read/Delete| DAILY_BUCKET[Daily.co Bucket]
|
|
||||||
MASTER -->|Poll/Delete| SQS[SQS Queue]
|
|
||||||
end
|
|
||||||
|
|
||||||
subgraph "We Give To Services"
|
|
||||||
WHEREBY_CREDS -->|Passed in API call| WHEREBY_SERVICE[Whereby Service]
|
|
||||||
WHEREBY_SERVICE -->|Write Only| WHEREBY_BUCKET
|
|
||||||
|
|
||||||
DAILY_ROLE -->|Passed in API call| DAILY_SERVICE[Daily.co Service]
|
|
||||||
DAILY_SERVICE -->|Assume Role| DAILY_ROLE
|
|
||||||
DAILY_SERVICE -->|Write Only| DAILY_BUCKET
|
|
||||||
end
|
|
||||||
```
|
|
||||||
|
|
||||||
# Video Platform Recording Integration
|
|
||||||
|
|
||||||
This document explains how Reflector receives and identifies multitrack audio recordings from different video platforms.
|
|
||||||
|
|
||||||
## Platform Comparison
|
|
||||||
|
|
||||||
| Platform | Delivery Method | Track Identification |
|
|
||||||
|----------|----------------|---------------------|
|
|
||||||
| **Daily.co** | Webhook | Explicit track list in payload |
|
|
||||||
| **Whereby** | SQS (S3 notifications) | Single file per notification |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Daily.co
|
|
||||||
|
|
||||||
**Note:** Primary discovery via polling (`poll_daily_recordings`), webhooks as backup.
|
|
||||||
|
|
||||||
Daily.co uses **webhooks** to notify Reflector when recordings are ready.
|
|
||||||
|
|
||||||
### How It Works
|
|
||||||
|
|
||||||
1. **Daily.co sends webhook** when recording is ready
|
|
||||||
- Event type: `recording.ready-to-download`
|
|
||||||
- Endpoint: `/v1/daily/webhook` (`reflector/views/daily.py:46-102`)
|
|
||||||
|
|
||||||
2. **Webhook payload explicitly includes track list**:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"recording_id": "7443ee0a-dab1-40eb-b316-33d6c0d5ff88",
|
|
||||||
"room_name": "daily-20251020193458",
|
|
||||||
"tracks": [
|
|
||||||
{
|
|
||||||
"type": "audio",
|
|
||||||
"s3Key": "monadical/daily-20251020193458/1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922",
|
|
||||||
"size": 831843
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "audio",
|
|
||||||
"s3Key": "monadical/daily-20251020193458/1760988935484-a37c35e3-6f8e-4274-a482-e9d0f102a732-cam-audio-1760988943823",
|
|
||||||
"size": 408438
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "video",
|
|
||||||
"s3Key": "monadical/daily-20251020193458/...-video.webm",
|
|
||||||
"size": 30000000
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
3. **System extracts audio tracks** (`daily.py:211`):
|
|
||||||
```python
|
|
||||||
track_keys = [t.s3Key for t in tracks if t.type == "audio"]
|
|
||||||
```
|
|
||||||
|
|
||||||
4. **Triggers multitrack processing** (`daily.py:213-218`):
|
|
||||||
```python
|
|
||||||
process_multitrack_recording.delay(
|
|
||||||
bucket_name=bucket_name, # reflector-dailyco-local
|
|
||||||
room_name=room_name, # daily-20251020193458
|
|
||||||
recording_id=recording_id, # 7443ee0a-dab1-40eb-b316-33d6c0d5ff88
|
|
||||||
track_keys=track_keys # Only audio s3Keys
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Key Advantage: No Ambiguity
|
|
||||||
|
|
||||||
Even though multiple meetings may share the same S3 bucket/folder (`monadical/`), **there's no ambiguity** because:
|
|
||||||
- Each webhook payload contains the exact `s3Key` list for that specific `recording_id`
|
|
||||||
- No need to scan folders or guess which files belong together
|
|
||||||
- Each track's s3Key includes the room timestamp subfolder (e.g., `daily-20251020193458/`)
|
|
||||||
|
|
||||||
The room name includes timestamp (`daily-20251020193458`) to keep recordings organized, but **the webhook's explicit track list is what prevents mixing files from different meetings**.
|
|
||||||
|
|
||||||
### Track Timeline Extraction
|
|
||||||
|
|
||||||
Daily.co provides timing information in two places:
|
|
||||||
|
|
||||||
**1. PyAV WebM Metadata (current approach)**:
|
|
||||||
```python
|
|
||||||
# Read from WebM container stream metadata
|
|
||||||
stream.start_time = 8.130s # Meeting-relative timing
|
|
||||||
```
|
|
||||||
|
|
||||||
**2. Filename Timestamps (alternative approach, commit 3bae9076)**:
|
|
||||||
```
|
|
||||||
Filename format: {recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm
|
|
||||||
Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm
|
|
||||||
|
|
||||||
Parse timestamps:
|
|
||||||
- recording_start_ts: 1760988935484 (Unix ms)
|
|
||||||
- track_start_ts: 1760988935922 (Unix ms)
|
|
||||||
- offset: (1760988935922 - 1760988935484) / 1000 = 0.438s
|
|
||||||
```
|
|
||||||
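
A minimal sketch of that offset computation, assuming the filename layout shown above (the helper name is hypothetical):

```python
# Hypothetical helper: derive a track's start offset (seconds) from the
# {recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm filename layout.
def filename_offset_seconds(filename: str) -> float:
    stem = filename.rsplit(".", 1)[0]
    recording_start_ms = int(stem.split("-", 1)[0])   # first timestamp in the name
    track_start_ms = int(stem.rsplit("-", 1)[-1])     # last timestamp in the name
    return (track_start_ms - recording_start_ms) / 1000.0

# 1760988935484-...-cam-audio-1760988935922.webm -> 0.438
print(filename_offset_seconds(
    "1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm"
))
```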
|
|
||||||
**Time Difference (PyAV vs Filename)**:
|
|
||||||
```
|
|
||||||
Track 0:
|
|
||||||
Filename offset: 438ms
|
|
||||||
PyAV metadata: 229ms
|
|
||||||
Difference: 209ms
|
|
||||||
|
|
||||||
Track 1:
|
|
||||||
Filename offset: 8339ms
|
|
||||||
PyAV metadata: 8130ms
|
|
||||||
Difference: 209ms
|
|
||||||
```
|
|
||||||
|
|
||||||
**Consistent 209ms delta** suggests network/encoding delay between file upload initiation (filename) and actual audio stream start (metadata).
|
|
||||||
|
|
||||||
**Current implementation uses PyAV metadata** because:
|
|
||||||
- More accurate (represents when audio actually started)
|
|
||||||
- Padding BEFORE transcription produces correct Whisper timestamps automatically
|
|
||||||
- No manual offset adjustment needed during transcript merge
|
|
||||||
|
|
||||||
### Why Re-encoding During Padding
|
|
||||||
|
|
||||||
Padding coincidentally involves re-encoding, which is important for Daily.co + Whisper:
|
|
||||||
|
|
||||||
**Problem:** Daily.co skips frames in recordings when microphone is muted or paused
|
|
||||||
- WebM containers have gaps where audio frames should be
|
|
||||||
- Whisper doesn't understand these gaps and produces incorrect timestamps
|
|
||||||
- Example: 5s of audio with 2s muted → file has frames only for 3s, Whisper thinks duration is 3s
|
|
||||||
|
|
||||||
**Solution:** Re-encoding via PyAV filter graph (`adelay` + `aresample`)
|
|
||||||
- Restores missing frames as silence
|
|
||||||
- Produces continuous audio stream without gaps
|
|
||||||
- Whisper now sees correct duration and produces accurate timestamps
|
|
||||||
|
|
||||||
**Why combined with padding:**
|
|
||||||
- Already re-encoding for padding (adding initial silence)
|
|
||||||
- More performant to do both operations in single PyAV pipeline
|
|
||||||
- Padded values needed for mixdown anyway (creating final MP3)
|
|
||||||
|
|
||||||
Implementation: `main_multitrack_pipeline.py:_apply_audio_padding_streaming()`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Whereby (SQS-based)
|
|
||||||
|
|
||||||
Whereby uses **AWS SQS** (via S3 notifications) to notify Reflector when files are uploaded.
|
|
||||||
|
|
||||||
### How It Works
|
|
||||||
|
|
||||||
1. **Whereby uploads recording** to S3
|
|
||||||
2. **S3 sends notification** to SQS queue (one notification per file)
|
|
||||||
3. **Reflector polls SQS queue** (`worker/process.py:process_messages()`)
|
|
||||||
4. **System processes single file** (`worker/process.py:process_recording()`)
|
|
||||||
|
|
||||||
### Key Difference from Daily.co
|
|
||||||
|
|
||||||
**Whereby (SQS):** System receives S3 notification "file X was created" - only knows about one file at a time, would need to scan folder to find related files
|
|
||||||
|
|
||||||
**Daily.co (Webhook):** Daily explicitly tells system which files belong together in the webhook payload
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1,233 +0,0 @@
|
|||||||
# Reflector Webhook Documentation
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
Reflector supports webhook notifications to notify external systems when transcript processing is completed. Webhooks can be configured per room and are triggered automatically after a transcript is successfully processed.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
Webhooks are configured at the room level with two fields:
|
|
||||||
- `webhook_url`: The HTTPS endpoint to receive webhook notifications
|
|
||||||
- `webhook_secret`: Optional secret key for HMAC signature verification (auto-generated if not provided)
|
|
||||||
|
|
||||||
## Events
|
|
||||||
|
|
||||||
### `transcript.completed`
|
|
||||||
|
|
||||||
Triggered when a transcript has been fully processed, including transcription, diarization, summarization, topic detection and calendar event integration.
|
|
||||||
|
|
||||||
### `test`
|
|
||||||
|
|
||||||
A test event that can be triggered manually to verify webhook configuration.
|
|
||||||
|
|
||||||
## Webhook Request Format
|
|
||||||
|
|
||||||
### Headers
|
|
||||||
|
|
||||||
All webhook requests include the following headers:
|
|
||||||
|
|
||||||
| Header | Description | Example |
|
|
||||||
|--------|-------------|---------|
|
|
||||||
| `Content-Type` | Always `application/json` | `application/json` |
|
|
||||||
| `User-Agent` | Identifies Reflector as the source | `Reflector-Webhook/1.0` |
|
|
||||||
| `X-Webhook-Event` | The event type | `transcript.completed` or `test` |
|
|
||||||
| `X-Webhook-Retry` | Current retry attempt number | `0`, `1`, `2`... |
|
|
||||||
| `X-Webhook-Signature` | HMAC signature (if secret configured) | `t=1735306800,v1=abc123...` |
|
|
||||||
|
|
||||||
### Signature Verification
|
|
||||||
|
|
||||||
If a webhook secret is configured, Reflector includes an HMAC-SHA256 signature in the `X-Webhook-Signature` header to verify the webhook authenticity.
|
|
||||||
|
|
||||||
The signature format is: `t={timestamp},v1={signature}`
|
|
||||||
|
|
||||||
To verify the signature:
|
|
||||||
1. Extract the timestamp and signature from the header
|
|
||||||
2. Create the signed payload: `{timestamp}.{request_body}`
|
|
||||||
3. Compute HMAC-SHA256 of the signed payload using your webhook secret
|
|
||||||
4. Compare the computed signature with the received signature
|
|
||||||
|
|
||||||
Example verification (Python):
|
|
||||||
```python
|
|
||||||
import hmac
|
|
||||||
import hashlib
|
|
||||||
|
|
||||||
def verify_webhook_signature(payload: bytes, signature_header: str, secret: str) -> bool:
|
|
||||||
# Parse header: "t=1735306800,v1=abc123..."
|
|
||||||
parts = dict(part.split("=") for part in signature_header.split(","))
|
|
||||||
timestamp = parts["t"]
|
|
||||||
received_signature = parts["v1"]
|
|
||||||
|
|
||||||
# Create signed payload
|
|
||||||
signed_payload = f"{timestamp}.{payload.decode('utf-8')}"
|
|
||||||
|
|
||||||
# Compute expected signature
|
|
||||||
expected_signature = hmac.new(
|
|
||||||
secret.encode("utf-8"),
|
|
||||||
signed_payload.encode("utf-8"),
|
|
||||||
hashlib.sha256
|
|
||||||
).hexdigest()
|
|
||||||
|
|
||||||
# Compare signatures
|
|
||||||
return hmac.compare_digest(expected_signature, received_signature)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Event Payloads
|
|
||||||
|
|
||||||
### `transcript.completed` Event
|
|
||||||
|
|
||||||
This event includes a convenient URL for accessing the transcript:
|
|
||||||
- `frontend_url`: Direct link to view the transcript in the web interface
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"event": "transcript.completed",
|
|
||||||
"event_id": "transcript.completed-abc-123-def-456",
|
|
||||||
"timestamp": "2025-08-27T12:34:56.789012Z",
|
|
||||||
"transcript": {
|
|
||||||
"id": "abc-123-def-456",
|
|
||||||
"room_id": "room-789",
|
|
||||||
"created_at": "2025-08-27T12:00:00Z",
|
|
||||||
"duration": 1800.5,
|
|
||||||
"title": "Q3 Product Planning Meeting",
|
|
||||||
"short_summary": "Team discussed Q3 product roadmap, prioritizing mobile app features and API improvements.",
|
|
||||||
"long_summary": "The product team met to finalize the Q3 roadmap. Key decisions included...",
|
|
||||||
"webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v Speaker 1>Welcome everyone to today's meeting...",
|
|
||||||
"topics": [
|
|
||||||
{
|
|
||||||
"title": "Introduction and Agenda",
|
|
||||||
"summary": "Meeting kickoff with agenda review",
|
|
||||||
"timestamp": 0.0,
|
|
||||||
"duration": 120.0,
|
|
||||||
"webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v Speaker 1>Welcome everyone..."
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"title": "Mobile App Features Discussion",
|
|
||||||
"summary": "Team reviewed proposed mobile app features for Q3",
|
|
||||||
"timestamp": 120.0,
|
|
||||||
"duration": 600.0,
|
|
||||||
"webvtt": "WEBVTT\n\n00:02:00.000 --> 00:02:10.000\n<v Speaker 2>Let's talk about the mobile app..."
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"participants": [
|
|
||||||
{
|
|
||||||
"id": "participant-1",
|
|
||||||
"name": "John Doe",
|
|
||||||
"speaker": "Speaker 1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "participant-2",
|
|
||||||
"name": "Jane Smith",
|
|
||||||
"speaker": "Speaker 2"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source_language": "en",
|
|
||||||
"target_language": "en",
|
|
||||||
"status": "completed",
|
|
||||||
"frontend_url": "https://app.reflector.com/transcripts/abc-123-def-456"
|
|
||||||
},
|
|
||||||
"room": {
|
|
||||||
"id": "room-789",
|
|
||||||
"name": "Product Team Room"
|
|
||||||
},
|
|
||||||
"calendar_event": {
|
|
||||||
"id": "calendar-event-123",
|
|
||||||
"ics_uid": "event-123",
|
|
||||||
"title": "Q3 Product Planning Meeting",
|
|
||||||
"start_time": "2025-08-27T12:00:00Z",
|
|
||||||
"end_time": "2025-08-27T12:30:00Z",
|
|
||||||
"description": "Team discussed Q3 product roadmap, prioritizing mobile app features and API improvements.",
|
|
||||||
"location": "Conference Room 1",
|
|
||||||
"attendees": [
|
|
||||||
{
|
|
||||||
"id": "participant-1",
|
|
||||||
"name": "John Doe",
|
|
||||||
"speaker": "Speaker 1"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"id": "participant-2",
|
|
||||||
"name": "Jane Smith",
|
|
||||||
"speaker": "Speaker 2"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### `test` Event
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"event": "test",
|
|
||||||
"event_id": "test.2025-08-27T12:34:56.789012Z",
|
|
||||||
"timestamp": "2025-08-27T12:34:56.789012Z",
|
|
||||||
"message": "This is a test webhook from Reflector",
|
|
||||||
"room": {
|
|
||||||
"id": "room-789",
|
|
||||||
"name": "Product Team Room"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Retry Policy
|
|
||||||
|
|
||||||
Webhooks are delivered with automatic retry logic to handle transient failures. When a webhook delivery fails due to server errors or network issues, Reflector will automatically retry the delivery multiple times over an extended period.
|
|
||||||
|
|
||||||
### Retry Mechanism
|
|
||||||
|
|
||||||
Reflector implements an exponential backoff strategy for webhook retries:
|
|
||||||
|
|
||||||
- **Initial retry delay**: 60 seconds after the first failure
|
|
||||||
- **Exponential backoff**: Each subsequent retry waits approximately twice as long as the previous one
|
|
||||||
- **Maximum retry interval**: 1 hour (backoff is capped at this duration)
|
|
||||||
- **Maximum retry attempts**: 30 attempts total
|
|
||||||
- **Total retry duration**: Retries continue for approximately 24 hours
|
|
||||||
|
|
||||||
### How Retries Work
|
|
||||||
|
|
||||||
When a webhook fails, Reflector will:
|
|
||||||
1. Wait 60 seconds, then retry (attempt #1)
|
|
||||||
2. If it fails again, wait ~2 minutes, then retry (attempt #2)
|
|
||||||
3. Continue doubling the wait time up to a maximum of 1 hour between attempts
|
|
||||||
4. Keep retrying at 1-hour intervals until successful or 30 attempts are exhausted
|
|
||||||
|
|
||||||
The `X-Webhook-Retry` header indicates the current retry attempt number (0 for the initial attempt, 1 for first retry, etc.), allowing your endpoint to track retry attempts.
|
|
||||||
|
|
||||||
### Retry Behavior by HTTP Status Code
|
|
||||||
|
|
||||||
| Status Code | Behavior |
|
|
||||||
|-------------|----------|
|
|
||||||
| 2xx (Success) | No retry, webhook marked as delivered |
|
|
||||||
| 4xx (Client Error) | No retry, request is considered permanently failed |
|
|
||||||
| 5xx (Server Error) | Automatic retry with exponential backoff |
|
|
||||||
| Network/Timeout Error | Automatic retry with exponential backoff |
|
|
||||||
|
|
||||||
**Important Notes:**
|
|
||||||
- Webhooks timeout after 30 seconds. If your endpoint takes longer to respond, it will be considered a timeout error and retried.
|
|
||||||
- During the retry period (~24 hours), you may receive the same webhook multiple times if your endpoint experiences intermittent failures.
|
|
||||||
- There is no mechanism to manually retry failed webhooks after the retry period expires.
|
|
||||||
|
|
||||||
## Testing Webhooks
|
|
||||||
|
|
||||||
You can test your webhook configuration before processing transcripts:
|
|
||||||
|
|
||||||
```http
|
|
||||||
POST /v1/rooms/{room_id}/webhook/test
|
|
||||||
```
|
|
||||||
|
|
||||||
Response:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"success": true,
|
|
||||||
"status_code": 200,
|
|
||||||
"message": "Webhook test successful",
|
|
||||||
"response_preview": "OK"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Or in case of failure:
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"success": false,
|
|
||||||
"error": "Webhook request timed out (10 seconds)"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
@@ -27,7 +27,7 @@ AUTH_JWT_AUDIENCE=
|
|||||||
#TRANSCRIPT_MODAL_API_KEY=xxxxx
|
#TRANSCRIPT_MODAL_API_KEY=xxxxx
|
||||||
|
|
||||||
TRANSCRIPT_BACKEND=modal
|
TRANSCRIPT_BACKEND=modal
|
||||||
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-parakeet-web.modal.run
|
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-web.modal.run
|
||||||
TRANSCRIPT_MODAL_API_KEY=
|
TRANSCRIPT_MODAL_API_KEY=
|
||||||
|
|
||||||
## =======================================================
|
## =======================================================
|
||||||
@@ -71,30 +71,3 @@ DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
|
|||||||
|
|
||||||
## Sentry DSN configuration
|
## Sentry DSN configuration
|
||||||
#SENTRY_DSN=
|
#SENTRY_DSN=
|
||||||
|
|
||||||
## =======================================================
|
|
||||||
## Video Platform Configuration
|
|
||||||
## =======================================================
|
|
||||||
|
|
||||||
## Whereby
|
|
||||||
#WHEREBY_API_KEY=your-whereby-api-key
|
|
||||||
#WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret
|
|
||||||
#WHEREBY_STORAGE_AWS_ACCESS_KEY_ID=your-aws-key
|
|
||||||
#WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret
|
|
||||||
#AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/...
|
|
||||||
|
|
||||||
## Daily.co
|
|
||||||
#DAILY_API_KEY=your-daily-api-key
|
|
||||||
#DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
|
|
||||||
#DAILY_SUBDOMAIN=your-subdomain
|
|
||||||
#DAILY_WEBHOOK_UUID= # Auto-populated by recreate_daily_webhook.py script
|
|
||||||
#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access
|
|
||||||
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
|
|
||||||
#DAILYCO_STORAGE_AWS_REGION=us-west-2
|
|
||||||
|
|
||||||
## Whereby (optional separate bucket)
|
|
||||||
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby
|
|
||||||
#WHEREBY_STORAGE_AWS_REGION=us-east-1
|
|
||||||
|
|
||||||
## Platform Configuration
|
|
||||||
#DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms
|
|
||||||
|
|||||||
161
server/gpu/modal_deployments/reflector_transcriber.py
Normal file
161
server/gpu/modal_deployments/reflector_transcriber.py
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
import os
|
||||||
|
import tempfile
|
||||||
|
import threading
|
||||||
|
|
||||||
|
import modal
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
MODELS_DIR = "/models"
|
||||||
|
|
||||||
|
MODEL_NAME = "large-v2"
|
||||||
|
MODEL_COMPUTE_TYPE: str = "float16"
|
||||||
|
MODEL_NUM_WORKERS: int = 1
|
||||||
|
|
||||||
|
MINUTES = 60 # seconds
|
||||||
|
|
||||||
|
volume = modal.Volume.from_name("models", create_if_missing=True)
|
||||||
|
|
||||||
|
app = modal.App("reflector-transcriber")
|
||||||
|
|
||||||
|
|
||||||
|
def download_model():
|
||||||
|
from faster_whisper import download_model
|
||||||
|
|
||||||
|
volume.reload()
|
||||||
|
|
||||||
|
download_model(MODEL_NAME, cache_dir=MODELS_DIR)
|
||||||
|
|
||||||
|
volume.commit()
|
||||||
|
|
||||||
|
|
||||||
|
image = (
|
||||||
|
modal.Image.debian_slim(python_version="3.12")
|
||||||
|
.pip_install(
|
||||||
|
"huggingface_hub==0.27.1",
|
||||||
|
"hf-transfer==0.1.9",
|
||||||
|
"torch==2.5.1",
|
||||||
|
"faster-whisper==1.1.1",
|
||||||
|
)
|
||||||
|
.env(
|
||||||
|
{
|
||||||
|
"HF_HUB_ENABLE_HF_TRANSFER": "1",
|
||||||
|
"LD_LIBRARY_PATH": (
|
||||||
|
"/usr/local/lib/python3.12/site-packages/nvidia/cudnn/lib/:"
|
||||||
|
"/opt/conda/lib/python3.12/site-packages/nvidia/cublas/lib/"
|
||||||
|
),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
.run_function(download_model, volumes={MODELS_DIR: volume})
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@app.cls(
|
||||||
|
gpu="A10G",
|
||||||
|
timeout=5 * MINUTES,
|
||||||
|
scaledown_window=5 * MINUTES,
|
||||||
|
allow_concurrent_inputs=6,
|
||||||
|
image=image,
|
||||||
|
volumes={MODELS_DIR: volume},
|
||||||
|
)
|
||||||
|
class Transcriber:
|
||||||
|
@modal.enter()
|
||||||
|
def enter(self):
|
||||||
|
import faster_whisper
|
||||||
|
import torch
|
||||||
|
|
||||||
|
self.lock = threading.Lock()
|
||||||
|
self.use_gpu = torch.cuda.is_available()
|
||||||
|
self.device = "cuda" if self.use_gpu else "cpu"
|
||||||
|
self.model = faster_whisper.WhisperModel(
|
||||||
|
MODEL_NAME,
|
||||||
|
device=self.device,
|
||||||
|
compute_type=MODEL_COMPUTE_TYPE,
|
||||||
|
num_workers=MODEL_NUM_WORKERS,
|
||||||
|
download_root=MODELS_DIR,
|
||||||
|
local_files_only=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
@modal.method()
|
||||||
|
def transcribe_segment(
|
||||||
|
self,
|
||||||
|
audio_data: str,
|
||||||
|
audio_suffix: str,
|
||||||
|
language: str,
|
||||||
|
):
|
||||||
|
with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
|
||||||
|
fp.write(audio_data)
|
||||||
|
|
||||||
|
with self.lock:
|
||||||
|
segments, _ = self.model.transcribe(
|
||||||
|
fp.name,
|
||||||
|
language=language,
|
||||||
|
beam_size=5,
|
||||||
|
word_timestamps=True,
|
||||||
|
vad_filter=True,
|
||||||
|
vad_parameters={"min_silence_duration_ms": 500},
|
||||||
|
)
|
||||||
|
|
||||||
|
segments = list(segments)
|
||||||
|
text = "".join(segment.text for segment in segments)
|
||||||
|
words = [
|
||||||
|
{"word": word.word, "start": word.start, "end": word.end}
|
||||||
|
for segment in segments
|
||||||
|
for word in segment.words
|
||||||
|
]
|
||||||
|
|
||||||
|
return {"text": text, "words": words}
|
||||||
|
|
||||||
|
|
||||||
|
@app.function(
|
||||||
|
scaledown_window=60,
|
||||||
|
timeout=60,
|
||||||
|
allow_concurrent_inputs=40,
|
||||||
|
secrets=[
|
||||||
|
modal.Secret.from_name("reflector-gpu"),
|
||||||
|
],
|
||||||
|
volumes={MODELS_DIR: volume},
|
||||||
|
)
|
||||||
|
@modal.asgi_app()
|
||||||
|
def web():
|
||||||
|
from fastapi import Body, Depends, FastAPI, HTTPException, UploadFile, status
|
||||||
|
from fastapi.security import OAuth2PasswordBearer
|
||||||
|
from typing_extensions import Annotated
|
||||||
|
|
||||||
|
transcriber = Transcriber()
|
||||||
|
|
||||||
|
app = FastAPI()
|
||||||
|
|
||||||
|
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
|
||||||
|
|
||||||
|
supported_file_types = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
|
||||||
|
|
||||||
|
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
|
||||||
|
if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||||
|
detail="Invalid API key",
|
||||||
|
headers={"WWW-Authenticate": "Bearer"},
|
||||||
|
)
|
||||||
|
|
||||||
|
class TranscriptResponse(BaseModel):
|
||||||
|
result: dict
|
||||||
|
|
||||||
|
@app.post("/v1/audio/transcriptions", dependencies=[Depends(apikey_auth)])
|
||||||
|
def transcribe(
|
||||||
|
file: UploadFile,
|
||||||
|
model: str = "whisper-1",
|
||||||
|
language: Annotated[str, Body(...)] = "en",
|
||||||
|
) -> TranscriptResponse:
|
||||||
|
audio_data = file.file.read()
|
||||||
|
audio_suffix = file.filename.split(".")[-1]
|
||||||
|
assert audio_suffix in supported_file_types
|
||||||
|
|
||||||
|
func = transcriber.transcribe_segment.spawn(
|
||||||
|
audio_data=audio_data,
|
||||||
|
audio_suffix=audio_suffix,
|
||||||
|
language=language,
|
||||||
|
)
|
||||||
|
result = func.get()
|
||||||
|
return result
|
||||||
|
|
||||||
|
return app
|
||||||
@@ -3,7 +3,7 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
import uuid
|
import uuid
|
||||||
from typing import Generator, Mapping, NamedTuple, NewType, TypedDict
|
from typing import Mapping, NewType
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
import modal
|
import modal
|
||||||
@@ -14,7 +14,10 @@ SAMPLERATE = 16000
|
|||||||
UPLOADS_PATH = "/uploads"
|
UPLOADS_PATH = "/uploads"
|
||||||
CACHE_PATH = "/cache"
|
CACHE_PATH = "/cache"
|
||||||
VAD_CONFIG = {
|
VAD_CONFIG = {
|
||||||
"batch_max_duration": 30.0,
|
"max_segment_duration": 30.0,
|
||||||
|
"batch_max_files": 10,
|
||||||
|
"batch_max_duration": 5.0,
|
||||||
|
"min_segment_duration": 0.02,
|
||||||
"silence_padding": 0.5,
|
"silence_padding": 0.5,
|
||||||
"window_size": 512,
|
"window_size": 512,
|
||||||
}
|
}
|
||||||
@@ -22,37 +25,6 @@ VAD_CONFIG = {
|
|||||||
ParakeetUniqFilename = NewType("ParakeetUniqFilename", str)
|
ParakeetUniqFilename = NewType("ParakeetUniqFilename", str)
|
||||||
AudioFileExtension = NewType("AudioFileExtension", str)
|
AudioFileExtension = NewType("AudioFileExtension", str)
|
||||||
|
|
||||||
|
|
||||||
class TimeSegment(NamedTuple):
|
|
||||||
"""Represents a time segment with start and end times."""
|
|
||||||
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
|
|
||||||
|
|
||||||
class AudioSegment(NamedTuple):
|
|
||||||
"""Represents an audio segment with timing and audio data."""
|
|
||||||
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
audio: any
|
|
||||||
|
|
||||||
|
|
||||||
class TranscriptResult(NamedTuple):
|
|
||||||
"""Represents a transcription result with text and word timings."""
|
|
||||||
|
|
||||||
text: str
|
|
||||||
words: list["WordTiming"]
|
|
||||||
|
|
||||||
|
|
||||||
class WordTiming(TypedDict):
|
|
||||||
"""Represents a word with its timing information."""
|
|
||||||
|
|
||||||
word: str
|
|
||||||
start: float
|
|
||||||
end: float
|
|
||||||
|
|
||||||
|
|
||||||
app = modal.App("reflector-transcriber-parakeet")
|
app = modal.App("reflector-transcriber-parakeet")
|
||||||
|
|
||||||
# Volume for caching model weights
|
# Volume for caching model weights
|
||||||
@@ -77,13 +49,13 @@ image = (
|
|||||||
.pip_install(
|
.pip_install(
|
||||||
"hf_transfer==0.1.9",
|
"hf_transfer==0.1.9",
|
||||||
"huggingface_hub[hf-xet]==0.31.2",
|
"huggingface_hub[hf-xet]==0.31.2",
|
||||||
"nemo_toolkit[asr]==2.5.0",
|
"nemo_toolkit[asr]==2.3.0",
|
||||||
"cuda-python==12.8.0",
|
"cuda-python==12.8.0",
|
||||||
"fastapi==0.115.12",
|
"fastapi==0.115.12",
|
||||||
"numpy<2",
|
"numpy<2",
|
||||||
"librosa==0.11.0",
|
"librosa==0.10.1",
|
||||||
"requests",
|
"requests",
|
||||||
"silero-vad==6.2.0",
|
"silero-vad==5.1.0",
|
||||||
"torch",
|
"torch",
|
||||||
)
|
)
|
||||||
.entrypoint([]) # silence chatty logs by container on start
|
.entrypoint([]) # silence chatty logs by container on start
|
||||||
@@ -198,14 +170,12 @@ class TranscriberParakeetLive:
|
|||||||
(output,) = self.model.transcribe([padded_audio], timestamps=True)
|
(output,) = self.model.transcribe([padded_audio], timestamps=True)
|
||||||
|
|
||||||
text = output.text.strip()
|
text = output.text.strip()
|
||||||
words: list[WordTiming] = [
|
words = [
|
||||||
WordTiming(
|
{
|
||||||
# XXX the space added here is to match the output of whisper
|
"word": word_info["word"] + " ",
|
||||||
# whisper add space to each words, while parakeet don't
|
"start": round(word_info["start"], 2),
|
||||||
word=word_info["word"] + " ",
|
"end": round(word_info["end"], 2),
|
||||||
start=round(word_info["start"], 2),
|
}
|
||||||
end=round(word_info["end"], 2),
|
|
||||||
)
|
|
||||||
for word_info in output.timestamp["word"]
|
for word_info in output.timestamp["word"]
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -241,12 +211,12 @@ class TranscriberParakeetLive:
|
|||||||
for i, (filename, output) in enumerate(zip(filenames, outputs)):
|
for i, (filename, output) in enumerate(zip(filenames, outputs)):
|
||||||
text = output.text.strip()
|
text = output.text.strip()
|
||||||
|
|
||||||
words: list[WordTiming] = [
|
words = [
|
||||||
WordTiming(
|
{
|
||||||
word=word_info["word"] + " ",
|
"word": word_info["word"] + " ",
|
||||||
start=round(word_info["start"], 2),
|
"start": round(word_info["start"], 2),
|
||||||
end=round(word_info["end"], 2),
|
"end": round(word_info["end"], 2),
|
||||||
)
|
}
|
||||||
for word_info in output.timestamp["word"]
|
for word_info in output.timestamp["word"]
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -301,12 +271,9 @@ class TranscriberParakeetFile:
|
|||||||
audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
|
audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
|
||||||
return audio_array
|
return audio_array
|
||||||
|
|
||||||
def vad_segment_generator(
|
def vad_segment_generator(audio_array):
|
||||||
audio_array,
|
|
||||||
) -> Generator[TimeSegment, None, None]:
|
|
||||||
"""Generate speech segments using VAD with start/end sample indices"""
|
"""Generate speech segments using VAD with start/end sample indices"""
|
||||||
vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
|
vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
|
||||||
audio_duration = len(audio_array) / float(SAMPLERATE)
|
|
||||||
window_size = VAD_CONFIG["window_size"]
|
window_size = VAD_CONFIG["window_size"]
|
||||||
start = None
|
start = None
|
||||||
|
|
||||||
@@ -330,125 +297,107 @@ class TranscriberParakeetFile:
|
|||||||
start_time = start / float(SAMPLERATE)
|
start_time = start / float(SAMPLERATE)
|
||||||
end_time = end / float(SAMPLERATE)
|
end_time = end / float(SAMPLERATE)
|
||||||
|
|
||||||
yield TimeSegment(start_time, end_time)
|
# Extract the actual audio segment
|
||||||
start = None
|
audio_segment = audio_array[start:end]
|
||||||
|
|
||||||
if start is not None:
|
yield (start_time, end_time, audio_segment)
|
||||||
start_time = start / float(SAMPLERATE)
|
start = None
|
||||||
yield TimeSegment(start_time, audio_duration)
|
|
||||||
|
|
||||||
vad_iterator.reset_states()
|
vad_iterator.reset_states()
|
||||||
|
|
||||||
def batch_speech_segments(
|
def vad_segment_filter(segments):
|
||||||
segments: Generator[TimeSegment, None, None], max_duration: int
|
"""Filter VAD segments by duration and chunk large segments"""
|
||||||
) -> Generator[TimeSegment, None, None]:
|
min_dur = VAD_CONFIG["min_segment_duration"]
|
||||||
"""
|
max_dur = VAD_CONFIG["max_segment_duration"]
|
||||||
Input segments:
|
|
||||||
[0-2] [3-5] [6-8] [10-11] [12-15] [17-19] [20-22]
|
|
||||||
|
|
||||||
↓ (max_duration=10)
|
for start_time, end_time, audio_segment in segments:
|
||||||
|
segment_duration = end_time - start_time
|
||||||
|
|
||||||
Output batches:
|
# Skip very small segments
|
||||||
[0-8] [10-19] [20-22]
|
if segment_duration < min_dur:
|
||||||
|
|
||||||
Note: silences are kept for better transcription, previous implementation was
|
|
||||||
passing segments separatly, but the output was less accurate.
|
|
||||||
"""
|
|
||||||
batch_start_time = None
|
|
||||||
batch_end_time = None
|
|
||||||
|
|
||||||
for segment in segments:
|
|
||||||
start_time, end_time = segment.start, segment.end
|
|
||||||
if batch_start_time is None or batch_end_time is None:
|
|
||||||
batch_start_time = start_time
|
|
||||||
batch_end_time = end_time
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
total_duration = end_time - batch_start_time
|
# If segment is within max duration, yield as-is
|
||||||
|
if segment_duration <= max_dur:
|
||||||
if total_duration <= max_duration:
|
yield (start_time, end_time, audio_segment)
|
||||||
batch_end_time = end_time
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
yield TimeSegment(batch_start_time, batch_end_time)
|
# Chunk large segments into smaller pieces
|
||||||
batch_start_time = start_time
|
chunk_samples = int(max_dur * SAMPLERATE)
|
||||||
batch_end_time = end_time
|
current_start = start_time
|
||||||
|
|
||||||
if batch_start_time is None or batch_end_time is None:
|
for chunk_offset in range(0, len(audio_segment), chunk_samples):
|
||||||
return
|
chunk_audio = audio_segment[
|
||||||
|
chunk_offset : chunk_offset + chunk_samples
|
||||||
|
]
|
||||||
|
if len(chunk_audio) == 0:
|
||||||
|
break
|
||||||
|
|
||||||
yield TimeSegment(batch_start_time, batch_end_time)
|
chunk_duration = len(chunk_audio) / float(SAMPLERATE)
|
||||||
|
chunk_end = current_start + chunk_duration
|
||||||
|
|
||||||
def batch_segment_to_audio_segment(
|
# Only yield chunks that meet minimum duration
|
||||||
segments: Generator[TimeSegment, None, None],
|
if chunk_duration >= min_dur:
|
||||||
audio_array,
|
yield (current_start, chunk_end, chunk_audio)
|
||||||
) -> Generator[AudioSegment, None, None]:
|
|
||||||
"""Extract audio segments and apply padding for Parakeet compatibility.
|
|
||||||
|
|
||||||
Uses pad_audio to ensure segments are at least 0.5s long, preventing
|
current_start = chunk_end
|
||||||
Parakeet crashes. This padding may cause slight timing overlaps between
|
|
||||||
segments, which are corrected by enforce_word_timing_constraints.
|
|
||||||
"""
|
|
||||||
for segment in segments:
|
|
||||||
start_time, end_time = segment.start, segment.end
|
|
||||||
start_sample = int(start_time * SAMPLERATE)
|
|
||||||
end_sample = int(end_time * SAMPLERATE)
|
|
||||||
audio_segment = audio_array[start_sample:end_sample]
|
|
||||||
|
|
||||||
padded_segment = pad_audio(audio_segment, SAMPLERATE)
|
def batch_segments(segments, max_files=10, max_duration=5.0):
|
||||||
|
batch = []
|
||||||
|
batch_duration = 0.0
|
||||||
|
|
||||||
yield AudioSegment(start_time, end_time, padded_segment)
|
for start_time, end_time, audio_segment in segments:
|
||||||
|
segment_duration = end_time - start_time
|
||||||
|
|
||||||
def transcribe_batch(model, audio_segments: list) -> list:
|
if segment_duration < VAD_CONFIG["silence_padding"]:
|
||||||
|
silence_samples = int(
|
||||||
|
(VAD_CONFIG["silence_padding"] - segment_duration) * SAMPLERATE
|
||||||
|
)
|
||||||
|
padding = np.zeros(silence_samples, dtype=np.float32)
|
||||||
|
audio_segment = np.concatenate([audio_segment, padding])
|
||||||
|
segment_duration = VAD_CONFIG["silence_padding"]
|
||||||
|
|
||||||
|
batch.append((start_time, end_time, audio_segment))
|
||||||
|
batch_duration += segment_duration
|
||||||
|
|
||||||
|
if len(batch) >= max_files or batch_duration >= max_duration:
|
||||||
|
yield batch
|
||||||
|
batch = []
|
||||||
|
batch_duration = 0.0
|
||||||
|
|
||||||
|
if batch:
|
||||||
|
yield batch
|
||||||
|
|
||||||
|
def transcribe_batch(model, audio_segments):
|
||||||
with NoStdStreams():
|
with NoStdStreams():
|
||||||
outputs = model.transcribe(audio_segments, timestamps=True)
|
outputs = model.transcribe(audio_segments, timestamps=True)
|
||||||
return outputs
|
return outputs
|
||||||
|
|
||||||
def enforce_word_timing_constraints(
|
|
||||||
words: list[WordTiming],
|
|
||||||
) -> list[WordTiming]:
|
|
||||||
"""Enforce that word end times don't exceed the start time of the next word.
|
|
||||||
|
|
||||||
Due to silence padding added in batch_segment_to_audio_segment for better
|
|
||||||
transcription accuracy, word timings from different segments may overlap.
|
|
||||||
This function ensures there are no overlaps by adjusting end times.
|
|
||||||
"""
|
|
||||||
if len(words) <= 1:
|
|
||||||
return words
|
|
||||||
|
|
||||||
enforced_words = []
|
|
||||||
for i, word in enumerate(words):
|
|
||||||
enforced_word = word.copy()
|
|
||||||
|
|
||||||
if i < len(words) - 1:
|
|
||||||
next_start = words[i + 1]["start"]
|
|
||||||
if enforced_word["end"] > next_start:
|
|
||||||
enforced_word["end"] = next_start
|
|
||||||
|
|
||||||
enforced_words.append(enforced_word)
|
|
||||||
|
|
||||||
return enforced_words
|
|
||||||
|
|
||||||
def emit_results(
|
def emit_results(
|
||||||
results: list,
|
results,
|
||||||
segments_info: list[AudioSegment],
|
segments_info,
|
||||||
) -> Generator[TranscriptResult, None, None]:
|
batch_index,
|
||||||
|
total_batches,
|
||||||
|
):
|
||||||
"""Yield transcribed text and word timings from model output, adjusting timestamps to absolute positions."""
|
"""Yield transcribed text and word timings from model output, adjusting timestamps to absolute positions."""
|
||||||
for i, (output, segment) in enumerate(zip(results, segments_info)):
|
for i, (output, (start_time, end_time, _)) in enumerate(
|
||||||
start_time, end_time = segment.start, segment.end
|
zip(results, segments_info)
|
||||||
|
):
|
||||||
text = output.text.strip()
|
text = output.text.strip()
|
||||||
words: list[WordTiming] = [
|
words = [
|
||||||
WordTiming(
|
{
|
||||||
word=word_info["word"] + " ",
|
"word": word_info["word"] + " ",
|
||||||
start=round(
|
"start": round(
|
||||||
word_info["start"] + start_time + timestamp_offset, 2
|
word_info["start"] + start_time + timestamp_offset, 2
|
||||||
),
|
),
|
||||||
end=round(word_info["end"] + start_time + timestamp_offset, 2),
|
"end": round(
|
||||||
)
|
word_info["end"] + start_time + timestamp_offset, 2
|
||||||
|
),
|
||||||
|
}
|
||||||
for word_info in output.timestamp["word"]
|
for word_info in output.timestamp["word"]
|
||||||
]
|
]
|
||||||
|
|
||||||
yield TranscriptResult(text, words)
|
yield text, words
|
||||||
|
|
||||||
upload_volume.reload()
|
upload_volume.reload()
|
||||||
|
|
||||||
@@ -458,31 +407,41 @@ class TranscriberParakeetFile:
|
|||||||
|
|
||||||
audio_array = load_and_convert_audio(file_path)
|
audio_array = load_and_convert_audio(file_path)
|
||||||
total_duration = len(audio_array) / float(SAMPLERATE)
|
total_duration = len(audio_array) / float(SAMPLERATE)
|
||||||
|
processed_duration = 0.0
|
||||||
|
|
||||||
all_text_parts: list[str] = []
|
all_text_parts = []
|
||||||
all_words: list[WordTiming] = []
|
all_words = []
|
||||||
|
|
||||||
raw_segments = vad_segment_generator(audio_array)
|
raw_segments = vad_segment_generator(audio_array)
|
||||||
speech_segments = batch_speech_segments(
|
filtered_segments = vad_segment_filter(raw_segments)
|
||||||
raw_segments,
|
batches = batch_segments(
|
||||||
|
filtered_segments,
|
||||||
|
VAD_CONFIG["batch_max_files"],
|
||||||
VAD_CONFIG["batch_max_duration"],
|
VAD_CONFIG["batch_max_duration"],
|
||||||
)
|
)
|
||||||
audio_segments = batch_segment_to_audio_segment(speech_segments, audio_array)
|
|
||||||
|
|
||||||
for batch in audio_segments:
|
batch_index = 0
|
||||||
audio_segment = batch.audio
|
total_batches = max(
|
||||||
results = transcribe_batch(self.model, [audio_segment])
|
1, int(total_duration / VAD_CONFIG["batch_max_duration"]) + 1
|
||||||
|
)
|
||||||
|
|
||||||
for result in emit_results(
|
for batch in batches:
|
||||||
|
batch_index += 1
|
||||||
|
audio_segments = [seg[2] for seg in batch]
|
||||||
|
results = transcribe_batch(self.model, audio_segments)
|
||||||
|
|
||||||
|
for text, words in emit_results(
|
||||||
results,
|
results,
|
||||||
[batch],
|
batch,
|
||||||
|
batch_index,
|
||||||
|
total_batches,
|
||||||
):
|
):
|
||||||
if not result.text:
|
if not text:
|
||||||
continue
|
continue
|
||||||
all_text_parts.append(result.text)
|
all_text_parts.append(text)
|
||||||
all_words.extend(result.words)
|
all_words.extend(words)
|
||||||
|
|
||||||
all_words = enforce_word_timing_constraints(all_words)
|
processed_duration += sum(len(seg[2]) / float(SAMPLERATE) for seg in batch)
|
||||||
|
|
||||||
combined_text = " ".join(all_text_parts)
|
combined_text = " ".join(all_text_parts)
|
||||||
return {"text": combined_text, "words": all_words}
|
return {"text": combined_text, "words": all_words}
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
"""Add webhook fields to rooms
|
|
||||||
|
|
||||||
Revision ID: 0194f65cd6d3
|
|
||||||
Revises: 5a8907fd1d78
|
|
||||||
Create Date: 2025-08-27 09:03:19.610995
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "0194f65cd6d3"
|
|
||||||
down_revision: Union[str, None] = "5a8907fd1d78"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.add_column(sa.Column("webhook_url", sa.String(), nullable=True))
|
|
||||||
batch_op.add_column(sa.Column("webhook_secret", sa.String(), nullable=True))
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.drop_column("webhook_secret")
|
|
||||||
batch_op.drop_column("webhook_url")
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
"""remove user_id from meeting table
|
|
||||||
|
|
||||||
Revision ID: 0ce521cda2ee
|
|
||||||
Revises: 6dec9fb5b46c
|
|
||||||
Create Date: 2025-09-10 12:40:55.688899
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "0ce521cda2ee"
|
|
||||||
down_revision: Union[str, None] = "6dec9fb5b46c"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.drop_column("user_id")
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column("user_id", sa.VARCHAR(), autoincrement=False, nullable=True)
|
|
||||||
)
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
"""add_platform_support
|
|
||||||
|
|
||||||
Revision ID: 1e49625677e4
|
|
||||||
Revises: 9e3f7b2a4c8e
|
|
||||||
Create Date: 2025-10-08 13:17:29.943612
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "1e49625677e4"
|
|
||||||
down_revision: Union[str, None] = "9e3f7b2a4c8e"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
"""Add platform field with default 'whereby' for backward compatibility."""
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column(
|
|
||||||
"platform",
|
|
||||||
sa.String(),
|
|
||||||
nullable=True,
|
|
||||||
server_default=None,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column(
|
|
||||||
"platform",
|
|
||||||
sa.String(),
|
|
||||||
nullable=False,
|
|
||||||
server_default="whereby",
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
"""Remove platform field."""
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.drop_column("platform")
|
|
||||||
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.drop_column("platform")
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
"""clean up orphaned room_id references in meeting table
|
|
||||||
|
|
||||||
Revision ID: 2ae3db106d4e
|
|
||||||
Revises: def1b5867d4c
|
|
||||||
Create Date: 2025-09-11 10:35:15.759967
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "2ae3db106d4e"
|
|
||||||
down_revision: Union[str, None] = "def1b5867d4c"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# Set room_id to NULL for meetings that reference non-existent rooms
|
|
||||||
op.execute("""
|
|
||||||
UPDATE meeting
|
|
||||||
SET room_id = NULL
|
|
||||||
WHERE room_id IS NOT NULL
|
|
||||||
AND room_id NOT IN (SELECT id FROM room WHERE id IS NOT NULL)
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# Cannot restore orphaned references - no operation needed
|
|
||||||
pass
|
|
||||||
@@ -1,79 +0,0 @@
|
|||||||
"""add daily participant session table with immutable left_at
|
|
||||||
|
|
||||||
Revision ID: 2b92a1b03caa
|
|
||||||
Revises: f8294b31f022
|
|
||||||
Create Date: 2025-11-13 20:29:30.486577
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "2b92a1b03caa"
|
|
||||||
down_revision: Union[str, None] = "f8294b31f022"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# Create table
|
|
||||||
op.create_table(
|
|
||||||
"daily_participant_session",
|
|
||||||
sa.Column("id", sa.String(), nullable=False),
|
|
||||||
sa.Column("meeting_id", sa.String(), nullable=False),
|
|
||||||
sa.Column("room_id", sa.String(), nullable=False),
|
|
||||||
sa.Column("session_id", sa.String(), nullable=False),
|
|
||||||
sa.Column("user_id", sa.String(), nullable=True),
|
|
||||||
sa.Column("user_name", sa.String(), nullable=False),
|
|
||||||
sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
|
|
||||||
sa.ForeignKeyConstraint(["meeting_id"], ["meeting.id"], ondelete="CASCADE"),
|
|
||||||
sa.ForeignKeyConstraint(["room_id"], ["room.id"], ondelete="CASCADE"),
|
|
||||||
sa.PrimaryKeyConstraint("id"),
|
|
||||||
)
|
|
||||||
with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
|
|
||||||
batch_op.create_index(
|
|
||||||
"idx_daily_session_meeting_left", ["meeting_id", "left_at"], unique=False
|
|
||||||
)
|
|
||||||
batch_op.create_index("idx_daily_session_room", ["room_id"], unique=False)
|
|
||||||
|
|
||||||
# Create trigger function to prevent left_at from being updated once set
|
|
||||||
op.execute("""
|
|
||||||
CREATE OR REPLACE FUNCTION prevent_left_at_update()
|
|
||||||
RETURNS TRIGGER AS $$
|
|
||||||
BEGIN
|
|
||||||
IF OLD.left_at IS NOT NULL THEN
|
|
||||||
RAISE EXCEPTION 'left_at is immutable once set';
|
|
||||||
END IF;
|
|
||||||
RETURN NEW;
|
|
||||||
END;
|
|
||||||
$$ LANGUAGE plpgsql;
|
|
||||||
""")
|
|
||||||
|
|
||||||
# Create trigger
|
|
||||||
op.execute("""
|
|
||||||
CREATE TRIGGER prevent_left_at_update_trigger
|
|
||||||
BEFORE UPDATE ON daily_participant_session
|
|
||||||
FOR EACH ROW
|
|
||||||
EXECUTE FUNCTION prevent_left_at_update();
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# Drop trigger
|
|
||||||
op.execute(
|
|
||||||
"DROP TRIGGER IF EXISTS prevent_left_at_update_trigger ON daily_participant_session;"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Drop trigger function
|
|
||||||
op.execute("DROP FUNCTION IF EXISTS prevent_left_at_update();")
|
|
||||||
|
|
||||||
# Drop indexes and table
|
|
||||||
with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
|
|
||||||
batch_op.drop_index("idx_daily_session_room")
|
|
||||||
batch_op.drop_index("idx_daily_session_meeting_left")
|
|
||||||
|
|
||||||
op.drop_table("daily_participant_session")
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
"""add cascade delete to meeting consent foreign key
|
|
||||||
|
|
||||||
Revision ID: 5a8907fd1d78
|
|
||||||
Revises: 0ab2d7ffaa16
|
|
||||||
Create Date: 2025-08-26 17:26:50.945491
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "5a8907fd1d78"
|
|
||||||
down_revision: Union[str, None] = "0ab2d7ffaa16"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
|
|
||||||
batch_op.drop_constraint(
|
|
||||||
batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey"
|
|
||||||
)
|
|
||||||
batch_op.create_foreign_key(
|
|
||||||
batch_op.f("meeting_consent_meeting_id_fkey"),
|
|
||||||
"meeting",
|
|
||||||
["meeting_id"],
|
|
||||||
["id"],
|
|
||||||
ondelete="CASCADE",
|
|
||||||
)
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
|
|
||||||
batch_op.drop_constraint(
|
|
||||||
batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey"
|
|
||||||
)
|
|
||||||
batch_op.create_foreign_key(
|
|
||||||
batch_op.f("meeting_consent_meeting_id_fkey"),
|
|
||||||
"meeting",
|
|
||||||
["meeting_id"],
|
|
||||||
["id"],
|
|
||||||
)
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
"""Make room platform non-nullable with dynamic default
|
|
||||||
|
|
||||||
Revision ID: 5d6b9df9b045
|
|
||||||
Revises: 2b92a1b03caa
|
|
||||||
Create Date: 2025-11-21 13:22:25.756584
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "5d6b9df9b045"
|
|
||||||
down_revision: Union[str, None] = "2b92a1b03caa"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
op.execute("UPDATE room SET platform = 'whereby' WHERE platform IS NULL")
|
|
||||||
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.alter_column("platform", existing_type=sa.String(), nullable=False)
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.alter_column("platform", existing_type=sa.String(), nullable=True)
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
"""remove_one_active_meeting_per_room_constraint
|
|
||||||
|
|
||||||
Revision ID: 6025e9b2bef2
|
|
||||||
Revises: 2ae3db106d4e
|
|
||||||
Create Date: 2025-08-18 18:45:44.418392
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "6025e9b2bef2"
|
|
||||||
down_revision: Union[str, None] = "2ae3db106d4e"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# Remove the unique constraint that prevents multiple active meetings per room
|
|
||||||
# This is needed to support calendar integration with overlapping meetings
|
|
||||||
# Check if index exists before trying to drop it
|
|
||||||
from alembic import context
|
|
||||||
|
|
||||||
if context.get_context().dialect.name == "postgresql":
|
|
||||||
conn = op.get_bind()
|
|
||||||
result = conn.execute(
|
|
||||||
sa.text(
|
|
||||||
"SELECT 1 FROM pg_indexes WHERE indexname = 'idx_one_active_meeting_per_room'"
|
|
||||||
)
|
|
||||||
)
|
|
||||||
if result.fetchone():
|
|
||||||
op.drop_index("idx_one_active_meeting_per_room", table_name="meeting")
|
|
||||||
else:
|
|
||||||
# For SQLite, just try to drop it
|
|
||||||
try:
|
|
||||||
op.drop_index("idx_one_active_meeting_per_room", table_name="meeting")
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# Restore the unique constraint
|
|
||||||
op.create_index(
|
|
||||||
"idx_one_active_meeting_per_room",
|
|
||||||
"meeting",
|
|
||||||
["room_id"],
|
|
||||||
unique=True,
|
|
||||||
postgresql_where=sa.text("is_active = true"),
|
|
||||||
sqlite_where=sa.text("is_active = 1"),
|
|
||||||
)
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
"""webhook url and secret null by default
|
|
||||||
|
|
||||||
|
|
||||||
Revision ID: 61882a919591
|
|
||||||
Revises: 0194f65cd6d3
|
|
||||||
Create Date: 2025-08-29 11:46:36.738091
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "61882a919591"
|
|
||||||
down_revision: Union[str, None] = "0194f65cd6d3"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
pass
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
pass
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
"""make meeting room_id required and add foreign key
|
|
||||||
|
|
||||||
Revision ID: 6dec9fb5b46c
|
|
||||||
Revises: 61882a919591
|
|
||||||
Create Date: 2025-09-10 10:47:06.006819
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "6dec9fb5b46c"
|
|
||||||
down_revision: Union[str, None] = "61882a919591"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.create_foreign_key(
|
|
||||||
None, "room", ["room_id"], ["id"], ondelete="CASCADE"
|
|
||||||
)
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.drop_constraint("meeting_room_id_fkey", type_="foreignkey")
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
"""add user api keys
|
|
||||||
|
|
||||||
Revision ID: 9e3f7b2a4c8e
|
|
||||||
Revises: dc035ff72fd5
|
|
||||||
Create Date: 2025-10-17 00:00:00.000000
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "9e3f7b2a4c8e"
|
|
||||||
down_revision: Union[str, None] = "dc035ff72fd5"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
op.create_table(
|
|
||||||
"user_api_key",
|
|
||||||
sa.Column("id", sa.String(), nullable=False),
|
|
||||||
sa.Column("user_id", sa.String(), nullable=False),
|
|
||||||
sa.Column("key_hash", sa.String(), nullable=False),
|
|
||||||
sa.Column("name", sa.String(), nullable=True),
|
|
||||||
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.PrimaryKeyConstraint("id"),
|
|
||||||
)
|
|
||||||
|
|
||||||
with op.batch_alter_table("user_api_key", schema=None) as batch_op:
|
|
||||||
batch_op.create_index("idx_user_api_key_hash", ["key_hash"], unique=True)
|
|
||||||
batch_op.create_index("idx_user_api_key_user_id", ["user_id"], unique=False)
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
op.drop_table("user_api_key")
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
"""add user table
|
|
||||||
|
|
||||||
Revision ID: bbafedfa510c
|
|
||||||
Revises: 5d6b9df9b045
|
|
||||||
Create Date: 2025-11-19 21:06:30.543262
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "bbafedfa510c"
|
|
||||||
down_revision: Union[str, None] = "5d6b9df9b045"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
op.create_table(
|
|
||||||
"user",
|
|
||||||
sa.Column("id", sa.String(), nullable=False),
|
|
||||||
sa.Column("email", sa.String(), nullable=False),
|
|
||||||
sa.Column("authentik_uid", sa.String(), nullable=False),
|
|
||||||
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.PrimaryKeyConstraint("id"),
|
|
||||||
)
|
|
||||||
|
|
||||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
|
||||||
batch_op.create_index("idx_user_authentik_uid", ["authentik_uid"], unique=True)
|
|
||||||
batch_op.create_index("idx_user_email", ["email"], unique=False)
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
op.drop_table("user")
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
"""add_grace_period_fields_to_meeting
|
|
||||||
|
|
||||||
Revision ID: d4a1c446458c
|
|
||||||
Revises: 6025e9b2bef2
|
|
||||||
Create Date: 2025-08-18 18:50:37.768052
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "d4a1c446458c"
|
|
||||||
down_revision: Union[str, None] = "6025e9b2bef2"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# Add fields to track when participants left for grace period logic
|
|
||||||
op.add_column(
|
|
||||||
"meeting", sa.Column("last_participant_left_at", sa.DateTime(timezone=True))
|
|
||||||
)
|
|
||||||
op.add_column(
|
|
||||||
"meeting",
|
|
||||||
sa.Column("grace_period_minutes", sa.Integer, server_default=sa.text("15")),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
op.drop_column("meeting", "grace_period_minutes")
|
|
||||||
op.drop_column("meeting", "last_participant_left_at")
|
|
||||||
@@ -1,129 +0,0 @@
|
|||||||
"""add calendar
|
|
||||||
|
|
||||||
Revision ID: d8e204bbf615
|
|
||||||
Revises: d4a1c446458c
|
|
||||||
Create Date: 2025-09-10 19:56:22.295756
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Sequence, Union
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from alembic import op
|
|
||||||
from sqlalchemy.dialects import postgresql
|
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
|
||||||
revision: str = "d8e204bbf615"
|
|
||||||
down_revision: Union[str, None] = "d4a1c446458c"
|
|
||||||
branch_labels: Union[str, Sequence[str], None] = None
|
|
||||||
depends_on: Union[str, Sequence[str], None] = None
|
|
||||||
|
|
||||||
|
|
||||||
def upgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
op.create_table(
|
|
||||||
"calendar_event",
|
|
||||||
sa.Column("id", sa.String(), nullable=False),
|
|
||||||
sa.Column("room_id", sa.String(), nullable=False),
|
|
||||||
sa.Column("ics_uid", sa.Text(), nullable=False),
|
|
||||||
sa.Column("title", sa.Text(), nullable=True),
|
|
||||||
sa.Column("description", sa.Text(), nullable=True),
|
|
||||||
sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("end_time", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("attendees", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
|
|
||||||
sa.Column("location", sa.Text(), nullable=True),
|
|
||||||
sa.Column("ics_raw_data", sa.Text(), nullable=True),
|
|
||||||
sa.Column("last_synced", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column(
|
|
||||||
"is_deleted", sa.Boolean(), server_default=sa.text("false"), nullable=False
|
|
||||||
),
|
|
||||||
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.ForeignKeyConstraint(
|
|
||||||
["room_id"],
|
|
||||||
["room.id"],
|
|
||||||
name="fk_calendar_event_room_id",
|
|
||||||
ondelete="CASCADE",
|
|
||||||
),
|
|
||||||
sa.PrimaryKeyConstraint("id"),
|
|
||||||
sa.UniqueConstraint("room_id", "ics_uid", name="uq_room_calendar_event"),
|
|
||||||
)
|
|
||||||
with op.batch_alter_table("calendar_event", schema=None) as batch_op:
|
|
||||||
batch_op.create_index(
|
|
||||||
"idx_calendar_event_deleted",
|
|
||||||
["is_deleted"],
|
|
||||||
unique=False,
|
|
||||||
postgresql_where=sa.text("NOT is_deleted"),
|
|
||||||
)
|
|
||||||
batch_op.create_index(
|
|
||||||
"idx_calendar_event_room_start", ["room_id", "start_time"], unique=False
|
|
||||||
)
|
|
||||||
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.add_column(sa.Column("calendar_event_id", sa.String(), nullable=True))
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column(
|
|
||||||
"calendar_metadata",
|
|
||||||
postgresql.JSONB(astext_type=sa.Text()),
|
|
||||||
nullable=True,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
batch_op.create_index(
|
|
||||||
"idx_meeting_calendar_event", ["calendar_event_id"], unique=False
|
|
||||||
)
|
|
||||||
batch_op.create_foreign_key(
|
|
||||||
"fk_meeting_calendar_event_id",
|
|
||||||
"calendar_event",
|
|
||||||
["calendar_event_id"],
|
|
||||||
["id"],
|
|
||||||
ondelete="SET NULL",
|
|
||||||
)
|
|
||||||
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.add_column(sa.Column("ics_url", sa.Text(), nullable=True))
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column(
|
|
||||||
"ics_fetch_interval", sa.Integer(), server_default="300", nullable=True
|
|
||||||
)
|
|
||||||
)
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column(
|
|
||||||
"ics_enabled",
|
|
||||||
sa.Boolean(),
|
|
||||||
server_default=sa.text("false"),
|
|
||||||
nullable=False,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
batch_op.add_column(
|
|
||||||
sa.Column("ics_last_sync", sa.DateTime(timezone=True), nullable=True)
|
|
||||||
)
|
|
||||||
batch_op.add_column(sa.Column("ics_last_etag", sa.Text(), nullable=True))
|
|
||||||
batch_op.create_index("idx_room_ics_enabled", ["ics_enabled"], unique=False)
|
|
||||||
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
|
|
||||||
|
|
||||||
def downgrade() -> None:
|
|
||||||
# ### commands auto generated by Alembic - please adjust! ###
|
|
||||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
|
||||||
batch_op.drop_index("idx_room_ics_enabled")
|
|
||||||
batch_op.drop_column("ics_last_etag")
|
|
||||||
batch_op.drop_column("ics_last_sync")
|
|
||||||
batch_op.drop_column("ics_enabled")
|
|
||||||
batch_op.drop_column("ics_fetch_interval")
|
|
||||||
batch_op.drop_column("ics_url")
|
|
||||||
|
|
||||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
|
||||||
batch_op.drop_constraint("fk_meeting_calendar_event_id", type_="foreignkey")
|
|
||||||
batch_op.drop_index("idx_meeting_calendar_event")
|
|
||||||
batch_op.drop_column("calendar_metadata")
|
|
||||||
batch_op.drop_column("calendar_event_id")
|
|
||||||
|
|
||||||
with op.batch_alter_table("calendar_event", schema=None) as batch_op:
|
|
||||||
batch_op.drop_index("idx_calendar_event_room_start")
|
|
||||||
batch_op.drop_index(
|
|
||||||
"idx_calendar_event_deleted", postgresql_where=sa.text("NOT is_deleted")
|
|
||||||
)
|
|
||||||
|
|
||||||
op.drop_table("calendar_event")
|
|
||||||
# ### end Alembic commands ###
|
|
||||||
@@ -1,43 +0,0 @@
"""remove_grace_period_fields

Revision ID: dc035ff72fd5
Revises: d8e204bbf615
Create Date: 2025-09-11 10:36:45.197588

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "dc035ff72fd5"
down_revision: Union[str, None] = "d8e204bbf615"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Remove grace period columns from meeting table
    op.drop_column("meeting", "last_participant_left_at")
    op.drop_column("meeting", "grace_period_minutes")


def downgrade() -> None:
    # Add back grace period columns to meeting table
    op.add_column(
        "meeting",
        sa.Column(
            "last_participant_left_at", sa.DateTime(timezone=True), nullable=True
        ),
    )
    op.add_column(
        "meeting",
        sa.Column(
            "grace_period_minutes",
            sa.Integer(),
            server_default=sa.text("15"),
            nullable=True,
        ),
    )
@@ -1,34 +0,0 @@
"""make meeting room_id nullable but keep foreign key

Revision ID: def1b5867d4c
Revises: 0ce521cda2ee
Create Date: 2025-09-11 09:42:18.697264

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "def1b5867d4c"
down_revision: Union[str, None] = "0ce521cda2ee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=True)

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=False)

    # ### end Alembic commands ###
@@ -1,28 +0,0 @@
"""add_track_keys

Revision ID: f8294b31f022
Revises: 1e49625677e4
Create Date: 2025-10-27 18:52:17.589167

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "f8294b31f022"
down_revision: Union[str, None] = "1e49625677e4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    with op.batch_alter_table("recording", schema=None) as batch_op:
        batch_op.add_column(sa.Column("track_keys", sa.JSON(), nullable=True))


def downgrade() -> None:
    with op.batch_alter_table("recording", schema=None) as batch_op:
        batch_op.drop_column("track_keys")
@@ -12,6 +12,7 @@ dependencies = [
     "requests>=2.31.0",
     "aiortc>=1.5.0",
     "sortedcontainers>=2.4.0",
+    "loguru>=0.7.0",
     "pydantic-settings>=2.0.2",
     "structlog>=23.1.0",
     "uvicorn[standard]>=0.23.1",
@@ -26,6 +27,7 @@ dependencies = [
     "prometheus-fastapi-instrumentator>=6.1.0",
     "sentencepiece>=0.1.99",
     "protobuf>=4.24.3",
+    "profanityfilter>=2.0.6",
     "celery>=5.3.4",
     "redis>=5.0.1",
     "python-jose[cryptography]>=3.3.0",
@@ -38,7 +40,6 @@ dependencies = [
     "llama-index-llms-openai-like>=0.4.0",
     "pytest-env>=1.1.5",
     "webvtt-py>=0.5.0",
-    "icalendar>=6.0.0",
 ]

 [dependency-groups]
@@ -112,14 +113,13 @@ source = ["reflector"]
 [tool.pytest_env]
 ENVIRONMENT = "pytest"
 DATABASE_URL = "postgresql://test_user:test_password@localhost:15432/reflector_test"
-AUTH_BACKEND = "jwt"

 [tool.pytest.ini_options]
 addopts = "-ra -q --disable-pytest-warnings --cov --cov-report html -v"
 testpaths = ["tests"]
 asyncio_mode = "auto"
 markers = [
-    "model_api: tests for the unified model-serving HTTP API (backend- and hardware-agnostic)",
+    "gpu_modal: mark test to run only with GPU Modal endpoints (deselect with '-m \"not gpu_modal\"')",
 ]

 [tool.ruff.lint]
@@ -131,7 +131,7 @@ select = [

 [tool.ruff.lint.per-file-ignores]
 "reflector/processors/summary/summary_builder.py" = ["E501"]
-"gpu/modal_deployments/**.py" = ["PLC0415"]
+"gpu/**.py" = ["PLC0415"]
 "reflector/tools/**.py" = ["PLC0415"]
 "migrations/versions/**.py" = ["PLC0415"]
 "tests/**.py" = ["PLC0415"]
@@ -12,7 +12,6 @@ from reflector.events import subscribers_shutdown, subscribers_startup
 from reflector.logger import logger
 from reflector.metrics import metrics_init
 from reflector.settings import settings
-from reflector.views.daily import router as daily_router
 from reflector.views.meetings import router as meetings_router
 from reflector.views.rooms import router as rooms_router
 from reflector.views.rtc_offer import router as rtc_offer_router
@@ -27,8 +26,6 @@ from reflector.views.transcripts_upload import router as transcripts_upload_rout
 from reflector.views.transcripts_webrtc import router as transcripts_webrtc_router
 from reflector.views.transcripts_websocket import router as transcripts_websocket_router
 from reflector.views.user import router as user_router
-from reflector.views.user_api_keys import router as user_api_keys_router
-from reflector.views.user_websocket import router as user_ws_router
 from reflector.views.whereby import router as whereby_router
 from reflector.views.zulip import router as zulip_router

@@ -68,12 +65,6 @@ app.add_middleware(
     allow_headers=["*"],
 )

-
-@app.get("/health")
-async def health():
-    return {"status": "healthy"}
-
-
 # metrics
 instrumentator = Instrumentator(
     excluded_handlers=["/docs", "/metrics"],
@@ -93,11 +84,8 @@ app.include_router(transcripts_websocket_router, prefix="/v1")
 app.include_router(transcripts_webrtc_router, prefix="/v1")
 app.include_router(transcripts_process_router, prefix="/v1")
 app.include_router(user_router, prefix="/v1")
-app.include_router(user_api_keys_router, prefix="/v1")
-app.include_router(user_ws_router, prefix="/v1")
 app.include_router(zulip_router, prefix="/v1")
 app.include_router(whereby_router, prefix="/v1")
-app.include_router(daily_router, prefix="/v1/daily")
 add_pagination(app)

 # prepare celery
@@ -1,27 +0,0 @@
import asyncio
import functools

from reflector.db import get_database


def asynctask(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        async def run_with_db():
            database = get_database()
            await database.connect()
            try:
                return await f(*args, **kwargs)
            finally:
                await database.disconnect()

        coro = run_with_db()
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None
        if loop and loop.is_running():
            return loop.run_until_complete(coro)
        return asyncio.run(coro)

    return wrapper
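For orientation, the removed `asynctask` helper above bridged Celery's synchronous workers and the project's async database layer. A minimal usage sketch, assuming the decorator is importable from the worker module shown above; the task name, task body, and import path are illustrative assumptions, not code from this diff:

    # Sketch only: names below are hypothetical.
    from celery import Celery

    from reflector.worker.app import asynctask  # assumed import path for the helper above

    celery_app = Celery("reflector")


    @celery_app.task
    @asynctask  # innermost: turns the coroutine into a sync callable with a managed DB connection
    async def process_transcript(transcript_id: str):
        # the database connected by asynctask is available to any controller used here
        ...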
@@ -1,18 +1,14 @@
-from typing import Annotated, List, Optional
+from typing import Annotated, Optional

 from fastapi import Depends, HTTPException
-from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
+from fastapi.security import OAuth2PasswordBearer
 from jose import JWTError, jwt
 from pydantic import BaseModel

-from reflector.db.user_api_keys import user_api_keys_controller
-from reflector.db.users import user_controller
 from reflector.logger import logger
 from reflector.settings import settings
-from reflector.utils import generate_uuid4

 oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
-api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

 jwt_public_key = open(f"reflector/auth/jwt/keys/{settings.AUTH_JWT_PUBLIC_KEY}").read()
 jwt_algorithm = settings.AUTH_JWT_ALGORITHM
@@ -30,7 +26,7 @@ class JWTException(Exception):

 class UserInfo(BaseModel):
     sub: str
-    email: Optional[str] = None
+    email: str

     def __getitem__(self, key):
         return getattr(self, key)
@@ -62,65 +58,33 @@ def authenticated(token: Annotated[str, Depends(oauth2_scheme)]):
     return None


-async def _authenticate_user(
-    jwt_token: Optional[str],
-    api_key: Optional[str],
-    jwtauth: JWTAuth,
-) -> UserInfo | None:
-    user_infos: List[UserInfo] = []
-    if api_key:
-        user_api_key = await user_api_keys_controller.verify_key(api_key)
-        if user_api_key:
-            user_infos.append(UserInfo(sub=user_api_key.user_id, email=None))
-
-    if jwt_token:
-        try:
-            payload = jwtauth.verify_token(jwt_token)
-            authentik_uid = payload["sub"]
-            email = payload["email"]
-
-            user = await user_controller.get_by_authentik_uid(authentik_uid)
-            if not user:
-                logger.info(
-                    f"Creating new user on first login: {authentik_uid} ({email})"
-                )
-                user = await user_controller.create_or_update(
-                    id=generate_uuid4(),
-                    authentik_uid=authentik_uid,
-                    email=email,
-                )
-
-            user_infos.append(UserInfo(sub=user.id, email=email))
-        except JWTError as e:
-            logger.error(f"JWT error: {e}")
-            raise HTTPException(status_code=401, detail="Invalid authentication")
-
-    if len(user_infos) == 0:
-        return None
-
-    if len(set([x.sub for x in user_infos])) > 1:
-        raise JWTException(
-            status_code=401,
-            detail="Invalid authentication: more than one user provided",
-        )
-
-    return user_infos[0]
-
-
-async def current_user(
-    jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
-    api_key: Annotated[Optional[str], Depends(api_key_header)],
+def current_user(
+    token: Annotated[Optional[str], Depends(oauth2_scheme)],
     jwtauth: JWTAuth = Depends(),
 ):
-    user = await _authenticate_user(jwt_token, api_key, jwtauth)
-    if user is None:
+    if token is None:
         raise HTTPException(status_code=401, detail="Not authenticated")
-    return user
+    try:
+        payload = jwtauth.verify_token(token)
+        sub = payload["sub"]
+        return UserInfo(sub=sub)
+    except JWTError as e:
+        logger.error(f"JWT error: {e}")
+        raise HTTPException(status_code=401, detail="Invalid authentication")


-async def current_user_optional(
-    jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
-    api_key: Annotated[Optional[str], Depends(api_key_header)],
+def current_user_optional(
+    token: Annotated[Optional[str], Depends(oauth2_scheme)],
     jwtauth: JWTAuth = Depends(),
 ):
-    return await _authenticate_user(jwt_token, api_key, jwtauth)
+    # we accept no token, but if one is provided, it must be a valid one.
+    if token is None:
+        return None
+    try:
+        payload = jwtauth.verify_token(token)
+        sub = payload["sub"]
+        email = payload["email"]
+        return UserInfo(sub=sub, email=email)
+    except JWTError as e:
+        logger.error(f"JWT error: {e}")
+        raise HTTPException(status_code=401, detail="Invalid authentication")
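As a side note on how these functions are consumed: both the old and the new `current_user` are FastAPI dependency callables, so a protected route would look roughly like the sketch below. The route path and the `reflector.auth` import path are assumptions for illustration, not part of this diff.

    from fastapi import Depends, FastAPI

    from reflector.auth import current_user  # assumed import path for the dependency above

    app = FastAPI()


    @app.get("/v1/me")
    async def whoami(user=Depends(current_user)):
        # current_user raises HTTP 401 when no valid credentials are supplied
        return {"sub": user.sub}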
@@ -1,6 +0,0 @@
Anything about Daily.co API interaction:

- webhook event shapes
- REST API client

No existing REST API client was found in the wild; the official lib is about working with video calls as a bot.
@@ -1,108 +0,0 @@
"""
Daily.co API Module
"""

# Client
from .client import DailyApiClient, DailyApiError

# Request models
from .requests import (
    CreateMeetingTokenRequest,
    CreateRoomRequest,
    CreateWebhookRequest,
    MeetingTokenProperties,
    RecordingsBucketConfig,
    RoomProperties,
    UpdateWebhookRequest,
)

# Response models
from .responses import (
    MeetingParticipant,
    MeetingParticipantsResponse,
    MeetingResponse,
    MeetingTokenResponse,
    RecordingResponse,
    RecordingS3Info,
    RoomPresenceParticipant,
    RoomPresenceResponse,
    RoomResponse,
    WebhookResponse,
)

# Webhook utilities
from .webhook_utils import (
    extract_room_name,
    parse_participant_joined,
    parse_participant_left,
    parse_recording_error,
    parse_recording_ready,
    parse_recording_started,
    parse_webhook_payload,
    verify_webhook_signature,
)

# Webhook models
from .webhooks import (
    DailyTrack,
    DailyWebhookEvent,
    DailyWebhookEventUnion,
    ParticipantJoinedEvent,
    ParticipantJoinedPayload,
    ParticipantLeftEvent,
    ParticipantLeftPayload,
    RecordingErrorEvent,
    RecordingErrorPayload,
    RecordingReadyEvent,
    RecordingReadyToDownloadPayload,
    RecordingStartedEvent,
    RecordingStartedPayload,
)

__all__ = [
    # Client
    "DailyApiClient",
    "DailyApiError",
    # Requests
    "CreateRoomRequest",
    "RoomProperties",
    "RecordingsBucketConfig",
    "CreateMeetingTokenRequest",
    "MeetingTokenProperties",
    "CreateWebhookRequest",
    "UpdateWebhookRequest",
    # Responses
    "RoomResponse",
    "RoomPresenceResponse",
    "RoomPresenceParticipant",
    "MeetingParticipantsResponse",
    "MeetingParticipant",
    "MeetingResponse",
    "RecordingResponse",
    "RecordingS3Info",
    "MeetingTokenResponse",
    "WebhookResponse",
    # Webhooks
    "DailyWebhookEvent",
    "DailyWebhookEventUnion",
    "DailyTrack",
    "ParticipantJoinedEvent",
    "ParticipantJoinedPayload",
    "ParticipantLeftEvent",
    "ParticipantLeftPayload",
    "RecordingStartedEvent",
    "RecordingStartedPayload",
    "RecordingReadyEvent",
    "RecordingReadyToDownloadPayload",
    "RecordingErrorEvent",
    "RecordingErrorPayload",
    # Webhook utilities
    "verify_webhook_signature",
    "extract_room_name",
    "parse_webhook_payload",
    "parse_participant_joined",
    "parse_participant_left",
    "parse_recording_started",
    "parse_recording_ready",
    "parse_recording_error",
]
@@ -1,573 +0,0 @@
|
|||||||
"""
|
|
||||||
Daily.co API Client
|
|
||||||
|
|
||||||
Complete async client for Daily.co REST API with Pydantic models.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api
|
|
||||||
"""
|
|
||||||
|
|
||||||
from http import HTTPStatus
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
import httpx
|
|
||||||
import structlog
|
|
||||||
|
|
||||||
from reflector.utils.string import NonEmptyString
|
|
||||||
|
|
||||||
from .requests import (
|
|
||||||
CreateMeetingTokenRequest,
|
|
||||||
CreateRoomRequest,
|
|
||||||
CreateWebhookRequest,
|
|
||||||
UpdateWebhookRequest,
|
|
||||||
)
|
|
||||||
from .responses import (
|
|
||||||
MeetingParticipantsResponse,
|
|
||||||
MeetingResponse,
|
|
||||||
MeetingTokenResponse,
|
|
||||||
RecordingResponse,
|
|
||||||
RoomPresenceResponse,
|
|
||||||
RoomResponse,
|
|
||||||
WebhookResponse,
|
|
||||||
)
|
|
||||||
|
|
||||||
logger = structlog.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class DailyApiError(Exception):
|
|
||||||
"""Daily.co API error with full request/response context."""
|
|
||||||
|
|
||||||
def __init__(self, operation: str, response: httpx.Response):
|
|
||||||
self.operation = operation
|
|
||||||
self.response = response
|
|
||||||
self.status_code = response.status_code
|
|
||||||
self.response_body = response.text
|
|
||||||
self.url = str(response.url)
|
|
||||||
self.request_body = (
|
|
||||||
response.request.content.decode() if response.request.content else None
|
|
||||||
)
|
|
||||||
|
|
||||||
super().__init__(
|
|
||||||
f"Daily.co API error: {operation} failed with status {self.status_code}"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class DailyApiClient:
|
|
||||||
"""
|
|
||||||
Complete async client for Daily.co REST API.
|
|
||||||
|
|
||||||
Usage:
|
|
||||||
# Direct usage
|
|
||||||
client = DailyApiClient(api_key="your_api_key")
|
|
||||||
room = await client.create_room(CreateRoomRequest(name="my-room"))
|
|
||||||
await client.close() # Clean up when done
|
|
||||||
|
|
||||||
# Context manager (recommended)
|
|
||||||
async with DailyApiClient(api_key="your_api_key") as client:
|
|
||||||
room = await client.create_room(CreateRoomRequest(name="my-room"))
|
|
||||||
"""
|
|
||||||
|
|
||||||
BASE_URL = "https://api.daily.co/v1"
|
|
||||||
DEFAULT_TIMEOUT = 10.0
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
api_key: NonEmptyString,
|
|
||||||
webhook_secret: NonEmptyString | None = None,
|
|
||||||
timeout: float = DEFAULT_TIMEOUT,
|
|
||||||
base_url: NonEmptyString | None = None,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Initialize Daily.co API client.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
api_key: Daily.co API key (Bearer token)
|
|
||||||
webhook_secret: Base64-encoded HMAC secret for webhook verification.
|
|
||||||
Must match the 'hmac' value provided when creating webhooks.
|
|
||||||
Generate with: base64.b64encode(os.urandom(32)).decode()
|
|
||||||
timeout: Default request timeout in seconds
|
|
||||||
base_url: Override base URL (for testing)
|
|
||||||
"""
|
|
||||||
self.api_key = api_key
|
|
||||||
self.webhook_secret = webhook_secret
|
|
||||||
self.timeout = timeout
|
|
||||||
self.base_url = base_url or self.BASE_URL
|
|
||||||
|
|
||||||
self.headers = {
|
|
||||||
"Authorization": f"Bearer {api_key}",
|
|
||||||
"Content-Type": "application/json",
|
|
||||||
}
|
|
||||||
|
|
||||||
self._client: httpx.AsyncClient | None = None
|
|
||||||
|
|
||||||
async def __aenter__(self):
|
|
||||||
return self
|
|
||||||
|
|
||||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
|
||||||
await self.close()
|
|
||||||
|
|
||||||
async def _get_client(self) -> httpx.AsyncClient:
|
|
||||||
if self._client is None:
|
|
||||||
self._client = httpx.AsyncClient(timeout=self.timeout)
|
|
||||||
return self._client
|
|
||||||
|
|
||||||
async def close(self):
|
|
||||||
if self._client is not None:
|
|
||||||
await self._client.aclose()
|
|
||||||
self._client = None
|
|
||||||
|
|
||||||
async def _handle_response(
|
|
||||||
self, response: httpx.Response, operation: str
|
|
||||||
) -> dict[str, Any]:
|
|
||||||
"""
|
|
||||||
Handle API response with error logging.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
response: HTTP response
|
|
||||||
operation: Operation name for logging (e.g., "create_room")
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Parsed JSON response
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
DailyApiError: If request failed with full context
|
|
||||||
"""
|
|
||||||
if response.status_code >= 400:
|
|
||||||
logger.error(
|
|
||||||
f"Daily.co API error: {operation}",
|
|
||||||
status_code=response.status_code,
|
|
||||||
response_body=response.text,
|
|
||||||
request_body=response.request.content.decode()
|
|
||||||
if response.request.content
|
|
||||||
else None,
|
|
||||||
url=str(response.url),
|
|
||||||
)
|
|
||||||
raise DailyApiError(operation, response)
|
|
||||||
|
|
||||||
return response.json()
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# ROOMS
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
async def create_room(self, request: CreateRoomRequest) -> RoomResponse:
|
|
||||||
"""
|
|
||||||
Create a new Daily.co room.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
|
||||||
|
|
||||||
Args:
|
|
||||||
request: Room creation request with name, privacy, and properties
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Created room data including URL and ID
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.post(
|
|
||||||
f"{self.base_url}/rooms",
|
|
||||||
headers=self.headers,
|
|
||||||
json=request.model_dump(exclude_none=True),
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "create_room")
|
|
||||||
return RoomResponse(**data)
|
|
||||||
|
|
||||||
async def get_room(self, room_name: NonEmptyString) -> RoomResponse:
|
|
||||||
"""
|
|
||||||
Get room configuration.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
room_name: Daily.co room name
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Room configuration data
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/rooms/{room_name}",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "get_room")
|
|
||||||
return RoomResponse(**data)
|
|
||||||
|
|
||||||
async def get_room_presence(
|
|
||||||
self, room_name: NonEmptyString
|
|
||||||
) -> RoomPresenceResponse:
|
|
||||||
"""
|
|
||||||
Get current participants in a room (real-time presence).
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
|
|
||||||
|
|
||||||
Args:
|
|
||||||
room_name: Daily.co room name
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of currently present participants with join time and duration
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/rooms/{room_name}/presence",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "get_room_presence")
|
|
||||||
return RoomPresenceResponse(**data)
|
|
||||||
|
|
||||||
async def delete_room(self, room_name: NonEmptyString) -> None:
|
|
||||||
"""
|
|
||||||
Delete a room (idempotent - succeeds even if room doesn't exist).
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/delete-room
|
|
||||||
|
|
||||||
Args:
|
|
||||||
room_name: Daily.co room name
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails (except 404)
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.delete(
|
|
||||||
f"{self.base_url}/rooms/{room_name}",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Idempotent delete - 404 means already deleted
|
|
||||||
if response.status_code == HTTPStatus.NOT_FOUND:
|
|
||||||
logger.debug("Room not found (already deleted)", room_name=room_name)
|
|
||||||
return
|
|
||||||
|
|
||||||
await self._handle_response(response, "delete_room")
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# MEETINGS
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
async def get_meeting(self, meeting_id: NonEmptyString) -> MeetingResponse:
|
|
||||||
"""
|
|
||||||
Get full meeting information including participants.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
|
|
||||||
|
|
||||||
Args:
|
|
||||||
meeting_id: Daily.co meeting/session ID
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Meeting metadata including room, duration, participants, and status
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/meetings/{meeting_id}",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "get_meeting")
|
|
||||||
return MeetingResponse(**data)
|
|
||||||
|
|
||||||
async def get_meeting_participants(
|
|
||||||
self,
|
|
||||||
meeting_id: NonEmptyString,
|
|
||||||
limit: int | None = None,
|
|
||||||
joined_after: NonEmptyString | None = None,
|
|
||||||
joined_before: NonEmptyString | None = None,
|
|
||||||
) -> MeetingParticipantsResponse:
|
|
||||||
"""
|
|
||||||
Get historical participant data from a completed meeting (paginated).
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
|
|
||||||
|
|
||||||
Args:
|
|
||||||
meeting_id: Daily.co meeting/session ID
|
|
||||||
limit: Maximum number of participant records to return
|
|
||||||
joined_after: Return participants who joined after this participant_id
|
|
||||||
joined_before: Return participants who joined before this participant_id
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of participants with join times and duration
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails (404 when no more participants)
|
|
||||||
|
|
||||||
Note:
|
|
||||||
For pagination, use joined_after with the last participant_id from previous response.
|
|
||||||
Returns 404 when no more participants remain.
|
|
||||||
"""
|
|
||||||
params = {}
|
|
||||||
if limit is not None:
|
|
||||||
params["limit"] = limit
|
|
||||||
if joined_after is not None:
|
|
||||||
params["joined_after"] = joined_after
|
|
||||||
if joined_before is not None:
|
|
||||||
params["joined_before"] = joined_before
|
|
||||||
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/meetings/{meeting_id}/participants",
|
|
||||||
headers=self.headers,
|
|
||||||
params=params,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "get_meeting_participants")
|
|
||||||
return MeetingParticipantsResponse(**data)
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# RECORDINGS
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
async def get_recording(self, recording_id: NonEmptyString) -> RecordingResponse:
|
|
||||||
"""
|
|
||||||
https://docs.daily.co/reference/rest-api/recordings/get-recording-information
|
|
||||||
Get recording metadata and status.
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/recordings/{recording_id}",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "get_recording")
|
|
||||||
return RecordingResponse(**data)
|
|
||||||
|
|
||||||
async def list_recordings(
|
|
||||||
self,
|
|
||||||
room_name: NonEmptyString | None = None,
|
|
||||||
starting_after: str | None = None,
|
|
||||||
ending_before: str | None = None,
|
|
||||||
limit: int = 100,
|
|
||||||
) -> list[RecordingResponse]:
|
|
||||||
"""
|
|
||||||
List recordings with optional filters.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
|
||||||
|
|
||||||
Args:
|
|
||||||
room_name: Filter by room name
|
|
||||||
starting_after: Pagination cursor - recording ID to start after
|
|
||||||
ending_before: Pagination cursor - recording ID to end before
|
|
||||||
limit: Max results per page (default 100, max 100)
|
|
||||||
|
|
||||||
Note: starting_after/ending_before are pagination cursors (recording IDs),
|
|
||||||
NOT time filters. API returns recordings in reverse chronological order.
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
|
|
||||||
params = {"limit": limit}
|
|
||||||
if room_name:
|
|
||||||
params["room_name"] = room_name
|
|
||||||
if starting_after:
|
|
||||||
params["starting_after"] = starting_after
|
|
||||||
if ending_before:
|
|
||||||
params["ending_before"] = ending_before
|
|
||||||
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/recordings",
|
|
||||||
headers=self.headers,
|
|
||||||
params=params,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "list_recordings")
|
|
||||||
|
|
||||||
if not isinstance(data, dict) or "data" not in data:
|
|
||||||
logger.error(
|
|
||||||
"Daily.co API returned unexpected format for list_recordings",
|
|
||||||
data_type=type(data).__name__,
|
|
||||||
data_keys=list(data.keys()) if isinstance(data, dict) else None,
|
|
||||||
data_sample=str(data)[:500],
|
|
||||||
room_name=room_name,
|
|
||||||
operation="list_recordings",
|
|
||||||
)
|
|
||||||
raise httpx.HTTPStatusError(
|
|
||||||
message=f"Unexpected response format from list_recordings: {type(data).__name__}",
|
|
||||||
request=response.request,
|
|
||||||
response=response,
|
|
||||||
)
|
|
||||||
|
|
||||||
return [RecordingResponse(**r) for r in data["data"]]
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# MEETING TOKENS
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
async def create_meeting_token(
|
|
||||||
self, request: CreateMeetingTokenRequest
|
|
||||||
) -> MeetingTokenResponse:
|
|
||||||
"""
|
|
||||||
Create a meeting token for participant authentication.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
|
||||||
|
|
||||||
Args:
|
|
||||||
request: Token properties including room name, user_id, permissions
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
JWT meeting token
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.post(
|
|
||||||
f"{self.base_url}/meeting-tokens",
|
|
||||||
headers=self.headers,
|
|
||||||
json=request.model_dump(exclude_none=True),
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "create_meeting_token")
|
|
||||||
return MeetingTokenResponse(**data)
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# WEBHOOKS
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
async def list_webhooks(self) -> list[WebhookResponse]:
|
|
||||||
"""
|
|
||||||
List all configured webhooks for this account.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of webhook configurations
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.get(
|
|
||||||
f"{self.base_url}/webhooks",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "list_webhooks")
|
|
||||||
|
|
||||||
# Daily.co returns array directly (not paginated)
|
|
||||||
if isinstance(data, list):
|
|
||||||
return [WebhookResponse(**wh) for wh in data]
|
|
||||||
|
|
||||||
# Future-proof: handle potential pagination envelope
|
|
||||||
if isinstance(data, dict) and "data" in data:
|
|
||||||
return [WebhookResponse(**wh) for wh in data["data"]]
|
|
||||||
|
|
||||||
logger.warning("Unexpected webhook list response format", data=data)
|
|
||||||
return []
|
|
||||||
|
|
||||||
async def create_webhook(self, request: CreateWebhookRequest) -> WebhookResponse:
|
|
||||||
"""
|
|
||||||
Create a new webhook subscription.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
|
|
||||||
Args:
|
|
||||||
request: Webhook configuration with URL, event types, and HMAC secret
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Created webhook with UUID and state
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.post(
|
|
||||||
f"{self.base_url}/webhooks",
|
|
||||||
headers=self.headers,
|
|
||||||
json=request.model_dump(exclude_none=True),
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "create_webhook")
|
|
||||||
return WebhookResponse(**data)
|
|
||||||
|
|
||||||
async def update_webhook(
|
|
||||||
self, webhook_uuid: NonEmptyString, request: UpdateWebhookRequest
|
|
||||||
) -> WebhookResponse:
|
|
||||||
"""
|
|
||||||
Update webhook configuration.
|
|
||||||
|
|
||||||
Note: Daily.co may not support PATCH for all fields.
|
|
||||||
Common pattern is delete + recreate.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
|
|
||||||
Args:
|
|
||||||
webhook_uuid: Webhook UUID to update
|
|
||||||
request: Updated webhook configuration
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Updated webhook configuration
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If API request fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.patch(
|
|
||||||
f"{self.base_url}/webhooks/{webhook_uuid}",
|
|
||||||
headers=self.headers,
|
|
||||||
json=request.model_dump(exclude_none=True),
|
|
||||||
)
|
|
||||||
|
|
||||||
data = await self._handle_response(response, "update_webhook")
|
|
||||||
return WebhookResponse(**data)
|
|
||||||
|
|
||||||
async def delete_webhook(self, webhook_uuid: NonEmptyString) -> None:
|
|
||||||
"""
|
|
||||||
Delete a webhook.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
|
|
||||||
Args:
|
|
||||||
webhook_uuid: Webhook UUID to delete
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
httpx.HTTPStatusError: If webhook not found or deletion fails
|
|
||||||
"""
|
|
||||||
client = await self._get_client()
|
|
||||||
response = await client.delete(
|
|
||||||
f"{self.base_url}/webhooks/{webhook_uuid}",
|
|
||||||
headers=self.headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
await self._handle_response(response, "delete_webhook")
|
|
||||||
|
|
||||||
# ============================================================================
|
|
||||||
# HELPER METHODS
|
|
||||||
# ============================================================================
|
|
||||||
|
|
||||||
async def find_webhook_by_url(self, url: NonEmptyString) -> WebhookResponse | None:
|
|
||||||
"""
|
|
||||||
Find a webhook by its URL.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
url: Webhook endpoint URL to search for
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Webhook if found, None otherwise
|
|
||||||
"""
|
|
||||||
webhooks = await self.list_webhooks()
|
|
||||||
for webhook in webhooks:
|
|
||||||
if webhook.url == url:
|
|
||||||
return webhook
|
|
||||||
return None
|
|
||||||
|
|
||||||
async def find_webhooks_by_pattern(
|
|
||||||
self, pattern: NonEmptyString
|
|
||||||
) -> list[WebhookResponse]:
|
|
||||||
"""
|
|
||||||
Find webhooks matching a URL pattern (e.g., 'ngrok').
|
|
||||||
|
|
||||||
Args:
|
|
||||||
pattern: String to match in webhook URLs
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
List of matching webhooks
|
|
||||||
"""
|
|
||||||
webhooks = await self.list_webhooks()
|
|
||||||
return [wh for wh in webhooks if pattern in wh.url]
|
|
||||||
@@ -1,158 +0,0 @@
|
|||||||
"""
|
|
||||||
Daily.co API Request Models
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import List, Literal
|
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
|
|
||||||
from reflector.utils.string import NonEmptyString
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingsBucketConfig(BaseModel):
|
|
||||||
"""
|
|
||||||
S3 bucket configuration for raw-tracks recordings.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
|
||||||
"""
|
|
||||||
|
|
||||||
bucket_name: NonEmptyString = Field(description="S3 bucket name")
|
|
||||||
bucket_region: NonEmptyString = Field(description="AWS region (e.g., 'us-east-1')")
|
|
||||||
assume_role_arn: NonEmptyString = Field(
|
|
||||||
description="AWS IAM role ARN that Daily.co will assume to write recordings"
|
|
||||||
)
|
|
||||||
allow_api_access: bool = Field(
|
|
||||||
default=True,
|
|
||||||
description="Whether to allow API access to recording metadata",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RoomProperties(BaseModel):
|
|
||||||
"""
|
|
||||||
Room configuration properties.
|
|
||||||
"""
|
|
||||||
|
|
||||||
enable_recording: Literal["cloud", "local", "raw-tracks"] | None = Field(
|
|
||||||
default=None,
|
|
||||||
description="Recording mode: 'cloud' for mixed, 'local' for local recording, 'raw-tracks' for multitrack, None to disable",
|
|
||||||
)
|
|
||||||
enable_chat: bool = Field(default=True, description="Enable in-meeting chat")
|
|
||||||
enable_screenshare: bool = Field(default=True, description="Enable screen sharing")
|
|
||||||
start_video_off: bool = Field(
|
|
||||||
default=False, description="Start with video off for all participants"
|
|
||||||
)
|
|
||||||
start_audio_off: bool = Field(
|
|
||||||
default=False, description="Start with audio muted for all participants"
|
|
||||||
)
|
|
||||||
exp: int | None = Field(
|
|
||||||
None, description="Room expiration timestamp (Unix epoch seconds)"
|
|
||||||
)
|
|
||||||
recordings_bucket: RecordingsBucketConfig | None = Field(
|
|
||||||
None, description="S3 bucket configuration for raw-tracks recordings"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class CreateRoomRequest(BaseModel):
|
|
||||||
"""
|
|
||||||
Request to create a new Daily.co room.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
|
||||||
"""
|
|
||||||
|
|
||||||
name: NonEmptyString = Field(description="Room name (must be unique within domain)")
|
|
||||||
privacy: Literal["public", "private"] = Field(
|
|
||||||
default="public", description="Room privacy setting"
|
|
||||||
)
|
|
||||||
properties: RoomProperties = Field(
|
|
||||||
default_factory=RoomProperties, description="Room configuration properties"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class MeetingTokenProperties(BaseModel):
|
|
||||||
"""
|
|
||||||
Properties for meeting token creation.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
|
||||||
"""
|
|
||||||
|
|
||||||
room_name: NonEmptyString = Field(description="Room name this token is valid for")
|
|
||||||
user_id: NonEmptyString | None = Field(
|
|
||||||
None, description="User identifier to associate with token"
|
|
||||||
)
|
|
||||||
is_owner: bool = Field(
|
|
||||||
default=False, description="Grant owner privileges to token holder"
|
|
||||||
)
|
|
||||||
start_cloud_recording: bool = Field(
|
|
||||||
default=False, description="Automatically start cloud recording on join"
|
|
||||||
)
|
|
||||||
enable_recording_ui: bool = Field(
|
|
||||||
default=True, description="Show recording controls in UI"
|
|
||||||
)
|
|
||||||
eject_at_token_exp: bool = Field(
|
|
||||||
default=False, description="Eject participant when token expires"
|
|
||||||
)
|
|
||||||
nbf: int | None = Field(
|
|
||||||
None, description="Not-before timestamp (Unix epoch seconds)"
|
|
||||||
)
|
|
||||||
exp: int | None = Field(
|
|
||||||
None, description="Expiration timestamp (Unix epoch seconds)"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class CreateMeetingTokenRequest(BaseModel):
|
|
||||||
"""
|
|
||||||
Request to create a meeting token for participant authentication.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
|
||||||
"""
|
|
||||||
|
|
||||||
properties: MeetingTokenProperties = Field(description="Token properties")
|
|
||||||
|
|
||||||
|
|
||||||
class CreateWebhookRequest(BaseModel):
|
|
||||||
"""
|
|
||||||
Request to create a webhook subscription.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
"""
|
|
||||||
|
|
||||||
url: NonEmptyString = Field(description="Webhook endpoint URL (must be HTTPS)")
|
|
||||||
eventTypes: List[
|
|
||||||
Literal[
|
|
||||||
"participant.joined",
|
|
||||||
"participant.left",
|
|
||||||
"recording.started",
|
|
||||||
"recording.ready-to-download",
|
|
||||||
"recording.error",
|
|
||||||
]
|
|
||||||
] = Field(
|
|
||||||
description="Array of event types to subscribe to (only events we handle)"
|
|
||||||
)
|
|
||||||
hmac: NonEmptyString = Field(
|
|
||||||
description="Base64-encoded HMAC secret for webhook signature verification"
|
|
||||||
)
|
|
||||||
basicAuth: NonEmptyString | None = Field(
|
|
||||||
None, description="Optional basic auth credentials for webhook endpoint"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class UpdateWebhookRequest(BaseModel):
|
|
||||||
"""
|
|
||||||
Request to update an existing webhook.
|
|
||||||
|
|
||||||
Note: Daily.co API may not support PATCH for webhooks.
|
|
||||||
Common pattern is to delete and recreate.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
"""
|
|
||||||
|
|
||||||
url: NonEmptyString | None = Field(None, description="New webhook endpoint URL")
|
|
||||||
eventTypes: List[NonEmptyString] | None = Field(
|
|
||||||
None, description="New array of event types"
|
|
||||||
)
|
|
||||||
hmac: NonEmptyString | None = Field(None, description="New HMAC secret")
|
|
||||||
basicAuth: NonEmptyString | None = Field(
|
|
||||||
None, description="New basic auth credentials"
|
|
||||||
)
|
|
||||||
@@ -1,193 +0,0 @@
|
|||||||
"""
|
|
||||||
Daily.co API Response Models
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Any, Dict, List, Literal
|
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
|
|
||||||
from reflector.dailyco_api.webhooks import DailyTrack
|
|
||||||
from reflector.utils.string import NonEmptyString
|
|
||||||
|
|
||||||
# not documented in daily; we fill it according to observations
|
|
||||||
RecordingStatus = Literal["in-progress", "finished"]
|
|
||||||
|
|
||||||
|
|
||||||
class RoomResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from room creation or retrieval.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
|
||||||
"""
|
|
||||||
|
|
||||||
id: NonEmptyString = Field(description="Unique room identifier (UUID)")
|
|
||||||
name: NonEmptyString = Field(description="Room name used in URLs")
|
|
||||||
api_created: bool = Field(description="Whether room was created via API")
|
|
||||||
privacy: Literal["public", "private"] = Field(description="Room privacy setting")
|
|
||||||
url: NonEmptyString = Field(description="Full room URL")
|
|
||||||
created_at: NonEmptyString = Field(description="ISO 8601 creation timestamp")
|
|
||||||
config: Dict[NonEmptyString, Any] = Field(
|
|
||||||
default_factory=dict, description="Room configuration properties"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RoomPresenceParticipant(BaseModel):
|
|
||||||
"""
|
|
||||||
Participant presence information in a room.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
|
|
||||||
"""
|
|
||||||
|
|
||||||
room: NonEmptyString = Field(description="Room name")
|
|
||||||
id: NonEmptyString = Field(description="Participant session ID")
|
|
||||||
userId: NonEmptyString | None = Field(None, description="User ID if provided")
|
|
||||||
userName: NonEmptyString | None = Field(None, description="User display name")
|
|
||||||
joinTime: NonEmptyString = Field(description="ISO 8601 join timestamp")
|
|
||||||
duration: int = Field(description="Duration in room (seconds)")
|
|
||||||
|
|
||||||
|
|
||||||
class RoomPresenceResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from room presence endpoint.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
|
|
||||||
"""
|
|
||||||
|
|
||||||
total_count: int = Field(
|
|
||||||
description="Total number of participants currently in room"
|
|
||||||
)
|
|
||||||
data: List[RoomPresenceParticipant] = Field(
|
|
||||||
default_factory=list, description="Array of participant presence data"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class MeetingParticipant(BaseModel):
|
|
||||||
"""
|
|
||||||
Historical participant data from a meeting.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
|
|
||||||
"""
|
|
||||||
|
|
||||||
user_id: NonEmptyString = Field(description="User identifier")
|
|
||||||
participant_id: NonEmptyString = Field(description="Participant session identifier")
|
|
||||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
|
||||||
join_time: int = Field(description="Join timestamp (Unix epoch seconds)")
|
|
||||||
duration: int = Field(description="Duration in meeting (seconds)")
|
|
||||||
|
|
||||||
|
|
||||||
class MeetingParticipantsResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from meeting participants endpoint.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
|
|
||||||
"""
|
|
||||||
|
|
||||||
data: List[MeetingParticipant] = Field(
|
|
||||||
default_factory=list, description="Array of participant data"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class MeetingResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from meeting information endpoint.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
|
|
||||||
"""
|
|
||||||
|
|
||||||
id: NonEmptyString = Field(description="Meeting session identifier (UUID)")
|
|
||||||
room: NonEmptyString = Field(description="Room name where meeting occurred")
|
|
||||||
start_time: int = Field(
|
|
||||||
description="Meeting start Unix timestamp (~15s granularity)"
|
|
||||||
)
|
|
||||||
duration: int = Field(description="Total meeting duration in seconds")
|
|
||||||
ongoing: bool = Field(description="Whether meeting is currently active")
|
|
||||||
max_participants: int = Field(description="Peak concurrent participant count")
|
|
||||||
participants: List[MeetingParticipant] = Field(
|
|
||||||
default_factory=list, description="Array of participant session data"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingS3Info(BaseModel):
|
|
||||||
"""
|
|
||||||
S3 bucket information for a recording.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
|
||||||
"""
|
|
||||||
|
|
||||||
bucket_name: NonEmptyString
|
|
||||||
bucket_region: NonEmptyString
|
|
||||||
endpoint: NonEmptyString | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from recording retrieval endpoint.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
|
||||||
"""
|
|
||||||
|
|
||||||
id: NonEmptyString = Field(description="Recording identifier")
|
|
||||||
room_name: NonEmptyString = Field(description="Room where recording occurred")
|
|
||||||
start_ts: int = Field(description="Recording start timestamp (Unix epoch seconds)")
|
|
||||||
status: RecordingStatus = Field(
|
|
||||||
description="Recording status ('in-progress' or 'finished')"
|
|
||||||
)
|
|
||||||
max_participants: int | None = Field(
|
|
||||||
None, description="Maximum participants during recording (may be missing)"
|
|
||||||
)
|
|
||||||
duration: int = Field(description="Recording duration in seconds")
|
|
||||||
share_token: NonEmptyString | None = Field(
|
|
||||||
None, description="Token for sharing recording"
|
|
||||||
)
|
|
||||||
s3: RecordingS3Info | None = Field(None, description="S3 bucket information")
|
|
||||||
tracks: list[DailyTrack] = Field(
|
|
||||||
default_factory=list,
|
|
||||||
description="Track list for raw-tracks recordings (always array, never null)",
|
|
||||||
)
|
|
||||||
# this is not a mistake but a deliberate Daily.co naming decision
|
|
||||||
mtgSessionId: NonEmptyString | None = Field(
|
|
||||||
None, description="Meeting session identifier (may be missing)"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class MeetingTokenResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from meeting token creation.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
|
||||||
"""
|
|
||||||
|
|
||||||
token: NonEmptyString = Field(
|
|
||||||
description="JWT meeting token for participant authentication"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class WebhookResponse(BaseModel):
|
|
||||||
"""
|
|
||||||
Response from webhook creation or retrieval.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
"""
|
|
||||||
|
|
||||||
uuid: NonEmptyString = Field(description="Unique webhook identifier")
|
|
||||||
url: NonEmptyString = Field(description="Webhook endpoint URL")
|
|
||||||
hmac: NonEmptyString | None = Field(
|
|
||||||
None, description="Base64-encoded HMAC secret for signature verification"
|
|
||||||
)
|
|
||||||
basicAuth: NonEmptyString | None = Field(
|
|
||||||
None, description="Basic auth credentials if configured"
|
|
||||||
)
|
|
||||||
eventTypes: List[NonEmptyString] = Field(
|
|
||||||
default_factory=list,
|
|
||||||
description="Array of event types (e.g., ['recording.started', 'participant.joined'])",
|
|
||||||
)
|
|
||||||
state: Literal["ACTIVE", "FAILED"] = Field(
|
|
||||||
description="Webhook state - FAILED after 3+ consecutive failures"
|
|
||||||
)
|
|
||||||
failedCount: int = Field(default=0, description="Number of consecutive failures")
|
|
||||||
lastMomentPushed: NonEmptyString | None = Field(
|
|
||||||
None, description="ISO 8601 timestamp of last successful push"
|
|
||||||
)
|
|
||||||
domainId: NonEmptyString = Field(description="Daily.co domain/account identifier")
|
|
||||||
createdAt: NonEmptyString = Field(description="ISO 8601 creation timestamp")
|
|
||||||
updatedAt: NonEmptyString = Field(description="ISO 8601 last update timestamp")
|
|
||||||
@@ -1,228 +0,0 @@
|
|||||||
"""
|
|
||||||
Daily.co Webhook Utilities
|
|
||||||
|
|
||||||
Utilities for verifying and parsing Daily.co webhook events.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
"""
|
|
||||||
|
|
||||||
import base64
|
|
||||||
import hmac
|
|
||||||
from hashlib import sha256
|
|
||||||
|
|
||||||
import structlog
|
|
||||||
|
|
||||||
from .webhooks import (
|
|
||||||
DailyWebhookEvent,
|
|
||||||
ParticipantJoinedPayload,
|
|
||||||
ParticipantLeftPayload,
|
|
||||||
RecordingErrorPayload,
|
|
||||||
RecordingReadyToDownloadPayload,
|
|
||||||
RecordingStartedPayload,
|
|
||||||
)
|
|
||||||
|
|
||||||
logger = structlog.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def verify_webhook_signature(
|
|
||||||
body: bytes,
|
|
||||||
signature: str,
|
|
||||||
timestamp: str,
|
|
||||||
webhook_secret: str,
|
|
||||||
) -> bool:
|
|
||||||
"""
|
|
||||||
Verify Daily.co webhook signature using HMAC-SHA256.
|
|
||||||
|
|
||||||
Daily.co signature verification:
|
|
||||||
1. Base64-decode the webhook secret
|
|
||||||
2. Create signed content: timestamp + '.' + body
|
|
||||||
3. Compute HMAC-SHA256(secret, signed_content)
|
|
||||||
4. Base64-encode the result
|
|
||||||
5. Compare with provided signature using constant-time comparison
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
|
|
||||||
Args:
|
|
||||||
body: Raw request body bytes
|
|
||||||
signature: X-Webhook-Signature header value
|
|
||||||
timestamp: X-Webhook-Timestamp header value
|
|
||||||
webhook_secret: Base64-encoded HMAC secret
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
True if signature is valid, False otherwise
|
|
||||||
|
|
||||||
Example:
|
|
||||||
>>> body = b'{"version":"1.0.0","type":"participant.joined",...}'
|
|
||||||
>>> signature = "abc123..."
|
|
||||||
>>> timestamp = "1234567890"
|
|
||||||
>>> secret = "your-base64-secret"
|
|
||||||
>>> is_valid = verify_webhook_signature(body, signature, timestamp, secret)
|
|
||||||
"""
|
|
||||||
if not signature or not timestamp or not webhook_secret:
|
|
||||||
logger.warning(
|
|
||||||
"Missing required data for webhook verification",
|
|
||||||
has_signature=bool(signature),
|
|
||||||
has_timestamp=bool(timestamp),
|
|
||||||
has_secret=bool(webhook_secret),
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
try:
|
|
||||||
secret_bytes = base64.b64decode(webhook_secret)
|
|
||||||
signed_content = timestamp.encode() + b"." + body
|
|
||||||
expected = hmac.new(secret_bytes, signed_content, sha256).digest()
|
|
||||||
expected_b64 = base64.b64encode(expected).decode()
|
|
||||||
|
|
||||||
# Constant-time comparison to prevent timing attacks
|
|
||||||
return hmac.compare_digest(expected_b64, signature)
|
|
||||||
|
|
||||||
except (base64.binascii.Error, ValueError, TypeError, UnicodeDecodeError) as e:
|
|
||||||
logger.error(
|
|
||||||
"Webhook signature verification failed",
|
|
||||||
error=str(e),
|
|
||||||
error_type=type(e).__name__,
|
|
||||||
)
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
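As a rough illustration of how this helper could be wired up, a minimal FastAPI handler might look like the sketch below; the route path, the secret constant, and the 401 response are assumptions, while the header names come from the Args section above.

from fastapi import APIRouter, HTTPException, Request

router = APIRouter()
WEBHOOK_SECRET = "base64-encoded-secret"  # placeholder: normally read from settings


@router.post("/webhooks/daily")  # hypothetical route
async def daily_webhook(request: Request) -> dict:
    body = await request.body()
    signature = request.headers.get("X-Webhook-Signature", "")
    timestamp = request.headers.get("X-Webhook-Timestamp", "")
    if not verify_webhook_signature(body, signature, timestamp, WEBHOOK_SECRET):
        raise HTTPException(status_code=401, detail="Invalid webhook signature")
    return {"ok": True}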
def extract_room_name(event: DailyWebhookEvent) -> str | None:
|
|
||||||
"""
|
|
||||||
Extract room name from Daily.co webhook event payload.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event: Parsed webhook event
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Room name if present and is a string, None otherwise
|
|
||||||
|
|
||||||
Example:
|
|
||||||
>>> event = DailyWebhookEvent(**webhook_payload)
|
|
||||||
>>> room_name = extract_room_name(event)
|
|
||||||
"""
|
|
||||||
room = event.payload.get("room_name")
|
|
||||||
# Ensure we return a string, not any falsy value that might be in payload
|
|
||||||
return room if isinstance(room, str) else None
|
|
||||||
|
|
||||||
|
|
||||||
def parse_participant_joined(event: DailyWebhookEvent) -> ParticipantJoinedPayload:
|
|
||||||
"""
|
|
||||||
Parse participant.joined webhook event payload.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event: Webhook event with type "participant.joined"
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Parsed participant joined payload
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
pydantic.ValidationError: If payload doesn't match expected schema
|
|
||||||
"""
|
|
||||||
return ParticipantJoinedPayload(**event.payload)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_participant_left(event: DailyWebhookEvent) -> ParticipantLeftPayload:
|
|
||||||
"""
|
|
||||||
Parse participant.left webhook event payload.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event: Webhook event with type "participant.left"
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Parsed participant left payload
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
pydantic.ValidationError: If payload doesn't match expected schema
|
|
||||||
"""
|
|
||||||
return ParticipantLeftPayload(**event.payload)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_recording_started(event: DailyWebhookEvent) -> RecordingStartedPayload:
|
|
||||||
"""
|
|
||||||
Parse recording.started webhook event payload.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event: Webhook event with type "recording.started"
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Parsed recording started payload
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
pydantic.ValidationError: If payload doesn't match expected schema
|
|
||||||
"""
|
|
||||||
return RecordingStartedPayload(**event.payload)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_recording_ready(
|
|
||||||
event: DailyWebhookEvent,
|
|
||||||
) -> RecordingReadyToDownloadPayload:
|
|
||||||
"""
|
|
||||||
Parse recording.ready-to-download webhook event payload.
|
|
||||||
|
|
||||||
This event is sent when raw-tracks recordings are complete and uploaded to S3.
|
|
||||||
The payload includes a 'tracks' array with individual audio/video files.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event: Webhook event with type "recording.ready-to-download"
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Parsed recording ready payload with tracks array
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
pydantic.ValidationError: If payload doesn't match expected schema
|
|
||||||
|
|
||||||
Example:
|
|
||||||
>>> event = DailyWebhookEvent(**webhook_payload)
|
|
||||||
>>> if event.type == "recording.ready-to-download":
|
|
||||||
... payload = parse_recording_ready(event)
|
|
||||||
... audio_tracks = [t for t in payload.tracks if t.type == "audio"]
|
|
||||||
"""
|
|
||||||
return RecordingReadyToDownloadPayload(**event.payload)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_recording_error(event: DailyWebhookEvent) -> RecordingErrorPayload:
|
|
||||||
"""
|
|
||||||
Parse recording.error webhook event payload.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
event: Webhook event with type "recording.error"
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Parsed recording error payload
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
pydantic.ValidationError: If payload doesn't match expected schema
|
|
||||||
"""
|
|
||||||
return RecordingErrorPayload(**event.payload)
|
|
||||||
|
|
||||||
|
|
||||||
WEBHOOK_PARSERS = {
    "participant.joined": parse_participant_joined,
    "participant.left": parse_participant_left,
    "recording.started": parse_recording_started,
    "recording.ready-to-download": parse_recording_ready,
    "recording.error": parse_recording_error,
}


def parse_webhook_payload(event: DailyWebhookEvent):
    """
    Parse webhook event payload based on event type.

    Args:
        event: Webhook event

    Returns:
        Typed payload model based on event type, or raw dict if unknown

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> payload = parse_webhook_payload(event)
        >>> if isinstance(payload, ParticipantJoinedPayload):
        ...     print(f"User {payload.user_name} joined")
    """
    parser = WEBHOOK_PARSERS.get(event.type)
    if parser:
        return parser(event)
    else:
        logger.warning("Unknown webhook event type", event_type=event.type)
        return event.payload
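A hedged end-to-end sketch of the intended order of operations — verify the raw body first, then build the event model, then dispatch to a typed payload; the `handle_daily_webhook` helper is illustrative and not part of this module.

import json


def handle_daily_webhook(body: bytes, signature: str, timestamp: str, secret: str):
    if not verify_webhook_signature(body, signature, timestamp, secret):
        return None  # real code would log and return a 401 instead

    event = DailyWebhookEvent(**json.loads(body))
    payload = parse_webhook_payload(event)

    if isinstance(payload, RecordingReadyToDownloadPayload):
        # raw-tracks recordings expose per-participant files in payload.tracks
        return [t for t in (payload.tracks or []) if t.type == "audio"]
    return payload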
@@ -1,271 +0,0 @@
|
|||||||
"""
|
|
||||||
Daily.co Webhook Event Models
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Annotated, Any, Dict, Literal, Union
|
|
||||||
|
|
||||||
from pydantic import BaseModel, Field, field_validator
|
|
||||||
|
|
||||||
from reflector.utils.string import NonEmptyString
|
|
||||||
|
|
||||||
|
|
||||||
def normalize_timestamp_to_int(v):
|
|
||||||
"""
|
|
||||||
Normalize float timestamps to int by truncating decimal part.
|
|
||||||
|
|
||||||
Daily.co sometimes sends timestamps as floats (e.g., 1708972279.96).
|
|
||||||
Pydantic expects int for fields typed as `int`.
|
|
||||||
"""
|
|
||||||
if v is None:
|
|
||||||
return v
|
|
||||||
if isinstance(v, float):
|
|
||||||
return int(v)
|
|
||||||
return v
|
|
||||||
|
|
||||||
|
|
||||||
WebhookEventType = Literal[
|
|
||||||
"participant.joined",
|
|
||||||
"participant.left",
|
|
||||||
"recording.started",
|
|
||||||
"recording.ready-to-download",
|
|
||||||
"recording.error",
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
class DailyTrack(BaseModel):
|
|
||||||
"""
|
|
||||||
Individual audio or video track from a multitrack recording.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
|
||||||
"""
|
|
||||||
|
|
||||||
type: Literal["audio", "video"]
|
|
||||||
s3Key: NonEmptyString = Field(description="S3 object key for the track file")
|
|
||||||
size: int = Field(description="File size in bytes")
|
|
||||||
|
|
||||||
|
|
||||||
class DailyWebhookEvent(BaseModel):
|
|
||||||
"""
|
|
||||||
Base structure for all Daily.co webhook events.
|
|
||||||
All events share five common fields documented below.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
|
||||||
"""
|
|
||||||
|
|
||||||
version: NonEmptyString = Field(
|
|
||||||
description="Represents the version of the event. This uses semantic versioning to inform a consumer if the payload has introduced any breaking changes"
|
|
||||||
)
|
|
||||||
type: WebhookEventType = Field(
|
|
||||||
description="Represents the type of the event described in the payload"
|
|
||||||
)
|
|
||||||
id: NonEmptyString = Field(
|
|
||||||
description="An identifier representing this specific event"
|
|
||||||
)
|
|
||||||
payload: Dict[NonEmptyString, Any] = Field(
|
|
||||||
description="An object representing the event, whose fields are described in the corresponding payload class"
|
|
||||||
)
|
|
||||||
event_ts: int = Field(
|
|
||||||
description="Documenting when the webhook itself was sent. This timestamp is different than the time of the event the webhook describes. For example, a recording.started event will contain a start_ts timestamp of when the actual recording started, and a slightly later event_ts timestamp indicating when the webhook event was sent"
|
|
||||||
)
|
|
||||||
|
|
||||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
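A quick illustration of the float handling with a made-up event (only the shape matters here); the identifiers are invented.

raw = {
    "version": "1.0.0",
    "type": "recording.started",
    "id": "evt_123",  # made-up identifier
    "payload": {"recording_id": "rec_1", "room_name": "demo"},
    "event_ts": 1708972279.96,  # float as sometimes sent by Daily.co
}
event = DailyWebhookEvent(**raw)
assert event.event_ts == 1708972279  # decimal part truncated, not rounded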
class ParticipantJoinedPayload(BaseModel):
|
|
||||||
"""
|
|
||||||
Payload for participant.joined webhook event.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-joined
|
|
||||||
"""
|
|
||||||
|
|
||||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
|
||||||
session_id: NonEmptyString = Field(description="Daily.co session identifier")
|
|
||||||
user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
|
|
||||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
|
||||||
joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
|
|
||||||
|
|
||||||
_normalize_joined_at = field_validator("joined_at", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class ParticipantLeftPayload(BaseModel):
|
|
||||||
"""
|
|
||||||
Payload for participant.left webhook event.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-left
|
|
||||||
"""
|
|
||||||
|
|
||||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
|
||||||
session_id: NonEmptyString = Field(description="Daily.co session identifier")
|
|
||||||
user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
|
|
||||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
|
||||||
joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
|
|
||||||
duration: int | None = Field(
|
|
||||||
None, description="Duration of participation in seconds"
|
|
||||||
)
|
|
||||||
|
|
||||||
_normalize_joined_at = field_validator("joined_at", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingStartedPayload(BaseModel):
|
|
||||||
"""
|
|
||||||
Payload for recording.started webhook event.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-started
|
|
||||||
"""
|
|
||||||
|
|
||||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
|
||||||
recording_id: NonEmptyString = Field(description="Recording identifier")
|
|
||||||
start_ts: int | None = Field(None, description="Recording start timestamp")
|
|
||||||
|
|
||||||
_normalize_start_ts = field_validator("start_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingReadyToDownloadPayload(BaseModel):
|
|
||||||
"""
|
|
||||||
Payload for recording.ready-to-download webhook event.
|
|
||||||
This is sent when raw-tracks recordings are complete and uploaded to S3.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-ready-to-download
|
|
||||||
"""
|
|
||||||
|
|
||||||
type: Literal["cloud", "raw-tracks"] = Field(
|
|
||||||
description="The type of recording that was generated"
|
|
||||||
)
|
|
||||||
recording_id: NonEmptyString = Field(
|
|
||||||
description="An ID identifying the recording that was generated"
|
|
||||||
)
|
|
||||||
room_name: NonEmptyString = Field(
|
|
||||||
description="The name of the room where the recording was made"
|
|
||||||
)
|
|
||||||
start_ts: int = Field(
|
|
||||||
description="The Unix epoch time in seconds representing when the recording started"
|
|
||||||
)
|
|
||||||
status: Literal["finished"] = Field(
|
|
||||||
description="The status of the given recording (always 'finished' in ready-to-download webhook, see RecordingStatus in responses.py for full API statuses)"
|
|
||||||
)
|
|
||||||
max_participants: int = Field(
|
|
||||||
description="The number of participants on the call that were recorded"
|
|
||||||
)
|
|
||||||
duration: int = Field(description="The duration in seconds of the call")
|
|
||||||
s3_key: NonEmptyString = Field(
|
|
||||||
description="The location of the recording in the provided S3 bucket"
|
|
||||||
)
|
|
||||||
    share_token: NonEmptyString | None = Field(
        None, description="Undocumented share token secret field"
    )
|
|
||||||
tracks: list[DailyTrack] | None = Field(
|
|
||||||
None,
|
|
||||||
description="If the recording is a raw-tracks recording, a tracks field will be provided. If role permissions have been removed, the tracks field may be null",
|
|
||||||
)
|
|
||||||
|
|
||||||
_normalize_start_ts = field_validator("start_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingErrorPayload(BaseModel):
|
|
||||||
"""
|
|
||||||
Payload for recording.error webhook event.
|
|
||||||
|
|
||||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-error
|
|
||||||
"""
|
|
||||||
|
|
||||||
action: Literal["clourd-recording-err", "cloud-recording-error"] = Field(
|
|
||||||
description="A string describing the event that was emitted (both variants are documented)"
|
|
||||||
)
|
|
||||||
error_msg: NonEmptyString = Field(description="The error message returned")
|
|
||||||
instance_id: NonEmptyString = Field(
|
|
||||||
description="The recording instance ID that was passed into the start recording command"
|
|
||||||
)
|
|
||||||
room_name: NonEmptyString = Field(
|
|
||||||
description="The name of the room where the recording was made"
|
|
||||||
)
|
|
||||||
timestamp: int = Field(
|
|
||||||
description="The Unix epoch time in seconds representing when the error was emitted"
|
|
||||||
)
|
|
||||||
|
|
||||||
_normalize_timestamp = field_validator("timestamp", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class ParticipantJoinedEvent(BaseModel):
|
|
||||||
version: NonEmptyString
|
|
||||||
type: Literal["participant.joined"]
|
|
||||||
id: NonEmptyString
|
|
||||||
payload: ParticipantJoinedPayload
|
|
||||||
event_ts: int
|
|
||||||
|
|
||||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class ParticipantLeftEvent(BaseModel):
|
|
||||||
version: NonEmptyString
|
|
||||||
type: Literal["participant.left"]
|
|
||||||
id: NonEmptyString
|
|
||||||
payload: ParticipantLeftPayload
|
|
||||||
event_ts: int
|
|
||||||
|
|
||||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingStartedEvent(BaseModel):
|
|
||||||
version: NonEmptyString
|
|
||||||
type: Literal["recording.started"]
|
|
||||||
id: NonEmptyString
|
|
||||||
payload: RecordingStartedPayload
|
|
||||||
event_ts: int
|
|
||||||
|
|
||||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingReadyEvent(BaseModel):
|
|
||||||
version: NonEmptyString
|
|
||||||
type: Literal["recording.ready-to-download"]
|
|
||||||
id: NonEmptyString
|
|
||||||
payload: RecordingReadyToDownloadPayload
|
|
||||||
event_ts: int
|
|
||||||
|
|
||||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingErrorEvent(BaseModel):
|
|
||||||
version: NonEmptyString
|
|
||||||
type: Literal["recording.error"]
|
|
||||||
id: NonEmptyString
|
|
||||||
payload: RecordingErrorPayload
|
|
||||||
event_ts: int
|
|
||||||
|
|
||||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
|
||||||
normalize_timestamp_to_int
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
DailyWebhookEventUnion = Annotated[
    Union[
        ParticipantJoinedEvent,
        ParticipantLeftEvent,
        RecordingStartedEvent,
        RecordingReadyEvent,
        RecordingErrorEvent,
    ],
    Field(discriminator="type"),
]
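One way a caller might take advantage of the discriminator, sketched with pydantic's TypeAdapter; the sample event is invented.

from pydantic import TypeAdapter

adapter = TypeAdapter(DailyWebhookEventUnion)

raw = {
    "version": "1.0.0",
    "type": "participant.joined",
    "id": "evt_456",  # made-up identifier
    "payload": {
        "session_id": "sess_1",
        "user_id": "user_1",
        "joined_at": 1708972280,
    },
    "event_ts": 1708972281,
}
event = adapter.validate_python(raw)  # -> ParticipantJoinedEvent
print(type(event).__name__, event.payload.user_id)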
@@ -24,14 +24,10 @@ def get_database() -> databases.Database:


     # import models
-    import reflector.db.calendar_events  # noqa
-    import reflector.db.daily_participant_sessions  # noqa
     import reflector.db.meetings  # noqa
     import reflector.db.recordings  # noqa
     import reflector.db.rooms  # noqa
     import reflector.db.transcripts  # noqa
-    import reflector.db.user_api_keys  # noqa
-    import reflector.db.users  # noqa

     kwargs = {}
     if "postgres" not in settings.DATABASE_URL:
@@ -1,187 +0,0 @@
|
|||||||
from datetime import datetime, timedelta, timezone
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from pydantic import BaseModel, Field
|
|
||||||
from sqlalchemy.dialects.postgresql import JSONB
|
|
||||||
|
|
||||||
from reflector.db import get_database, metadata
|
|
||||||
from reflector.utils import generate_uuid4
|
|
||||||
|
|
||||||
calendar_events = sa.Table(
|
|
||||||
"calendar_event",
|
|
||||||
metadata,
|
|
||||||
sa.Column("id", sa.String, primary_key=True),
|
|
||||||
sa.Column(
|
|
||||||
"room_id",
|
|
||||||
sa.String,
|
|
||||||
sa.ForeignKey("room.id", ondelete="CASCADE", name="fk_calendar_event_room_id"),
|
|
||||||
nullable=False,
|
|
||||||
),
|
|
||||||
sa.Column("ics_uid", sa.Text, nullable=False),
|
|
||||||
sa.Column("title", sa.Text),
|
|
||||||
sa.Column("description", sa.Text),
|
|
||||||
sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("end_time", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("attendees", JSONB),
|
|
||||||
sa.Column("location", sa.Text),
|
|
||||||
sa.Column("ics_raw_data", sa.Text),
|
|
||||||
sa.Column("last_synced", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("is_deleted", sa.Boolean, nullable=False, server_default=sa.false()),
|
|
||||||
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.UniqueConstraint("room_id", "ics_uid", name="uq_room_calendar_event"),
|
|
||||||
sa.Index("idx_calendar_event_room_start", "room_id", "start_time"),
|
|
||||||
sa.Index(
|
|
||||||
"idx_calendar_event_deleted",
|
|
||||||
"is_deleted",
|
|
||||||
postgresql_where=sa.text("NOT is_deleted"),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class CalendarEvent(BaseModel):
|
|
||||||
id: str = Field(default_factory=generate_uuid4)
|
|
||||||
room_id: str
|
|
||||||
ics_uid: str
|
|
||||||
title: str | None = None
|
|
||||||
description: str | None = None
|
|
||||||
start_time: datetime
|
|
||||||
end_time: datetime
|
|
||||||
attendees: list[dict[str, Any]] | None = None
|
|
||||||
location: str | None = None
|
|
||||||
ics_raw_data: str | None = None
|
|
||||||
last_synced: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
|
||||||
is_deleted: bool = False
|
|
||||||
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
|
||||||
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
|
||||||
|
|
||||||
|
|
||||||
class CalendarEventController:
|
|
||||||
async def get_by_room(
|
|
||||||
self,
|
|
||||||
room_id: str,
|
|
||||||
include_deleted: bool = False,
|
|
||||||
start_after: datetime | None = None,
|
|
||||||
end_before: datetime | None = None,
|
|
||||||
) -> list[CalendarEvent]:
|
|
||||||
query = calendar_events.select().where(calendar_events.c.room_id == room_id)
|
|
||||||
|
|
||||||
if not include_deleted:
|
|
||||||
query = query.where(calendar_events.c.is_deleted == False)
|
|
||||||
|
|
||||||
if start_after:
|
|
||||||
query = query.where(calendar_events.c.start_time >= start_after)
|
|
||||||
|
|
||||||
if end_before:
|
|
||||||
query = query.where(calendar_events.c.end_time <= end_before)
|
|
||||||
|
|
||||||
query = query.order_by(calendar_events.c.start_time.asc())
|
|
||||||
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
return [CalendarEvent(**result) for result in results]
|
|
||||||
|
|
||||||
async def get_upcoming(
|
|
||||||
self, room_id: str, minutes_ahead: int = 120
|
|
||||||
) -> list[CalendarEvent]:
|
|
||||||
"""Get upcoming events for a room within the specified minutes, including currently happening events."""
|
|
||||||
now = datetime.now(timezone.utc)
|
|
||||||
future_time = now + timedelta(minutes=minutes_ahead)
|
|
||||||
|
|
||||||
query = (
|
|
||||||
calendar_events.select()
|
|
||||||
.where(
|
|
||||||
sa.and_(
|
|
||||||
calendar_events.c.room_id == room_id,
|
|
||||||
calendar_events.c.is_deleted == False,
|
|
||||||
calendar_events.c.start_time <= future_time,
|
|
||||||
calendar_events.c.end_time >= now,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
.order_by(calendar_events.c.start_time.asc())
|
|
||||||
)
|
|
||||||
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
return [CalendarEvent(**result) for result in results]
|
|
||||||
|
|
||||||
async def get_by_id(self, event_id: str) -> CalendarEvent | None:
|
|
||||||
query = calendar_events.select().where(calendar_events.c.id == event_id)
|
|
||||||
result = await get_database().fetch_one(query)
|
|
||||||
return CalendarEvent(**result) if result else None
|
|
||||||
|
|
||||||
async def get_by_ics_uid(self, room_id: str, ics_uid: str) -> CalendarEvent | None:
|
|
||||||
query = calendar_events.select().where(
|
|
||||||
sa.and_(
|
|
||||||
calendar_events.c.room_id == room_id,
|
|
||||||
calendar_events.c.ics_uid == ics_uid,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
result = await get_database().fetch_one(query)
|
|
||||||
return CalendarEvent(**result) if result else None
|
|
||||||
|
|
||||||
async def upsert(self, event: CalendarEvent) -> CalendarEvent:
|
|
||||||
existing = await self.get_by_ics_uid(event.room_id, event.ics_uid)
|
|
||||||
|
|
||||||
if existing:
|
|
||||||
event.id = existing.id
|
|
||||||
event.created_at = existing.created_at
|
|
||||||
event.updated_at = datetime.now(timezone.utc)
|
|
||||||
|
|
||||||
query = (
|
|
||||||
calendar_events.update()
|
|
||||||
.where(calendar_events.c.id == existing.id)
|
|
||||||
.values(**event.model_dump())
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
query = calendar_events.insert().values(**event.model_dump())
|
|
||||||
|
|
||||||
await get_database().execute(query)
|
|
||||||
return event
|
|
||||||
|
|
||||||
async def soft_delete_missing(
|
|
||||||
self, room_id: str, current_ics_uids: list[str]
|
|
||||||
) -> int:
|
|
||||||
"""Soft delete future events that are no longer in the calendar."""
|
|
||||||
now = datetime.now(timezone.utc)
|
|
||||||
|
|
||||||
select_query = calendar_events.select().where(
|
|
||||||
sa.and_(
|
|
||||||
calendar_events.c.room_id == room_id,
|
|
||||||
calendar_events.c.start_time > now,
|
|
||||||
calendar_events.c.is_deleted == False,
|
|
||||||
calendar_events.c.ics_uid.notin_(current_ics_uids)
|
|
||||||
if current_ics_uids
|
|
||||||
else True,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
to_delete = await get_database().fetch_all(select_query)
|
|
||||||
delete_count = len(to_delete)
|
|
||||||
|
|
||||||
if delete_count > 0:
|
|
||||||
update_query = (
|
|
||||||
calendar_events.update()
|
|
||||||
.where(
|
|
||||||
sa.and_(
|
|
||||||
calendar_events.c.room_id == room_id,
|
|
||||||
calendar_events.c.start_time > now,
|
|
||||||
calendar_events.c.is_deleted == False,
|
|
||||||
calendar_events.c.ics_uid.notin_(current_ics_uids)
|
|
||||||
if current_ics_uids
|
|
||||||
else True,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
.values(is_deleted=True, updated_at=now)
|
|
||||||
)
|
|
||||||
|
|
||||||
await get_database().execute(update_query)
|
|
||||||
|
|
||||||
return delete_count
|
|
||||||
|
|
||||||
async def delete_by_room(self, room_id: str) -> int:
|
|
||||||
query = calendar_events.delete().where(calendar_events.c.room_id == room_id)
|
|
||||||
result = await get_database().execute(query)
|
|
||||||
return result.rowcount
|
|
||||||
|
|
||||||
|
|
||||||
calendar_events_controller = CalendarEventController()
|
|
||||||
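A hedged sketch of how a calendar sync pass could combine `upsert` and `soft_delete_missing`; the ICS fetching/parsing step and the `sync_room_calendar` helper are assumptions, not part of this module.

async def sync_room_calendar(room_id: str, parsed_events: list[CalendarEvent]) -> int:
    # parsed_events would come from fetching and parsing the room's ICS feed,
    # which happens outside this module
    for event in parsed_events:
        await calendar_events_controller.upsert(event)

    current_uids = [event.ics_uid for event in parsed_events]
    # soft-delete future events that disappeared from the calendar
    return await calendar_events_controller.soft_delete_missing(room_id, current_uids)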
@@ -1,229 +0,0 @@
|
|||||||
"""Daily.co participant session tracking.
|
|
||||||
|
|
||||||
Stores webhook data for participant.joined and participant.left events to provide
|
|
||||||
historical session information (Daily.co API only returns current participants).
|
|
||||||
"""
|
|
||||||
|
|
||||||
from datetime import datetime
|
|
||||||
|
|
||||||
import sqlalchemy as sa
|
|
||||||
from pydantic import BaseModel
|
|
||||||
from sqlalchemy.dialects.postgresql import insert
|
|
||||||
|
|
||||||
from reflector.db import get_database, metadata
|
|
||||||
from reflector.utils.string import NonEmptyString
|
|
||||||
|
|
||||||
daily_participant_sessions = sa.Table(
|
|
||||||
"daily_participant_session",
|
|
||||||
metadata,
|
|
||||||
sa.Column("id", sa.String, primary_key=True),
|
|
||||||
sa.Column(
|
|
||||||
"meeting_id",
|
|
||||||
sa.String,
|
|
||||||
sa.ForeignKey("meeting.id", ondelete="CASCADE"),
|
|
||||||
nullable=False,
|
|
||||||
),
|
|
||||||
sa.Column(
|
|
||||||
"room_id",
|
|
||||||
sa.String,
|
|
||||||
sa.ForeignKey("room.id", ondelete="CASCADE"),
|
|
||||||
nullable=False,
|
|
||||||
),
|
|
||||||
sa.Column("session_id", sa.String, nullable=False),
|
|
||||||
sa.Column("user_id", sa.String, nullable=True),
|
|
||||||
sa.Column("user_name", sa.String, nullable=False),
|
|
||||||
sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
|
|
||||||
sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
|
|
||||||
sa.Index("idx_daily_session_meeting_left", "meeting_id", "left_at"),
|
|
||||||
sa.Index("idx_daily_session_room", "room_id"),
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class DailyParticipantSession(BaseModel):
|
|
||||||
"""Daily.co participant session record.
|
|
||||||
|
|
||||||
Tracks when a participant joined and left a meeting. Populated from webhooks:
|
|
||||||
- participant.joined: Creates record with left_at=None
|
|
||||||
- participant.left: Updates record with left_at
|
|
||||||
|
|
||||||
ID format: {meeting_id}:{user_id}:{joined_at_ms}
|
|
||||||
- Ensures idempotency (duplicate webhooks don't create duplicates)
|
|
||||||
- Allows same user to rejoin (different joined_at = different session)
|
|
||||||
|
|
||||||
Duration is calculated as: left_at - joined_at (not stored)
|
|
||||||
"""
|
|
||||||
|
|
||||||
id: NonEmptyString
|
|
||||||
meeting_id: NonEmptyString
|
|
||||||
room_id: NonEmptyString
|
|
||||||
session_id: NonEmptyString # Daily.co's session_id (identifies room session)
|
|
||||||
user_id: NonEmptyString | None = None
|
|
||||||
user_name: str
|
|
||||||
joined_at: datetime
|
|
||||||
left_at: datetime | None = None
|
|
||||||
|
|
||||||
|
|
||||||
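A small sketch of how the composite ID described above might be built; the helper name and sample values are invented.

from datetime import datetime, timezone


def build_session_id(meeting_id: str, user_id: str, joined_at: datetime) -> str:
    # {meeting_id}:{user_id}:{joined_at_ms}, as described in the docstring
    joined_at_ms = int(joined_at.timestamp() * 1000)
    return f"{meeting_id}:{user_id}:{joined_at_ms}"


joined = datetime(2024, 2, 26, 18, 31, 19, tzinfo=timezone.utc)
print(build_session_id("meeting-1", "user-1", joined))
# -> meeting-1:user-1:1708972279000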
class DailyParticipantSessionController:
|
|
||||||
"""Controller for Daily.co participant session persistence."""
|
|
||||||
|
|
||||||
async def get_by_id(self, id: str) -> DailyParticipantSession | None:
|
|
||||||
"""Get a session by its ID."""
|
|
||||||
query = daily_participant_sessions.select().where(
|
|
||||||
daily_participant_sessions.c.id == id
|
|
||||||
)
|
|
||||||
result = await get_database().fetch_one(query)
|
|
||||||
return DailyParticipantSession(**result) if result else None
|
|
||||||
|
|
||||||
async def get_open_session(
|
|
||||||
self, meeting_id: NonEmptyString, session_id: NonEmptyString
|
|
||||||
) -> DailyParticipantSession | None:
|
|
||||||
"""Get the open (not left) session for a user in a meeting."""
|
|
||||||
query = daily_participant_sessions.select().where(
|
|
||||||
sa.and_(
|
|
||||||
daily_participant_sessions.c.meeting_id == meeting_id,
|
|
||||||
daily_participant_sessions.c.session_id == session_id,
|
|
||||||
daily_participant_sessions.c.left_at.is_(None),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
|
|
||||||
if len(results) > 1:
|
|
||||||
raise ValueError(
|
|
||||||
f"Multiple open sessions for daily session {session_id} in meeting {meeting_id}: "
|
|
||||||
f"found {len(results)} sessions"
|
|
||||||
)
|
|
||||||
|
|
||||||
return DailyParticipantSession(**results[0]) if results else None
|
|
||||||
|
|
||||||
async def upsert_joined(self, session: DailyParticipantSession) -> None:
|
|
||||||
"""Insert or update when participant.joined webhook arrives.
|
|
||||||
|
|
||||||
Idempotent: Duplicate webhooks with same ID are safely ignored.
|
|
||||||
Out-of-order: If left webhook arrived first, preserves left_at.
|
|
||||||
"""
|
|
||||||
query = insert(daily_participant_sessions).values(**session.model_dump())
|
|
||||||
query = query.on_conflict_do_update(
|
|
||||||
index_elements=["id"],
|
|
||||||
set_={"user_name": session.user_name},
|
|
||||||
)
|
|
||||||
await get_database().execute(query)
|
|
||||||
|
|
||||||
async def upsert_left(self, session: DailyParticipantSession) -> None:
|
|
||||||
"""Update session when participant.left webhook arrives.
|
|
||||||
|
|
||||||
Finds the open session for this user in this meeting and updates left_at.
|
|
||||||
Works around Daily.co webhook timestamp inconsistency (joined_at differs by ~4ms between webhooks).
|
|
||||||
|
|
||||||
Handles three cases:
|
|
||||||
1. Normal flow: open session exists → updates left_at
|
|
||||||
2. Out-of-order: left arrives first → creates new record with left data
|
|
||||||
3. Duplicate: left arrives again → idempotent (DB trigger prevents left_at modification)
|
|
||||||
"""
|
|
||||||
if session.left_at is None:
|
|
||||||
raise ValueError("left_at is required for upsert_left")
|
|
||||||
|
|
||||||
if session.left_at <= session.joined_at:
|
|
||||||
raise ValueError(
|
|
||||||
f"left_at ({session.left_at}) must be after joined_at ({session.joined_at})"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Find existing open session (works around timestamp mismatch in webhooks)
|
|
||||||
existing = await self.get_open_session(session.meeting_id, session.session_id)
|
|
||||||
|
|
||||||
if existing:
|
|
||||||
# Update existing open session
|
|
||||||
query = (
|
|
||||||
daily_participant_sessions.update()
|
|
||||||
.where(daily_participant_sessions.c.id == existing.id)
|
|
||||||
.values(left_at=session.left_at)
|
|
||||||
)
|
|
||||||
await get_database().execute(query)
|
|
||||||
else:
|
|
||||||
# Out-of-order or first webhook: insert new record
|
|
||||||
query = insert(daily_participant_sessions).values(**session.model_dump())
|
|
||||||
query = query.on_conflict_do_nothing(index_elements=["id"])
|
|
||||||
await get_database().execute(query)
|
|
||||||
|
|
||||||
async def get_by_meeting(self, meeting_id: str) -> list[DailyParticipantSession]:
|
|
||||||
"""Get all participant sessions for a meeting (active and ended)."""
|
|
||||||
query = daily_participant_sessions.select().where(
|
|
||||||
daily_participant_sessions.c.meeting_id == meeting_id
|
|
||||||
)
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
return [DailyParticipantSession(**result) for result in results]
|
|
||||||
|
|
||||||
async def get_active_by_meeting(
|
|
||||||
self, meeting_id: str
|
|
||||||
) -> list[DailyParticipantSession]:
|
|
||||||
"""Get only active (not left) participant sessions for a meeting."""
|
|
||||||
query = daily_participant_sessions.select().where(
|
|
||||||
sa.and_(
|
|
||||||
daily_participant_sessions.c.meeting_id == meeting_id,
|
|
||||||
daily_participant_sessions.c.left_at.is_(None),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
return [DailyParticipantSession(**result) for result in results]
|
|
||||||
|
|
||||||
async def get_all_sessions_for_meeting(
|
|
||||||
self, meeting_id: NonEmptyString
|
|
||||||
) -> dict[NonEmptyString, DailyParticipantSession]:
|
|
||||||
query = daily_participant_sessions.select().where(
|
|
||||||
daily_participant_sessions.c.meeting_id == meeting_id
|
|
||||||
)
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
# TODO DailySessionId custom type
|
|
||||||
return {row["session_id"]: DailyParticipantSession(**row) for row in results}
|
|
||||||
|
|
||||||
async def batch_upsert_sessions(
|
|
||||||
self, sessions: list[DailyParticipantSession]
|
|
||||||
) -> None:
|
|
||||||
"""Upsert multiple sessions in single query.
|
|
||||||
|
|
||||||
Uses ON CONFLICT for idempotency. Updates user_name on conflict since they may change it during a meeting.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not sessions:
|
|
||||||
return
|
|
||||||
|
|
||||||
values = [session.model_dump() for session in sessions]
|
|
||||||
query = insert(daily_participant_sessions).values(values)
|
|
||||||
query = query.on_conflict_do_update(
|
|
||||||
index_elements=["id"],
|
|
||||||
set_={
|
|
||||||
# Preserve existing left_at to prevent race conditions
|
|
||||||
"left_at": sa.func.coalesce(
|
|
||||||
daily_participant_sessions.c.left_at,
|
|
||||||
query.excluded.left_at,
|
|
||||||
),
|
|
||||||
"user_name": query.excluded.user_name,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
await get_database().execute(query)
|
|
||||||
|
|
||||||
async def batch_close_sessions(
|
|
||||||
self, session_ids: list[NonEmptyString], left_at: datetime
|
|
||||||
) -> None:
|
|
||||||
"""Mark multiple sessions as left in single query.
|
|
||||||
|
|
||||||
Only updates sessions where left_at is NULL (protects already-closed sessions).
|
|
||||||
|
|
||||||
A left_at mismatch for an already-closed session is ignored; it is assumed not to matter if it ever happens.
|
|
||||||
"""
|
|
||||||
if not session_ids:
|
|
||||||
return
|
|
||||||
|
|
||||||
query = (
|
|
||||||
daily_participant_sessions.update()
|
|
||||||
.where(
|
|
||||||
sa.and_(
|
|
||||||
daily_participant_sessions.c.id.in_(session_ids),
|
|
||||||
daily_participant_sessions.c.left_at.is_(None),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
.values(left_at=left_at)
|
|
||||||
)
|
|
||||||
await get_database().execute(query)
|
|
||||||
|
|
||||||
|
|
||||||
daily_participant_sessions_controller = DailyParticipantSessionController()
|
|
||||||
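A hedged sketch of how the batch helpers above might be combined during a periodic reconciliation pass; the `close_stale_sessions` helper and the `live_session_ids` input (e.g. from Daily's presence API) are assumptions.

from datetime import datetime, timezone


async def close_stale_sessions(meeting_id: str, live_session_ids: set[str]) -> None:
    # close stored-open sessions whose Daily.co session id is no longer live
    stored = await daily_participant_sessions_controller.get_all_sessions_for_meeting(
        meeting_id
    )
    stale = [
        session.id
        for session_id, session in stored.items()
        if session.left_at is None and session_id not in live_session_ids
    ]
    await daily_participant_sessions_controller.batch_close_sessions(
        stale, left_at=datetime.now(timezone.utc)
    )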
@@ -1,15 +1,13 @@
|
|||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import Any, Literal
|
from typing import Literal
|
||||||
|
|
||||||
import sqlalchemy as sa
|
import sqlalchemy as sa
|
||||||
|
from fastapi import HTTPException
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
from sqlalchemy.dialects.postgresql import JSONB
|
|
||||||
|
|
||||||
from reflector.db import get_database, metadata
|
from reflector.db import get_database, metadata
|
||||||
from reflector.db.rooms import Room
|
from reflector.db.rooms import Room
|
||||||
from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
|
|
||||||
from reflector.utils import generate_uuid4
|
from reflector.utils import generate_uuid4
|
||||||
from reflector.utils.string import assert_equal
|
|
||||||
|
|
||||||
meetings = sa.Table(
|
meetings = sa.Table(
|
||||||
"meeting",
|
"meeting",
|
||||||
@@ -20,12 +18,8 @@ meetings = sa.Table(
     sa.Column("host_room_url", sa.String),
     sa.Column("start_date", sa.DateTime(timezone=True)),
     sa.Column("end_date", sa.DateTime(timezone=True)),
-    sa.Column(
-        "room_id",
-        sa.String,
-        sa.ForeignKey("room.id", ondelete="CASCADE"),
-        nullable=True,
-    ),
+    sa.Column("user_id", sa.String),
+    sa.Column("room_id", sa.String),
     sa.Column("is_locked", sa.Boolean, nullable=False, server_default=sa.false()),
     sa.Column("room_mode", sa.String, nullable=False, server_default="normal"),
     sa.Column("recording_type", sa.String, nullable=False, server_default="cloud"),
@@ -47,36 +41,20 @@ meetings = sa.Table(
|
|||||||
nullable=False,
|
nullable=False,
|
||||||
server_default=sa.true(),
|
server_default=sa.true(),
|
||||||
),
|
),
|
||||||
sa.Column(
|
|
||||||
"calendar_event_id",
|
|
||||||
sa.String,
|
|
||||||
sa.ForeignKey(
|
|
||||||
"calendar_event.id",
|
|
||||||
ondelete="SET NULL",
|
|
||||||
name="fk_meeting_calendar_event_id",
|
|
||||||
),
|
|
||||||
),
|
|
||||||
sa.Column("calendar_metadata", JSONB),
|
|
||||||
sa.Column(
|
|
||||||
"platform",
|
|
||||||
sa.String,
|
|
||||||
nullable=False,
|
|
||||||
server_default=assert_equal(WHEREBY_PLATFORM, "whereby"),
|
|
||||||
),
|
|
||||||
sa.Index("idx_meeting_room_id", "room_id"),
|
sa.Index("idx_meeting_room_id", "room_id"),
|
||||||
sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
|
sa.Index(
|
||||||
|
"idx_one_active_meeting_per_room",
|
||||||
|
"room_id",
|
||||||
|
unique=True,
|
||||||
|
postgresql_where=sa.text("is_active = true"),
|
||||||
|
),
|
||||||
)
|
)
|
||||||
|
|
||||||
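For context on the `idx_one_active_meeting_per_room` index above: a partial unique index constrains only the rows matching its predicate, which is what allows many historical meetings per room but at most one active one. Roughly the DDL it corresponds to on PostgreSQL, as an illustration:

# Illustrative only - not copied from a migration file
ONE_ACTIVE_MEETING_PER_ROOM_DDL = """
CREATE UNIQUE INDEX idx_one_active_meeting_per_room
ON meeting (room_id)
WHERE is_active = true;
"""
# A second row with is_active = true for the same room_id is rejected,
# while any number of inactive (historical) meetings for that room can coexist.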
meeting_consent = sa.Table(
|
meeting_consent = sa.Table(
|
||||||
"meeting_consent",
|
"meeting_consent",
|
||||||
metadata,
|
metadata,
|
||||||
sa.Column("id", sa.String, primary_key=True),
|
sa.Column("id", sa.String, primary_key=True),
|
||||||
sa.Column(
|
sa.Column("meeting_id", sa.String, sa.ForeignKey("meeting.id"), nullable=False),
|
||||||
"meeting_id",
|
|
||||||
sa.String,
|
|
||||||
sa.ForeignKey("meeting.id", ondelete="CASCADE"),
|
|
||||||
nullable=False,
|
|
||||||
),
|
|
||||||
sa.Column("user_id", sa.String),
|
sa.Column("user_id", sa.String),
|
||||||
sa.Column("consent_given", sa.Boolean, nullable=False),
|
sa.Column("consent_given", sa.Boolean, nullable=False),
|
||||||
sa.Column("consent_timestamp", sa.DateTime(timezone=True), nullable=False),
|
sa.Column("consent_timestamp", sa.DateTime(timezone=True), nullable=False),
|
||||||
@@ -98,18 +76,15 @@ class Meeting(BaseModel):
|
|||||||
host_room_url: str
|
host_room_url: str
|
||||||
start_date: datetime
|
start_date: datetime
|
||||||
end_date: datetime
|
end_date: datetime
|
||||||
room_id: str | None
|
user_id: str | None = None
|
||||||
|
room_id: str | None = None
|
||||||
is_locked: bool = False
|
is_locked: bool = False
|
||||||
room_mode: Literal["normal", "group"] = "normal"
|
room_mode: Literal["normal", "group"] = "normal"
|
||||||
recording_type: Literal["none", "local", "cloud"] = "cloud"
|
recording_type: Literal["none", "local", "cloud"] = "cloud"
|
||||||
recording_trigger: Literal[ # whereby-specific
|
recording_trigger: Literal[
|
||||||
"none", "prompt", "automatic", "automatic-2nd-participant"
|
"none", "prompt", "automatic", "automatic-2nd-participant"
|
||||||
] = "automatic-2nd-participant"
|
] = "automatic-2nd-participant"
|
||||||
num_clients: int = 0
|
num_clients: int = 0
|
||||||
is_active: bool = True
|
|
||||||
calendar_event_id: str | None = None
|
|
||||||
calendar_metadata: dict[str, Any] | None = None
|
|
||||||
platform: Platform = WHEREBY_PLATFORM
|
|
||||||
|
|
||||||
|
|
||||||
class MeetingController:
|
class MeetingController:
|
||||||
@@ -121,10 +96,12 @@ class MeetingController:
|
|||||||
host_room_url: str,
|
host_room_url: str,
|
||||||
start_date: datetime,
|
start_date: datetime,
|
||||||
end_date: datetime,
|
end_date: datetime,
|
||||||
|
user_id: str,
|
||||||
room: Room,
|
room: Room,
|
||||||
calendar_event_id: str | None = None,
|
|
||||||
calendar_metadata: dict[str, Any] | None = None,
|
|
||||||
):
|
):
|
||||||
|
"""
|
||||||
|
Create a new meeting
|
||||||
|
"""
|
||||||
meeting = Meeting(
|
meeting = Meeting(
|
||||||
id=id,
|
id=id,
|
||||||
room_name=room_name,
|
room_name=room_name,
|
||||||
@@ -132,49 +109,41 @@ class MeetingController:
|
|||||||
host_room_url=host_room_url,
|
host_room_url=host_room_url,
|
||||||
start_date=start_date,
|
start_date=start_date,
|
||||||
end_date=end_date,
|
end_date=end_date,
|
||||||
|
user_id=user_id,
|
||||||
room_id=room.id,
|
room_id=room.id,
|
||||||
is_locked=room.is_locked,
|
is_locked=room.is_locked,
|
||||||
room_mode=room.room_mode,
|
room_mode=room.room_mode,
|
||||||
recording_type=room.recording_type,
|
recording_type=room.recording_type,
|
||||||
recording_trigger=room.recording_trigger,
|
recording_trigger=room.recording_trigger,
|
||||||
calendar_event_id=calendar_event_id,
|
|
||||||
calendar_metadata=calendar_metadata,
|
|
||||||
platform=room.platform,
|
|
||||||
)
|
)
|
||||||
query = meetings.insert().values(**meeting.model_dump())
|
query = meetings.insert().values(**meeting.model_dump())
|
||||||
await get_database().execute(query)
|
await get_database().execute(query)
|
||||||
return meeting
|
return meeting
|
||||||
|
|
||||||
async def get_all_active(self, platform: str | None = None) -> list[Meeting]:
|
async def get_all_active(self) -> list[Meeting]:
|
||||||
conditions = [meetings.c.is_active]
|
"""
|
||||||
if platform is not None:
|
Get active meetings.
|
||||||
conditions.append(meetings.c.platform == platform)
|
"""
|
||||||
query = meetings.select().where(sa.and_(*conditions))
|
query = meetings.select().where(meetings.c.is_active)
|
||||||
results = await get_database().fetch_all(query)
|
return await get_database().fetch_all(query)
|
||||||
return [Meeting(**result) for result in results]
|
|
||||||
|
|
||||||
async def get_by_room_name(
|
async def get_by_room_name(
|
||||||
self,
|
self,
|
||||||
room_name: str,
|
room_name: str,
|
||||||
) -> Meeting | None:
|
) -> Meeting:
|
||||||
"""
|
"""
|
||||||
Get a meeting by room name.
|
Get a meeting by room name.
|
||||||
For backward compatibility, returns the most recent meeting.
|
|
||||||
"""
|
"""
|
||||||
query = (
|
query = meetings.select().where(meetings.c.room_name == room_name)
|
||||||
meetings.select()
|
|
||||||
.where(meetings.c.room_name == room_name)
|
|
||||||
.order_by(meetings.c.end_date.desc())
|
|
||||||
)
|
|
||||||
result = await get_database().fetch_one(query)
|
result = await get_database().fetch_one(query)
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
return Meeting(**result)
|
return Meeting(**result)
|
||||||
|
|
||||||
async def get_active(self, room: Room, current_time: datetime) -> Meeting | None:
|
async def get_active(self, room: Room, current_time: datetime) -> Meeting:
|
||||||
"""
|
"""
|
||||||
Get latest active meeting for a room.
|
Get latest active meeting for a room.
|
||||||
For backward compatibility, returns the most recent active meeting.
|
|
||||||
"""
|
"""
|
||||||
end_date = getattr(meetings.c, "end_date")
|
end_date = getattr(meetings.c, "end_date")
|
||||||
query = (
|
query = (
|
||||||
@@ -191,97 +160,40 @@ class MeetingController:
|
|||||||
result = await get_database().fetch_one(query)
|
result = await get_database().fetch_one(query)
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
return Meeting(**result)
|
return Meeting(**result)
|
||||||
|
|
||||||
async def get_all_active_for_room(
|
async def get_by_id(self, meeting_id: str, **kwargs) -> Meeting | None:
|
||||||
self, room: Room, current_time: datetime
|
|
||||||
) -> list[Meeting]:
|
|
||||||
end_date = getattr(meetings.c, "end_date")
|
|
||||||
query = (
|
|
||||||
meetings.select()
|
|
||||||
.where(
|
|
||||||
sa.and_(
|
|
||||||
meetings.c.room_id == room.id,
|
|
||||||
meetings.c.end_date > current_time,
|
|
||||||
meetings.c.is_active,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
.order_by(end_date.desc())
|
|
||||||
)
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
return [Meeting(**result) for result in results]
|
|
||||||
|
|
||||||
async def get_active_by_calendar_event(
|
|
||||||
self, room: Room, calendar_event_id: str, current_time: datetime
|
|
||||||
) -> Meeting | None:
|
|
||||||
"""
|
"""
|
||||||
Get active meeting for a specific calendar event.
|
Get a meeting by id
|
||||||
"""
|
"""
|
||||||
query = meetings.select().where(
|
|
||||||
sa.and_(
|
|
||||||
meetings.c.room_id == room.id,
|
|
||||||
meetings.c.calendar_event_id == calendar_event_id,
|
|
||||||
meetings.c.end_date > current_time,
|
|
||||||
meetings.c.is_active,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
result = await get_database().fetch_one(query)
|
|
||||||
if not result:
|
|
||||||
return None
|
|
||||||
return Meeting(**result)
|
|
||||||
|
|
||||||
async def get_by_id(
|
|
||||||
self, meeting_id: str, room: Room | None = None
|
|
||||||
) -> Meeting | None:
|
|
||||||
query = meetings.select().where(meetings.c.id == meeting_id)
|
query = meetings.select().where(meetings.c.id == meeting_id)
|
||||||
|
|
||||||
if room:
|
|
||||||
query = query.where(meetings.c.room_id == room.id)
|
|
||||||
|
|
||||||
result = await get_database().fetch_one(query)
|
result = await get_database().fetch_one(query)
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
return None
|
||||||
return Meeting(**result)
|
return Meeting(**result)
|
||||||
|
|
||||||
async def get_by_calendar_event(
|
async def get_by_id_for_http(self, meeting_id: str, user_id: str | None) -> Meeting:
|
||||||
self, calendar_event_id: str, room: Room
|
"""
|
||||||
) -> Meeting | None:
|
Get a meeting by ID for HTTP request.
|
||||||
query = meetings.select().where(
|
|
||||||
meetings.c.calendar_event_id == calendar_event_id
|
If not found, it will raise a 404 error.
|
||||||
)
|
"""
|
||||||
if room:
|
query = meetings.select().where(meetings.c.id == meeting_id)
|
||||||
query = query.where(meetings.c.room_id == room.id)
|
|
||||||
result = await get_database().fetch_one(query)
|
result = await get_database().fetch_one(query)
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
raise HTTPException(status_code=404, detail="Meeting not found")
|
||||||
return Meeting(**result)
|
|
||||||
|
meeting = Meeting(**result)
|
||||||
|
if result["user_id"] != user_id:
|
||||||
|
meeting.host_room_url = ""
|
||||||
|
|
||||||
|
return meeting
|
||||||
|
|
||||||
async def update_meeting(self, meeting_id: str, **kwargs):
|
async def update_meeting(self, meeting_id: str, **kwargs):
|
||||||
query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
|
query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
|
||||||
await get_database().execute(query)
|
await get_database().execute(query)
|
||||||
|
|
||||||
async def increment_num_clients(self, meeting_id: str) -> None:
|
|
||||||
"""Atomically increment participant count."""
|
|
||||||
query = (
|
|
||||||
meetings.update()
|
|
||||||
.where(meetings.c.id == meeting_id)
|
|
||||||
.values(num_clients=meetings.c.num_clients + 1)
|
|
||||||
)
|
|
||||||
await get_database().execute(query)
|
|
||||||
|
|
||||||
async def decrement_num_clients(self, meeting_id: str) -> None:
|
|
||||||
"""Atomically decrement participant count (min 0)."""
|
|
||||||
query = (
|
|
||||||
meetings.update()
|
|
||||||
.where(meetings.c.id == meeting_id)
|
|
||||||
.values(
|
|
||||||
num_clients=sa.case(
|
|
||||||
(meetings.c.num_clients > 0, meetings.c.num_clients - 1), else_=0
|
|
||||||
)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
await get_database().execute(query)
|
|
||||||
|
|
||||||
|
|
||||||
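A note on why `increment_num_clients` pushes the arithmetic into the UPDATE itself rather than reading the row first: a read-modify-write from the application loses updates when two webhooks race. A sketch of the contrast, with a hypothetical `meetings_controller` instance:

# Race-prone (illustrative): both handlers can read num_clients == 3 and both write 4.
#   meeting = await meetings_controller.get_by_id(meeting_id)
#   await meetings_controller.update_meeting(meeting_id, num_clients=meeting.num_clients + 1)
#
# Atomic (what the methods above do): the database serializes the increments.
#   UPDATE meeting SET num_clients = num_clients + 1 WHERE id = :meeting_id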
class MeetingConsentController:
|
class MeetingConsentController:
|
||||||
async def get_by_meeting_id(self, meeting_id: str) -> list[MeetingConsent]:
|
async def get_by_meeting_id(self, meeting_id: str) -> list[MeetingConsent]:
|
||||||
@@ -302,9 +214,10 @@ class MeetingConsentController:
|
|||||||
result = await get_database().fetch_one(query)
|
result = await get_database().fetch_one(query)
|
||||||
if result is None:
|
if result is None:
|
||||||
return None
|
return None
|
||||||
return MeetingConsent(**result)
|
return MeetingConsent(**result) if result else None
|
||||||
|
|
||||||
async def upsert(self, consent: MeetingConsent) -> MeetingConsent:
|
async def upsert(self, consent: MeetingConsent) -> MeetingConsent:
|
||||||
|
"""Create new consent or update existing one for authenticated users"""
|
||||||
if consent.user_id:
|
if consent.user_id:
|
||||||
# For authenticated users, check if consent already exists
|
# For authenticated users, check if consent already exists
|
||||||
# not transactional but we're ok with that; the consents ain't deleted anyways
|
# not transactional but we're ok with that; the consents ain't deleted anyways
|
||||||
|
|||||||
@@ -21,7 +21,6 @@ recordings = sa.Table(
|
|||||||
server_default="pending",
|
server_default="pending",
|
||||||
),
|
),
|
||||||
sa.Column("meeting_id", sa.String),
|
sa.Column("meeting_id", sa.String),
|
||||||
sa.Column("track_keys", sa.JSON, nullable=True),
|
|
||||||
sa.Index("idx_recording_meeting_id", "meeting_id"),
|
sa.Index("idx_recording_meeting_id", "meeting_id"),
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -29,13 +28,10 @@ recordings = sa.Table(
|
|||||||
class Recording(BaseModel):
|
class Recording(BaseModel):
|
||||||
id: str = Field(default_factory=generate_uuid4)
|
id: str = Field(default_factory=generate_uuid4)
|
||||||
bucket_name: str
|
bucket_name: str
|
||||||
# for single-track
|
|
||||||
object_key: str
|
object_key: str
|
||||||
recorded_at: datetime
|
recorded_at: datetime
|
||||||
status: Literal["pending", "processing", "completed", "failed"] = "pending"
|
status: Literal["pending", "processing", "completed", "failed"] = "pending"
|
||||||
meeting_id: str | None = None
|
meeting_id: str | None = None
|
||||||
# for multitrack reprocessing
|
|
||||||
track_keys: list[str] | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class RecordingController:
|
class RecordingController:
|
||||||
@@ -44,14 +40,12 @@ class RecordingController:
|
|||||||
await get_database().execute(query)
|
await get_database().execute(query)
|
||||||
return recording
|
return recording
|
||||||
|
|
||||||
async def get_by_id(self, id: str) -> Recording | None:
|
async def get_by_id(self, id: str) -> Recording:
|
||||||
query = recordings.select().where(recordings.c.id == id)
|
query = recordings.select().where(recordings.c.id == id)
|
||||||
result = await get_database().fetch_one(query)
|
result = await get_database().fetch_one(query)
|
||||||
return Recording(**result) if result else None
|
return Recording(**result) if result else None
|
||||||
|
|
||||||
async def get_by_object_key(
|
async def get_by_object_key(self, bucket_name: str, object_key: str) -> Recording:
|
||||||
self, bucket_name: str, object_key: str
|
|
||||||
) -> Recording | None:
|
|
||||||
query = recordings.select().where(
|
query = recordings.select().where(
|
||||||
recordings.c.bucket_name == bucket_name,
|
recordings.c.bucket_name == bucket_name,
|
||||||
recordings.c.object_key == object_key,
|
recordings.c.object_key == object_key,
|
||||||
@@ -63,14 +57,5 @@ class RecordingController:
|
|||||||
query = recordings.delete().where(recordings.c.id == id)
|
query = recordings.delete().where(recordings.c.id == id)
|
||||||
await get_database().execute(query)
|
await get_database().execute(query)
|
||||||
|
|
||||||
# no check for existence
|
|
||||||
async def get_by_ids(self, recording_ids: list[str]) -> list[Recording]:
|
|
||||||
if not recording_ids:
|
|
||||||
return []
|
|
||||||
|
|
||||||
query = recordings.select().where(recordings.c.id.in_(recording_ids))
|
|
||||||
results = await get_database().fetch_all(query)
|
|
||||||
return [Recording(**row) for row in results]
|
|
||||||
|
|
||||||
|
|
||||||
recordings_controller = RecordingController()
|
recordings_controller = RecordingController()
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
import secrets
|
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
from sqlite3 import IntegrityError
|
from sqlite3 import IntegrityError
|
||||||
from typing import Literal
|
from typing import Literal
|
||||||
@@ -9,8 +8,6 @@ from pydantic import BaseModel, Field
|
|||||||
from sqlalchemy.sql import false, or_
|
from sqlalchemy.sql import false, or_
|
 
 from reflector.db import get_database, metadata
-from reflector.schemas.platform import Platform
-from reflector.settings import settings
 from reflector.utils import generate_uuid4
 
 rooms = sqlalchemy.Table(
@@ -43,22 +40,7 @@ rooms = sqlalchemy.Table(
     sqlalchemy.Column(
         "is_shared", sqlalchemy.Boolean, nullable=False, server_default=false()
     ),
-    sqlalchemy.Column("webhook_url", sqlalchemy.String, nullable=True),
-    sqlalchemy.Column("webhook_secret", sqlalchemy.String, nullable=True),
-    sqlalchemy.Column("ics_url", sqlalchemy.Text),
-    sqlalchemy.Column("ics_fetch_interval", sqlalchemy.Integer, server_default="300"),
-    sqlalchemy.Column(
-        "ics_enabled", sqlalchemy.Boolean, nullable=False, server_default=false()
-    ),
-    sqlalchemy.Column("ics_last_sync", sqlalchemy.DateTime(timezone=True)),
-    sqlalchemy.Column("ics_last_etag", sqlalchemy.Text),
-    sqlalchemy.Column(
-        "platform",
-        sqlalchemy.String,
-        nullable=False,
-    ),
     sqlalchemy.Index("idx_room_is_shared", "is_shared"),
-    sqlalchemy.Index("idx_room_ics_enabled", "ics_enabled"),
 )
 
 
@@ -73,18 +55,10 @@ class Room(BaseModel):
     is_locked: bool = False
     room_mode: Literal["normal", "group"] = "normal"
     recording_type: Literal["none", "local", "cloud"] = "cloud"
-    recording_trigger: Literal[  # whereby-specific
+    recording_trigger: Literal[
         "none", "prompt", "automatic", "automatic-2nd-participant"
     ] = "automatic-2nd-participant"
     is_shared: bool = False
-    webhook_url: str | None = None
-    webhook_secret: str | None = None
-    ics_url: str | None = None
-    ics_fetch_interval: int = 300
-    ics_enabled: bool = False
-    ics_last_sync: datetime | None = None
-    ics_last_etag: str | None = None
-    platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
 
 
 class RoomController:
@@ -133,39 +107,22 @@ class RoomController:
         recording_type: str,
         recording_trigger: str,
         is_shared: bool,
-        webhook_url: str = "",
-        webhook_secret: str = "",
-        ics_url: str | None = None,
-        ics_fetch_interval: int = 300,
-        ics_enabled: bool = False,
-        platform: Platform = settings.DEFAULT_VIDEO_PLATFORM,
     ):
         """
         Add a new room
         """
-        if webhook_url and not webhook_secret:
-            webhook_secret = secrets.token_urlsafe(32)
-
-        room_data = {
-            "name": name,
-            "user_id": user_id,
-            "zulip_auto_post": zulip_auto_post,
-            "zulip_stream": zulip_stream,
-            "zulip_topic": zulip_topic,
-            "is_locked": is_locked,
-            "room_mode": room_mode,
-            "recording_type": recording_type,
-            "recording_trigger": recording_trigger,
-            "is_shared": is_shared,
-            "webhook_url": webhook_url,
-            "webhook_secret": webhook_secret,
-            "ics_url": ics_url,
-            "ics_fetch_interval": ics_fetch_interval,
-            "ics_enabled": ics_enabled,
-            "platform": platform,
-        }
-
-        room = Room(**room_data)
+        room = Room(
+            name=name,
+            user_id=user_id,
+            zulip_auto_post=zulip_auto_post,
+            zulip_stream=zulip_stream,
+            zulip_topic=zulip_topic,
+            is_locked=is_locked,
+            room_mode=room_mode,
+            recording_type=recording_type,
+            recording_trigger=recording_trigger,
+            is_shared=is_shared,
+        )
         query = rooms.insert().values(**room.model_dump())
         try:
             await get_database().execute(query)
@@ -177,9 +134,6 @@ class RoomController:
         """
         Update a room fields with key/values in values
         """
-        if values.get("webhook_url") and not values.get("webhook_secret"):
-            values["webhook_secret"] = secrets.token_urlsafe(32)
-
         query = rooms.update().where(rooms.c.id == room.id).values(**values)
         try:
             await get_database().execute(query)
@@ -229,13 +183,6 @@ class RoomController:
 
         return room
 
-    async def get_ics_enabled(self) -> list[Room]:
-        query = rooms.select().where(
-            rooms.c.ics_enabled == True, rooms.c.ics_url != None
-        )
-        results = await get_database().fetch_all(query)
-        return [Room(**result) for result in results]
-
     async def remove_by_id(
         self,
         room_id: str,
@@ -8,14 +8,12 @@ from typing import Annotated, Any, Dict, Iterator
 
 import sqlalchemy
 import webvtt
-from databases.interfaces import Record as DbRecord
 from fastapi import HTTPException
 from pydantic import (
     BaseModel,
     Field,
     NonNegativeFloat,
     NonNegativeInt,
-    TypeAdapter,
     ValidationError,
     constr,
     field_serializer,
@@ -23,10 +21,9 @@ from pydantic import (
 
 from reflector.db import get_database
 from reflector.db.rooms import rooms
-from reflector.db.transcripts import SourceKind, TranscriptStatus, transcripts
+from reflector.db.transcripts import SourceKind, transcripts
 from reflector.db.utils import is_postgresql
 from reflector.logger import logger
-from reflector.utils.string import NonEmptyString, try_parse_non_empty_string
 
 DEFAULT_SEARCH_LIMIT = 20
 SNIPPET_CONTEXT_LENGTH = 50  # Characters before/after match to include
@@ -34,13 +31,12 @@ DEFAULT_SNIPPET_MAX_LENGTH = NonNegativeInt(150)
 DEFAULT_MAX_SNIPPETS = NonNegativeInt(3)
 LONG_SUMMARY_MAX_SNIPPETS = 2
 
-SearchQueryBase = constr(min_length=1, strip_whitespace=True)
+SearchQueryBase = constr(min_length=0, strip_whitespace=True)
 SearchLimitBase = Annotated[int, Field(ge=1, le=100)]
 SearchOffsetBase = Annotated[int, Field(ge=0)]
 SearchTotalBase = Annotated[int, Field(ge=0)]
 
 SearchQuery = Annotated[SearchQueryBase, Field(description="Search query text")]
-search_query_adapter = TypeAdapter(SearchQuery)
 SearchLimit = Annotated[SearchLimitBase, Field(description="Results per page")]
 SearchOffset = Annotated[
     SearchOffsetBase, Field(description="Number of results to skip")
@@ -92,7 +88,7 @@ class WebVTTProcessor:
     @staticmethod
     def generate_snippets(
         webvtt_content: WebVTTContent,
-        query: SearchQuery,
+        query: str,
         max_snippets: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
     ) -> list[str]:
         """Generate snippets from WebVTT content."""
@@ -129,14 +125,12 @@ class SnippetCandidate:
 class SearchParameters(BaseModel):
     """Validated search parameters for full-text search."""
 
-    query_text: SearchQuery | None = None
+    query_text: SearchQuery
     limit: SearchLimit = DEFAULT_SEARCH_LIMIT
     offset: SearchOffset = 0
     user_id: str | None = None
     room_id: str | None = None
     source_kind: SourceKind | None = None
-    from_datetime: datetime | None = None
-    to_datetime: datetime | None = None
 
 
 class SearchResultDB(BaseModel):
@@ -163,7 +157,7 @@ class SearchResult(BaseModel):
     room_name: str | None = None
     source_kind: SourceKind
     created_at: datetime
-    status: TranscriptStatus = Field(..., min_length=1)
+    status: str = Field(..., min_length=1)
     rank: float = Field(..., ge=0, le=1)
     duration: NonNegativeFloat | None = Field(..., description="Duration in seconds")
     search_snippets: list[str] = Field(
@@ -205,13 +199,15 @@ class SnippetGenerator:
             prev_start = start
 
     @staticmethod
-    def count_matches(text: str, query: SearchQuery) -> NonNegativeInt:
+    def count_matches(text: str, query: str) -> NonNegativeInt:
         """Count total number of matches for a query in text."""
         ZERO = NonNegativeInt(0)
         if not text:
             logger.warning("Empty text for search query in count_matches")
             return ZERO
-        assert query is not None
+        if not query:
+            logger.warning("Empty query for search text in count_matches")
+            return ZERO
         return NonNegativeInt(
             sum(1 for _ in SnippetGenerator.find_all_matches(text, query))
         )
@@ -247,14 +243,13 @@ class SnippetGenerator:
     @staticmethod
     def generate(
         text: str,
-        query: SearchQuery,
+        query: str,
         max_length: NonNegativeInt = DEFAULT_SNIPPET_MAX_LENGTH,
         max_snippets: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
     ) -> list[str]:
         """Generate snippets from text."""
-        assert query is not None
-        if not text:
-            logger.warning("Empty text for generate_snippets")
+        if not text or not query:
+            logger.warning("Empty text or query for generate_snippets")
             return []
 
         candidates = (
@@ -275,7 +270,7 @@ class SnippetGenerator:
     @staticmethod
     def from_summary(
         summary: str,
-        query: SearchQuery,
+        query: str,
         max_snippets: NonNegativeInt = LONG_SUMMARY_MAX_SNIPPETS,
     ) -> list[str]:
         """Generate snippets from summary text."""
@@ -283,9 +278,9 @@ class SnippetGenerator:
 
     @staticmethod
     def combine_sources(
-        summary: NonEmptyString | None,
+        summary: str | None,
         webvtt: WebVTTContent | None,
-        query: SearchQuery,
+        query: str,
         max_total: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
     ) -> tuple[list[str], NonNegativeInt]:
         """Combine snippets from multiple sources and return total match count.
@@ -294,11 +289,6 @@ class SnippetGenerator:
 
         snippets can be empty for real in case of e.g. title match
         """
-
-        assert (
-            summary is not None or webvtt is not None
-        ), "At least one source must be present"
-
         webvtt_matches = 0
         summary_matches = 0
 
@@ -365,8 +355,8 @@ class SearchController:
                 else_=rooms.c.name,
             ).label("room_name"),
         ]
-        search_query = None
-        if params.query_text is not None:
+        if params.query_text:
             search_query = sqlalchemy.func.websearch_to_tsquery(
                 "english", params.query_text
             )
@@ -383,9 +373,7 @@ class SearchController:
             transcripts.join(rooms, transcripts.c.room_id == rooms.c.id, isouter=True)
         )
 
-        if params.query_text is not None:
-            # because already initialized based on params.query_text presence above
-            assert search_query is not None
+        if params.query_text:
             base_query = base_query.where(
                 transcripts.c.search_vector_en.op("@@")(search_query)
             )
@@ -404,16 +392,8 @@ class SearchController:
             base_query = base_query.where(
                 transcripts.c.source_kind == params.source_kind
            )
-        if params.from_datetime:
-            base_query = base_query.where(
-                transcripts.c.created_at >= params.from_datetime
-            )
-        if params.to_datetime:
-            base_query = base_query.where(
-                transcripts.c.created_at <= params.to_datetime
-            )
 
-        if params.query_text is not None:
+        if params.query_text:
             order_by = sqlalchemy.desc(sqlalchemy.text("rank"))
         else:
             order_by = sqlalchemy.desc(transcripts.c.created_at)
@@ -427,29 +407,19 @@ class SearchController:
         )
         total = await get_database().fetch_val(count_query)
 
-        def _process_result(r: DbRecord) -> SearchResult:
+        def _process_result(r) -> SearchResult:
             r_dict: Dict[str, Any] = dict(r)
 
             webvtt_raw: str | None = r_dict.pop("webvtt", None)
-            webvtt: WebVTTContent | None
             if webvtt_raw:
                 webvtt = WebVTTProcessor.parse(webvtt_raw)
             else:
                 webvtt = None
-
-            long_summary_r: str | None = r_dict.pop("long_summary", None)
-            long_summary: NonEmptyString = try_parse_non_empty_string(long_summary_r)
+            long_summary: str | None = r_dict.pop("long_summary", None)
             room_name: str | None = r_dict.pop("room_name", None)
             db_result = SearchResultDB.model_validate(r_dict)
-
-            at_least_one_source = webvtt is not None or long_summary is not None
-            has_query = params.query_text is not None
-            snippets, total_match_count = (
-                SnippetGenerator.combine_sources(
-                    long_summary, webvtt, params.query_text, DEFAULT_MAX_SNIPPETS
-                )
-                if has_query and at_least_one_source
-                else ([], 0)
+            snippets, total_match_count = SnippetGenerator.combine_sources(
+                long_summary, webvtt, params.query_text, DEFAULT_MAX_SNIPPETS
             )
 
             return SearchResult(
@@ -21,7 +21,7 @@ from reflector.db.utils import is_postgresql
 from reflector.logger import logger
 from reflector.processors.types import Word as ProcessorWord
 from reflector.settings import settings
-from reflector.storage import get_transcripts_storage
+from reflector.storage import get_recordings_storage, get_transcripts_storage
 from reflector.utils import generate_uuid4
 from reflector.utils.webvtt import topics_to_webvtt
 
@@ -122,15 +122,6 @@ def generate_transcript_name() -> str:
     return f"Transcript {now.strftime('%Y-%m-%d %H:%M:%S')}"
 
 
-TranscriptStatus = Literal[
-    "idle", "uploaded", "recording", "processing", "error", "ended"
-]
-
-
-class StrValue(BaseModel):
-    value: str
-
-
 class AudioWaveform(BaseModel):
     data: list[float]
 
@@ -186,7 +177,6 @@ class TranscriptParticipant(BaseModel):
     id: str = Field(default_factory=generate_uuid4)
     speaker: int | None
     name: str
-    user_id: str | None = None
 
 
 class Transcript(BaseModel):
@@ -195,7 +185,7 @@ class Transcript(BaseModel):
     id: str = Field(default_factory=generate_uuid4)
     user_id: str | None = None
     name: str = Field(default_factory=generate_transcript_name)
-    status: TranscriptStatus = "idle"
+    status: str = "idle"
     duration: float = 0
     created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
     title: str | None = None
@@ -624,9 +614,7 @@ class TranscriptController:
         )
         if recording:
             try:
-                await get_transcripts_storage().delete_file(
-                    recording.object_key, bucket=recording.bucket_name
-                )
+                await get_recordings_storage().delete_file(recording.object_key)
             except Exception as e:
                 logger.warning(
                     "Failed to delete recording object from S3",
@@ -650,19 +638,6 @@ class TranscriptController:
         query = transcripts.delete().where(transcripts.c.recording_id == recording_id)
         await get_database().execute(query)
 
-    @staticmethod
-    def user_can_mutate(transcript: Transcript, user_id: str | None) -> bool:
-        """
-        Returns True if the given user is allowed to modify the transcript.
-
-        Policy:
-        - Anonymous transcripts (user_id is None) cannot be modified via API
-        - Only the owner (matching user_id) can modify their transcript
-        """
-        if transcript.user_id is None:
-            return False
-        return user_id and transcript.user_id == user_id
-
     @asynccontextmanager
     async def transaction(self):
         """
@@ -728,13 +703,11 @@ class TranscriptController:
         """
         Download audio from storage
         """
-        storage = get_transcripts_storage()
-        try:
-            with open(transcript.audio_mp3_filename, "wb") as f:
-                await storage.stream_to_fileobj(transcript.storage_audio_path, f)
-        except Exception:
-            transcript.audio_mp3_filename.unlink(missing_ok=True)
-            raise
+        transcript.audio_mp3_filename.write_bytes(
+            await get_transcripts_storage().get_file(
+                transcript.storage_audio_path,
+            )
+        )
 
     async def upsert_participant(
         self,
@@ -759,27 +732,5 @@ class TranscriptController:
         transcript.delete_participant(participant_id)
         await self.update(transcript, {"participants": transcript.participants_dump()})
 
-    async def set_status(
-        self, transcript_id: str, status: TranscriptStatus
-    ) -> TranscriptEvent | None:
-        """
-        Update the status of a transcript
-
-        Will add an event STATUS + update the status field of transcript
-        """
-        async with self.transaction():
-            transcript = await self.get_by_id(transcript_id)
-            if not transcript:
-                raise Exception(f"Transcript {transcript_id} not found")
-            if transcript.status == status:
-                return
-            resp = await self.append_event(
-                transcript=transcript,
-                event="STATUS",
-                data=StrValue(value=status),
-            )
-            await self.update(transcript, {"status": status})
-            return resp
-
 
 transcripts_controller = TranscriptController()
@@ -1,91 +0,0 @@
-import hmac
-import secrets
-from datetime import datetime, timezone
-from hashlib import sha256
-
-import sqlalchemy
-from pydantic import BaseModel, Field
-
-from reflector.db import get_database, metadata
-from reflector.settings import settings
-from reflector.utils import generate_uuid4
-from reflector.utils.string import NonEmptyString
-
-user_api_keys = sqlalchemy.Table(
-    "user_api_key",
-    metadata,
-    sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
-    sqlalchemy.Column("user_id", sqlalchemy.String, nullable=False),
-    sqlalchemy.Column("key_hash", sqlalchemy.String, nullable=False),
-    sqlalchemy.Column("name", sqlalchemy.String, nullable=True),
-    sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
-    sqlalchemy.Index("idx_user_api_key_hash", "key_hash", unique=True),
-    sqlalchemy.Index("idx_user_api_key_user_id", "user_id"),
-)
-
-
-class UserApiKey(BaseModel):
-    id: NonEmptyString = Field(default_factory=generate_uuid4)
-    user_id: NonEmptyString
-    key_hash: NonEmptyString
-    name: NonEmptyString | None = None
-    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-
-
-class UserApiKeyController:
-    @staticmethod
-    def generate_key() -> NonEmptyString:
-        return secrets.token_urlsafe(48)
-
-    @staticmethod
-    def hash_key(key: NonEmptyString) -> str:
-        return hmac.new(
-            settings.SECRET_KEY.encode(), key.encode(), digestmod=sha256
-        ).hexdigest()
-
-    @classmethod
-    async def create_key(
-        cls,
-        user_id: NonEmptyString,
-        name: NonEmptyString | None = None,
-    ) -> tuple[UserApiKey, NonEmptyString]:
-        plaintext = cls.generate_key()
-        api_key = UserApiKey(
-            user_id=user_id,
-            key_hash=cls.hash_key(plaintext),
-            name=name,
-        )
-        query = user_api_keys.insert().values(**api_key.model_dump())
-        await get_database().execute(query)
-        return api_key, plaintext
-
-    @classmethod
-    async def verify_key(cls, plaintext_key: NonEmptyString) -> UserApiKey | None:
-        key_hash = cls.hash_key(plaintext_key)
-        query = user_api_keys.select().where(
-            user_api_keys.c.key_hash == key_hash,
-        )
-        result = await get_database().fetch_one(query)
-        return UserApiKey(**result) if result else None
-
-    @staticmethod
-    async def list_by_user_id(user_id: NonEmptyString) -> list[UserApiKey]:
-        query = (
-            user_api_keys.select()
-            .where(user_api_keys.c.user_id == user_id)
-            .order_by(user_api_keys.c.created_at.desc())
-        )
-        results = await get_database().fetch_all(query)
-        return [UserApiKey(**r) for r in results]
-
-    @staticmethod
-    async def delete_key(key_id: NonEmptyString, user_id: NonEmptyString) -> bool:
-        query = user_api_keys.delete().where(
-            (user_api_keys.c.id == key_id) & (user_api_keys.c.user_id == user_id)
-        )
-        result = await get_database().execute(query)
-        # asyncpg returns None for DELETE, consider it success if no exception
-        return result is None or result > 0
-
-
-user_api_keys_controller = UserApiKeyController()
@@ -1,92 +0,0 @@
-"""User table for storing Authentik user information."""
-
-from datetime import datetime, timezone
-
-import sqlalchemy
-from pydantic import BaseModel, Field
-
-from reflector.db import get_database, metadata
-from reflector.utils import generate_uuid4
-from reflector.utils.string import NonEmptyString
-
-users = sqlalchemy.Table(
-    "user",
-    metadata,
-    sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
-    sqlalchemy.Column("email", sqlalchemy.String, nullable=False),
-    sqlalchemy.Column("authentik_uid", sqlalchemy.String, nullable=False),
-    sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
-    sqlalchemy.Column("updated_at", sqlalchemy.DateTime(timezone=True), nullable=False),
-    sqlalchemy.Index("idx_user_authentik_uid", "authentik_uid", unique=True),
-    sqlalchemy.Index("idx_user_email", "email", unique=False),
-)
-
-
-class User(BaseModel):
-    id: NonEmptyString = Field(default_factory=generate_uuid4)
-    email: NonEmptyString
-    authentik_uid: NonEmptyString
-    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-    updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
-
-
-class UserController:
-    @staticmethod
-    async def get_by_id(user_id: NonEmptyString) -> User | None:
-        query = users.select().where(users.c.id == user_id)
-        result = await get_database().fetch_one(query)
-        return User(**result) if result else None
-
-    @staticmethod
-    async def get_by_authentik_uid(authentik_uid: NonEmptyString) -> User | None:
-        query = users.select().where(users.c.authentik_uid == authentik_uid)
-        result = await get_database().fetch_one(query)
-        return User(**result) if result else None
-
-    @staticmethod
-    async def get_by_email(email: NonEmptyString) -> User | None:
-        query = users.select().where(users.c.email == email)
-        result = await get_database().fetch_one(query)
-        return User(**result) if result else None
-
-    @staticmethod
-    async def create_or_update(
-        id: NonEmptyString, authentik_uid: NonEmptyString, email: NonEmptyString
-    ) -> User:
-        existing = await UserController.get_by_authentik_uid(authentik_uid)
-        now = datetime.now(timezone.utc)
-
-        if existing:
-            query = (
-                users.update()
-                .where(users.c.authentik_uid == authentik_uid)
-                .values(email=email, updated_at=now)
-            )
-            await get_database().execute(query)
-            return User(
-                id=existing.id,
-                authentik_uid=authentik_uid,
-                email=email,
-                created_at=existing.created_at,
-                updated_at=now,
-            )
-        else:
-            user = User(
-                id=id,
-                authentik_uid=authentik_uid,
-                email=email,
-                created_at=now,
-                updated_at=now,
-            )
-            query = users.insert().values(**user.model_dump())
-            await get_database().execute(query)
-            return user
-
-    @staticmethod
-    async def list_all() -> list[User]:
-        query = users.select().order_by(users.c.created_at.desc())
-        results = await get_database().fetch_all(query)
-        return [User(**r) for r in results]
-
-
-user_controller = UserController()
@@ -1,4 +1,3 @@
-import logging
 from typing import Type, TypeVar
 
 from llama_index.core import Settings
@@ -6,7 +5,7 @@ from llama_index.core.output_parsers import PydanticOutputParser
 from llama_index.core.program import LLMTextCompletionProgram
 from llama_index.core.response_synthesizers import TreeSummarize
 from llama_index.llms.openai_like import OpenAILike
-from pydantic import BaseModel, ValidationError
+from pydantic import BaseModel
 
 T = TypeVar("T", bound=BaseModel)
 
@@ -62,8 +61,6 @@ class LLM:
         tone_name: str | None = None,
     ) -> T:
         """Get structured output from LLM for non-function-calling models"""
-        logger = logging.getLogger(__name__)
-
         summarizer = TreeSummarize(verbose=True)
         response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
 
@@ -79,25 +76,8 @@ class LLM:
             "Please structure the above information in the following JSON format:"
         )
 
-        try:
-            output = await program.acall(
-                analysis=str(response), format_instructions=format_instructions
-            )
-        except ValidationError as e:
-            # Extract the raw JSON from the error details
-            errors = e.errors()
-            if errors and "input" in errors[0]:
-                raw_json = errors[0]["input"]
-                logger.error(
-                    f"JSON validation failed for {output_cls.__name__}. "
-                    f"Full raw JSON output:\n{raw_json}\n"
-                    f"Validation errors: {errors}"
-                )
-            else:
-                logger.error(
-                    f"JSON validation failed for {output_cls.__name__}. "
-                    f"Validation errors: {errors}"
-                )
-            raise
+        output = await program.acall(
+            analysis=str(response), format_instructions=format_instructions
+        )
 
         return output
@@ -1 +0,0 @@
-"""Pipeline modules for audio processing."""
@@ -7,34 +7,29 @@ Uses parallel processing for transcription, diarization, and waveform generation
 """
 
 import asyncio
-import uuid
 from pathlib import Path
 
 import av
 import structlog
-from celery import chain, shared_task
+from celery import shared_task
 
-from reflector.asynctask import asynctask
-from reflector.db.rooms import rooms_controller
 from reflector.db.transcripts import (
-    SourceKind,
     Transcript,
-    TranscriptStatus,
     transcripts_controller,
 )
 from reflector.logger import logger
-from reflector.pipelines import topic_processing
-from reflector.pipelines.main_live_pipeline import (
-    PipelineMainBase,
-    broadcast_to_sockets,
-    task_cleanup_consent,
-    task_pipeline_post_to_zulip,
+from reflector.pipelines.main_live_pipeline import PipelineMainBase, asynctask
+from reflector.processors import (
+    AudioFileWriterProcessor,
+    TranscriptFinalSummaryProcessor,
+    TranscriptFinalTitleProcessor,
+    TranscriptTopicDetectorProcessor,
 )
-from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
-from reflector.processors import AudioFileWriterProcessor
 from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
 from reflector.processors.file_diarization import FileDiarizationInput
 from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor
+from reflector.processors.file_transcript import FileTranscriptInput
+from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
 from reflector.processors.transcript_diarization_assembler import (
     TranscriptDiarizationAssemblerInput,
     TranscriptDiarizationAssemblerProcessor,
@@ -48,7 +43,19 @@ from reflector.processors.types import (
 )
 from reflector.settings import settings
 from reflector.storage import get_transcripts_storage
-from reflector.worker.webhook import send_transcript_webhook
 
 
+class EmptyPipeline:
+    """Empty pipeline for processors that need a pipeline reference"""
+
+    def __init__(self, logger: structlog.BoundLogger):
+        self.logger = logger
+
+    def get_pref(self, k, d=None):
+        return d
+
+    async def emit(self, event):
+        pass
+
+
 class PipelineMainFile(PipelineMainBase):
@@ -63,7 +70,7 @@ class PipelineMainFile(PipelineMainBase):
     def __init__(self, transcript_id: str):
         super().__init__(transcript_id=transcript_id)
         self.logger = logger.bind(transcript_id=self.transcript_id)
-        self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger)
+        self.empty_pipeline = EmptyPipeline(logger=self.logger)
 
     def _handle_gather_exceptions(self, results: list, operation: str) -> None:
         """Handle exceptions from asyncio.gather with return_exceptions=True"""
@@ -76,27 +83,12 @@ class PipelineMainFile(PipelineMainBase):
                     exc_info=result,
                 )
 
-    @broadcast_to_sockets
-    async def set_status(self, transcript_id: str, status: TranscriptStatus):
-        async with self.lock_transaction():
-            return await transcripts_controller.set_status(transcript_id, status)
-
     async def process(self, file_path: Path):
         """Main entry point for file processing"""
         self.logger.info(f"Starting file pipeline for {file_path}")
 
         transcript = await self.get_transcript()
 
-        # Clear transcript as we're going to regenerate everything
-        async with self.transaction():
-            await transcripts_controller.update(
-                transcript,
-                {
-                    "events": [],
-                    "topics": [],
-                },
-            )
-
         # Extract audio and write to transcript location
         audio_path = await self.extract_and_write_audio(file_path, transcript)
 
@@ -113,8 +105,6 @@ class PipelineMainFile(PipelineMainBase):
 
         self.logger.info("File pipeline complete")
 
-        await self.set_status(transcript.id, "ended")
-
     async def extract_and_write_audio(
         self, file_path: Path, transcript: Transcript
     ) -> Path:
@@ -244,7 +234,24 @@ class PipelineMainFile(PipelineMainBase):
 
     async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
         """Transcribe complete file"""
-        return await transcribe_file_with_processor(audio_url, language)
+        processor = FileTranscriptAutoProcessor()
+        input_data = FileTranscriptInput(audio_url=audio_url, language=language)
+
+        # Store result for retrieval
+        result: TranscriptType | None = None
+
+        async def capture_result(transcript):
+            nonlocal result
+            result = transcript
+
+        processor.on(capture_result)
+        await processor.push(input_data)
+        await processor.flush()
+
+        if not result:
+            raise ValueError("No transcript captured")
+
+        return result
 
     async def diarize_file(self, audio_url: str) -> list[DiarizationSegment] | None:
         """Get diarization for file"""
@@ -287,53 +294,63 @@ class PipelineMainFile(PipelineMainBase):
     async def detect_topics(
         self, transcript: TranscriptType, target_language: str
     ) -> list[TitleSummary]:
-        return await topic_processing.detect_topics(
-            transcript,
-            target_language,
-            on_topic_callback=self.on_topic,
-            empty_pipeline=self.empty_pipeline,
-        )
+        """Detect topics from complete transcript"""
+        chunk_size = 300
+        topics: list[TitleSummary] = []
+
+        async def on_topic(topic: TitleSummary):
+            topics.append(topic)
+            return await self.on_topic(topic)
+
+        topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
+        topic_detector.set_pipeline(self.empty_pipeline)
+
+        for i in range(0, len(transcript.words), chunk_size):
+            chunk_words = transcript.words[i : i + chunk_size]
+            if not chunk_words:
+                continue
+
+            chunk_transcript = TranscriptType(
+                words=chunk_words, translation=transcript.translation
+            )
+
+            await topic_detector.push(chunk_transcript)
+
+        await topic_detector.flush()
+        return topics
 
     async def generate_title(self, topics: list[TitleSummary]):
-        return await topic_processing.generate_title(
-            topics,
-            on_title_callback=self.on_title,
-            empty_pipeline=self.empty_pipeline,
-            logger=self.logger,
-        )
+        """Generate title from topics"""
+        if not topics:
+            self.logger.warning("No topics for title generation")
+            return
+
+        processor = TranscriptFinalTitleProcessor(callback=self.on_title)
+        processor.set_pipeline(self.empty_pipeline)
+
+        for topic in topics:
+            await processor.push(topic)
+
+        await processor.flush()
 
     async def generate_summaries(self, topics: list[TitleSummary]):
+        """Generate long and short summaries from topics"""
+        if not topics:
+            self.logger.warning("No topics for summary generation")
+            return
+
         transcript = await self.get_transcript()
-        return await topic_processing.generate_summaries(
-            topics,
-            transcript,
-            on_long_summary_callback=self.on_long_summary,
-            on_short_summary_callback=self.on_short_summary,
-            empty_pipeline=self.empty_pipeline,
-            logger=self.logger,
+        processor = TranscriptFinalSummaryProcessor(
+            transcript=transcript,
+            callback=self.on_long_summary,
+            on_short_summary=self.on_short_summary,
         )
-
-
-@shared_task
-@asynctask
-async def task_send_webhook_if_needed(*, transcript_id: str):
-    """Send webhook if this is a room recording with webhook configured"""
-    transcript = await transcripts_controller.get_by_id(transcript_id)
-    if not transcript:
-        return
-
-    if transcript.source_kind == SourceKind.ROOM and transcript.room_id:
-        room = await rooms_controller.get_by_id(transcript.room_id)
-        if room and room.webhook_url:
-            logger.info(
-                "Dispatching webhook",
-                transcript_id=transcript_id,
-                room_id=room.id,
-                webhook_url=room.webhook_url,
-            )
-            send_transcript_webhook.delay(
-                transcript_id, room.id, event_id=uuid.uuid4().hex
-            )
+        processor.set_pipeline(self.empty_pipeline)
+
+        for topic in topics:
+            await processor.push(topic)
+
+        await processor.flush()
 
 
 @shared_task
@@ -345,33 +362,14 @@ async def task_pipeline_file_process(*, transcript_id: str):
     if not transcript:
         raise Exception(f"Transcript {transcript_id} not found")
 
+    # Find the file to process
+    audio_file = next(transcript.data_path.glob("upload.*"), None)
+    if not audio_file:
+        audio_file = next(transcript.data_path.glob("audio.*"), None)
+
+    if not audio_file:
+        raise Exception("No audio file found to process")
+
+    # Run file pipeline
     pipeline = PipelineMainFile(transcript_id=transcript_id)
-    try:
-        await pipeline.set_status(transcript_id, "processing")
-
-        # Find the file to process
-        audio_file = next(transcript.data_path.glob("upload.*"), None)
-        if not audio_file:
-            audio_file = next(transcript.data_path.glob("audio.*"), None)
-
-        if not audio_file:
-            raise Exception("No audio file found to process")
-
-        await pipeline.process(audio_file)
-
-    except Exception as e:
-        logger.error(
-            f"File pipeline failed for transcript {transcript_id}: {type(e).__name__}: {str(e)}",
-            exc_info=True,
-            transcript_id=transcript_id,
-        )
-        await pipeline.set_status(transcript_id, "error")
-        raise
-
-    # Run post-processing chain: consent cleanup -> zulip -> webhook
-    post_chain = chain(
-        task_cleanup_consent.si(transcript_id=transcript_id),
-        task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
-        task_send_webhook_if_needed.si(transcript_id=transcript_id),
-    )
-    post_chain.delay()
+    await pipeline.process(audio_file)
@@ -17,11 +17,12 @@ from contextlib import asynccontextmanager
 from typing import Generic
 
 import av
+import boto3
 from celery import chord, current_task, group, shared_task
 from pydantic import BaseModel
 from structlog import BoundLogger as Logger
 
-from reflector.asynctask import asynctask
+from reflector.db import get_database
 from reflector.db.meetings import meeting_consent_controller, meetings_controller
 from reflector.db.recordings import recordings_controller
 from reflector.db.rooms import rooms_controller
@@ -31,7 +32,6 @@ from reflector.db.transcripts import (
     TranscriptFinalLongSummary,
     TranscriptFinalShortSummary,
     TranscriptFinalTitle,
-    TranscriptStatus,
     TranscriptText,
     TranscriptTopic,
     TranscriptWaveform,
@@ -69,6 +69,29 @@ from reflector.zulip import (
 )
 
 
+def asynctask(f):
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        async def run_with_db():
+            database = get_database()
+            await database.connect()
+            try:
+                return await f(*args, **kwargs)
+            finally:
+                await database.disconnect()
+
+        coro = run_with_db()
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            loop = None
+        if loop and loop.is_running():
+            return loop.run_until_complete(coro)
+        return asyncio.run(coro)
+
+    return wrapper
+
+
 def broadcast_to_sockets(func):
     """
     Decorator to broadcast transcript event to websockets
@@ -84,20 +107,6 @@ def broadcast_to_sockets(func):
                 message=resp.model_dump(mode="json"),
             )
 
-            transcript = await transcripts_controller.get_by_id(self.transcript_id)
-            if transcript and transcript.user_id:
-                # Emit only relevant events to the user room to avoid noisy updates.
-                # Allowed: STATUS, FINAL_TITLE, DURATION. All are prefixed with TRANSCRIPT_
-                allowed_user_events = {"STATUS", "FINAL_TITLE", "DURATION"}
-                if resp.event in allowed_user_events:
-                    await self.ws_manager.send_json(
-                        room_id=f"user:{transcript.user_id}",
-                        message={
-                            "event": f"TRANSCRIPT_{resp.event}",
-                            "data": {"id": self.transcript_id, **resp.data},
-                        },
-                    )
-
     return wrapper
 
 
@@ -178,16 +187,9 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
             for topic in topics
         ]
 
-    @asynccontextmanager
-    async def lock_transaction(self):
-        # This lock is to prevent multiple processor starting adding
-        # into event array at the same time
-        async with self._lock:
-            yield
-
     @asynccontextmanager
     async def transaction(self):
-        async with self.lock_transaction():
+        async with self._lock:
             async with transcripts_controller.transaction():
                 yield
 
@@ -196,14 +198,14 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
         # if it's the first part, update the status of the transcript
        # but do not set the ended status yet.
         if isinstance(self, PipelineMainLive):
-            status_mapping: dict[str, TranscriptStatus] = {
+            status_mapping = {
                 "started": "recording",
                 "push": "recording",
                 "flush": "processing",
                 "error": "error",
            }
         elif isinstance(self, PipelineMainFinalSummaries):
-            status_mapping: dict[str, TranscriptStatus] = {
+            status_mapping = {
                 "push": "processing",
                 "flush": "processing",
                 "error": "error",
@@ -219,8 +221,22 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
             return
 
         # when the status of the pipeline changes, update the transcript
-        async with self._lock:
-            return await transcripts_controller.set_status(self.transcript_id, status)
+        async with self.transaction():
+            transcript = await self.get_transcript()
+            if status == transcript.status:
+                return
+            resp = await transcripts_controller.append_event(
+                transcript=transcript,
+                event="STATUS",
+                data=StrValue(value=status),
+            )
+            await transcripts_controller.update(
+                transcript,
+                {
+                    "status": status,
+                },
+            )
+            return resp
 
     @broadcast_to_sockets
     async def on_transcript(self, data):
@@ -583,7 +599,6 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
 
     consent_denied = False
     recording = None
-    meeting = None
     try:
         if transcript.recording_id:
             recording = await recordings_controller.get_by_id(transcript.recording_id)
@@ -594,8 +609,8 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
                 meeting.id
             )
     except Exception as e:
-        logger.error(f"Failed to fetch consent: {e}", exc_info=e)
-        raise
+        logger.error(f"Failed to get fetch consent: {e}", exc_info=e)
+        consent_denied = True
 
     if not consent_denied:
         logger.info("Consent approved, keeping all files")
@@ -603,24 +618,25 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
 
     logger.info("Consent denied, cleaning up all related audio files")
 
-    deletion_errors = []
-    if recording and recording.bucket_name:
-        keys_to_delete = []
-        if recording.track_keys:
-            keys_to_delete = recording.track_keys
-        elif recording.object_key:
-            keys_to_delete = [recording.object_key]
-
-        master_storage = get_transcripts_storage()
-        for key in keys_to_delete:
-            try:
-                await master_storage.delete_file(key, bucket=recording.bucket_name)
-                logger.info(f"Deleted recording file: {recording.bucket_name}/{key}")
-            except Exception as e:
-                error_msg = f"Failed to delete {key}: {e}"
-                logger.error(error_msg, exc_info=e)
-                deletion_errors.append(error_msg)
+    if recording and recording.bucket_name and recording.object_key:
+        s3_whereby = boto3.client(
+            "s3",
+            aws_access_key_id=settings.AWS_WHEREBY_ACCESS_KEY_ID,
+            aws_secret_access_key=settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
+        )
+        try:
+            s3_whereby.delete_object(
+                Bucket=recording.bucket_name, Key=recording.object_key
+            )
+            logger.info(
+                f"Deleted original Whereby recording: {recording.bucket_name}/{recording.object_key}"
+            )
+        except Exception as e:
+            logger.error(f"Failed to delete Whereby recording: {e}", exc_info=e)
 
+    # non-transactional, files marked for deletion not actually deleted is possible
+    await transcripts_controller.update(transcript, {"audio_deleted": True})
+    # 2. Delete processed audio from transcript storage S3 bucket
     if transcript.audio_location == "storage":
         storage = get_transcripts_storage()
         try:
@@ -629,28 +645,18 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
                 f"Deleted processed audio from storage: {transcript.storage_audio_path}"
             )
         except Exception as e:
-            error_msg = f"Failed to delete processed audio: {e}"
-            logger.error(error_msg, exc_info=e)
-            deletion_errors.append(error_msg)
+            logger.error(f"Failed to delete processed audio: {e}", exc_info=e)
 
+    # 3. Delete local audio files
     try:
         if hasattr(transcript, "audio_mp3_filename") and transcript.audio_mp3_filename:
             transcript.audio_mp3_filename.unlink(missing_ok=True)
        if hasattr(transcript, "audio_wav_filename") and transcript.audio_wav_filename:
             transcript.audio_wav_filename.unlink(missing_ok=True)
     except Exception as e:
-        error_msg = f"Failed to delete local audio files: {e}"
-        logger.error(error_msg, exc_info=e)
-        deletion_errors.append(error_msg)
+        logger.error(f"Failed to delete local audio files: {e}", exc_info=e)
 
-    if deletion_errors:
-        logger.warning(
-            f"Consent cleanup completed with {len(deletion_errors)} errors",
-            errors=deletion_errors,
-        )
-    else:
-        await transcripts_controller.update(transcript, {"audio_deleted": True})
-        logger.info("Consent cleanup done - all audio deleted")
+    logger.info("Consent cleanup done")
 
 
 @get_transcript
@@ -788,7 +794,7 @@ def pipeline_post(*, transcript_id: str):
         chain_final_summaries,
     ) | task_pipeline_post_to_zulip.si(transcript_id=transcript_id)
 
-    return chain.delay()
+    chain.delay()
 
 
 @get_transcript
@@ -1,695 +0,0 @@
-import asyncio
-import math
-import tempfile
-from fractions import Fraction
-from pathlib import Path
-
-import av
-from av.audio.resampler import AudioResampler
-from celery import chain, shared_task
-
-from reflector.asynctask import asynctask
-from reflector.db.transcripts import (
-    TranscriptStatus,
-    TranscriptWaveform,
-    transcripts_controller,
-)
-from reflector.logger import logger
-from reflector.pipelines import topic_processing
-from reflector.pipelines.main_file_pipeline import task_send_webhook_if_needed
-from reflector.pipelines.main_live_pipeline import (
-    PipelineMainBase,
-    broadcast_to_sockets,
-    task_cleanup_consent,
-    task_pipeline_post_to_zulip,
-)
-from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
-from reflector.processors import AudioFileWriterProcessor
-from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
-from reflector.processors.types import TitleSummary
-from reflector.processors.types import Transcript as TranscriptType
-from reflector.storage import Storage, get_transcripts_storage
-from reflector.utils.string import NonEmptyString
-
-# Audio encoding constants
-OPUS_STANDARD_SAMPLE_RATE = 48000
-OPUS_DEFAULT_BIT_RATE = 128000
-
-# Storage operation constants
-PRESIGNED_URL_EXPIRATION_SECONDS = 7200  # 2 hours
-
-
-class PipelineMainMultitrack(PipelineMainBase):
-    def __init__(self, transcript_id: str):
-        super().__init__(transcript_id=transcript_id)
-        self.logger = logger.bind(transcript_id=self.transcript_id)
-        self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger)
-
-    async def pad_track_for_transcription(
-        self,
-        track_url: NonEmptyString,
-        track_idx: int,
-        storage: Storage,
-    ) -> NonEmptyString:
-        """
-        Pad a single track with silence based on stream metadata start_time.
-        Downloads from S3 presigned URL, processes via PyAV using tempfile, uploads to S3.
-        Returns presigned URL of padded track (or original URL if no padding needed).
-
-        Memory usage:
-        - Pattern: fixed_overhead(2-5MB) for PyAV codec/filters
-        - PyAV streams input efficiently (no full download, verified)
-        - Output written to tempfile (disk-based, not memory)
-        - Upload streams from file handle (boto3 chunks, typically 5-10MB)
-
-        Daily.co raw-tracks timing - Two approaches:
-
-        CURRENT APPROACH (PyAV metadata):
-        The WebM stream.start_time field encodes MEETING-RELATIVE timing:
-        - t=0: When Daily.co recording started (first participant joined)
-        - start_time=8.13s: This participant's track began 8.13s after recording started
-        - Purpose: Enables track alignment without external manifest files
-
-        This is NOT:
-        - Stream-internal offset (first packet timestamp relative to stream start)
-        - Absolute/wall-clock time
-        - Recording duration
-
-        ALTERNATIVE APPROACH (filename parsing):
-        Daily.co filenames contain Unix timestamps (milliseconds):
-        Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}.webm
-        Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm
-
-        Can calculate offset: (track_start_ts - recording_start_ts) / 1000
-        - Track 0: (1760988935922 - 1760988935484) / 1000 = 0.438s
-        - Track 1: (1760988943823 - 1760988935484) / 1000 = 8.339s
-
-        TIME DIFFERENCE: PyAV metadata vs filename timestamps differ by ~209ms:
-        - Track 0: filename=438ms, metadata=229ms (diff: 209ms)
-        - Track 1: filename=8339ms, metadata=8130ms (diff: 209ms)
-
-        Consistent delta suggests network/encoding delay. PyAV metadata is ground truth
-        (represents when audio stream actually started vs when file upload initiated).
-
-        Example with 2 participants:
-        Track A: start_time=0.2s → Joined 200ms after recording began
|
|
||||||
Track B: start_time=8.1s → Joined 8.1 seconds later
|
|
||||||
|
|
||||||
After padding:
|
|
||||||
Track A: [0.2s silence] + [speech...]
|
|
||||||
Track B: [8.1s silence] + [speech...]
|
|
||||||
|
|
||||||
Whisper transcription timestamps are now synchronized:
|
|
||||||
Track A word at 5.0s → happened at meeting t=5.0s
|
|
||||||
Track B word at 10.0s → happened at meeting t=10.0s
|
|
||||||
|
|
||||||
Merging just sorts by timestamp - no offset calculation needed.
|
|
||||||
|
|
||||||
Padding coincidentally involves re-encoding. It's important when we work with Daily.co + Whisper.
|
|
||||||
This is because Daily.co returns recordings with skipped frames e.g. when microphone muted.
|
|
||||||
Daily.co doesn't understand those frames and ignores them, causing timestamp issues in transcription.
|
|
||||||
Re-encoding restores those frames. We do padding and re-encoding together just because it's convenient and more performant:
|
|
||||||
we need padded values for mix mp3 anyways
|
|
||||||
"""
|
|
||||||
|
|
||||||
transcript = await self.get_transcript()
|
|
||||||
|
|
||||||
try:
|
|
||||||
# PyAV streams input from S3 URL efficiently (2-5MB fixed overhead for codec/filters)
|
|
||||||
with av.open(track_url) as in_container:
|
|
||||||
start_time_seconds = self._extract_stream_start_time_from_container(
|
|
||||||
in_container, track_idx
|
|
||||||
)
|
|
||||||
|
|
||||||
if start_time_seconds <= 0:
|
|
||||||
self.logger.info(
|
|
||||||
f"Track {track_idx} requires no padding (start_time={start_time_seconds}s)",
|
|
||||||
track_idx=track_idx,
|
|
||||||
)
|
|
||||||
return track_url
|
|
||||||
|
|
||||||
# Use tempfile instead of BytesIO for better memory efficiency
|
|
||||||
# Reduces peak memory usage during encoding/upload
|
|
||||||
with tempfile.NamedTemporaryFile(
|
|
||||||
suffix=".webm", delete=False
|
|
||||||
) as temp_file:
|
|
||||||
temp_path = temp_file.name
|
|
||||||
|
|
||||||
try:
|
|
||||||
self._apply_audio_padding_to_file(
|
|
||||||
in_container, temp_path, start_time_seconds, track_idx
|
|
||||||
)
|
|
||||||
|
|
||||||
storage_path = (
|
|
||||||
f"file_pipeline/{transcript.id}/tracks/padded_{track_idx}.webm"
|
|
||||||
)
|
|
||||||
|
|
||||||
# Upload using file handle for streaming
|
|
||||||
with open(temp_path, "rb") as padded_file:
|
|
||||||
await storage.put_file(storage_path, padded_file)
|
|
||||||
finally:
|
|
||||||
# Clean up temp file
|
|
||||||
Path(temp_path).unlink(missing_ok=True)
|
|
||||||
|
|
||||||
padded_url = await storage.get_file_url(
|
|
||||||
storage_path,
|
|
||||||
operation="get_object",
|
|
||||||
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.logger.info(
|
|
||||||
f"Successfully padded track {track_idx}",
|
|
||||||
track_idx=track_idx,
|
|
||||||
start_time_seconds=start_time_seconds,
|
|
||||||
padded_url=padded_url,
|
|
||||||
)
|
|
||||||
|
|
||||||
return padded_url
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
self.logger.error(
|
|
||||||
f"Failed to process track {track_idx}",
|
|
||||||
track_idx=track_idx,
|
|
||||||
url=track_url,
|
|
||||||
error=str(e),
|
|
||||||
exc_info=True,
|
|
||||||
)
|
|
||||||
raise Exception(
|
|
||||||
f"Track {track_idx} padding failed - transcript would have incorrect timestamps"
|
|
||||||
) from e
|
|
||||||
|
|
||||||
def _extract_stream_start_time_from_container(
|
|
||||||
self, container, track_idx: int
|
|
||||||
) -> float:
|
|
||||||
"""
|
|
||||||
Extract meeting-relative start time from WebM stream metadata.
|
|
||||||
Uses PyAV to read stream.start_time from WebM container.
|
|
||||||
More accurate than filename timestamps by ~209ms due to network/encoding delays.
|
|
||||||
"""
|
|
||||||
start_time_seconds = 0.0
|
|
||||||
try:
|
|
||||||
audio_streams = [s for s in container.streams if s.type == "audio"]
|
|
||||||
stream = audio_streams[0] if audio_streams else container.streams[0]
|
|
||||||
|
|
||||||
# 1) Try stream-level start_time (most reliable for Daily.co tracks)
|
|
||||||
if stream.start_time is not None and stream.time_base is not None:
|
|
||||||
start_time_seconds = float(stream.start_time * stream.time_base)
|
|
||||||
|
|
||||||
# 2) Fallback to container-level start_time (in av.time_base units)
|
|
||||||
if (start_time_seconds <= 0) and (container.start_time is not None):
|
|
||||||
start_time_seconds = float(container.start_time * av.time_base)
|
|
||||||
|
|
||||||
# 3) Fallback to first packet DTS in stream.time_base
|
|
||||||
if start_time_seconds <= 0:
|
|
||||||
for packet in container.demux(stream):
|
|
||||||
if packet.dts is not None:
|
|
||||||
start_time_seconds = float(packet.dts * stream.time_base)
|
|
||||||
break
|
|
||||||
except Exception as e:
|
|
||||||
self.logger.warning(
|
|
||||||
"PyAV metadata read failed; assuming 0 start_time",
|
|
||||||
track_idx=track_idx,
|
|
||||||
error=str(e),
|
|
||||||
)
|
|
||||||
start_time_seconds = 0.0
|
|
||||||
|
|
||||||
self.logger.info(
|
|
||||||
f"Track {track_idx} stream metadata: start_time={start_time_seconds:.3f}s",
|
|
||||||
track_idx=track_idx,
|
|
||||||
)
|
|
||||||
return start_time_seconds
|
|
||||||
|
|
||||||
def _apply_audio_padding_to_file(
|
|
||||||
self,
|
|
||||||
in_container,
|
|
||||||
output_path: str,
|
|
||||||
start_time_seconds: float,
|
|
||||||
track_idx: int,
|
|
||||||
) -> None:
|
|
||||||
"""Apply silence padding to audio track using PyAV filter graph, writing to file"""
|
|
||||||
delay_ms = math.floor(start_time_seconds * 1000)
|
|
||||||
|
|
||||||
self.logger.info(
|
|
||||||
f"Padding track {track_idx} with {delay_ms}ms delay using PyAV",
|
|
||||||
track_idx=track_idx,
|
|
||||||
delay_ms=delay_ms,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
with av.open(output_path, "w", format="webm") as out_container:
|
|
||||||
in_stream = next(
|
|
||||||
(s for s in in_container.streams if s.type == "audio"), None
|
|
||||||
)
|
|
||||||
if in_stream is None:
|
|
||||||
raise Exception("No audio stream in input")
|
|
||||||
|
|
||||||
out_stream = out_container.add_stream(
|
|
||||||
"libopus", rate=OPUS_STANDARD_SAMPLE_RATE
|
|
||||||
)
|
|
||||||
out_stream.bit_rate = OPUS_DEFAULT_BIT_RATE
|
|
||||||
graph = av.filter.Graph()
|
|
||||||
|
|
||||||
abuf_args = (
|
|
||||||
f"time_base=1/{OPUS_STANDARD_SAMPLE_RATE}:"
|
|
||||||
f"sample_rate={OPUS_STANDARD_SAMPLE_RATE}:"
|
|
||||||
f"sample_fmt=s16:"
|
|
||||||
f"channel_layout=stereo"
|
|
||||||
)
|
|
||||||
src = graph.add("abuffer", args=abuf_args, name="src")
|
|
||||||
aresample_f = graph.add("aresample", args="async=1", name="ares")
|
|
||||||
# adelay requires one delay value per channel separated by '|'
|
|
||||||
delays_arg = f"{delay_ms}|{delay_ms}"
|
|
||||||
adelay_f = graph.add(
|
|
||||||
"adelay", args=f"delays={delays_arg}:all=1", name="delay"
|
|
||||||
)
|
|
||||||
sink = graph.add("abuffersink", name="sink")
|
|
||||||
|
|
||||||
src.link_to(aresample_f)
|
|
||||||
aresample_f.link_to(adelay_f)
|
|
||||||
adelay_f.link_to(sink)
|
|
||||||
graph.configure()
|
|
||||||
|
|
||||||
resampler = AudioResampler(
|
|
||||||
format="s16", layout="stereo", rate=OPUS_STANDARD_SAMPLE_RATE
|
|
||||||
)
|
|
||||||
# Decode -> resample -> push through graph -> encode Opus
|
|
||||||
for frame in in_container.decode(in_stream):
|
|
||||||
out_frames = resampler.resample(frame) or []
|
|
||||||
for rframe in out_frames:
|
|
||||||
rframe.sample_rate = OPUS_STANDARD_SAMPLE_RATE
|
|
||||||
rframe.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
|
|
||||||
src.push(rframe)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
f_out = sink.pull()
|
|
||||||
except Exception:
|
|
||||||
break
|
|
||||||
f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
|
|
||||||
f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
|
|
||||||
for packet in out_stream.encode(f_out):
|
|
||||||
out_container.mux(packet)
|
|
||||||
|
|
||||||
src.push(None)
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
f_out = sink.pull()
|
|
||||||
except Exception:
|
|
||||||
break
|
|
||||||
f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
|
|
||||||
f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
|
|
||||||
for packet in out_stream.encode(f_out):
|
|
||||||
out_container.mux(packet)
|
|
||||||
|
|
||||||
for packet in out_stream.encode(None):
|
|
||||||
out_container.mux(packet)
|
|
||||||
except Exception as e:
|
|
||||||
self.logger.error(
|
|
||||||
"PyAV padding failed for track",
|
|
||||||
track_idx=track_idx,
|
|
||||||
delay_ms=delay_ms,
|
|
||||||
error=str(e),
|
|
||||||
exc_info=True,
|
|
||||||
)
|
|
||||||
raise
|
|
||||||
|
|
||||||
async def mixdown_tracks(
|
|
||||||
self,
|
|
||||||
track_urls: list[str],
|
|
||||||
writer: AudioFileWriterProcessor,
|
|
||||||
offsets_seconds: list[float] | None = None,
|
|
||||||
) -> None:
|
|
||||||
"""Multi-track mixdown using PyAV filter graph (amix), reading from S3 presigned URLs"""
|
|
||||||
|
|
||||||
target_sample_rate: int | None = None
|
|
||||||
for url in track_urls:
|
|
||||||
if not url:
|
|
||||||
continue
|
|
||||||
container = None
|
|
||||||
try:
|
|
||||||
container = av.open(url)
|
|
||||||
for frame in container.decode(audio=0):
|
|
||||||
target_sample_rate = frame.sample_rate
|
|
||||||
break
|
|
||||||
except Exception:
|
|
||||||
continue
|
|
||||||
finally:
|
|
||||||
if container is not None:
|
|
||||||
container.close()
|
|
||||||
if target_sample_rate:
|
|
||||||
break
|
|
||||||
|
|
||||||
if not target_sample_rate:
|
|
||||||
self.logger.error("Mixdown failed - no decodable audio frames found")
|
|
||||||
raise Exception("Mixdown failed: No decodable audio frames in any track")
|
|
||||||
# Build PyAV filter graph:
|
|
||||||
# N abuffer (s32/stereo)
|
|
||||||
# -> optional adelay per input (for alignment)
|
|
||||||
# -> amix (s32)
|
|
||||||
# -> aformat(s16)
|
|
||||||
# -> sink
|
|
||||||
graph = av.filter.Graph()
|
|
||||||
inputs = []
|
|
||||||
valid_track_urls = [url for url in track_urls if url]
|
|
||||||
input_offsets_seconds = None
|
|
||||||
if offsets_seconds is not None:
|
|
||||||
input_offsets_seconds = [
|
|
||||||
offsets_seconds[i] for i, url in enumerate(track_urls) if url
|
|
||||||
]
|
|
||||||
for idx, url in enumerate(valid_track_urls):
|
|
||||||
args = (
|
|
||||||
f"time_base=1/{target_sample_rate}:"
|
|
||||||
f"sample_rate={target_sample_rate}:"
|
|
||||||
f"sample_fmt=s32:"
|
|
||||||
f"channel_layout=stereo"
|
|
||||||
)
|
|
||||||
in_ctx = graph.add("abuffer", args=args, name=f"in{idx}")
|
|
||||||
inputs.append(in_ctx)
|
|
||||||
|
|
||||||
if not inputs:
|
|
||||||
self.logger.error("Mixdown failed - no valid inputs for graph")
|
|
||||||
raise Exception("Mixdown failed: No valid inputs for filter graph")
|
|
||||||
|
|
||||||
mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix")
|
|
||||||
|
|
||||||
fmt = graph.add(
|
|
||||||
"aformat",
|
|
||||||
args=(
|
|
||||||
f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}"
|
|
||||||
),
|
|
||||||
name="fmt",
|
|
||||||
)
|
|
||||||
|
|
||||||
sink = graph.add("abuffersink", name="out")
|
|
||||||
|
|
||||||
# Optional per-input delay before mixing
|
|
||||||
delays_ms: list[int] = []
|
|
||||||
if input_offsets_seconds is not None:
|
|
||||||
base = min(input_offsets_seconds) if input_offsets_seconds else 0.0
|
|
||||||
delays_ms = [
|
|
||||||
max(0, int(round((o - base) * 1000))) for o in input_offsets_seconds
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
delays_ms = [0 for _ in inputs]
|
|
||||||
|
|
||||||
for idx, in_ctx in enumerate(inputs):
|
|
||||||
delay_ms = delays_ms[idx] if idx < len(delays_ms) else 0
|
|
||||||
if delay_ms > 0:
|
|
||||||
# adelay requires one value per channel; use same for stereo
|
|
||||||
adelay = graph.add(
|
|
||||||
"adelay",
|
|
||||||
args=f"delays={delay_ms}|{delay_ms}:all=1",
|
|
||||||
name=f"delay{idx}",
|
|
||||||
)
|
|
||||||
in_ctx.link_to(adelay)
|
|
||||||
adelay.link_to(mixer, 0, idx)
|
|
||||||
else:
|
|
||||||
in_ctx.link_to(mixer, 0, idx)
|
|
||||||
mixer.link_to(fmt)
|
|
||||||
fmt.link_to(sink)
|
|
||||||
graph.configure()
|
|
||||||
|
|
||||||
containers = []
|
|
||||||
try:
|
|
||||||
# Open all containers with cleanup guaranteed
|
|
||||||
for i, url in enumerate(valid_track_urls):
|
|
||||||
try:
|
|
||||||
c = av.open(url)
|
|
||||||
containers.append(c)
|
|
||||||
except Exception as e:
|
|
||||||
self.logger.warning(
|
|
||||||
"Mixdown: failed to open container from URL",
|
|
||||||
input=i,
|
|
||||||
url=url,
|
|
||||||
error=str(e),
|
|
||||||
)
|
|
||||||
|
|
||||||
if not containers:
|
|
||||||
self.logger.error("Mixdown failed - no valid containers opened")
|
|
||||||
raise Exception("Mixdown failed: Could not open any track containers")
|
|
||||||
|
|
||||||
decoders = [c.decode(audio=0) for c in containers]
|
|
||||||
active = [True] * len(decoders)
|
|
||||||
resamplers = [
|
|
||||||
AudioResampler(format="s32", layout="stereo", rate=target_sample_rate)
|
|
||||||
for _ in decoders
|
|
||||||
]
|
|
||||||
|
|
||||||
while any(active):
|
|
||||||
for i, (dec, is_active) in enumerate(zip(decoders, active)):
|
|
||||||
if not is_active:
|
|
||||||
continue
|
|
||||||
try:
|
|
||||||
frame = next(dec)
|
|
||||||
except StopIteration:
|
|
||||||
active[i] = False
|
|
||||||
continue
|
|
||||||
|
|
||||||
if frame.sample_rate != target_sample_rate:
|
|
||||||
continue
|
|
||||||
out_frames = resamplers[i].resample(frame) or []
|
|
||||||
for rf in out_frames:
|
|
||||||
rf.sample_rate = target_sample_rate
|
|
||||||
rf.time_base = Fraction(1, target_sample_rate)
|
|
||||||
inputs[i].push(rf)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
mixed = sink.pull()
|
|
||||||
except Exception:
|
|
||||||
break
|
|
||||||
mixed.sample_rate = target_sample_rate
|
|
||||||
mixed.time_base = Fraction(1, target_sample_rate)
|
|
||||||
await writer.push(mixed)
|
|
||||||
|
|
||||||
for in_ctx in inputs:
|
|
||||||
in_ctx.push(None)
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
mixed = sink.pull()
|
|
||||||
except Exception:
|
|
||||||
break
|
|
||||||
mixed.sample_rate = target_sample_rate
|
|
||||||
mixed.time_base = Fraction(1, target_sample_rate)
|
|
||||||
await writer.push(mixed)
|
|
||||||
finally:
|
|
||||||
# Cleanup all containers, even if processing failed
|
|
||||||
for c in containers:
|
|
||||||
if c is not None:
|
|
||||||
try:
|
|
||||||
c.close()
|
|
||||||
except Exception:
|
|
||||||
pass # Best effort cleanup
|
|
||||||
|
|
||||||
@broadcast_to_sockets
|
|
||||||
async def set_status(self, transcript_id: str, status: TranscriptStatus):
|
|
||||||
async with self.lock_transaction():
|
|
||||||
return await transcripts_controller.set_status(transcript_id, status)
|
|
||||||
|
|
||||||
async def on_waveform(self, data):
|
|
||||||
async with self.transaction():
|
|
||||||
waveform = TranscriptWaveform(waveform=data)
|
|
||||||
transcript = await self.get_transcript()
|
|
||||||
return await transcripts_controller.append_event(
|
|
||||||
transcript=transcript, event="WAVEFORM", data=waveform
|
|
||||||
)
|
|
||||||
|
|
||||||
async def process(self, bucket_name: str, track_keys: list[str]):
|
|
||||||
transcript = await self.get_transcript()
|
|
||||||
async with self.transaction():
|
|
||||||
await transcripts_controller.update(
|
|
||||||
transcript,
|
|
||||||
{
|
|
||||||
"events": [],
|
|
||||||
"topics": [],
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
source_storage = get_transcripts_storage()
|
|
||||||
transcript_storage = source_storage
|
|
||||||
|
|
||||||
track_urls: list[str] = []
|
|
||||||
for key in track_keys:
|
|
||||||
url = await source_storage.get_file_url(
|
|
||||||
key,
|
|
||||||
operation="get_object",
|
|
||||||
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
|
|
||||||
bucket=bucket_name,
|
|
||||||
)
|
|
||||||
track_urls.append(url)
|
|
||||||
self.logger.info(
|
|
||||||
f"Generated presigned URL for track from {bucket_name}",
|
|
||||||
key=key,
|
|
||||||
)
|
|
||||||
|
|
||||||
created_padded_files = set()
|
|
||||||
padded_track_urls: list[str] = []
|
|
||||||
for idx, url in enumerate(track_urls):
|
|
||||||
padded_url = await self.pad_track_for_transcription(
|
|
||||||
url, idx, transcript_storage
|
|
||||||
)
|
|
||||||
padded_track_urls.append(padded_url)
|
|
||||||
if padded_url != url:
|
|
||||||
storage_path = f"file_pipeline/{transcript.id}/tracks/padded_{idx}.webm"
|
|
||||||
created_padded_files.add(storage_path)
|
|
||||||
self.logger.info(f"Track {idx} processed, padded URL: {padded_url}")
|
|
||||||
|
|
||||||
transcript.data_path.mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
mp3_writer = AudioFileWriterProcessor(
|
|
||||||
path=str(transcript.audio_mp3_filename),
|
|
||||||
on_duration=self.on_duration,
|
|
||||||
)
|
|
||||||
await self.mixdown_tracks(padded_track_urls, mp3_writer, offsets_seconds=None)
|
|
||||||
await mp3_writer.flush()
|
|
||||||
|
|
||||||
if not transcript.audio_mp3_filename.exists():
|
|
||||||
raise Exception(
|
|
||||||
"Mixdown failed - no MP3 file generated. Cannot proceed without playable audio."
|
|
||||||
)
|
|
||||||
|
|
||||||
storage_path = f"{transcript.id}/audio.mp3"
|
|
||||||
# Use file handle streaming to avoid loading entire MP3 into memory
|
|
||||||
mp3_size = transcript.audio_mp3_filename.stat().st_size
|
|
||||||
with open(transcript.audio_mp3_filename, "rb") as mp3_file:
|
|
||||||
await transcript_storage.put_file(storage_path, mp3_file)
|
|
||||||
mp3_url = await transcript_storage.get_file_url(storage_path)
|
|
||||||
|
|
||||||
await transcripts_controller.update(transcript, {"audio_location": "storage"})
|
|
||||||
|
|
||||||
self.logger.info(
|
|
||||||
f"Uploaded mixed audio to storage",
|
|
||||||
storage_path=storage_path,
|
|
||||||
size=mp3_size,
|
|
||||||
url=mp3_url,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.logger.info("Generating waveform from mixed audio")
|
|
||||||
waveform_processor = AudioWaveformProcessor(
|
|
||||||
audio_path=transcript.audio_mp3_filename,
|
|
||||||
waveform_path=transcript.audio_waveform_filename,
|
|
||||||
on_waveform=self.on_waveform,
|
|
||||||
)
|
|
||||||
waveform_processor.set_pipeline(self.empty_pipeline)
|
|
||||||
await waveform_processor.flush()
|
|
||||||
self.logger.info("Waveform generated successfully")
|
|
||||||
|
|
||||||
speaker_transcripts: list[TranscriptType] = []
|
|
||||||
for idx, padded_url in enumerate(padded_track_urls):
|
|
||||||
if not padded_url:
|
|
||||||
continue
|
|
||||||
|
|
||||||
t = await self.transcribe_file(padded_url, transcript.source_language)
|
|
||||||
|
|
||||||
if not t.words:
|
|
||||||
self.logger.debug(f"no words in track {idx}")
|
|
||||||
# not skipping, it may be silence or indistinguishable mumbling
|
|
||||||
|
|
||||||
for w in t.words:
|
|
||||||
w.speaker = idx
|
|
||||||
|
|
||||||
speaker_transcripts.append(t)
|
|
||||||
self.logger.info(
|
|
||||||
f"Track {idx} transcribed successfully with {len(t.words)} words",
|
|
||||||
track_idx=idx,
|
|
||||||
)
|
|
||||||
|
|
||||||
valid_track_count = len([url for url in padded_track_urls if url])
|
|
||||||
if valid_track_count > 0 and len(speaker_transcripts) != valid_track_count:
|
|
||||||
raise Exception(
|
|
||||||
f"Only {len(speaker_transcripts)}/{valid_track_count} tracks transcribed successfully. "
|
|
||||||
f"All tracks must succeed to avoid incomplete transcripts."
|
|
||||||
)
|
|
||||||
|
|
||||||
if not speaker_transcripts:
|
|
||||||
raise Exception("No valid track transcriptions")
|
|
||||||
|
|
||||||
self.logger.info(f"Cleaning up {len(created_padded_files)} temporary S3 files")
|
|
||||||
cleanup_tasks = []
|
|
||||||
for storage_path in created_padded_files:
|
|
||||||
cleanup_tasks.append(transcript_storage.delete_file(storage_path))
|
|
||||||
|
|
||||||
if cleanup_tasks:
|
|
||||||
cleanup_results = await asyncio.gather(
|
|
||||||
*cleanup_tasks, return_exceptions=True
|
|
||||||
)
|
|
||||||
for storage_path, result in zip(created_padded_files, cleanup_results):
|
|
||||||
if isinstance(result, Exception):
|
|
||||||
self.logger.warning(
|
|
||||||
"Failed to cleanup temporary padded track",
|
|
||||||
storage_path=storage_path,
|
|
||||||
error=str(result),
|
|
||||||
)
|
|
||||||
|
|
||||||
merged_words = []
|
|
||||||
for t in speaker_transcripts:
|
|
||||||
merged_words.extend(t.words)
|
|
||||||
merged_words.sort(
|
|
||||||
key=lambda w: w.start if hasattr(w, "start") and w.start is not None else 0
|
|
||||||
)
|
|
||||||
|
|
||||||
merged_transcript = TranscriptType(words=merged_words, translation=None)
|
|
||||||
|
|
||||||
await self.on_transcript(merged_transcript)
|
|
||||||
|
|
||||||
topics = await self.detect_topics(merged_transcript, transcript.target_language)
|
|
||||||
await asyncio.gather(
|
|
||||||
self.generate_title(topics),
|
|
||||||
self.generate_summaries(topics),
|
|
||||||
return_exceptions=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
await self.set_status(transcript.id, "ended")
|
|
||||||
|
|
||||||
async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
|
|
||||||
return await transcribe_file_with_processor(audio_url, language)
|
|
||||||
|
|
||||||
async def detect_topics(
|
|
||||||
self, transcript: TranscriptType, target_language: str
|
|
||||||
) -> list[TitleSummary]:
|
|
||||||
return await topic_processing.detect_topics(
|
|
||||||
transcript,
|
|
||||||
target_language,
|
|
||||||
on_topic_callback=self.on_topic,
|
|
||||||
empty_pipeline=self.empty_pipeline,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def generate_title(self, topics: list[TitleSummary]):
|
|
||||||
return await topic_processing.generate_title(
|
|
||||||
topics,
|
|
||||||
on_title_callback=self.on_title,
|
|
||||||
empty_pipeline=self.empty_pipeline,
|
|
||||||
logger=self.logger,
|
|
||||||
)
|
|
||||||
|
|
||||||
async def generate_summaries(self, topics: list[TitleSummary]):
|
|
||||||
transcript = await self.get_transcript()
|
|
||||||
return await topic_processing.generate_summaries(
|
|
||||||
topics,
|
|
||||||
transcript,
|
|
||||||
on_long_summary_callback=self.on_long_summary,
|
|
||||||
on_short_summary_callback=self.on_short_summary,
|
|
||||||
empty_pipeline=self.empty_pipeline,
|
|
||||||
logger=self.logger,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@shared_task
@asynctask
async def task_pipeline_multitrack_process(
    *, transcript_id: str, bucket_name: str, track_keys: list[str]
):
    pipeline = PipelineMainMultitrack(transcript_id=transcript_id)
    try:
        await pipeline.set_status(transcript_id, "processing")
        await pipeline.process(bucket_name, track_keys)
    except Exception:
        await pipeline.set_status(transcript_id, "error")
        raise

    post_chain = chain(
        task_cleanup_consent.si(transcript_id=transcript_id),
        task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
        task_send_webhook_if_needed.si(transcript_id=transcript_id),
    )
    post_chain.delay()
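For orientation only (not part of the diff): dispatching this Celery task from application code would look roughly like the sketch below. The transcript id, bucket name, and track keys are illustrative placeholders, not values from the repository.

# Hypothetical dispatch site; all argument values are illustrative only.
task_pipeline_multitrack_process.delay(
    transcript_id="tr_123",
    bucket_name="daily-raw-tracks",
    track_keys=[
        "rec/1760988935484-participant-a-cam-audio-1760988935922.webm",
        "rec/1760988935484-participant-b-cam-audio-1760988943823.webm",
    ],
)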
@@ -1,109 +0,0 @@
"""
Topic processing utilities
==========================

Shared topic detection, title generation, and summarization logic
used across file and multitrack pipelines.
"""

from typing import Callable

import structlog

from reflector.db.transcripts import Transcript
from reflector.processors import (
    TranscriptFinalSummaryProcessor,
    TranscriptFinalTitleProcessor,
    TranscriptTopicDetectorProcessor,
)
from reflector.processors.types import TitleSummary
from reflector.processors.types import Transcript as TranscriptType


class EmptyPipeline:
    def __init__(self, logger: structlog.BoundLogger):
        self.logger = logger

    def get_pref(self, k, d=None):
        return d

    async def emit(self, event):
        pass


async def detect_topics(
    transcript: TranscriptType,
    target_language: str,
    *,
    on_topic_callback: Callable,
    empty_pipeline: EmptyPipeline,
) -> list[TitleSummary]:
    chunk_size = 300
    topics: list[TitleSummary] = []

    async def on_topic(topic: TitleSummary):
        topics.append(topic)
        return await on_topic_callback(topic)

    topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
    topic_detector.set_pipeline(empty_pipeline)

    for i in range(0, len(transcript.words), chunk_size):
        chunk_words = transcript.words[i : i + chunk_size]
        if not chunk_words:
            continue

        chunk_transcript = TranscriptType(
            words=chunk_words, translation=transcript.translation
        )

        await topic_detector.push(chunk_transcript)

    await topic_detector.flush()
    return topics


async def generate_title(
    topics: list[TitleSummary],
    *,
    on_title_callback: Callable,
    empty_pipeline: EmptyPipeline,
    logger: structlog.BoundLogger,
):
    if not topics:
        logger.warning("No topics for title generation")
        return

    processor = TranscriptFinalTitleProcessor(callback=on_title_callback)
    processor.set_pipeline(empty_pipeline)

    for topic in topics:
        await processor.push(topic)

    await processor.flush()


async def generate_summaries(
    topics: list[TitleSummary],
    transcript: Transcript,
    *,
    on_long_summary_callback: Callable,
    on_short_summary_callback: Callable,
    empty_pipeline: EmptyPipeline,
    logger: structlog.BoundLogger,
):
    if not topics:
        logger.warning("No topics for summary generation")
        return

    processor = TranscriptFinalSummaryProcessor(
        transcript=transcript,
        callback=on_long_summary_callback,
        on_short_summary=on_short_summary_callback,
    )
    processor.set_pipeline(empty_pipeline)

    for topic in topics:
        await processor.push(topic)

    await processor.flush()
@@ -1,34 +0,0 @@
from reflector.processors.file_transcript import FileTranscriptInput
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
from reflector.processors.types import Transcript as TranscriptType


async def transcribe_file_with_processor(
    audio_url: str,
    language: str,
    processor_name: str | None = None,
) -> TranscriptType:
    processor = (
        FileTranscriptAutoProcessor(name=processor_name)
        if processor_name
        else FileTranscriptAutoProcessor()
    )
    input_data = FileTranscriptInput(audio_url=audio_url, language=language)

    result: TranscriptType | None = None

    async def capture_result(transcript):
        nonlocal result
        result = transcript

    processor.on(capture_result)
    await processor.push(input_data)
    await processor.flush()

    if not result:
        processor_label = processor_name or "default"
        raise ValueError(
            f"No transcript captured from {processor_label} processor for audio: {audio_url}"
        )

    return result
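For orientation only (not part of the diff): calling the helper above from an async context might look like this minimal sketch. The audio URL and language code are placeholders.

import asyncio

# Hypothetical caller; the presigned URL and language code are illustrative only.
async def main():
    transcript = await transcribe_file_with_processor(
        audio_url="https://example.com/padded_0.webm?X-Amz-Signature=...",
        language="en",
    )
    print(len(transcript.words), "words transcribed")

asyncio.run(main())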
@@ -11,7 +11,10 @@ from reflector.processors.audio_chunker_auto import AudioChunkerAutoProcessor

class AudioChunkerSileroProcessor(AudioChunkerProcessor):
    """
-    Assemble audio frames into chunks with VAD-based speech detection using Silero VAD
+    Assemble audio frames into chunks with VAD-based speech detection using Silero VAD.
+
+    Expects input audio to be already downscaled to 16kHz mono s16 format
+    (handled by AudioDownscaleProcessor in the pipeline).
    """

    def __init__(
@@ -31,12 +34,13 @@ class AudioChunkerSileroProcessor(AudioChunkerProcessor):
        self._init_vad(use_onnx)

    def _init_vad(self, use_onnx=False):
-        """Initialize Silero VAD model"""
+        """Initialize Silero VAD model for 16kHz audio"""
        try:
            torch.set_num_threads(1)
            self.vad_model = load_silero_vad(onnx=use_onnx)
+            # VAD expects 16kHz audio (guaranteed by AudioDownscaleProcessor)
            self.vad_iterator = VADIterator(self.vad_model, sampling_rate=16000)
-            self.logger.info("Silero VAD initialized successfully")
+            self.logger.info("Silero VAD initialized for 16kHz audio")

        except Exception as e:
            self.logger.error(f"Failed to initialize Silero VAD: {e}")
@@ -75,7 +79,7 @@ class AudioChunkerSileroProcessor(AudioChunkerProcessor):
            return None

        # Processing block with current buffer size
-        print(f"Processing block: {len(self.frames)} frames in buffer")
+        # print(f"Processing block: {len(self.frames)} frames in buffer")

        try:
            # Convert frames to numpy array for VAD
@@ -189,38 +193,29 @@ class AudioChunkerSileroProcessor(AudioChunkerProcessor):
        return None

    def _frames_to_numpy(self, frames: list[av.AudioFrame]) -> Optional[np.ndarray]:
-        """Convert av.AudioFrame list to numpy array for VAD processing"""
+        """Convert av.AudioFrame list to numpy array for VAD processing
+
+        Input frames are already 16kHz mono s16 format from AudioDownscaleProcessor.
+        Only need to convert s16 to float32 for Silero VAD.
+        """
        if not frames:
            return None

        try:
-            audio_data = []
-            for frame in frames:
-                frame_array = frame.to_ndarray()
-
-                if len(frame_array.shape) == 2:
-                    frame_array = frame_array.flatten()
-
-                audio_data.append(frame_array)
-
-            if not audio_data:
+            # Concatenate all frame arrays
+            audio_arrays = [frame.to_ndarray().flatten() for frame in frames]
+            if not audio_arrays:
                return None

-            combined_audio = np.concatenate(audio_data)
+            combined_audio = np.concatenate(audio_arrays)

-            # Ensure float32 format
-            if combined_audio.dtype == np.int16:
-                # Normalize int16 audio to float32 in range [-1.0, 1.0]
-                combined_audio = combined_audio.astype(np.float32) / 32768.0
-            elif combined_audio.dtype != np.float32:
-                combined_audio = combined_audio.astype(np.float32)
-
-            return combined_audio
-
+            # Convert s16 to float32 (Silero VAD requires float32 in range [-1.0, 1.0])
+            # Input is guaranteed to be s16 from AudioDownscaleProcessor
+            return combined_audio.astype(np.float32) / 32768.0
+
        except Exception as e:
            self.logger.error(f"Error converting frames to numpy: {e}")
+            return None
-        return None

    def _find_speech_segment_end(self, audio_array: np.ndarray) -> Optional[int]:
        """Find complete speech segments and return frame index at segment end"""
@@ -47,7 +47,6 @@ class FileDiarizationModalProcessor(FileDiarizationProcessor):
                "audio_file_url": data.audio_url,
                "timestamp": 0,
            },
-            follow_redirects=True,
        )
        response.raise_for_status()
        diarization_data = response.json()["diarization"]
@@ -54,18 +54,7 @@ class FileTranscriptModalProcessor(FileTranscriptProcessor):
                "language": data.language,
                "batch": True,
            },
-            follow_redirects=True,
        )

-        if response.status_code != 200:
-            error_body = response.text
-            self.logger.error(
-                "Modal API error",
-                audio_url=data.audio_url,
-                status_code=response.status_code,
-                error_body=error_body,
-            )
-
        response.raise_for_status()
        result = response.json()

@@ -78,9 +67,6 @@ class FileTranscriptModalProcessor(FileTranscriptProcessor):
            for word_info in result.get("words", [])
        ]

-        # words come not in order
-        words.sort(key=lambda w: w.start)
-
        return Transcript(words=words)


@@ -165,7 +165,6 @@ class SummaryBuilder:
        self.llm: LLM = llm
        self.model_name: str = llm.model_name
        self.logger = logger or structlog.get_logger()
-        self.participant_instructions: str | None = None
        if filename:
            self.read_transcript_from_file(filename)
@@ -192,61 +191,14 @@ class SummaryBuilder:
|
|||||||
self, prompt: str, output_cls: Type[T], tone_name: str | None = None
|
self, prompt: str, output_cls: Type[T], tone_name: str | None = None
|
||||||
) -> T:
|
) -> T:
|
||||||
"""Generic function to get structured output from LLM for non-function-calling models."""
|
"""Generic function to get structured output from LLM for non-function-calling models."""
|
||||||
# Add participant instructions to the prompt if available
|
|
||||||
enhanced_prompt = self._enhance_prompt_with_participants(prompt)
|
|
||||||
return await self.llm.get_structured_response(
|
return await self.llm.get_structured_response(
|
||||||
enhanced_prompt, [self.transcript], output_cls, tone_name=tone_name
|
prompt, [self.transcript], output_cls, tone_name=tone_name
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _get_response(
|
|
||||||
self, prompt: str, texts: list[str], tone_name: str | None = None
|
|
||||||
) -> str:
|
|
||||||
"""Get text response with automatic participant instructions injection."""
|
|
||||||
enhanced_prompt = self._enhance_prompt_with_participants(prompt)
|
|
||||||
return await self.llm.get_response(enhanced_prompt, texts, tone_name=tone_name)
|
|
||||||
|
|
||||||
def _enhance_prompt_with_participants(self, prompt: str) -> str:
|
|
||||||
"""Add participant instructions to any prompt if participants are known."""
|
|
||||||
if self.participant_instructions:
|
|
||||||
self.logger.debug("Adding participant instructions to prompt")
|
|
||||||
return f"{prompt}\n\n{self.participant_instructions}"
|
|
||||||
return prompt
|
|
||||||
|
|
||||||
# ----------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------
|
||||||
# Participants
|
# Participants
|
||||||
# ----------------------------------------------------------------------------
|
# ----------------------------------------------------------------------------
|
||||||
|
|
||||||
def set_known_participants(self, participants: list[str]) -> None:
|
|
||||||
"""
|
|
||||||
Set known participants directly without LLM identification.
|
|
||||||
This is used when participants are already identified and stored.
|
|
||||||
They are appended at the end of the transcript, providing more context for the assistant.
|
|
||||||
"""
|
|
||||||
if not participants:
|
|
||||||
self.logger.warning("No participants provided")
|
|
||||||
return
|
|
||||||
|
|
||||||
self.logger.info(
|
|
||||||
"Using known participants",
|
|
||||||
participants=participants,
|
|
||||||
)
|
|
||||||
|
|
||||||
participants_md = self.format_list_md(participants)
|
|
||||||
self.transcript += f"\n\n# Participants\n\n{participants_md}"
|
|
||||||
|
|
||||||
# Set instructions that will be automatically added to all prompts
|
|
||||||
participants_list = ", ".join(participants)
|
|
||||||
self.participant_instructions = dedent(
|
|
||||||
f"""
|
|
||||||
# IMPORTANT: Participant Names
|
|
||||||
The following participants are identified in this conversation: {participants_list}
|
|
||||||
|
|
||||||
You MUST use these specific participant names when referring to people in your response.
|
|
||||||
Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc.
|
|
||||||
Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested...").
|
|
||||||
"""
|
|
||||||
).strip()
|
|
||||||
|
|
||||||
async def identify_participants(self) -> None:
|
async def identify_participants(self) -> None:
|
||||||
"""
|
"""
|
||||||
From a transcript, try to identify the participants using TreeSummarize with structured output.
|
From a transcript, try to identify the participants using TreeSummarize with structured output.
|
||||||
@@ -280,19 +232,6 @@ class SummaryBuilder:
|
|||||||
if unique_participants:
|
if unique_participants:
|
||||||
participants_md = self.format_list_md(unique_participants)
|
participants_md = self.format_list_md(unique_participants)
|
||||||
self.transcript += f"\n\n# Participants\n\n{participants_md}"
|
self.transcript += f"\n\n# Participants\n\n{participants_md}"
|
||||||
|
|
||||||
# Set instructions that will be automatically added to all prompts
|
|
||||||
participants_list = ", ".join(unique_participants)
|
|
||||||
self.participant_instructions = dedent(
|
|
||||||
f"""
|
|
||||||
# IMPORTANT: Participant Names
|
|
||||||
The following participants are identified in this conversation: {participants_list}
|
|
||||||
|
|
||||||
You MUST use these specific participant names when referring to people in your response.
|
|
||||||
Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc.
|
|
||||||
Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested...").
|
|
||||||
"""
|
|
||||||
).strip()
|
|
||||||
else:
|
else:
|
||||||
self.logger.warning("No participants identified in the transcript")
|
self.logger.warning("No participants identified in the transcript")
|
||||||
|
|
||||||
@@ -379,13 +318,13 @@ class SummaryBuilder:
        for subject in self.subjects:
            detailed_prompt = DETAILED_SUBJECT_PROMPT_TEMPLATE.format(subject=subject)

-            detailed_response = await self._get_response(
+            detailed_response = await self.llm.get_response(
                detailed_prompt, [self.transcript], tone_name="Topic assistant"
            )

            paragraph_prompt = PARAGRAPH_SUMMARY_PROMPT

-            paragraph_response = await self._get_response(
+            paragraph_response = await self.llm.get_response(
                paragraph_prompt, [str(detailed_response)], tone_name="Topic summarizer"
            )

@@ -406,7 +345,7 @@ class SummaryBuilder:

        recap_prompt = RECAP_PROMPT

-        recap_response = await self._get_response(
+        recap_response = await self.llm.get_response(
            recap_prompt, [summaries_text], tone_name="Recap summarizer"
        )
|
|
||||||
|
|||||||
@@ -26,25 +26,7 @@ class TranscriptFinalSummaryProcessor(Processor):
|
|||||||
async def get_summary_builder(self, text) -> SummaryBuilder:
|
async def get_summary_builder(self, text) -> SummaryBuilder:
|
||||||
builder = SummaryBuilder(self.llm, logger=self.logger)
|
builder = SummaryBuilder(self.llm, logger=self.logger)
|
||||||
builder.set_transcript(text)
|
builder.set_transcript(text)
|
||||||
|
await builder.identify_participants()
|
||||||
# Use known participants if available, otherwise identify them
|
|
||||||
if self.transcript and self.transcript.participants:
|
|
||||||
# Extract participant names from the stored participants
|
|
||||||
participant_names = [p.name for p in self.transcript.participants if p.name]
|
|
||||||
if participant_names:
|
|
||||||
self.logger.info(
|
|
||||||
f"Using {len(participant_names)} known participants from transcript"
|
|
||||||
)
|
|
||||||
builder.set_known_participants(participant_names)
|
|
||||||
else:
|
|
||||||
self.logger.info(
|
|
||||||
"Participants field exists but is empty, identifying participants"
|
|
||||||
)
|
|
||||||
await builder.identify_participants()
|
|
||||||
else:
|
|
||||||
self.logger.info("No participants stored, identifying participants")
|
|
||||||
await builder.identify_participants()
|
|
||||||
|
|
||||||
await builder.generate_summary()
|
await builder.generate_summary()
|
||||||
return builder
|
return builder
|
||||||
|
|
||||||
@@ -67,30 +49,18 @@ class TranscriptFinalSummaryProcessor(Processor):
|
|||||||
speakermap = {}
|
speakermap = {}
|
||||||
if self.transcript:
|
if self.transcript:
|
||||||
speakermap = {
|
speakermap = {
|
||||||
p.speaker: p.name
|
participant["speaker"]: participant["name"]
|
||||||
for p in (self.transcript.participants or [])
|
for participant in self.transcript.participants
|
||||||
if p.speaker is not None and p.name
|
|
||||||
}
|
}
|
||||||
self.logger.info(
|
|
||||||
f"Built speaker map with {len(speakermap)} participants",
|
|
||||||
speakermap=speakermap,
|
|
||||||
)
|
|
||||||
|
|
||||||
# build the transcript as a single string
|
# build the transcript as a single string
|
||||||
# Replace speaker IDs with actual participant names if available
|
# XXX: unsure if the participants name as replaced directly in speaker ?
|
||||||
text_transcript = []
|
text_transcript = []
|
||||||
unique_speakers = set()
|
|
||||||
for topic in self.chunks:
|
for topic in self.chunks:
|
||||||
for segment in topic.transcript.as_segments():
|
for segment in topic.transcript.as_segments():
|
||||||
name = speakermap.get(segment.speaker, f"Speaker {segment.speaker}")
|
name = speakermap.get(segment.speaker, f"Speaker {segment.speaker}")
|
||||||
unique_speakers.add((segment.speaker, name))
|
|
||||||
text_transcript.append(f"{name}: {segment.text}")
|
text_transcript.append(f"{name}: {segment.text}")
|
||||||
|
|
||||||
self.logger.info(
|
|
||||||
f"Built transcript with {len(unique_speakers)} unique speakers",
|
|
||||||
speakers=list(unique_speakers),
|
|
||||||
)
|
|
||||||
|
|
||||||
text_transcript = "\n".join(text_transcript)
|
text_transcript = "\n".join(text_transcript)
|
||||||
|
|
||||||
last_chunk = self.chunks[-1]
|
last_chunk = self.chunks[-1]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
from textwrap import dedent

-from pydantic import AliasChoices, BaseModel, Field
+from pydantic import BaseModel, Field

from reflector.llm import LLM
from reflector.processors.base import Processor
@@ -34,14 +34,8 @@ TOPIC_PROMPT = dedent(
class TopicResponse(BaseModel):
    """Structured response for topic detection"""

-    title: str = Field(
-        description="A descriptive title for the topic being discussed",
-        validation_alias=AliasChoices("title", "Title"),
-    )
-    summary: str = Field(
-        description="A concise 1-2 sentence summary of the discussion",
-        validation_alias=AliasChoices("summary", "Summary"),
-    )
+    title: str = Field(description="A descriptive title for the topic being discussed")
+    summary: str = Field(description="A concise 1-2 sentence summary of the discussion")


class TranscriptTopicDetectorProcessor(Processor):
|
|||||||
@@ -4,8 +4,11 @@ import tempfile
from pathlib import Path
from typing import Annotated, TypedDict

+from profanityfilter import ProfanityFilter
from pydantic import BaseModel, Field, PrivateAttr

+from reflector.redis_cache import redis_cache
+

class DiarizationSegment(TypedDict):
    """Type definition for diarization segment containing speaker information"""
@@ -17,6 +20,9 @@ class DiarizationSegment(TypedDict):

PUNC_RE = re.compile(r"[.;:?!…]")

+profanity_filter = ProfanityFilter()
+profanity_filter.set_censor("*")
+

class AudioFile(BaseModel):
    name: str
@@ -118,11 +124,21 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:

class Transcript(BaseModel):
    translation: str | None = None
-    words: list[Word] = []
+    words: list[Word] = None
+
+    @property
+    def raw_text(self):
+        # Uncensored text
+        return "".join([word.text for word in self.words])
+
+    @redis_cache(prefix="profanity", duration=3600 * 24 * 7)
+    def _get_censored_text(self, text: str):
+        return profanity_filter.censor(text).strip()

    @property
    def text(self):
-        return "".join([word.text for word in self.words])
+        # Censored text
+        return self._get_censored_text(self.raw_text)

    @property
    def human_timestamp(self):
@@ -154,6 +170,12 @@ class Transcript(BaseModel):
        word.start += offset
        word.end += offset

+    def clone(self):
+        words = [
+            Word(text=word.text, start=word.start, end=word.end) for word in self.words
+        ]
+        return Transcript(text=self.text, translation=self.translation, words=words)
+
    def as_segments(self) -> list[TranscriptSegment]:
        return words_to_segments(self.words)
||||||
|
|||||||
@@ -1,17 +1,10 @@
-import asyncio
import functools
import json
-from typing import Optional

import redis
-import redis.asyncio as redis_async
-import structlog
-from redis.exceptions import LockError

from reflector.settings import settings

-logger = structlog.get_logger(__name__)
-
redis_clients = {}


@@ -28,12 +21,6 @@ def get_redis_client(db=0):
    return redis_clients[db]


-async def get_async_redis_client(db: int = 0):
-    return await redis_async.from_url(
-        f"redis://{settings.REDIS_HOST}:{settings.REDIS_PORT}/{db}"
-    )
-
-
def redis_cache(prefix="cache", duration=3600, db=settings.REDIS_CACHE_DB, argidx=1):
    """
    Cache the result of a function in Redis.
@@ -62,87 +49,3 @@ def redis_cache(prefix="cache", duration=3600, db=settings.REDIS_CACHE_DB, argid
        return wrapper

    return decorator
|
|
||||||
|
|
||||||
class RedisAsyncLock:
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
key: str,
|
|
||||||
timeout: int = 120,
|
|
||||||
extend_interval: int = 30,
|
|
||||||
skip_if_locked: bool = False,
|
|
||||||
blocking: bool = True,
|
|
||||||
blocking_timeout: Optional[float] = None,
|
|
||||||
):
|
|
||||||
self.key = f"async_lock:{key}"
|
|
||||||
self.timeout = timeout
|
|
||||||
self.extend_interval = extend_interval
|
|
||||||
self.skip_if_locked = skip_if_locked
|
|
||||||
self.blocking = blocking
|
|
||||||
self.blocking_timeout = blocking_timeout
|
|
||||||
self._lock = None
|
|
||||||
self._redis = None
|
|
||||||
self._extend_task = None
|
|
||||||
self._acquired = False
|
|
||||||
|
|
||||||
async def _extend_lock_periodically(self):
|
|
||||||
while True:
|
|
||||||
try:
|
|
||||||
await asyncio.sleep(self.extend_interval)
|
|
||||||
if self._lock:
|
|
||||||
await self._lock.extend(self.timeout, replace_ttl=True)
|
|
||||||
logger.debug("Extended lock", key=self.key)
|
|
||||||
except LockError:
|
|
||||||
logger.warning("Failed to extend lock", key=self.key)
|
|
||||||
break
|
|
||||||
except asyncio.CancelledError:
|
|
||||||
break
|
|
||||||
except Exception as e:
|
|
||||||
logger.error("Error extending lock", key=self.key, error=str(e))
|
|
||||||
break
|
|
||||||
|
|
||||||
async def __aenter__(self):
|
|
||||||
self._redis = await get_async_redis_client()
|
|
||||||
self._lock = self._redis.lock(
|
|
||||||
self.key,
|
|
||||||
timeout=self.timeout,
|
|
||||||
blocking=self.blocking,
|
|
||||||
blocking_timeout=self.blocking_timeout,
|
|
||||||
)
|
|
||||||
|
|
||||||
self._acquired = await self._lock.acquire()
|
|
||||||
|
|
||||||
if not self._acquired:
|
|
||||||
if self.skip_if_locked:
|
|
||||||
logger.warning(
|
|
||||||
"Lock already acquired by another process, skipping", key=self.key
|
|
||||||
)
|
|
||||||
return self
|
|
||||||
else:
|
|
||||||
raise LockError(f"Failed to acquire lock: {self.key}")
|
|
||||||
|
|
||||||
self._extend_task = asyncio.create_task(self._extend_lock_periodically())
|
|
||||||
logger.info("Acquired lock", key=self.key)
|
|
||||||
return self
|
|
||||||
|
|
||||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
|
||||||
if self._extend_task:
|
|
||||||
self._extend_task.cancel()
|
|
||||||
try:
|
|
||||||
await self._extend_task
|
|
||||||
except asyncio.CancelledError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
if self._acquired and self._lock:
|
|
||||||
try:
|
|
||||||
await self._lock.release()
|
|
||||||
logger.info("Released lock", key=self.key)
|
|
||||||
except LockError:
|
|
||||||
logger.debug("Lock already released or expired", key=self.key)
|
|
||||||
|
|
||||||
if self._redis:
|
|
||||||
await self._redis.aclose()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def acquired(self) -> bool:
|
|
||||||
return self._acquired
|
|
||||||
|
|||||||
@@ -1,5 +0,0 @@
from typing import Literal

Platform = Literal["whereby", "daily"]
WHEREBY_PLATFORM: Platform = "whereby"
DAILY_PLATFORM: Platform = "daily"
|
||||||
@@ -1,408 +0,0 @@
"""
ICS Calendar Synchronization Service

This module provides services for fetching, parsing, and synchronizing ICS (iCalendar)
calendar feeds with room booking data in the database.

Key Components:
- ICSFetchService: Handles HTTP fetching and parsing of ICS calendar data
- ICSSyncService: Manages the synchronization process between ICS feeds and database

Example Usage:
    # Sync a room's calendar
    room = Room(id="room1", name="conference-room", ics_url="https://cal.example.com/room.ics")
    result = await ics_sync_service.sync_room_calendar(room)

    # Result structure:
    {
        "status": "success",      # success|unchanged|error|skipped
        "hash": "abc123...",      # MD5 hash of ICS content
        "events_found": 5,        # Events matching this room
        "total_events": 12,       # Total events in calendar within time window
        "events_created": 2,      # New events added to database
        "events_updated": 3,      # Existing events modified
        "events_deleted": 1       # Events soft-deleted (no longer in calendar)
    }

Event Matching:
    Events are matched to rooms by checking if the room's full URL appears in the
    event's LOCATION or DESCRIPTION fields. Only events within a 25-hour window
    (1 hour ago to 24 hours from now) are processed.

Input: ICS calendar URL (e.g., "https://calendar.google.com/calendar/ical/...")
Output: EventData objects with structured calendar information:
    {
        "ics_uid": "event123@google.com",
        "title": "Team Meeting",
        "description": "Weekly sync meeting",
        "location": "https://meet.company.com/conference-room",
        "start_time": datetime(2024, 1, 15, 14, 0, tzinfo=UTC),
        "end_time": datetime(2024, 1, 15, 15, 0, tzinfo=UTC),
        "attendees": [
            {"email": "user@company.com", "name": "John Doe", "role": "ORGANIZER"},
            {"email": "attendee@company.com", "name": "Jane Smith", "status": "ACCEPTED"}
        ],
        "ics_raw_data": "BEGIN:VEVENT\nUID:event123@google.com\n..."
    }
"""

import hashlib
from datetime import date, datetime, timedelta, timezone
from enum import Enum
from typing import TypedDict

import httpx
import pytz
import structlog
from icalendar import Calendar, Event

from reflector.db.calendar_events import CalendarEvent, calendar_events_controller
from reflector.db.rooms import Room, rooms_controller
from reflector.redis_cache import RedisAsyncLock
from reflector.settings import settings

logger = structlog.get_logger()

EVENT_WINDOW_DELTA_START = timedelta(hours=-1)
EVENT_WINDOW_DELTA_END = timedelta(hours=24)


class SyncStatus(str, Enum):
    SUCCESS = "success"
    UNCHANGED = "unchanged"
    ERROR = "error"
    SKIPPED = "skipped"


class AttendeeData(TypedDict, total=False):
    email: str | None
    name: str | None
    status: str | None
    role: str | None


class EventData(TypedDict):
    ics_uid: str
    title: str | None
    description: str | None
    location: str | None
    start_time: datetime
    end_time: datetime
    attendees: list[AttendeeData]
    ics_raw_data: str


class SyncStats(TypedDict):
    events_created: int
    events_updated: int
    events_deleted: int


class SyncResultBase(TypedDict):
    status: SyncStatus


class SyncResult(SyncResultBase, total=False):
    hash: str | None
    events_found: int
    total_events: int
    events_created: int
    events_updated: int
    events_deleted: int
    error: str | None
    reason: str | None


class ICSFetchService:
    def __init__(self):
        self.client = httpx.AsyncClient(
            timeout=30.0, headers={"User-Agent": "Reflector/1.0"}
        )

    async def fetch_ics(self, url: str) -> str:
        response = await self.client.get(url)
        response.raise_for_status()

        return response.text

    def parse_ics(self, ics_content: str) -> Calendar:
        return Calendar.from_ical(ics_content)

    def extract_room_events(
        self, calendar: Calendar, room_name: str, room_url: str
    ) -> tuple[list[EventData], int]:
        events = []
        total_events = 0
        now = datetime.now(timezone.utc)
        window_start = now + EVENT_WINDOW_DELTA_START
        window_end = now + EVENT_WINDOW_DELTA_END

        for component in calendar.walk():
            if component.name != "VEVENT":
                continue

            status = component.get("STATUS", "").upper()
            if status == "CANCELLED":
                continue

            # Count total non-cancelled events in the time window
            event_data = self._parse_event(component)
            if event_data and window_start <= event_data["start_time"] <= window_end:
                total_events += 1

                # Check if event matches this room
                if self._event_matches_room(component, room_name, room_url):
                    events.append(event_data)

        return events, total_events

    def _event_matches_room(self, event: Event, room_name: str, room_url: str) -> bool:
        location = str(event.get("LOCATION", ""))
        description = str(event.get("DESCRIPTION", ""))

        # Only match the full room URL
        # XXX left here as a pattern list, to later be extended with tinyurl or similar
        patterns = [
            room_url,
        ]

        # Check location and description for patterns
        text_to_check = f"{location} {description}".lower()
        for pattern in patterns:
            if pattern.lower() in text_to_check:
                return True

        return False

    def _parse_event(self, event: Event) -> EventData | None:
        uid = str(event.get("UID", ""))
        summary = str(event.get("SUMMARY", ""))
        description = str(event.get("DESCRIPTION", ""))
        location = str(event.get("LOCATION", ""))
        dtstart = event.get("DTSTART")
        dtend = event.get("DTEND")

        if not dtstart:
            return None

        # Convert fields
        start_time = self._normalize_datetime(
            dtstart.dt if hasattr(dtstart, "dt") else dtstart
        )
        end_time = (
            self._normalize_datetime(dtend.dt if hasattr(dtend, "dt") else dtend)
            if dtend
            else start_time + timedelta(hours=1)
        )
        attendees = self._parse_attendees(event)

        # Get raw event data for storage
        raw_data = event.to_ical().decode("utf-8")

        return {
            "ics_uid": uid,
            "title": summary,
            "description": description,
            "location": location,
            "start_time": start_time,
            "end_time": end_time,
            "attendees": attendees,
            "ics_raw_data": raw_data,
        }

    def _normalize_datetime(self, dt) -> datetime:
        # Ensure the datetime is timezone-aware; if not, assume UTC
        if isinstance(dt, date) and not isinstance(dt, datetime):
            dt = datetime.combine(dt, datetime.min.time())
            dt = pytz.UTC.localize(dt)
        elif isinstance(dt, datetime):
            if dt.tzinfo is None:
                dt = pytz.UTC.localize(dt)
            else:
                dt = dt.astimezone(pytz.UTC)

        return dt

    def _parse_attendees(self, event: Event) -> list[AttendeeData]:
        # Extracts attendee information from both ATTENDEE and ORGANIZER properties.
        # Handles malformed comma-separated email addresses in single ATTENDEE fields
        # by splitting them into separate attendee entries. Returns a list of attendee
        # data including email, name, status, and role information.
        final_attendees = []

        attendees = event.get("ATTENDEE", [])
        if not isinstance(attendees, list):
            attendees = [attendees]
        for att in attendees:
            email_str = str(att).replace("mailto:", "") if att else None

            # Handle malformed comma-separated email addresses in a single ATTENDEE field
            if email_str and "," in email_str:
                # Split comma-separated emails and create separate attendee entries
                email_parts = [email.strip() for email in email_str.split(",")]
                for email in email_parts:
                    if email and "@" in email:
                        clean_email = email.replace("MAILTO:", "").replace(
                            "mailto:", ""
                        )
                        att_data: AttendeeData = {
                            "email": clean_email,
                            "name": att.params.get("CN")
                            if hasattr(att, "params") and email == email_parts[0]
                            else None,
                            "status": att.params.get("PARTSTAT")
                            if hasattr(att, "params") and email == email_parts[0]
                            else None,
                            "role": att.params.get("ROLE")
                            if hasattr(att, "params") and email == email_parts[0]
                            else None,
                        }
                        final_attendees.append(att_data)
            else:
                # Normal single attendee
                att_data: AttendeeData = {
                    "email": email_str,
                    "name": att.params.get("CN") if hasattr(att, "params") else None,
                    "status": att.params.get("PARTSTAT")
                    if hasattr(att, "params")
                    else None,
                    "role": att.params.get("ROLE") if hasattr(att, "params") else None,
                }
                final_attendees.append(att_data)

        # Add organizer
        organizer = event.get("ORGANIZER")
        if organizer:
            org_email = (
                str(organizer).replace("mailto:", "").replace("MAILTO:", "")
                if organizer
                else None
            )
            org_data: AttendeeData = {
                "email": org_email,
                "name": organizer.params.get("CN")
                if hasattr(organizer, "params")
                else None,
                "role": "ORGANIZER",
            }
            final_attendees.append(org_data)

        return final_attendees


class ICSSyncService:
    def __init__(self):
        self.fetch_service = ICSFetchService()

    async def sync_room_calendar(self, room: Room) -> SyncResult:
        async with RedisAsyncLock(
            f"ics_sync_room:{room.id}", skip_if_locked=True
        ) as lock:
            if not lock.acquired:
                logger.warning("ICS sync already in progress for room", room_id=room.id)
                return {
                    "status": SyncStatus.SKIPPED,
                    "reason": "Sync already in progress",
                }

            return await self._sync_room_calendar(room)

    async def _sync_room_calendar(self, room: Room) -> SyncResult:
        if not room.ics_enabled or not room.ics_url:
            return {"status": SyncStatus.SKIPPED, "reason": "ICS not configured"}

        try:
            if not self._should_sync(room):
                return {"status": SyncStatus.SKIPPED, "reason": "Not time to sync yet"}

            ics_content = await self.fetch_service.fetch_ics(room.ics_url)
            calendar = self.fetch_service.parse_ics(ics_content)

            content_hash = hashlib.md5(ics_content.encode()).hexdigest()
            if room.ics_last_etag == content_hash:
                logger.info("No changes in ICS for room", room_id=room.id)
                room_url = f"{settings.UI_BASE_URL}/{room.name}"
                events, total_events = self.fetch_service.extract_room_events(
                    calendar, room.name, room_url
                )
                return {
                    "status": SyncStatus.UNCHANGED,
                    "hash": content_hash,
                    "events_found": len(events),
                    "total_events": total_events,
                    "events_created": 0,
                    "events_updated": 0,
                    "events_deleted": 0,
                }

            # Extract matching events
            room_url = f"{settings.UI_BASE_URL}/{room.name}"
            events, total_events = self.fetch_service.extract_room_events(
                calendar, room.name, room_url
            )
            sync_result = await self._sync_events_to_database(room.id, events)

            # Update room sync metadata
            await rooms_controller.update(
                room,
                {
                    "ics_last_sync": datetime.now(timezone.utc),
                    "ics_last_etag": content_hash,
                },
                mutate=False,
            )

            return {
                "status": SyncStatus.SUCCESS,
                "hash": content_hash,
                "events_found": len(events),
                "total_events": total_events,
                **sync_result,
            }

        except Exception as e:
            logger.error("Failed to sync ICS for room", room_id=room.id, error=str(e))
            return {"status": SyncStatus.ERROR, "error": str(e)}

    def _should_sync(self, room: Room) -> bool:
        if not room.ics_last_sync:
            return True

        time_since_sync = datetime.now(timezone.utc) - room.ics_last_sync
        return time_since_sync.total_seconds() >= room.ics_fetch_interval

    async def _sync_events_to_database(
        self, room_id: str, events: list[EventData]
    ) -> SyncStats:
        created = 0
        updated = 0

        current_ics_uids = []

        for event_data in events:
            calendar_event = CalendarEvent(room_id=room_id, **event_data)
            existing = await calendar_events_controller.get_by_ics_uid(
                room_id, event_data["ics_uid"]
            )

            if existing:
                updated += 1
            else:
                created += 1

            await calendar_events_controller.upsert(calendar_event)
            current_ics_uids.append(event_data["ics_uid"])

        # Soft delete events that are no longer in calendar
        deleted = await calendar_events_controller.soft_delete_missing(
            room_id, current_ics_uids
        )

        return {
            "events_created": created,
            "events_updated": updated,
            "events_deleted": deleted,
        }


ics_sync_service = ICSSyncService()
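The module-level ics_sync_service singleton is the entry point for callers. A minimal sketch of one invocation, mirroring the docstring's example above (the import path reflector.ics_sync and the exact Room constructor signature are assumptions; ics_enabled=True is added so the sync is not skipped):

import asyncio

from reflector.db.rooms import Room
from reflector.ics_sync import SyncStatus, ics_sync_service  # module path assumed


async def sync_one_room():
    room = Room(
        id="room1",
        name="conference-room",
        ics_url="https://cal.example.com/room.ics",
        ics_enabled=True,  # assumption: required so _sync_room_calendar does not skip
    )
    result = await ics_sync_service.sync_room_calendar(room)

    if result["status"] == SyncStatus.ERROR:
        print("sync failed:", result.get("error"))
    elif result["status"] == SyncStatus.SKIPPED:
        print("sync skipped:", result.get("reason"))
    else:
        # SUCCESS and UNCHANGED both carry event counts
        print(
            result.get("events_created", 0), "created,",
            result.get("events_updated", 0), "updated,",
            result.get("events_deleted", 0), "deleted",
        )


asyncio.run(sync_one_room())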
Some files were not shown because too many files have changed in this diff.