Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-20 20:29:06 +00:00)

Compare commits: `0.1.1` ... `mathieu/ji` (120 commits)
The compare spans these commits (SHA1 only; the author and date columns were not captured by the mirror):

e91979abbc, 95e8011975, c546e69739, 3f1fe8c9bf, 5f143fe364, 79f161436e, 5cba5d310d, 43ea9349f5, b3a8e9739d, 369ecdff13, fc363bd49b, 962038ee3f, 3b85ff3bdf, cde99ca271, f81fe9948a, 5a5b323382, 02a3938822, 7f5a4c9ddc, 08d88ec349, c4d2825c81, 0663700a61, 293f7d4f1f, dc82f8bb3b, 41224a424c, dd0089906f, fa559b1970, c26ce65083, 52eff2acc0, 7875ec3432, 398be06fad, da700069d9, 51229a1790, 2d2c23f7cc, 0acb9cac79, d861d92cc2, 24ff83a2ec, 249234238c, 42a603d5c3, 6d2092f950, f2bb6aaecb, 2b136ac7b0, 3f4fc26483, 8e5ef5bca6, d49fdcb38d, d42380abf1, cf64e1a3d9, ea53ca7000, 457823e1c1, 695d1a957d, ccffdba75b, 84a381220b, 5f2f0e9317, 88ed7cfa78, 6f0c7c1a5e, 9dfd76996f, 55cc8637c6, f5331a2107, 124ce03bf8, 7030e0f236, 37f0110892, cf2896a7f4, aabf2c2572, 6a7b08f016, e2736563d9, 0f54b7782d, 359280dd34, 9265d201b5, 52f9f533d7, 0c3878ac3c, d70beee51b, bc5b351d2b, 07981e8090, 7e366f6338, 7592679a35, af16178f86, 3ea7f6b7b6, 009590c080, fe5d344cff, 86455ce573, 2fccd81bcd, 1311714451, b9d891d342, 9eab952c63, 6fb5cb21c2, a42ed12982, 1aa52a99b6, 2a97290f2e, 7963cc8a52, d12424848d, 6e765875d5, e0f4acf28b, 12359ea4eb, 267b7401ea, aea9de393c, dc177af3ff, 5bd8233657, 28ac031ff6, 1878834ce6, f5b82d44e3, ad56165b54, 4ee19ed015, 406164033d, 81d316cb56, db3beae5cd, 03b9a18c1b, 7e3027adb6, 27b43d85ab, 2289a1a231, d0e130eb13, 24fabe3e86, 6fedbbe63f, b39175cdc9, 2a2af5fff2, ad44492cae, 901a239952, d77b5611f8, fc38345d65, 5a1d662dc4, 033bd4bc48, 0eb670ca19
---
**`.github/pull_request_template.md`** (vendored) — 30 lines changed

```diff
@@ -1,19 +1,21 @@
-## ⚠️ Insert the PR TITLE replacing this text ⚠️
+<!--- Provide a general summary of your changes in the Title above -->
-⚠️ Describe your PR replacing this text. Post screenshots or videos whenever possible. ⚠️
+## Description
+<!--- Describe your changes in detail -->
-### Checklist
+## Related Issue
+<!--- This project only accepts pull requests related to open issues -->
+<!--- If suggesting a new feature or change, please discuss it in an issue first -->
+<!--- If fixing a bug, there should be an issue describing it with steps to reproduce -->
+<!--- Please link to the issue here: -->
-- [ ] My branch is updated with main (mandatory)
-- [ ] I wrote unit tests for this (if applies)
-- [ ] I have included migrations and tested them locally (if applies)
-- [ ] I have manually tested this feature locally
+## Motivation and Context
+<!--- Why is this change required? What problem does it solve? -->
+<!--- If it fixes an open issue, please link to the issue here. -->
-> IMPORTANT: Remember that you are responsible for merging this PR after it's been reviewed, and once deployed
-> you should perform manual testing to make sure everything went smoothly.
-### Urgency
-- [ ] Urgent (deploy ASAP)
-- [ ] Non-urgent (deploying in next release is ok)
+## How Has This Been Tested?
+<!--- Please describe in detail how you tested your changes. -->
+<!--- Include details of your testing environment, and the tests you ran to -->
+<!--- see how your change affects other areas of the code, etc. -->
+## Screenshots (if appropriate):
```
---
**`.github/workflows/conventional_commit_pr.yml`** (vendored) — deleted (19 lines, @@ -1,19 +0,0 @@)

```yaml
name: Conventional commit PR

on: [pull_request]

jobs:
  cog_check_job:
    runs-on: ubuntu-latest
    name: check conventional commit compliance
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          # pick the pr HEAD instead of the merge commit
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Conventional commit check
        uses: cocogitto/cocogitto-action@v3
        with:
          check-latest-tag-only: true
```
---
**`.github/workflows/db_migrations.yml`** (vendored) — 35 lines changed

```diff
@@ -2,6 +2,8 @@ name: Test Database Migrations
 on:
   push:
     branches:
       - main
     paths:
+      - "server/migrations/**"
+      - "server/reflector/db/**"
@@ -17,10 +19,43 @@ on:
 jobs:
   test-migrations:
     runs-on: ubuntu-latest
+    concurrency:
+      group: db-ubuntu-latest-${{ github.ref }}
+      cancel-in-progress: true
+    services:
+      postgres:
+        image: postgres:17
+        env:
+          POSTGRES_USER: reflector
+          POSTGRES_PASSWORD: reflector
+          POSTGRES_DB: reflector
+        ports:
+          - 5432:5432
+        options: >-
+          --health-cmd pg_isready -h 127.0.0.1 -p 5432
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+
+    env:
+      DATABASE_URL: postgresql://reflector:reflector@localhost:5432/reflector

     steps:
       - uses: actions/checkout@v4

+      - name: Install PostgreSQL client
+        run: sudo apt-get update && sudo apt-get install -y postgresql-client | cat
+
+      - name: Wait for Postgres
+        run: |
+          for i in {1..30}; do
+            if pg_isready -h localhost -p 5432; then
+              echo "Postgres is ready"
+              break
+            fi
+            echo "Waiting for Postgres... ($i)" && sleep 1
+          done
+
       - name: Install uv
         uses: astral-sh/setup-uv@v3
         with:
```
---
**`.github/workflows/deploy.yml`** (vendored) — 77 lines changed

```diff
@@ -8,18 +8,30 @@ env:
   ECR_REPOSITORY: reflector

 jobs:
-  deploy:
-    runs-on: ubuntu-latest
+  build:
+    strategy:
+      matrix:
+        include:
+          - platform: linux/amd64
+            runner: linux-amd64
+            arch: amd64
+          - platform: linux/arm64
+            runner: linux-arm64
+            arch: arm64
+
+    runs-on: ${{ matrix.runner }}

     permissions:
       deployments: write
       contents: read

+    outputs:
+      registry: ${{ steps.login-ecr.outputs.registry }}
+
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@0e613a0980cbf65ed5b322eb7a1e075d28913a83
+        uses: aws-actions/configure-aws-credentials@v4
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -27,21 +39,52 @@ jobs:

       - name: Login to Amazon ECR
         id: login-ecr
-        uses: aws-actions/amazon-ecr-login@62f4f872db3836360b72999f4b87f1ff13310f3a
-
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
+        uses: aws-actions/amazon-ecr-login@v2

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

-      - name: Build and push
-        id: docker_build
-        uses: docker/build-push-action@v4
+      - name: Build and push ${{ matrix.arch }}
+        uses: docker/build-push-action@v5
         with:
           context: server
-          platforms: linux/amd64,linux/arm64
+          platforms: ${{ matrix.platform }}
           push: true
-          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
+          cache-from: type=gha,scope=${{ matrix.arch }}
+          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
+          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
+          provenance: false
+
+  create-manifest:
+    runs-on: ubuntu-latest
+    needs: [build]
+
+    permissions:
+      deployments: write
+      contents: read
+
+    steps:
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+
+      - name: Login to Amazon ECR
+        uses: aws-actions/amazon-ecr-login@v2
+
+      - name: Create and push multi-arch manifest
+        run: |
+          # Get the registry URL (since we can't easily access job outputs in matrix)
+          ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com
+
+          docker manifest create \
+            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
+            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
+            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64
+
+          docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest
+
+          echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"
```
---
**`.github/workflows/pre-commit.yml`** (vendored) — new file (24 lines)

```yaml
name: pre-commit

on:
  pull_request:
  push:
    branches: [main]

jobs:
  pre-commit:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/setup-python@v5
      - uses: pnpm/action-setup@v4
        with:
          version: 10
      - uses: actions/setup-node@v4
        with:
          node-version: 22
          cache: "pnpm"
          cache-dependency-path: "www/pnpm-lock.yaml"
      - name: Install dependencies
        run: cd www && pnpm install --frozen-lockfile
      - uses: pre-commit/action@v3.0.1
```
---
**`.github/workflows/test_next_server.yml`** (vendored) — new file (45 lines)

```yaml
name: Test Next Server

on:
  pull_request:
    paths:
      - "www/**"
  push:
    branches:
      - main
    paths:
      - "www/**"

jobs:
  test-next-server:
    runs-on: ubuntu-latest

    defaults:
      run:
        working-directory: ./www

    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 8

      - name: Setup Node.js cache
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'pnpm'
          cache-dependency-path: './www/pnpm-lock.yaml'

      - name: Install dependencies
        run: pnpm install

      - name: Run tests
        run: pnpm test
```
---
**`.github/workflows/test_server.yml`** (vendored) — 49 lines changed

```diff
@@ -5,12 +5,17 @@ on:
     paths:
       - "server/**"
   push:
     branches:
       - main
     paths:
       - "server/**"

 jobs:
   pytest:
     runs-on: ubuntu-latest
+    concurrency:
+      group: pytest-${{ github.ref }}
+      cancel-in-progress: true
     services:
       redis:
         image: redis:6
@@ -19,29 +24,47 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Install uv
-        uses: astral-sh/setup-uv@v3
+        uses: astral-sh/setup-uv@v6
         with:
           enable-cache: true
+          working-directory: server

       - name: Tests
         run: |
           cd server
           uv run -m pytest -v tests

-  docker:
-    runs-on: ubuntu-latest
+  docker-amd64:
+    runs-on: linux-amd64
+    concurrency:
+      group: docker-amd64-${{ github.ref }}
+      cancel-in-progress: true
     steps:
       - uses: actions/checkout@v4
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Build and push
-        id: docker_build
-        uses: docker/build-push-action@v4
+        uses: docker/setup-buildx-action@v3
+      - name: Build AMD64
+        uses: docker/build-push-action@v6
         with:
           context: server
-          platforms: linux/amd64,linux/arm64
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          platforms: linux/amd64
+          cache-from: type=gha,scope=amd64
+          cache-to: type=gha,mode=max,scope=amd64
+          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
+
+  docker-arm64:
+    runs-on: linux-arm64
+    concurrency:
+      group: docker-arm64-${{ github.ref }}
+      cancel-in-progress: true
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build ARM64
+        uses: docker/build-push-action@v6
+        with:
+          context: server
+          platforms: linux/arm64
+          cache-from: type=gha,scope=arm64
+          cache-to: type=gha,mode=max,scope=arm64
+          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
```
---
**`.gitignore`** (vendored) — 7 lines changed

```diff
@@ -11,3 +11,10 @@ ngrok.log
 restart-dev.sh
 *.log
 data/
+www/REFACTOR.md
+www/reload-frontend
+server/test.sqlite
+CLAUDE.local.md
+www/.env.development
+www/.env.production
+.playwright-mcp
```
---
**`.gitleaksignore`** — new file (1 line)

```
b9d891d3424f371642cb032ecfd0e2564470a72c:server/tests/test_transcripts_recording_deletion.py:generic-api-key:15
```
---
**`.pre-commit-config.yaml`** (filename inferred from content)

```diff
@@ -3,10 +3,10 @@
 repos:
   - repo: local
     hooks:
-      - id: yarn-format
-        name: run yarn format
+      - id: format
+        name: run format
         language: system
-        entry: bash -c 'cd www && yarn format'
+        entry: bash -c 'cd www && pnpm format'
         pass_filenames: false
         files: ^www/

@@ -15,25 +15,20 @@ repos:
     hooks:
       - id: debug-statements
       - id: trailing-whitespace
         exclude: ^server/trials
       - id: detect-private-key

-  - repo: https://github.com/psf/black
-    rev: 24.1.1
-    hooks:
-      - id: black
-        files: ^server/(reflector|tests)/
-
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-        name: isort (python)
-        files: ^server/(gpu|evaluate|reflector)/
-        args: [ "--profile", "black", "--filter-files" ]
-
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.6.5
+    rev: v0.8.2
     hooks:
       - id: ruff
-        files: ^server/(reflector|tests)/
         args:
           - --fix
+        # Uses select rules from server/pyproject.toml
+        files: ^server/
+      - id: ruff-format
+        files: ^server/
+
+  - repo: https://github.com/gitleaks/gitleaks
+    rev: v8.28.0
+    hooks:
+      - id: gitleaks
```
---
**`CHANGELOG.md`** — 195 lines changed (@@ -1,5 +1,200 @@: release notes for 0.2.0 through 0.10.0 added under the existing `# Changelog` heading)

# Changelog

## [0.10.0](https://github.com/Monadical-SAS/reflector/compare/v0.9.0...v0.10.0) (2025-09-11)

### Features

* replace nextjs-config with environment variables ([#632](https://github.com/Monadical-SAS/reflector/issues/632)) ([369ecdf](https://github.com/Monadical-SAS/reflector/commit/369ecdff13f3862d926a9c0b87df52c9d94c4dde))

### Bug Fixes

* anonymous users transcript permissions ([#621](https://github.com/Monadical-SAS/reflector/issues/621)) ([f81fe99](https://github.com/Monadical-SAS/reflector/commit/f81fe9948a9237b3e0001b2d8ca84f54d76878f9))
* auth post ([#624](https://github.com/Monadical-SAS/reflector/issues/624)) ([cde99ca](https://github.com/Monadical-SAS/reflector/commit/cde99ca2716f84ba26798f289047732f0448742e))
* auth post ([#626](https://github.com/Monadical-SAS/reflector/issues/626)) ([3b85ff3](https://github.com/Monadical-SAS/reflector/commit/3b85ff3bdf4fb053b103070646811bc990c0e70a))
* auth post ([#627](https://github.com/Monadical-SAS/reflector/issues/627)) ([962038e](https://github.com/Monadical-SAS/reflector/commit/962038ee3f2a555dc3c03856be0e4409456e0996))
* missing follow_redirects=True on modal endpoint ([#630](https://github.com/Monadical-SAS/reflector/issues/630)) ([fc363bd](https://github.com/Monadical-SAS/reflector/commit/fc363bd49b17b075e64f9186e5e0185abc325ea7))
* sync backend and frontend token refresh logic ([#614](https://github.com/Monadical-SAS/reflector/issues/614)) ([5a5b323](https://github.com/Monadical-SAS/reflector/commit/5a5b3233820df9536da75e87ce6184a983d4713a))

## [0.9.0](https://github.com/Monadical-SAS/reflector/compare/v0.8.2...v0.9.0) (2025-09-06)

### Features

* frontend openapi react query ([#606](https://github.com/Monadical-SAS/reflector/issues/606)) ([c4d2825](https://github.com/Monadical-SAS/reflector/commit/c4d2825c81f81ad8835629fbf6ea8c7383f8c31b))

### Bug Fixes

* align whisper transcriber api with parakeet ([#602](https://github.com/Monadical-SAS/reflector/issues/602)) ([0663700](https://github.com/Monadical-SAS/reflector/commit/0663700a615a4af69a03c96c410f049e23ec9443))
* kv use tls explicit ([#610](https://github.com/Monadical-SAS/reflector/issues/610)) ([08d88ec](https://github.com/Monadical-SAS/reflector/commit/08d88ec349f38b0d13e0fa4cb73486c8dfd31836))
* source kind for file processing ([#601](https://github.com/Monadical-SAS/reflector/issues/601)) ([dc82f8b](https://github.com/Monadical-SAS/reflector/commit/dc82f8bb3bdf3ab3d4088e592a30fd63907319e1))
* token refresh locking ([#613](https://github.com/Monadical-SAS/reflector/issues/613)) ([7f5a4c9](https://github.com/Monadical-SAS/reflector/commit/7f5a4c9ddc7fd098860c8bdda2ca3b57f63ded2f))

## [0.8.2](https://github.com/Monadical-SAS/reflector/compare/v0.8.1...v0.8.2) (2025-08-29)

### Bug Fixes

* search-logspam ([#593](https://github.com/Monadical-SAS/reflector/issues/593)) ([695d1a9](https://github.com/Monadical-SAS/reflector/commit/695d1a957d4cd862753049f9beed88836cabd5ab))

## [0.8.1](https://github.com/Monadical-SAS/reflector/compare/v0.8.0...v0.8.1) (2025-08-29)

### Bug Fixes

* make webhook secret/url allowing null ([#590](https://github.com/Monadical-SAS/reflector/issues/590)) ([84a3812](https://github.com/Monadical-SAS/reflector/commit/84a381220bc606231d08d6f71d4babc818fa3c75))

## [0.8.0](https://github.com/Monadical-SAS/reflector/compare/v0.7.3...v0.8.0) (2025-08-29)

### Features

* **cleanup:** add automatic data retention for public instances ([#574](https://github.com/Monadical-SAS/reflector/issues/574)) ([6f0c7c1](https://github.com/Monadical-SAS/reflector/commit/6f0c7c1a5e751713366886c8e764c2009e12ba72))
* **rooms:** add webhook for transcript completion ([#578](https://github.com/Monadical-SAS/reflector/issues/578)) ([88ed7cf](https://github.com/Monadical-SAS/reflector/commit/88ed7cfa7804794b9b54cad4c3facc8a98cf85fd))

### Bug Fixes

* file pipeline status reporting and websocket updates ([#589](https://github.com/Monadical-SAS/reflector/issues/589)) ([9dfd769](https://github.com/Monadical-SAS/reflector/commit/9dfd76996f851cc52be54feea078adbc0816dc57))
* Igor/evaluation ([#575](https://github.com/Monadical-SAS/reflector/issues/575)) ([124ce03](https://github.com/Monadical-SAS/reflector/commit/124ce03bf86044c18313d27228a25da4bc20c9c5))
* optimize parakeet transcription batching algorithm ([#577](https://github.com/Monadical-SAS/reflector/issues/577)) ([7030e0f](https://github.com/Monadical-SAS/reflector/commit/7030e0f23649a8cf6c1eb6d5889684a41ce849ec))

## [0.7.3](https://github.com/Monadical-SAS/reflector/compare/v0.7.2...v0.7.3) (2025-08-22)

### Bug Fixes

* cleaned repo, and get git-leaks clean ([359280d](https://github.com/Monadical-SAS/reflector/commit/359280dd340433ba4402ed69034094884c825e67))
* restore previous behavior on live pipeline + audio downscaler ([#561](https://github.com/Monadical-SAS/reflector/issues/561)) ([9265d20](https://github.com/Monadical-SAS/reflector/commit/9265d201b590d23c628c5f19251b70f473859043))

## [0.7.2](https://github.com/Monadical-SAS/reflector/compare/v0.7.1...v0.7.2) (2025-08-21)

### Bug Fixes

* docker image not loading libgomp.so.1 for torch ([#560](https://github.com/Monadical-SAS/reflector/issues/560)) ([773fccd](https://github.com/Monadical-SAS/reflector/commit/773fccd93e887c3493abc2e4a4864dddce610177))
* include shared rooms to search ([#558](https://github.com/Monadical-SAS/reflector/issues/558)) ([499eced](https://github.com/Monadical-SAS/reflector/commit/499eced3360b84fb3a90e1c8a3b554290d21adc2))

## [0.7.1](https://github.com/Monadical-SAS/reflector/compare/v0.7.0...v0.7.1) (2025-08-21)

### Bug Fixes

* webvtt db null expectation mismatch ([#556](https://github.com/Monadical-SAS/reflector/issues/556)) ([e67ad1a](https://github.com/Monadical-SAS/reflector/commit/e67ad1a4a2054467bfeb1e0258fbac5868aaaf21))

## [0.7.0](https://github.com/Monadical-SAS/reflector/compare/v0.6.1...v0.7.0) (2025-08-21)

### Features

* delete recording with transcript ([#547](https://github.com/Monadical-SAS/reflector/issues/547)) ([99cc984](https://github.com/Monadical-SAS/reflector/commit/99cc9840b3f5de01e0adfbfae93234042d706d13))
* pipeline improvement with file processing, parakeet, silero-vad ([#540](https://github.com/Monadical-SAS/reflector/issues/540)) ([bcc29c9](https://github.com/Monadical-SAS/reflector/commit/bcc29c9e0050ae215f89d460e9d645aaf6a5e486))
* postgresql migration and removal of sqlite in pytest ([#546](https://github.com/Monadical-SAS/reflector/issues/546)) ([cd1990f](https://github.com/Monadical-SAS/reflector/commit/cd1990f8f0fe1503ef5069512f33777a73a93d7f))
* search backend ([#537](https://github.com/Monadical-SAS/reflector/issues/537)) ([5f9b892](https://github.com/Monadical-SAS/reflector/commit/5f9b89260c9ef7f3c921319719467df22830453f))
* search frontend ([#551](https://github.com/Monadical-SAS/reflector/issues/551)) ([3657242](https://github.com/Monadical-SAS/reflector/commit/365724271ca6e615e3425125a69ae2b46ce39285))

### Bug Fixes

* evaluation cli event wrap ([#536](https://github.com/Monadical-SAS/reflector/issues/536)) ([941c3db](https://github.com/Monadical-SAS/reflector/commit/941c3db0bdacc7b61fea412f3746cc5a7cb67836))
* use structlog not logging ([#550](https://github.com/Monadical-SAS/reflector/issues/550)) ([27e2f81](https://github.com/Monadical-SAS/reflector/commit/27e2f81fda5232e53edc729d3e99c5ef03adbfe9))

## [0.6.1](https://github.com/Monadical-SAS/reflector/compare/v0.6.0...v0.6.1) (2025-08-06)

### Bug Fixes

* delayed waveform loading ([#538](https://github.com/Monadical-SAS/reflector/issues/538)) ([ef64146](https://github.com/Monadical-SAS/reflector/commit/ef64146325d03f64dd9a1fe40234fb3e7e957ae2))

## [0.6.0](https://github.com/Monadical-SAS/reflector/compare/v0.5.0...v0.6.0) (2025-08-05)

### ⚠ BREAKING CHANGES

* Configuration keys have changed. Update your .env file:
  - TRANSCRIPT_MODAL_API_KEY → TRANSCRIPT_API_KEY
  - LLM_MODAL_API_KEY → (removed, use TRANSCRIPT_API_KEY)
  - Add DIARIZATION_API_KEY and TRANSLATE_API_KEY if using those services

### Features

* implement service-specific Modal API keys with auto processor pattern ([#528](https://github.com/Monadical-SAS/reflector/issues/528)) ([650befb](https://github.com/Monadical-SAS/reflector/commit/650befb291c47a1f49e94a01ab37d8fdfcd2b65d))
* use llamaindex everywhere ([#525](https://github.com/Monadical-SAS/reflector/issues/525)) ([3141d17](https://github.com/Monadical-SAS/reflector/commit/3141d172bc4d3b3d533370c8e6e351ea762169bf))

### Miscellaneous Chores

* **main:** release 0.6.0 ([ecdbf00](https://github.com/Monadical-SAS/reflector/commit/ecdbf003ea2476c3e95fd231adaeb852f2943df0))

## [0.5.0](https://github.com/Monadical-SAS/reflector/compare/v0.4.0...v0.5.0) (2025-07-31)

### Features

* new summary using phi-4 and llama-index ([#519](https://github.com/Monadical-SAS/reflector/issues/519)) ([1bf9ce0](https://github.com/Monadical-SAS/reflector/commit/1bf9ce07c12f87f89e68a1dbb3b2c96c5ee62466))

### Bug Fixes

* remove unused settings and utils files ([#522](https://github.com/Monadical-SAS/reflector/issues/522)) ([2af4790](https://github.com/Monadical-SAS/reflector/commit/2af4790e4be9e588f282fbc1bb171c88a03d6479))

## [0.4.0](https://github.com/Monadical-SAS/reflector/compare/v0.3.2...v0.4.0) (2025-07-25)

### Features

* Diarization cli ([#509](https://github.com/Monadical-SAS/reflector/issues/509)) ([ffc8003](https://github.com/Monadical-SAS/reflector/commit/ffc8003e6dad236930a27d0fe3e2f2adfb793890))

### Bug Fixes

* remove faulty import Meeting ([#512](https://github.com/Monadical-SAS/reflector/issues/512)) ([0e68c79](https://github.com/Monadical-SAS/reflector/commit/0e68c798434e1b481f9482cc3a4702ea00365df4))
* room concurrency (theoretically) ([#511](https://github.com/Monadical-SAS/reflector/issues/511)) ([7bb3676](https://github.com/Monadical-SAS/reflector/commit/7bb367653afeb2778cff697a0eb217abf0b81b84))

## [0.3.2](https://github.com/Monadical-SAS/reflector/compare/v0.3.1...v0.3.2) (2025-07-22)

### Bug Fixes

* match font size for the filter sidebar ([#507](https://github.com/Monadical-SAS/reflector/issues/507)) ([4b8ba5d](https://github.com/Monadical-SAS/reflector/commit/4b8ba5db1733557e27b098ad3d1cdecadf97ae52))
* whereby consent not displaying ([#505](https://github.com/Monadical-SAS/reflector/issues/505)) ([1120552](https://github.com/Monadical-SAS/reflector/commit/1120552c2c83d084d3a39272ad49b6aeda1af98f))

## [0.3.1](https://github.com/Monadical-SAS/reflector/compare/v0.3.0...v0.3.1) (2025-07-22)

### Bug Fixes

* remove fief out of the source code ([#502](https://github.com/Monadical-SAS/reflector/issues/502)) ([890dd15](https://github.com/Monadical-SAS/reflector/commit/890dd15ba5a2be10dbb841e9aeb75d377885f4af))
* remove primary color for room action menu ([#504](https://github.com/Monadical-SAS/reflector/issues/504)) ([2e33f89](https://github.com/Monadical-SAS/reflector/commit/2e33f89c0f9e5fbaafa80e8d2ae9788450ea2f31))

## [0.3.0](https://github.com/Monadical-SAS/reflector/compare/v0.2.1...v0.3.0) (2025-07-21)

### Features

* migrate from chakra 2 to chakra 3 ([#500](https://github.com/Monadical-SAS/reflector/issues/500)) ([a858464](https://github.com/Monadical-SAS/reflector/commit/a858464c7a80e5497acf801d933bf04092f8b526))

## [0.2.1](https://github.com/Monadical-SAS/reflector/compare/v0.2.0...v0.2.1) (2025-07-18)

### Bug Fixes

* separate browsing page into different components, limit to 10 by default ([#498](https://github.com/Monadical-SAS/reflector/issues/498)) ([c752da6](https://github.com/Monadical-SAS/reflector/commit/c752da6b97c96318aff079a5b2a6eceadfbfcad1))

## [0.2.0](https://github.com/Monadical-SAS/reflector/compare/0.1.1...v0.2.0) (2025-07-17)

### Features

* improve transcript listing with room_id ([#496](https://github.com/Monadical-SAS/reflector/issues/496)) ([d2b5de5](https://github.com/Monadical-SAS/reflector/commit/d2b5de543fc0617fc220caa6a8a290e4040cb10b))

### Bug Fixes

* don't attempt to load waveform/mp3 if audio was deleted ([#495](https://github.com/Monadical-SAS/reflector/issues/495)) ([f4578a7](https://github.com/Monadical-SAS/reflector/commit/f4578a743fd0f20312fbd242fa9cccdfaeb20a9e))

## [0.1.1](https://github.com/Monadical-SAS/reflector/compare/0.1.0...v0.1.1) (2025-07-17)
---
**`CLAUDE.md`** — 23 lines changed

````diff
@@ -62,29 +62,28 @@ uv run python -m reflector.tools.process path/to/audio.wav
 **Setup:**
 ```bash
 # Install dependencies
-yarn install
+pnpm install

 # Copy configuration templates
 cp .env_template .env
 cp config-template.ts config.ts
 ```

 **Development:**
 ```bash
 # Start development server
-yarn dev
+pnpm dev

 # Generate TypeScript API client from OpenAPI spec
-yarn openapi
+pnpm openapi

 # Lint code
-yarn lint
+pnpm lint

 # Format code
-yarn format
+pnpm format

 # Build for production
-yarn build
+pnpm build
 ```

 ### Docker Compose (Full Stack)
@@ -144,9 +143,11 @@ All endpoints prefixed `/v1/`:
 **Backend** (`server/.env`):
 - `DATABASE_URL` - Database connection string
 - `REDIS_URL` - Redis broker for Celery
-- `MODAL_TOKEN_ID`, `MODAL_TOKEN_SECRET` - Modal.com GPU processing
+- `TRANSCRIPT_BACKEND=modal` + `TRANSCRIPT_MODAL_API_KEY` - Modal.com transcription
+- `DIARIZATION_BACKEND=modal` + `DIARIZATION_MODAL_API_KEY` - Modal.com diarization
+- `TRANSLATION_BACKEND=modal` + `TRANSLATION_MODAL_API_KEY` - Modal.com translation
 - `WHEREBY_API_KEY` - Video platform integration
-- `REFLECTOR_AUTH_BACKEND` - Authentication method (none, fief, jwt)
+- `REFLECTOR_AUTH_BACKEND` - Authentication method (none, jwt)

 **Frontend** (`www/.env`):
 - `NEXTAUTH_URL`, `NEXTAUTH_SECRET` - Authentication configuration
@@ -172,3 +173,7 @@ Modal.com integration for scalable ML processing:
 - **Audio Routing**: Use BlackHole (Mac) for merging multiple audio sources
 - **WebRTC**: Ensure proper CORS configuration for cross-origin streaming
 - **Database**: Run `uv run alembic upgrade head` after pulling schema changes
+
+## Pipeline/worker related info
+
+If you need to do any worker/pipeline related work, search for "Pipeline" classes and their "create" or "build" methods to find the main processor sequence. Look for task orchestration patterns (like "chord", "group", or "chain") to identify the post-processing flow with parallel execution chains. This will give you an abstract view of how the processing pipeline is organized.
````
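For orientation, the chord/group/chain shape that the new CLAUDE.md note tells readers to look for might look like the following minimal Celery sketch. The task names (`transcribe`, `diarize`, `summarize`) are illustrative placeholders, not Reflector's actual tasks:

```python
# Minimal sketch of the orchestration pattern described above; the task
# names and broker URL are assumptions for illustration only.
from celery import Celery, chord, group

app = Celery("reflector", broker="redis://localhost:6379/0")

@app.task
def transcribe(recording_id): ...

@app.task
def diarize(recording_id): ...

@app.task
def summarize(results, recording_id): ...

def build_postprocessing_flow(recording_id):
    # Run transcription and diarization in parallel (a "group"), then
    # invoke summarize once both finish -- the "chord" pattern to grep for.
    return chord(
        group(transcribe.s(recording_id), diarize.s(recording_id)),
        summarize.s(recording_id),
    )
```

Searching the codebase for `chord(` or `group(` should surface the real equivalent of `build_postprocessing_flow`.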
---
**`README.md`** — 91 lines changed

````diff
@@ -1,43 +1,60 @@
 <div align="center">
 <img width="100" alt="image" src="https://github.com/user-attachments/assets/66fb367b-2c89-4516-9912-f47ac59c6a7f"/>

 # Reflector

-Reflector Audio Management and Analysis is a cutting-edge web application under development by Monadical. It utilizes AI to record meetings, providing a permanent record with transcripts, translations, and automated summaries.
+Reflector is an AI-powered audio transcription and meeting analysis platform that provides real-time transcription, speaker diarization, translation and summarization for audio content and live meetings. It works 100% with local models (whisper/parakeet, pyannote, seamless-m4t, and your local llm like phi-4).

-[](https://github.com/monadical-sas/cubbi/actions/workflows/pytests.yml)
-[](https://opensource.org/licenses/AGPL-v3)
+[](https://github.com/monadical-sas/reflector/actions/workflows/test_server.yml)
+[](https://opensource.org/licenses/MIT)
 </div>

+## Screenshots
 <table>
 <tr>
 <td>
-<a href="https://github.com/user-attachments/assets/3a976930-56c1-47ef-8c76-55d3864309e3">
-<img width="700" alt="image" src="https://github.com/user-attachments/assets/3a976930-56c1-47ef-8c76-55d3864309e3" />
+<a href="https://github.com/user-attachments/assets/21f5597c-2930-4899-a154-f7bd61a59e97">
+<img width="700" alt="image" src="https://github.com/user-attachments/assets/21f5597c-2930-4899-a154-f7bd61a59e97" />
 </a>
 </td>
 <td>
-<a href="https://github.com/user-attachments/assets/bfe3bde3-08af-4426-a9a1-11ad5cd63b33">
-<img width="700" alt="image" src="https://github.com/user-attachments/assets/bfe3bde3-08af-4426-a9a1-11ad5cd63b33" />
+<a href="https://github.com/user-attachments/assets/f6b9399a-5e51-4bae-b807-59128d0a940c">
+<img width="700" alt="image" src="https://github.com/user-attachments/assets/f6b9399a-5e51-4bae-b807-59128d0a940c" />
 </a>
 </td>
 <td>
-<a href="https://github.com/user-attachments/assets/7b60c9d0-efe4-474f-a27b-ea13bd0fabdc">
-<img width="700" alt="image" src="https://github.com/user-attachments/assets/7b60c9d0-efe4-474f-a27b-ea13bd0fabdc" />
+<a href="https://github.com/user-attachments/assets/a42ce460-c1fd-4489-a995-270516193897">
+<img width="700" alt="image" src="https://github.com/user-attachments/assets/a42ce460-c1fd-4489-a995-270516193897" />
 </a>
 </td>
 <td>
 <a href="https://github.com/user-attachments/assets/21929f6d-c309-42fe-9c11-f1299e50fbd4">
 <img width="700" alt="image" src="https://github.com/user-attachments/assets/21929f6d-c309-42fe-9c11-f1299e50fbd4" />
 </a>
 </td>
 </tr>
 </table>

+## What is Reflector?
+
+Reflector is a web application that utilizes local models to process audio content, providing:
+
+- **Real-time Transcription**: Convert speech to text using [Whisper](https://github.com/openai/whisper) (multi-language) or [Parakeet](https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2) (English) models
+- **Speaker Diarization**: Identify and label different speakers using [Pyannote](https://github.com/pyannote/pyannote-audio) 3.1
+- **Live Translation**: Translate audio content in real-time to many languages with [Facebook Seamless-M4T](https://github.com/facebookresearch/seamless_communication)
+- **Topic Detection & Summarization**: Extract key topics and generate concise summaries using LLMs
+- **Meeting Recording**: Create permanent records of meetings with searchable transcripts
+
+Currently we provide [modal.com](https://modal.com/) gpu template to deploy.

 ## Background

 The project architecture consists of three primary components:

 - **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
 - **Back-End**: Python server that offers an API and data persistence, found in `server/`.
 - **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations. Most reliable option is Modal deployment
 - **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
 - **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations.

-It also uses authentik for authentication if activated, and Vercel for deployment and configuration of the front-end.
+It also uses authentik for authentication if activated.

 ## Contribution Guidelines

@@ -72,24 +89,25 @@ Note: We currently do not have instructions for Windows users.

 ## Installation

 *Note: we're working toward better installation, these instructions are not accurate for now*

 ### Frontend

-Start with `cd backend`.
+Start with `cd www`.

 **Installation**

 ```bash
-yarn install
-cp .env_template .env
-cp config-template.ts config.ts
+pnpm install
+cp .env.example .env
 ```

-Then, fill in the environment variables in `.env` and the configuration in `config.ts` as needed. If you are unsure on how to proceed, ask in Zulip.
+Then, fill in the environment variables in `.env` as needed. If you are unsure on how to proceed, ask in Zulip.

 **Run in development mode**

 ```bash
-yarn dev
+pnpm dev
 ```

 Then (after completing server setup and starting it) open [http://localhost:3000](http://localhost:3000) to view it in the browser.
@@ -99,7 +117,7 @@ Then (after completing server setup and starting it) open [http://localhost:3000

 To generate the TypeScript files from the openapi.json file, make sure the python server is running, then run:

 ```bash
-yarn openapi
+pnpm openapi
 ```

 ### Backend
@@ -149,3 +167,34 @@ You can manually process an audio file by calling the process tool:

 ```bash
 uv run python -m reflector.tools.process path/to/audio.wav
 ```

+
+## Feature Flags
+
+Reflector uses environment variable-based feature flags to control application functionality. These flags allow you to enable or disable features without code changes.
+
+### Available Feature Flags
+
+| Feature Flag | Environment Variable |
+|-------------|---------------------|
+| `requireLogin` | `NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN` |
+| `privacy` | `NEXT_PUBLIC_FEATURE_PRIVACY` |
+| `browse` | `NEXT_PUBLIC_FEATURE_BROWSE` |
+| `sendToZulip` | `NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP` |
+| `rooms` | `NEXT_PUBLIC_FEATURE_ROOMS` |
+
+### Setting Feature Flags
+
+Feature flags are controlled via environment variables using the pattern `NEXT_PUBLIC_FEATURE_{FEATURE_NAME}` where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.
+
+**Examples:**
+```bash
+# Enable user authentication requirement
+NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN=true
+
+# Disable browse functionality
+NEXT_PUBLIC_FEATURE_BROWSE=false
+
+# Enable Zulip integration
+NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP=true
+```
````
---
**`docker-compose.yml`** (filename inferred from content)

```diff
@@ -6,6 +6,7 @@ services:
       - 1250:1250
     volumes:
       - ./server/:/app/
+      - /app/.venv
     env_file:
       - ./server/.env
     environment:
@@ -16,6 +17,7 @@ services:
       context: server
     volumes:
       - ./server/:/app/
+      - /app/.venv
     env_file:
       - ./server/.env
     environment:
@@ -26,6 +28,7 @@ services:
       context: server
     volumes:
       - ./server/:/app/
+      - /app/.venv
     env_file:
       - ./server/.env
     environment:
@@ -39,11 +42,12 @@ services:
     image: node:18
     ports:
       - "3000:3000"
-    command: sh -c "yarn install && yarn dev"
+    command: sh -c "corepack enable && pnpm install && pnpm dev"
     restart: unless-stopped
     working_dir: /app
     volumes:
       - ./www:/app/
       - /app/node_modules
     env_file:
       - ./www/.env.local
```
---
**`docs/jitsi.md`** — new file (369 lines)

# Jitsi Integration for Reflector

This document contains research and planning notes for integrating Jitsi Meet as a replacement for Whereby in Reflector.

## Overview

Jitsi Meet is an open-source video conferencing solution that can replace Whereby in Reflector, providing:
- Cost reduction (no per-minute charges)
- Direct recording access via Jibri
- Real-time event webhooks
- Full customization and control

## Current Whereby Integration Analysis

### Architecture
1. **Room Creation**: User creates a "room" template in Reflector DB with settings
2. **Meeting Creation**: `/rooms/{room_name}/meeting` endpoint calls Whereby API to create meeting
3. **Recording**: Whereby handles recording automatically to S3 bucket
4. **Webhooks**: Whereby sends events for participant tracking

### Database Structure
```python
# Room = Template/Configuration
class Room:
    id, name, user_id
    recording_type, recording_trigger  # cloud, automatic-2nd-participant
    webhook_url, webhook_secret

# Meeting = Actual Whereby Meeting Instance
class Meeting:
    id  # Whereby meetingId
    room_name  # Generated by Whereby
    room_url, host_room_url  # Whereby URLs
    num_clients  # Updated via webhooks
```

## Jitsi Components

### Core Architecture
- **Jitsi Meet**: Web frontend (Next.js + React)
- **Prosody**: XMPP server for messaging/rooms
- **Jicofo**: Conference focus (orchestration)
- **JVB**: Videobridge (media routing)
- **Jibri**: Recording service
- **Jigasi**: SIP gateway (optional, for phone dial-in)

### Exposure Requirements
- **Web service**: 443/80 (frontend)
- **JVB**: 10000/UDP (media streams) - **MUST EXPOSE**
- **Prosody**: 5280 (BOSH/WebSocket) - can proxy via web
- **Jicofo, Jibri, Jigasi**: Internal only

## Recording with Jibri

### How Jibri Works
- Each Jibri instance handles **one recording at a time**
- Records mixed audio/video to MP4 format
- Uses Chrome headless + ffmpeg for capture
- Supports finalize scripts for post-processing

### Jibri Pool for Scaling
- Multiple Jibri instances join "jibribrewery" MUC
- Jicofo distributes recording requests to available instances
- Automatic load balancing and failover

```yaml
# Multiple Jibri instances
jibri1:
  environment:
    - JIBRI_INSTANCE_ID=jibri1
    - JIBRI_BREWERY_MUC=jibribrewery

jibri2:
  environment:
    - JIBRI_INSTANCE_ID=jibri2
    - JIBRI_BREWERY_MUC=jibribrewery
```

### Recording Automation Options
1. **Environment Variables**: `ENABLE_RECORDING=1`, `AUTO_RECORDING=1`
2. **URL Parameters**: `?config.autoRecord=true`
3. **JWT Token**: Include recording permissions in JWT
4. **API Control**: `api.executeCommand('startRecording')`

### Post-Processing Integration
```bash
#!/bin/bash
# finalize.sh - runs after recording completion
RECORDING_FILE=$1
MEETING_METADATA=$2
ROOM_NAME=$3

# Copy to Reflector-accessible location
cp "$RECORDING_FILE" /shared/reflector-uploads/

# Trigger Reflector processing
curl -X POST "http://reflector-api:8000/v1/transcripts/process" \
  -H "Content-Type: application/json" \
  -d "{
    \"file_path\": \"/shared/reflector-uploads/$(basename $RECORDING_FILE)\",
    \"room_name\": \"$ROOM_NAME\",
    \"source\": \"jitsi\"
  }"
```

## React Integration

### Official React SDK
```bash
npm i @jitsi/react-sdk
```

```jsx
import { JitsiMeeting } from '@jitsi/react-sdk'

<JitsiMeeting
  room="meeting-room"
  serverURL="https://your-jitsi.domain"
  jwt="your-jwt-token"
  config={{
    startWithAudioMuted: true,
    fileRecordingsEnabled: true,
    autoRecord: true
  }}
  onParticipantJoined={(participant) => {
    // Track participant events
  }}
  onRecordingStatusChanged={(status) => {
    // Handle recording events
  }}
/>
```

## Authentication & Room Control

### JWT-Based Access Control
```python
def generate_jitsi_jwt(payload):
    return jwt.encode({
        "aud": "jitsi",
        "iss": "reflector",
        "sub": "reflector-user",
        "room": payload["room"],
        "exp": int(payload["exp"].timestamp()),
        "context": {
            "user": {
                "name": payload["user_name"],
                "moderator": payload.get("moderator", False)
            },
            "features": {
                "recording": payload.get("recording", True)
            }
        }
    }, JITSI_JWT_SECRET)
```
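For context, here is a hedged sketch of how such a token might be minted and attached to a room URL. It assumes the PyJWT library (which provides the `jwt.encode` call used above) and a module-level `JITSI_JWT_SECRET`; the domain and room name are placeholders:

```python
# Hypothetical usage of generate_jitsi_jwt() above; assumes PyJWT
# (pip install pyjwt). Domain and room name are illustrative only.
from datetime import datetime, timedelta, timezone

import jwt  # PyJWT, backing the jwt.encode call in generate_jitsi_jwt

JITSI_JWT_SECRET = "change-me"  # must match Prosody's app_secret

token = generate_jitsi_jwt({
    "room": "reflector-demo-1700000000",
    "exp": datetime.now(timezone.utc) + timedelta(hours=2),
    "user_name": "Jane Doe",
    "moderator": True,
})
room_url = f"https://meet.example.com/reflector-demo-1700000000?jwt={token}"
```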
### Prevent Anonymous Room Creation
```bash
# Environment configuration
ENABLE_AUTH=1
ENABLE_GUESTS=0
AUTH_TYPE=jwt
JWT_APP_ID=reflector
JWT_APP_SECRET=your-secret-key
```

## Webhook Integration

### Real-time Events via Prosody
Custom event-sync module can send webhooks for:
- Participant join/leave
- Recording start/stop
- Room creation/destruction
- Mute/unmute events

```lua
-- mod_event_sync.lua
module:hook("muc-occupant-joined", function(event)
    send_event({
        type = "participant_joined",
        room = event.room.jid,
        participant = {
            nick = event.occupant.nick,
            jid = event.occupant.jid,
        },
        timestamp = os.time(),
    });
end);
```

### Jibri Recording Webhooks
```bash
# Environment variable
JIBRI_WEBHOOK_SUBSCRIBERS=https://your-reflector.com/webhooks/jibri
```

## Proposed Reflector Integration

### Modified Database Schema
```python
class Meeting(BaseModel):
    id: str  # Our generated meeting ID
    room_name: str  # Generated: reflector-{room.name}-{timestamp}
    room_url: str  # https://jitsi.domain/room_name?jwt=token
    host_room_url: str  # Same but with moderator JWT
    # Add Jitsi-specific fields
    jitsi_jwt: str  # JWT token
    jitsi_room_id: str  # Internal room identifier
    recording_status: str  # pending, recording, completed
    recording_file_path: Optional[str]
```

### API Replacement
```python
# Replace whereby.py with jitsi.py
async def create_meeting(room_name_prefix: str, end_date: datetime, room: Room):
    # Generate unique room name
    jitsi_room = f"reflector-{room.name}-{int(time.time())}"

    # Generate JWT tokens
    user_jwt = generate_jwt(room=jitsi_room, moderator=False, exp=end_date)
    host_jwt = generate_jwt(room=jitsi_room, moderator=True, exp=end_date)

    return {
        "meetingId": generate_uuid4(),  # Our ID
        "roomName": jitsi_room,
        "roomUrl": f"https://jitsi.domain/{jitsi_room}?jwt={user_jwt}",
        "hostRoomUrl": f"https://jitsi.domain/{jitsi_room}?jwt={host_jwt}",
        "startDate": datetime.now().isoformat(),
        "endDate": end_date.isoformat(),
    }
```

### Webhook Endpoints
```python
# Replace whereby webhook with jitsi webhooks
@router.post("/jitsi/events")
async def jitsi_events_webhook(event_data: dict):
    event_type = event_data.get("event")
    room_name = event_data.get("room", "").split("@")[0]

    meeting = await Meeting.get_by_room(room_name)

    if event_type == "muc-occupant-joined":
        # Update participant count
        meeting.num_clients += 1

    elif event_type == "jibri-recording-on":
        meeting.recording_status = "recording"

    elif event_type == "jibri-recording-off":
        meeting.recording_status = "processing"
        await process_meeting_recording.delay(meeting.id)

@router.post("/jibri/recording-complete")
async def recording_complete(data: dict):
    # Handle finalize script webhook
    room_name = data.get("room_name")
    file_path = data.get("file_path")

    meeting = await Meeting.get_by_room(room_name)
    meeting.recording_file_path = file_path
    meeting.recording_status = "completed"

    # Start Reflector processing
    await process_recording_for_transcription(meeting.id, file_path)
```

## Deployment with Docker

### Official docker-jitsi-meet
```bash
# Download official release
wget $(wget -q -O - https://api.github.com/repos/jitsi/docker-jitsi-meet/releases/latest | grep zip | cut -d\" -f4)

# Setup
mkdir -p ~/.jitsi-meet-cfg/{web,transcripts,prosody/config,prosody/prosody-plugins-custom,jicofo,jvb,jigasi,jibri}
./gen-passwords.sh  # Generate secure passwords
docker compose up -d
```

### Coolify Integration
```yaml
services:
  web:
    ports: ["80:80", "443:443"]
  jvb:
    ports: ["10000:10000/udp"]  # Must expose for media
  jibri1:
    environment:
      - JIBRI_INSTANCE_ID=jibri1
      - JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
  jibri2:
    environment:
      - JIBRI_INSTANCE_ID=jibri2
```

## Benefits vs Whereby

### Cost & Control
✅ **No per-minute charges** - significant cost savings
✅ **Full recording control** - direct file access
✅ **Custom branding** - complete UI control
✅ **Self-hosted** - no vendor lock-in

### Technical Advantages
✅ **Real-time events** - immediate webhook notifications
✅ **Rich participant metadata** - detailed tracking
✅ **JWT security** - token-based access with expiration
✅ **Multiple recording formats** - audio-only options
✅ **Scalable architecture** - horizontal Jibri scaling

### Integration Benefits
✅ **Same API surface** - minimal changes to existing code
✅ **React SDK** - better frontend integration
✅ **Direct processing** - no S3 download delays
✅ **Event-driven architecture** - better real-time capabilities

## Implementation Plan

1. **Deploy Jitsi Stack** - Set up docker-jitsi-meet with multiple Jibri instances
2. **Create jitsi.py** - Replace whereby.py with Jitsi API functions
3. **Update Database** - Add Jitsi-specific fields to Meeting model
4. **Webhook Integration** - Replace Whereby webhooks with Jitsi events
5. **Frontend Updates** - Replace Whereby embed with Jitsi React SDK
6. **Testing & Migration** - Gradual rollout with fallback to Whereby

## Recording Limitations & Considerations

### Current Limitations
- **Mixed audio only** - Jibri doesn't separate participant tracks natively
- **One recording per Jibri** - requires multiple instances for concurrent recordings
- **Chrome dependency** - Jibri uses headless Chrome for recording

### Metadata Capabilities
✅ **Participant join/leave timestamps** - via webhooks
✅ **Speaking time tracking** - via audio level events
✅ **Meeting duration** - precise timing
✅ **Room-specific data** - custom metadata in JWT

### Alternative Recording Methods
- **Local recording** - browser-based, per-participant
- **Custom recording** - lib-jitsi-meet for individual streams
- **Third-party solutions** - Recall.ai, Otter.ai integrations

## Security Considerations

### JWT Configuration
- **Room-specific tokens** - limit access to specific rooms
- **Time-based expiration** - automatic cleanup
- **Feature permissions** - control recording, moderation rights
- **User identification** - embed user metadata in tokens

### Access Control
- **No anonymous rooms** - all rooms require valid JWT
- **API-only creation** - prevent direct room access
- **Webhook verification** - HMAC signature validation (a sketch follows below)
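The HMAC verification mentioned above could look like the following sketch. It uses FastAPI and the standard-library `hmac`/`hashlib` modules; the header name `X-Jitsi-Signature` and the hex-digest scheme are assumptions, not a documented Jitsi contract:

```python
# Hypothetical HMAC check for incoming Jitsi/Prosody webhooks; the
# signature header name and digest format are assumptions.
import hashlib
import hmac

from fastapi import FastAPI, Header, HTTPException, Request

app = FastAPI()
JITSI_WEBHOOK_SECRET = b"change-me"  # shared with the event-sync module

@app.post("/v1/jitsi/events")
async def jitsi_events(request: Request, x_jitsi_signature: str = Header(...)):
    body = await request.body()
    expected = hmac.new(JITSI_WEBHOOK_SECRET, body, hashlib.sha256).hexdigest()
    # Constant-time comparison to avoid timing attacks.
    if not hmac.compare_digest(expected, x_jitsi_signature):
        raise HTTPException(status_code=401, detail="invalid signature")
    event = await request.json()
    return {"ok": True, "event": event.get("event")}
```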
## Next Steps

1. **Deploy test Jitsi instance** - validate recording pipeline
2. **Prototype jitsi.py** - create equivalent API functions
3. **Test webhook integration** - ensure event delivery works
4. **Performance testing** - validate multiple concurrent recordings
5. **Migration strategy** - plan gradual transition from Whereby

---

*This document serves as the comprehensive planning and research notes for Jitsi integration in Reflector. It should be updated as implementation progresses and new insights are discovered.*
---
**`docs/video-jitsi.md`** — new file (720 lines; the capture ends partway through this file)

# Jitsi Meet Integration Configuration Guide

This guide explains how to configure Reflector to use your self-hosted Jitsi Meet installation for video meetings, recording, and participant tracking.

## Overview

Jitsi Meet is an open-source video conferencing platform that can be self-hosted. Reflector integrates with Jitsi Meet to:

- Create secure meeting rooms with JWT authentication
- Track participant join/leave events via Prosody webhooks
- Record meetings using Jibri recording service
- Process recordings for transcription and analysis

## Requirements

### Self-Hosted Jitsi Meet

You need a complete Jitsi Meet installation including:

1. **Jitsi Meet Web Interface** - The main meeting interface
2. **Prosody XMPP Server** - Handles room management and authentication
3. **Jicofo (JItsi COnference FOcus)** - Manages media sessions
4. **Jitsi Videobridge (JVB)** - Handles WebRTC media routing
5. **Jibri Recording Service** - Records meetings (optional but recommended)

### System Requirements

- **Domain with SSL Certificate** - Required for WebRTC functionality
- **Prosody mod_event_sync** - For webhook event handling
- **JWT Authentication** - For secure room access control
- **Storage Solution** - For recording files (local or cloud)

## Configuration Variables

Add the following environment variables to your Reflector `.env` file:

### Required Variables

```bash
# Jitsi Meet Domain (without https://)
JITSI_DOMAIN=meet.example.com

# JWT Secret for room authentication (generate with: openssl rand -hex 32)
JITSI_JWT_SECRET=your-64-character-hex-secret-here

# Webhook secret for event handling (generate with: openssl rand -hex 16)
JITSI_WEBHOOK_SECRET=your-32-character-hex-secret-here
```

### Optional Variables

```bash
# Application identifier (should match Jitsi configuration)
JITSI_APP_ID=reflector

# JWT issuer and audience (should match Jitsi configuration)
JITSI_JWT_ISSUER=reflector
JITSI_JWT_AUDIENCE=jitsi
```
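As a sanity check that these variables line up end to end, a token minted against this configuration might look like the following sketch. It assumes the PyJWT library; the claim layout follows the common Jitsi token convention but is illustrative, not prescriptive:

```python
# Hypothetical sketch: mint a JWT whose iss/aud claims match the
# JITSI_* variables above. Assumes PyJWT (pip install pyjwt).
import os
import time

import jwt

token = jwt.encode(
    {
        "iss": os.environ.get("JITSI_JWT_ISSUER", "reflector"),
        "aud": os.environ.get("JITSI_JWT_AUDIENCE", "jitsi"),
        "sub": os.environ["JITSI_DOMAIN"],   # e.g. meet.example.com
        "room": "*",                          # or a specific room name
        "exp": int(time.time()) + 3600,       # 1 hour validity
    },
    os.environ["JITSI_JWT_SECRET"],
    algorithm="HS256",
)
print(token)  # open https://<JITSI_DOMAIN>/<room>?jwt=<token> to test
```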
## Installation Steps

### 1. Jitsi Meet Server Installation

#### Quick Installation (Ubuntu/Debian)

```bash
# Add Jitsi repository
curl -fsSL https://download.jitsi.org/jitsi-key.gpg.key | sudo gpg --dearmor -o /usr/share/keyrings/jitsi-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/jitsi-keyring.gpg] https://download.jitsi.org stable/" | sudo tee /etc/apt/sources.list.d/jitsi-stable.list

# Install Jitsi Meet
sudo apt update
sudo apt install jitsi-meet

# Configure SSL certificate
sudo /usr/share/jitsi-meet/scripts/install-letsencrypt-cert.sh
```

#### Docker Installation

```bash
# Clone Jitsi Docker repository
git clone https://github.com/jitsi/docker-jitsi-meet
cd docker-jitsi-meet

# Copy environment template
cp env.example .env

# Edit configuration
nano .env

# Start services
docker-compose up -d
```

### 2. JWT Authentication Setup

#### Update Prosody Configuration

Edit `/etc/prosody/conf.d/your-domain.cfg.lua`:

```lua
VirtualHost "meet.example.com"
    authentication = "token"
    app_id = "reflector"
    app_secret = "your-jwt-secret-here"

    -- Allow anonymous access for guests
    c2s_require_encryption = false
    admins = { "focusUser@auth.meet.example.com" }

    modules_enabled = {
        "bosh";
        "pubsub";
        "ping";
        "roster";
        "saslauth";
        "tls";
        "dialback";
        "disco";
        "carbons";
        "pep";
        "private";
        "blocklist";
        "vcard";
        "version";
        "uptime";
        "time";
        "register";
        "admin_adhoc";
        "token_verification";
        "event_sync"; -- Required for webhooks
    }
```

#### Configure Jitsi Meet Interface

Edit `/etc/jitsi/meet/your-domain-config.js`:

```javascript
var config = {
    hosts: {
        domain: 'meet.example.com',
        muc: 'conference.meet.example.com'
    },

    // Enable JWT authentication
    enableUserRolesBasedOnToken: true,

    // Recording configuration
    fileRecordingsEnabled: true,
    liveStreamingEnabled: false,

    // Reflector integration settings
    prejoinPageEnabled: true,
    requireDisplayName: true
};
```
### 3. Webhook Event Configuration

#### Install Event Sync Module

```bash
# Download the module
cd /usr/share/jitsi-meet/prosody-plugins/
wget https://raw.githubusercontent.com/jitsi-contrib/prosody-plugins/main/mod_event_sync.lua
```

#### Configure Event Sync

Add to your Prosody configuration:

```lua
Component "conference.meet.example.com" "muc"
    storage = "memory"
    modules_enabled = {
        "muc_meeting_id";
        "muc_domain_mapper";
        "polls";
        "event_sync"; -- Enable event sync
    }

    -- Event sync webhook configuration
    event_sync_url = "https://your-reflector-domain.com/v1/jitsi/events"
    event_sync_secret = "your-webhook-secret-here"

    -- Events to track
    event_sync_events = {
        "muc-occupant-joined",
        "muc-occupant-left",
        "jibri-recording-on",
        "jibri-recording-off"
    }
```

#### Webhook Event Payload Examples

**Participant Joined Event:**
```json
{
  "event": "muc-occupant-joined",
  "room": "reflector-my-room-uuid123",
  "timestamp": "2025-01-15T10:30:00.000Z",
  "data": {
    "occupant_id": "participant-456",
    "nick": "John Doe",
    "role": "participant",
    "affiliation": "none"
  }
}
```

**Recording Started Event:**
```json
{
  "event": "jibri-recording-on",
  "room": "reflector-my-room-uuid123",
  "timestamp": "2025-01-15T10:32:00.000Z",
  "data": {
    "recording_id": "rec-789",
    "initiator": "moderator-123"
  }
}
```

**Recording Completed Event:**
```json
{
  "room_name": "reflector-my-room-uuid123",
  "recording_file": "/var/recordings/rec-789.mp4",
  "recording_status": "completed",
  "timestamp": "2025-01-15T11:15:00.000Z"
}
```
### 4. Jibri Recording Setup (Optional)

#### Install Jibri

```bash
# Install Jibri package
sudo apt install jibri

# Create recording directory
sudo mkdir -p /var/recordings
sudo chown jibri:jibri /var/recordings
```

#### Configure Jibri

Edit `/etc/jitsi/jibri/jibri.conf`:

```hocon
jibri {
    recording {
        recordings-directory = "/var/recordings"
        finalize-script = "/opt/jitsi/jibri/finalize.sh"
    }

    api {
        xmpp {
            environments = [{
                name = "prod environment"
                xmpp-server-hosts = ["meet.example.com"]
                xmpp-domain = "meet.example.com"

                control-muc {
                    domain = "internal.auth.meet.example.com"
                    room-name = "JibriBrewery"
                    nickname = "jibri-nickname"
                }

                control-login {
                    domain = "auth.meet.example.com"
                    username = "jibri"
                    password = "jibri-password"
                }
            }]
        }
    }
}
```

#### Create Finalize Script

Create `/opt/jitsi/jibri/finalize.sh`:

```bash
#!/bin/bash
# Jibri finalize script for Reflector integration

RECORDING_FILE="$1"
ROOM_NAME="$2"
REFLECTOR_API_URL="${REFLECTOR_API_URL:-http://localhost:1250}"

# Prepare webhook payload
TIMESTAMP=$(date -u +%Y-%m-%dT%H:%M:%S.%3NZ)
PAYLOAD=$(cat <<EOF
{
  "room_name": "$ROOM_NAME",
  "recording_file": "$RECORDING_FILE",
  "recording_status": "completed",
  "timestamp": "$TIMESTAMP"
}
EOF
)

# Generate signature
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$JITSI_WEBHOOK_SECRET" | cut -d' ' -f2)

# Send webhook to Reflector
curl -X POST "$REFLECTOR_API_URL/v1/jibri/recording-complete" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $SIGNATURE" \
  -d "$PAYLOAD"

echo "Recording finalization webhook sent for room: $ROOM_NAME"
```

Make executable:
```bash
sudo chmod +x /opt/jitsi/jibri/finalize.sh
```

### 5. Restart Services

After configuration changes:

```bash
sudo systemctl restart prosody
sudo systemctl restart jicofo
sudo systemctl restart jitsi-videobridge2
sudo systemctl restart jibri
sudo systemctl restart nginx
```
## Room Configuration

### Creating Jitsi Rooms

Create rooms with the Jitsi platform in Reflector:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $AUTH_TOKEN" \
  -d '{
    "name": "my-jitsi-room",
    "platform": "jitsi",
    "recording_type": "cloud",
    "recording_trigger": "automatic-2nd-participant",
    "is_locked": false,
    "room_mode": "normal"
  }'
```

### Meeting Creation

Meetings automatically use JWT authentication:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms/my-jitsi-room/meeting" \
  -H "Authorization: Bearer $AUTH_TOKEN"
```

The response includes JWT-authenticated URLs:
```json
{
  "id": "meeting-uuid",
  "room_name": "reflector-my-jitsi-room-123456",
  "room_url": "https://meet.example.com/room?jwt=user-token",
  "host_room_url": "https://meet.example.com/room?jwt=moderator-token"
}
```
## Features and Capabilities

### JWT Authentication

Reflector automatically generates JWT tokens with the following properties (a sketch follows this list):
- **Room Access Control** - Secure room entry
- **User Roles** - Moderator vs participant permissions
- **Expiration** - Configurable token lifetime (default 8 hours)
- **Custom Claims** - Room-specific metadata
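As a concrete reference, a token with these properties can be produced with PyJWT. This is a minimal sketch of the standard Jitsi token shape, not Reflector's exact implementation; the claim values mirror the `JITSI_*` variables above:

```python
import time

import jwt  # PyJWT


def generate_jitsi_token(secret: str, room: str, moderator: bool,
                         lifetime_s: int = 8 * 3600) -> str:
    """Build a Jitsi-compatible JWT for a single room."""
    payload = {
        "iss": "reflector",            # must match JITSI_JWT_ISSUER / app_id
        "aud": "jitsi",                # must match JITSI_JWT_AUDIENCE
        "sub": "meet.example.com",     # your JITSI_DOMAIN
        "room": room,                  # room access control
        "exp": int(time.time()) + lifetime_s,  # expiration
        "context": {"user": {"moderator": moderator}},  # user role
    }
    return jwt.encode(payload, secret, algorithm="HS256")
```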
### Recording Options

**Recording Types:**
- `"none"` - No recording
- `"local"` - Local Jibri recording
- `"cloud"` - Cloud recording (requires external storage)

**Recording Triggers:**
- `"none"` - Manual recording only
- `"prompt"` - Prompt users to start
- `"automatic"` - Start immediately
- `"automatic-2nd-participant"` - Start when the 2nd person joins (see the sketch below)
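For illustration, the trigger logic reduces to a small decision made when a participant joins; a sketch (names hypothetical, not Reflector's actual code):

```python
def should_start_recording(recording_trigger: str, num_clients: int) -> bool:
    """Decide whether a recording should auto-start for the current headcount."""
    if recording_trigger == "automatic":
        return num_clients >= 1
    if recording_trigger == "automatic-2nd-participant":
        return num_clients >= 2
    return False  # "none" records only manually; "prompt" asks the user
```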
### Event Tracking and Storage

Reflector automatically stores all webhook events in the `meetings` table for comprehensive meeting analytics:

**Supported Event Types:**
- `muc-occupant-joined` - Participant joined the meeting
- `muc-occupant-left` - Participant left the meeting
- `jibri-recording-on` - Recording started
- `jibri-recording-off` - Recording stopped
- `recording_completed` - Recording file ready for processing

**Event Storage Structure:**
Each webhook event is stored as a JSON object in the `meetings.events` column:
```json
{
  "type": "muc-occupant-joined",
  "timestamp": "2025-01-15T10:30:00.123456Z",
  "data": {
    "timestamp": "2025-01-15T10:30:00Z",
    "user_id": "participant-123",
    "display_name": "John Doe"
  }
}
```
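A sketch of how an incoming webhook payload maps onto that stored shape (illustrative only; the helper name is an assumption, not Reflector's actual code):

```python
from datetime import datetime, timezone


def to_stored_event(payload: dict) -> dict:
    """Normalize a Prosody event_sync payload into a meetings.events entry."""
    return {
        "type": payload["event"],
        # Server-side receive time; the sender's own timestamp stays in data
        "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
        "data": payload.get("data", {}),
    }
```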
**Querying Stored Events:**
```sql
-- Get all events for a meeting
SELECT events FROM meeting WHERE id = 'meeting-uuid';

-- Count participant joins (SQLite syntax: expand the JSON array with json_each)
SELECT count(*) AS total_joins
FROM meeting, json_each(meeting.events)
WHERE meeting.id = 'meeting-uuid'
  AND json_extract(json_each.value, '$.type') = 'muc-occupant-joined';
```
## Testing and Verification

### Health Check

Test Jitsi webhook integration:

```bash
curl "https://your-reflector-domain.com/v1/jitsi/health"
```

Expected response:
```json
{
  "status": "ok",
  "service": "jitsi-webhooks",
  "timestamp": "2025-01-15T10:30:00.000Z",
  "webhook_secret_configured": true
}
```

### JWT Token Testing

Verify JWT generation works:
```bash
# Create a test meeting
MEETING=$(curl -X POST "https://your-reflector-domain.com/v1/rooms/test-room/meeting" \
  -H "Authorization: Bearer $AUTH_TOKEN" | jq -r '.room_url')

echo "Test meeting URL: $MEETING"
```

### Webhook Testing

#### Manual Webhook Event Testing

Test participant join event:
```bash
# Generate proper signature
PAYLOAD='{"event":"muc-occupant-joined","room":"reflector-test-room-uuid","timestamp":"2025-01-15T10:30:00.000Z","data":{"user_id":"test-user","display_name":"Test User"}}'
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$JITSI_WEBHOOK_SECRET" | cut -d' ' -f2)

curl -X POST "https://your-reflector-domain.com/v1/jitsi/events" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $SIGNATURE" \
  -d "$PAYLOAD"
```

Expected response:
```json
{
  "status": "ok",
  "event": "muc-occupant-joined",
  "room": "reflector-test-room-uuid"
}
```

#### Recording Webhook Testing

Test recording completion event:
```bash
PAYLOAD='{"room_name":"reflector-test-room-uuid","recording_file":"/recordings/test.mp4","recording_status":"completed","timestamp":"2025-01-15T10:30:00.000Z"}'
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$JITSI_WEBHOOK_SECRET" | cut -d' ' -f2)

curl -X POST "https://your-reflector-domain.com/v1/jibri/recording-complete" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $SIGNATURE" \
  -d "$PAYLOAD"
```

#### Event Storage Verification

Verify events were stored:
```bash
# Check meeting events via API (requires authentication)
curl -H "Authorization: Bearer $AUTH_TOKEN" \
  "https://your-reflector-domain.com/v1/meetings/{meeting-id}"
```
## Troubleshooting

### Common Issues

#### JWT Authentication Failures

**Symptoms**: Users cannot join rooms, "Authentication failed" errors

**Solutions**:
1. Verify `JITSI_JWT_SECRET` matches the Prosody configuration
2. Check the JWT token hasn't expired (default 8 hours)
3. Ensure system clocks are synchronized between servers
4. Validate the JWT issuer/audience configuration matches

**Debug JWT tokens**:
```bash
# Decode JWT payload (JWTs use base64url, so translate the alphabet first)
echo "JWT_TOKEN_HERE" | cut -d'.' -f2 | tr '_-' '/+' | base64 -d 2>/dev/null | jq
```

#### Webhook Events Not Received

**Symptoms**: Participant counts not updating, no recording events

**Solutions**:
1. Verify `mod_event_sync` is loaded in Prosody
2. Check the webhook URL is accessible from the Jitsi server
3. Validate webhook signature generation
4. Review Prosody and Reflector logs

**Debug webhook connectivity**:
```bash
# Test from Jitsi server
curl -v "https://your-reflector-domain.com/v1/jitsi/health"

# Check Prosody logs
sudo tail -f /var/log/prosody/prosody.log
```

#### Webhook Signature Verification Issues

**Symptoms**: HTTP 401 "Invalid webhook signature" errors

**Solutions**:
1. Verify the webhook secret matches between Jitsi and Reflector
2. Check payload encoding (no extra whitespace)
3. Ensure proper HMAC-SHA256 signature generation

**Debug signature generation**:
```bash
# Test signature manually
PAYLOAD='{"event":"test","room":"test","timestamp":"2025-01-15T10:30:00.000Z","data":{}}'
SECRET="your-webhook-secret-here"

# Generate signature (should match X-Jitsi-Signature header)
echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2

# Test with curl
curl -X POST "https://your-reflector-domain.com/v1/jitsi/events" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2)" \
  -d "$PAYLOAD" -v
```

#### Event Storage Problems

**Symptoms**: Events received but not stored in the database

**Solutions**:
1. Check database connectivity and permissions
2. Verify the meeting exists before event processing
3. Review Reflector application logs
4. Ensure JSON column support in the database

**Debug event storage**:
```bash
# Check meeting exists
curl -H "Authorization: Bearer $TOKEN" \
  "https://your-reflector-domain.com/v1/meetings/{meeting-id}"

# Monitor database queries (if using PostgreSQL)
sudo -u postgres psql -c "SELECT * FROM pg_stat_activity WHERE query LIKE '%meeting%';"

# Check Reflector logs for event processing
sudo journalctl -u reflector -f | grep -E "(event|webhook|jitsi)"
```

#### Recording Issues

**Symptoms**: Recordings not starting, finalize script errors

**Solutions**:
1. Verify Jibri service status: `sudo systemctl status jibri`
2. Check recording directory permissions: `/var/recordings`
3. Validate finalize script execution permissions
4. Monitor Jibri logs: `sudo journalctl -u jibri -f`

**Test finalize script**:
```bash
sudo -u jibri /opt/jitsi/jibri/finalize.sh "/test/recording.mp4" "test-room"
```

#### Meeting Creation Failures

**Symptoms**: HTTP 500 errors when creating meetings

**Solutions**:
1. Check Reflector logs for JWT generation errors
2. Verify all required environment variables are set
3. Ensure the Jitsi domain is accessible from Reflector
4. Test the JWT secret configuration

### Debug Commands

```bash
# Verify Prosody configuration
sudo prosodyctl check config

# Check Jitsi services status
sudo systemctl status prosody jicofo jitsi-videobridge2

# Test JWT generation
curl -X POST "https://your-reflector-domain.com/v1/rooms/test/meeting" \
  -H "Authorization: Bearer $TOKEN" -v

# Monitor webhook events
sudo tail -f /var/log/reflector/app.log | grep jitsi

# Check SSL certificates
sudo certbot certificates
```
### Performance Optimization

#### Scaling Considerations

**Single Server Limits:**
- ~50 concurrent participants per JVB instance
- ~10 concurrent Jibri recordings
- CPU and bandwidth become bottlenecks

**Multi-Server Setup:**
- Multiple JVB instances for scaling
- Dedicated Jibri recording servers
- Load balancing for high availability

#### Resource Monitoring

```bash
# Monitor JVB performance
sudo systemctl status jitsi-videobridge2
sudo journalctl -u jitsi-videobridge2 -f

# Check Prosody connections
sudo prosodyctl mod_admin_telnet
> c2s:show()
> muc:rooms()
```

## Security Best Practices

### JWT Security
- Use strong, unique secrets (32+ characters)
- Rotate JWT secrets regularly
- Implement proper token expiration
- Never log or expose JWT tokens

### Network Security
- Use HTTPS/WSS for all communications
- Implement proper firewall rules
- Consider VPN for server-to-server communication
- Monitor for unauthorized access attempts

### Recording Security
- Encrypt recordings at rest
- Implement access controls for recording files
- Regular security audits of file permissions
- Comply with data protection regulations

## Migration from Whereby

If migrating from Whereby to Jitsi:

1. **Parallel Setup** - Configure Jitsi alongside existing Whereby
2. **Room Migration** - Update room platform field to "jitsi"
3. **Test Integration** - Verify meeting creation and webhooks
4. **User Training** - Different UI and feature set
5. **Monitor Performance** - Watch for issues during transition
6. **Cleanup** - Remove Whereby configuration when stable

## Support and Resources

### Jitsi Community Resources
- **Documentation**: [jitsi.github.io/handbook](https://jitsi.github.io/handbook/)
- **Community Forum**: [community.jitsi.org](https://community.jitsi.org/)
- **GitHub Issues**: [github.com/jitsi/jitsi-meet](https://github.com/jitsi/jitsi-meet)

### Professional Support
- **8x8 Commercial Support** - Professional Jitsi hosting and support
- **Community Consulting** - Third-party Jitsi implementation services

### Monitoring and Maintenance
- Monitor system resources (CPU, memory, bandwidth)
- Regular security updates for all components
- Backup configuration files and certificates
- Test disaster recovery procedures

276 docs/video-whereby.md Normal file
@@ -0,0 +1,276 @@
# Whereby Integration Configuration Guide

This guide explains how to configure Reflector to use Whereby as your video meeting platform for room creation, recording, and participant tracking.

## Overview

Whereby is a browser-based video meeting platform that provides hosted meeting rooms with recording capabilities. Reflector integrates with Whereby's API to:

- Create secure meeting rooms with custom branding
- Handle participant join/leave events via webhooks
- Automatically record meetings to AWS S3 storage
- Track meeting sessions and participant counts

## Requirements

### Whereby Account Setup

1. **Whereby Account**: Sign up for a Whereby business account at [whereby.com](https://whereby.com/business)
2. **API Access**: Request API access from Whereby support (required for programmatic room creation)
3. **Webhook Configuration**: Configure webhooks in your Whereby dashboard to point to your Reflector instance

### AWS S3 Storage

Whereby requires AWS S3 for recording storage. You need:
- An AWS account with S3 access
- A dedicated S3 bucket for Whereby recordings
- AWS IAM credentials with S3 write permissions

## Configuration Variables

Add the following environment variables to your Reflector `.env` file:

### Required Variables

```bash
# Whereby API Configuration
WHEREBY_API_KEY=your-whereby-jwt-api-key
WHEREBY_WEBHOOK_SECRET=your-webhook-secret-from-whereby

# AWS S3 Storage for Recordings
AWS_WHEREBY_ACCESS_KEY_ID=your-aws-access-key
AWS_WHEREBY_ACCESS_KEY_SECRET=your-aws-secret-key
RECORDING_STORAGE_AWS_BUCKET_NAME=your-s3-bucket-name
```

### Optional Variables

```bash
# Whereby API URL (defaults to production)
WHEREBY_API_URL=https://api.whereby.dev/v1

# SQS Configuration (for recording processing)
AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.region.amazonaws.com/account/queue
SQS_POLLING_TIMEOUT_SECONDS=60
```

## Configuration Steps

### 1. Whereby API Key Setup

1. **Contact Whereby Support** to request API access for your account
2. **Generate JWT Token** in your Whereby dashboard under API settings
3. **Copy the JWT token** and set it as `WHEREBY_API_KEY` in your environment

The API key is a JWT token that looks like:
```
eyJ[...truncated JWT token...]
```

### 2. Webhook Configuration

1. **Access Whereby Dashboard** and navigate to webhook settings
2. **Set Webhook URL** to your Reflector instance:
   ```
   https://your-reflector-domain.com/v1/whereby
   ```
3. **Configure Events** to send the following event types:
   - `room.client.joined` - When participants join
   - `room.client.left` - When participants leave
4. **Generate Webhook Secret** and set it as `WHEREBY_WEBHOOK_SECRET`
5. **Save Configuration** in your Whereby dashboard

### 3. AWS S3 Storage Setup

1. **Create S3 Bucket** dedicated to Whereby recordings
2. **Create IAM User** with programmatic access
3. **Attach S3 Policy** with the following permissions:
   ```json
   {
     "Version": "2012-10-17",
     "Statement": [
       {
         "Effect": "Allow",
         "Action": [
           "s3:PutObject",
           "s3:PutObjectAcl",
           "s3:GetObject"
         ],
         "Resource": "arn:aws:s3:::your-bucket-name/*"
       }
     ]
   }
   ```
4. **Configure Environment Variables** with the IAM credentials (a quick credentials check follows this list)
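To sanity-check the credentials before wiring them into Reflector, a quick boto3 probe works; a sketch using the `AWS_WHEREBY_*` values above (bucket and key names are placeholders):

```python
import boto3

# Credentials from AWS_WHEREBY_ACCESS_KEY_ID / AWS_WHEREBY_ACCESS_KEY_SECRET
s3 = boto3.client(
    "s3",
    aws_access_key_id="your-aws-access-key",
    aws_secret_access_key="your-aws-secret-key",
)

# PutObject is the permission Whereby needs most; this exercises it directly
s3.put_object(Bucket="your-s3-bucket-name", Key="whereby-test.txt", Body=b"ok")
print(s3.get_object(Bucket="your-s3-bucket-name", Key="whereby-test.txt")["Body"].read())
```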
### 4. Room Configuration

When creating rooms in Reflector, set the platform to use Whereby:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $AUTH_TOKEN" \
  -d '{
    "name": "my-whereby-room",
    "platform": "whereby",
    "recording_type": "cloud",
    "recording_trigger": "automatic-2nd-participant",
    "is_locked": false,
    "room_mode": "normal"
  }'
```

## Meeting Features

### Recording Options

Whereby supports three recording types:
- **`none`**: No recording
- **`local`**: Local recording (not recommended for production)
- **`cloud`**: Cloud recording to S3 (recommended)

### Recording Triggers

Control when recordings start:
- **`none`**: No automatic recording
- **`prompt`**: Prompt users to start recording
- **`automatic`**: Start immediately when meeting begins
- **`automatic-2nd-participant`**: Start when second participant joins

### Room Modes

- **`normal`**: Standard meeting room
- **`group`**: Group meeting with advanced features

## Webhook Event Handling

Reflector automatically handles these Whereby webhook events:

### Participant Tracking

A `room.client.joined` event carries the meeting ID and the current client count (a handling sketch follows the example):
```json
{
  "type": "room.client.joined",
  "data": {
    "meetingId": "room-uuid",
    "numClients": 2
  }
}
```
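A minimal sketch of how such an event can be folded into the meeting record (illustrative only; the controller and lookup names are assumptions based on the payload above, not Reflector's actual code):

```python
async def handle_whereby_event(event: dict) -> None:
    """Update the stored participant count from a Whereby webhook event."""
    data = event.get("data", {})
    # Hypothetical lookup by the Whereby meeting identifier
    meeting = await meetings_controller.get_by_id(data["meetingId"])
    if meeting and event["type"] in ("room.client.joined", "room.client.left"):
        # Whereby already reports the authoritative headcount in numClients
        await meetings_controller.update_meeting(
            meeting.id, num_clients=data["numClients"]
        )
```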
### Recording Events
Whereby sends recording completion events that trigger Reflector's processing pipeline:
- Audio transcription
- Speaker diarization
- Summary generation

## Troubleshooting

### Common Issues

#### API Authentication Errors
**Symptoms**: 401 Unauthorized errors when creating meetings

**Solutions**:
1. Verify your `WHEREBY_API_KEY` is correct and not expired
2. Ensure you have API access enabled on your Whereby account
3. Contact Whereby support if API access is not available

#### Webhook Signature Validation Failed
**Symptoms**: Webhook events rejected with 401 errors

**Solutions**:
1. Verify `WHEREBY_WEBHOOK_SECRET` matches your Whereby dashboard configuration
2. Check webhook URL is correctly configured in Whereby dashboard
3. Ensure webhook endpoint is accessible from Whereby servers

#### Recording Upload Failures
**Symptoms**: Recordings not appearing in S3 bucket

**Solutions**:
1. Verify AWS credentials have S3 write permissions
2. Check S3 bucket name is correct and accessible
3. Ensure AWS region settings match your bucket location
4. Review AWS CloudTrail logs for permission issues

#### Participant Count Not Updating
**Symptoms**: Meeting participant counts remain at 0

**Solutions**:
1. Verify webhook events are being received at `/v1/whereby`
2. Check webhook signature validation is passing
3. Ensure meeting IDs match between Whereby and Reflector database

### Debug Commands

```bash
# Test Whereby API connectivity
curl -H "Authorization: Bearer $WHEREBY_API_KEY" \
  https://api.whereby.dev/v1/meetings

# Check webhook endpoint health
curl https://your-reflector-domain.com/v1/whereby/health

# Verify S3 bucket access
aws s3 ls s3://your-bucket-name --profile whereby-user
```

## Security Considerations

### API Key Security
- Store API keys securely using environment variables
- Rotate API keys regularly
- Never commit API keys to version control
- Use separate keys for development and production

### Webhook Security
- Always validate webhook signatures using HMAC-SHA256
- Use HTTPS for all webhook endpoints
- Implement rate limiting on webhook endpoints
- Monitor webhook events for suspicious activity

### Recording Privacy
- Ensure S3 bucket access is restricted to authorized users
- Consider encryption at rest for sensitive recordings
- Implement retention policies for recorded content
- Comply with data protection regulations (GDPR, etc.)

## Performance Optimization

### Meeting Scaling
- Monitor concurrent meeting limits on your Whereby plan
- Implement meeting cleanup for expired sessions
- Use appropriate room modes for different use cases

### Recording Processing
- Configure SQS for asynchronous recording processing
- Monitor S3 storage usage and costs
- Implement automatic cleanup of processed recordings

### Webhook Reliability
- Implement webhook retry mechanisms
- Monitor webhook delivery success rates
- Log webhook events for debugging and auditing

## Migration from Other Platforms

If migrating from another video platform:

1. **Update Room Configuration**: Change existing rooms to use `"platform": "whereby"`
2. **Configure Webhooks**: Set up Whereby webhook endpoints
3. **Test Integration**: Verify meeting creation and event handling
4. **Monitor Performance**: Watch for any issues during transition
5. **Update Documentation**: Inform users of any workflow changes

## Support

For Whereby-specific issues:
- **Whereby Support**: [whereby.com/support](https://whereby.com/support)
- **API Documentation**: [whereby.dev](https://whereby.dev)
- **Status Page**: [status.whereby.com](https://status.whereby.com)

For Reflector integration issues:
- Check application logs for error details
- Verify environment variable configuration
- Test webhook connectivity and authentication
- Review AWS permissions and S3 access

474 docs/video_platforms.md Normal file
@@ -0,0 +1,474 @@
# Video Platforms Architecture (PR #529 Analysis)

This document analyzes the video platforms refactoring implemented in PR #529 for the daily.co integration, providing a blueprint for extending support to Jitsi and other video conferencing platforms.

## Overview

The video platforms refactoring introduces a clean abstraction layer that allows Reflector to support multiple video conferencing providers (Whereby, Daily.co, etc.) without changing core application logic. This architecture enables:

- Seamless switching between video platforms
- Platform-specific feature support
- Isolated platform code organization
- Consistent API surface across platforms
- Feature flags for gradual migration

## Architecture Components

### 1. **Directory Structure**

```
server/reflector/video_platforms/
├── __init__.py      # Public API exports
├── base.py          # Abstract base classes
├── factory.py       # Platform client factory
├── registry.py      # Platform registration system
├── whereby.py       # Whereby implementation
├── daily.py         # Daily.co implementation
└── mock.py          # Testing implementation
```

### 2. **Core Abstract Classes**

#### `VideoPlatformClient` (base.py)
Abstract base class defining the interface all platforms must implement:

```python
class VideoPlatformClient(ABC):
    PLATFORM_NAME: str = ""

    @abstractmethod
    async def create_meeting(self, room_name_prefix: str, end_date: datetime, room: Room) -> MeetingData: ...

    @abstractmethod
    async def get_room_sessions(self, room_name: str) -> Dict[str, Any]: ...

    @abstractmethod
    async def delete_room(self, room_name: str) -> bool: ...

    @abstractmethod
    async def upload_logo(self, room_name: str, logo_path: str) -> bool: ...

    @abstractmethod
    def verify_webhook_signature(self, body: bytes, signature: str, timestamp: Optional[str] = None) -> bool: ...
```

#### `MeetingData` (base.py)
Standardized meeting data structure returned by all platforms:

```python
class MeetingData(BaseModel):
    meeting_id: str
    room_name: str
    room_url: str
    host_room_url: str
    platform: str
    extra_data: Dict[str, Any] = {}  # Platform-specific data
```

#### `VideoPlatformConfig` (base.py)
Unified configuration structure for all platforms:

```python
class VideoPlatformConfig(BaseModel):
    api_key: str
    webhook_secret: str
    api_url: Optional[str] = None
    subdomain: Optional[str] = None
    s3_bucket: Optional[str] = None
    s3_region: Optional[str] = None
    aws_role_arn: Optional[str] = None
    aws_access_key_id: Optional[str] = None
    aws_access_key_secret: Optional[str] = None
```

### 3. **Platform Registration System**

#### Registry Pattern (registry.py)
- Automatic registration of built-in platforms
- Runtime platform discovery
- Type-safe client instantiation

```python
# Auto-registration of platforms
_PLATFORMS: Dict[str, Type[VideoPlatformClient]] = {}

def register_platform(name: str, client_class: Type[VideoPlatformClient]) -> None: ...
def get_platform_client(platform: str, config: VideoPlatformConfig) -> VideoPlatformClient: ...
```

#### Factory System (factory.py)
- Configuration management per platform
- Platform selection logic
- Feature flag integration

```python
def get_platform_for_room(room_id: Optional[str] = None) -> str:
    """Determine which platform to use based on feature flags."""
    if not settings.DAILY_MIGRATION_ENABLED:
        return "whereby"

    if room_id and room_id in settings.DAILY_MIGRATION_ROOM_IDS:
        return "daily"

    return settings.DEFAULT_VIDEO_PLATFORM
```

### 4. **Database Schema Changes**

#### Room Model Updates
Added a `platform` field to track which video platform each room uses:

```python
# Database Schema
platform_column = sqlalchemy.Column(
    "platform",
    sqlalchemy.String,
    nullable=False,
    server_default="whereby"
)

# Pydantic Model
class Room(BaseModel):
    platform: Literal["whereby", "daily"] = "whereby"
```

#### Meeting Model Updates
Added a `platform` field to meetings for tracking and debugging:

```python
# Database Schema
platform_column = sqlalchemy.Column(
    "platform",
    sqlalchemy.String,
    nullable=False,
    server_default="whereby"
)

# Pydantic Model
class Meeting(BaseModel):
    platform: Literal["whereby", "daily"] = "whereby"
```

**Key Decision**: No platform-specific fields were added to the models. Instead, the `extra_data` field in `MeetingData` handles platform-specific information, following the user's rule of using a generic `provider_data` JSON field if needed.

### 5. **Settings Configuration**

#### Feature Flags
```python
# Migration control
DAILY_MIGRATION_ENABLED: bool = True
DAILY_MIGRATION_ROOM_IDS: list[str] = []
DEFAULT_VIDEO_PLATFORM: str = "daily"

# Daily.co specific settings
DAILY_API_KEY: str | None = None
DAILY_WEBHOOK_SECRET: str | None = None
DAILY_SUBDOMAIN: str | None = None
AWS_DAILY_S3_BUCKET: str | None = None
AWS_DAILY_S3_REGION: str = "us-west-2"
AWS_DAILY_ROLE_ARN: str | None = None
```

#### Configuration Pattern
Each platform gets its own configuration namespace while sharing common patterns:

```python
def get_platform_config(platform: str) -> VideoPlatformConfig:
    if platform == "whereby":
        return VideoPlatformConfig(
            api_key=settings.WHEREBY_API_KEY or "",
            webhook_secret=settings.WHEREBY_WEBHOOK_SECRET or "",
            # ... whereby-specific config
        )
    elif platform == "daily":
        return VideoPlatformConfig(
            api_key=settings.DAILY_API_KEY or "",
            webhook_secret=settings.DAILY_WEBHOOK_SECRET or "",
            # ... daily-specific config
        )
```
### 6. **API Integration Updates**

#### Room Creation (views/rooms.py)
Updated to use the platform factory instead of direct Whereby calls:

```python
@router.post("/rooms/{room_name}/meeting")
async def rooms_create_meeting(room_name: str, user: UserInfo):
    # OLD: Direct Whereby integration
    # whereby_meeting = await create_meeting("", end_date=end_date, room=room)

    # NEW: Platform abstraction
    platform = get_platform_for_room(room.id)
    client = create_platform_client(platform)

    meeting_data = await client.create_meeting(
        room_name_prefix=room.name, end_date=end_date, room=room
    )

    await client.upload_logo(meeting_data.room_name, "./images/logo.png")
```

### 7. **Webhook Handling**

#### Separate Webhook Endpoints
Each platform gets its own webhook endpoint with platform-specific signature verification:

```python
# views/daily.py
@router.post("/daily_webhook")
async def daily_webhook(event: DailyWebhookEvent, request: Request):
    # Verify Daily.co signature
    body = await request.body()
    signature = request.headers.get("X-Daily-Signature", "")

    if not verify_daily_webhook_signature(body, signature):
        raise HTTPException(status_code=401)

    # Handle platform-specific events
    if event.type == "participant.joined":
        await _handle_participant_joined(event)
```

#### Consistent Event Handling
Despite different event formats, the core business logic remains the same:

```python
async def _handle_participant_joined(event):
    room_name = event.data.get("room", {}).get("name")  # Daily.co format
    meeting = await meetings_controller.get_by_room_name(room_name)
    if meeting:
        current_count = getattr(meeting, "num_clients", 0)
        await meetings_controller.update_meeting(
            meeting.id, num_clients=current_count + 1
        )
```

### 8. **Worker Task Integration**

#### New Task for Daily.co Recording Processing
Added platform-specific recording processing while maintaining the same pipeline:

```python
@shared_task
@asynctask
async def process_recording_from_url(recording_url: str, meeting_id: str, recording_id: str):
    """Process a recording from a direct URL (Daily.co webhook)."""
    logger.info("Processing recording from URL for meeting: %s", meeting_id)
    # Uses the same processing pipeline as Whereby S3 recordings
```

**Key Decision**: Worker tasks remain in the main worker module but could be moved to platform-specific folders as suggested by the user.

### 9. **Testing Infrastructure**

#### Comprehensive Test Suite
- Unit tests for each platform client
- Integration tests for platform switching
- Mock platform for testing without external dependencies
- Webhook signature verification tests

```python
class TestPlatformIntegration:
    """Integration tests for platform switching."""

    async def test_platform_switching_preserves_interface(self):
        """Test that different platforms provide a consistent interface."""
        # Test both Mock and Daily platforms return MeetingData objects
        # with consistent fields
```

## Implementation Patterns for Jitsi Integration

Based on the daily.co implementation, here's how Jitsi should be integrated:

### 1. **Jitsi Client Implementation**

```python
# video_platforms/jitsi.py
class JitsiClient(VideoPlatformClient):
    PLATFORM_NAME = "jitsi"

    async def create_meeting(self, room_name_prefix: str, end_date: datetime, room: Room) -> MeetingData:
        # Generate a unique room name
        jitsi_room = f"reflector-{room.name}-{int(time.time())}"

        # Generate JWT tokens
        user_jwt = self._generate_jwt(room=jitsi_room, moderator=False, exp=end_date)
        host_jwt = self._generate_jwt(room=jitsi_room, moderator=True, exp=end_date)

        return MeetingData(
            meeting_id=generate_uuid4(),
            room_name=jitsi_room,
            room_url=f"https://jitsi.domain/{jitsi_room}?jwt={user_jwt}",
            host_room_url=f"https://jitsi.domain/{jitsi_room}?jwt={host_jwt}",
            platform=self.PLATFORM_NAME,
            extra_data={"user_jwt": user_jwt, "host_jwt": host_jwt}
        )
```
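The `_generate_jwt` helper is referenced but not spelled out above; a minimal sketch using PyJWT and HS256 (claim names follow the standard Jitsi token format; `JITSI_APP_ID` is an assumption carried over from the configuration guide, and the import path is a guess):

```python
from datetime import datetime

import jwt  # PyJWT

from reflector.settings import settings  # assumed import path


def _generate_jwt(self, room: str, moderator: bool, exp: datetime) -> str:
    """Sign a per-room token; moderator status rides in the Jitsi user context."""
    payload = {
        "iss": settings.JITSI_APP_ID,   # assumed setting, see the config guide
        "aud": "jitsi",
        "sub": settings.JITSI_DOMAIN,
        "room": room,
        "exp": int(exp.timestamp()),
        "context": {"user": {"moderator": moderator}},
    }
    return jwt.encode(payload, settings.JITSI_JWT_SECRET, algorithm="HS256")
```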
### 2. **Settings Integration**

```python
# settings.py
JITSI_DOMAIN: str = "meet.jit.si"
JITSI_JWT_SECRET: str | None = None
JITSI_WEBHOOK_SECRET: str | None = None
JITSI_API_URL: str | None = None  # If using a Jitsi API
```

### 3. **Factory Registration**

```python
# registry.py
def _register_builtin_platforms():
    from .jitsi import JitsiClient
    register_platform("jitsi", JitsiClient)

# factory.py (new branch in the existing get_platform_config if/elif chain)
    elif platform == "jitsi":
        return VideoPlatformConfig(
            api_key="",  # Jitsi may not need an API key
            webhook_secret=settings.JITSI_WEBHOOK_SECRET or "",
            api_url=settings.JITSI_API_URL,
        )
```

### 4. **Webhook Integration**

```python
# views/jitsi.py
@router.post("/jitsi/events")
async def jitsi_events_webhook(event_data: dict):
    # Handle the Prosody event-sync webhook format
    event_type = event_data.get("event")
    room_name = event_data.get("room", "").split("@")[0]

    if event_type == "muc-occupant-joined":
        # Same participant handling logic as other platforms
        ...
```

## Key Benefits of This Architecture

### 1. **Isolation and Organization**
- Platform-specific code contained in separate modules
- No platform logic leaking into core application
- Easy to add/remove platforms without affecting others

### 2. **Consistent Interface**
- All platforms implement the same abstract methods
- Standardized `MeetingData` structure
- Uniform error handling and logging

### 3. **Gradual Migration Support**
- Feature flags for controlled rollouts
- Room-specific platform selection
- Fallback mechanisms for platform failures

### 4. **Configuration Management**
- Centralized settings per platform
- Consistent naming patterns
- Environment-based configuration

### 5. **Testing and Quality**
- Mock platform for testing
- Comprehensive test coverage
- Platform-specific test utilities

## Migration Strategy Applied

The daily.co implementation demonstrates a careful migration approach:

### 1. **Backward Compatibility**
- Default platform remains "whereby"
- Existing rooms continue using Whereby unless explicitly migrated
- Same API endpoints and response formats

### 2. **Feature Flag Control**
```python
# Gradual rollout control
DAILY_MIGRATION_ENABLED: bool = True
DAILY_MIGRATION_ROOM_IDS: list[str] = []  # Specific rooms to migrate
DEFAULT_VIDEO_PLATFORM: str = "daily"     # Default for new rooms
```

### 3. **Data Integrity**
- Platform field tracks which service each room/meeting uses
- No data loss during migration
- Platform-specific data preserved in `extra_data`

### 4. **Monitoring and Rollback**
- Comprehensive logging of platform selection
- Easy rollback by changing feature flags
- Platform-specific error tracking

## Recommendations for Jitsi Integration

Based on this analysis and the user's requirements:

### 1. **Follow the Pattern**
- Create a `video_platforms/jitsi/` directory with:
  - `client.py` - Main JitsiClient implementation
  - `tasks.py` - Jitsi-specific worker tasks
  - `__init__.py` - Module exports

### 2. **Settings Organization**
- Use the `JITSI_*` prefix for all Jitsi settings
- Follow the same configuration pattern as Daily.co
- Support both environment variables and config files

### 3. **Generic Database Fields**
- Avoid platform-specific columns in the database
- Use a `provider_data` JSON field if platform-specific data is needed
- Keep the `platform` field as a simple string identifier

### 4. **Worker Task Migration**
According to the user's requirements, migrate platform-specific tasks:
```
video_platforms/
├── whereby/
│   ├── client.py (moved from whereby.py)
│   └── tasks.py (moved from worker/whereby_tasks.py)
├── daily/
│   ├── client.py (moved from daily.py)
│   └── tasks.py (moved from worker/daily_tasks.py)
└── jitsi/
    ├── client.py (new JitsiClient)
    └── tasks.py (new Jitsi recording tasks)
```

### 5. **Webhook Architecture**
- Create `views/jitsi.py` for Jitsi-specific webhooks
- Follow the same signature verification pattern
- Reuse the existing participant tracking logic

## Implementation Checklist for Jitsi

- [ ] Create the `video_platforms/jitsi/` directory structure
- [ ] Implement `JitsiClient` following the abstract interface
- [ ] Add Jitsi settings to the configuration
- [ ] Register the Jitsi platform in the factory/registry
- [ ] Create the Jitsi webhook endpoint
- [ ] Implement JWT token generation for room access
- [ ] Add Jitsi recording processing tasks
- [ ] Create a comprehensive test suite
- [ ] Update database migrations for the platform field
- [ ] Document Jitsi-specific configuration

## Conclusion

The video platforms refactoring in PR #529 provides an excellent foundation for adding Jitsi support. The architecture is well designed, with clear separation of concerns, consistent interfaces, and excellent extensibility. The daily.co implementation demonstrates how to add a new platform while maintaining backward compatibility and providing gradual migration capabilities.

The pattern should be directly applicable to Jitsi integration, with the main differences being:
- JWT-based authentication instead of API keys
- Different webhook event formats
- Jibri recording pipeline integration
- Self-hosted deployment considerations

This architecture successfully achieves the user's goals of:
1. Settings-based configuration
2. Generic database fields (no provider-specific columns)
3. Platform isolation in separate directories
4. Worker task organization within platform folders
@@ -1,21 +0,0 @@
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=***REMOVED***

LLM_BACKEND=modal
LLM_URL=https://monadical-sas--reflector-llm-web.modal.run
LLM_MODAL_API_KEY=***REMOVED***

AUTH_BACKEND=fief
AUTH_FIEF_URL=https://auth.reflector.media/reflector-local
AUTH_FIEF_CLIENT_ID=***REMOVED***
AUTH_FIEF_CLIENT_SECRET=<ask in zulip>

TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
ZEPHYR_LLM_URL=https://monadical-sas--reflector-llm-zephyr-web.modal.run
DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run

BASE_URL=https://xxxxx.ngrok.app
DIARIZATION_ENABLED=false

SQS_POLLING_TIMEOUT_SECONDS=60
4 server/.gitignore vendored
@@ -176,7 +176,9 @@ artefacts/
audio_*.wav

# ignore local database
reflector.sqlite3
*.sqlite3
*.db
data/

dump.rdb


@@ -1,7 +1,8 @@
FROM python:3.12-slim

ENV PYTHONUNBUFFERED=1 \
    UV_LINK_MODE=copy
    UV_LINK_MODE=copy \
    UV_NO_CACHE=1

# builder install base dependencies
WORKDIR /tmp
@@ -13,8 +14,8 @@ ENV PATH="/root/.local/bin/:$PATH"
# install application dependencies
RUN mkdir -p /app
WORKDIR /app
COPY pyproject.toml uv.lock /app/
RUN touch README.md && env uv sync --compile-bytecode --locked
COPY pyproject.toml uv.lock README.md /app/
RUN uv sync --compile-bytecode --locked

# pre-download nltk packages
RUN uv run python -c "import nltk; nltk.download('punkt_tab'); nltk.download('averaged_perceptron_tagger_eng')"
@@ -26,4 +27,15 @@ COPY migrations /app/migrations
COPY reflector /app/reflector
WORKDIR /app

# Create symlink for libgomp if it doesn't exist (for ARM64 compatibility)
RUN if [ "$(uname -m)" = "aarch64" ] && [ ! -f /usr/lib/libgomp.so.1 ]; then \
    LIBGOMP_PATH=$(find /app/.venv/lib -path "*/torch.libs/libgomp*.so.*" 2>/dev/null | head -n1); \
    if [ -n "$LIBGOMP_PATH" ]; then \
        ln -sf "$LIBGOMP_PATH" /usr/lib/libgomp.so.1; \
    fi \
    fi

# Pre-check just to make sure the image will not fail
RUN uv run python -c "import silero_vad.model"

CMD ["./runserver.sh"]

@@ -20,3 +20,25 @@ Polls SQS every 60 seconds via /server/reflector/worker/process.py:24-62:
# Every 60 seconds, check for new recordings
sqs = boto3.client("sqs", ...)
response = sqs.receive_message(QueueUrl=queue_url, ...)

# Requeue

```bash
uv run /app/requeue_uploaded_file.py TRANSCRIPT_ID
```

## Pipeline Management

### Continue a stuck pipeline from the final summaries (identify_participants) step:

```bash
uv run python -c "from reflector.pipelines.main_live_pipeline import task_pipeline_final_summaries; result = task_pipeline_final_summaries.delay(transcript_id='TRANSCRIPT_ID'); print(f'Task queued: {result.id}')"
```

### Run the full post-processing pipeline (continues to completion):

```bash
uv run python -c "from reflector.pipelines.main_live_pipeline import pipeline_post; pipeline_post(transcript_id='TRANSCRIPT_ID')"
```

212 server/contrib/jitsi/README.md Normal file
@@ -0,0 +1,212 @@
# Event Logger for Docker-Jitsi-Meet

A Prosody module that logs Jitsi meeting events to JSONL files alongside recordings, enabling complete participant tracking and speaker statistics.

## Prerequisites

- A running docker-jitsi-meet installation
- Jibri configured for recording

## Installation

### Step 1: Copy the Module

Copy the Prosody module to your custom plugins directory:

```bash
# Create the directory if it doesn't exist
mkdir -p ~/.jitsi-meet-cfg/prosody/prosody-plugins-custom

# Copy the module
cp mod_event_logger.lua ~/.jitsi-meet-cfg/prosody/prosody-plugins-custom/
```

### Step 2: Update Your .env File

Add or modify these variables in your `.env` file:

```bash
# If XMPP_MUC_MODULES already exists, append event_logger
# Example: XMPP_MUC_MODULES=existing_module,event_logger
XMPP_MUC_MODULES=event_logger

# Optional: Configure the module (these are the defaults)
JIBRI_RECORDINGS_PATH=/config/recordings
JIBRI_LOG_SPEAKER_STATS=true
JIBRI_SPEAKER_STATS_INTERVAL=10
```

**Important**: If you already have `XMPP_MUC_MODULES` defined, add `event_logger` to the comma-separated list:
```bash
# Existing modules + our module
XMPP_MUC_MODULES=mod_info,mod_alert,event_logger
```

### Step 3: Modify docker-compose.yml

Add a shared recordings volume so Prosody can write events alongside Jibri recordings:

```yaml
services:
  prosody:
    # ... existing configuration ...
    volumes:
      - ${CONFIG}/prosody/config:/config:Z
      - ${CONFIG}/prosody/prosody-plugins-custom:/prosody-plugins-custom:Z
      - ${CONFIG}/recordings:/config/recordings:Z  # Add this line
    environment:
      # Add if not using .env file
      - XMPP_MUC_MODULES=${XMPP_MUC_MODULES:-event_logger}
      - JIBRI_RECORDINGS_PATH=/config/recordings

  jibri:
    # ... existing configuration ...
    volumes:
      - ${CONFIG}/jibri:/config:Z
      - ${CONFIG}/recordings:/config/recordings:Z  # Add this line
    environment:
      # For Reflector webhook integration (optional)
      - REFLECTOR_WEBHOOK_URL=${REFLECTOR_WEBHOOK_URL:-}
      - JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
```

### Step 4: Add the Finalize Script (Optional - For Reflector Integration)

If you want to notify Reflector when recordings complete:

```bash
# Copy the finalize script
cp finalize.sh ~/.jitsi-meet-cfg/jibri/finalize.sh
chmod +x ~/.jitsi-meet-cfg/jibri/finalize.sh

# Add to .env
REFLECTOR_WEBHOOK_URL=http://your-reflector-api:8000
```

### Step 5: Restart Services

```bash
docker-compose down
docker-compose up -d
```

## What Gets Created

After a recording, you'll find in `~/.jitsi-meet-cfg/recordings/{session-id}/`:
- `recording.mp4` - The video recording (created by Jibri)
- `metadata.json` - Basic metadata (created by Jibri)
- `events.jsonl` - Complete participant timeline (created by this module)

## Event Format

Each line in `events.jsonl` is a JSON object:

```json
{"type":"room_created","timestamp":1234567890,"room_name":"TestRoom","room_jid":"testroom@conference.meet.jitsi","meeting_url":"https://meet.jitsi/TestRoom"}
{"type":"recording_started","timestamp":1234567891,"room_name":"TestRoom","session_id":"20240115120000_TestRoom","jibri_jid":"jibri@recorder.meet.jitsi"}
{"type":"participant_joined","timestamp":1234567892,"room_name":"TestRoom","participant":{"jid":"user1@meet.jitsi/web","nick":"John Doe","id":"user1@meet.jitsi","is_moderator":false}}
{"type":"speaker_active","timestamp":1234567895,"room_name":"TestRoom","speaker_jid":"user1@meet.jitsi","speaker_nick":"John Doe","duration":10}
{"type":"participant_left","timestamp":1234567920,"room_name":"TestRoom","participant":{"jid":"user1@meet.jitsi/web","nick":"John Doe","duration_seconds":28}}
{"type":"recording_stopped","timestamp":1234567950,"room_name":"TestRoom","session_id":"20240115120000_TestRoom","meeting_url":"https://meet.jitsi/TestRoom"}
```
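Because the file is plain JSONL, downstream tooling stays trivial. A sketch of tallying speaking time per participant from such a file (field names match the examples above):

```python
import json
from collections import defaultdict

# Sum the "speaker_active" durations per participant nickname
talk_seconds: dict[str, int] = defaultdict(int)
with open("events.jsonl") as f:
    for line in f:
        event = json.loads(line)
        if event["type"] == "speaker_active":
            talk_seconds[event["speaker_nick"]] += event["duration"]

for nick, seconds in sorted(talk_seconds.items(), key=lambda kv: -kv[1]):
    print(f"{nick}: {seconds}s")
```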
|
||||
## Configuration Options
|
||||
|
||||
All configuration can be done via environment variables:
|
||||
|
||||
| Environment Variable | Default | Description |
|
||||
|---------------------|---------|-------------|
|
||||
| `JIBRI_RECORDINGS_PATH` | `/config/recordings` | Path where recordings are stored |
|
||||
| `JIBRI_LOG_SPEAKER_STATS` | `true` | Enable speaker statistics logging |
|
||||
| `JIBRI_SPEAKER_STATS_INTERVAL` | `10` | Seconds between speaker stats updates |
|
||||
|
||||
## Verifying Installation
|
||||
|
||||
Check that the module is loaded:
|
||||
```bash
|
||||
docker-compose logs prosody | grep "Event Logger"
|
||||
# Should see: "Event Logger loaded - writing to /config/recordings"
|
||||
```
|
||||
|
||||
Check for events after a recording:
|
||||
```bash
|
||||
ls -la ~/.jitsi-meet-cfg/recordings/*/events.jsonl
|
||||
cat ~/.jitsi-meet-cfg/recordings/*/events.jsonl | jq .
|
||||
```

## Troubleshooting

### No events.jsonl file created

1. **Check module is enabled**:
   ```bash
   docker-compose exec prosody grep -r "event_logger" /config
   ```

2. **Verify volume permissions**:
   ```bash
   docker-compose exec prosody ls -la /config/recordings
   ```

3. **Check Prosody logs for errors**:
   ```bash
   docker-compose logs prosody | grep -i error
   ```

### Module not loading

1. **Verify file exists in container**:
   ```bash
   docker-compose exec prosody ls -la /prosody-plugins-custom/
   ```

2. **Check XMPP_MUC_MODULES format** (must be comma-separated, no spaces):
   - ✅ Correct: `XMPP_MUC_MODULES=mod1,mod2,event_logger`
   - ❌ Wrong: `XMPP_MUC_MODULES=mod1, mod2, event_logger`

## Common docker-compose.yml Patterns

### Minimal Addition (if you trust defaults)

```yaml
services:
  prosody:
    volumes:
      - ${CONFIG}/recordings:/config/recordings:Z  # Just add this
```

### Full Configuration

```yaml
services:
  prosody:
    volumes:
      - ${CONFIG}/prosody/config:/config:Z
      - ${CONFIG}/prosody/prosody-plugins-custom:/prosody-plugins-custom:Z
      - ${CONFIG}/recordings:/config/recordings:Z
    environment:
      - XMPP_MUC_MODULES=event_logger
      - JIBRI_RECORDINGS_PATH=/config/recordings
      - JIBRI_LOG_SPEAKER_STATS=true
      - JIBRI_SPEAKER_STATS_INTERVAL=10

  jibri:
    volumes:
      - ${CONFIG}/jibri:/config:Z
      - ${CONFIG}/recordings:/config/recordings:Z
    environment:
      - JIBRI_RECORDING_DIR=/config/recordings
      - JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
```

## Integration with Reflector

The finalize.sh script will automatically notify Reflector when a recording completes if `REFLECTOR_WEBHOOK_URL` is set. Reflector will receive:

```json
{
  "session_id": "20240115120000_TestRoom",
  "path": "20240115120000_TestRoom",
  "meeting_url": "https://meet.jitsi/TestRoom"
}
```

Reflector then processes the recording along with the complete participant timeline from `events.jsonl`.
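
To exercise the finalize script before pointing it at a real Reflector deployment, a throwaway receiver can stand in for the webhook endpoint. A minimal sketch (FastAPI; a hypothetical stub, not Reflector's actual handler):

```python
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class RecordingReady(BaseModel):
    session_id: str
    path: str
    meeting_url: str = ""


@app.post("/api/v1/jibri/recording-ready")
async def recording_ready(payload: RecordingReady):
    # Echo what finalize.sh sent so you can inspect the payload
    print(f"recording ready: {payload.session_id} ({payload.meeting_url})")
    return {"status": "ok"}
```

Run it with e.g. `uvicorn stub:app --port 8000` and point `REFLECTOR_WEBHOOK_URL=http://localhost:8000` at it.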

49 server/contrib/jitsi/finalize.sh (Executable file)
@@ -0,0 +1,49 @@
#!/bin/bash
# Jibri finalize script to notify Reflector when recording is complete
# This script is called by Jibri with the recording directory as argument

RECORDING_PATH="$1"
SESSION_ID=$(basename "$RECORDING_PATH")
METADATA_FILE="$RECORDING_PATH/metadata.json"

# Extract meeting URL from Jibri's metadata
MEETING_URL=""
if [ -f "$METADATA_FILE" ]; then
    MEETING_URL=$(jq -r '.meeting_url' "$METADATA_FILE" 2>/dev/null || echo "")
fi

echo "[$(date)] Recording finalized: $RECORDING_PATH"
echo "[$(date)] Session ID: $SESSION_ID"
echo "[$(date)] Meeting URL: $MEETING_URL"

# Check if events.jsonl was created by our Prosody module
if [ -f "$RECORDING_PATH/events.jsonl" ]; then
    EVENT_COUNT=$(wc -l < "$RECORDING_PATH/events.jsonl")
    echo "[$(date)] Found events.jsonl with $EVENT_COUNT events"
else
    echo "[$(date)] Warning: No events.jsonl found"
fi

# Notify Reflector if webhook URL is configured
if [ -n "$REFLECTOR_WEBHOOK_URL" ]; then
    echo "[$(date)] Notifying Reflector at: $REFLECTOR_WEBHOOK_URL"

    RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$REFLECTOR_WEBHOOK_URL/api/v1/jibri/recording-ready" \
        -H "Content-Type: application/json" \
        -d "{\"session_id\":\"$SESSION_ID\",\"path\":\"$SESSION_ID\",\"meeting_url\":\"$MEETING_URL\"}")

    HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
    BODY=$(echo "$RESPONSE" | sed '$d')

    if [ "$HTTP_CODE" = "200" ]; then
        echo "[$(date)] Reflector notified successfully"
        echo "[$(date)] Response: $BODY"
    else
        echo "[$(date)] Failed to notify Reflector. HTTP code: $HTTP_CODE"
        echo "[$(date)] Response: $BODY"
    fi
else
    echo "[$(date)] No REFLECTOR_WEBHOOK_URL configured, skipping notification"
fi

echo "[$(date)] Finalize script completed"

372 server/contrib/jitsi/mod_event_logger.lua (Normal file)
@@ -0,0 +1,372 @@
local json = require "util.json"
local st = require "util.stanza"
local jid_bare = require "util.jid".bare

local recordings_path = os.getenv("JIBRI_RECORDINGS_PATH") or
    module:get_option_string("jibri_recordings_path", "/recordings")

-- room_jid -> { session_id, participants = {jid -> info} }
local active_recordings = {}
-- room_jid -> { participants = {jid -> info}, created_at }
local room_states = {}

local function get_timestamp()
    return os.time()
end

local function write_event(session_id, event)
    if not session_id then
        module:log("warn", "No session_id for event: %s", event.type)
        return
    end

    local session_dir = string.format("%s/%s", recordings_path, session_id)
    local event_file = string.format("%s/events.jsonl", session_dir)

    module:log("info", "Writing event %s to %s", event.type, event_file)

    -- Create directory
    local mkdir_cmd = string.format("mkdir -p '%s' 2>&1", session_dir)
    local mkdir_result = os.execute(mkdir_cmd)
    module:log("debug", "mkdir result: %s", tostring(mkdir_result))

    local file, err = io.open(event_file, "a")
    if file then
        local json_str = json.encode(event)
        file:write(json_str .. "\n")
        file:close()
        module:log("info", "Successfully wrote event %s", event.type)
    else
        module:log("error", "Failed to write event to %s: %s", event_file, err)
    end
end

local function extract_participant_info(occupant)
    local info = {
        jid = occupant.jid,
        bare_jid = occupant.bare_jid,
        nick = occupant.nick,
        display_name = nil,
        role = occupant.role
    }

    local presence = occupant:get_presence()
    if presence then
        local nick_element = presence:get_child("nick", "http://jabber.org/protocol/nick")
        if nick_element then
            info.display_name = nick_element:get_text()
        end

        local identity = presence:get_child("identity")
        if identity then
            local user = identity:get_child("user")
            if user then
                local name = user:get_child("name")
                if name then
                    info.display_name = name:get_text()
                end

                local id_element = user:get_child("id")
                if id_element then
                    info.id = id_element:get_text()
                end
            end
        end

        if not info.display_name and occupant.nick then
            local _, _, resource = occupant.nick:match("([^@]+)@([^/]+)/(.+)")
            if resource then
                info.display_name = resource
            end
        end
    end

    return info
end

local function get_room_participant_count(room)
    local count = 0
    for _ in room:each_occupant() do
        count = count + 1
    end
    return count
end

local function snapshot_room_participants(room)
    local participants = {}
    local total = 0
    local skipped = 0

    module:log("info", "Snapshotting room participants")

    for _, occupant in room:each_occupant() do
        total = total + 1
        -- Skip recorders (Jibri)
        if occupant.bare_jid and (occupant.bare_jid:match("^recorder@") or
                occupant.bare_jid:match("^jibri@")) then
            skipped = skipped + 1
        else
            local info = extract_participant_info(occupant)
            participants[occupant.jid] = info
            module:log("debug", "Added participant: %s", info.display_name or info.bare_jid)
        end
    end

    module:log("info", "Snapshot: %d total, %d participants", total, total - skipped)
    return participants
end

-- Import utility functions if available
local util = module:require "util";
local get_room_from_jid = util.get_room_from_jid;
local room_jid_match_rewrite = util.room_jid_match_rewrite;

-- Main IQ handler for Jibri stanzas
module:hook("pre-iq/full", function(event)
    local stanza = event.stanza
    if stanza.name ~= "iq" then
        return
    end

    local jibri = stanza:get_child('jibri', 'http://jitsi.org/protocol/jibri')
    if not jibri then
        return
    end

    module:log("info", "=== Jibri IQ intercepted ===")

    local action = jibri.attr.action
    local session_id = jibri.attr.session_id
    local room_jid = jibri.attr.room
    local recording_mode = jibri.attr.recording_mode
    local app_data = jibri.attr.app_data

    module:log("info", "Jibri %s - session: %s, room: %s, mode: %s",
        action or "?", session_id or "?", room_jid or "?", recording_mode or "?")

    if not room_jid or not session_id then
        module:log("warn", "Missing room_jid or session_id")
        return
    end

    -- Get the room using util function
    local room = get_room_from_jid(room_jid_match_rewrite(jid_bare(stanza.attr.to)))
    if not room then
        -- Try with the room_jid directly
        room = get_room_from_jid(room_jid)
    end

    if not room then
        module:log("error", "Room not found for jid: %s", room_jid)
        return
    end

    module:log("info", "Room found: %s", room:get_name() or room_jid)

    if action == "start" then
        module:log("info", "Recording START for session %s", session_id)

        -- Count and snapshot participants
        local participant_count = 0
        for _ in room:each_occupant() do
            participant_count = participant_count + 1
        end

        local participants = snapshot_room_participants(room)
        local participant_list = {}
        for jid, info in pairs(participants) do
            table.insert(participant_list, info)
        end

        active_recordings[room_jid] = {
            session_id = session_id,
            participants = participants,
            started_at = get_timestamp()
        }

        write_event(session_id, {
            type = "recording_started",
            timestamp = get_timestamp(),
            room_jid = room_jid,
            room_name = room:get_name(),
            session_id = session_id,
            recording_mode = recording_mode,
            app_data = app_data,
            participant_count = participant_count,
            participants_at_start = participant_list
        })

    elseif action == "stop" then
        module:log("info", "Recording STOP for session %s", session_id)

        local recording = active_recordings[room_jid]
        if recording and recording.session_id == session_id then
            write_event(session_id, {
                type = "recording_stopped",
                timestamp = get_timestamp(),
                room_jid = room_jid,
                room_name = room:get_name(),
                session_id = session_id,
                duration = get_timestamp() - recording.started_at,
                participant_count = get_room_participant_count(room)
            })

            active_recordings[room_jid] = nil
        else
            module:log("warn", "No active recording found for room %s", room_jid)
        end
    end
end);

-- Room and participant event hooks
local function setup_room_hooks(host_module)
    module:log("info", "Setting up room hooks on %s", host_module.host or "unknown")

    -- Room created
    host_module:hook("muc-room-created", function(event)
        local room = event.room
        local room_jid = room.jid

        room_states[room_jid] = {
            participants = {},
            created_at = get_timestamp()
        }

        module:log("info", "Room created: %s", room_jid)
    end)

    -- Room destroyed
    host_module:hook("muc-room-destroyed", function(event)
        local room = event.room
        local room_jid = room.jid

        room_states[room_jid] = nil
        active_recordings[room_jid] = nil

        module:log("info", "Room destroyed: %s", room_jid)
    end)

    -- Occupant joined
    host_module:hook("muc-occupant-joined", function(event)
        local room = event.room
        local occupant = event.occupant
        local room_jid = room.jid

        -- Skip recorders
        if occupant.bare_jid and (occupant.bare_jid:match("^recorder@") or
                occupant.bare_jid:match("^jibri@")) then
            return
        end

        local participant_info = extract_participant_info(occupant)

        -- Update room state
        if room_states[room_jid] then
            room_states[room_jid].participants[occupant.jid] = participant_info
        end

        -- Log to active recording if exists
        local recording = active_recordings[room_jid]
        if recording then
            recording.participants[occupant.jid] = participant_info

            write_event(recording.session_id, {
                type = "participant_joined",
                timestamp = get_timestamp(),
                room_jid = room_jid,
                room_name = room:get_name(),
                participant = participant_info,
                participant_count = get_room_participant_count(room)
            })
        end

        module:log("info", "Participant joined %s: %s (%d total)",
            room:get_name() or room_jid,
            participant_info.display_name or participant_info.bare_jid,
            get_room_participant_count(room))
    end)

    -- Occupant left
    host_module:hook("muc-occupant-left", function(event)
        local room = event.room
        local occupant = event.occupant
        local room_jid = room.jid

        -- Skip recorders
        if occupant.bare_jid and (occupant.bare_jid:match("^recorder@") or
                occupant.bare_jid:match("^jibri@")) then
            return
        end

        local participant_info = extract_participant_info(occupant)

        -- Update room state
        if room_states[room_jid] then
            room_states[room_jid].participants[occupant.jid] = nil
        end

        -- Log to active recording if exists
        local recording = active_recordings[room_jid]
        if recording then
            if recording.participants[occupant.jid] then
                recording.participants[occupant.jid] = nil
            end

            write_event(recording.session_id, {
                type = "participant_left",
                timestamp = get_timestamp(),
                room_jid = room_jid,
                room_name = room:get_name(),
                participant = participant_info,
                participant_count = get_room_participant_count(room)
            })
        end

        module:log("info", "Participant left %s: %s (%d remaining)",
            room:get_name() or room_jid,
            participant_info.display_name or participant_info.bare_jid,
            get_room_participant_count(room))
    end)
end

-- Module initialization
local current_host = module:get_host()
local host_type = module:get_host_type()

module:log("info", "Event Logger loading on %s (type: %s)", current_host, host_type or "unknown")
module:log("info", "Recording path: %s", recordings_path)

-- Setup room hooks based on host type
if host_type == "component" and current_host:match("^[^.]+%.") then
    setup_room_hooks(module)
else
    -- Try to find and hook to MUC component
    local process_host_module = util.process_host_module
    local muc_component_host = module:get_option_string("muc_component") or
        module:get_option_string("main_muc")

    if not muc_component_host then
        local possible_hosts = {
            "muc." .. current_host,
            "conference." .. current_host,
            "rooms." .. current_host
        }

        for _, host in ipairs(possible_hosts) do
            if prosody.hosts[host] then
                muc_component_host = host
                module:log("info", "Auto-detected MUC component: %s", muc_component_host)
                break
            end
        end
    end

    if muc_component_host then
        process_host_module(muc_component_host, function(host_module, host)
            module:log("info", "Hooking to MUC events on %s", host)
            setup_room_hooks(host_module)
        end)
    else
        module:log("error", "Could not find MUC component")
    end
end

95 server/docs/data_retention.md (Normal file)
@@ -0,0 +1,95 @@
# Data Retention and Cleanup

## Overview

For public instances of Reflector, a data retention policy is automatically enforced to delete anonymous user data after a configurable period (default: 7 days). This ensures compliance with privacy expectations and prevents unbounded storage growth.

## Configuration

### Environment Variables

- `PUBLIC_MODE` (bool): Must be set to `true` to enable automatic cleanup
- `PUBLIC_DATA_RETENTION_DAYS` (int): Number of days to retain anonymous data (default: 7)

### What Gets Deleted

When data reaches the retention period, the following items are automatically removed (a sketch of the selection criterion follows the list):

1. **Transcripts** from anonymous users (where `user_id` is NULL):
   - Database records
   - Local files (audio.wav, audio.mp3, audio.json waveform)
   - Storage files (cloud storage if configured)
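
Illustratively, the cutoff is derived from `PUBLIC_DATA_RETENTION_DAYS` and applied only to ownerless records (hypothetical column names; Reflector's actual model and query may differ):

```python
from datetime import datetime, timedelta, timezone

retention_days = 7  # PUBLIC_DATA_RETENTION_DAYS
cutoff = datetime.now(timezone.utc) - timedelta(days=retention_days)

# Illustrative filter: anonymous transcripts older than the cutoff.
# transcripts.select().where(
#     transcripts.c.user_id.is_(None),
#     transcripts.c.created_at < cutoff,
# )
```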

## Automatic Cleanup

### Celery Beat Schedule

When `PUBLIC_MODE=true`, a Celery beat task runs daily at 3 AM to clean up old data:

```python
# Automatically scheduled when PUBLIC_MODE=true
"cleanup_old_public_data": {
    "task": "reflector.worker.cleanup.cleanup_old_public_data",
    "schedule": crontab(hour=3, minute=0),  # Daily at 3 AM
}
```

### Running the Worker

Ensure both Celery worker and beat scheduler are running:

```bash
# Start Celery worker
uv run celery -A reflector.worker.app worker --loglevel=info

# Start Celery beat scheduler (in another terminal)
uv run celery -A reflector.worker.app beat
```

## Manual Cleanup

For testing or manual intervention, use the cleanup tool:

```bash
# Delete data older than 7 days (default)
uv run python -m reflector.tools.cleanup_old_data

# Delete data older than 30 days
uv run python -m reflector.tools.cleanup_old_data --days 30
```

Note: The manual tool uses the same implementation as the Celery worker task to ensure consistency.

## Important Notes

1. **User Data Deletion**: Only anonymous data (where `user_id` is NULL) is deleted. Authenticated user data is preserved.

2. **Storage Cleanup**: The system properly cleans up both local files and cloud storage when configured.

3. **Error Handling**: If individual deletions fail, the cleanup continues and logs errors. Failed deletions are reported in the task output.

4. **Public Instance Only**: The automatic cleanup task only runs when `PUBLIC_MODE=true` to prevent accidental data loss in private deployments.

## Testing

Run the cleanup tests:

```bash
uv run pytest tests/test_cleanup.py -v
```

## Monitoring

Check Celery logs for cleanup task execution:

```bash
# Look for cleanup task logs
grep "cleanup_old_public_data" celery.log
grep "Starting cleanup of old public data" celery.log
```

Task statistics are logged after each run:
- Number of transcripts deleted
- Number of meetings deleted
- Number of orphaned recordings deleted
- Any errors encountered

194 server/docs/gpu/api-transcription.md (Normal file)
@@ -0,0 +1,194 @@
## Reflector GPU Transcription API (Specification)

This document defines the Reflector GPU transcription API that all implementations must adhere to. Current implementations include NVIDIA Parakeet (NeMo) and Whisper (faster-whisper), both deployed on Modal.com. The API surface and response shapes are OpenAI/Whisper-compatible, so clients can switch implementations by changing only the base URL.

### Base URL and Authentication

- Example base URLs (Modal web endpoints):

  - Parakeet: `https://<account>--reflector-transcriber-parakeet-web.modal.run`
  - Whisper: `https://<account>--reflector-transcriber-web.modal.run`

- All endpoints are served under `/v1` and require a Bearer token:

```
Authorization: Bearer <REFLECTOR_GPU_APIKEY>
```

Note: To switch implementations, deploy the desired variant and point `TRANSCRIPT_URL` to its base URL. The API is identical.

### Supported file types

`mp3, mp4, mpeg, mpga, m4a, wav, webm`

### Models and languages

- Parakeet (NVIDIA NeMo): default `nvidia/parakeet-tdt-0.6b-v2`
  - Language support: only `en`. Other languages return HTTP 400.
- Whisper (faster-whisper): default `large-v2` (or deployment-specific)
  - Language support: multilingual (per Whisper model capabilities).

Note: The `model` parameter is accepted by all implementations for interface parity. Some backends may treat it as informational.

### Endpoints

#### POST /v1/audio/transcriptions

Transcribe one or more uploaded audio files.

Request: multipart/form-data

- `file` (File) — optional. Single file to transcribe.
- `files` (File[]) — optional. One or more files to transcribe.
- `model` (string) — optional. Defaults to the implementation-specific model (see above).
- `language` (string) — optional, defaults to `en`.
  - Parakeet: only `en` is accepted; other values return HTTP 400
  - Whisper: model-dependent; typically multilingual
- `batch` (boolean) — optional, defaults to `false`.

Notes:

- Provide either `file` or `files`, not both. If neither is provided, HTTP 400.
- `batch` requires `files`; using `batch=true` without `files` returns HTTP 400.
- Response shape for multiple files is the same regardless of `batch`.
- Files sent to this endpoint are processed in a single pass (no VAD/chunking). This is intended for short clips (roughly ≤ 30s; depends on GPU memory/model). For longer audio, prefer `/v1/audio/transcriptions-from-url`, which supports VAD-based chunking.

Responses

Single file response:

```json
{
  "text": "transcribed text",
  "words": [
    { "word": "hello", "start": 0.0, "end": 0.5 },
    { "word": "world", "start": 0.5, "end": 1.0 }
  ],
  "filename": "audio.mp3"
}
```

Multiple files response:

```json
{
  "results": [
    { "filename": "a1.mp3", "text": "...", "words": [...] },
    { "filename": "a2.mp3", "text": "...", "words": [...] }
  ]
}
```

Notes:

- Word objects always include keys: `word`, `start`, `end`.
- Some implementations may include a trailing space in `word` to match Whisper tokenization behavior; clients should trim if needed.

Example curl (single file):

```bash
curl -X POST \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -F "file=@/path/to/audio.mp3" \
  -F "language=en" \
  "$BASE_URL/v1/audio/transcriptions"
```

Example curl (multiple files, batch):

```bash
curl -X POST \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -F "files=@/path/a1.mp3" -F "files=@/path/a2.mp3" \
  -F "batch=true" -F "language=en" \
  "$BASE_URL/v1/audio/transcriptions"
```

#### POST /v1/audio/transcriptions-from-url

Transcribe a single remote audio file by URL.

Request: application/json

Body parameters:

- `audio_file_url` (string) — required. URL of the audio file to transcribe.
- `model` (string) — optional. Defaults to the implementation-specific model (see above).
- `language` (string) — optional, defaults to `en`. Parakeet only accepts `en`.
- `timestamp_offset` (number) — optional, defaults to `0.0`. Added to each word's `start`/`end` in the response.

```json
{
  "audio_file_url": "https://example.com/audio.mp3",
  "model": "nvidia/parakeet-tdt-0.6b-v2",
  "language": "en",
  "timestamp_offset": 0.0
}
```

Response:

```json
{
  "text": "transcribed text",
  "words": [
    { "word": "hello", "start": 10.0, "end": 10.5 },
    { "word": "world", "start": 10.5, "end": 11.0 }
  ]
}
```

Notes:

- `timestamp_offset` is added to each word's `start`/`end` in the response.
- Implementations may perform VAD-based chunking and batching for long-form audio; word timings are adjusted accordingly.

Example curl:

```bash
curl -X POST \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -H "Content-Type: application/json" \
  -d '{
    "audio_file_url": "https://example.com/audio.mp3",
    "language": "en",
    "timestamp_offset": 0
  }' \
  "$BASE_URL/v1/audio/transcriptions-from-url"
```
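
The same call from Python, as a minimal sketch using `requests` (the base URL and key are placeholders for your deployment):

```python
import requests

BASE_URL = "https://<account>--reflector-transcriber-parakeet-web.modal.run"
API_KEY = "<REFLECTOR_GPU_APIKEY>"

resp = requests.post(
    f"{BASE_URL}/v1/audio/transcriptions-from-url",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "audio_file_url": "https://example.com/audio.mp3",
        "language": "en",
        "timestamp_offset": 0.0,
    },
    timeout=600,  # long-form audio can take a while
)
resp.raise_for_status()
for word in resp.json()["words"]:
    print(word["word"].strip(), word["start"], word["end"])
```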

### Error handling

- 400 Bad Request
  - Parakeet: `language` other than `en`
  - Missing required parameters (`file`/`files` for upload; `audio_file_url` for URL endpoint)
  - Unsupported file extension
- 401 Unauthorized
  - Missing or invalid Bearer token
- 404 Not Found
  - `audio_file_url` does not exist

### Implementation details

- GPUs: A10G for small-file/live, L40S for large-file URL transcription (subject to deployment)
- VAD chunking and segment batching; word timings adjusted and overlapping ends constrained
- Pads very short segments (< 0.5s) to avoid model crashes on some backends

### Server configuration (Reflector API)

Set the Reflector server to use the Modal backend and point `TRANSCRIPT_URL` to your chosen deployment:

```
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://<account>--reflector-transcriber-parakeet-web.modal.run
TRANSCRIPT_MODAL_API_KEY=<REFLECTOR_GPU_APIKEY>
```

### Conformance tests

Use the pytest-based conformance tests to validate any new implementation (including self-hosted) against this spec:

```
TRANSCRIPT_URL=https://<your-deployment-base> \
TRANSCRIPT_MODAL_API_KEY=your-api-key \
uv run -m pytest -m gpu_modal --no-cov server/tests/test_gpu_modal_transcript.py
```

493 server/docs/platform-jitsi.md (Normal file)
@@ -0,0 +1,493 @@
# Jitsi Integration Configuration Guide

This guide provides step-by-step instructions for configuring Reflector to work with a self-hosted Jitsi Meet installation for video meetings and recording.

## Prerequisites

Before configuring Jitsi integration, ensure you have:

- **Self-hosted Jitsi Meet installation** (version 2.0.8922 or later recommended)
- **Jibri recording service** configured and running
- **Prosody XMPP server** with the mod_event_sync module installed
- **Docker or system deployment** of Reflector with access to environment variables
- **SSL certificates** for secure communication between services

## Environment Configuration

Add the following environment variables to your Reflector deployment:

### Required Settings

```bash
# Jitsi Meet domain (without https://)
JITSI_DOMAIN=meet.example.com

# JWT secret for room authentication (generate with: openssl rand -hex 32)
JITSI_JWT_SECRET=your-64-character-hex-secret-here

# Webhook secret for secure event handling (generate with: openssl rand -hex 16)
JITSI_WEBHOOK_SECRET=your-32-character-hex-secret-here

# Application identifier (should match Jitsi configuration)
JITSI_APP_ID=reflector

# JWT issuer and audience (should match Jitsi configuration)
JITSI_JWT_ISSUER=reflector
JITSI_JWT_AUDIENCE=jitsi
```

### Example .env Configuration

```bash
# Add to your server/.env file
JITSI_DOMAIN=meet.mycompany.com
JITSI_JWT_SECRET=$(openssl rand -hex 32)
JITSI_WEBHOOK_SECRET=$(openssl rand -hex 16)
JITSI_APP_ID=reflector
JITSI_JWT_ISSUER=reflector
JITSI_JWT_AUDIENCE=jitsi
```

## Jitsi Meet Server Configuration

### 1. JWT Authentication Setup

Edit `/etc/prosody/conf.d/[YOUR_DOMAIN].cfg.lua`:

```lua
VirtualHost "meet.example.com"
    authentication = "token"
    app_id = "reflector"
    app_secret = "your-jwt-secret-here"

    -- Allow anonymous access for non-authenticated users
    c2s_require_encryption = false
    admins = { "focusUser@auth.meet.example.com" }

    modules_enabled = {
        "bosh";
        "pubsub";
        "ping";
        "roster";
        "saslauth";
        "tls";
        "dialback";
        "disco";
        "carbons";
        "pep";
        "private";
        "blocklist";
        "vcard";
        "version";
        "uptime";
        "time";
        "ping";
        "register";
        "admin_adhoc";
        "token_verification";
        "event_sync"; -- Required for webhook events
    }
```
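
For reference, a token this configuration will accept can be minted as follows. This is a sketch using PyJWT with the common Jitsi claim layout; in production Reflector generates these tokens itself from `JITSI_JWT_SECRET`:

```python
import time

import jwt  # PyJWT

secret = "your-jwt-secret-here"  # must match app_secret above

payload = {
    "iss": "reflector",            # JITSI_JWT_ISSUER / app_id
    "aud": "jitsi",                # JITSI_JWT_AUDIENCE
    "sub": "meet.example.com",     # your JITSI_DOMAIN
    "room": "my-jitsi-room",       # or "*" to allow any room
    "exp": int(time.time()) + 8 * 3600,  # 8h expiry, matching the default noted below
}

token = jwt.encode(payload, secret, algorithm="HS256")
print(f"https://meet.example.com/my-jitsi-room?jwt={token}")
```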

### 2. Room Access Control

Edit `/etc/jitsi/meet/meet.example.com-config.js`:

```javascript
var config = {
    hosts: {
        domain: 'meet.example.com',
        muc: 'conference.meet.example.com'
    },

    // Enable JWT authentication
    enableUserRolesBasedOnToken: true,

    // Recording configuration
    fileRecordingsEnabled: true,
    liveStreamingEnabled: false,

    // Reflector-specific settings
    prejoinPageEnabled: true,
    requireDisplayName: true,
};
```

### 3. Interface Configuration

Edit `/usr/share/jitsi-meet/interface_config.js`:

```javascript
var interfaceConfig = {
    // Customize for Reflector branding
    APP_NAME: 'Reflector Meeting',
    DEFAULT_WELCOME_PAGE_LOGO_URL: 'https://your-domain.com/logo.png',

    // Hide unnecessary buttons
    TOOLBAR_BUTTONS: [
        'microphone', 'camera', 'closedcaptions', 'desktop',
        'fullscreen', 'fodeviceselection', 'hangup',
        'chat', 'recording', 'livestreaming', 'etherpad',
        'sharedvideo', 'settings', 'raisehand', 'videoquality',
        'filmstrip', 'invite', 'feedback', 'stats', 'shortcuts',
        'tileview', 'videobackgroundblur', 'download', 'help',
        'mute-everyone'
    ]
};
```

## Jibri Configuration

### 1. Recording Service Setup

Edit `/etc/jitsi/jibri/jibri.conf`:

```hocon
jibri {
    recording {
        recordings-directory = "/var/recordings"
        finalize-script = "/opt/jitsi/jibri/finalize.sh"
    }

    api {
        xmpp {
            environments = [{
                name = "prod environment"
                xmpp-server-hosts = ["meet.example.com"]
                xmpp-domain = "meet.example.com"

                control-muc {
                    domain = "internal.auth.meet.example.com"
                    room-name = "JibriBrewery"
                    nickname = "jibri-nickname"
                }

                control-login {
                    domain = "auth.meet.example.com"
                    username = "jibri"
                    password = "jibri-password"
                }
            }]
        }
    }
}
```

### 2. Finalize Script Setup

Create `/opt/jitsi/jibri/finalize.sh`:

```bash
#!/bin/bash
# Jibri finalize script for Reflector integration

RECORDING_FILE="$1"
ROOM_NAME="$2"
REFLECTOR_API_URL="${REFLECTOR_API_URL:-http://localhost:1250}"
WEBHOOK_SECRET="${JITSI_WEBHOOK_SECRET}"

# Generate webhook signature
generate_signature() {
    local payload="$1"
    echo -n "$payload" | openssl dgst -sha256 -hmac "$WEBHOOK_SECRET" | cut -d' ' -f2
}

# Prepare webhook payload
TIMESTAMP=$(date -u +%Y-%m-%dT%H:%M:%S.%3NZ)
PAYLOAD=$(cat <<EOF
{
    "room_name": "$ROOM_NAME",
    "recording_file": "$RECORDING_FILE",
    "recording_status": "completed",
    "timestamp": "$TIMESTAMP"
}
EOF
)

# Generate signature
SIGNATURE=$(generate_signature "$PAYLOAD")

# Send webhook to Reflector
curl -X POST "$REFLECTOR_API_URL/v1/jibri/recording-complete" \
    -H "Content-Type: application/json" \
    -H "X-Jitsi-Signature: $SIGNATURE" \
    -d "$PAYLOAD" \
    --max-time 30

echo "Recording finalization webhook sent for room: $ROOM_NAME"
```

Make the script executable:

```bash
chmod +x /opt/jitsi/jibri/finalize.sh
```
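
On the receiving side, the signature is checked by recomputing the same HMAC over the raw request body. A minimal sketch of the verification a receiver would perform (not necessarily Reflector's exact implementation):

```python
import hashlib
import hmac


def verify_jitsi_signature(raw_body: bytes, signature: str, secret: str) -> bool:
    """Mirror finalize.sh: HMAC-SHA256 over the payload, hex-encoded."""
    expected = hmac.new(secret.encode(), raw_body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)
```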

## Prosody Event Configuration

### 1. Event-Sync Module Installation

Install the mod_event_sync module:

```bash
# Download the module
cd /usr/share/jitsi-meet/prosody-plugins/
wget https://raw.githubusercontent.com/jitsi-contrib/prosody-plugins/main/mod_event_sync.lua

# Or if using git
git clone https://github.com/jitsi-contrib/prosody-plugins.git
cp prosody-plugins/mod_event_sync.lua /usr/share/jitsi-meet/prosody-plugins/
```

### 2. Webhook Configuration

Add to `/etc/prosody/conf.d/[YOUR_DOMAIN].cfg.lua`:

```lua
Component "conference.meet.example.com" "muc"
    storage = "memory"
    modules_enabled = {
        "muc_meeting_id";
        "muc_domain_mapper";
        "polls";
        "event_sync"; -- Enable event sync
    }

    -- Event sync webhook configuration
    event_sync_url = "https://your-reflector-domain.com/v1/jitsi/events"
    event_sync_secret = "your-webhook-secret-here"

    -- Events to track
    event_sync_events = {
        "muc-occupant-joined",
        "muc-occupant-left",
        "jibri-recording-on",
        "jibri-recording-off"
    }
```

### 3. Restart Services

After configuration changes, restart all services:

```bash
systemctl restart prosody
systemctl restart jicofo
systemctl restart jitsi-videobridge2
systemctl restart jibri
systemctl restart nginx
```

## Reflector Room Configuration

### 1. Create Jitsi Room

When creating rooms in Reflector, set the platform field:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms" \
  -H "Authorization: Bearer $AUTH_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "my-jitsi-room",
    "platform": "jitsi",
    "recording_type": "cloud",
    "recording_trigger": "automatic-2nd-participant",
    "is_locked": false,
    "room_mode": "normal"
  }'
```

### 2. Meeting Creation

Meetings will automatically use Jitsi when the room platform is set to "jitsi":

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms/my-jitsi-room/meeting" \
  -H "Authorization: Bearer $AUTH_TOKEN"
```

## Testing the Integration

### 1. Health Check

Verify Jitsi webhook configuration:

```bash
curl "https://your-reflector-domain.com/v1/jitsi/health"
```

Expected response:
```json
{
  "status": "ok",
  "service": "jitsi-webhooks",
  "timestamp": "2025-01-15T10:30:00.000Z",
  "webhook_secret_configured": true
}
```

### 2. Room Creation Test

1. Create a Jitsi room via the Reflector API
2. Start a meeting - should generate a Jitsi Meet URL with a JWT token
3. Join with multiple participants - should trigger participant events
4. Start recording - should trigger the Jibri recording workflow

### 3. Webhook Event Test

Monitor Reflector logs for incoming webhook events:

```bash
# Check for participant events
curl -X POST "https://your-reflector-domain.com/v1/jitsi/events" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: test-signature" \
  -d '{
    "event": "muc-occupant-joined",
    "room": "test-room-name",
    "timestamp": "2025-01-15T10:30:00.000Z",
    "data": {}
  }'
```
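
The curl above uses a dummy signature, so it will likely be rejected if signature verification is enforced. To send a correctly signed event, compute the HMAC over the exact body (a sketch using `requests`; the secret placeholder must match `JITSI_WEBHOOK_SECRET`):

```python
import hashlib
import hmac
import json

import requests

secret = "your-webhook-secret-here"  # JITSI_WEBHOOK_SECRET
body = json.dumps({
    "event": "muc-occupant-joined",
    "room": "test-room-name",
    "timestamp": "2025-01-15T10:30:00.000Z",
    "data": {},
})
signature = hmac.new(secret.encode(), body.encode(), hashlib.sha256).hexdigest()

resp = requests.post(
    "https://your-reflector-domain.com/v1/jitsi/events",
    data=body,
    headers={"Content-Type": "application/json", "X-Jitsi-Signature": signature},
)
print(resp.status_code, resp.text)
```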

## Troubleshooting

### Common Issues

#### JWT Authentication Failures

**Symptoms:** Users can't join rooms, "Authentication failed" errors

**Solutions:**
1. Verify the JWT secret matches between Jitsi and Reflector
2. Check JWT token expiration (default 8 hours)
3. Ensure system clocks are synchronized
4. Validate JWT issuer/audience configuration

```bash
# Debug JWT tokens
echo "JWT_TOKEN_HERE" | cut -d'.' -f2 | base64 -d | jq
```

#### Webhook Events Not Received

**Symptoms:** Participant counts not updating, recording events missing

**Solutions:**
1. Verify the event_sync module is loaded in Prosody
2. Check webhook URL accessibility from the Jitsi server
3. Validate webhook signature generation
4. Review Prosody and Reflector logs

```bash
# Test webhook connectivity
curl -v "https://your-reflector-domain.com/v1/jitsi/health"

# Check Prosody logs
tail -f /var/log/prosody/prosody.log

# Check Reflector logs
docker logs your-reflector-container
```

#### Recording Issues

**Symptoms:** Recordings not starting, finalize script errors

**Solutions:**
1. Verify Jibri service status and configuration
2. Check recording directory permissions
3. Validate finalize script execution permissions
4. Monitor Jibri logs for errors

```bash
# Check Jibri status
systemctl status jibri

# Test finalize script
sudo -u jibri /opt/jitsi/jibri/finalize.sh "/test/recording.mp4" "test-room"

# Check Jibri logs
journalctl -u jibri -f
```

### Debug Commands

```bash
# Verify Jitsi configuration
prosodyctl check config

# Test JWT generation
curl -X POST "https://your-reflector-domain.com/v1/rooms/test/meeting" \
  -H "Authorization: Bearer $TOKEN" -v

# Monitor webhook events
tail -f /var/log/reflector/app.log | grep jitsi

# Check room participant counts
curl "https://your-reflector-domain.com/v1/rooms" \
  -H "Authorization: Bearer $TOKEN" | jq '.data[].num_clients'
```

### Performance Optimization

#### For High-Concurrency Usage

1. **Jitsi Videobridge Tuning:**
   ```bash
   # /etc/jitsi/videobridge/sip-communicator.properties
   org.jitsi.videobridge.STATISTICS_INTERVAL=5000
   org.jitsi.videobridge.load.INITIAL_STREAM_LIMIT=50
   ```

2. **Database Connection Pooling:**
   ```python
   # In your Reflector settings
   DATABASE_POOL_SIZE=20
   DATABASE_MAX_OVERFLOW=30
   ```

3. **Redis Configuration:**
   ```bash
   # For webhook event caching
   REDIS_URL=redis://localhost:6379/1
   WEBHOOK_EVENT_TTL=3600
   ```

## Security Considerations

### Network Security
- Use HTTPS/WSS for all communications
- Implement proper firewall rules
- Consider VPN for server-to-server communication

### Authentication Security
- Rotate JWT secrets regularly
- Use strong webhook secrets (32+ characters)
- Implement rate limiting on webhook endpoints

### Recording Security
- Encrypt recordings at rest
- Implement access controls for recording files
- Regular security audits of file permissions

## Support

For additional support:

1. **Reflector Issues:** Check GitHub issues or create new ones
2. **Jitsi Community:** [Community Forum](https://community.jitsi.org/)
3. **Documentation:** [Jitsi Developer Guide](https://jitsi.github.io/handbook/)

## Migration from Whereby

If migrating from Whereby integration:

1. Update existing rooms to use the "jitsi" platform
2. Verify webhook configurations are updated
3. Test recording workflows thoroughly
4. Monitor participant event accuracy
5. Update any custom integrations using meeting APIs

The platform abstraction layer ensures smooth migration with minimal API changes.

212 server/docs/webhook.md (Normal file)
@@ -0,0 +1,212 @@
# Reflector Webhook Documentation

## Overview

Reflector supports webhook notifications to notify external systems when transcript processing is completed. Webhooks can be configured per room and are triggered automatically after a transcript is successfully processed.

## Configuration

Webhooks are configured at the room level with two fields:
- `webhook_url`: The HTTPS endpoint to receive webhook notifications
- `webhook_secret`: Optional secret key for HMAC signature verification (auto-generated if not provided)

## Events

### `transcript.completed`

Triggered when a transcript has been fully processed, including transcription, diarization, summarization, and topic detection.

### `test`

A test event that can be triggered manually to verify webhook configuration.

## Webhook Request Format

### Headers

All webhook requests include the following headers:

| Header | Description | Example |
|--------|-------------|---------|
| `Content-Type` | Always `application/json` | `application/json` |
| `User-Agent` | Identifies Reflector as the source | `Reflector-Webhook/1.0` |
| `X-Webhook-Event` | The event type | `transcript.completed` or `test` |
| `X-Webhook-Retry` | Current retry attempt number | `0`, `1`, `2`... |
| `X-Webhook-Signature` | HMAC signature (if secret configured) | `t=1735306800,v1=abc123...` |

### Signature Verification

If a webhook secret is configured, Reflector includes an HMAC-SHA256 signature in the `X-Webhook-Signature` header to verify the webhook authenticity.

The signature format is: `t={timestamp},v1={signature}`

To verify the signature:
1. Extract the timestamp and signature from the header
2. Create the signed payload: `{timestamp}.{request_body}`
3. Compute HMAC-SHA256 of the signed payload using your webhook secret
4. Compare the computed signature with the received signature

Example verification (Python):
```python
import hmac
import hashlib

def verify_webhook_signature(payload: bytes, signature_header: str, secret: str) -> bool:
    # Parse header: "t=1735306800,v1=abc123..."
    parts = dict(part.split("=") for part in signature_header.split(","))
    timestamp = parts["t"]
    received_signature = parts["v1"]

    # Create signed payload
    signed_payload = f"{timestamp}.{payload.decode('utf-8')}"

    # Compute expected signature
    expected_signature = hmac.new(
        secret.encode("utf-8"),
        signed_payload.encode("utf-8"),
        hashlib.sha256
    ).hexdigest()

    # Compare signatures
    return hmac.compare_digest(expected_signature, received_signature)
```

## Event Payloads

### `transcript.completed` Event

This event includes a convenient URL for accessing the transcript:
- `frontend_url`: Direct link to view the transcript in the web interface

```json
{
  "event": "transcript.completed",
  "event_id": "transcript.completed-abc-123-def-456",
  "timestamp": "2025-08-27T12:34:56.789012Z",
  "transcript": {
    "id": "abc-123-def-456",
    "room_id": "room-789",
    "created_at": "2025-08-27T12:00:00Z",
    "duration": 1800.5,
    "title": "Q3 Product Planning Meeting",
    "short_summary": "Team discussed Q3 product roadmap, prioritizing mobile app features and API improvements.",
    "long_summary": "The product team met to finalize the Q3 roadmap. Key decisions included...",
    "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v Speaker 1>Welcome everyone to today's meeting...",
    "topics": [
      {
        "title": "Introduction and Agenda",
        "summary": "Meeting kickoff with agenda review",
        "timestamp": 0.0,
        "duration": 120.0,
        "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v Speaker 1>Welcome everyone..."
      },
      {
        "title": "Mobile App Features Discussion",
        "summary": "Team reviewed proposed mobile app features for Q3",
        "timestamp": 120.0,
        "duration": 600.0,
        "webvtt": "WEBVTT\n\n00:02:00.000 --> 00:02:10.000\n<v Speaker 2>Let's talk about the mobile app..."
      }
    ],
    "participants": [
      {
        "id": "participant-1",
        "name": "John Doe",
        "speaker": "Speaker 1"
      },
      {
        "id": "participant-2",
        "name": "Jane Smith",
        "speaker": "Speaker 2"
      }
    ],
    "source_language": "en",
    "target_language": "en",
    "status": "completed",
    "frontend_url": "https://app.reflector.com/transcripts/abc-123-def-456"
  },
  "room": {
    "id": "room-789",
    "name": "Product Team Room"
  }
}
```

### `test` Event

```json
{
  "event": "test",
  "event_id": "test.2025-08-27T12:34:56.789012Z",
  "timestamp": "2025-08-27T12:34:56.789012Z",
  "message": "This is a test webhook from Reflector",
  "room": {
    "id": "room-789",
    "name": "Product Team Room"
  }
}
```

## Retry Policy

Webhooks are delivered with automatic retry logic to handle transient failures. When a webhook delivery fails due to server errors or network issues, Reflector will automatically retry the delivery multiple times over an extended period.

### Retry Mechanism

Reflector implements an exponential backoff strategy for webhook retries:

- **Initial retry delay**: 60 seconds after the first failure
- **Exponential backoff**: Each subsequent retry waits approximately twice as long as the previous one
- **Maximum retry interval**: 1 hour (backoff is capped at this duration)
- **Maximum retry attempts**: 30 attempts total
- **Total retry duration**: Retries continue for approximately 24 hours

### How Retries Work

When a webhook fails, Reflector will:
1. Wait 60 seconds, then retry (attempt #1)
2. If it fails again, wait ~2 minutes, then retry (attempt #2)
3. Continue doubling the wait time up to a maximum of 1 hour between attempts
4. Keep retrying at 1-hour intervals until successful or 30 attempts are exhausted

The `X-Webhook-Retry` header indicates the current retry attempt number (0 for the initial attempt, 1 for first retry, etc.), allowing your endpoint to track retry attempts.
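
The schedule follows directly from the stated parameters; a small sketch that prints the approximate wait before each attempt and the total window:

```python
def retry_delays(max_attempts: int = 30, base: int = 60, cap: int = 3600):
    """Yield the approximate wait in seconds before each retry: 60s, doubling, capped at 1h."""
    delay = base
    for attempt in range(1, max_attempts + 1):
        yield attempt, delay
        delay = min(delay * 2, cap)


total = sum(delay for _, delay in retry_delays())
print(f"total retry window: ~{total / 3600:.0f} hours")  # roughly a day, as stated above
```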

### Retry Behavior by HTTP Status Code

| Status Code | Behavior |
|-------------|----------|
| 2xx (Success) | No retry, webhook marked as delivered |
| 4xx (Client Error) | No retry, request is considered permanently failed |
| 5xx (Server Error) | Automatic retry with exponential backoff |
| Network/Timeout Error | Automatic retry with exponential backoff |

**Important Notes:**
- Webhooks time out after 30 seconds. If your endpoint takes longer to respond, it will be considered a timeout error and retried.
- During the retry period (~24 hours), you may receive the same webhook multiple times if your endpoint experiences intermittent failures.
- There is no mechanism to manually retry failed webhooks after the retry period expires.

## Testing Webhooks

You can test your webhook configuration before processing transcripts:

```http
POST /v1/rooms/{room_id}/webhook/test
```

Response:
```json
{
  "success": true,
  "status_code": 200,
  "message": "Webhook test successful",
  "response_preview": "OK"
}
```

Or in case of failure:
```json
{
  "success": false,
  "error": "Webhook request timed out (10 seconds)"
}
```

@@ -7,11 +7,9 @@
## User authentication
## =======================================================

## Using fief (fief.dev)
AUTH_BACKEND=fief
AUTH_FIEF_URL=https://auth.reflector.media/reflector-local
AUTH_FIEF_CLIENT_ID=***REMOVED***
AUTH_FIEF_CLIENT_SECRET=<ask in zulip>
## Using jwt/authentik
AUTH_BACKEND=jwt
AUTH_JWT_AUDIENCE=

## =======================================================
## Transcription backend
@@ -22,24 +20,24 @@ AUTH_FIEF_CLIENT_SECRET=<ask in zulip>

## Using local whisper
#TRANSCRIPT_BACKEND=whisper
#WHISPER_MODEL_SIZE=tiny

## Using serverless modal.com (require reflector-gpu-modal deployed)
#TRANSCRIPT_BACKEND=modal
#TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
#TRANSLATE_URL=https://xxxxx--reflector-translator-web.modal.run
#TRANSCRIPT_MODAL_API_KEY=xxxxx

TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=***REMOVED***
TRANSCRIPT_MODAL_API_KEY=

## =======================================================
## Transcription backend
## Translation backend
##
## Only available in modal atm
## =======================================================
TRANSLATION_BACKEND=modal
TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
#TRANSLATION_MODAL_API_KEY=xxxxx

## =======================================================
## LLM backend
@@ -49,28 +47,11 @@ TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
## llm backend implementation
## =======================================================

## Using serverless modal.com (require reflector-gpu-modal deployed)
LLM_BACKEND=modal
LLM_URL=https://monadical-sas--reflector-llm-web.modal.run
LLM_MODAL_API_KEY=***REMOVED***
ZEPHYR_LLM_URL=https://monadical-sas--reflector-llm-zephyr-web.modal.run


## Using OpenAI
#LLM_BACKEND=openai
#LLM_OPENAI_KEY=xxx
#LLM_OPENAI_MODEL=gpt-3.5-turbo

## Using GPT4ALL
#LLM_BACKEND=openai
#LLM_URL=http://localhost:4891/v1/completions
#LLM_OPENAI_MODEL="GPT4All Falcon"

## Default LLM MODEL NAME
#DEFAULT_LLM=lmsys/vicuna-13b-v1.5

## Cache directory to store models
CACHE_DIR=data
## Context size for summary generation (tokens)
# LLM_MODEL=microsoft/phi-4
LLM_CONTEXT_WINDOW=16000
LLM_URL=
LLM_API_KEY=sk-

## =======================================================
## Diarization
@@ -79,7 +60,9 @@ CACHE_DIR=data
## To allow diarization, you need to expose the files to be downloaded by the pipeline
## =======================================================
DIARIZATION_ENABLED=false
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
#DIARIZATION_MODAL_API_KEY=xxxxx


## =======================================================
@@ -88,4 +71,3 @@ DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run

## Sentry DSN configuration
#SENTRY_DSN=
||||
@@ -3,8 +3,10 @@
|
||||
This repository hold an API for the GPU implementation of the Reflector API service,
|
||||
and use [Modal.com](https://modal.com)
|
||||

- `reflector_llm.py` - LLM API
- `reflector_transcriber.py` - Transcription API
- `reflector_diarizer.py` - Diarization API
- `reflector_transcriber.py` - Transcription API (Whisper)
- `reflector_transcriber_parakeet.py` - Transcription API (NVIDIA Parakeet)
- `reflector_translator.py` - Translation API

## Modal.com deployment

@@ -18,21 +20,29 @@ $ modal deploy reflector_transcriber.py
...
└── 🔨 Created web => https://xxxx--reflector-transcriber-web.modal.run

$ modal deploy reflector_transcriber_parakeet.py
...
└── 🔨 Created web => https://xxxx--reflector-transcriber-parakeet-web.modal.run

$ modal deploy reflector_llm.py
...
└── 🔨 Created web => https://xxxx--reflector-llm-web.modal.run
```

Then in your reflector api configuration `.env`, you can set theses keys:
Then in your reflector api configuration `.env`, you can set these keys:

```
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://xxxx--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=REFLECTOR_APIKEY

LLM_BACKEND=modal
LLM_URL=https://xxxx--reflector-llm-web.modal.run
LLM_MODAL_API_KEY=REFLECTOR_APIKEY
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://xxxx--reflector-diarizer-web.modal.run
DIARIZATION_MODAL_API_KEY=REFLECTOR_APIKEY

TRANSLATION_BACKEND=modal
TRANSLATION_URL=https://xxxx--reflector-translator-web.modal.run
TRANSLATION_MODAL_API_KEY=REFLECTOR_APIKEY
```

## API

@@ -63,6 +73,86 @@ Authorization: bearer <REFLECTOR_APIKEY>

### Transcription

#### Parakeet Transcriber (`reflector_transcriber_parakeet.py`)

NVIDIA Parakeet is a state-of-the-art ASR model optimized for real-time transcription with superior word-level timestamps.

**GPU Configuration:**
- **A10G GPU** - Used for `/v1/audio/transcriptions` endpoint (small files, live transcription)
  - Higher concurrency (max_inputs=10)
  - Optimized for multiple small audio files
  - Supports batch processing for efficiency

- **L40S GPU** - Used for `/v1/audio/transcriptions-from-url` endpoint (large files)
  - Lower concurrency but more powerful processing
  - Optimized for single large audio files
  - VAD-based chunking for long-form audio

##### `/v1/audio/transcriptions` - Small file transcription

**request** (multipart/form-data)
- `file` or `files[]` - audio file(s) to transcribe
- `model` - model name (default: `nvidia/parakeet-tdt-0.6b-v2`)
- `language` - language code (default: `en`)
- `batch` - whether to use batch processing for multiple files (default: `true`)

**response**
```json
{
  "text": "transcribed text",
  "words": [
    {"word": "hello", "start": 0.0, "end": 0.5},
    {"word": "world", "start": 0.5, "end": 1.0}
  ],
  "filename": "audio.mp3"
}
```

For multiple files with batch=true:
```json
{
  "results": [
    {
      "filename": "audio1.mp3",
      "text": "transcribed text",
      "words": [...]
    },
    {
      "filename": "audio2.mp3",
      "text": "transcribed text",
      "words": [...]
    }
  ]
}
```
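
As a quick smoke test, the small-file endpoint can be called with `requests`; this is a minimal sketch, assuming the `xxxx--reflector-transcriber-parakeet-web.modal.run` URL printed by `modal deploy` and a `REFLECTOR_APIKEY` configured for this deployment:

```python
import requests

URL = "https://xxxx--reflector-transcriber-parakeet-web.modal.run"  # assumed deployment URL
APIKEY = "REFLECTOR_APIKEY"  # assumed API key

# Upload one small file as multipart/form-data with bearer auth
with open("audio.mp3", "rb") as f:
    response = requests.post(
        f"{URL}/v1/audio/transcriptions",
        headers={"Authorization": f"Bearer {APIKEY}"},
        files={"file": ("audio.mp3", f, "audio/mpeg")},
        data={"language": "en"},
    )
response.raise_for_status()
print(response.json()["text"])
```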

##### `/v1/audio/transcriptions-from-url` - Large file transcription

**request** (application/json)
```json
{
  "audio_file_url": "https://example.com/audio.mp3",
  "model": "nvidia/parakeet-tdt-0.6b-v2",
  "language": "en",
  "timestamp_offset": 0.0
}
```

**response**
```json
{
  "text": "transcribed text from large file",
  "words": [
    {"word": "hello", "start": 0.0, "end": 0.5},
    {"word": "world", "start": 0.5, "end": 1.0}
  ]
}
```

**Supported file types:** mp3, mp4, mpeg, mpga, m4a, wav, webm
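
The large-file endpoint takes a JSON body instead of a file upload; a minimal sketch under the same assumed URL and API key:

```python
import requests

URL = "https://xxxx--reflector-transcriber-parakeet-web.modal.run"  # assumed deployment URL
APIKEY = "REFLECTOR_APIKEY"  # assumed API key

response = requests.post(
    f"{URL}/v1/audio/transcriptions-from-url",
    headers={"Authorization": f"Bearer {APIKEY}"},
    json={
        "audio_file_url": "https://example.com/audio.mp3",
        "language": "en",
        "timestamp_offset": 0.0,
    },
)
response.raise_for_status()
result = response.json()
print(result["text"], len(result["words"]), "words")
```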

#### Whisper Transcriber (`reflector_transcriber.py`)

`POST /transcribe`

**request** (multipart/form-data)

@@ -4,14 +4,80 @@ Reflector GPU backend - diarizer
"""

import os
import uuid
from typing import Mapping, NewType
from urllib.parse import urlparse

import modal.gpu
from modal import App, Image, Secret, asgi_app, enter, method
from pydantic import BaseModel
import modal

PYANNOTE_MODEL_NAME: str = "pyannote/speaker-diarization-3.1"
MODEL_DIR = "/root/diarization_models"
app = App(name="reflector-diarizer")
UPLOADS_PATH = "/uploads"
SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]

DiarizerUniqFilename = NewType("DiarizerUniqFilename", str)
AudioFileExtension = NewType("AudioFileExtension", str)

app = modal.App(name="reflector-diarizer")

# Volume for temporary file uploads
upload_volume = modal.Volume.from_name("diarizer-uploads", create_if_missing=True)


def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
    parsed_url = urlparse(url)
    url_path = parsed_url.path

    for ext in SUPPORTED_FILE_EXTENSIONS:
        if url_path.lower().endswith(f".{ext}"):
            return AudioFileExtension(ext)

    content_type = headers.get("content-type", "").lower()
    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
        return AudioFileExtension("mp3")
    if "audio/wav" in content_type:
        return AudioFileExtension("wav")
    if "audio/mp4" in content_type:
        return AudioFileExtension("mp4")

    raise ValueError(
        f"Unsupported audio format for URL: {url}. "
        f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
    )
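
# Illustrative sketch (not part of the change above): detect_audio_format
# prefers the URL path's extension and falls back to the Content-Type header:
#
#   detect_audio_format("https://host/talk.WAV", {})                            # -> "wav"
#   detect_audio_format("https://host/stream", {"content-type": "audio/mpeg"})  # -> "mp3"
#   detect_audio_format("https://host/stream", {})                              # raises ValueError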


def download_audio_to_volume(
    audio_file_url: str,
) -> tuple[DiarizerUniqFilename, AudioFileExtension]:
    import requests
    from fastapi import HTTPException

    print(f"Checking audio file at: {audio_file_url}")
    response = requests.head(audio_file_url, allow_redirects=True)
    if response.status_code == 404:
        raise HTTPException(status_code=404, detail="Audio file not found")

    print(f"Downloading audio file from: {audio_file_url}")
    response = requests.get(audio_file_url, allow_redirects=True)

    if response.status_code != 200:
        print(f"Download failed with status {response.status_code}: {response.text}")
        raise HTTPException(
            status_code=response.status_code,
            detail=f"Failed to download audio file: {response.status_code}",
        )

    audio_suffix = detect_audio_format(audio_file_url, response.headers)
    unique_filename = DiarizerUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
    file_path = f"{UPLOADS_PATH}/{unique_filename}"

    print(f"Writing file to: {file_path} (size: {len(response.content)} bytes)")
    with open(file_path, "wb") as f:
        f.write(response.content)

    upload_volume.commit()
    print(f"File saved as: {unique_filename}")
    return unique_filename, audio_suffix


def migrate_cache_llm():
@@ -39,7 +105,7 @@ def download_pyannote_audio():


diarizer_image = (
    Image.debian_slim(python_version="3.10.8")
    modal.Image.debian_slim(python_version="3.10.8")
    .pip_install(
        "pyannote.audio==3.1.0",
        "requests",
@@ -55,7 +121,8 @@ diarizer_image = (
        "hf-transfer",
    )
    .run_function(
        download_pyannote_audio, secrets=[Secret.from_name("my-huggingface-secret")]
        download_pyannote_audio,
        secrets=[modal.Secret.from_name("hf_token")],
    )
    .run_function(migrate_cache_llm)
    .env(
@@ -70,53 +137,60 @@ diarizer_image = (


@app.cls(
    gpu=modal.gpu.A100(size="40GB"),
    gpu="A100",
    timeout=60 * 30,
    scaledown_window=60,
    allow_concurrent_inputs=1,
    image=diarizer_image,
    volumes={UPLOADS_PATH: upload_volume},
    enable_memory_snapshot=True,
    experimental_options={"enable_gpu_snapshot": True},
    secrets=[
        modal.Secret.from_name("hf_token"),
    ],
)
@modal.concurrent(max_inputs=1)
class Diarizer:
    @enter()
    @modal.enter(snap=True)
    def enter(self):
        import torch
        from pyannote.audio import Pipeline

        self.use_gpu = torch.cuda.is_available()
        self.device = "cuda" if self.use_gpu else "cpu"
        print(f"Using device: {self.device}")
        self.diarization_pipeline = Pipeline.from_pretrained(
            PYANNOTE_MODEL_NAME, cache_dir=MODEL_DIR
            PYANNOTE_MODEL_NAME,
            cache_dir=MODEL_DIR,
            use_auth_token=os.environ["HF_TOKEN"],
        )
        self.diarization_pipeline.to(torch.device(self.device))

    @method()
    def diarize(self, audio_data: str, audio_suffix: str, timestamp: float):
        import tempfile

    @modal.method()
    def diarize(self, filename: str, timestamp: float = 0.0):
        import torchaudio

        with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
            fp.write(audio_data)
        upload_volume.reload()

            print("Diarizing audio")
            waveform, sample_rate = torchaudio.load(fp.name)
            diarization = self.diarization_pipeline(
                {"waveform": waveform, "sample_rate": sample_rate}
        file_path = f"{UPLOADS_PATH}/{filename}"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        print(f"Diarizing audio from: {file_path}")
        waveform, sample_rate = torchaudio.load(file_path)
        diarization = self.diarization_pipeline(
            {"waveform": waveform, "sample_rate": sample_rate}
        )

        words = []
        for diarization_segment, _, speaker in diarization.itertracks(yield_label=True):
            words.append(
                {
                    "start": round(timestamp + diarization_segment.start, 3),
                    "end": round(timestamp + diarization_segment.end, 3),
                    "speaker": int(speaker[-2:]),
                }
            )

            words = []
            for diarization_segment, _, speaker in diarization.itertracks(
                yield_label=True
            ):
                words.append(
                    {
                        "start": round(timestamp + diarization_segment.start, 3),
                        "end": round(timestamp + diarization_segment.end, 3),
                        "speaker": int(speaker[-2:]),
                    }
                )
            print("Diarization complete")
            return {"diarization": words}
        print("Diarization complete")
        return {"diarization": words}


# -------------------------------------------------------------------
@@ -127,17 +201,18 @@ class Diarizer:
@app.function(
    timeout=60 * 10,
    scaledown_window=60 * 3,
    allow_concurrent_inputs=40,
    secrets=[
        Secret.from_name("reflector-gpu"),
        modal.Secret.from_name("reflector-gpu"),
    ],
    volumes={UPLOADS_PATH: upload_volume},
    image=diarizer_image,
)
@asgi_app()
@modal.concurrent(max_inputs=40)
@modal.asgi_app()
def web():
    import requests
    from fastapi import Depends, FastAPI, HTTPException, status
    from fastapi.security import OAuth2PasswordBearer
    from pydantic import BaseModel

    diarizerstub = Diarizer()

@@ -153,35 +228,26 @@ def web():
            headers={"WWW-Authenticate": "Bearer"},
        )

    def validate_audio_file(audio_file_url: str):
        # Check if the audio file exists
        response = requests.head(audio_file_url, allow_redirects=True)
        if response.status_code == 404:
            raise HTTPException(
                status_code=response.status_code,
                detail="The audio file does not exist.",
            )

    class DiarizationResponse(BaseModel):
        result: dict

    @app.post(
        "/diarize", dependencies=[Depends(apikey_auth), Depends(validate_audio_file)]
    )
    def diarize(
        audio_file_url: str, timestamp: float = 0.0
    ) -> HTTPException | DiarizationResponse:
        # Currently the uploaded files are in mp3 format
        audio_suffix = "mp3"
    @app.post("/diarize", dependencies=[Depends(apikey_auth)])
    def diarize(audio_file_url: str, timestamp: float = 0.0) -> DiarizationResponse:
        unique_filename, audio_suffix = download_audio_to_volume(audio_file_url)

        print("Downloading audio file")
        response = requests.get(audio_file_url, allow_redirects=True)
        print("Audio file downloaded successfully")

        func = diarizerstub.diarize.spawn(
            audio_data=response.content, audio_suffix=audio_suffix, timestamp=timestamp
        )
        result = func.get()
        return result
        try:
            func = diarizerstub.diarize.spawn(
                filename=unique_filename, timestamp=timestamp
            )
            result = func.get()
            return result
        finally:
            try:
                file_path = f"{UPLOADS_PATH}/{unique_filename}"
                print(f"Deleting file: {file_path}")
                os.remove(file_path)
                upload_volume.commit()
            except Exception as e:
                print(f"Error cleaning up {unique_filename}: {e}")

    return app

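# Illustrative sketch (not part of the change above): calling the /diarize
# endpoint, assuming the xxxx--reflector-diarizer-web.modal.run URL from the
# README and a REFLECTOR_APIKEY for this deployment. The endpoint takes query
# parameters and returns {"diarization": [{"start", "end", "speaker"}, ...]}.
#
#   import requests
#
#   resp = requests.post(
#       "https://xxxx--reflector-diarizer-web.modal.run/diarize",
#       headers={"Authorization": "Bearer REFLECTOR_APIKEY"},
#       params={"audio_file_url": "https://example.com/audio.mp3", "timestamp": 0.0},
#   )
#   resp.raise_for_status()
#   print(resp.json()["diarization"][:3])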
@@ -1,214 +0,0 @@
"""
Reflector GPU backend - LLM
===========================

"""

import json
import os
import threading
from typing import Optional

import modal
from modal import App, Image, Secret, asgi_app, enter, exit, method

# LLM
LLM_MODEL: str = "lmsys/vicuna-13b-v1.5"
LLM_LOW_CPU_MEM_USAGE: bool = True
LLM_TORCH_DTYPE: str = "bfloat16"
LLM_MAX_NEW_TOKENS: int = 300

IMAGE_MODEL_DIR = "/root/llm_models"

app = App(name="reflector-llm")


def download_llm():
    from huggingface_hub import snapshot_download

    print("Downloading LLM model")
    snapshot_download(LLM_MODEL, cache_dir=IMAGE_MODEL_DIR)
    print("LLM model downloaded")


def migrate_cache_llm():
    """
    XXX The cache for model files in Transformers v4.22.0 has been updated.
    Migrating your old cache. This is a one-time only operation. You can
    interrupt this and resume the migration later on by calling
    `transformers.utils.move_cache()`.
    """
    from transformers.utils.hub import move_cache

    print("Moving LLM cache")
    move_cache(cache_dir=IMAGE_MODEL_DIR, new_cache_dir=IMAGE_MODEL_DIR)
    print("LLM cache moved")


llm_image = (
    Image.debian_slim(python_version="3.10.8")
    .apt_install("git")
    .pip_install(
        "transformers",
        "torch",
        "sentencepiece",
        "protobuf",
        "jsonformer==0.12.0",
        "accelerate==0.21.0",
        "einops==0.6.1",
        "hf-transfer~=0.1",
        "huggingface_hub==0.16.4",
    )
    .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
    .run_function(download_llm)
    .run_function(migrate_cache_llm)
)


@app.cls(
    gpu="A100",
    timeout=60 * 5,
    scaledown_window=60 * 5,
    allow_concurrent_inputs=15,
    image=llm_image,
)
class LLM:
    @enter()
    def enter(self):
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

        print("Instance llm model")
        model = AutoModelForCausalLM.from_pretrained(
            LLM_MODEL,
            torch_dtype=getattr(torch, LLM_TORCH_DTYPE),
            low_cpu_mem_usage=LLM_LOW_CPU_MEM_USAGE,
            cache_dir=IMAGE_MODEL_DIR,
            local_files_only=True,
        )

        # JSONFormer doesn't yet support generation configs
        print("Instance llm generation config")
        model.config.max_new_tokens = LLM_MAX_NEW_TOKENS

        # generation configuration
        gen_cfg = GenerationConfig.from_model_config(model.config)
        gen_cfg.max_new_tokens = LLM_MAX_NEW_TOKENS

        # load tokenizer
        print("Instance llm tokenizer")
        tokenizer = AutoTokenizer.from_pretrained(
            LLM_MODEL, cache_dir=IMAGE_MODEL_DIR, local_files_only=True
        )

        # move model to gpu
        print("Move llm model to GPU")
        model = model.cuda()

        print("Warmup llm done")
        self.model = model
        self.tokenizer = tokenizer
        self.gen_cfg = gen_cfg
        self.GenerationConfig = GenerationConfig

        self.lock = threading.Lock()

    @exit()
    def exit():
        print("Exit llm")

    @method()
    def generate(
        self, prompt: str, gen_schema: str | None, gen_cfg: str | None
    ) -> dict:
        """
        Perform a generation action using the LLM
        """
        print(f"Generate {prompt=}")
        if gen_cfg:
            gen_cfg = self.GenerationConfig.from_dict(json.loads(gen_cfg))
        else:
            gen_cfg = self.gen_cfg

        # If a gen_schema is given, conform to gen_schema
        with self.lock:
            if gen_schema:
                import jsonformer

                print(f"Schema {gen_schema=}")
                jsonformer_llm = jsonformer.Jsonformer(
                    model=self.model,
                    tokenizer=self.tokenizer,
                    json_schema=json.loads(gen_schema),
                    prompt=prompt,
                    max_string_token_length=gen_cfg.max_new_tokens,
                )
                response = jsonformer_llm()
            else:
                # If no gen_schema, perform prompt only generation

                # tokenize prompt
                input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(
                    self.model.device
                )
                output = self.model.generate(input_ids, generation_config=gen_cfg)

                # decode output
                response = self.tokenizer.decode(
                    output[0].cpu(), skip_special_tokens=True
                )
                response = response[len(prompt) :]
        print(f"Generated {response=}")
        return {"text": response}


# -------------------------------------------------------------------
# Web API
# -------------------------------------------------------------------


@app.function(
    scaledown_window=60 * 10,
    timeout=60 * 5,
    allow_concurrent_inputs=45,
    secrets=[
        Secret.from_name("reflector-gpu"),
    ],
)
@asgi_app()
def web():
    from fastapi import Depends, FastAPI, HTTPException, status
    from fastapi.security import OAuth2PasswordBearer
    from pydantic import BaseModel

    llmstub = LLM()

    app = FastAPI()
    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid API key",
                headers={"WWW-Authenticate": "Bearer"},
            )

    class LLMRequest(BaseModel):
        prompt: str
        gen_schema: Optional[dict] = None
        gen_cfg: Optional[dict] = None

    @app.post("/llm", dependencies=[Depends(apikey_auth)])
    def llm(
        req: LLMRequest,
    ):
        gen_schema = json.dumps(req.gen_schema) if req.gen_schema else None
        gen_cfg = json.dumps(req.gen_cfg) if req.gen_cfg else None
        func = llmstub.generate.spawn(
            prompt=req.prompt, gen_schema=gen_schema, gen_cfg=gen_cfg
        )
        result = func.get()
        return result

    return app
@@ -1,220 +0,0 @@
"""
Reflector GPU backend - LLM
===========================

"""

import json
import os
import threading
from typing import Optional

import modal
from modal import App, Image, Secret, asgi_app, enter, exit, method

# LLM
LLM_MODEL: str = "HuggingFaceH4/zephyr-7b-alpha"
LLM_LOW_CPU_MEM_USAGE: bool = True
LLM_TORCH_DTYPE: str = "bfloat16"
LLM_MAX_NEW_TOKENS: int = 300

IMAGE_MODEL_DIR = "/root/llm_models/zephyr"

app = App(name="reflector-llm-zephyr")


def download_llm():
    from huggingface_hub import snapshot_download

    print("Downloading LLM model")
    snapshot_download(LLM_MODEL, cache_dir=IMAGE_MODEL_DIR)
    print("LLM model downloaded")


def migrate_cache_llm():
    """
    XXX The cache for model files in Transformers v4.22.0 has been updated.
    Migrating your old cache. This is a one-time only operation. You can
    interrupt this and resume the migration later on by calling
    `transformers.utils.move_cache()`.
    """
    from transformers.utils.hub import move_cache

    print("Moving LLM cache")
    move_cache(cache_dir=IMAGE_MODEL_DIR, new_cache_dir=IMAGE_MODEL_DIR)
    print("LLM cache moved")


llm_image = (
    Image.debian_slim(python_version="3.10.8")
    .apt_install("git")
    .pip_install(
        "transformers==4.34.0",
        "torch",
        "sentencepiece",
        "protobuf",
        "jsonformer==0.12.0",
        "accelerate==0.21.0",
        "einops==0.6.1",
        "hf-transfer~=0.1",
        "huggingface_hub==0.16.4",
    )
    .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
    .run_function(download_llm)
    .run_function(migrate_cache_llm)
)


@app.cls(
    gpu="A10G",
    timeout=60 * 5,
    scaledown_window=60 * 5,
    allow_concurrent_inputs=10,
    image=llm_image,
)
class LLM:
    @enter()
    def enter(self):
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

        print("Instance llm model")
        model = AutoModelForCausalLM.from_pretrained(
            LLM_MODEL,
            torch_dtype=getattr(torch, LLM_TORCH_DTYPE),
            low_cpu_mem_usage=LLM_LOW_CPU_MEM_USAGE,
            cache_dir=IMAGE_MODEL_DIR,
            local_files_only=True,
        )

        # JSONFormer doesn't yet support generation configs
        print("Instance llm generation config")
        model.config.max_new_tokens = LLM_MAX_NEW_TOKENS

        # generation configuration
        gen_cfg = GenerationConfig.from_model_config(model.config)
        gen_cfg.max_new_tokens = LLM_MAX_NEW_TOKENS

        # load tokenizer
        print("Instance llm tokenizer")
        tokenizer = AutoTokenizer.from_pretrained(
            LLM_MODEL, cache_dir=IMAGE_MODEL_DIR, local_files_only=True
        )
        gen_cfg.pad_token_id = tokenizer.eos_token_id
        gen_cfg.eos_token_id = tokenizer.eos_token_id
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = tokenizer.eos_token_id

        # move model to gpu
        print("Move llm model to GPU")
        model = model.cuda()

        print("Warmup llm done")
        self.model = model
        self.tokenizer = tokenizer
        self.gen_cfg = gen_cfg
        self.GenerationConfig = GenerationConfig
        self.lock = threading.Lock()

    @exit()
    def exit():
        print("Exit llm")

    @method()
    def generate(
        self, prompt: str, gen_schema: str | None, gen_cfg: str | None
    ) -> dict:
        """
        Perform a generation action using the LLM
        """
        print(f"Generate {prompt=}")
        if gen_cfg:
            gen_cfg = self.GenerationConfig.from_dict(json.loads(gen_cfg))
            gen_cfg.pad_token_id = self.tokenizer.eos_token_id
            gen_cfg.eos_token_id = self.tokenizer.eos_token_id
        else:
            gen_cfg = self.gen_cfg

        # If a gen_schema is given, conform to gen_schema
        with self.lock:
            if gen_schema:
                import jsonformer

                print(f"Schema {gen_schema=}")
                jsonformer_llm = jsonformer.Jsonformer(
                    model=self.model,
                    tokenizer=self.tokenizer,
                    json_schema=json.loads(gen_schema),
                    prompt=prompt,
                    max_string_token_length=gen_cfg.max_new_tokens,
                )
                response = jsonformer_llm()
            else:
                # If no gen_schema, perform prompt only generation

                # tokenize prompt
                input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(
                    self.model.device
                )
                output = self.model.generate(input_ids, generation_config=gen_cfg)

                # decode output
                response = self.tokenizer.decode(
                    output[0].cpu(), skip_special_tokens=True
                )
                response = response[len(prompt) :]
                response = {"long_summary": response}
        print(f"Generated {response=}")
        return {"text": response}


# -------------------------------------------------------------------
# Web API
# -------------------------------------------------------------------


@app.function(
    scaledown_window=60 * 10,
    timeout=60 * 5,
    allow_concurrent_inputs=30,
    secrets=[
        Secret.from_name("reflector-gpu"),
    ],
)
@asgi_app()
def web():
    from fastapi import Depends, FastAPI, HTTPException, status
    from fastapi.security import OAuth2PasswordBearer
    from pydantic import BaseModel

    llmstub = LLM()

    app = FastAPI()
    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid API key",
                headers={"WWW-Authenticate": "Bearer"},
            )

    class LLMRequest(BaseModel):
        prompt: str
        gen_schema: Optional[dict] = None
        gen_cfg: Optional[dict] = None

    @app.post("/llm", dependencies=[Depends(apikey_auth)])
    def llm(
        req: LLMRequest,
    ):
        gen_schema = json.dumps(req.gen_schema) if req.gen_schema else None
        gen_cfg = json.dumps(req.gen_cfg) if req.gen_cfg else None
        func = llmstub.generate.spawn(
            prompt=req.prompt, gen_schema=gen_schema, gen_cfg=gen_cfg
        )
        result = func.get()
        return result

    return app
@@ -1,41 +1,78 @@
import os
import tempfile
import sys
import threading
import uuid
from typing import Generator, Mapping, NamedTuple, NewType, TypedDict
from urllib.parse import urlparse

import modal
from pydantic import BaseModel

MODELS_DIR = "/models"

MODEL_NAME = "large-v2"
MODEL_COMPUTE_TYPE: str = "float16"
MODEL_NUM_WORKERS: int = 1

MINUTES = 60  # seconds
SAMPLERATE = 16000
UPLOADS_PATH = "/uploads"
CACHE_PATH = "/models"
SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}

volume = modal.Volume.from_name("models", create_if_missing=True)

WhisperUniqFilename = NewType("WhisperUniqFilename", str)
AudioFileExtension = NewType("AudioFileExtension", str)

app = modal.App("reflector-transcriber")

model_cache = modal.Volume.from_name("models", create_if_missing=True)
upload_volume = modal.Volume.from_name("whisper-uploads", create_if_missing=True)


class TimeSegment(NamedTuple):
    """Represents a time segment with start and end times."""

    start: float
    end: float


class AudioSegment(NamedTuple):
    """Represents an audio segment with timing and audio data."""

    start: float
    end: float
    audio: any


class TranscriptResult(NamedTuple):
    """Represents a transcription result with text and word timings."""

    text: str
    words: list["WordTiming"]


class WordTiming(TypedDict):
    """Represents a word with its timing information."""

    word: str
    start: float
    end: float


def download_model():
    from faster_whisper import download_model

    volume.reload()
    model_cache.reload()

    download_model(MODEL_NAME, cache_dir=MODELS_DIR)
    download_model(MODEL_NAME, cache_dir=CACHE_PATH)

    volume.commit()
    model_cache.commit()


image = (
    modal.Image.debian_slim(python_version="3.12")
    .pip_install(
        "huggingface_hub==0.27.1",
        "hf-transfer==0.1.9",
        "torch==2.5.1",
        "faster-whisper==1.1.1",
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1",
@@ -45,19 +82,98 @@ image = (
            ),
        }
    )
    .run_function(download_model, volumes={MODELS_DIR: volume})
    .apt_install("ffmpeg")
    .pip_install(
        "huggingface_hub==0.27.1",
        "hf-transfer==0.1.9",
        "torch==2.5.1",
        "faster-whisper==1.1.1",
        "fastapi==0.115.12",
        "requests",
        "librosa==0.10.1",
        "numpy<2",
        "silero-vad==5.1.0",
    )
    .run_function(download_model, volumes={CACHE_PATH: model_cache})
)


def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
    parsed_url = urlparse(url)
    url_path = parsed_url.path

    for ext in SUPPORTED_FILE_EXTENSIONS:
        if url_path.lower().endswith(f".{ext}"):
            return AudioFileExtension(ext)

    content_type = headers.get("content-type", "").lower()
    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
        return AudioFileExtension("mp3")
    if "audio/wav" in content_type:
        return AudioFileExtension("wav")
    if "audio/mp4" in content_type:
        return AudioFileExtension("mp4")

    raise ValueError(
        f"Unsupported audio format for URL: {url}. "
        f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
    )


def download_audio_to_volume(
    audio_file_url: str,
) -> tuple[WhisperUniqFilename, AudioFileExtension]:
    import requests
    from fastapi import HTTPException

    response = requests.head(audio_file_url, allow_redirects=True)
    if response.status_code == 404:
        raise HTTPException(status_code=404, detail="Audio file not found")

    response = requests.get(audio_file_url, allow_redirects=True)
    response.raise_for_status()

    audio_suffix = detect_audio_format(audio_file_url, response.headers)
    unique_filename = WhisperUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
    file_path = f"{UPLOADS_PATH}/{unique_filename}"

    with open(file_path, "wb") as f:
        f.write(response.content)

    upload_volume.commit()
    return unique_filename, audio_suffix


def pad_audio(audio_array, sample_rate: int = SAMPLERATE):
    """Add 0.5s of silence if audio is shorter than the silence_padding window.

    Whisper does not require this strictly, but aligning behavior with Parakeet
    avoids edge-case crashes on extremely short inputs and makes comparisons easier.
    """
    import numpy as np

    audio_duration = len(audio_array) / sample_rate
    if audio_duration < VAD_CONFIG["silence_padding"]:
        silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
        silence = np.zeros(silence_samples, dtype=np.float32)
        return np.concatenate([audio_array, silence])
    return audio_array
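
# Worked example (illustrative, not part of the change above): at
# SAMPLERATE=16000 and silence_padding=0.5, a 0.3s clip (4800 samples) gets
# 8000 zero samples appended by pad_audio (12800 samples, 0.8s total); any
# input of 0.5s or longer is returned unchanged.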


@app.cls(
    gpu="A10G",
    timeout=5 * MINUTES,
    scaledown_window=5 * MINUTES,
    allow_concurrent_inputs=6,
    image=image,
    volumes={MODELS_DIR: volume},
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
)
class Transcriber:
@modal.concurrent(max_inputs=10)
class TranscriberWhisperLive:
    """Live transcriber class for small audio segments (A10G).

    Mirrors the Parakeet live class API but uses Faster-Whisper under the hood.
    """

    @modal.enter()
    def enter(self):
        import faster_whisper
@@ -71,23 +187,200 @@ class Transcriber:
            device=self.device,
            compute_type=MODEL_COMPUTE_TYPE,
            num_workers=MODEL_NUM_WORKERS,
            download_root=MODELS_DIR,
            download_root=CACHE_PATH,
            local_files_only=True,
        )
        print(f"Model is on device: {self.device}")

    @modal.method()
    def transcribe_segment(
        self,
        audio_data: str,
        audio_suffix: str,
        language: str,
        filename: str,
        language: str = "en",
    ):
        with tempfile.NamedTemporaryFile("wb+", suffix=f".{audio_suffix}") as fp:
            fp.write(audio_data)
        """Transcribe a single uploaded audio file by filename."""
        upload_volume.reload()

        file_path = f"{UPLOADS_PATH}/{filename}"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        with self.lock:
            with NoStdStreams():
                segments, _ = self.model.transcribe(
                    file_path,
                    language=language,
                    beam_size=5,
                    word_timestamps=True,
                    vad_filter=True,
                    vad_parameters={"min_silence_duration_ms": 500},
                )

        segments = list(segments)
        text = "".join(segment.text for segment in segments).strip()
        words = [
            {
                "word": word.word,
                "start": round(float(word.start), 2),
                "end": round(float(word.end), 2),
            }
            for segment in segments
            for word in segment.words
        ]

        return {"text": text, "words": words}

    @modal.method()
    def transcribe_batch(
        self,
        filenames: list[str],
        language: str = "en",
    ):
        """Transcribe multiple uploaded audio files and return per-file results."""
        upload_volume.reload()

        results = []
        for filename in filenames:
            file_path = f"{UPLOADS_PATH}/{filename}"
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Batch file not found: {file_path}")

            with self.lock:
                with NoStdStreams():
                    segments, _ = self.model.transcribe(
                        file_path,
                        language=language,
                        beam_size=5,
                        word_timestamps=True,
                        vad_filter=True,
                        vad_parameters={"min_silence_duration_ms": 500},
                    )

            segments = list(segments)
            text = "".join(seg.text for seg in segments).strip()
            words = [
                {
                    "word": w.word,
                    "start": round(float(w.start), 2),
                    "end": round(float(w.end), 2),
                }
                for seg in segments
                for w in seg.words
            ]

            results.append(
                {
                    "filename": filename,
                    "text": text,
                    "words": words,
                }
            )

        return results


@app.cls(
    gpu="L40S",
    timeout=15 * MINUTES,
    image=image,
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
)
class TranscriberWhisperFile:
    """File transcriber for larger/longer audio, using VAD-driven batching (L40S)."""

    @modal.enter()
    def enter(self):
        import faster_whisper
        import torch
        from silero_vad import load_silero_vad

        self.lock = threading.Lock()
        self.use_gpu = torch.cuda.is_available()
        self.device = "cuda" if self.use_gpu else "cpu"
        self.model = faster_whisper.WhisperModel(
            MODEL_NAME,
            device=self.device,
            compute_type=MODEL_COMPUTE_TYPE,
            num_workers=MODEL_NUM_WORKERS,
            download_root=CACHE_PATH,
            local_files_only=True,
        )
        self.vad_model = load_silero_vad(onnx=False)

    @modal.method()
    def transcribe_segment(
        self, filename: str, timestamp_offset: float = 0.0, language: str = "en"
    ):
        import librosa
        import numpy as np
        from silero_vad import VADIterator

        def vad_segments(
            audio_array,
            sample_rate: int = SAMPLERATE,
            window_size: int = VAD_CONFIG["window_size"],
        ) -> Generator[TimeSegment, None, None]:
            """Generate speech segments as TimeSegment using Silero VAD."""
            iterator = VADIterator(self.vad_model, sampling_rate=sample_rate)
            start = None
            for i in range(0, len(audio_array), window_size):
                chunk = audio_array[i : i + window_size]
                if len(chunk) < window_size:
                    chunk = np.pad(
                        chunk, (0, window_size - len(chunk)), mode="constant"
                    )
                speech = iterator(chunk)
                if not speech:
                    continue
                if "start" in speech:
                    start = speech["start"]
                    continue
                if "end" in speech and start is not None:
                    end = speech["end"]
                    yield TimeSegment(
                        start / float(SAMPLERATE), end / float(SAMPLERATE)
                    )
                    start = None
            iterator.reset_states()

        upload_volume.reload()
        file_path = f"{UPLOADS_PATH}/{filename}"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        audio_array, _sr = librosa.load(file_path, sr=SAMPLERATE, mono=True)

        # Batch segments up to ~30s windows by merging contiguous VAD segments
        merged_batches: list[TimeSegment] = []
        batch_start = None
        batch_end = None
        max_duration = VAD_CONFIG["batch_max_duration"]
        for segment in vad_segments(audio_array):
            seg_start, seg_end = segment.start, segment.end
            if batch_start is None:
                batch_start, batch_end = seg_start, seg_end
                continue
            if seg_end - batch_start <= max_duration:
                batch_end = seg_end
            else:
                merged_batches.append(TimeSegment(batch_start, batch_end))
                batch_start, batch_end = seg_start, seg_end
        if batch_start is not None and batch_end is not None:
            merged_batches.append(TimeSegment(batch_start, batch_end))
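
        # Worked example (illustrative, not part of the change above): with
        # batch_max_duration=30.0, VAD segments [0-2] [3-5] [28-31] [40-42]
        # merge into [0-5] and [28-42]: a segment is absorbed while its end
        # stays within 30s of the current batch start; otherwise the batch is
        # flushed and a new one begins. Silences inside a batch are kept.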

        all_text = []
        all_words = []

        for segment in merged_batches:
            start_time, end_time = segment.start, segment.end
            s_idx = int(start_time * SAMPLERATE)
            e_idx = int(end_time * SAMPLERATE)
            segment = audio_array[s_idx:e_idx]
            segment = pad_audio(segment, SAMPLERATE)

            with self.lock:
                segments, _ = self.model.transcribe(
                    fp.name,
                    segment,
                    language=language,
                    beam_size=5,
                    word_timestamps=True,
@@ -96,66 +389,220 @@ class Transcriber:
                )

            segments = list(segments)
            text = "".join(segment.text for segment in segments)
            text = "".join(seg.text for seg in segments).strip()
            words = [
                {"word": word.word, "start": word.start, "end": word.end}
                for segment in segments
                for word in segment.words
                {
                    "word": w.word,
                    "start": round(float(w.start) + start_time + timestamp_offset, 2),
                    "end": round(float(w.end) + start_time + timestamp_offset, 2),
                }
                for seg in segments
                for w in seg.words
            ]
            if text:
                all_text.append(text)
                all_words.extend(words)

        return {"text": text, "words": words}
        return {"text": " ".join(all_text), "words": all_words}


def detect_audio_format(url: str, headers: dict) -> str:
    from urllib.parse import urlparse

    from fastapi import HTTPException

    url_path = urlparse(url).path
    for ext in SUPPORTED_FILE_EXTENSIONS:
        if url_path.lower().endswith(f".{ext}"):
            return ext

    content_type = headers.get("content-type", "").lower()
    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
        return "mp3"
    if "audio/wav" in content_type:
        return "wav"
    if "audio/mp4" in content_type:
        return "mp4"

    raise HTTPException(
        status_code=400,
        detail=(
            f"Unsupported audio format for URL. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
        ),
    )


def download_audio_to_volume(audio_file_url: str) -> tuple[str, str]:
    import requests
    from fastapi import HTTPException

    response = requests.head(audio_file_url, allow_redirects=True)
    if response.status_code == 404:
        raise HTTPException(status_code=404, detail="Audio file not found")

    response = requests.get(audio_file_url, allow_redirects=True)
    response.raise_for_status()

    audio_suffix = detect_audio_format(audio_file_url, response.headers)
    unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
    file_path = f"{UPLOADS_PATH}/{unique_filename}"

    with open(file_path, "wb") as f:
        f.write(response.content)

    upload_volume.commit()
    return unique_filename, audio_suffix


@app.function(
    scaledown_window=60,
    timeout=60,
    allow_concurrent_inputs=40,
    timeout=600,
    secrets=[
        modal.Secret.from_name("reflector-gpu"),
    ],
    volumes={MODELS_DIR: volume},
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    image=image,
)
@modal.concurrent(max_inputs=40)
@modal.asgi_app()
def web():
    from fastapi import Body, Depends, FastAPI, HTTPException, UploadFile, status
    from fastapi import (
        Body,
        Depends,
        FastAPI,
        Form,
        HTTPException,
        UploadFile,
        status,
    )
    from fastapi.security import OAuth2PasswordBearer
    from typing_extensions import Annotated

    transcriber = Transcriber()
    transcriber_live = TranscriberWhisperLive()
    transcriber_file = TranscriberWhisperFile()

    app = FastAPI()

    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    supported_file_types = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey != os.environ["REFLECTOR_GPU_APIKEY"]:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid API key",
                headers={"WWW-Authenticate": "Bearer"},
            )
        if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
            return
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
            headers={"WWW-Authenticate": "Bearer"},
        )

    class TranscriptResponse(BaseModel):
        result: dict
    class TranscriptResponse(dict):
        pass

    @app.post("/v1/audio/transcriptions", dependencies=[Depends(apikey_auth)])
    def transcribe(
        file: UploadFile,
        model: str = "whisper-1",
        language: Annotated[str, Body(...)] = "en",
    ) -> TranscriptResponse:
        audio_data = file.file.read()
        audio_suffix = file.filename.split(".")[-1]
        assert audio_suffix in supported_file_types
        file: UploadFile = None,
        files: list[UploadFile] | None = None,
        model: str = Form(MODEL_NAME),
        language: str = Form("en"),
        batch: bool = Form(False),
    ):
        if not file and not files:
            raise HTTPException(
                status_code=400, detail="Either 'file' or 'files' parameter is required"
            )
        if batch and not files:
            raise HTTPException(
                status_code=400, detail="Batch transcription requires 'files'"
            )

        func = transcriber.transcribe_segment.spawn(
            audio_data=audio_data,
            audio_suffix=audio_suffix,
            language=language,
        )
        result = func.get()
        return result
        upload_files = [file] if file else files

        uploaded_filenames: list[str] = []
        for upload_file in upload_files:
            audio_suffix = upload_file.filename.split(".")[-1]
            if audio_suffix not in SUPPORTED_FILE_EXTENSIONS:
                raise HTTPException(
                    status_code=400,
                    detail=(
                        f"Unsupported audio format. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
                    ),
                )

            unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
            file_path = f"{UPLOADS_PATH}/{unique_filename}"
            with open(file_path, "wb") as f:
                content = upload_file.file.read()
                f.write(content)
            uploaded_filenames.append(unique_filename)

        upload_volume.commit()

        try:
            if batch and len(upload_files) > 1:
                func = transcriber_live.transcribe_batch.spawn(
                    filenames=uploaded_filenames,
                    language=language,
                )
                results = func.get()
                return {"results": results}

            results = []
            for filename in uploaded_filenames:
                func = transcriber_live.transcribe_segment.spawn(
                    filename=filename,
                    language=language,
                )
                result = func.get()
                result["filename"] = filename
                results.append(result)

            return {"results": results} if len(results) > 1 else results[0]
        finally:
            for filename in uploaded_filenames:
                try:
                    file_path = f"{UPLOADS_PATH}/{filename}"
                    os.remove(file_path)
                except Exception:
                    pass
            upload_volume.commit()

    @app.post("/v1/audio/transcriptions-from-url", dependencies=[Depends(apikey_auth)])
    def transcribe_from_url(
        audio_file_url: str = Body(
            ..., description="URL of the audio file to transcribe"
        ),
        model: str = Body(MODEL_NAME),
        language: str = Body("en"),
        timestamp_offset: float = Body(0.0),
    ):
        unique_filename, _audio_suffix = download_audio_to_volume(audio_file_url)
        try:
            func = transcriber_file.transcribe_segment.spawn(
                filename=unique_filename,
                timestamp_offset=timestamp_offset,
                language=language,
            )
            result = func.get()
            return result
        finally:
            try:
                file_path = f"{UPLOADS_PATH}/{unique_filename}"
                os.remove(file_path)
                upload_volume.commit()
            except Exception:
                pass

    return app


class NoStdStreams:
    def __init__(self):
        self.devnull = open(os.devnull, "w")

    def __enter__(self):
        self._stdout, self._stderr = sys.stdout, sys.stderr
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout, sys.stderr = self.devnull, self.devnull

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout, sys.stderr = self._stdout, self._stderr
        self.devnull.close()

658
server/gpu/modal_deployments/reflector_transcriber_parakeet.py
Normal file
@@ -0,0 +1,658 @@
import logging
import os
import sys
import threading
import uuid
from typing import Generator, Mapping, NamedTuple, NewType, TypedDict
from urllib.parse import urlparse

import modal

MODEL_NAME = "nvidia/parakeet-tdt-0.6b-v2"
SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
SAMPLERATE = 16000
UPLOADS_PATH = "/uploads"
CACHE_PATH = "/cache"
VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}

ParakeetUniqFilename = NewType("ParakeetUniqFilename", str)
AudioFileExtension = NewType("AudioFileExtension", str)


class TimeSegment(NamedTuple):
    """Represents a time segment with start and end times."""

    start: float
    end: float


class AudioSegment(NamedTuple):
    """Represents an audio segment with timing and audio data."""

    start: float
    end: float
    audio: any


class TranscriptResult(NamedTuple):
    """Represents a transcription result with text and word timings."""

    text: str
    words: list["WordTiming"]


class WordTiming(TypedDict):
    """Represents a word with its timing information."""

    word: str
    start: float
    end: float


app = modal.App("reflector-transcriber-parakeet")

# Volume for caching model weights
model_cache = modal.Volume.from_name("parakeet-model-cache", create_if_missing=True)
# Volume for temporary file uploads
upload_volume = modal.Volume.from_name("parakeet-uploads", create_if_missing=True)

image = (
    modal.Image.from_registry(
        "nvidia/cuda:12.8.0-cudnn-devel-ubuntu22.04", add_python="3.12"
    )
    .env(
        {
            "HF_HUB_ENABLE_HF_TRANSFER": "1",
            "HF_HOME": "/cache",
            "DEBIAN_FRONTEND": "noninteractive",
            "CXX": "g++",
            "CC": "g++",
        }
    )
    .apt_install("ffmpeg")
    .pip_install(
        "hf_transfer==0.1.9",
        "huggingface_hub[hf-xet]==0.31.2",
        "nemo_toolkit[asr]==2.3.0",
        "cuda-python==12.8.0",
        "fastapi==0.115.12",
        "numpy<2",
        "librosa==0.10.1",
        "requests",
        "silero-vad==5.1.0",
        "torch",
    )
    .entrypoint([])  # silence chatty logs by container on start
)

def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
    parsed_url = urlparse(url)
    url_path = parsed_url.path

    for ext in SUPPORTED_FILE_EXTENSIONS:
        if url_path.lower().endswith(f".{ext}"):
            return AudioFileExtension(ext)

    content_type = headers.get("content-type", "").lower()
    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
        return AudioFileExtension("mp3")
    if "audio/wav" in content_type:
        return AudioFileExtension("wav")
    if "audio/mp4" in content_type:
        return AudioFileExtension("mp4")

    raise ValueError(
        f"Unsupported audio format for URL: {url}. "
        f"Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
    )


def download_audio_to_volume(
    audio_file_url: str,
) -> tuple[ParakeetUniqFilename, AudioFileExtension]:
    import requests
    from fastapi import HTTPException

    response = requests.head(audio_file_url, allow_redirects=True)
    if response.status_code == 404:
        raise HTTPException(status_code=404, detail="Audio file not found")

    response = requests.get(audio_file_url, allow_redirects=True)
    response.raise_for_status()

    audio_suffix = detect_audio_format(audio_file_url, response.headers)
    unique_filename = ParakeetUniqFilename(f"{uuid.uuid4()}.{audio_suffix}")
    file_path = f"{UPLOADS_PATH}/{unique_filename}"

    with open(file_path, "wb") as f:
        f.write(response.content)

    upload_volume.commit()
    return unique_filename, audio_suffix


def pad_audio(audio_array, sample_rate: int = SAMPLERATE):
    """Add 0.5 seconds of silence if audio is less than 500ms.

    This is a workaround for a Parakeet bug where very short audio (<500ms) causes:
    ValueError: `char_offsets`: [] and `processed_tokens`: [157, 834, 834, 841]
    have to be of the same length

    See: https://github.com/NVIDIA/NeMo/issues/8451
    """
    import numpy as np

    audio_duration = len(audio_array) / sample_rate
    if audio_duration < 0.5:
        silence_samples = int(sample_rate * 0.5)
        silence = np.zeros(silence_samples, dtype=np.float32)
        return np.concatenate([audio_array, silence])
    return audio_array

@app.cls(
    gpu="A10G",
    timeout=600,
    scaledown_window=300,
    image=image,
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    enable_memory_snapshot=True,
    experimental_options={"enable_gpu_snapshot": True},
)
@modal.concurrent(max_inputs=10)
class TranscriberParakeetLive:
    @modal.enter(snap=True)
    def enter(self):
        import nemo.collections.asr as nemo_asr

        logging.getLogger("nemo_logger").setLevel(logging.CRITICAL)

        self.lock = threading.Lock()
        self.model = nemo_asr.models.ASRModel.from_pretrained(model_name=MODEL_NAME)
        device = next(self.model.parameters()).device
        print(f"Model is on device: {device}")

    @modal.method()
    def transcribe_segment(
        self,
        filename: str,
    ):
        import librosa

        upload_volume.reload()

        file_path = f"{UPLOADS_PATH}/{filename}"
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"File not found: {file_path}")

        audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
        padded_audio = pad_audio(audio_array, sample_rate)

        with self.lock:
            with NoStdStreams():
                (output,) = self.model.transcribe([padded_audio], timestamps=True)

        text = output.text.strip()
        words: list[WordTiming] = [
            WordTiming(
                # XXX the space added here is to match the output of whisper:
                # whisper adds a space to each word, while parakeet doesn't
                word=word_info["word"] + " ",
                start=round(word_info["start"], 2),
                end=round(word_info["end"], 2),
            )
            for word_info in output.timestamp["word"]
        ]

        return {"text": text, "words": words}

    @modal.method()
    def transcribe_batch(
        self,
        filenames: list[str],
    ):
        import librosa

        upload_volume.reload()

        results = []
        audio_arrays = []

        # Load all audio files with padding
        for filename in filenames:
            file_path = f"{UPLOADS_PATH}/{filename}"
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Batch file not found: {file_path}")

            audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
            padded_audio = pad_audio(audio_array, sample_rate)
            audio_arrays.append(padded_audio)

        with self.lock:
            with NoStdStreams():
                outputs = self.model.transcribe(audio_arrays, timestamps=True)

        # Process results for each file
        for i, (filename, output) in enumerate(zip(filenames, outputs)):
            text = output.text.strip()

            words: list[WordTiming] = [
                WordTiming(
                    word=word_info["word"] + " ",
                    start=round(word_info["start"], 2),
                    end=round(word_info["end"], 2),
                )
                for word_info in output.timestamp["word"]
            ]

            results.append(
                {
                    "filename": filename,
                    "text": text,
                    "words": words,
                }
            )

        return results


# L40S class for file transcription (bigger files)
@app.cls(
    gpu="L40S",
    timeout=900,
    image=image,
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    enable_memory_snapshot=True,
    experimental_options={"enable_gpu_snapshot": True},
)
class TranscriberParakeetFile:
    @modal.enter(snap=True)
    def enter(self):
        import nemo.collections.asr as nemo_asr
        import torch
        from silero_vad import load_silero_vad

        logging.getLogger("nemo_logger").setLevel(logging.CRITICAL)

        self.model = nemo_asr.models.ASRModel.from_pretrained(model_name=MODEL_NAME)
        device = next(self.model.parameters()).device
        print(f"Model is on device: {device}")

        torch.set_num_threads(1)
        self.vad_model = load_silero_vad(onnx=False)
        print("Silero VAD initialized")

    @modal.method()
    def transcribe_segment(
        self,
        filename: str,
        timestamp_offset: float = 0.0,
    ):
        import librosa
        import numpy as np
        from silero_vad import VADIterator

        def load_and_convert_audio(file_path):
            audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
            return audio_array

        def vad_segment_generator(
            audio_array,
        ) -> Generator[TimeSegment, None, None]:
            """Generate speech segments using VAD with start/end sample indices"""
            vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
            window_size = VAD_CONFIG["window_size"]
            start = None

            for i in range(0, len(audio_array), window_size):
                chunk = audio_array[i : i + window_size]
                if len(chunk) < window_size:
                    chunk = np.pad(
                        chunk, (0, window_size - len(chunk)), mode="constant"
                    )

                speech_dict = vad_iterator(chunk)
                if not speech_dict:
                    continue

                if "start" in speech_dict:
                    start = speech_dict["start"]
                    continue

                if "end" in speech_dict and start is not None:
                    end = speech_dict["end"]
                    start_time = start / float(SAMPLERATE)
                    end_time = end / float(SAMPLERATE)

                    yield TimeSegment(start_time, end_time)
                    start = None

            vad_iterator.reset_states()

        def batch_speech_segments(
            segments: Generator[TimeSegment, None, None], max_duration: int
        ) -> Generator[TimeSegment, None, None]:
            """
            Input segments:
            [0-2] [3-5] [6-8] [10-11] [12-15] [17-19] [20-22]

            ↓ (max_duration=10)

            Output batches:
            [0-8] [10-19] [20-22]

            Note: silences are kept for better transcription; the previous
            implementation passed segments separately, but the output was
            less accurate.
            """
            batch_start_time = None
            batch_end_time = None

            for segment in segments:
                start_time, end_time = segment.start, segment.end
                if batch_start_time is None or batch_end_time is None:
                    batch_start_time = start_time
                    batch_end_time = end_time
                    continue

                total_duration = end_time - batch_start_time

                if total_duration <= max_duration:
                    batch_end_time = end_time
                    continue

                yield TimeSegment(batch_start_time, batch_end_time)
                batch_start_time = start_time
                batch_end_time = end_time

            if batch_start_time is None or batch_end_time is None:
                return

            yield TimeSegment(batch_start_time, batch_end_time)

        def batch_segment_to_audio_segment(
            segments: Generator[TimeSegment, None, None],
            audio_array,
        ) -> Generator[AudioSegment, None, None]:
            """Extract audio segments and apply padding for Parakeet compatibility.

            Uses pad_audio to ensure segments are at least 0.5s long, preventing
            Parakeet crashes. This padding may cause slight timing overlaps between
            segments, which are corrected by enforce_word_timing_constraints.
            """
            for segment in segments:
                start_time, end_time = segment.start, segment.end
                start_sample = int(start_time * SAMPLERATE)
                end_sample = int(end_time * SAMPLERATE)
                audio_segment = audio_array[start_sample:end_sample]

                padded_segment = pad_audio(audio_segment, SAMPLERATE)

                yield AudioSegment(start_time, end_time, padded_segment)

        def transcribe_batch(model, audio_segments: list) -> list:
            with NoStdStreams():
                outputs = model.transcribe(audio_segments, timestamps=True)
            return outputs

        def enforce_word_timing_constraints(
            words: list[WordTiming],
        ) -> list[WordTiming]:
            """Enforce that word end times don't exceed the start time of the next word.

            Due to silence padding added in batch_segment_to_audio_segment for better
            transcription accuracy, word timings from different segments may overlap.
            This function ensures there are no overlaps by adjusting end times.
            """
            if len(words) <= 1:
                return words

            enforced_words = []
            for i, word in enumerate(words):
                enforced_word = word.copy()

                if i < len(words) - 1:
                    next_start = words[i + 1]["start"]
                    if enforced_word["end"] > next_start:
                        enforced_word["end"] = next_start

                enforced_words.append(enforced_word)

            return enforced_words
|
||||
def emit_results(
|
||||
results: list,
|
||||
segments_info: list[AudioSegment],
|
||||
) -> Generator[TranscriptResult, None, None]:
|
||||
"""Yield transcribed text and word timings from model output, adjusting timestamps to absolute positions."""
|
||||
for i, (output, segment) in enumerate(zip(results, segments_info)):
|
||||
start_time, end_time = segment.start, segment.end
|
||||
text = output.text.strip()
|
||||
words: list[WordTiming] = [
|
||||
WordTiming(
|
||||
word=word_info["word"] + " ",
|
||||
start=round(
|
||||
word_info["start"] + start_time + timestamp_offset, 2
|
||||
),
|
||||
end=round(word_info["end"] + start_time + timestamp_offset, 2),
|
||||
)
|
||||
for word_info in output.timestamp["word"]
|
||||
]
|
||||
|
||||
yield TranscriptResult(text, words)
|
||||
|
||||
upload_volume.reload()
|
||||
|
||||
file_path = f"{UPLOADS_PATH}/{filename}"
|
||||
if not os.path.exists(file_path):
|
||||
raise FileNotFoundError(f"File not found: {file_path}")
|
||||
|
||||
audio_array = load_and_convert_audio(file_path)
|
||||
total_duration = len(audio_array) / float(SAMPLERATE)
|
||||
|
||||
all_text_parts: list[str] = []
|
||||
all_words: list[WordTiming] = []
|
||||
|
||||
raw_segments = vad_segment_generator(audio_array)
|
||||
speech_segments = batch_speech_segments(
|
||||
raw_segments,
|
||||
VAD_CONFIG["batch_max_duration"],
|
||||
)
|
||||
audio_segments = batch_segment_to_audio_segment(speech_segments, audio_array)
|
||||
|
||||
for batch in audio_segments:
|
||||
audio_segment = batch.audio
|
||||
results = transcribe_batch(self.model, [audio_segment])
|
||||
|
||||
for result in emit_results(
|
||||
results,
|
||||
[batch],
|
||||
):
|
||||
if not result.text:
|
||||
continue
|
||||
all_text_parts.append(result.text)
|
||||
all_words.extend(result.words)
|
||||
|
||||
all_words = enforce_word_timing_constraints(all_words)
|
||||
|
||||
combined_text = " ".join(all_text_parts)
|
||||
return {"text": combined_text, "words": all_words}


@app.function(
    scaledown_window=60,
    timeout=600,
    secrets=[
        modal.Secret.from_name("reflector-gpu"),
    ],
    volumes={CACHE_PATH: model_cache, UPLOADS_PATH: upload_volume},
    image=image,
)
@modal.concurrent(max_inputs=40)
@modal.asgi_app()
def web():
    import os
    import uuid

    from fastapi import (
        Body,
        Depends,
        FastAPI,
        Form,
        HTTPException,
        UploadFile,
        status,
    )
    from fastapi.security import OAuth2PasswordBearer
    from pydantic import BaseModel

    transcriber_live = TranscriberParakeetLive()
    transcriber_file = TranscriberParakeetFile()

    app = FastAPI()

    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
            return
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
            headers={"WWW-Authenticate": "Bearer"},
        )

    class TranscriptResponse(BaseModel):
        result: dict

    @app.post("/v1/audio/transcriptions", dependencies=[Depends(apikey_auth)])
    def transcribe(
        file: UploadFile = None,
        files: list[UploadFile] | None = None,
        model: str = Form(MODEL_NAME),
        language: str = Form("en"),
        batch: bool = Form(False),
    ):
        # Parakeet only supports English
        if language != "en":
            raise HTTPException(
                status_code=400,
                detail=f"Parakeet model only supports English. Got language='{language}'",
            )
        # Handle both single file and multiple files
        if not file and not files:
            raise HTTPException(
                status_code=400, detail="Either 'file' or 'files' parameter is required"
            )
        if batch and not files:
            raise HTTPException(
                status_code=400, detail="Batch transcription requires 'files'"
            )

        upload_files = [file] if file else files

        # Upload files to volume
        uploaded_filenames = []
        for upload_file in upload_files:
            audio_suffix = upload_file.filename.split(".")[-1]
            assert audio_suffix in SUPPORTED_FILE_EXTENSIONS

            # Generate unique filename
            unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
            file_path = f"{UPLOADS_PATH}/{unique_filename}"

            print(f"Writing file to: {file_path}")
            with open(file_path, "wb") as f:
                content = upload_file.file.read()
                f.write(content)

            uploaded_filenames.append(unique_filename)

        upload_volume.commit()

        try:
            # Use A10G live transcriber for per-file transcription
            if batch and len(upload_files) > 1:
                # Use batch transcription
                func = transcriber_live.transcribe_batch.spawn(
                    filenames=uploaded_filenames,
                )
                results = func.get()
                return {"results": results}

            # Per-file transcription
            results = []
            for filename in uploaded_filenames:
                func = transcriber_live.transcribe_segment.spawn(
                    filename=filename,
                )
                result = func.get()
                result["filename"] = filename
                results.append(result)

            return {"results": results} if len(results) > 1 else results[0]

        finally:
            for filename in uploaded_filenames:
                try:
                    file_path = f"{UPLOADS_PATH}/{filename}"
                    print(f"Deleting file: {file_path}")
                    os.remove(file_path)
                except Exception as e:
                    print(f"Error deleting {filename}: {e}")

            upload_volume.commit()

    @app.post("/v1/audio/transcriptions-from-url", dependencies=[Depends(apikey_auth)])
    def transcribe_from_url(
        audio_file_url: str = Body(
            ..., description="URL of the audio file to transcribe"
        ),
        model: str = Body(MODEL_NAME),
        language: str = Body("en", description="Language code (only 'en' supported)"),
        timestamp_offset: float = Body(0.0),
    ):
        # Parakeet only supports English
        if language != "en":
            raise HTTPException(
                status_code=400,
                detail=f"Parakeet model only supports English. Got language='{language}'",
            )
        unique_filename, audio_suffix = download_audio_to_volume(audio_file_url)

        try:
            func = transcriber_file.transcribe_segment.spawn(
                filename=unique_filename,
                timestamp_offset=timestamp_offset,
            )
            result = func.get()
            return result
        finally:
            try:
                file_path = f"{UPLOADS_PATH}/{unique_filename}"
                print(f"Deleting file: {file_path}")
                os.remove(file_path)
                upload_volume.commit()
            except Exception as e:
                print(f"Error cleaning up {unique_filename}: {e}")

    return app


class NoStdStreams:
    def __init__(self):
        self.devnull = open(os.devnull, "w")

    def __enter__(self):
        self._stdout, self._stderr = sys.stdout, sys.stderr
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout, sys.stderr = self.devnull, self.devnull

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout, sys.stderr = self._stdout, self._stderr
        self.devnull.close()
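
A quick client-side sketch of the upload endpoint above. The deployment URL is a placeholder, and the token is whatever was stored in the reflector-gpu Modal secret:

import requests

# Placeholder URL; substitute the deployed Modal app URL.
URL = "https://example--reflector-web.modal.run/v1/audio/transcriptions"

with open("meeting.mp3", "rb") as f:
    resp = requests.post(
        URL,
        headers={"Authorization": "Bearer <REFLECTOR_GPU_APIKEY>"},
        files={"file": ("meeting.mp3", f, "audio/mpeg")},
        data={"language": "en"},
    )
resp.raise_for_status()
# A single-file request returns one result object with text and word timings.
print(resp.json())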
@@ -1,171 +0,0 @@
# # Run an OpenAI-Compatible vLLM Server

import modal

MODELS_DIR = "/llamas"
MODEL_NAME = "NousResearch/Hermes-3-Llama-3.1-8B"
N_GPU = 1


def download_llm():
    from huggingface_hub import snapshot_download

    print("Downloading LLM model")
    snapshot_download(
        MODEL_NAME,
        local_dir=f"{MODELS_DIR}/{MODEL_NAME}",
        ignore_patterns=[
            "*.pt",
            "*.bin",
            "*.pth",
            "original/*",
        ],  # Ensure safetensors
    )
    print("LLM model downloaded")


def move_cache():
    from transformers.utils import move_cache as transformers_move_cache

    transformers_move_cache()


vllm_image = (
    modal.Image.debian_slim(python_version="3.10")
    .pip_install("vllm==0.5.3post1")
    .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
    .pip_install(
        # "accelerate==0.34.2",
        "einops==0.8.0",
        "hf-transfer~=0.1",
    )
    .run_function(download_llm)
    .run_function(move_cache)
    .pip_install(
        "bitsandbytes>=0.42.9",
    )
)

app = modal.App("reflector-vllm-hermes3")


@app.function(
    image=vllm_image,
    gpu=modal.gpu.A100(count=N_GPU, size="40GB"),
    timeout=60 * 5,
    scaledown_window=60 * 5,
    allow_concurrent_inputs=100,
    secrets=[
        modal.Secret.from_name("reflector-gpu"),
    ],
)
@modal.asgi_app()
def serve():
    import os

    import fastapi
    import vllm.entrypoints.openai.api_server as api_server
    from vllm.engine.arg_utils import AsyncEngineArgs
    from vllm.engine.async_llm_engine import AsyncLLMEngine
    from vllm.entrypoints.logger import RequestLogger
    from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
    from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
    from vllm.usage.usage_lib import UsageContext

    TOKEN = os.environ["REFLECTOR_GPU_APIKEY"]

    # create a fastAPI app that uses vLLM's OpenAI-compatible router
    web_app = fastapi.FastAPI(
        title=f"OpenAI-compatible {MODEL_NAME} server",
        description="Run an OpenAI-compatible LLM server with vLLM on modal.com",
        version="0.0.1",
        docs_url="/docs",
    )

    # security: CORS middleware for external requests
    http_bearer = fastapi.security.HTTPBearer(
        scheme_name="Bearer Token",
        description="See code for authentication details.",
    )
    web_app.add_middleware(
        fastapi.middleware.cors.CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # security: inject dependency on authed routes
    async def is_authenticated(api_key: str = fastapi.Security(http_bearer)):
        if api_key.credentials != TOKEN:
            raise fastapi.HTTPException(
                status_code=fastapi.status.HTTP_401_UNAUTHORIZED,
                detail="Invalid authentication credentials",
            )
        return {"username": "authenticated_user"}

    router = fastapi.APIRouter(dependencies=[fastapi.Depends(is_authenticated)])

    # wrap vllm's router in auth router
    router.include_router(api_server.router)
    # add authed vllm to our fastAPI app
    web_app.include_router(router)

    engine_args = AsyncEngineArgs(
        model=MODELS_DIR + "/" + MODEL_NAME,
        tensor_parallel_size=N_GPU,
        gpu_memory_utilization=0.90,
        # max_model_len=8096,
        enforce_eager=False,  # capture the graph for faster inference, but slower cold starts (30s > 20s)
        # --- 4 bits load
        # quantization="bitsandbytes",
        # load_format="bitsandbytes",
    )

    engine = AsyncLLMEngine.from_engine_args(
        engine_args, usage_context=UsageContext.OPENAI_API_SERVER
    )

    model_config = get_model_config(engine)

    request_logger = RequestLogger(max_log_len=2048)

    api_server.openai_serving_chat = OpenAIServingChat(
        engine,
        model_config=model_config,
        served_model_names=[MODEL_NAME],
        chat_template=None,
        response_role="assistant",
        lora_modules=[],
        prompt_adapters=[],
        request_logger=request_logger,
    )
    api_server.openai_serving_completion = OpenAIServingCompletion(
        engine,
        model_config=model_config,
        served_model_names=[MODEL_NAME],
        lora_modules=[],
        prompt_adapters=[],
        request_logger=request_logger,
    )

    return web_app


def get_model_config(engine):
    import asyncio

    try:  # adapted from vLLM source -- https://github.com/vllm-project/vllm/blob/507ef787d85dec24490069ffceacbd6b161f4f72/vllm/entrypoints/openai/api_server.py#L235C1-L247C1
        event_loop = asyncio.get_running_loop()
    except RuntimeError:
        event_loop = None

    if event_loop is not None and event_loop.is_running():
        # If the current is instanced by Ray Serve,
        # there is already a running event loop
        model_config = event_loop.run_until_complete(engine.get_model_config())
    else:
        # When using single vLLM without engine_use_ray
        model_config = asyncio.run(engine.get_model_config())

    return model_config
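
Since the server mounts vLLM's OpenAI-compatible router behind the bearer-token dependency, any OpenAI client can talk to it. A sketch, with the base URL being a placeholder:

from openai import OpenAI

client = OpenAI(
    base_url="https://example--reflector-vllm-hermes3-serve.modal.run/v1",  # placeholder
    api_key="<REFLECTOR_GPU_APIKEY>",  # checked by the is_authenticated dependency above
)
resp = client.chat.completions.create(
    model="NousResearch/Hermes-3-Llama-3.1-8B",
    messages=[{"role": "user", "content": "Summarize this meeting in one line."}],
)
print(resp.choices[0].message.content)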
@@ -1,16 +0,0 @@
LOAD DATABASE
    FROM sqlite:///app/reflector.sqlite3
    INTO pgsql://reflector:reflector@postgres:5432/reflector
WITH
    include drop,
    create tables,
    create indexes,
    reset sequences,
    preserve index names,
    prefetch rows = 10
SET
    work_mem to '512MB',
    maintenance_work_mem to '1024MB'
CAST
    column transcript.duration to float using (lambda (val) (when val (format nil "~f" val)))
;
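
Assuming this script is saved as, say, migrate.load (the filename is hypothetical), it is run with "pgloader migrate.load" from a host that can reach both databases; pgloader copies the SQLite schema and data into Postgres in one pass, and the CAST rule rewrites transcript.duration as a float on the way through.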
@@ -1 +1,3 @@
Generic single-database configuration.

Both data migrations and schema migrations must live in the migrations directory.
@@ -1,9 +1,10 @@
from logging.config import fileConfig

from alembic import context
from sqlalchemy import engine_from_config, pool

from reflector.db import metadata
from reflector.settings import settings
from sqlalchemy import engine_from_config, pool

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.

@@ -0,0 +1,36 @@
"""Add webhook fields to rooms

Revision ID: 0194f65cd6d3
Revises: 5a8907fd1d78
Create Date: 2025-08-27 09:03:19.610995

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0194f65cd6d3"
down_revision: Union[str, None] = "5a8907fd1d78"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.add_column(sa.Column("webhook_url", sa.String(), nullable=True))
        batch_op.add_column(sa.Column("webhook_secret", sa.String(), nullable=True))

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.drop_column("webhook_secret")
        batch_op.drop_column("webhook_url")

    # ### end Alembic commands ###

@@ -8,7 +8,6 @@ Create Date: 2024-09-24 16:12:56.944133

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.

@@ -0,0 +1,64 @@
"""add_long_summary_to_search_vector

Revision ID: 0ab2d7ffaa16
Revises: b1c33bd09963
Create Date: 2025-08-15 13:27:52.680211

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0ab2d7ffaa16"
down_revision: Union[str, None] = "b1c33bd09963"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Drop the existing search vector column and index
    op.drop_index("idx_transcript_search_vector_en", table_name="transcript")
    op.drop_column("transcript", "search_vector_en")

    # Recreate the search vector column with long_summary included
    op.execute("""
        ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
        GENERATED ALWAYS AS (
            setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
            setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') ||
            setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')
        ) STORED
    """)

    # Recreate the GIN index for the search vector
    op.create_index(
        "idx_transcript_search_vector_en",
        "transcript",
        ["search_vector_en"],
        postgresql_using="gin",
    )


def downgrade() -> None:
    # Drop the updated search vector column and index
    op.drop_index("idx_transcript_search_vector_en", table_name="transcript")
    op.drop_column("transcript", "search_vector_en")

    # Recreate the original search vector column without long_summary
    op.execute("""
        ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
        GENERATED ALWAYS AS (
            setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
            setweight(to_tsvector('english', coalesce(webvtt, '')), 'B')
        ) STORED
    """)

    # Recreate the GIN index for the search vector
    op.create_index(
        "idx_transcript_search_vector_en",
        "transcript",
        ["search_vector_en"],
        postgresql_using="gin",
    )
@@ -0,0 +1,25 @@
"""add_webvtt_field_to_transcript

Revision ID: 0bc0f3ff0111
Revises: b7df9609542c
Create Date: 2025-08-05 19:36:41.740957

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

revision: str = "0bc0f3ff0111"
down_revision: Union[str, None] = "b7df9609542c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.add_column("transcript", sa.Column("webvtt", sa.Text(), nullable=True))


def downgrade() -> None:
    op.drop_column("transcript", "webvtt")
@@ -0,0 +1,36 @@
"""remove user_id from meeting table

Revision ID: 0ce521cda2ee
Revises: 6dec9fb5b46c
Create Date: 2025-09-10 12:40:55.688899

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0ce521cda2ee"
down_revision: Union[str, None] = "6dec9fb5b46c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_column("user_id")

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("user_id", sa.VARCHAR(), autoincrement=False, nullable=True)
        )

    # ### end Alembic commands ###
@@ -5,11 +5,11 @@ Revises: f819277e5169
Create Date: 2023-11-07 11:12:21.614198

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "0fea6d96b096"

@@ -0,0 +1,46 @@
"""add_full_text_search

Revision ID: 116b2f287eab
Revises: 0bc0f3ff0111
Create Date: 2025-08-07 11:27:38.473517

"""

from typing import Sequence, Union

from alembic import op

revision: str = "116b2f287eab"
down_revision: Union[str, None] = "0bc0f3ff0111"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    conn = op.get_bind()
    if conn.dialect.name != "postgresql":
        return

    op.execute("""
        ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
        GENERATED ALWAYS AS (
            setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
            setweight(to_tsvector('english', coalesce(webvtt, '')), 'B')
        ) STORED
    """)

    op.create_index(
        "idx_transcript_search_vector_en",
        "transcript",
        ["search_vector_en"],
        postgresql_using="gin",
    )


def downgrade() -> None:
    conn = op.get_bind()
    if conn.dialect.name != "postgresql":
        return

    op.drop_index("idx_transcript_search_vector_en", table_name="transcript")
    op.drop_column("transcript", "search_vector_en")
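
For reference, a weighted query against the generated column created above might look like the following sketch; the connection URL and search string are placeholders:

import sqlalchemy as sa

engine = sa.create_engine("postgresql://reflector:reflector@localhost/reflector")
with engine.connect() as conn:
    rows = conn.execute(
        sa.text("""
            SELECT id, title,
                   ts_rank(search_vector_en, websearch_to_tsquery('english', :q)) AS rank
            FROM transcript
            WHERE search_vector_en @@ websearch_to_tsquery('english', :q)
            ORDER BY rank DESC
            LIMIT 10
        """),
        {"q": "quarterly planning"},
    ).fetchall()
# Title matches rank highest (weight 'A'), then the webvtt body text (weight 'B').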
@@ -5,26 +5,26 @@ Revises: 0fea6d96b096
Create Date: 2023-11-30 15:56:03.341466

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = '125031f7cb78'
down_revision: Union[str, None] = '0fea6d96b096'
revision: str = "125031f7cb78"
down_revision: Union[str, None] = "0fea6d96b096"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('transcript', sa.Column('participants', sa.JSON(), nullable=True))
    op.add_column("transcript", sa.Column("participants", sa.JSON(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('transcript', 'participants')
    op.drop_column("transcript", "participants")
    # ### end Alembic commands ###

@@ -5,6 +5,7 @@ Revises: f819277e5169
Create Date: 2025-06-17 14:00:03.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa

@@ -19,16 +20,16 @@ depends_on: Union[str, Sequence[str], None] = None

def upgrade() -> None:
    op.create_table(
        'meeting_consent',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('meeting_id', sa.String(), nullable=False),
        sa.Column('user_id', sa.String(), nullable=True),
        sa.Column('consent_given', sa.Boolean(), nullable=False),
        sa.Column('consent_timestamp', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['meeting_id'], ['meeting.id']),
        "meeting_consent",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("meeting_id", sa.String(), nullable=False),
        sa.Column("user_id", sa.String(), nullable=True),
        sa.Column("consent_given", sa.Boolean(), nullable=False),
        sa.Column("consent_timestamp", sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(["meeting_id"], ["meeting.id"]),
    )


def downgrade() -> None:
    op.drop_table('meeting_consent')
    op.drop_table("meeting_consent")

@@ -5,6 +5,7 @@ Revises: 20250617140003
Create Date: 2025-06-18 14:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa

@@ -0,0 +1,38 @@
"""Add events column to meetings table

Revision ID: 2890b5104577
Revises: 6e6ea8e607c5
Create Date: 2025-09-02 17:51:41.620777

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "2890b5104577"
down_revision: Union[str, None] = "6e6ea8e607c5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column(
                "events", sa.JSON(), server_default=sa.text("'[]'"), nullable=False
            )
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_column("events")

    # ### end Alembic commands ###
@@ -0,0 +1,32 @@
"""clean up orphaned room_id references in meeting table

Revision ID: 2ae3db106d4e
Revises: def1b5867d4c
Create Date: 2025-09-11 10:35:15.759967

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "2ae3db106d4e"
down_revision: Union[str, None] = "def1b5867d4c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Set room_id to NULL for meetings that reference non-existent rooms
    op.execute("""
        UPDATE meeting
        SET room_id = NULL
        WHERE room_id IS NOT NULL
        AND room_id NOT IN (SELECT id FROM room WHERE id IS NOT NULL)
    """)


def downgrade() -> None:
    # Cannot restore orphaned references - no operation needed
    pass
@@ -5,36 +5,40 @@ Revises: ccd68dc784ff
Create Date: 2025-07-15 16:53:40.397394

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = '2cf0b60a9d34'
down_revision: Union[str, None] = 'ccd68dc784ff'
revision: str = "2cf0b60a9d34"
down_revision: Union[str, None] = "ccd68dc784ff"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('transcript', schema=None) as batch_op:
        batch_op.alter_column('duration',
               existing_type=sa.INTEGER(),
               type_=sa.Float(),
               existing_nullable=True)
    with op.batch_alter_table("transcript", schema=None) as batch_op:
        batch_op.alter_column(
            "duration",
            existing_type=sa.INTEGER(),
            type_=sa.Float(),
            existing_nullable=True,
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('transcript', schema=None) as batch_op:
        batch_op.alter_column('duration',
               existing_type=sa.Float(),
               type_=sa.INTEGER(),
               existing_nullable=True)
    with op.batch_alter_table("transcript", schema=None) as batch_op:
        batch_op.alter_column(
            "duration",
            existing_type=sa.Float(),
            type_=sa.INTEGER(),
            existing_nullable=True,
        )

    # ### end Alembic commands ###

@@ -5,17 +5,17 @@ Revises: 9920ecfe2735
Create Date: 2023-11-02 19:53:09.116240

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from alembic import op
from sqlalchemy import select

from sqlalchemy.sql import column, table

# revision identifiers, used by Alembic.
revision: str = '38a927dcb099'
down_revision: Union[str, None] = '9920ecfe2735'
revision: str = "38a927dcb099"
down_revision: Union[str, None] = "9920ecfe2735"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


@@ -5,13 +5,13 @@ Revises: 38a927dcb099
Create Date: 2023-11-10 18:12:17.886522

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from alembic import op
from sqlalchemy import select

from sqlalchemy.sql import column, table

# revision identifiers, used by Alembic.
revision: str = "4814901632bc"
@@ -24,9 +24,11 @@ def upgrade() -> None:
    # for all the transcripts, calculate the duration from the mp3
    # and update the duration column
    from pathlib import Path
    from reflector.settings import settings

    import av

    from reflector.settings import settings

    bind = op.get_bind()
    transcript = table(
        "transcript", column("id", sa.String), column("duration", sa.Float)

@@ -5,14 +5,11 @@ Revises:
Create Date: 2023-08-29 10:54:45.142974

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = '543ed284d69a'
revision: str = "543ed284d69a"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

@@ -0,0 +1,50 @@
"""add cascade delete to meeting consent foreign key

Revision ID: 5a8907fd1d78
Revises: 0ab2d7ffaa16
Create Date: 2025-08-26 17:26:50.945491

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "5a8907fd1d78"
down_revision: Union[str, None] = "0ab2d7ffaa16"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
        batch_op.drop_constraint(
            batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey"
        )
        batch_op.create_foreign_key(
            batch_op.f("meeting_consent_meeting_id_fkey"),
            "meeting",
            ["meeting_id"],
            ["id"],
            ondelete="CASCADE",
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
        batch_op.drop_constraint(
            batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey"
        )
        batch_op.create_foreign_key(
            batch_op.f("meeting_consent_meeting_id_fkey"),
            "meeting",
            ["meeting_id"],
            ["id"],
        )

    # ### end Alembic commands ###
@@ -0,0 +1,28 @@
"""webhook url and secret null by default


Revision ID: 61882a919591
Revises: 0194f65cd6d3
Create Date: 2025-08-29 11:46:36.738091

"""

from typing import Sequence, Union

# revision identifiers, used by Alembic.
revision: str = "61882a919591"
down_revision: Union[str, None] = "0194f65cd6d3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
@@ -8,9 +8,8 @@ Create Date: 2025-06-27 09:04:21.006823

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "62dea3db63a5"
@@ -33,7 +32,7 @@ def upgrade() -> None:
        sa.Column("user_id", sa.String(), nullable=True),
        sa.Column("room_id", sa.String(), nullable=True),
        sa.Column(
            "is_locked", sa.Boolean(), server_default=sa.text("0"), nullable=False
            "is_locked", sa.Boolean(), server_default=sa.text("false"), nullable=False
        ),
        sa.Column("room_mode", sa.String(), server_default="normal", nullable=False),
        sa.Column(
@@ -54,12 +53,15 @@ def upgrade() -> None:
        sa.Column("user_id", sa.String(), nullable=False),
        sa.Column("created_at", sa.DateTime(), nullable=False),
        sa.Column(
            "zulip_auto_post", sa.Boolean(), server_default=sa.text("0"), nullable=False
            "zulip_auto_post",
            sa.Boolean(),
            server_default=sa.text("false"),
            nullable=False,
        ),
        sa.Column("zulip_stream", sa.String(), nullable=True),
        sa.Column("zulip_topic", sa.String(), nullable=True),
        sa.Column(
            "is_locked", sa.Boolean(), server_default=sa.text("0"), nullable=False
            "is_locked", sa.Boolean(), server_default=sa.text("false"), nullable=False
        ),
        sa.Column("room_mode", sa.String(), server_default="normal", nullable=False),
        sa.Column(

@@ -0,0 +1,38 @@
"""make meeting room_id required and add foreign key

Revision ID: 6dec9fb5b46c
Revises: 61882a919591
Create Date: 2025-09-10 10:47:06.006819

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "6dec9fb5b46c"
down_revision: Union[str, None] = "61882a919591"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=False)
        batch_op.create_foreign_key(
            None, "room", ["room_id"], ["id"], ondelete="CASCADE"
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_constraint("meeting_room_id_fkey", type_="foreignkey")
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=True)

    # ### end Alembic commands ###
@@ -0,0 +1,44 @@
"""Add VideoPlatform enum for rooms and meetings

Revision ID: 6e6ea8e607c5
Revises: 61882a919591
Create Date: 2025-09-02 17:33:21.022214

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "6e6ea8e607c5"
down_revision: Union[str, None] = "61882a919591"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("platform", sa.String(), server_default="whereby", nullable=False)
        )

    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("platform", sa.String(), server_default="whereby", nullable=False)
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.drop_column("platform")

    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_column("platform")

    # ### end Alembic commands ###
@@ -20,11 +20,14 @@ depends_on: Union[str, Sequence[str], None] = None

def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    sourcekind_enum = sa.Enum("room", "live", "file", name="sourcekind")
    sourcekind_enum.create(op.get_bind())

    op.add_column(
        "transcript",
        sa.Column(
            "source_kind",
            sa.Enum("ROOM", "LIVE", "FILE", name="sourcekind"),
            sourcekind_enum,
            nullable=True,
        ),
    )
@@ -43,6 +46,8 @@ def upgrade() -> None:
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("transcript", "source_kind")
    sourcekind_enum = sa.Enum(name="sourcekind")
    sourcekind_enum.drop(op.get_bind())


    # ### end Alembic commands ###

@@ -5,26 +5,28 @@ Revises: 62dea3db63a5
Create Date: 2024-09-06 14:02:06.649665

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = '764ce6db4388'
down_revision: Union[str, None] = '62dea3db63a5'
revision: str = "764ce6db4388"
down_revision: Union[str, None] = "62dea3db63a5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('transcript', sa.Column('zulip_message_id', sa.Integer(), nullable=True))
    op.add_column(
        "transcript", sa.Column("zulip_message_id", sa.Integer(), nullable=True)
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('transcript', 'zulip_message_id')
    op.drop_column("transcript", "zulip_message_id")
    # ### end Alembic commands ###

@@ -0,0 +1,106 @@
"""populate_webvtt_from_topics

Revision ID: 8120ebc75366
Revises: 116b2f287eab
Create Date: 2025-08-11 19:11:01.316947

"""

import json
from typing import Sequence, Union

from alembic import op
from sqlalchemy import text

# revision identifiers, used by Alembic.
revision: str = "8120ebc75366"
down_revision: Union[str, None] = "116b2f287eab"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def topics_to_webvtt(topics):
    """Convert topics list to WebVTT format string."""
    if not topics:
        return None

    lines = ["WEBVTT", ""]

    for topic in topics:
        start_time = format_timestamp(topic.get("start"))
        end_time = format_timestamp(topic.get("end"))
        text = topic.get("text", "").strip()

        if start_time and end_time and text:
            lines.append(f"{start_time} --> {end_time}")
            lines.append(text)
            lines.append("")

    return "\n".join(lines).strip()


def format_timestamp(seconds):
    """Format seconds to WebVTT timestamp format (HH:MM:SS.mmm)."""
    if seconds is None:
        return None

    hours = int(seconds // 3600)
    minutes = int((seconds % 3600) // 60)
    secs = seconds % 60

    return f"{hours:02d}:{minutes:02d}:{secs:06.3f}"


def upgrade() -> None:
    """Populate WebVTT field for all transcripts with topics."""

    # Get connection
    connection = op.get_bind()

    # Query all transcripts with topics
    result = connection.execute(
        text("SELECT id, topics FROM transcript WHERE topics IS NOT NULL")
    )

    rows = result.fetchall()
    print(f"Found {len(rows)} transcripts with topics")

    updated_count = 0
    error_count = 0

    for row in rows:
        transcript_id = row[0]
        topics_data = row[1]

        if not topics_data:
            continue

        try:
            # Parse JSON if it's a string
            if isinstance(topics_data, str):
                topics_data = json.loads(topics_data)

            # Convert topics to WebVTT format
            webvtt_content = topics_to_webvtt(topics_data)

            if webvtt_content:
                # Update the webvtt field
                connection.execute(
                    text("UPDATE transcript SET webvtt = :webvtt WHERE id = :id"),
                    {"webvtt": webvtt_content, "id": transcript_id},
                )
                updated_count += 1
                print(f"✓ Updated transcript {transcript_id}")

        except Exception as e:
            error_count += 1
            print(f"✗ Error updating transcript {transcript_id}: {e}")

print(f"\nMigration complete!")
|
||||
print(f" Updated: {updated_count}")
|
||||
print(f" Errors: {error_count}")
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
"""Clear WebVTT field for all transcripts."""
|
||||
op.execute(text("UPDATE transcript SET webvtt = NULL"))
|
||||
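
As a sanity check on the timestamp helper, a short sketch of its expected output (assuming format_timestamp from the migration above is in scope):

assert format_timestamp(3723.5) == "01:02:03.500"  # 1 h, 2 min, 3.5 s
assert format_timestamp(0) == "00:00:00.000"       # zero is a valid time; only None is skipped
assert format_timestamp(None) is None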
@@ -9,8 +9,6 @@ Create Date: 2025-07-15 19:30:19.876332
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = "88d292678ba2"
@@ -21,7 +19,7 @@ depends_on: Union[str, Sequence[str], None] = None

def upgrade() -> None:
    import json
    import re

    from sqlalchemy import text

    # Get database connection
@@ -58,7 +56,9 @@ def upgrade() -> None:
            fixed_events = json.dumps(jevents)
            assert "NaN" not in fixed_events
        except (json.JSONDecodeError, AssertionError) as e:
            print(f"Warning: Invalid JSON for transcript {transcript_id}, skipping: {e}")
            print(
                f"Warning: Invalid JSON for transcript {transcript_id}, skipping: {e}"
            )
            continue

        # Update the record with fixed JSON

@@ -5,13 +5,13 @@ Revises: 99365b0cd87b
Create Date: 2023-11-02 18:55:17.019498

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from alembic import op
from sqlalchemy import select

from sqlalchemy.sql import column, table

# revision identifiers, used by Alembic.
revision: str = "9920ecfe2735"

@@ -8,8 +8,8 @@ Create Date: 2023-09-01 20:19:47.216334

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "99365b0cd87b"
@@ -22,7 +22,7 @@ def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute(
        "UPDATE transcript SET events = "
        'REPLACE(events, \'"event": "SUMMARY"\', \'"event": "LONG_SUMMARY"\');'
        'REPLACE(events::text, \'"event": "SUMMARY"\', \'"event": "LONG_SUMMARY"\')::json;'
    )
    op.alter_column("transcript", "summary", new_column_name="long_summary")
    op.add_column("transcript", sa.Column("title", sa.String(), nullable=True))
@@ -34,7 +34,7 @@ def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.execute(
        "UPDATE transcript SET events = "
        'REPLACE(events, \'"event": "LONG_SUMMARY"\', \'"event": "SUMMARY"\');'
        'REPLACE(events::text, \'"event": "LONG_SUMMARY"\', \'"event": "SUMMARY"\')::json;'
    )
    with op.batch_alter_table("transcript", schema=None) as batch_op:
        batch_op.alter_column("long_summary", nullable=True, new_column_name="summary")

server/migrations/versions/9f5c78d352d6_datetime_timezone.py
@@ -0,0 +1,121 @@
"""datetime timezone

Revision ID: 9f5c78d352d6
Revises: 8120ebc75366
Create Date: 2025-08-13 19:18:27.113593

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql

# revision identifiers, used by Alembic.
revision: str = "9f5c78d352d6"
down_revision: Union[str, None] = "8120ebc75366"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column(
            "start_date",
            existing_type=postgresql.TIMESTAMP(),
            type_=sa.DateTime(timezone=True),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "end_date",
            existing_type=postgresql.TIMESTAMP(),
            type_=sa.DateTime(timezone=True),
            existing_nullable=True,
        )

    with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
        batch_op.alter_column(
            "consent_timestamp",
            existing_type=postgresql.TIMESTAMP(),
            type_=sa.DateTime(timezone=True),
            existing_nullable=False,
        )

    with op.batch_alter_table("recording", schema=None) as batch_op:
        batch_op.alter_column(
            "recorded_at",
            existing_type=postgresql.TIMESTAMP(),
            type_=sa.DateTime(timezone=True),
            existing_nullable=False,
        )

    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.alter_column(
            "created_at",
            existing_type=postgresql.TIMESTAMP(),
            type_=sa.DateTime(timezone=True),
            existing_nullable=False,
        )

    with op.batch_alter_table("transcript", schema=None) as batch_op:
        batch_op.alter_column(
            "created_at",
            existing_type=postgresql.TIMESTAMP(),
            type_=sa.DateTime(timezone=True),
            existing_nullable=True,
        )

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("transcript", schema=None) as batch_op:
        batch_op.alter_column(
            "created_at",
            existing_type=sa.DateTime(timezone=True),
            type_=postgresql.TIMESTAMP(),
            existing_nullable=True,
        )

    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.alter_column(
            "created_at",
            existing_type=sa.DateTime(timezone=True),
            type_=postgresql.TIMESTAMP(),
            existing_nullable=False,
        )

    with op.batch_alter_table("recording", schema=None) as batch_op:
        batch_op.alter_column(
            "recorded_at",
            existing_type=sa.DateTime(timezone=True),
            type_=postgresql.TIMESTAMP(),
            existing_nullable=False,
        )

    with op.batch_alter_table("meeting_consent", schema=None) as batch_op:
        batch_op.alter_column(
            "consent_timestamp",
            existing_type=sa.DateTime(timezone=True),
            type_=postgresql.TIMESTAMP(),
            existing_nullable=False,
        )

    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column(
            "end_date",
            existing_type=sa.DateTime(timezone=True),
            type_=postgresql.TIMESTAMP(),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "start_date",
            existing_type=sa.DateTime(timezone=True),
            type_=postgresql.TIMESTAMP(),
            existing_nullable=True,
        )

    # ### end Alembic commands ###
@@ -25,7 +25,7 @@ def upgrade() -> None:
|
||||
sa.Column(
|
||||
"is_shared",
|
||||
sa.Boolean(),
|
||||
server_default=sa.text("0"),
|
||||
server_default=sa.text("false"),
|
||||
nullable=False,
|
||||
),
|
||||
)
|
||||
|
||||
@@ -9,8 +9,6 @@ Create Date: 2025-07-15 20:09:40.253018
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import postgresql
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "a9c9c229ee36"
|
||||
|
||||
@@ -5,30 +5,37 @@ Revises: 6ea59639f30e
|
||||
Create Date: 2025-01-28 10:06:50.446233
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = 'b0e5f7876032'
|
||||
down_revision: Union[str, None] = '6ea59639f30e'
|
||||
revision: str = "b0e5f7876032"
|
||||
down_revision: Union[str, None] = "6ea59639f30e"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
with op.batch_alter_table('meeting', schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column('is_active', sa.Boolean(), server_default=sa.text('1'), nullable=False))
|
||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"is_active",
|
||||
sa.Boolean(),
|
||||
server_default=sa.text("true"),
|
||||
nullable=False,
|
||||
)
|
||||
)
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
with op.batch_alter_table('meeting', schema=None) as batch_op:
|
||||
batch_op.drop_column('is_active')
|
||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
||||
batch_op.drop_column("is_active")
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
@@ -0,0 +1,41 @@
|
||||
"""add_search_optimization_indexes
|
||||
|
||||
Revision ID: b1c33bd09963
|
||||
Revises: 9f5c78d352d6
|
||||
Create Date: 2025-08-14 17:26:02.117408
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "b1c33bd09963"
|
||||
down_revision: Union[str, None] = "9f5c78d352d6"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# Add indexes for actual search filtering patterns used in frontend
|
||||
# Based on /browse page filters: room_id and source_kind
|
||||
|
||||
# Index for room_id + created_at (for room-specific searches with date ordering)
|
||||
op.create_index(
|
||||
"idx_transcript_room_id_created_at",
|
||||
"transcript",
|
||||
["room_id", "created_at"],
|
||||
if_not_exists=True,
|
||||
)
|
||||
|
||||
# Index for source_kind alone (actively used filter in frontend)
|
||||
op.create_index(
|
||||
"idx_transcript_source_kind", "transcript", ["source_kind"], if_not_exists=True
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# Remove the indexes in reverse order
|
||||
op.drop_index("idx_transcript_source_kind", "transcript", if_exists=True)
|
||||
op.drop_index("idx_transcript_room_id_created_at", "transcript", if_exists=True)
|
||||
@@ -8,9 +8,8 @@ Create Date: 2025-06-27 08:57:16.306940
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "b3df9681cae9"
|
||||
|
||||
@@ -8,9 +8,8 @@ Create Date: 2024-10-11 13:45:28.914902
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "b469348df210"
|
||||
|
||||
@@ -0,0 +1,35 @@
"""add_unique_constraint_one_active_meeting_per_room

Revision ID: b7df9609542c
Revises: d7fbb74b673b
Create Date: 2025-07-25 16:27:06.959868

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "b7df9609542c"
down_revision: Union[str, None] = "d7fbb74b673b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Create a partial unique index that ensures only one active meeting per room
    # This works for both PostgreSQL and SQLite
    op.create_index(
        "idx_one_active_meeting_per_room",
        "meeting",
        ["room_id"],
        unique=True,
        postgresql_where=sa.text("is_active = true"),
        sqlite_where=sa.text("is_active = 1"),
    )


def downgrade() -> None:
    op.drop_index("idx_one_active_meeting_per_room", table_name="meeting")
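For reviewers unfamiliar with partial unique indexes, here is a minimal standalone sketch of the constraint this migration creates, using an in-memory SQLite database (the table shape is simplified to just the columns involved):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE meeting (id TEXT PRIMARY KEY, room_id TEXT, is_active INTEGER)")
# Same shape as the index above: uniqueness applies only to rows WHERE is_active = 1
con.execute(
    "CREATE UNIQUE INDEX idx_one_active_meeting_per_room "
    "ON meeting (room_id) WHERE is_active = 1"
)
con.execute("INSERT INTO meeting VALUES ('m1', 'r1', 1)")
con.execute("INSERT INTO meeting VALUES ('m2', 'r1', 0)")  # inactive rows are unconstrained
try:
    con.execute("INSERT INTO meeting VALUES ('m3', 'r1', 1)")  # second active meeting in r1
except sqlite3.IntegrityError as exc:
    print("rejected:", exc)  # e.g. "UNIQUE constraint failed: index 'idx_one_active_meeting_per_room'"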
@@ -5,25 +5,31 @@ Revises: 125031f7cb78
Create Date: 2023-12-13 15:37:51.303970

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = 'b9348748bbbc'
down_revision: Union[str, None] = '125031f7cb78'
revision: str = "b9348748bbbc"
down_revision: Union[str, None] = "125031f7cb78"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('transcript', sa.Column('reviewed', sa.Boolean(), server_default=sa.text('0'), nullable=False))
    op.add_column(
        "transcript",
        sa.Column(
            "reviewed", sa.Boolean(), server_default=sa.text("false"), nullable=False
        ),
    )
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('transcript', 'reviewed')
    op.drop_column("transcript", "reviewed")
    # ### end Alembic commands ###

@@ -9,8 +9,6 @@ Create Date: 2025-07-15 11:48:42.854741

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision: str = "ccd68dc784ff"

@@ -8,9 +8,8 @@ Create Date: 2025-06-27 09:27:25.302152

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "d3ff3a39297f"

@@ -0,0 +1,59 @@
"""Add room_id to transcript

Revision ID: d7fbb74b673b
Revises: a9c9c229ee36
Create Date: 2025-07-17 12:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "d7fbb74b673b"
down_revision: Union[str, None] = "a9c9c229ee36"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Add room_id column to transcript table
    op.add_column("transcript", sa.Column("room_id", sa.String(), nullable=True))

    # Add index for room_id for better query performance
    op.create_index("idx_transcript_room_id", "transcript", ["room_id"])

    # Populate room_id for existing ROOM-type transcripts
    # This joins through recording -> meeting -> room to get the room_id
    op.execute("""
        UPDATE transcript AS t
        SET room_id = r.id
        FROM recording rec
        JOIN meeting m ON rec.meeting_id = m.id
        JOIN room r ON m.room_id = r.id
        WHERE t.recording_id = rec.id
        AND t.source_kind = 'room'
        AND t.room_id IS NULL
    """)

    # Fix missing meeting_id for ROOM-type transcripts
    # The meeting_id field exists but was never populated
    op.execute("""
        UPDATE transcript AS t
        SET meeting_id = rec.meeting_id
        FROM recording rec
        WHERE t.recording_id = rec.id
        AND t.source_kind = 'room'
        AND t.meeting_id IS NULL
        AND rec.meeting_id IS NOT NULL
    """)


def downgrade() -> None:
    # Drop the index first
    op.drop_index("idx_transcript_room_id", "transcript")

    # Drop the room_id column
    op.drop_column("transcript", "room_id")
@@ -0,0 +1,34 @@
"""make meeting room_id nullable but keep foreign key

Revision ID: def1b5867d4c
Revises: 0ce521cda2ee
Create Date: 2025-09-11 09:42:18.697264

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "def1b5867d4c"
down_revision: Union[str, None] = "0ce521cda2ee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=True)

    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=False)

    # ### end Alembic commands ###
@@ -5,11 +5,11 @@ Revises: 4814901632bc
Create Date: 2023-11-16 10:29:09.351664

"""

from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa

from alembic import op

# revision identifiers, used by Alembic.
revision: str = "f819277e5169"

@@ -22,7 +22,6 @@ dependencies = [
    "fastapi-pagination>=0.12.6",
    "databases[aiosqlite, asyncpg]>=0.7.0",
    "sqlalchemy<1.5",
    "fief-client[fastapi]>=0.17.0",
    "alembic>=1.11.3",
    "nltk>=3.8.1",
    "prometheus-fastapi-instrumentator>=6.1.0",
@@ -33,12 +32,15 @@ dependencies = [
    "redis>=5.0.1",
    "python-jose[cryptography]>=3.3.0",
    "python-multipart>=0.0.6",
    "faster-whisper>=0.10.0",
    "transformers>=4.36.2",
    "black==24.1.1",
    "jsonschema>=4.23.0",
    "openai>=1.59.7",
    "psycopg2-binary>=2.9.10",
    "llama-index>=0.12.52",
    "llama-index-llms-openai-like>=0.4.0",
    "pytest-env>=1.1.5",
    "webvtt-py>=0.5.0",
    "PyJWT>=2.8.0",
]

[dependency-groups]
@@ -55,6 +57,9 @@ tests = [
    "httpx-ws>=0.4.1",
    "pytest-httpx>=0.23.1",
    "pytest-celery>=0.0.0",
    "pytest-recording>=0.13.4",
    "pytest-docker>=3.2.3",
    "asgi-lifespan>=2.1.0",
]
aws = ["aioboto3>=11.2.0"]
evaluation = [
@@ -63,6 +68,15 @@ evaluation = [
    "tqdm>=4.66.0",
    "pydantic>=2.1.1",
]
local = [
    "pyannote-audio>=3.3.2",
    "faster-whisper>=0.10.0",
]
silero-vad = [
    "silero-vad>=5.1.2",
    "torch>=2.8.0",
    "torchaudio>=2.8.0",
]

[tool.uv]
default-groups = [
@@ -70,6 +84,21 @@ default-groups = [
    "tests",
    "aws",
    "evaluation",
    "local",
    "silero-vad"
]

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
explicit = true

[tool.uv.sources]
torch = [
    { index = "pytorch-cpu" },
]
torchaudio = [
    { index = "pytorch-cpu" },
]

[build-system]
@@ -82,10 +111,28 @@ packages = ["reflector"]
[tool.coverage.run]
source = ["reflector"]

[tool.pytest_env]
ENVIRONMENT = "pytest"
DATABASE_URL = "postgresql://test_user:test_password@localhost:15432/reflector_test"

[tool.pytest.ini_options]
addopts = "-ra -q --disable-pytest-warnings --cov --cov-report html -v"
testpaths = ["tests"]
asyncio_mode = "auto"
markers = [
    "gpu_modal: mark test to run only with GPU Modal endpoints (deselect with '-m \"not gpu_modal\"')",
]

[tool.ruff.lint]
select = [
    "I", # isort - import sorting
    "F401", # unused imports
    "PLC0415", # import-outside-top-level - detect inline imports
]

[tool.ruff.lint.per-file-ignores]
"reflector/processors/summary/summary_builder.py" = ["E501"]
"gpu/**.py" = ["PLC0415"]
"reflector/tools/**.py" = ["PLC0415"]
"migrations/versions/**.py" = ["PLC0415"]
"tests/**.py" = ["PLC0415"]

@@ -1,16 +1,20 @@
from contextlib import asynccontextmanager

import reflector.auth # noqa
import reflector.db # noqa
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.routing import APIRoute
from fastapi_pagination import add_pagination
from prometheus_fastapi_instrumentator import Instrumentator

import reflector.auth # noqa
import reflector.db # noqa
from reflector.events import subscribers_shutdown, subscribers_startup
from reflector.logger import logger
from reflector.metrics import metrics_init
from reflector.settings import settings
from reflector.video_platforms.jitsi import router as jitsi_router
from reflector.video_platforms.whereby import router as whereby_router
from reflector.views.jibri_webhook import router as jibri_webhook_router
from reflector.views.meetings import router as meetings_router
from reflector.views.rooms import router as rooms_router
from reflector.views.rtc_offer import router as rtc_offer_router
@@ -25,7 +29,6 @@ from reflector.views.transcripts_upload import router as transcripts_upload_rout
from reflector.views.transcripts_webrtc import router as transcripts_webrtc_router
from reflector.views.transcripts_websocket import router as transcripts_websocket_router
from reflector.views.user import router as user_router
from reflector.views.whereby import router as whereby_router
from reflector.views.zulip import router as zulip_router

try:
@@ -85,6 +88,8 @@ app.include_router(transcripts_process_router, prefix="/v1")
app.include_router(user_router, prefix="/v1")
app.include_router(zulip_router, prefix="/v1")
app.include_router(whereby_router, prefix="/v1")
app.include_router(jitsi_router, prefix="/v1")
app.include_router(jibri_webhook_router)  # No /v1 prefix, uses /api/v1/jibri
add_pagination(app)

# prepare celery

server/reflector/asynctask.py (new file)
@@ -0,0 +1,27 @@
import asyncio
import functools

from reflector.db import get_database


def asynctask(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        async def run_with_db():
            database = get_database()
            await database.connect()
            try:
                return await f(*args, **kwargs)
            finally:
                await database.disconnect()

        coro = run_with_db()
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            loop = None
        if loop and loop.is_running():
            return loop.run_until_complete(coro)
        return asyncio.run(coro)

    return wrapper
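A hedged usage sketch of the decorator above (the task name and body are illustrative, not repo code): it lets an async function be called from synchronous code while a connected database is guaranteed for the duration of the call. Note that the running-loop branch would normally raise, since run_until_complete cannot re-enter a loop that is already running; the expected path in a synchronous worker is asyncio.run().

from reflector.asynctask import asynctask

@asynctask
async def mark_meeting_inactive(meeting_id: str) -> None:
    # runs with a connected database; disconnect happens in the finally block
    ...

# From synchronous code (e.g. a Celery worker), this executes the coroutine
# to completion via asyncio.run() when no event loop is active:
mark_meeting_inactive("meeting-123")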
@@ -1,7 +1,8 @@
from reflector.settings import settings
from reflector.logger import logger
import importlib

from reflector.logger import logger
from reflector.settings import settings

logger.info(f"User authentication using {settings.AUTH_BACKEND}")
module_name = f"reflector.auth.auth_{settings.AUTH_BACKEND}"
auth_module = importlib.import_module(module_name)

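For context, the dynamic import above resolves the backend module purely by name. A minimal sketch of what it expands to, assuming AUTH_BACKEND were set to "jwt" (the backend name is an assumption for illustration, not confirmed by this diff):

import importlib

# equivalent of the settings-driven lookup, spelled out by hand
auth_module = importlib.import_module("reflector.auth.auth_jwt")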
@@ -1,25 +0,0 @@
from fastapi.security import OAuth2AuthorizationCodeBearer
from fief_client import FiefAccessTokenInfo, FiefAsync, FiefUserInfo
from fief_client.integrations.fastapi import FiefAuth
from reflector.settings import settings

fief = FiefAsync(
    settings.AUTH_FIEF_URL,
    settings.AUTH_FIEF_CLIENT_ID,
    settings.AUTH_FIEF_CLIENT_SECRET,
)

scheme = OAuth2AuthorizationCodeBearer(
    f"{settings.AUTH_FIEF_URL}/authorize",
    f"{settings.AUTH_FIEF_URL}/api/token",
    scopes={"openid": "openid", "offline_access": "offline_access"},
    auto_error=False,
)

auth = FiefAuth(fief, scheme)

UserInfo = FiefUserInfo
AccessTokenInfo = FiefAccessTokenInfo
authenticated = auth.authenticated()
current_user = auth.current_user()
current_user_optional = auth.current_user(optional=True)
@@ -4,6 +4,7 @@ from fastapi import Depends, HTTPException
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from pydantic import BaseModel

from reflector.logger import logger
from reflector.settings import settings


@@ -1,7 +1,8 @@
from pydantic import BaseModel
from typing import Annotated

from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from pydantic import BaseModel

oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)


@@ -1,12 +1,12 @@
import argparse
import asyncio
import signal
from typing import NoReturn

from aiortc.contrib.signaling import add_signaling_arguments, create_signaling

from reflector.logger import logger
from reflector.stream_client import StreamClient
from typing import NoReturn


async def main() -> NoReturn:
@@ -51,7 +51,7 @@ async def main() -> NoReturn:

    logger.info(f"Cancelling {len(tasks)} outstanding tasks")
    await asyncio.gather(*tasks, return_exceptions=True)
    logger.info(f'{"Flushing metrics"}')
    logger.info(f"{'Flushing metrics'}")
    loop.stop()

    signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)

@@ -1,11 +1,28 @@
import contextvars
from typing import Optional

import databases
import sqlalchemy

from reflector.events import subscribers_shutdown, subscribers_startup
from reflector.settings import settings

database = databases.Database(settings.DATABASE_URL)
metadata = sqlalchemy.MetaData()

_database_context: contextvars.ContextVar[Optional[databases.Database]] = (
    contextvars.ContextVar("database", default=None)
)


def get_database() -> databases.Database:
    """Get database instance for current asyncio context"""
    db = _database_context.get()
    if db is None:
        db = databases.Database(settings.DATABASE_URL)
        _database_context.set(db)
    return db


# import models
import reflector.db.meetings # noqa
import reflector.db.recordings # noqa
@@ -13,16 +30,18 @@ import reflector.db.rooms # noqa
import reflector.db.transcripts # noqa

kwargs = {}
if "sqlite" in settings.DATABASE_URL:
    kwargs["connect_args"] = {"check_same_thread": False}
if "postgres" not in settings.DATABASE_URL:
    raise Exception("Only postgres database is supported in reflector")
engine = sqlalchemy.create_engine(settings.DATABASE_URL, **kwargs)


@subscribers_startup.append
async def database_connect(_):
    database = get_database()
    await database.connect()


@subscribers_shutdown.append
async def database_disconnect(_):
    database = get_database()
    await database.disconnect()

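A minimal sketch of how the ContextVar-backed accessor is meant to be used (the query and function are illustrative): each asyncio context lazily gets its own databases.Database handle, so parallel tasks do not share one connection object.

from reflector.db import get_database

async def count_rooms() -> int:
    # assumes database_connect() already ran (startup subscriber above)
    db = get_database()  # same handle for the rest of this asyncio context
    return await db.fetch_val("SELECT count(*) FROM room")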
@@ -1,11 +1,11 @@
from datetime import datetime
from typing import Literal
from datetime import datetime, timezone
from typing import Any, Dict, List, Literal

import sqlalchemy as sa
from fastapi import HTTPException
from pydantic import BaseModel, Field
from reflector.db import database, metadata
from reflector.db.rooms import Room

from reflector.db import get_database, metadata
from reflector.db.rooms import Room, VideoPlatform
from reflector.utils import generate_uuid4

meetings = sa.Table(
@@ -15,10 +15,14 @@ meetings = sa.Table(
    sa.Column("room_name", sa.String),
    sa.Column("room_url", sa.String),
    sa.Column("host_room_url", sa.String),
    sa.Column("start_date", sa.DateTime),
    sa.Column("end_date", sa.DateTime),
    sa.Column("user_id", sa.String),
    sa.Column("room_id", sa.String),
    sa.Column("start_date", sa.DateTime(timezone=True)),
    sa.Column("end_date", sa.DateTime(timezone=True)),
    sa.Column(
        "room_id",
        sa.String,
        sa.ForeignKey("room.id", ondelete="CASCADE"),
        nullable=True,
    ),
    sa.Column("is_locked", sa.Boolean, nullable=False, server_default=sa.false()),
    sa.Column("room_mode", sa.String, nullable=False, server_default="normal"),
    sa.Column("recording_type", sa.String, nullable=False, server_default="cloud"),
@@ -40,17 +44,30 @@ meetings = sa.Table(
        nullable=False,
        server_default=sa.true(),
    ),
    sa.Column("platform", sa.String, nullable=False, server_default="whereby"),
    sa.Column("events", sa.JSON, nullable=False, server_default=sa.text("'[]'")),
    sa.Index("idx_meeting_room_id", "room_id"),
    sa.Index(
        "idx_one_active_meeting_per_room",
        "room_id",
        unique=True,
        postgresql_where=sa.text("is_active = true"),
    ),
)

meeting_consent = sa.Table(
    "meeting_consent",
    metadata,
    sa.Column("id", sa.String, primary_key=True),
    sa.Column("meeting_id", sa.String, sa.ForeignKey("meeting.id"), nullable=False),
    sa.Column(
        "meeting_id",
        sa.String,
        sa.ForeignKey("meeting.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column("user_id", sa.String),
    sa.Column("consent_given", sa.Boolean, nullable=False),
    sa.Column("consent_timestamp", sa.DateTime, nullable=False),
    sa.Column("consent_timestamp", sa.DateTime(timezone=True), nullable=False),
)

@@ -69,8 +86,7 @@ class Meeting(BaseModel):
    host_room_url: str
    start_date: datetime
    end_date: datetime
    user_id: str | None = None
    room_id: str | None = None
    room_id: str | None
    is_locked: bool = False
    room_mode: Literal["normal", "group"] = "normal"
    recording_type: Literal["none", "local", "cloud"] = "cloud"
@@ -78,6 +94,8 @@
        "none", "prompt", "automatic", "automatic-2nd-participant"
    ] = "automatic-2nd-participant"
    num_clients: int = 0
    platform: VideoPlatform = VideoPlatform.WHEREBY
    events: List[Dict[str, Any]] = Field(default_factory=list)


class MeetingController:
@@ -89,12 +107,8 @@ class MeetingController:
        host_room_url: str,
        start_date: datetime,
        end_date: datetime,
        user_id: str,
        room: Room,
    ):
        """
        Create a new meeting
        """
        meeting = Meeting(
            id=id,
            room_name=room_name,
@@ -102,42 +116,33 @@ class MeetingController:
            host_room_url=host_room_url,
            start_date=start_date,
            end_date=end_date,
            user_id=user_id,
            room_id=room.id,
            is_locked=room.is_locked,
            room_mode=room.room_mode,
            recording_type=room.recording_type,
            recording_trigger=room.recording_trigger,
            platform=room.platform,
        )
        query = meetings.insert().values(**meeting.model_dump())
        await database.execute(query)
        await get_database().execute(query)
        return meeting

    async def get_all_active(self) -> list[Meeting]:
        """
        Get active meetings.
        """
        query = meetings.select().where(meetings.c.is_active)
        return await database.fetch_all(query)
        return await get_database().fetch_all(query)

    async def get_by_room_name(
        self,
        room_name: str,
    ) -> Meeting:
        """
        Get a meeting by room name.
        """
    ) -> Meeting | None:
        query = meetings.select().where(meetings.c.room_name == room_name)
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None

        return Meeting(**result)

    async def get_active(self, room: Room, current_time: datetime) -> Meeting:
        """
        Get latest active meeting for a room.
        """
    async def get_active(self, room: Room, current_time: datetime) -> Meeting | None:
        end_date = getattr(meetings.c, "end_date")
        query = (
            meetings.select()
@@ -150,42 +155,84 @@
            )
            .order_by(end_date.desc())
        )
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None

        return Meeting(**result)

    async def get_by_id(self, meeting_id: str, **kwargs) -> Meeting | None:
        """
        Get a meeting by id
        """
        query = meetings.select().where(meetings.c.id == meeting_id)
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Meeting(**result)

    async def get_by_id_for_http(self, meeting_id: str, user_id: str | None) -> Meeting:
        """
        Get a meeting by ID for HTTP request.

        If not found, it will raise a 404 error.
        """
        query = meetings.select().where(meetings.c.id == meeting_id)
        result = await database.fetch_one(query)
        if not result:
            raise HTTPException(status_code=404, detail="Meeting not found")

        meeting = Meeting(**result)
        if result["user_id"] != user_id:
            meeting.host_room_url = ""

        return meeting

    async def update_meeting(self, meeting_id: str, **kwargs):
        query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
        await database.execute(query)
        await get_database().execute(query)

    async def add_event(
        self, meeting_id: str, event_type: str, event_data: Dict[str, Any] = None
    ):
        """Add an event to a meeting's events list."""
        if event_data is None:
            event_data = {}

        event = {
            "type": event_type,
            "timestamp": datetime.now(tz=timezone.utc).isoformat(),
            "data": event_data,
        }

        # Get current events
        query = meetings.select().where(meetings.c.id == meeting_id)
        result = await get_database().fetch_one(query)
        if not result:
            return

        current_events = result["events"] or []
        current_events.append(event)

        # Update with new events list
        update_query = (
            meetings.update()
            .where(meetings.c.id == meeting_id)
            .values(events=current_events)
        )
        await get_database().execute(update_query)

    async def participant_joined(
        self, meeting_id: str, participant_data: Dict[str, Any] = None
    ):
        """Record a participant joined event."""
        await self.add_event(meeting_id, "participant_joined", participant_data)

    async def participant_left(
        self, meeting_id: str, participant_data: Dict[str, Any] = None
    ):
        """Record a participant left event."""
        await self.add_event(meeting_id, "participant_left", participant_data)

    async def recording_started(
        self, meeting_id: str, recording_data: Dict[str, Any] = None
    ):
        """Record a recording started event."""
        await self.add_event(meeting_id, "recording_started", recording_data)

    async def recording_stopped(
        self, meeting_id: str, recording_data: Dict[str, Any] = None
    ):
        """Record a recording stopped event."""
        await self.add_event(meeting_id, "recording_stopped", recording_data)

    async def get_events(self, meeting_id: str) -> List[Dict[str, Any]]:
        """Get all events for a meeting."""
        query = meetings.select().where(meetings.c.id == meeting_id)
        result = await get_database().fetch_one(query)
        if not result:
            return []
        return result["events"] or []


class MeetingConsentController:
@@ -193,7 +240,7 @@ class MeetingConsentController:
        query = meeting_consent.select().where(
            meeting_consent.c.meeting_id == meeting_id
        )
        results = await database.fetch_all(query)
        results = await get_database().fetch_all(query)
        return [MeetingConsent(**result) for result in results]

    async def get_by_meeting_and_user(
@@ -204,10 +251,10 @@
            meeting_consent.c.meeting_id == meeting_id,
            meeting_consent.c.user_id == user_id,
        )
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if result is None:
            return None
        return MeetingConsent(**result) if result else None
        return MeetingConsent(**result)

    async def upsert(self, consent: MeetingConsent) -> MeetingConsent:
        """Create new consent or update existing one for authenticated users"""
@@ -226,14 +273,14 @@
                consent_timestamp=consent.consent_timestamp,
            )
        )
        await database.execute(query)
        await get_database().execute(query)

        existing.consent_given = consent.consent_given
        existing.consent_timestamp = consent.consent_timestamp
        return existing

        query = meeting_consent.insert().values(**consent.model_dump())
        await database.execute(query)
        await get_database().execute(query)
        return consent

    async def has_any_denial(self, meeting_id: str) -> bool:
@@ -242,7 +289,7 @@
            meeting_consent.c.meeting_id == meeting_id,
            meeting_consent.c.consent_given.is_(False),
        )
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        return result is not None


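A hedged sketch of the new event log in use (the handler name and payload are illustrative); note that add_event does a read-modify-write of the JSON column, so concurrent writers could in principle drop events:

from reflector.db.meetings import MeetingController

async def on_participant_webhook(meeting_id: str) -> None:
    controller = MeetingController()
    await controller.participant_joined(meeting_id, {"name": "alice"})
    events = await controller.get_events(meeting_id)
    # -> [{"type": "participant_joined", "timestamp": "...", "data": {"name": "alice"}}]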
@@ -1,56 +0,0 @@
from reflector.db import database
from reflector.db.meetings import meetings
from reflector.db.rooms import rooms
from reflector.db.transcripts import transcripts

users_to_migrate = [
    ["123@lifex.pink", "63b727f5-485d-449f-b528-563d779b11ef", None],
    ["ana@monadical.com", "1bae2e4d-5c04-49c2-932f-a86266a6ca13", None],
    ["cspencer@sprocket.org", "614ed0be-392e-488c-bd19-6a9730fd0e9e", None],
    ["daniel.f.lopez.j@gmail.com", "ca9561bd-c989-4a1e-8877-7081cf62ae7f", None],
    ["jenalee@monadical.com", "c7c1e79e-b068-4b28-a9f4-29d98b1697ed", None],
    ["jennifer@rootandseed.com", "f5321727-7546-4b2b-b69d-095a931ef0c4", None],
    ["jose@monadical.com", "221f079c-7ce0-4677-90b7-0359b6315e27", None],
    ["labenclayton@gmail.com", "40078cd0-543c-40e4-9c2e-5ce57a686428", None],
    ["mathieu@monadical.com", "c7a36151-851e-4afa-9fab-aaca834bfd30", None],
    ["michal.flak.96@gmail.com", "3096eb5e-b590-41fc-a0d1-d152c1895402", None],
    ["sara@monadical.com", "31ab0cfe-5d2c-4c7a-84de-a29494714c99", None],
    ["sara@monadical.com", "b871e5f0-754e-447f-9c3d-19f629f0082b", None],
    ["sebastian@monadical.com", "f024f9d0-15d0-480f-8529-43959fc8b639", None],
    ["sergey@monadical.com", "5c4798eb-b9ab-4721-a540-bd96fc434156", None],
    ["sergey@monadical.com", "9dd8a6b4-247e-48fe-b1fb-4c84dd3c01bc", None],
    ["transient.tran@gmail.com", "617ba2d3-09b6-4b1f-a435-a7f41c3ce060", None],
]


async def migrate_user(email, user_id):
    # if the email match the email in the users_to_migrate list
    # reassign all transcripts/rooms/meetings to the new user_id

    user_ids = [user[1] for user in users_to_migrate if user[0] == email]
    if not user_ids:
        return

    # do not migrate back
    if user_id in user_ids:
        return

    for old_user_id in user_ids:
        query = (
            transcripts.update()
            .where(transcripts.c.user_id == old_user_id)
            .values(user_id=user_id)
        )
        await database.execute(query)

        query = (
            rooms.update().where(rooms.c.user_id == old_user_id).values(user_id=user_id)
        )
        await database.execute(query)

        query = (
            meetings.update()
            .where(meetings.c.user_id == old_user_id)
            .values(user_id=user_id)
        )
        await database.execute(query)
@@ -3,7 +3,8 @@ from typing import Literal

import sqlalchemy as sa
from pydantic import BaseModel, Field
from reflector.db import database, metadata

from reflector.db import get_database, metadata
from reflector.utils import generate_uuid4

recordings = sa.Table(
@@ -12,7 +13,7 @@ recordings = sa.Table(
    sa.Column("id", sa.String, primary_key=True),
    sa.Column("bucket_name", sa.String, nullable=False),
    sa.Column("object_key", sa.String, nullable=False),
    sa.Column("recorded_at", sa.DateTime, nullable=False),
    sa.Column("recorded_at", sa.DateTime(timezone=True), nullable=False),
    sa.Column(
        "status",
        sa.String,
@@ -36,12 +37,12 @@ class Recording(BaseModel):
class RecordingController:
    async def create(self, recording: Recording):
        query = recordings.insert().values(**recording.model_dump())
        await database.execute(query)
        await get_database().execute(query)
        return recording

    async def get_by_id(self, id: str) -> Recording:
        query = recordings.select().where(recordings.c.id == id)
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        return Recording(**result) if result else None

    async def get_by_object_key(self, bucket_name: str, object_key: str) -> Recording:
@@ -49,8 +50,12 @@ class RecordingController:
            recordings.c.bucket_name == bucket_name,
            recordings.c.object_key == object_key,
        )
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        return Recording(**result) if result else None

    async def remove_by_id(self, id: str) -> None:
        query = recordings.delete().where(recordings.c.id == id)
        await get_database().execute(query)


recordings_controller = RecordingController()

@@ -1,21 +1,30 @@
from datetime import datetime
import secrets
from datetime import datetime, timezone
from enum import StrEnum
from sqlite3 import IntegrityError
from typing import Literal

import sqlalchemy
from fastapi import HTTPException
from pydantic import BaseModel, Field
from reflector.db import database, metadata
from reflector.utils import generate_uuid4
from sqlalchemy.sql import false, or_

from reflector.db import get_database, metadata
from reflector.utils import generate_uuid4


class VideoPlatform(StrEnum):
    WHEREBY = "whereby"
    JITSI = "jitsi"


rooms = sqlalchemy.Table(
    "room",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
    sqlalchemy.Column("name", sqlalchemy.String, nullable=False, unique=True),
    sqlalchemy.Column("user_id", sqlalchemy.String, nullable=False),
    sqlalchemy.Column("created_at", sqlalchemy.DateTime, nullable=False),
    sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
    sqlalchemy.Column(
        "zulip_auto_post", sqlalchemy.Boolean, nullable=False, server_default=false()
    ),
@@ -39,6 +48,11 @@ rooms = sqlalchemy.Table(
    sqlalchemy.Column(
        "is_shared", sqlalchemy.Boolean, nullable=False, server_default=false()
    ),
    sqlalchemy.Column("webhook_url", sqlalchemy.String, nullable=True),
    sqlalchemy.Column("webhook_secret", sqlalchemy.String, nullable=True),
    sqlalchemy.Column(
        "platform", sqlalchemy.String, nullable=False, server_default="whereby"
    ),
    sqlalchemy.Index("idx_room_is_shared", "is_shared"),
)

@@ -47,7 +61,7 @@ class Room(BaseModel):
    id: str = Field(default_factory=generate_uuid4)
    name: str
    user_id: str
    created_at: datetime = Field(default_factory=datetime.utcnow)
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    zulip_auto_post: bool = False
    zulip_stream: str = ""
    zulip_topic: str = ""
@@ -58,6 +72,9 @@ class Room(BaseModel):
        "none", "prompt", "automatic", "automatic-2nd-participant"
    ] = "automatic-2nd-participant"
    is_shared: bool = False
    webhook_url: str | None = None
    webhook_secret: str | None = None
    platform: VideoPlatform = VideoPlatform.WHEREBY


class RoomController:
@@ -91,7 +108,7 @@ class RoomController:
        if return_query:
            return query

        results = await database.fetch_all(query)
        results = await get_database().fetch_all(query)
        return results

    async def add(
@@ -106,10 +123,16 @@ class RoomController:
        recording_type: str,
        recording_trigger: str,
        is_shared: bool,
        webhook_url: str = "",
        webhook_secret: str = "",
        platform: str = "whereby",
    ):
        """
        Add a new room
        """
        if webhook_url and not webhook_secret:
            webhook_secret = secrets.token_urlsafe(32)

        room = Room(
            name=name,
            user_id=user_id,
@@ -121,10 +144,13 @@ class RoomController:
            recording_type=recording_type,
            recording_trigger=recording_trigger,
            is_shared=is_shared,
            webhook_url=webhook_url,
            webhook_secret=webhook_secret,
            platform=platform,
        )
        query = rooms.insert().values(**room.model_dump())
        try:
            await database.execute(query)
            await get_database().execute(query)
        except IntegrityError:
            raise HTTPException(status_code=400, detail="Room name is not unique")
        return room
@@ -133,9 +159,12 @@ class RoomController:
        """
        Update a room fields with key/values in values
        """
        if values.get("webhook_url") and not values.get("webhook_secret"):
            values["webhook_secret"] = secrets.token_urlsafe(32)

        query = rooms.update().where(rooms.c.id == room.id).values(**values)
        try:
            await database.execute(query)
            await get_database().execute(query)
        except IntegrityError:
            raise HTTPException(status_code=400, detail="Room name is not unique")

@@ -150,7 +179,7 @@ class RoomController:
        query = rooms.select().where(rooms.c.id == room_id)
        if "user_id" in kwargs:
            query = query.where(rooms.c.user_id == kwargs["user_id"])
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Room(**result)
@@ -162,7 +191,7 @@ class RoomController:
        query = rooms.select().where(rooms.c.name == room_name)
        if "user_id" in kwargs:
            query = query.where(rooms.c.user_id == kwargs["user_id"])
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Room(**result)
@@ -174,7 +203,7 @@ class RoomController:

        If not found, it will raise a 404 error.
        """
        query = rooms.select().where(rooms.c.id == meeting_id)
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            raise HTTPException(status_code=404, detail="Room not found")

@@ -196,7 +225,7 @@ class RoomController:
        if user_id is not None and room.user_id != user_id:
            return
        query = rooms.delete().where(rooms.c.id == room_id)
        await database.execute(query)
        await get_database().execute(query)


rooms_controller = RoomController()

server/reflector/db/search.py (new file)
@@ -0,0 +1,468 @@
"""Search functionality for transcripts and other entities."""
|
||||
|
||||
import itertools
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from io import StringIO
|
||||
from typing import Annotated, Any, Dict, Iterator
|
||||
|
||||
import sqlalchemy
|
||||
import webvtt
|
||||
from databases.interfaces import Record as DbRecord
|
||||
from fastapi import HTTPException
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
Field,
|
||||
NonNegativeFloat,
|
||||
NonNegativeInt,
|
||||
TypeAdapter,
|
||||
ValidationError,
|
||||
constr,
|
||||
field_serializer,
|
||||
)
|
||||
|
||||
from reflector.db import get_database
|
||||
from reflector.db.rooms import rooms
|
||||
from reflector.db.transcripts import SourceKind, TranscriptStatus, transcripts
|
||||
from reflector.db.utils import is_postgresql
|
||||
from reflector.logger import logger
|
||||
from reflector.utils.string import NonEmptyString, try_parse_non_empty_string
|
||||
|
||||
DEFAULT_SEARCH_LIMIT = 20
|
||||
SNIPPET_CONTEXT_LENGTH = 50 # Characters before/after match to include
|
||||
DEFAULT_SNIPPET_MAX_LENGTH = NonNegativeInt(150)
|
||||
DEFAULT_MAX_SNIPPETS = NonNegativeInt(3)
|
||||
LONG_SUMMARY_MAX_SNIPPETS = 2
|
||||
|
||||
SearchQueryBase = constr(min_length=1, strip_whitespace=True)
|
||||
SearchLimitBase = Annotated[int, Field(ge=1, le=100)]
|
||||
SearchOffsetBase = Annotated[int, Field(ge=0)]
|
||||
SearchTotalBase = Annotated[int, Field(ge=0)]
|
||||
|
||||
SearchQuery = Annotated[SearchQueryBase, Field(description="Search query text")]
|
||||
search_query_adapter = TypeAdapter(SearchQuery)
|
||||
SearchLimit = Annotated[SearchLimitBase, Field(description="Results per page")]
|
||||
SearchOffset = Annotated[
|
||||
SearchOffsetBase, Field(description="Number of results to skip")
|
||||
]
|
||||
SearchTotal = Annotated[
|
||||
SearchTotalBase, Field(description="Total number of search results")
|
||||
]
|
||||
|
||||
WEBVTT_SPEC_HEADER = "WEBVTT"
|
||||
|
||||
WebVTTContent = Annotated[
|
||||
str,
|
||||
Field(min_length=len(WEBVTT_SPEC_HEADER), description="WebVTT content"),
|
||||
]
|
||||
|
||||
|
||||
class WebVTTProcessor:
|
||||
"""Stateless processor for WebVTT content operations."""
|
||||
|
||||
@staticmethod
|
||||
def parse(raw_content: str) -> WebVTTContent:
|
||||
"""Parse WebVTT content and return it as a string."""
|
||||
if not raw_content.startswith(WEBVTT_SPEC_HEADER):
|
||||
raise ValueError(f"Invalid WebVTT content, no header {WEBVTT_SPEC_HEADER}")
|
||||
return raw_content
|
||||
|
||||
@staticmethod
|
||||
def extract_text(webvtt_content: WebVTTContent) -> str:
|
||||
"""Extract plain text from WebVTT content using webvtt library."""
|
||||
try:
|
||||
buffer = StringIO(webvtt_content)
|
||||
vtt = webvtt.read_buffer(buffer)
|
||||
return " ".join(caption.text for caption in vtt if caption.text)
|
||||
except webvtt.errors.MalformedFileError as e:
|
||||
logger.warning(f"Malformed WebVTT content: {e}")
|
||||
return ""
|
||||
except (UnicodeDecodeError, ValueError) as e:
|
||||
logger.warning(f"Failed to decode WebVTT content: {e}")
|
||||
return ""
|
||||
except AttributeError as e:
|
||||
logger.error(
|
||||
f"WebVTT parsing error - unexpected format: {e}", exc_info=True
|
||||
)
|
||||
return ""
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error parsing WebVTT: {e}", exc_info=True)
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def generate_snippets(
|
||||
webvtt_content: WebVTTContent,
|
||||
query: SearchQuery,
|
||||
max_snippets: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
|
||||
) -> list[str]:
|
||||
"""Generate snippets from WebVTT content."""
|
||||
return SnippetGenerator.generate(
|
||||
WebVTTProcessor.extract_text(webvtt_content),
|
||||
query,
|
||||
max_snippets=max_snippets,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class SnippetCandidate:
|
||||
"""Represents a candidate snippet with its position."""
|
||||
|
||||
_text: str
|
||||
start: NonNegativeInt
|
||||
_original_text_length: int
|
||||
|
||||
@property
|
||||
def end(self) -> NonNegativeInt:
|
||||
"""Calculate end position from start and raw text length."""
|
||||
return self.start + len(self._text)
|
||||
|
||||
def text(self) -> str:
|
||||
"""Get display text with ellipses added if needed."""
|
||||
result = self._text.strip()
|
||||
if self.start > 0:
|
||||
result = "..." + result
|
||||
if self.end < self._original_text_length:
|
||||
result = result + "..."
|
||||
return result
|
||||
|
||||
|
||||
class SearchParameters(BaseModel):
|
||||
"""Validated search parameters for full-text search."""
|
||||
|
||||
query_text: SearchQuery | None = None
|
||||
limit: SearchLimit = DEFAULT_SEARCH_LIMIT
|
||||
offset: SearchOffset = 0
|
||||
user_id: str | None = None
|
||||
room_id: str | None = None
|
||||
source_kind: SourceKind | None = None
|
||||
|
||||
|
||||
class SearchResultDB(BaseModel):
|
||||
"""Intermediate model for validating raw database results."""
|
||||
|
||||
id: str = Field(..., min_length=1)
|
||||
created_at: datetime
|
||||
status: str = Field(..., min_length=1)
|
||||
duration: float | None = Field(None, ge=0)
|
||||
user_id: str | None = None
|
||||
title: str | None = None
|
||||
source_kind: SourceKind
|
||||
room_id: str | None = None
|
||||
rank: float = Field(..., ge=0, le=1)
|
||||
|
||||
|
||||
class SearchResult(BaseModel):
|
||||
"""Public search result model with computed fields."""
|
||||
|
||||
id: str = Field(..., min_length=1)
|
||||
title: str | None = None
|
||||
user_id: str | None = None
|
||||
room_id: str | None = None
|
||||
room_name: str | None = None
|
||||
source_kind: SourceKind
|
||||
created_at: datetime
|
||||
status: TranscriptStatus = Field(..., min_length=1)
|
||||
rank: float = Field(..., ge=0, le=1)
|
||||
duration: NonNegativeFloat | None = Field(..., description="Duration in seconds")
|
||||
search_snippets: list[str] = Field(
|
||||
description="Text snippets around search matches"
|
||||
)
|
||||
total_match_count: NonNegativeInt = Field(
|
||||
default=0, description="Total number of matches found in the transcript"
|
||||
)
|
||||
|
||||
@field_serializer("created_at", when_used="json")
|
||||
def serialize_datetime(self, dt: datetime) -> str:
|
||||
if dt.tzinfo is None:
|
||||
return dt.isoformat() + "Z"
|
||||
return dt.isoformat()
|
||||
|
||||
|
||||
class SnippetGenerator:
|
||||
"""Stateless generator for text snippets and match operations."""
|
||||
|
||||
@staticmethod
|
||||
def find_all_matches(text: str, query: str) -> Iterator[int]:
|
||||
"""Generate all match positions for a query in text."""
|
||||
if not text:
|
||||
logger.warning("Empty text for search query in find_all_matches")
|
||||
return
|
||||
if not query:
|
||||
logger.warning("Empty query for search text in find_all_matches")
|
||||
return
|
||||
|
||||
text_lower = text.lower()
|
||||
query_lower = query.lower()
|
||||
start = 0
|
||||
prev_start = start
|
||||
while (pos := text_lower.find(query_lower, start)) != -1:
|
||||
yield pos
|
||||
start = pos + len(query_lower)
|
||||
if start <= prev_start:
|
||||
raise ValueError("panic! find_all_matches is not incremental")
|
||||
prev_start = start
|
||||
|
||||
@staticmethod
|
||||
def count_matches(text: str, query: SearchQuery) -> NonNegativeInt:
|
||||
"""Count total number of matches for a query in text."""
|
||||
ZERO = NonNegativeInt(0)
|
||||
if not text:
|
||||
logger.warning("Empty text for search query in count_matches")
|
||||
return ZERO
|
||||
assert query is not None
|
||||
return NonNegativeInt(
|
||||
sum(1 for _ in SnippetGenerator.find_all_matches(text, query))
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def create_snippet(
|
||||
text: str, match_pos: int, max_length: int = DEFAULT_SNIPPET_MAX_LENGTH
|
||||
) -> SnippetCandidate:
|
||||
"""Create a snippet from a match position."""
|
||||
snippet_start = NonNegativeInt(max(0, match_pos - SNIPPET_CONTEXT_LENGTH))
|
||||
snippet_end = min(len(text), match_pos + max_length - SNIPPET_CONTEXT_LENGTH)
|
||||
|
||||
snippet_text = text[snippet_start:snippet_end]
|
||||
|
||||
return SnippetCandidate(
|
||||
_text=snippet_text, start=snippet_start, _original_text_length=len(text)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def filter_non_overlapping(
|
||||
candidates: Iterator[SnippetCandidate],
|
||||
) -> Iterator[str]:
|
||||
"""Filter out overlapping snippets and return only display text."""
|
||||
last_end = 0
|
||||
for candidate in candidates:
|
||||
display_text = candidate.text()
|
||||
# it means that next overlapping snippets simply don't get included
|
||||
# it's fine as simplistic logic and users probably won't care much because they already have their search results just fin
|
||||
if candidate.start >= last_end and display_text:
|
||||
yield display_text
|
||||
last_end = candidate.end
|
||||
|
||||
@staticmethod
|
||||
def generate(
|
||||
text: str,
|
||||
query: SearchQuery,
|
||||
max_length: NonNegativeInt = DEFAULT_SNIPPET_MAX_LENGTH,
|
||||
max_snippets: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
|
||||
) -> list[str]:
|
||||
"""Generate snippets from text."""
|
||||
assert query is not None
|
||||
if not text:
|
||||
logger.warning("Empty text for generate_snippets")
|
||||
return []
|
||||
|
||||
candidates = (
|
||||
SnippetGenerator.create_snippet(text, pos, max_length)
|
||||
for pos in SnippetGenerator.find_all_matches(text, query)
|
||||
)
|
||||
filtered = SnippetGenerator.filter_non_overlapping(candidates)
|
||||
snippets = list(itertools.islice(filtered, max_snippets))
|
||||
|
||||
# Fallback to first word search if no full matches
|
||||
# it's another assumption: proper snippet logic generation is quite complicated and tied to db logic, so simplification is used here
|
||||
if not snippets and " " in query:
|
||||
first_word = query.split()[0]
|
||||
return SnippetGenerator.generate(text, first_word, max_length, max_snippets)
|
||||
|
||||
return snippets
|
||||
|
||||
@staticmethod
|
||||
def from_summary(
|
||||
summary: str,
|
||||
query: SearchQuery,
|
||||
max_snippets: NonNegativeInt = LONG_SUMMARY_MAX_SNIPPETS,
|
||||
) -> list[str]:
|
||||
"""Generate snippets from summary text."""
|
||||
return SnippetGenerator.generate(summary, query, max_snippets=max_snippets)
|
||||
|
||||
@staticmethod
|
||||
def combine_sources(
|
||||
summary: NonEmptyString | None,
|
||||
webvtt: WebVTTContent | None,
|
||||
query: SearchQuery,
|
||||
max_total: NonNegativeInt = DEFAULT_MAX_SNIPPETS,
|
||||
) -> tuple[list[str], NonNegativeInt]:
|
||||
"""Combine snippets from multiple sources and return total match count.
|
||||
|
||||
Returns (snippets, total_match_count) tuple.
|
||||
|
||||
snippets can be empty for real in case of e.g. title match
|
||||
"""
|
||||
|
||||
assert (
|
||||
summary is not None or webvtt is not None
|
||||
), "At least one source must be present"
|
||||
|
||||
webvtt_matches = 0
|
||||
summary_matches = 0
|
||||
|
||||
if webvtt:
|
||||
webvtt_text = WebVTTProcessor.extract_text(webvtt)
|
||||
webvtt_matches = SnippetGenerator.count_matches(webvtt_text, query)
|
||||
|
||||
if summary:
|
||||
summary_matches = SnippetGenerator.count_matches(summary, query)
|
||||
|
||||
total_matches = NonNegativeInt(webvtt_matches + summary_matches)
|
||||
|
||||
summary_snippets = (
|
||||
SnippetGenerator.from_summary(summary, query) if summary else []
|
||||
)
|
||||
|
||||
if len(summary_snippets) >= max_total:
|
||||
return summary_snippets[:max_total], total_matches
|
||||
|
||||
remaining = max_total - len(summary_snippets)
|
||||
webvtt_snippets = (
|
||||
WebVTTProcessor.generate_snippets(webvtt, query, remaining)
|
||||
if webvtt
|
||||
else []
|
||||
)
|
||||
|
||||
return summary_snippets + webvtt_snippets, total_matches
|
||||
|
||||
|
||||
class SearchController:
|
||||
"""Controller for search operations across different entities."""
|
||||
|
||||
@classmethod
|
||||
async def search_transcripts(
|
||||
cls, params: SearchParameters
|
||||
) -> tuple[list[SearchResult], int]:
|
||||
"""
|
||||
Full-text search for transcripts using PostgreSQL tsvector.
|
||||
Returns (results, total_count).
|
||||
"""
|
||||
|
||||
if not is_postgresql():
|
||||
logger.warning(
|
||||
"Full-text search requires PostgreSQL. Returning empty results."
|
||||
)
|
||||
return [], 0
|
||||
|
||||
base_columns = [
|
||||
transcripts.c.id,
|
||||
transcripts.c.title,
|
||||
transcripts.c.created_at,
|
||||
transcripts.c.duration,
|
||||
transcripts.c.status,
|
||||
transcripts.c.user_id,
|
||||
transcripts.c.room_id,
|
||||
transcripts.c.source_kind,
|
||||
transcripts.c.webvtt,
|
||||
transcripts.c.long_summary,
|
||||
sqlalchemy.case(
|
||||
(
|
||||
transcripts.c.room_id.isnot(None) & rooms.c.id.is_(None),
|
||||
"Deleted Room",
|
||||
),
|
||||
else_=rooms.c.name,
|
||||
).label("room_name"),
|
||||
]
|
||||
search_query = None
|
||||
if params.query_text is not None:
|
||||
search_query = sqlalchemy.func.websearch_to_tsquery(
|
||||
"english", params.query_text
|
||||
)
|
||||
rank_column = sqlalchemy.func.ts_rank(
|
||||
transcripts.c.search_vector_en,
|
||||
search_query,
|
||||
32, # normalization flag: rank/(rank+1) for 0-1 range
|
||||
).label("rank")
|
||||
else:
|
||||
rank_column = sqlalchemy.cast(1.0, sqlalchemy.Float).label("rank")
|
||||
|
||||
columns = base_columns + [rank_column]
|
||||
base_query = sqlalchemy.select(columns).select_from(
|
||||
transcripts.join(rooms, transcripts.c.room_id == rooms.c.id, isouter=True)
|
||||
)
|
||||
|
||||
if params.query_text is not None:
|
||||
# because already initialized based on params.query_text presence above
|
||||
assert search_query is not None
|
||||
base_query = base_query.where(
|
||||
transcripts.c.search_vector_en.op("@@")(search_query)
|
||||
)
|
||||
|
||||
if params.user_id:
|
||||
base_query = base_query.where(
|
||||
sqlalchemy.or_(
|
||||
transcripts.c.user_id == params.user_id, rooms.c.is_shared
|
||||
)
|
||||
)
|
||||
else:
|
||||
base_query = base_query.where(rooms.c.is_shared)
|
||||
if params.room_id:
|
||||
base_query = base_query.where(transcripts.c.room_id == params.room_id)
|
||||
if params.source_kind:
|
||||
base_query = base_query.where(
|
||||
transcripts.c.source_kind == params.source_kind
|
||||
)
|
||||
|
||||
if params.query_text is not None:
|
||||
order_by = sqlalchemy.desc(sqlalchemy.text("rank"))
|
||||
else:
|
||||
order_by = sqlalchemy.desc(transcripts.c.created_at)
|
||||
|
||||
query = base_query.order_by(order_by).limit(params.limit).offset(params.offset)
|
||||
|
||||
rs = await get_database().fetch_all(query)
|
||||
|
||||
count_query = sqlalchemy.select([sqlalchemy.func.count()]).select_from(
|
||||
base_query.alias("search_results")
|
||||
)
|
||||
total = await get_database().fetch_val(count_query)
|
||||
|
||||
def _process_result(r: DbRecord) -> SearchResult:
|
||||
r_dict: Dict[str, Any] = dict(r)
|
||||
|
||||
webvtt_raw: str | None = r_dict.pop("webvtt", None)
|
||||
webvtt: WebVTTContent | None
|
||||
if webvtt_raw:
|
||||
webvtt = WebVTTProcessor.parse(webvtt_raw)
|
||||
else:
|
||||
webvtt = None
|
||||
|
||||
long_summary_r: str | None = r_dict.pop("long_summary", None)
|
||||
long_summary: NonEmptyString = try_parse_non_empty_string(long_summary_r)
|
||||
room_name: str | None = r_dict.pop("room_name", None)
|
||||
db_result = SearchResultDB.model_validate(r_dict)
|
||||
|
||||
at_least_one_source = webvtt is not None or long_summary is not None
|
||||
has_query = params.query_text is not None
|
||||
snippets, total_match_count = (
|
||||
SnippetGenerator.combine_sources(
|
||||
long_summary, webvtt, params.query_text, DEFAULT_MAX_SNIPPETS
|
||||
)
|
||||
if has_query and at_least_one_source
|
||||
else ([], 0)
|
||||
)
|
||||
|
||||
return SearchResult(
|
||||
**db_result.model_dump(),
|
||||
room_name=room_name,
|
||||
search_snippets=snippets,
|
||||
total_match_count=total_match_count,
|
||||
)
|
||||
|
||||
try:
|
||||
results = [_process_result(r) for r in rs]
|
||||
except ValidationError as e:
|
||||
logger.error(f"Invalid search result data: {e}", exc_info=True)
|
||||
raise HTTPException(
|
||||
status_code=500, detail="Internal search result data consistency error"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing search results: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
return results, total
|
||||
|
||||
|
||||
search_controller = SearchController()
|
||||
webvtt_processor = WebVTTProcessor()
|
||||
snippet_generator = SnippetGenerator()
|
||||
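An illustrative call into the new controller (query values are made up), assuming a PostgreSQL DATABASE_URL since the controller returns ([], 0) on other backends:

from reflector.db.search import SearchParameters, search_controller

async def demo() -> None:
    params = SearchParameters(query_text="quarterly planning", limit=10)
    results, total = await search_controller.search_transcripts(params)
    for r in results:
        print(r.title, round(r.rank, 3), r.search_snippets)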
@@ -3,20 +3,27 @@ import json
|
||||
import os
|
||||
import shutil
|
||||
from contextlib import asynccontextmanager
|
||||
from datetime import datetime, timezone
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Literal
|
||||
|
||||
import sqlalchemy
|
||||
from fastapi import HTTPException
|
||||
from pydantic import BaseModel, ConfigDict, Field, field_serializer
|
||||
from reflector.db import database, metadata
|
||||
from sqlalchemy import Enum
|
||||
from sqlalchemy.dialects.postgresql import TSVECTOR
|
||||
from sqlalchemy.sql import false, or_
|
||||
|
||||
from reflector.db import get_database, metadata
|
||||
from reflector.db.recordings import recordings_controller
|
||||
from reflector.db.rooms import rooms
|
||||
from reflector.db.utils import is_postgresql
|
||||
from reflector.logger import logger
|
||||
from reflector.processors.types import Word as ProcessorWord
|
||||
from reflector.settings import settings
|
||||
from reflector.storage import get_transcripts_storage
|
||||
from reflector.storage import get_recordings_storage, get_transcripts_storage
|
||||
from reflector.utils import generate_uuid4
|
||||
from sqlalchemy import Enum
|
||||
from sqlalchemy.sql import false, or_
|
||||
from reflector.utils.webvtt import topics_to_webvtt
|
||||
|
||||
|
||||
class SourceKind(enum.StrEnum):
|
||||
@@ -33,7 +40,7 @@ transcripts = sqlalchemy.Table(
|
||||
sqlalchemy.Column("status", sqlalchemy.String),
|
||||
sqlalchemy.Column("locked", sqlalchemy.Boolean),
|
||||
sqlalchemy.Column("duration", sqlalchemy.Float),
|
||||
sqlalchemy.Column("created_at", sqlalchemy.DateTime),
|
||||
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True)),
|
||||
sqlalchemy.Column("title", sqlalchemy.String),
|
||||
sqlalchemy.Column("short_summary", sqlalchemy.String),
|
||||
sqlalchemy.Column("long_summary", sqlalchemy.String),
|
||||
@@ -74,18 +81,56 @@ transcripts = sqlalchemy.Table(
    # the main "audio deleted" is the presence of the audio itself / consents not-given
    # same field could've been in recording/meeting, and it's maybe even ok to dupe it at need
    sqlalchemy.Column("audio_deleted", sqlalchemy.Boolean),
    sqlalchemy.Column("room_id", sqlalchemy.String),
    sqlalchemy.Column("webvtt", sqlalchemy.Text),
    sqlalchemy.Index("idx_transcript_recording_id", "recording_id"),
    sqlalchemy.Index("idx_transcript_user_id", "user_id"),
    sqlalchemy.Index("idx_transcript_created_at", "created_at"),
    sqlalchemy.Index("idx_transcript_user_id_recording_id", "user_id", "recording_id"),
    sqlalchemy.Index("idx_transcript_room_id", "room_id"),
    sqlalchemy.Index("idx_transcript_source_kind", "source_kind"),
    sqlalchemy.Index("idx_transcript_room_id_created_at", "room_id", "created_at"),
)

# Add PostgreSQL-specific full-text search column
# This matches the migration in migrations/versions/116b2f287eab_add_full_text_search.py
if is_postgresql():
    transcripts.append_column(
        sqlalchemy.Column(
            "search_vector_en",
            TSVECTOR,
            sqlalchemy.Computed(
                "setweight(to_tsvector('english', coalesce(title, '')), 'A') || "
                "setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') || "
                "setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')",
                persisted=True,
            ),
        )
    )
    # Add GIN index for the search vector
    transcripts.append_constraint(
        sqlalchemy.Index(
            "idx_transcript_search_vector_en",
            "search_vector_en",
            postgresql_using="gin",
        )
    )
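
# A minimal sketch (not part of this commit) of how the weighted vector might be
# queried; the helper name, websearch syntax, and limit of 20 are assumptions,
# and the column only exists when is_postgresql() added it above.
def build_transcript_search_query(query_text: str):
    ts_query = sqlalchemy.func.websearch_to_tsquery("english", query_text)
    return (
        sqlalchemy.select(
            transcripts.c.id,
            transcripts.c.title,
            # title hits (weight A) rank above summary (B) and webvtt (C) hits
            sqlalchemy.func.ts_rank(
                transcripts.c.search_vector_en, ts_query
            ).label("rank"),
        )
        .where(transcripts.c.search_vector_en.op("@@")(ts_query))
        .order_by(sqlalchemy.desc("rank"))
        .limit(20)
    )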


def generate_transcript_name() -> str:
    now = datetime.now(timezone.utc)
    return f"Transcript {now.strftime('%Y-%m-%d %H:%M:%S')}"


TranscriptStatus = Literal[
    "idle", "uploaded", "recording", "processing", "error", "ended"
]


class StrValue(BaseModel):
    value: str


class AudioWaveform(BaseModel):
    data: list[float]

@@ -144,14 +189,18 @@ class TranscriptParticipant(BaseModel):


class Transcript(BaseModel):
    """Full transcript model with all fields."""

    id: str = Field(default_factory=generate_uuid4)
    user_id: str | None = None
    name: str = Field(default_factory=generate_transcript_name)
    status: str = "idle"
    locked: bool = False
    status: TranscriptStatus = "idle"
    duration: float = 0
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    title: str | None = None
    source_kind: SourceKind
    room_id: str | None = None
    locked: bool = False
    short_summary: str | None = None
    long_summary: str | None = None
    topics: list[TranscriptTopic] = []
@@ -165,8 +214,8 @@ class Transcript(BaseModel):
    meeting_id: str | None = None
    recording_id: str | None = None
    zulip_message_id: int | None = None
    source_kind: SourceKind
    audio_deleted: bool | None = None
    webvtt: str | None = None

    @field_serializer("created_at", when_used="json")
    def serialize_datetime(self, dt: datetime) -> str:
@@ -267,10 +316,12 @@ class Transcript(BaseModel):
        # we need to create a url to be used for diarization
        # we can't use the audio_mp3_filename because it's not accessible
        # from the diarization processor
        from datetime import timedelta

        from reflector.app import app
        from reflector.views.transcripts import create_access_token
        # TODO don't import app in db
        from reflector.app import app  # noqa: PLC0415

        # TODO a util + don't import views in db
        from reflector.views.transcripts import create_access_token  # noqa: PLC0415

        path = app.url_path_for(
            "transcript_get_audio_mp3",
@@ -331,17 +382,9 @@ class TranscriptController:
        - `room_id`: filter transcripts by room ID
        - `search_term`: filter transcripts by search term
        """
        from reflector.db.meetings import meetings
        from reflector.db.recordings import recordings
        from reflector.db.rooms import rooms

        query = (
            transcripts.select()
            .join(
                recordings, transcripts.c.recording_id == recordings.c.id, isouter=True
            )
            .join(meetings, recordings.c.meeting_id == meetings.c.id, isouter=True)
            .join(rooms, meetings.c.room_id == rooms.c.id, isouter=True)
        query = transcripts.select().join(
            rooms, transcripts.c.room_id == rooms.c.id, isouter=True
        )

        if user_id:
@@ -355,7 +398,7 @@ class TranscriptController:
            query = query.where(transcripts.c.source_kind == source_kind)

        if room_id:
            query = query.where(rooms.c.id == room_id)
            query = query.where(transcripts.c.room_id == room_id)

        if search_term:
            query = query.where(transcripts.c.title.ilike(f"%{search_term}%"))
@@ -368,7 +411,6 @@
        query = query.with_only_columns(
            transcript_columns
            + [
                rooms.c.id.label("room_id"),
                rooms.c.name.label("room_name"),
            ]
        )
@@ -390,7 +432,7 @@
        if return_query:
            return query

        results = await database.fetch_all(query)
        results = await get_database().fetch_all(query)
        return results

    async def get_by_id(self, transcript_id: str, **kwargs) -> Transcript | None:
@@ -400,7 +442,7 @@
        query = transcripts.select().where(transcripts.c.id == transcript_id)
        if "user_id" in kwargs:
            query = query.where(transcripts.c.user_id == kwargs["user_id"])
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Transcript(**result)
@@ -414,11 +456,27 @@
        query = transcripts.select().where(transcripts.c.recording_id == recording_id)
        if "user_id" in kwargs:
            query = query.where(transcripts.c.user_id == kwargs["user_id"])
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Transcript(**result)

    async def get_by_room_id(self, room_id: str, **kwargs) -> list[Transcript]:
        """
        Get transcripts by room_id (direct access without joins)
        """
        query = transcripts.select().where(transcripts.c.room_id == room_id)
        if "user_id" in kwargs:
            query = query.where(transcripts.c.user_id == kwargs["user_id"])
        if "order_by" in kwargs:
            order_by = kwargs["order_by"]
            field = getattr(transcripts.c, order_by[1:])
            if order_by.startswith("-"):
                field = field.desc()
            query = query.order_by(field)
        results = await get_database().fetch_all(query)
        return [Transcript(**result) for result in results]

    async def get_by_id_for_http(
        self,
        transcript_id: str,
@@ -434,7 +492,7 @@
        to determine if the user can access the transcript.
        """
        query = transcripts.select().where(transcripts.c.id == transcript_id)
        result = await database.fetch_one(query)
        result = await get_database().fetch_one(query)
        if not result:
            raise HTTPException(status_code=404, detail="Transcript not found")
@@ -469,6 +527,8 @@
        user_id: str | None = None,
        recording_id: str | None = None,
        share_mode: str = "private",
        meeting_id: str | None = None,
        room_id: str | None = None,
    ):
        """
        Add a new transcript
@@ -481,25 +541,56 @@
            user_id=user_id,
            recording_id=recording_id,
            share_mode=share_mode,
            meeting_id=meeting_id,
            room_id=room_id,
        )
        query = transcripts.insert().values(**transcript.model_dump())
        await database.execute(query)
        await get_database().execute(query)
        return transcript

    async def update(self, transcript: Transcript, values: dict, mutate=True):
    # TODO investigate why mutate= is used. it's used in one place currently, maybe because of ORM field updates.
    # using mutate=True is discouraged
    async def update(
        self, transcript: Transcript, values: dict, mutate=False
    ) -> Transcript:
        """
        Update a transcript's fields with key/values in values
        Update a transcript's fields with key/values in values.
        Returns a copy of the transcript with updated values.
        """
        values = TranscriptController._handle_topics_update(values)

        query = (
            transcripts.update()
            .where(transcripts.c.id == transcript.id)
            .values(**values)
        )
        await database.execute(query)
        await get_database().execute(query)
        if mutate:
            for key, value in values.items():
                setattr(transcript, key, value)

        updated_transcript = transcript.model_copy(update=values)
        return updated_transcript

    @staticmethod
    def _handle_topics_update(values: dict) -> dict:
        """Auto-update WebVTT when topics are updated."""

        if values.get("webvtt") is not None:
            logger.warn("trying to update read-only webvtt column")
            pass

        topics_data = values.get("topics")
        if topics_data is None:
            return values

        return {
            **values,
            "webvtt": topics_to_webvtt(
                [TranscriptTopic(**topic_dict) for topic_dict in topics_data]
            ),
        }
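
    # A hypothetical caller (not part of this commit) illustrating the new
    # semantics: update() returns a model_copy and leaves its argument
    # untouched unless mutate=True, while a "topics" update also regenerates
    # the read-only webvtt column, e.g.:
    #
    #   updated = await transcripts_controller.update(
    #       transcript, {"topics": transcript.topics_dump()}
    #   )
    #   assert updated.webvtt is not None  # rebuilt from the topics
    #   assert updated is not transcript   # original object left as-is
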
    async def remove_by_id(
        self,
        transcript_id: str,
@@ -513,23 +604,55 @@
            return
        if user_id is not None and transcript.user_id != user_id:
            return
        if transcript.audio_location == "storage" and not transcript.audio_deleted:
            try:
                await get_transcripts_storage().delete_file(
                    transcript.storage_audio_path
                )
            except Exception as e:
                logger.warning(
                    "Failed to delete transcript audio from storage",
                    exc_info=e,
                    transcript_id=transcript.id,
                )
        transcript.unlink()
        if transcript.recording_id:
            try:
                recording = await recordings_controller.get_by_id(
                    transcript.recording_id
                )
                if recording:
                    try:
                        await get_recordings_storage().delete_file(recording.object_key)
                    except Exception as e:
                        logger.warning(
                            "Failed to delete recording object from S3",
                            exc_info=e,
                            recording_id=transcript.recording_id,
                        )
                await recordings_controller.remove_by_id(transcript.recording_id)
            except Exception as e:
                logger.warning(
                    "Failed to delete recording row",
                    exc_info=e,
                    recording_id=transcript.recording_id,
                )
        query = transcripts.delete().where(transcripts.c.id == transcript_id)
        await database.execute(query)
        await get_database().execute(query)

    async def remove_by_recording_id(self, recording_id: str):
        """
        Remove a transcript by recording_id
        """
        query = transcripts.delete().where(transcripts.c.recording_id == recording_id)
        await database.execute(query)
        await get_database().execute(query)

    @asynccontextmanager
    async def transaction(self):
        """
        A context manager for database transaction
        """
        async with database.transaction(isolation="serializable"):
        async with get_database().transaction(isolation="serializable"):
            yield

    async def append_event(
@@ -542,11 +665,7 @@
        Append an event to a transcript
        """
        resp = transcript.add_event(event=event, data=data)
        await self.update(
            transcript,
            {"events": transcript.events_dump()},
            mutate=False,
        )
        await self.update(transcript, {"events": transcript.events_dump()})
        return resp

    async def upsert_topic(
@@ -558,11 +677,7 @@
        Upsert topics to a transcript
        """
        transcript.upsert_topic(topic)
        await self.update(
            transcript,
            {"topics": transcript.topics_dump()},
            mutate=False,
        )
        await self.update(transcript, {"topics": transcript.topics_dump()})

    async def move_mp3_to_storage(self, transcript: Transcript):
        """
@@ -587,7 +702,8 @@
        )

        # indicate on the transcript that the audio is now on storage
        await self.update(transcript, {"audio_location": "storage"})
        # mutates transcript argument
        await self.update(transcript, {"audio_location": "storage"}, mutate=True)

        # unlink the local file
        transcript.audio_mp3_filename.unlink(missing_ok=True)
@@ -611,11 +727,7 @@
        Add/update a participant to a transcript
        """
        result = transcript.upsert_participant(participant)
        await self.update(
            transcript,
            {"participants": transcript.participants_dump()},
            mutate=False,
        )
        await self.update(transcript, {"participants": transcript.participants_dump()})
        return result

    async def delete_participant(
@@ -627,11 +739,29 @@
        Delete a participant from a transcript
        """
        transcript.delete_participant(participant_id)
        await self.update(
            transcript,
            {"participants": transcript.participants_dump()},
            mutate=False,
        )
        await self.update(transcript, {"participants": transcript.participants_dump()})

    async def set_status(
        self, transcript_id: str, status: TranscriptStatus
    ) -> TranscriptEvent | None:
        """
        Update the status of a transcript

        Will add an event STATUS + update the status field of transcript
        """
        async with self.transaction():
            transcript = await self.get_by_id(transcript_id)
            if not transcript:
                raise Exception(f"Transcript {transcript_id} not found")
            if transcript.status == status:
                return
            resp = await self.append_event(
                transcript=transcript,
                event="STATUS",
                data=StrValue(value=status),
            )
            await self.update(transcript, {"status": status})
            return resp


transcripts_controller = TranscriptController()
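
For orientation, a short hypothetical driver for the controller API above; the transcript id is made up, and the status strings come from the TranscriptStatus literal:

import asyncio

from reflector.db.transcripts import transcripts_controller


async def main() -> None:
    # each call appends a STATUS event and updates the row in one
    # serializable transaction; repeating a status is a no-op (returns None)
    await transcripts_controller.set_status("t-123", "processing")
    await transcripts_controller.set_status("t-123", "ended")


asyncio.run(main())
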
9
server/reflector/db/utils.py
Normal file
@@ -0,0 +1,9 @@
"""Database utility functions."""

from reflector.db import get_database


def is_postgresql() -> bool:
    return get_database().url.scheme and get_database().url.scheme.startswith(
        "postgresql"
    )
227
server/reflector/jibri_events.py
Normal file
@@ -0,0 +1,227 @@
import json
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import BaseModel
from typing_extensions import TypedDict


class ParticipantInfo(BaseModel):
    jid: str
    nick: str
    id: str
    is_moderator: bool = False


class ParticipantLeftInfo(BaseModel):
    jid: str
    nick: Optional[str] = None
    duration_seconds: Optional[int] = None


class RoomCreatedEvent(BaseModel):
    type: Literal["room_created"]
    timestamp: int
    room_name: str
    room_jid: str
    meeting_url: str


class RecordingStartedEvent(BaseModel):
    type: Literal["recording_started"]
    timestamp: int
    room_name: str
    session_id: str
    jibri_jid: str


class RecordingStoppedEvent(BaseModel):
    type: Literal["recording_stopped"]
    timestamp: int
    room_name: str
    session_id: str
    meeting_url: str


class ParticipantJoinedEvent(BaseModel):
    type: Literal["participant_joined"]
    timestamp: int
    room_name: str
    participant: ParticipantInfo


class ParticipantLeftEvent(BaseModel):
    type: Literal["participant_left"]
    timestamp: int
    room_name: str
    participant: ParticipantLeftInfo


class SpeakerActiveEvent(BaseModel):
    type: Literal["speaker_active"]
    timestamp: int
    room_name: str
    speaker_jid: str
    speaker_nick: str
    duration: int


class DominantSpeakerChangedEvent(BaseModel):
    type: Literal["dominant_speaker_changed"]
    timestamp: int
    room_name: str
    previous: str
    current: str


JitsiEvent = Union[
    RoomCreatedEvent,
    RecordingStartedEvent,
    RecordingStoppedEvent,
    ParticipantJoinedEvent,
    ParticipantLeftEvent,
    SpeakerActiveEvent,
    DominantSpeakerChangedEvent,
]


class RoomInfo(TypedDict):
    name: str
    jid: str
    created_at: int
    meeting_url: str
    recording_stopped_at: Optional[int]


class ParticipantData(TypedDict):
    jid: str
    nick: str
    id: str
    is_moderator: bool
    joined_at: int
    left_at: Optional[int]
    duration: Optional[int]
    events: List[str]


class SpeakerStats(TypedDict):
    total_time: int
    nick: str


class ParsedMetadata(TypedDict):
    room: RoomInfo
    participants: List[ParticipantData]
    speaker_stats: Dict[str, SpeakerStats]
    event_count: int


class JitsiEventParser:
    def parse_event(self, event_data: Dict[str, Any]) -> Optional[JitsiEvent]:
        event_type = event_data.get("type")

        try:
            if event_type == "room_created":
                return RoomCreatedEvent(**event_data)
            elif event_type == "recording_started":
                return RecordingStartedEvent(**event_data)
            elif event_type == "recording_stopped":
                return RecordingStoppedEvent(**event_data)
            elif event_type == "participant_joined":
                return ParticipantJoinedEvent(**event_data)
            elif event_type == "participant_left":
                return ParticipantLeftEvent(**event_data)
            elif event_type == "speaker_active":
                return SpeakerActiveEvent(**event_data)
            elif event_type == "dominant_speaker_changed":
                return DominantSpeakerChangedEvent(**event_data)
            else:
                return None
        except Exception:
            return None

    def parse_events_file(self, recording_path: str) -> ParsedMetadata:
        events_file = Path(recording_path) / "events.jsonl"

        room_info: RoomInfo = {
            "name": "",
            "jid": "",
            "created_at": 0,
            "meeting_url": "",
            "recording_stopped_at": None,
        }

        if not events_file.exists():
            return ParsedMetadata(
                room=room_info, participants=[], speaker_stats={}, event_count=0
            )

        events: List[JitsiEvent] = []
        participants: Dict[str, ParticipantData] = {}
        speaker_stats: Dict[str, SpeakerStats] = {}

        with open(events_file, "r") as f:
            for line in f:
                if not line.strip():
                    continue

                try:
                    event_data = json.loads(line)
                    event = self.parse_event(event_data)

                    if event is None:
                        continue

                    events.append(event)

                    if isinstance(event, RoomCreatedEvent):
                        room_info = {
                            "name": event.room_name,
                            "jid": event.room_jid,
                            "created_at": event.timestamp,
                            "meeting_url": event.meeting_url,
                            "recording_stopped_at": None,
                        }

                    elif isinstance(event, ParticipantJoinedEvent):
                        participants[event.participant.id] = {
                            "jid": event.participant.jid,
                            "nick": event.participant.nick,
                            "id": event.participant.id,
                            "is_moderator": event.participant.is_moderator,
                            "joined_at": event.timestamp,
                            "left_at": None,
                            "duration": None,
                            "events": ["joined"],
                        }

                    elif isinstance(event, ParticipantLeftEvent):
                        participant_id = event.participant.jid.split("/")[0]
                        if participant_id in participants:
                            participants[participant_id]["left_at"] = event.timestamp
                            participants[participant_id]["duration"] = (
                                event.participant.duration_seconds
                            )
                            participants[participant_id]["events"].append("left")

                    elif isinstance(event, SpeakerActiveEvent):
                        if event.speaker_jid not in speaker_stats:
                            speaker_stats[event.speaker_jid] = {
                                "total_time": 0,
                                "nick": event.speaker_nick,
                            }
                        speaker_stats[event.speaker_jid]["total_time"] += event.duration

                    elif isinstance(event, RecordingStoppedEvent):
                        room_info["recording_stopped_at"] = event.timestamp
                        room_info["meeting_url"] = event.meeting_url

                except (json.JSONDecodeError, Exception):
                    continue

        return ParsedMetadata(
            room=room_info,
            participants=list(participants.values()),
            speaker_stats=speaker_stats,
            event_count=len(events),
        )
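
The parser above tolerates missing files and malformed lines, so a caller always gets a ParsedMetadata back. A minimal, hypothetical usage sketch (the recording path is made up):

from reflector.jibri_events import JitsiEventParser

parser = JitsiEventParser()
metadata = parser.parse_events_file("/recordings/session-0001")

print(metadata["room"]["name"], metadata["event_count"])
for participant in metadata["participants"]:
    print(participant["nick"], participant["joined_at"], participant["left_at"])
for jid, stats in metadata["speaker_stats"].items():
    print(jid, stats["nick"], stats["total_time"])
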
83
server/reflector/llm.py
Normal file
@@ -0,0 +1,83 @@
from typing import Type, TypeVar

from llama_index.core import Settings
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.llms.openai_like import OpenAILike
from pydantic import BaseModel

T = TypeVar("T", bound=BaseModel)

STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
Based on the following analysis, provide the information in the requested JSON format:

Analysis:
{analysis}

{format_instructions}
"""


class LLM:
    def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048):
        self.settings_obj = settings
        self.model_name = settings.LLM_MODEL
        self.url = settings.LLM_URL
        self.api_key = settings.LLM_API_KEY
        self.context_window = settings.LLM_CONTEXT_WINDOW
        self.temperature = temperature
        self.max_tokens = max_tokens

        # Configure llamaindex Settings
        self._configure_llamaindex()

    def _configure_llamaindex(self):
        """Configure llamaindex Settings with OpenAILike LLM"""
        Settings.llm = OpenAILike(
            model=self.model_name,
            api_base=self.url,
            api_key=self.api_key,
            context_window=self.context_window,
            is_chat_model=True,
            is_function_calling_model=False,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
        )

    async def get_response(
        self, prompt: str, texts: list[str], tone_name: str | None = None
    ) -> str:
        """Get a text response using TreeSummarize for non-function-calling models"""
        summarizer = TreeSummarize(verbose=False)
        response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
        return str(response).strip()

    async def get_structured_response(
        self,
        prompt: str,
        texts: list[str],
        output_cls: Type[T],
        tone_name: str | None = None,
    ) -> T:
        """Get structured output from LLM for non-function-calling models"""
        summarizer = TreeSummarize(verbose=True)
        response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)

        output_parser = PydanticOutputParser(output_cls)

        program = LLMTextCompletionProgram.from_defaults(
            output_parser=output_parser,
            prompt_template_str=STRUCTURED_RESPONSE_PROMPT_TEMPLATE,
            verbose=False,
        )

        format_instructions = output_parser.format(
            "Please structure the above information in the following JSON format:"
        )

        output = await program.acall(
            analysis=str(response), format_instructions=format_instructions
        )

        return output
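
A hypothetical caller for the structured path above; ActionItems and the prompt are made up, and settings is assumed to carry the LLM_* fields read in __init__:

from pydantic import BaseModel

from reflector.llm import LLM
from reflector.settings import settings


class ActionItems(BaseModel):
    items: list[str]


async def extract_action_items(transcript_text: str) -> ActionItems:
    # first summarizes the texts, then re-parses the summary into the schema
    llm = LLM(settings)
    return await llm.get_structured_response(
        prompt="List the action items from this meeting.",
        texts=[transcript_text],
        output_cls=ActionItems,
    )
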
@@ -1,2 +0,0 @@
from .base import LLM  # noqa: F401
from .llm_params import LLMTaskParams  # noqa: F401
Some files were not shown because too many files have changed in this diff.