Compare commits

..

21 Commits

Author SHA1 Message Date
Igor Loskutov
1bf73c8199 sync with parent 2025-10-21 11:59:26 -04:00
d82abf65ba Emit multriack pipeline events 2025-10-21 16:31:31 +02:00
Igor Loskutov
7d239fe380 dailico track merge vibe 2025-10-21 10:30:19 -04:00
acb6e90f28 Generate waveforms for the mixed audio 2025-10-21 13:33:31 +02:00
Igor Loskutov
f844b9fc1f Merge branch 'igor/dailico-2' of github-monadical:Monadical-SAS/reflector into igor/dailico-2 2025-10-17 10:00:40 -04:00
96f05020cc Align tracks of a multitrack recording 2025-10-17 15:27:27 +02:00
fc79ff3114 Use explicit track keys for processing 2025-10-17 14:42:07 +02:00
Igor Loskutov
3641e2e599 apply platform from envs in priority: non-dry 2025-10-16 15:08:19 -04:00
c23518d2e3 Trigger multitrack processing for daily recordings 2025-10-16 20:05:26 +02:00
23edffe2a2 Mixdown with pyav filter graph 2025-10-16 17:14:55 +02:00
e59770ecc9 Mixdown audio tracks 2025-10-16 17:14:55 +02:00
6301f2afa6 Add multitrack pipeline 2025-10-16 17:14:55 +02:00
9ac7f0e8e2 chore(main): release 0.14.0 (#670) 2025-10-16 17:14:55 +02:00
Igor Loskutov
0a84a9351a stub processor (vibe) self-review 2025-10-10 20:41:08 -04:00
Igor Loskutov
ca22084845 stub processor (vibe) self-review 2025-10-10 18:45:19 -04:00
Igor Loskutov
f945f84be9 stub processor (vibe) 2025-10-10 18:05:31 -04:00
Igor Loskutov
4c523c8eec dont show recording ui on call 2025-10-10 12:45:10 -04:00
Igor Loskutov
0fcf8b6875 doc update (vibe) 2025-10-10 10:57:35 -04:00
Igor Loskutov
446cb748ae vibe dailyco 2025-10-09 17:04:16 -04:00
Igor Loskutov
3e1339a8ea vibe dailyco 2025-10-09 15:52:23 -04:00
Igor Loskutov
807819bb2f llm instructions 2025-10-08 13:06:04 -04:00
262 changed files with 10758 additions and 53432 deletions

90
.github/workflows/deploy.yml vendored Normal file
View File

@@ -0,0 +1,90 @@
name: Deploy to Amazon ECS
on: [workflow_dispatch]
env:
# 950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector
AWS_REGION: us-east-1
ECR_REPOSITORY: reflector
jobs:
build:
strategy:
matrix:
include:
- platform: linux/amd64
runner: linux-amd64
arch: amd64
- platform: linux/arm64
runner: linux-arm64
arch: arm64
runs-on: ${{ matrix.runner }}
permissions:
contents: read
outputs:
registry: ${{ steps.login-ecr.outputs.registry }}
steps:
- uses: actions/checkout@v4
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build and push ${{ matrix.arch }}
uses: docker/build-push-action@v5
with:
context: server
platforms: ${{ matrix.platform }}
push: true
tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
github-token: ${{ secrets.GHA_CACHE_TOKEN }}
provenance: false
create-manifest:
runs-on: ubuntu-latest
needs: [build]
permissions:
deployments: write
contents: read
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
uses: aws-actions/amazon-ecr-login@v2
- name: Create and push multi-arch manifest
run: |
# Get the registry URL (since we can't easily access job outputs in matrix)
ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com
docker manifest create \
$ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
$ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
$ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64
docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest
echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"

View File

@@ -1,31 +1,35 @@
-name: Build and Push Backend Docker Image (Docker Hub)
+name: Build and Push Frontend Docker Image
 on:
   push:
-    tags:
-      - "v*"
+    branches:
+      - main
+    paths:
+      - 'www/**'
+      - '.github/workflows/docker-frontend.yml'
   workflow_dispatch:
 env:
-  REGISTRY: docker.io
-  IMAGE_NAME: monadicalsas/reflector-backend
+  REGISTRY: ghcr.io
+  IMAGE_NAME: ${{ github.repository }}-frontend
 jobs:
   build-and-push:
     runs-on: ubuntu-latest
     permissions:
       contents: read
+      packages: write
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
-      - name: Log in to Docker Hub
+      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
         with:
           registry: ${{ env.REGISTRY }}
-          username: monadicalsas
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
       - name: Extract metadata
         id: meta
@@ -34,7 +38,7 @@ jobs:
           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
           tags: |
             type=ref,event=branch
-            type=ref,event=tag
+            type=sha,prefix={{branch}}-
             type=raw,value=latest,enable={{is_default_branch}}
       - name: Set up Docker Buildx
@@ -43,11 +47,11 @@ jobs:
       - name: Build and push Docker image
         uses: docker/build-push-action@v5
         with:
-          context: ./server
-          file: ./server/Dockerfile
+          context: ./www
+          file: ./www/Dockerfile
           push: true
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
           platforms: linux/amd64,linux/arm64

View File

@@ -1,70 +0,0 @@
name: Build and Push Frontend Docker Image
on:
push:
tags:
- "v*"
workflow_dispatch:
env:
REGISTRY: docker.io
IMAGE_NAME: monadicalsas/reflector-frontend
jobs:
build-and-push:
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: monadicalsas
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=raw,value=latest,enable={{is_default_branch}}
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: ./www
file: ./www/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
deploy:
needs: build-and-push
runs-on: ubuntu-latest
if: success()
strategy:
matrix:
environment: [reflector-monadical, reflector-media]
environment: ${{ matrix.environment }}
steps:
- name: Trigger Coolify deployment
run: |
curl -X POST "${{ secrets.COOLIFY_WEBHOOK_URL }}" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer ${{ secrets.COOLIFY_WEBHOOK_TOKEN }}" \
-f || (echo "Failed to trigger Coolify deployment for ${{ matrix.environment }}" && exit 1)

4
.gitignore vendored
View File

@@ -1,7 +1,6 @@
 .DS_Store
 server/.env
 .env
-Caddyfile
 server/exportdanswer
 .vercel
 .env*.local
@@ -19,6 +18,3 @@ CLAUDE.local.md
 www/.env.development
 www/.env.production
 .playwright-mcp
-docs/pnpm-lock.yaml
-.secrets
-opencode.json

View File

@@ -1,5 +1 @@
 b9d891d3424f371642cb032ecfd0e2564470a72c:server/tests/test_transcripts_recording_deletion.py:generic-api-key:15
-docs/docs/installation/auth-setup.md:curl-auth-header:250
-docs/docs/installation/daily-setup.md:curl-auth-header:277
-gpu/self_hosted/DEV_SETUP.md:curl-auth-header:74
-gpu/self_hosted/DEV_SETUP.md:curl-auth-header:83

View File

@@ -1,24 +0,0 @@
# Example secrets file for GitHub Actions workflows
# Copy this to .secrets and fill in your values
# These secrets should be configured in GitHub repository settings:
# Settings > Secrets and variables > Actions
# DockerHub Configuration (required for frontend and backend deployment)
# Create a Docker Hub access token at https://hub.docker.com/settings/security
# Username: monadicalsas
DOCKERHUB_TOKEN=your-dockerhub-access-token
# GitHub Token (required for frontend and backend deployment)
# Used by docker/metadata-action for extracting image metadata
# Can use the default GITHUB_TOKEN or create a personal access token
GITHUB_TOKEN=your-github-token-or-use-default-GITHUB_TOKEN
# Coolify Deployment Webhook (required for frontend deployment)
# Used to trigger automatic deployment after image push
# Configure these secrets in GitHub Environments:
# Each environment should have:
# - COOLIFY_WEBHOOK_URL: The webhook URL for that specific deployment
# - COOLIFY_WEBHOOK_TOKEN: The webhook token (can be the same for both if using same token)
# Optional: GitHub Actions Cache Token (for local testing with act)
GHA_CACHE_TOKEN=your-github-token-or-empty

View File

@@ -1,185 +1,5 @@
# Changelog
## [0.27.0](https://github.com/Monadical-SAS/reflector/compare/v0.26.0...v0.27.0) (2025-12-26)
### Features
* devex/hatchet log progress track ([#813](https://github.com/Monadical-SAS/reflector/issues/813)) ([2d0df48](https://github.com/Monadical-SAS/reflector/commit/2d0df487674e5486208cd599e3338ebff8b6e470))
### Bug Fixes
* webhook parity, pipeline rename, waveform constant fix ([#806](https://github.com/Monadical-SAS/reflector/issues/806)) ([5f7b1ff](https://github.com/Monadical-SAS/reflector/commit/5f7b1ff1a68ebbb907684c7c5f55c1f82dac8550))
## [0.26.0](https://github.com/Monadical-SAS/reflector/compare/v0.25.0...v0.26.0) (2025-12-23)
### Features
* parallelize hatchet ([#804](https://github.com/Monadical-SAS/reflector/issues/804)) ([594bcc0](https://github.com/Monadical-SAS/reflector/commit/594bcc09e0ca744163de2f1525ebbf7c52a68448))
## [0.25.0](https://github.com/Monadical-SAS/reflector/compare/v0.24.0...v0.25.0) (2025-12-22)
### Features
* consent disable feature ([#799](https://github.com/Monadical-SAS/reflector/issues/799)) ([2257834](https://github.com/Monadical-SAS/reflector/commit/225783496f2e265d5cb58e3539a20bf6b55589b8))
* durable ([#794](https://github.com/Monadical-SAS/reflector/issues/794)) ([1dac999](https://github.com/Monadical-SAS/reflector/commit/1dac999b56997582ce400e7d56e915adc1e4728d))
* increase daily recording max duration ([#801](https://github.com/Monadical-SAS/reflector/issues/801)) ([f580b99](https://github.com/Monadical-SAS/reflector/commit/f580b996eef49cce16433c505abfc6454dd45de1))
### Bug Fixes
* logout redirect ([#802](https://github.com/Monadical-SAS/reflector/issues/802)) ([f0ee7b5](https://github.com/Monadical-SAS/reflector/commit/f0ee7b531a0911f214ccbb84d399e9a6c9b700c0))
## [0.24.0](https://github.com/Monadical-SAS/reflector/compare/v0.23.2...v0.24.0) (2025-12-18)
### Features
* identify action items ([#790](https://github.com/Monadical-SAS/reflector/issues/790)) ([964cd78](https://github.com/Monadical-SAS/reflector/commit/964cd78bb699d83d012ae4b8c96565df25b90a5d))
### Bug Fixes
* automatically reprocess daily recordings ([#797](https://github.com/Monadical-SAS/reflector/issues/797)) ([5f458aa](https://github.com/Monadical-SAS/reflector/commit/5f458aa4a7ec3d00ca5ec49d62fcc8ad232b138e))
* daily video optimisation ([#789](https://github.com/Monadical-SAS/reflector/issues/789)) ([16284e1](https://github.com/Monadical-SAS/reflector/commit/16284e1ac3faede2b74f0d91b50c0b5612af2c35))
* main menu login ([#800](https://github.com/Monadical-SAS/reflector/issues/800)) ([0bc971b](https://github.com/Monadical-SAS/reflector/commit/0bc971ba966a52d719c8c240b47dc7b3bdea4391))
* retry on workflow timeout ([#798](https://github.com/Monadical-SAS/reflector/issues/798)) ([5f7dfad](https://github.com/Monadical-SAS/reflector/commit/5f7dfadabd3e8017406ad3720ba495a59963ee34))
## [0.23.2](https://github.com/Monadical-SAS/reflector/compare/v0.23.1...v0.23.2) (2025-12-11)
### Bug Fixes
* build on push tags ([#785](https://github.com/Monadical-SAS/reflector/issues/785)) ([d7f140b](https://github.com/Monadical-SAS/reflector/commit/d7f140b7d1f4660d5da7a0da1357f68869e0b5cd))
## [0.23.1](https://github.com/Monadical-SAS/reflector/compare/v0.23.0...v0.23.1) (2025-12-11)
### Bug Fixes
* populate room_name in transcript GET endpoint ([#783](https://github.com/Monadical-SAS/reflector/issues/783)) ([0eba147](https://github.com/Monadical-SAS/reflector/commit/0eba1470181c7b9e0a79964a1ef28c09bcbdd9d7))
## [0.23.0](https://github.com/Monadical-SAS/reflector/compare/v0.22.4...v0.23.0) (2025-12-10)
### Features
* dockerhub ci ([#772](https://github.com/Monadical-SAS/reflector/issues/772)) ([00549f1](https://github.com/Monadical-SAS/reflector/commit/00549f153ade922cf4cb6c5358a7d11a39c426d2))
* llm retries ([#739](https://github.com/Monadical-SAS/reflector/issues/739)) ([61f0e29](https://github.com/Monadical-SAS/reflector/commit/61f0e29d4c51eab54ee67af92141fbb171e8ccaa))
### Bug Fixes
* celery inspect bug sidestep in restart script ([#766](https://github.com/Monadical-SAS/reflector/issues/766)) ([ec17ed7](https://github.com/Monadical-SAS/reflector/commit/ec17ed7b587cf6ee143646baaee67a7c017044d4))
* deploy frontend to coolify ([#779](https://github.com/Monadical-SAS/reflector/issues/779)) ([91650ec](https://github.com/Monadical-SAS/reflector/commit/91650ec65f65713faa7ee0dcfb75af427b7c4ba0))
* hide rooms settings instead of disabling ([#763](https://github.com/Monadical-SAS/reflector/issues/763)) ([3ad78be](https://github.com/Monadical-SAS/reflector/commit/3ad78be7628c0d029296b301a0e87236c76b7598))
* return participant emails from transcript endpoint ([#769](https://github.com/Monadical-SAS/reflector/issues/769)) ([d3a5cd1](https://github.com/Monadical-SAS/reflector/commit/d3a5cd12d2d0d9c32af2d5bd9322e030ef69b85d))
## [0.22.4](https://github.com/Monadical-SAS/reflector/compare/v0.22.3...v0.22.4) (2025-12-02)
### Bug Fixes
* Multitrack mixdown optimisation 2 ([#764](https://github.com/Monadical-SAS/reflector/issues/764)) ([bd5df1c](https://github.com/Monadical-SAS/reflector/commit/bd5df1ce2ebf35d7f3413b295e56937a9a28ef7b))
## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02)
### Bug Fixes
* align daily room settings ([#759](https://github.com/Monadical-SAS/reflector/issues/759)) ([28f87c0](https://github.com/Monadical-SAS/reflector/commit/28f87c09dc459846873d0dde65b03e3d7b2b9399))
## [0.22.2](https://github.com/Monadical-SAS/reflector/compare/v0.22.1...v0.22.2) (2025-12-02)
### Bug Fixes
* daily auto refresh fix ([#755](https://github.com/Monadical-SAS/reflector/issues/755)) ([fe47c46](https://github.com/Monadical-SAS/reflector/commit/fe47c46489c5aa0cc538109f7559cc9accb35c01))
* Skip mixdown for multitrack ([#760](https://github.com/Monadical-SAS/reflector/issues/760)) ([b51b7aa](https://github.com/Monadical-SAS/reflector/commit/b51b7aa9176c1a53ba57ad99f5e976c804a1e80c))
## [0.22.1](https://github.com/Monadical-SAS/reflector/compare/v0.22.0...v0.22.1) (2025-11-27)
### Bug Fixes
* participants update from daily ([#749](https://github.com/Monadical-SAS/reflector/issues/749)) ([7f0b728](https://github.com/Monadical-SAS/reflector/commit/7f0b728991c1b9f9aae702c96297eae63b561ef5))
## [0.22.0](https://github.com/Monadical-SAS/reflector/compare/v0.21.0...v0.22.0) (2025-11-26)
### Features
* Multitrack segmentation ([#747](https://github.com/Monadical-SAS/reflector/issues/747)) ([d63040e](https://github.com/Monadical-SAS/reflector/commit/d63040e2fdc07e7b272e85a39eb2411cd6a14798))
## [0.21.0](https://github.com/Monadical-SAS/reflector/compare/v0.20.0...v0.21.0) (2025-11-26)
### Features
* add transcript format parameter to GET endpoint ([#709](https://github.com/Monadical-SAS/reflector/issues/709)) ([f6ca075](https://github.com/Monadical-SAS/reflector/commit/f6ca07505f34483b02270a2ef3bd809e9d2e1045))
## [0.20.0](https://github.com/Monadical-SAS/reflector/compare/v0.19.0...v0.20.0) (2025-11-25)
### Features
* link transcript participants ([#737](https://github.com/Monadical-SAS/reflector/issues/737)) ([9bec398](https://github.com/Monadical-SAS/reflector/commit/9bec39808fc6322612d8b87e922a6f7901fc01c1))
* transcript restart script ([#742](https://github.com/Monadical-SAS/reflector/issues/742)) ([86d5e26](https://github.com/Monadical-SAS/reflector/commit/86d5e26224bb55a0f1cc785aeda52065bb92ee6f))
## [0.19.0](https://github.com/Monadical-SAS/reflector/compare/v0.18.0...v0.19.0) (2025-11-25)
### Features
* dailyco api module ([#725](https://github.com/Monadical-SAS/reflector/issues/725)) ([4287f8b](https://github.com/Monadical-SAS/reflector/commit/4287f8b8aeee60e51db7539f4dcbda5f6e696bd8))
* dailyco poll ([#730](https://github.com/Monadical-SAS/reflector/issues/730)) ([8e438ca](https://github.com/Monadical-SAS/reflector/commit/8e438ca285152bd48fdc42767e706fb448d3525c))
* multitrack cli ([#735](https://github.com/Monadical-SAS/reflector/issues/735)) ([11731c9](https://github.com/Monadical-SAS/reflector/commit/11731c9d38439b04e93b1c3afbd7090bad11a11f))
### Bug Fixes
* default platform fix ([#736](https://github.com/Monadical-SAS/reflector/issues/736)) ([c442a62](https://github.com/Monadical-SAS/reflector/commit/c442a627873ca667656eeaefb63e54ab10b8d19e))
* parakeet vad not getting the end timestamp ([#728](https://github.com/Monadical-SAS/reflector/issues/728)) ([18ed713](https://github.com/Monadical-SAS/reflector/commit/18ed7133693653ef4ddac6c659a8c14b320d1657))
* start raw tracks recording ([#729](https://github.com/Monadical-SAS/reflector/issues/729)) ([3e47c2c](https://github.com/Monadical-SAS/reflector/commit/3e47c2c0573504858e0d2e1798b6ed31f16b4a5d))
## [0.18.0](https://github.com/Monadical-SAS/reflector/compare/v0.17.0...v0.18.0) (2025-11-14)
### Features
* daily QOL: participants dictionary ([#721](https://github.com/Monadical-SAS/reflector/issues/721)) ([b20cad7](https://github.com/Monadical-SAS/reflector/commit/b20cad76e69fb6a76405af299a005f1ddcf60eae))
### Bug Fixes
* add proccessing page to file upload and reprocessing ([#650](https://github.com/Monadical-SAS/reflector/issues/650)) ([28a7258](https://github.com/Monadical-SAS/reflector/commit/28a7258e45317b78e60e6397be2bc503647eaace))
* copy transcript ([#674](https://github.com/Monadical-SAS/reflector/issues/674)) ([a9a4f32](https://github.com/Monadical-SAS/reflector/commit/a9a4f32324f66c838e081eee42bb9502f38c1db1))
## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13)
### Features
* add API key management UI ([#716](https://github.com/Monadical-SAS/reflector/issues/716)) ([372202b](https://github.com/Monadical-SAS/reflector/commit/372202b0e1a86823900b0aa77be1bfbc2893d8a1))
* daily.co support as alternative to whereby ([#691](https://github.com/Monadical-SAS/reflector/issues/691)) ([1473fd8](https://github.com/Monadical-SAS/reflector/commit/1473fd82dc472c394cbaa2987212ad662a74bcac))
## [0.16.0](https://github.com/Monadical-SAS/reflector/compare/v0.15.0...v0.16.0) (2025-10-24)
### Features
* search date filter ([#710](https://github.com/Monadical-SAS/reflector/issues/710)) ([962c40e](https://github.com/Monadical-SAS/reflector/commit/962c40e2b6428ac42fd10aea926782d7a6f3f902))
## [0.15.0](https://github.com/Monadical-SAS/reflector/compare/v0.14.0...v0.15.0) (2025-10-20)
### Features
* api tokens ([#705](https://github.com/Monadical-SAS/reflector/issues/705)) ([9a258ab](https://github.com/Monadical-SAS/reflector/commit/9a258abc0209b0ac3799532a507ea6a9125d703a))
## [0.14.0](https://github.com/Monadical-SAS/reflector/compare/v0.13.1...v0.14.0) (2025-10-08)

345
CODER_BRIEFING.md Normal file
View File

@@ -0,0 +1,345 @@
# Multi-Provider Video Platform Implementation - Coder Briefing
## Your Mission
Implement multi-provider video platform support in Reflector, allowing the system to work with both Whereby and Daily.co video conferencing providers. The goal is to abstract the current Whereby-only implementation and add Daily.co as a second provider, with the ability to switch between them via environment variables.
**Branch:** `igor/dailico-2` (you're already on it)
**Estimated Time:** 12-16 hours (senior engineer)
**Complexity:** Medium-High (requires careful integration with existing codebase)
---
## What You Have
### 1. **PLAN.md** - Your Technical Specification (2,452 lines)
- Complete step-by-step implementation guide
- All code examples you need
- Architecture diagrams and design rationale
- Testing strategy and success metrics
- **Read this first** to understand the overall approach
### 2. **IMPLEMENTATION_GUIDE.md** - Your Practical Guide
- What to copy vs. adapt vs. rewrite
- Common pitfalls and how to avoid them
- Verification checklists for each phase
- Decision trees for implementation choices
- **Use this as your day-to-day reference**
### 3. **Reference Implementation** - `./reflector-dailyco-reference/`
- Working implementation from 2.5 months ago
- Good architecture and patterns
- **BUT:** 91 commits behind current main, DO NOT merge directly
- Use for inspiration and code patterns only
---
## Critical Context: Why Not Just Merge?
The reference branch (`origin/igor/feat-dailyco`) was started on August 1, 2025 and is now severely diverged from main:
- **91 commits behind main**
- Main has 12x more changes (45,840 insertions vs 3,689)
- Main added: calendar integration, webhooks, full-text search, React Query migration, security fixes
- Reference removed: features that main still has and needs
**Merging would be a disaster.** We're implementing fresh on current main, using the reference for validated patterns.
---
## High-Level Approach
### Phase 1: Analysis (2 hours)
- Study current Whereby integration
- Define abstraction requirements
- Create standard data models
### Phase 2: Abstraction Layer (4-5 hours)
- Build platform abstraction (base class, registry, factory)
- Extract Whereby into the abstraction
- Update database schema (add `platform` field)
- Integrate into rooms.py **without breaking calendar/webhooks**
### Phase 3: Daily.co Implementation (4-5 hours)
- Implement Daily.co client
- Add webhook handler
- Create frontend components (rewrite API calls for React Query)
- Add recording processing
### Phase 4: Testing (2-3 hours)
- Unit tests for platform abstraction
- Integration tests for webhooks
- Manual testing with both providers
---
## Key Files You'll Touch
### Backend (New)
```
server/reflector/video_platforms/
├── __init__.py
├── base.py ← Abstract base class
├── models.py ← Platform, MeetingData, VideoPlatformConfig
├── registry.py ← Platform registration system
├── factory.py ← Client creation and config
├── whereby.py ← Whereby client wrapper
├── daily.py ← Daily.co client
└── mock.py ← Mock client for testing
server/reflector/views/daily.py ← Daily.co webhooks
server/tests/test_video_platforms.py ← Platform tests
server/tests/test_daily_webhook.py ← Webhook tests
```
### Backend (Modified - Careful!)
```
server/reflector/settings.py ← Add Daily.co settings
server/reflector/db/rooms.py ← Add platform field, PRESERVE calendar fields
server/reflector/db/meetings.py ← Add platform field
server/reflector/views/rooms.py ← Integrate abstraction, PRESERVE calendar/webhooks
server/reflector/worker/process.py ← Add process_recording_from_url task
server/reflector/app.py ← Register daily router
server/env.example ← Document new env vars
```
### Frontend (New)
```
www/app/[roomName]/components/
├── RoomContainer.tsx ← Platform router
├── DailyRoom.tsx ← Daily.co component (rewrite API calls!)
└── WherebyRoom.tsx ← Extract existing logic
```
### Frontend (Modified)
```
www/app/[roomName]/page.tsx ← Use RoomContainer
www/package.json ← Add @daily-co/daily-js
```
### Database
```
server/migrations/versions/XXXXXX_add_platform_support.py ← Generate fresh migration
```
---
## Critical Warnings ⚠️
### 1. **DO NOT Copy Database Migrations**
The reference migration has the wrong `down_revision` and is based on old schema.
```bash
# Instead:
cd server
uv run alembic revision -m "add_platform_support"
# Then edit the generated file
```
### 2. **DO NOT Remove Main's Features**
Main has calendar integration, webhooks, ICS sync that reference doesn't have.
When modifying `rooms.py`, only change meeting creation logic, preserve everything else.
### 3. **DO NOT Copy Frontend API Calls**
Reference uses old OpenAPI client. Main uses React Query.
Check how main currently makes API calls and replicate that pattern.
### 4. **DO NOT Copy package.json/migrations**
These files are severely outdated in reference.
### 5. **Preserve Type Safety**
Use `TYPE_CHECKING` imports to avoid circular dependencies:
```python
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from reflector.db.rooms import Room
```
---
## How to Start
### Day 1 Morning: Setup & Understanding (2-3 hours)
```bash
# 1. Verify you're on the right branch
git branch
# Should show: igor/dailico-2
# 2. Read the docs (in order)
# - PLAN.md (skim to understand scope, read Phase 1 carefully)
# - IMPLEMENTATION_GUIDE.md (read fully, bookmark it)
# 3. Study current Whereby integration
cat server/reflector/views/rooms.py | grep -A 20 "whereby"
cat www/app/[roomName]/page.tsx
# 4. Check reference implementation structure
ls -la reflector-dailyco-reference/server/reflector/video_platforms/
```
### Day 1 Afternoon: Phase 1 Execution (2-3 hours)
```bash
# 5. Copy video_platforms directory from reference
cp -r reflector-dailyco-reference/server/reflector/video_platforms/ \
server/reflector/
# 6. Review and fix imports
cd server
uv run ruff check reflector/video_platforms/
# 7. Add settings to settings.py (see PLAN.md Phase 2.7)
# 8. Test imports work
uv run python -c "from reflector.video_platforms import create_platform_client; print('OK')"
```
### Day 2: Phase 2 - Database & Integration (4-5 hours)
```bash
# 9. Generate migration
uv run alembic revision -m "add_platform_support"
# Edit the file following PLAN.md Phase 2.8
# 10. Update Room/Meeting models
# Add platform field, PRESERVE all existing fields
# 11. Integrate into rooms.py
# Carefully modify meeting creation, preserve calendar/webhooks
# 12. Add Daily.co webhook handler
cp reflector-dailyco-reference/server/reflector/views/daily.py \
server/reflector/views/
# Register in app.py
```
### Day 3: Phase 3 - Frontend & Testing (4-5 hours)
```bash
# 13. Create frontend components
mkdir -p www/app/[roomName]/components
# 14. Add Daily.co dependency
cd www
pnpm add @daily-co/daily-js@^0.81.0
# 15. Create RoomContainer, DailyRoom, WherebyRoom
# IMPORTANT: Rewrite API calls using React Query patterns
# 16. Regenerate types
pnpm openapi
# 17. Copy and adapt tests
cp reflector-dailyco-reference/server/tests/test_*.py server/tests/
# 18. Run tests
cd server
REDIS_HOST=localhost \
CELERY_BROKER_URL=redis://localhost:6379/1 \
uv run pytest tests/test_video_platforms.py -v
```
---
## Verification Checklist
After implementation, all of these must pass:
**Backend:**
- [ ] `cd server && uv run ruff check .` passes
- [ ] `uv run alembic upgrade head` works cleanly
- [ ] `uv run pytest tests/test_video_platforms.py` passes
- [ ] Can import: `from reflector.video_platforms import create_platform_client`
- [ ] Settings has all Daily.co variables
**Frontend:**
- [ ] `cd www && pnpm lint` passes
- [ ] No TypeScript errors
- [ ] `pnpm openapi` generates platform field
- [ ] No `@ts-ignore` for platform field
**Integration:**
- [ ] Whereby meetings still work (existing flow unchanged)
- [ ] Calendar/webhook features still work in rooms.py
- [ ] env.example documents all new variables
---
## When You're Stuck
### Check These Resources:
1. **PLAN.md** - Detailed code examples for your exact scenario
2. **IMPLEMENTATION_GUIDE.md** - Common pitfalls section
3. **Reference code** - See how it was solved before
4. **Git diff** - Compare reference to your implementation
### Compare Files:
```bash
# See what reference did
diff reflector-dailyco-reference/server/reflector/views/rooms.py \
server/reflector/views/rooms.py
# See what changed in main since reference branch
git log --oneline --since="2025-08-01" -- server/reflector/views/rooms.py
```
### Common Issues:
- **Circular imports:** Use `TYPE_CHECKING` pattern
- **Tests fail with postgres error:** Use `REDIS_HOST=localhost` env vars
- **Frontend API calls broken:** Check current React Query patterns in main
- **Migrations fail:** Ensure you generated fresh, not copied
---
## Success Looks Like
When you're done:
- ✅ All tests pass
- ✅ Linting passes
- ✅ Can create Whereby meetings (unchanged behavior)
- ✅ Can create Daily.co meetings (with env vars)
- ✅ Calendar/webhooks still work
- ✅ Frontend has no TypeScript errors
- ✅ Platform selection via environment variables works
---
## Communication
If you need clarification on requirements, have questions about architecture decisions, or find issues with the spec, document them clearly with:
- What you expected
- What you found
- Your proposed solution
The PLAN.md document is comprehensive but you may find edge cases. Use your engineering judgment and document decisions.
---
## Final Notes
**This is not a simple copy-paste job.** You're doing careful integration work where you need to:
- Understand the abstraction pattern (PLAN.md)
- Preserve all of main's features
- Adapt reference code to current patterns
- Think about edge cases and testing
Take your time with Phase 2 (rooms.py integration) - that's where most bugs will come from if you accidentally break calendar/webhook features.
**Good luck! You've got comprehensive specs, working reference code, and a clean starting point. You can do this.**
---
## Quick Reference
```bash
# Your workspace
├── PLAN.md ← Complete technical spec (read first)
├── IMPLEMENTATION_GUIDE.md ← Practical guide (bookmark this)
├── CODER_BRIEFING.md ← This file
└── reflector-dailyco-reference/ ← Reference implementation (inspiration only)
# Key commands
cd server && uv run ruff check . # Lint backend
cd www && pnpm lint # Lint frontend
cd server && uv run alembic revision -m "..." # Create migration
cd www && pnpm openapi # Regenerate types
cd server && uv run pytest -v # Run tests
```

View File

@@ -1,22 +0,0 @@
# Reflector Caddyfile
# Replace example.com with your actual domains
# CORS is handled by the backend - Caddy just proxies
#
# For environment variable substitution, set:
# FRONTEND_DOMAIN=app.example.com
# API_DOMAIN=api.example.com
# AUTHENTIK_DOMAIN=authentik.example.com (optional, for authentication)
# Or edit this file directly with your domains.
{$FRONTEND_DOMAIN:app.example.com} {
reverse_proxy web:3000
}
{$API_DOMAIN:api.example.com} {
reverse_proxy server:1250
}
# Uncomment if using Authentik for authentication (see auth-setup.md)
# {$AUTHENTIK_DOMAIN:authentik.example.com} {
# reverse_proxy authentik-server-1:9000
# }

489
IMPLEMENTATION_GUIDE.md Normal file
View File

@@ -0,0 +1,489 @@
# Daily.co Implementation Guide
## Overview
Implement multi-provider video platform support (Whereby + Daily.co) following PLAN.md.
## Reference Code Location
- **Reference branch:** `origin/igor/feat-dailyco` (on remote)
- **Worktree location:** `./reflector-dailyco-reference/`
- **Status:** Reference only - DO NOT merge or copy directly
## What Exists in Reference Branch (For Inspiration)
### ✅ Can Use As Reference (Well-Implemented)
```
server/reflector/video_platforms/
├── base.py ← Platform abstraction (good design, copy-safe)
├── models.py ← Data models (copy-safe)
├── registry.py ← Registry pattern (copy-safe)
├── factory.py ← Factory pattern (needs settings updates)
├── whereby.py ← Whereby client (needs adaptation)
├── daily.py ← Daily.co client (needs adaptation)
└── mock.py ← Mock client (copy-safe for tests)
server/reflector/views/daily.py ← Webhook handler (needs adaptation)
server/tests/test_video_platforms.py ← Tests (good reference)
server/tests/test_daily_webhook.py ← Tests (good reference)
www/app/[roomName]/components/
├── RoomContainer.tsx ← Platform router (needs React Query)
├── DailyRoom.tsx ← Daily component (needs React Query)
└── WherebyRoom.tsx ← Whereby extraction (needs React Query)
```
### ⚠️ Needs Significant Changes (Use Logic Only)
- `server/reflector/db/rooms.py` - Reference removed calendar/webhook fields that main has
- `server/reflector/db/meetings.py` - Same issue (missing user_id handling differences)
- `server/reflector/views/rooms.py` - Main has calendar integration, webhooks, ICS sync
- `server/reflector/worker/process.py` - Main has different recording flow
- Migration files - Must regenerate against current main schema
### ❌ Do NOT Use (Outdated/Incompatible)
- `package.json`/`pnpm-lock.yaml` - Main uses different dependency versions
- Frontend API client calls - Main uses React Query (reference uses old OpenAPI client)
- Database migrations - Must create new ones from scratch
- Any files that delete features present in main (search, calendar, webhooks)
## Key Differences: Reference vs Current Main
| Aspect | Reference Branch | Current Main | Action Required |
|--------|------------------|--------------|-----------------|
| **API client** | Old OpenAPI generated | React Query hooks | Rewrite all API calls |
| **Database schema** | Simplified (removed features) | Has calendar, webhooks, full-text search | Merge carefully, preserve main features |
| **Settings** | Aug 2025 structure | Current structure | Adapt carefully |
| **Migrations** | Branched from Aug 1 | Current main (91+ commits ahead) | Regenerate from scratch |
| **Frontend deps** | `@daily-co/daily-js@0.81.0` | Check current versions | Update to compatible versions |
| **Package manager** | yarn | pnpm (maybe both?) | Use what main uses |
## Branch Divergence Analysis
**The reference branch is 91 commits behind main and severely diverged:**
- Reference: 8 commits, 3,689 insertions, 425 deletions
- Main since divergence: 320 files changed, 45,840 insertions, 16,827 deletions
- **Main has 12x more changes**
**Major features in main that reference lacks:**
1. Calendar integration (ICS sync with rooms)
2. Self-hosted GPU API infrastructure
3. Frontend OpenAPI React Query migration
4. Full-text search (backend + frontend)
5. Webhook system for room events
6. Environment variable migration
7. Security fixes and auth improvements
8. Docker production frontend
9. Meeting user ID removal (schema change)
10. NextJS version upgrades
**High conflict risk files:**
- `server/reflector/views/rooms.py` - 12x more changes in main
- `server/reflector/db/rooms.py` - Main added 7+ fields
- `www/package.json` - NextJS major version bump
- Database migrations - 20+ new migrations in main
## Implementation Approach
### Phase 1: Copy Clean Abstractions (1-2 hours)
**Files to copy directly from reference:**
```bash
# Core abstraction (review but mostly safe to copy)
cp -r reflector-dailyco-reference/server/reflector/video_platforms/ \
server/reflector/
# BUT review each file for:
# - Import paths (make sure they match current main)
# - Settings references (adapt to current settings.py)
# - Type imports (ensure no circular dependencies)
```
**After copying, immediately:**
```bash
cd server
# Check for issues
uv run ruff check reflector/video_platforms/
# Fix any import errors or type issues
```
### Phase 2: Adapt to Current Main (2-3 hours)
**2.1 Settings Integration**
File: `server/reflector/settings.py`
Add at the appropriate location (near existing Whereby settings):
```python
# Daily.co API Integration (NEW)
DAILY_API_KEY: str | None = None
DAILY_WEBHOOK_SECRET: str | None = None
DAILY_SUBDOMAIN: str | None = None
AWS_DAILY_S3_BUCKET: str | None = None
AWS_DAILY_S3_REGION: str = "us-west-2"
AWS_DAILY_ROLE_ARN: str | None = None
# Platform Migration Feature Flags (NEW)
DAILY_MIGRATION_ENABLED: bool = False # Conservative default
DAILY_MIGRATION_ROOM_IDS: list[str] = []
DEFAULT_VIDEO_PLATFORM: Literal["whereby", "daily"] = "whereby"
```
**2.2 Database Migration**
⚠️ **CRITICAL: Do NOT copy migration from reference**
Generate new migration:
```bash
cd server
uv run alembic revision -m "add_platform_support"
```
Edit the generated migration file to add `platform` column:
```python
def upgrade():
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.add_column(
sa.Column("platform", sa.String(), nullable=False, server_default="whereby")
)
with op.batch_alter_table("meeting", schema=None) as batch_op:
batch_op.add_column(
sa.Column("platform", sa.String(), nullable=False, server_default="whereby")
)
```
**2.3 Update Database Models**
File: `server/reflector/db/rooms.py`
Add platform field (preserve all existing fields from main):
```python
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from reflector.video_platforms.models import Platform
class Room:
# ... ALL existing fields from main (calendar, webhooks, etc.) ...
# NEW: Platform field
platform: "Platform" = sqlalchemy.Column(
sqlalchemy.String,
nullable=False,
server_default="whereby",
)
```
File: `server/reflector/db/meetings.py`
Same approach - add platform field, preserve everything from main.
**2.4 Integrate Platform Abstraction into rooms.py**
⚠️ **This is the most delicate part - main has calendar/webhook features**
File: `server/reflector/views/rooms.py`
Strategy:
1. Add imports at top
2. Modify meeting creation logic only
3. Preserve all calendar/webhook/ICS logic from main
```python
# Add imports
from reflector.video_platforms import (
create_platform_client,
get_platform_for_room,
)
# In create_meeting endpoint:
# OLD: Direct Whereby API calls
# NEW: Platform abstraction
# Find the meeting creation section and replace:
platform = get_platform_for_room(room.id)
client = create_platform_client(platform)
meeting_data = await client.create_meeting(
room_name_prefix=room.name,
end_date=meeting_data.end_date,
room=room,
)
# Then create Meeting record with meeting_data.platform, meeting_data.meeting_id, etc.
```
**2.5 Add Daily.co Webhook Handler**
Copy from reference, minimal changes needed:
```bash
cp reflector-dailyco-reference/server/reflector/views/daily.py \
server/reflector/views/
```
Register in `server/reflector/app.py`:
```python
from reflector.views import daily
app.include_router(daily.router, prefix="/v1/daily", tags=["daily"])
```
**2.6 Add Recording Processing Task**
File: `server/reflector/worker/process.py`
Add the `process_recording_from_url` task from reference (copy the function).
### Phase 3: Frontend Adaptation (3-4 hours)
**3.1 Determine Current API Client Pattern**
First, check how main currently makes API calls:
```bash
cd www
grep -r "api\." app/ | head -20
# Look for patterns like: api.v1Something()
```
**3.2 Create Components**
Copy component structure from reference but **rewrite all API calls**:
```bash
mkdir -p www/app/[roomName]/components
```
Files to create:
- `RoomContainer.tsx` - Platform router (mostly copy-safe, just fix imports)
- `DailyRoom.tsx` - Needs React Query API calls
- `WherebyRoom.tsx` - Extract current room page logic
**Example React Query pattern** (adapt to your actual API):
```typescript
import { api } from '@/app/api/client'
// In DailyRoom.tsx
const handleConsent = async () => {
try {
await api.v1MeetingAudioConsent({
path: { meeting_id: meeting.id },
body: { consent: true },
})
// ...
} catch (error) {
// ...
}
}
```
**3.3 Add Daily.co Dependency**
Check current package manager:
```bash
cd www
ls package-lock.json yarn.lock pnpm-lock.yaml
```
Then install:
```bash
# If using pnpm
pnpm add @daily-co/daily-js@^0.81.0
# If using yarn
yarn add @daily-co/daily-js@^0.81.0
```
**3.4 Update TypeScript Types**
After backend changes, regenerate types:
```bash
cd www
pnpm openapi # or yarn openapi
```
This should pick up the new `platform` field on Meeting type.
### Phase 4: Testing (2-3 hours)
**4.1 Copy Test Structure**
```bash
cp reflector-dailyco-reference/server/tests/test_video_platforms.py \
server/tests/
cp reflector-dailyco-reference/server/tests/test_daily_webhook.py \
server/tests/
```
**4.2 Fix Test Imports and Fixtures**
Update imports to match current test infrastructure:
- Check `server/tests/conftest.py` for fixture patterns
- Update database access patterns if changed
- Fix any import errors
**4.3 Run Tests**
```bash
cd server
# Run with environment variables for Mac
REDIS_HOST=localhost \
CELERY_BROKER_URL=redis://localhost:6379/1 \
CELERY_RESULT_BACKEND=redis://localhost:6379/1 \
uv run pytest tests/test_video_platforms.py -v
```
### Phase 5: Environment Configuration
**Update `server/env.example`:**
Add at the end:
```bash
# Daily.co API Integration
DAILY_API_KEY=your-daily-api-key
DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
DAILY_SUBDOMAIN=your-subdomain
AWS_DAILY_S3_BUCKET=your-daily-bucket
AWS_DAILY_S3_REGION=us-west-2
AWS_DAILY_ROLE_ARN=arn:aws:iam::ACCOUNT:role/DailyRecording
# Platform Selection
DAILY_MIGRATION_ENABLED=false # Master switch
DAILY_MIGRATION_ROOM_IDS=[] # Specific room IDs
DEFAULT_VIDEO_PLATFORM=whereby # Default platform
```
## Decision Tree: Copy vs Adapt vs Rewrite
```
┌─ Is it pure abstraction logic? (base.py, registry.py, models.py)
│ YES → Copy directly, review imports
│ NO → Continue ↓
├─ Does it touch database models?
│ YES → Adapt carefully, preserve main's fields
│ NO → Continue ↓
├─ Does it make API calls on frontend?
│ YES → Rewrite using React Query
│ NO → Continue ↓
├─ Is it a database migration?
│ YES → Generate fresh from current schema
│ NO → Continue ↓
└─ Does it touch rooms.py or core business logic?
YES → Merge carefully, preserve calendar/webhooks
NO → Safe to adapt from reference
```
## Verification Checklist
After each phase, verify:
**Phase 1 (Abstraction Layer):**
- [ ] `uv run ruff check server/reflector/video_platforms/` passes
- [ ] No circular import errors
- [ ] Can import `from reflector.video_platforms import create_platform_client`
**Phase 2 (Backend Integration):**
- [ ] `uv run ruff check server/` passes
- [ ] Migration file generated (not copied)
- [ ] Room and Meeting models have platform field
- [ ] rooms.py still has calendar/webhook features
**Phase 3 (Frontend):**
- [ ] `pnpm lint` passes
- [ ] No TypeScript errors
- [ ] No `@ts-ignore` for platform field
- [ ] API calls use React Query patterns
**Phase 4 (Testing):**
- [ ] Tests can be collected: `pytest tests/test_video_platforms.py --collect-only`
- [ ] Database fixtures work
- [ ] Mock platform works
**Phase 5 (Config):**
- [ ] env.example has Daily.co variables
- [ ] settings.py has all new variables
- [ ] No duplicate variable definitions
## Common Pitfalls
### 1. Database Schema Conflicts
**Problem:** Reference removed fields that main has (calendar, webhooks)
**Solution:** Always preserve main's fields, only add platform field
### 2. Migration Conflicts
**Problem:** Reference migration has wrong `down_revision`
**Solution:** Always generate fresh migration from current main
### 3. Frontend API Calls
**Problem:** Reference uses old API client patterns
**Solution:** Check current main's API usage, replicate that pattern
### 4. Import Errors
**Problem:** Circular imports with TYPE_CHECKING
**Solution:** Use `if TYPE_CHECKING:` for Room/Meeting imports in video_platforms
### 5. Test Database Issues
**Problem:** Tests fail with "could not translate host name 'postgres'"
**Solution:** Use environment variables: `REDIS_HOST=localhost DATABASE_URL=...`
### 6. Preserved Features Broken
**Problem:** Calendar/webhook features stop working
**Solution:** Carefully review rooms.py diff, only change meeting creation, not calendar logic
## File Modification Summary
**New files (can copy):**
- `server/reflector/video_platforms/*.py` (entire directory)
- `server/reflector/views/daily.py`
- `server/tests/test_video_platforms.py`
- `server/tests/test_daily_webhook.py`
- `www/app/[roomName]/components/RoomContainer.tsx`
- `www/app/[roomName]/components/DailyRoom.tsx`
- `www/app/[roomName]/components/WherebyRoom.tsx`
**Modified files (careful merging):**
- `server/reflector/settings.py` - Add Daily.co settings
- `server/reflector/db/rooms.py` - Add platform field
- `server/reflector/db/meetings.py` - Add platform field
- `server/reflector/views/rooms.py` - Integrate platform abstraction
- `server/reflector/worker/process.py` - Add process_recording_from_url
- `server/reflector/app.py` - Register daily router
- `server/env.example` - Add Daily.co variables
- `www/app/[roomName]/page.tsx` - Use RoomContainer
- `www/package.json` - Add @daily-co/daily-js
**Generated files (do not copy):**
- `server/migrations/versions/XXXXXX_add_platform_support.py` - Generate fresh
## Success Metrics
Implementation is complete when:
- [ ] All tests pass (including new platform tests)
- [ ] Linting passes (ruff, pnpm lint)
- [ ] Migration applies cleanly: `uv run alembic upgrade head`
- [ ] Can create Whereby meeting (existing flow unchanged)
- [ ] Can create Daily.co meeting (with env vars set)
- [ ] Frontend loads without TypeScript errors
- [ ] No features from main were accidentally removed
## Getting Help
**Reference documentation locations:**
- Implementation plan: `PLAN.md`
- Reference implementation: `./reflector-dailyco-reference/`
- Current main codebase: `./` (current directory)
**Compare implementations:**
```bash
# Compare specific files
diff reflector-dailyco-reference/server/reflector/video_platforms/base.py \
server/reflector/video_platforms/base.py
# See what changed in rooms.py between reference branch point and now
git log --oneline --since="2025-08-01" -- server/reflector/views/rooms.py
```
**Key insight:** The reference branch validates the approach and provides working code patterns, but you're implementing fresh against current main to avoid merge conflicts and preserve all new features.

2517
PLAN.md Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -168,12 +168,6 @@ You can manually process an audio file by calling the process tool:
uv run python -m reflector.tools.process path/to/audio.wav uv run python -m reflector.tools.process path/to/audio.wav
``` ```
## Reprocessing any transcription
```bash
uv run -m reflector.tools.process_transcript 81ec38d1-9dd7-43d2-b3f8-51f4d34a07cd --sync
```
## Build-time env variables ## Build-time env variables
Next.js projects are more used to NEXT_PUBLIC_ prefixed buildtime vars. We don't have those for the reason we need to serve a customizable prebuilt docker container. Next.js projects are more used to NEXT_PUBLIC_ prefixed buildtime vars. We don't have those for the reason we need to serve a customizable prebuilt docker container.

View File

@@ -1,61 +1,28 @@
# Production Docker Compose configuration # Production Docker Compose configuration for Frontend
# Usage: docker compose -f docker-compose.prod.yml up -d # Usage: docker compose -f docker-compose.prod.yml up -d
#
# Prerequisites:
# 1. Copy .env.example to .env and configure for both server/ and www/
# 2. Copy Caddyfile.example to Caddyfile and edit with your domains
# 3. Deploy Modal GPU functions (see gpu/modal_deployments/deploy-all.sh)
services: services:
web: web:
image: monadicalsas/reflector-frontend:latest build:
restart: unless-stopped context: ./www
env_file: dockerfile: Dockerfile
- ./www/.env image: reflector-frontend:latest
pull_policy: always
environment: environment:
- KV_URL=redis://redis:6379 - KV_URL=${KV_URL:-redis://redis:6379}
- SITE_URL=${SITE_URL}
- API_URL=${API_URL}
- WEBSOCKET_URL=${WEBSOCKET_URL}
- NEXTAUTH_URL=${NEXTAUTH_URL:-http://localhost:3000}
- NEXTAUTH_SECRET=${NEXTAUTH_SECRET:-changeme-in-production}
- AUTHENTIK_ISSUER=${AUTHENTIK_ISSUER}
- AUTHENTIK_CLIENT_ID=${AUTHENTIK_CLIENT_ID}
- AUTHENTIK_CLIENT_SECRET=${AUTHENTIK_CLIENT_SECRET}
- AUTHENTIK_REFRESH_TOKEN_URL=${AUTHENTIK_REFRESH_TOKEN_URL}
- SENTRY_DSN=${SENTRY_DSN}
- SENTRY_IGNORE_API_RESOLUTION_ERROR=${SENTRY_IGNORE_API_RESOLUTION_ERROR:-1}
depends_on: depends_on:
- redis - redis
server:
image: monadicalsas/reflector-backend:latest
restart: unless-stopped restart: unless-stopped
env_file:
- ./server/.env
environment:
ENTRYPOINT: server
depends_on:
- postgres
- redis
volumes:
- server_data:/app/data
- ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro
worker:
image: monadicalsas/reflector-backend:latest
restart: unless-stopped
env_file:
- ./server/.env
environment:
ENTRYPOINT: worker
depends_on:
- postgres
- redis
volumes:
- server_data:/app/data
- ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro
beat:
image: monadicalsas/reflector-backend:latest
restart: unless-stopped
env_file:
- ./server/.env
environment:
ENTRYPOINT: beat
depends_on:
- postgres
- redis
redis: redis:
image: redis:7.2-alpine image: redis:7.2-alpine
@@ -68,46 +35,5 @@ services:
volumes: volumes:
- redis_data:/data - redis_data:/data
postgres:
image: postgres:17-alpine
restart: unless-stopped
environment:
POSTGRES_USER: reflector
POSTGRES_PASSWORD: reflector
POSTGRES_DB: reflector
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U reflector"]
interval: 30s
timeout: 3s
retries: 3
caddy:
image: caddy:2-alpine
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
- caddy_data:/data
- caddy_config:/config
depends_on:
- web
- server
docs:
build: ./docs
restart: unless-stopped
volumes: volumes:
redis_data: redis_data:
postgres_data:
server_data:
caddy_data:
caddy_config:
networks:
default:
attachable: true

View File

@@ -34,20 +34,6 @@ services:
environment: environment:
ENTRYPOINT: beat ENTRYPOINT: beat
hatchet-worker:
build:
context: server
volumes:
- ./server/:/app/
- /app/.venv
env_file:
- ./server/.env
environment:
ENTRYPOINT: hatchet-worker
depends_on:
hatchet:
condition: service_healthy
redis: redis:
image: redis:7.2 image: redis:7.2
ports: ports:
@@ -69,7 +55,6 @@ services:
postgres: postgres:
image: postgres:17 image: postgres:17
command: postgres -c 'max_connections=200'
ports: ports:
- 5432:5432 - 5432:5432
environment: environment:
@@ -78,42 +63,6 @@ services:
POSTGRES_DB: reflector POSTGRES_DB: reflector
volumes: volumes:
- ./data/postgres:/var/lib/postgresql/data - ./data/postgres:/var/lib/postgresql/data
- ./server/docker/init-hatchet-db.sql:/docker-entrypoint-initdb.d/init-hatchet-db.sql:ro
healthcheck:
test: ["CMD-SHELL", "pg_isready -d reflector -U reflector"]
interval: 10s
timeout: 10s
retries: 5
start_period: 10s
hatchet:
image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest
ports:
- "8889:8888"
- "7078:7077"
depends_on:
postgres:
condition: service_healthy
environment:
DATABASE_URL: "postgresql://reflector:reflector@postgres:5432/hatchet?sslmode=disable"
SERVER_AUTH_COOKIE_DOMAIN: localhost
SERVER_AUTH_COOKIE_INSECURE: "t"
SERVER_GRPC_BIND_ADDRESS: "0.0.0.0"
SERVER_GRPC_INSECURE: "t"
SERVER_GRPC_BROADCAST_ADDRESS: hatchet:7077
SERVER_GRPC_PORT: "7077"
SERVER_URL: http://localhost:8889
SERVER_AUTH_SET_EMAIL_VERIFIED: "t"
# SERVER_DEFAULT_ENGINE_VERSION: "V1" # default
SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: hatchet:7077
volumes:
- ./data/hatchet-config:/config
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8888/api/live"]
interval: 30s
timeout: 10s
retries: 5
start_period: 30s
networks: networks:
default: default:

20
docs/.gitignore vendored
View File

@@ -1,20 +0,0 @@
# Dependencies
/node_modules
# Production
/build
# Generated files
.docusaurus
.cache-loader
# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*

View File

@@ -1,39 +0,0 @@
FROM node:18-alpine AS builder
WORKDIR /app
# Install curl for fetching OpenAPI spec
RUN apk add --no-cache curl
# Copy package files
COPY package*.json ./
# Install dependencies
RUN npm ci
# Copy source
COPY . .
# Fetch OpenAPI spec from production API
ARG OPENAPI_URL=https://api-reflector.monadical.com/openapi.json
RUN mkdir -p ./static && curl -sf "${OPENAPI_URL}" -o ./static/openapi.json || echo '{}' > ./static/openapi.json
# Fix docusaurus config: change onBrokenLinks to 'warn' for Docker build
RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts
# Build static site (skip prebuild hook by calling docusaurus directly)
RUN npx docusaurus build
# Production image
FROM nginx:alpine
# Copy built static files
COPY --from=builder /app/build /usr/share/nginx/html
# Healthcheck for container orchestration
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost/ || exit 1
# Expose port
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]

View File

@@ -1,41 +0,0 @@
# Website
This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
### Installation
```
$ yarn
```
### Local Development
```
$ yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
### Build
```
$ yarn build
```
This command generates static content into the `build` directory and can be served using any static contents hosting service.
### Deployment
Using SSH:
```
$ USE_SSH=true yarn deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.

View File

@@ -1,48 +0,0 @@
# Documentation TODO - PR #778 Review Comments
Remaining items from Tito's review. See CHANGES.md for completed items.
---
## Remaining Items
| File | Issue | Priority | Notes |
|------|-------|----------|-------|
| ~~`intro.md:10`~~ | ~~Add screenshots~~ | ~~Low~~ | ✅ **DONE** - Added transcript view screenshot |
| `file-pipeline.md:47` | chunk_size example shows 30s | Low | Unclear what example config should show (~16s actual) |
| ~~`self-hosted-gpu-setup.md:235`~~ | ~~systemd template in repo~~ | ~~Medium~~ | ✅ **REMOVED** - Systemd support removed entirely |
| ~~`installation/overview.md:85`~~ | ~~uv tool install~~ | ~~Low~~ | ✅ **DONE** - Changed to `uv tool install modal` |
| ~~`installation/overview.md:101`~~ | ~~"Why systemd?"~~ | ~~Low~~ | ✅ **REMOVED** - Systemd support removed entirely |
| `installation/overview.md:271` | Caddyfile copy removal | Low | Keeping for clarity |
---
## Skipped (Decided Not To Fix)
| File | Issue | Reason |
|------|-------|--------|
| `installation/overview.md:40` | Model size requirements | Uncertain about exact requirements |
| `installation/overview.md:136` | WebRTC ports | Handled by Daily/Whereby, not us |
| `installation/overview.md:136` | Security section | Risk of incomplete/misleading docs |
| `installation/overview.md:179` | AWS setup order | Low priority, works as-is |
| `installation/overview.md:410` | Redundant next steps | Issue doesn't exist (file ends at 401) |
---
## Completed
See CHANGES.md for full list. Summary:
### Removals (9)
- Encrypted data storage, session management, analytics claims
- "coming soon" GPU, 30-second segments, CPU optimization
- Encryption at rest, manual migrations, modprobe commands
### Fixes (9)
- WebRTC + Daily/Whereby, 4 API endpoints, Docker docs link
- NVIDIA steps merged, compose.yml referenced, cross-reference duplicate
- tee→nano, MOV format, troubleshooting link
### Previously Fixed (7)
- Blog removal, Daily.co added, rate limiting removed (x2)
- PII claim removed, python→yaml, LUFS removed

View File

@@ -1,777 +0,0 @@
#!/bin/bash
# Create directory structure
mkdir -p docs/concepts
mkdir -p docs/installation
mkdir -p docs/pipelines
mkdir -p docs/reference/architecture
mkdir -p docs/reference/processors
mkdir -p docs/reference/api
# Create all documentation files with content
echo "Creating documentation files..."
# Concepts - Modes
cat > docs/concepts/modes.md << 'EOF'
---
sidebar_position: 2
title: Operating Modes
---
# Operating Modes
Reflector operates in two distinct modes to accommodate different use cases and security requirements.
## Public Mode
Public mode provides immediate access to core transcription features without requiring authentication.
### Features Available
- **File Upload**: Process audio files up to 2GB
- **Live Transcription**: Stream audio from microphone
- **Basic Processing**: Transcription and diarization
- **Temporary Storage**: Results available for 24 hours
### Limitations
- No persistent storage
- No meeting rooms
- Limited to single-user sessions
- No team collaboration features
### Use Cases
- Quick transcription needs
- Testing and evaluation
- Individual users
- Public demonstrations
## Private Mode
Private mode unlocks the full potential of Reflector with authentication and persistent storage.
### Additional Features
- **Virtual Meeting Rooms**: Whereby integration
- **Team Collaboration**: Share transcripts with team
- **Persistent Storage**: Long-term transcript archive
- **Advanced Analytics**: Meeting insights and trends
- **Custom Integration**: Webhooks and API access
- **User Management**: Role-based access control
### Authentication Options
#### Authentik Integration
Enterprise-grade SSO with support for:
- SAML 2.0
- OAuth 2.0 / OIDC
- LDAP / Active Directory
- Multi-factor authentication
#### JWT Authentication
Stateless token-based auth for:
- API access
- Service-to-service communication
- Mobile applications
### Room Management
Virtual rooms provide dedicated spaces for meetings:
- **Persistent URLs**: Same link for recurring meetings
- **Access Control**: Invite-only or open rooms
- **Recording Consent**: Automatic consent management
- **Custom Settings**: Per-room configuration
## Mode Selection
The mode is determined by your deployment configuration:
```yaml
# Public Mode (no authentication)
REFLECTOR_AUTH_BACKEND=none
# Private Mode (with authentication)
REFLECTOR_AUTH_BACKEND=jwt
# or
REFLECTOR_AUTH_BACKEND=authentik
```
## Feature Comparison
| Feature | Public Mode | Private Mode |
|---------|------------|--------------|
| File Upload | ✅ | ✅ |
| Live Transcription | ✅ | ✅ |
| Speaker Diarization | ✅ | ✅ |
| Translation | ✅ | ✅ |
| Summarization | ✅ | ✅ |
| Meeting Rooms | ❌ | ✅ |
| Persistent Storage | ❌ | ✅ |
| Team Collaboration | ❌ | ✅ |
| API Access | Limited | Full |
| User Management | ❌ | ✅ |
| Custom Branding | ❌ | ✅ |
| Analytics | ❌ | ✅ |
| Webhooks | ❌ | ✅ |
## Security Considerations
### Public Mode Security
- Rate limiting to prevent abuse
- File size restrictions
- Automatic cleanup of old data
- No PII storage
### Private Mode Security
- Encrypted data storage
- Audit logging
- Session management
- Access control lists
- Data retention policies
## Choosing the Right Mode
### Choose Public Mode if:
- You need quick, one-time transcriptions
- You're evaluating Reflector
- You don't need persistent storage
- You're processing non-sensitive content
### Choose Private Mode if:
- You need team collaboration
- You require persistent storage
- You're processing sensitive content
- You need meeting room functionality
- You want advanced analytics
EOF
# Concepts - Independence
cat > docs/concepts/independence.md << 'EOF'
---
sidebar_position: 3
title: Data Independence
---
# Data Independence & Privacy
Reflector is designed with privacy and data independence as core principles, giving you complete control over your data and processing.
## Privacy by Design
### No Third-Party Data Sharing
Your audio and transcripts are never shared with third parties:
- **Local Processing**: All ML models can run on your infrastructure
- **No Training on User Data**: Your content is never used to improve models
- **Isolated Processing**: Each transcript is processed in isolation
- **No Analytics Tracking**: No usage analytics sent to external services
### Data Ownership
You maintain complete ownership of all data:
- **Export Anytime**: Download all your transcripts and audio
- **Delete on Demand**: Permanent deletion with no recovery
- **API Access**: Full programmatic access to your data
- **No Vendor Lock-in**: Standard formats for easy migration
## Processing Transparency
### What Happens to Your Audio
1. **Upload/Stream**: Audio received by your server
2. **Temporary Storage**: Stored only for processing duration
3. **Processing**: ML models process audio locally or on Modal
4. **Results Storage**: Transcripts stored in your database
5. **Cleanup**: Original audio deleted (unless configured otherwise)
### Local vs Cloud Processing
#### Local Processing
When configured for local processing:
- All models run on your hardware
- No data leaves your infrastructure
- Complete air-gap capability
- Higher hardware requirements
#### Modal.com Processing
When using Modal for GPU acceleration:
- Audio chunks sent to Modal for processing
- Processed immediately and deleted
- No long-term storage on Modal
- Modal's security: SOC 2 Type II compliant
### Data Retention
Default retention policies:
- **Public Mode**: 24 hours then automatic deletion
- **Private Mode**: Configurable (default: indefinite)
- **Audio Files**: Deleted after processing (configurable)
- **Transcripts**: Retained based on policy
## Compliance Features
### GDPR Compliance
- **Right to Access**: Export all user data
- **Right to Deletion**: Permanent data removal
- **Data Portability**: Standard export formats
- **Privacy by Default**: Minimal data collection
### HIPAA Considerations
For healthcare deployments:
- **Self-hosted Option**: Complete infrastructure control
- **Encryption**: At rest and in transit
- **Audit Logging**: Complete access trail
- **Access Controls**: Role-based permissions
### Industry Standards
- **TLS 1.3**: Modern encryption for data in transit
- **AES-256**: Encryption for data at rest
- **JWT Tokens**: Secure, stateless authentication
- **OWASP Guidelines**: Security best practices
## Self-Hosted Deployment
### Complete Independence
Self-hosting provides maximum control:
- **Your Infrastructure**: Run on your servers
- **Your Network**: No external connections required
- **Your Policies**: Implement custom retention
- **Your Compliance**: Meet specific requirements
### Air-Gap Capability
Reflector can run completely offline:
1. Download all models during setup
2. Configure for local processing only
3. Disable all external integrations
4. Run in isolated network environment
## Data Flow Control
### Configurable Processing
Control where each step happens:
```yaml
# All local processing
TRANSCRIPT_BACKEND=local
DIARIZATION_BACKEND=local
TRANSLATION_BACKEND=local
# Hybrid approach
TRANSCRIPT_BACKEND=modal # Fast GPU processing
DIARIZATION_BACKEND=local # Sensitive speaker data
TRANSLATION_BACKEND=modal # Non-sensitive translation
```
### Storage Options
Choose where data is stored:
- **Local Filesystem**: Complete control
- **PostgreSQL**: Self-hosted database
- **S3-Compatible**: MinIO or AWS with encryption
- **Hybrid**: Different storage for different data types
## Security Architecture
### Defense in Depth
Multiple layers of security:
1. **Network Security**: Firewalls and VPNs
2. **Application Security**: Input validation and sanitization
3. **Data Security**: Encryption and access controls
4. **Operational Security**: Logging and monitoring
### Zero Trust Principles
- **Verify Everything**: All requests authenticated
- **Least Privilege**: Minimal permissions granted
- **Assume Breach**: Design for compromise containment
- **Encrypt Everything**: No plaintext transmission
## Audit and Compliance
### Audit Logging
Comprehensive logging of:
- **Access Events**: Who accessed what and when
- **Processing Events**: What was processed and how
- **Configuration Changes**: System modifications
- **Security Events**: Failed authentication attempts
### Compliance Reporting
Generate reports for:
- **Data Processing**: What data was processed
- **Data Access**: Who accessed the data
- **Data Retention**: What was retained or deleted
- **Security Events**: Security-related incidents
## Best Practices
### For Maximum Privacy
1. **Self-host** all components
2. **Use local processing** for all models
3. **Implement short retention** periods
4. **Encrypt all storage** at rest
5. **Use VPN** for all connections
6. **Regular audits** of access logs
### For Balanced Approach
1. **Self-host core services** (database, API)
2. **Use Modal for processing** (faster, cost-effective)
3. **Implement encryption** everywhere
4. **Regular backups** with encryption
5. **Monitor access** patterns
EOF
# Concepts - Pipeline
cat > docs/concepts/pipeline.md << 'EOF'
---
sidebar_position: 4
title: Processing Pipeline
---
# Processing Pipeline
Reflector uses a sophisticated pipeline architecture to process audio efficiently and accurately.
## Pipeline Overview
The processing pipeline consists of modular components that can be combined and configured based on your needs:
```mermaid
graph LR
A[Audio Input] --> B[Pre-processing]
B --> C[Chunking]
C --> D[Transcription]
D --> E[Diarization]
E --> F[Alignment]
F --> G[Post-processing]
G --> H[Output]
```
## Pipeline Components
### Audio Input
Accepts various input sources:
- **File Upload**: MP3, WAV, M4A, WebM, MP4
- **WebRTC Stream**: Live browser audio
- **Recording Integration**: Whereby recordings
- **API Upload**: Direct API submission
### Pre-processing
Prepares audio for optimal processing:
- **Format Conversion**: Convert to 16kHz mono WAV
- **Normalization**: Adjust volume to -23 LUFS
- **Noise Reduction**: Optional background noise removal
- **Validation**: Check duration and quality
### Chunking
Splits audio for parallel processing:
- **Fixed Size**: 30-second chunks by default
- **Overlap**: 1-second overlap for continuity
- **Smart Boundaries**: Attempt to split at silence
- **Metadata**: Track chunk positions
### Transcription
Converts speech to text:
- **Model Selection**: Whisper or Parakeet
- **Language Detection**: Automatic or specified
- **Timestamp Generation**: Word-level timing
- **Confidence Scores**: Quality indicators
### Diarization
Identifies different speakers:
- **Voice Activity Detection**: Find speech segments
- **Speaker Embedding**: Extract voice characteristics
- **Clustering**: Group similar voices
- **Label Assignment**: Assign speaker IDs
### Alignment
Merges all processing results:
- **Chunk Assembly**: Combine transcription chunks
- **Speaker Mapping**: Align speakers with text
- **Overlap Resolution**: Handle chunk boundaries
- **Timeline Creation**: Build unified timeline
### Post-processing
Enhances the final output:
- **Formatting**: Apply punctuation and capitalization
- **Translation**: Convert to target languages
- **Summarization**: Generate concise summaries
- **Topic Extraction**: Identify key themes
- **Action Items**: Extract tasks and decisions
## Processing Modes
### Batch Processing
For uploaded files:
- Optimized for throughput
- Parallel chunk processing
- Higher accuracy models
- Complete file analysis
### Stream Processing
For live audio:
- Optimized for latency
- Sequential processing
- Real-time feedback
- Progressive results
### Hybrid Processing
For meetings:
- Stream during meeting
- Batch after completion
- Best of both modes
- Maximum accuracy
## Pipeline Configuration
### Model Selection
Choose models based on requirements:
```python
# High accuracy (slower)
config = {
"transcription_model": "whisper-large-v3",
"diarization_model": "pyannote-3.1",
"translation_model": "seamless-m4t-large"
}
# Balanced (default)
config = {
"transcription_model": "whisper-base",
"diarization_model": "pyannote-3.1",
"translation_model": "seamless-m4t-medium"
}
# Fast processing
config = {
"transcription_model": "whisper-tiny",
"diarization_model": "pyannote-3.1-fast",
"translation_model": "seamless-m4t-small"
}
```
### Processing Options
Customize pipeline behavior:
```yaml
# Parallel processing
max_parallel_chunks: 10
chunk_size_seconds: 30
chunk_overlap_seconds: 1
# Quality settings
enable_noise_reduction: true
enable_normalization: true
min_speech_confidence: 0.5
# Post-processing
enable_translation: true
target_languages: ["es", "fr", "de"]
enable_summarization: true
summary_length: "medium"
```
## Performance Characteristics
### Processing Times
For 1 hour of audio:
| Pipeline Config | Processing Time | Accuracy |
|----------------|-----------------|----------|
| Fast | 2-3 minutes | 85-90% |
| Balanced | 5-8 minutes | 92-95% |
| High Accuracy | 15-20 minutes | 95-98% |
### Resource Usage
| Component | CPU Usage | Memory | GPU |
|-----------|-----------|---------|-----|
| Transcription | Medium | 2-4 GB | Required |
| Diarization | High | 4-8 GB | Required |
| Translation | Low | 2-3 GB | Optional |
| Post-processing | Low | 1-2 GB | Not needed |
## Pipeline Orchestration
### Celery Task Chain
The pipeline is orchestrated using Celery:
```python
chain = (
chunk_audio.s(audio_id) |
group(transcribe_chunk.s(chunk) for chunk in chunks) |
merge_transcriptions.s() |
diarize_audio.s() |
align_speakers.s() |
post_process.s()
)
```
### Error Handling
Robust error recovery:
- **Automatic Retry**: Failed tasks retry up to 3 times
- **Partial Recovery**: Continue with successful chunks
- **Fallback Models**: Use alternative models on failure
- **Error Reporting**: Detailed error messages
### Progress Tracking
Real-time progress updates:
- **Chunk Progress**: Track individual chunk processing
- **Overall Progress**: Percentage completion
- **ETA Calculation**: Estimated completion time
- **WebSocket Updates**: Live progress to clients
## Optimization Strategies
### GPU Utilization
Maximize GPU efficiency:
- **Batch Processing**: Process multiple chunks together
- **Model Caching**: Keep models loaded in memory
- **Dynamic Batching**: Adjust batch size based on GPU memory
- **Multi-GPU Support**: Distribute across available GPUs
### Memory Management
Efficient memory usage:
- **Streaming Processing**: Process large files in chunks
- **Garbage Collection**: Clean up after each chunk
- **Memory Limits**: Prevent out-of-memory errors
- **Disk Caching**: Use disk for large intermediate results
### Network Optimization
Minimize network overhead:
- **Compression**: Compress audio before transfer
- **CDN Integration**: Use CDN for static assets
- **Connection Pooling**: Reuse network connections
- **Parallel Uploads**: Multiple concurrent uploads
## Quality Assurance
### Accuracy Metrics
Monitor processing quality:
- **Word Error Rate (WER)**: Transcription accuracy
- **Diarization Error Rate (DER)**: Speaker identification accuracy
- **Translation BLEU Score**: Translation quality
- **Summary Coherence**: Summary quality metrics
### Validation Steps
Ensure output quality:
- **Confidence Thresholds**: Filter low-confidence segments
- **Consistency Checks**: Verify timeline consistency
- **Language Validation**: Ensure correct language detection
- **Format Validation**: Check output format compliance
## Advanced Features
### Custom Models
Use your own models:
- **Fine-tuned Whisper**: Domain-specific models
- **Custom Diarization**: Trained on your speakers
- **Specialized Post-processing**: Industry-specific formatting
### Pipeline Extensions
Add custom processing steps:
- **Sentiment Analysis**: Analyze emotional tone
- **Entity Extraction**: Identify people, places, organizations
- **Custom Metrics**: Calculate domain-specific metrics
- **Integration Hooks**: Call external services
EOF
# Create installation documentation
cat > docs/installation/overview.md << 'EOF'
---
sidebar_position: 1
title: Installation Overview
---
# Installation Overview
Reflector is designed for self-hosted deployment, giving you complete control over your infrastructure and data.
## Deployment Options
### Docker Deployment (Recommended)
The easiest way to deploy Reflector:
- Pre-configured containers
- Automated dependency management
- Consistent environment
- Easy updates
### Manual Installation
For custom deployments:
- Greater control over configuration
- Integration with existing infrastructure
- Custom optimization options
- Development environments
## Requirements
### System Requirements
**Minimum Requirements:**
- CPU: 4 cores
- RAM: 8 GB
- Storage: 50 GB
- OS: Ubuntu 20.04+ or similar Linux
**Recommended Requirements:**
- CPU: 8+ cores
- RAM: 16 GB
- Storage: 100 GB SSD
- GPU: NVIDIA GPU with 8GB+ VRAM (for local processing)
### Network Requirements
- Public IP address (for WebRTC)
- Ports: 80, 443, 8000, 3000
- Domain name (for SSL)
- SSL certificate (Let's Encrypt supported)
## Required Services
### Core Services
These services are required for basic operation:
1. **PostgreSQL** - Primary database
2. **Redis** - Message broker and cache
3. **Docker** - Container runtime
### GPU Processing
Choose one:
- **Modal.com** - Serverless GPU (recommended)
- **Local GPU** - Self-hosted GPU processing
### Optional Services
Enhance functionality with:
- **AWS S3** - Long-term storage
- **Whereby** - Video conferencing rooms
- **Authentik** - Enterprise authentication
- **Zulip** - Chat integration
## Quick Start
### Using Docker Compose
1. Clone the repository:
```bash
git clone https://github.com/monadical-sas/reflector.git
cd reflector
```
2. Navigate to docker directory:
```bash
cd docker
```
3. Copy and configure environment:
```bash
cp .env.example .env
# Edit .env with your settings
```
4. Start services:
```bash
docker compose up -d
```
5. Access Reflector:
- Frontend: https://your-domain.com
- API: https://your-domain.com/api
## Configuration Overview
### Essential Configuration
```env
# Database
DATABASE_URL=postgresql://user:pass@localhost/reflector
# Redis
REDIS_URL=redis://localhost:6379
# Modal.com (for GPU processing)
TRANSCRIPT_MODAL_API_KEY=your-key
DIARIZATION_MODAL_API_KEY=your-key
# Domain
DOMAIN=your-domain.com
```
### Security Configuration
```env
# Authentication
REFLECTOR_AUTH_BACKEND=jwt
NEXTAUTH_SECRET=generate-strong-secret
# SSL (handled by Caddy)
# Automatic with Let's Encrypt
```
## Service Architecture
```mermaid
graph TD
A[Caddy Reverse Proxy] --> B[Frontend - Next.js]
A --> C[Backend - FastAPI]
C --> D[PostgreSQL]
C --> E[Redis]
C --> F[Celery Workers]
F --> G[Modal.com GPU]
```
## Next Steps
1. **Review Requirements**: [System Requirements](./requirements)
2. **Docker Setup**: [Docker Deployment Guide](./docker-setup)
3. **Configure Services**:
- [Modal.com Setup](./modal-setup)
- [Whereby Setup](./whereby-setup)
- [AWS S3 Setup](./aws-setup)
4. **Optional Services**:
- [Authentik Setup](./authentik-setup)
- [Zulip Setup](./zulip-setup)
## Getting Help
- [Troubleshooting Guide](../reference/troubleshooting)
- [GitHub Issues](https://github.com/monadical-sas/reflector/issues)
- [Community Discord](#)
EOF
chmod +x create-docs.sh
echo "Documentation creation script ready. Run ./create-docs.sh to generate all docs."

View File

@@ -1,115 +0,0 @@
---
sidebar_position: 2
title: Operating Modes
---
# Operating Modes
Reflector operates in two distinct modes to accommodate different use cases and security requirements.
## Public Mode
Public mode provides immediate access to core transcription features without requiring authentication.
### Features Available
- **File Upload**: Process audio files
- **Live Transcription**: Stream audio from microphone
- **Basic Processing**: Transcription and diarization
- **Temporary Storage**: Temporary data retention (configurable)
### Limitations
- No persistent storage
- No meeting rooms
- Limited to single-user sessions
- No team collaboration features
### Use Cases
- Quick transcription needs
- Testing and evaluation
- Individual users
- Public demonstrations
## Private Mode
Private mode unlocks the full potential of Reflector with authentication and persistent storage.
### Additional Features
- **Virtual Meeting Rooms**: Whereby and Daily.co integration
- **Team Collaboration**: Share transcripts with team
- **Persistent Storage**: Long-term transcript archive
- **Meeting History**: Search and browse past transcripts
- **Custom Integration**: Webhooks and API access
- **User Management**: Role-based access control
### Authentication Options
#### Authentik Integration
Enterprise-grade SSO with support for:
- SAML 2.0
- OAuth 2.0 / OIDC
- LDAP / Active Directory
- Multi-factor authentication
### Room Management
Virtual rooms provide dedicated spaces for meetings:
- **Persistent URLs**: Same link for recurring meetings
- **Access Control**: Invite-only or open rooms
- **Recording Consent**: Automatic consent management
- **Custom Settings**: Per-room configuration
## Mode Selection
The mode is determined by your deployment configuration:
```env
# Public Mode (no authentication)
AUTH_BACKEND=none
# Private Mode (with authentication)
AUTH_BACKEND=jwt
```
See [Authentication Setup](../installation/auth-setup) for configuring JWT authentication.
## Feature Comparison
| Feature | Public Mode | Private Mode |
|---------|------------|--------------|
| File Upload | ✅ | ✅ |
| Live Transcription | ✅ | ✅ |
| Speaker Diarization | ✅ | ✅ |
| Summarization | ✅ | ✅ |
| Meeting Rooms | ❌ | ✅ |
| Persistent Storage | ❌ | ✅ |
| Team Collaboration | ❌ | ✅ |
| API Access | Limited | Full |
| User Management | ❌ | ✅ |
| Custom Branding | ❌ | ✅ |
| Meeting History | ❌ | ✅ |
| Webhooks | ❌ | ✅ |
## Security Considerations
### Public Mode Security
- File size restrictions
- Automatic cleanup of old data
### Private Mode Security
- Access control lists
- Data retention policies
## Choosing the Right Mode
### Choose Public Mode if:
- You need quick, one-time transcriptions
- You're evaluating Reflector
- You don't need persistent storage
- You're processing non-sensitive content
### Choose Private Mode if:
- You need team collaboration
- You require persistent storage
- You're processing sensitive content
- You need meeting room functionality
- You want searchable meeting history

View File

@@ -1,201 +0,0 @@
---
sidebar_position: 1
title: Architecture Overview
---
# Architecture Overview
Reflector is built as a modern, scalable, microservices-based application designed to handle audio processing workloads efficiently while maintaining data privacy and control.
## System Components
### Frontend Application
The user interface is built with **Next.js 15** using the App Router pattern, providing:
- Server-side rendering for optimal performance
- Real-time WebSocket connections for live transcription
- WebRTC support for audio streaming and live meetings (via Daily.co or Whereby)
- Responsive design with Chakra UI components
### Backend API Server
The core API is powered by **FastAPI**, a modern Python framework that provides:
- High-performance async request handling
- Automatic OpenAPI documentation generation
- Type safety with Pydantic models
- WebSocket support for real-time updates
### Processing Pipeline
Audio processing is handled through a modular pipeline architecture:
```
Audio Input → Chunking → Transcription → Diarization → Post-Processing → Storage
```
Each step can run independently and in parallel, allowing for:
- Scalable processing of large files
- Real-time streaming capabilities
- Fault tolerance and retry mechanisms
### Worker Architecture
Background tasks are managed by **Celery** workers with **Redis** as the message broker:
- Distributed task processing
- Priority queues for time-sensitive operations
- Automatic retry on failure
- Progress tracking and notifications
### GPU Acceleration
ML models run on GPU-accelerated infrastructure:
- **Modal.com** for serverless GPU processing
- **Self-hosted GPU** with Docker deployment
- Automatic scaling based on demand
- Cost-effective pay-per-use model
## Data Flow
### Daily.co Meeting Recording Flow
1. **Recording**: Daily.co captures separate audio tracks per participant
2. **Webhook**: Daily.co notifies Reflector when recording is ready
3. **Track Download**: Individual participant tracks fetched from S3
4. **Padding**: Tracks padded with silence based on join time for synchronization
5. **Transcription**: Each track transcribed independently (speaker = track index)
6. **Merge**: Transcriptions sorted by timestamp and combined
7. **Mixdown**: Tracks mixed to single MP3 for playback
8. **Post-Processing**: Topics, title, and summaries generated via LLM
9. **Delivery**: Results stored and user notified via WebSocket
### File Upload Flow
1. **Upload**: User uploads audio file through web interface
2. **Storage**: File stored temporarily
3. **Transcription**: Full file transcribed via Whisper
4. **Diarization**: ML-based speaker identification (Pyannote)
5. **Post-Processing**: Topics, title, summaries
6. **Delivery**: Results stored and user notified
### Live Streaming Flow
1. **WebRTC Connection**: Browser establishes peer connection via Daily.co or Whereby
2. **Audio Capture**: Microphone audio streamed to server
3. **Buffering**: Audio buffered for processing
4. **Real-time Processing**: Segments transcribed as they arrive
5. **WebSocket Updates**: Results streamed back to client
6. **Continuous Assembly**: Full transcript built progressively
## Deployment Architecture
### Container-Based Deployment
All components are containerized for consistent deployment:
```yaml
services:
web: # Next.js application
server: # FastAPI server
worker: # Celery workers
redis: # Message broker
postgres: # Database
caddy: # Reverse proxy
```
### Networking
- **Host Network Mode**: Required for WebRTC/ICE compatibility
- **Caddy Reverse Proxy**: Handles SSL termination and routing
- **WebSocket Upgrade**: Supports real-time connections
## Scalability Considerations
### Horizontal Scaling
- **Stateless Backend**: Multiple API server instances
- **Worker Pools**: Add workers based on queue depth
- **Database Pooling**: Connection management for concurrent access
### Vertical Scaling
- **GPU Workers**: Scale up for faster model inference
- **Memory Optimization**: Efficient audio buffering
## Security Architecture
### Authentication & Authorization
- **JWT Tokens**: Stateless authentication
- **Authentik Integration**: Enterprise SSO support
- **Role-Based Access**: Granular permissions
### Data Protection
- **Encryption in Transit**: TLS for all connections
- **Temporary Storage**: Automatic cleanup of processed files
### Privacy by Design
- **Local Processing**: Option to process entirely on-premises
- **No Training on User Data**: Models are pre-trained
- **Data Isolation**: Multi-tenant data separation
## Integration Points
### External Services
- **Modal.com**: GPU processing
- **AWS S3**: Long-term storage
- **Whereby**: Video conferencing rooms
- **Zulip**: Chat integration (optional)
### APIs and Webhooks
- **RESTful API**: Standard CRUD operations
- **WebSocket API**: Real-time updates
- **Webhook Notifications**: Processing completion events
- **OpenAPI Specification**: Machine-readable API definition
## Performance Optimization
### Caching Strategy
- **Redis Cache**: Frequently accessed data
- **CDN**: Static asset delivery
- **Browser Cache**: Client-side optimization
### Database Optimization
- **Indexed Queries**: Fast search and retrieval
- **Connection Pooling**: Efficient resource usage
- **Query Optimization**: N+1 query prevention
### Processing Optimization
- **Batch Processing**: Efficient GPU utilization
- **Parallel Execution**: Multi-core CPU usage
- **Stream Processing**: Reduced memory footprint
## Monitoring and Observability
### Metrics Collection
- **Application Metrics**: Request rates, response times
- **System Metrics**: CPU, memory, disk usage
- **Business Metrics**: Transcription accuracy, processing times
### Logging
- **Structured Logging**: JSON format for analysis
- **Log Aggregation**: Centralized log management
- **Error Tracking**: Sentry integration
### Health Checks
- **Liveness Probes**: Component availability
- **Readiness Probes**: Service readiness
- **Dependency Checks**: External service status

View File

@@ -1,183 +0,0 @@
---
sidebar_position: 4
title: Processing Pipeline
---
# Processing Pipeline
Reflector uses a modular pipeline architecture to process audio efficiently and accurately.
## Pipeline Overview
The processing pipeline consists of modular components that can be combined and configured based on your needs:
```mermaid
graph LR
A[Audio Input] --> B[Pre-processing]
B --> C[Chunking]
C --> D[Transcription]
D --> E[Diarization]
E --> F[Alignment]
F --> G[Post-processing]
G --> H[Output]
```
## Pipeline Components
### Audio Input
Accepts various input sources:
- **File Upload**: MP3, WAV, M4A, WebM, MP4
- **WebRTC Stream**: Live browser audio
- **Recording Integration**: Daily.co and Whereby recordings
- **API Upload**: Direct API submission
### Pre-processing
Prepares audio for optimal processing:
- **Format Conversion**: Convert to 16kHz mono WAV
- **Noise Reduction**: Optional background noise removal
- **Validation**: Check duration and quality
### Chunking
Splits audio for parallel processing:
- **Configurable Size**: Audio split into processable segments
- **Silence Detection**: Optional splitting at natural pauses
- **Metadata**: Track chunk positions
### Transcription
Converts speech to text:
- **Model Selection**: Whisper or Parakeet
- **Language Detection**: Automatic or specified
- **Timestamp Generation**: Word-level timing
- **Confidence Scores**: Quality indicators
### Diarization
Identifies different speakers:
- **Voice Activity Detection**: Find speech segments
- **Speaker Embedding**: Extract voice characteristics
- **Clustering**: Group similar voices
- **Label Assignment**: Assign speaker IDs
### Alignment
Merges all processing results:
- **Chunk Assembly**: Combine transcription chunks
- **Speaker Mapping**: Align speakers with text
- **Overlap Resolution**: Handle chunk boundaries
- **Timeline Creation**: Build unified timeline
### Post-processing
Enhances the final output:
- **Formatting**: Apply punctuation and capitalization
- **Summarization**: Generate concise summaries
- **Topic Extraction**: Identify key themes
- **Action Items**: Extract tasks and decisions
## Processing Modes
### Batch Processing
For uploaded files:
- Optimized for throughput
- Parallel chunk processing
- Higher accuracy models
- Complete file analysis
### Stream Processing
For live audio:
- Optimized for latency
- Sequential processing
- Real-time feedback
- Progressive results
### Hybrid Processing
For meetings:
- Stream during meeting
- Batch after completion
- Best of both modes
- Maximum accuracy
## Pipeline Orchestration
### Error Handling
Error recovery:
- **Automatic Retry**: Failed tasks retry up to 3 times
- **Partial Recovery**: Continue with successful chunks
- **Fallback Models**: Use alternative models on failure
- **Error Reporting**: Detailed error messages
### Progress Tracking
Real-time progress updates:
- **Chunk Progress**: Track individual chunk processing
- **Overall Progress**: Percentage completion
- **ETA Calculation**: Estimated completion time
- **WebSocket Updates**: Live progress to clients
## Optimization Strategies
### GPU Utilization
Maximize GPU efficiency:
- **Batch Processing**: Process multiple chunks together
- **Model Caching**: Keep models loaded in memory
- **Dynamic Batching**: Adjust batch size based on GPU memory
- **Multi-GPU Support**: Distribute across available GPUs
### Memory Management
Efficient memory usage:
- **Streaming Processing**: Process large files in chunks
- **Garbage Collection**: Clean up after each chunk
- **Memory Limits**: Prevent out-of-memory errors
- **Disk Caching**: Use disk for large intermediate results
### Network Optimization
Minimize network overhead:
- **Compression**: Compress audio before transfer
- **CDN Integration**: Use CDN for static assets
- **Connection Pooling**: Reuse network connections
- **Parallel Uploads**: Multiple concurrent uploads
## Quality Assurance
### Accuracy Metrics
Monitor processing quality:
- **Word Error Rate (WER)**: Transcription accuracy
- **Diarization Error Rate (DER)**: Speaker identification accuracy
- **Summary Coherence**: Summary quality metrics
### Validation Steps
Ensure output quality:
- **Confidence Thresholds**: Filter low-confidence segments
- **Consistency Checks**: Verify timeline consistency
- **Language Validation**: Ensure correct language detection
- **Format Validation**: Check output format compliance
## Advanced Features
### Custom Models
Use your own models:
- **Fine-tuned Whisper**: Domain-specific models
- **Custom Diarization**: Trained on your speakers
- **Specialized Post-processing**: Industry-specific formatting
### Pipeline Extensions
Add custom processing steps:
- **Sentiment Analysis**: Analyze emotional tone
- **Entity Extraction**: Identify people, places, organizations
- **Custom Metrics**: Calculate domain-specific metrics
- **Integration Hooks**: Call external services

View File

@@ -1,285 +0,0 @@
---
sidebar_position: 5
title: Authentication Setup
---
# Authentication Setup
This page covers authentication setup in detail. For the complete deployment guide, see [Deployment Guide](./overview).
Reflector uses [Authentik](https://goauthentik.io/) for OAuth/OIDC authentication. This guide walks you through setting up Authentik and connecting it to Reflector.
For simplicity, this guide sets up Authentik on the same server as Reflector. You can use your own Authentik instance instead.
## Overview
Reflector's authentication flow:
1. User clicks "Sign In" on frontend
2. Frontend redirects to Authentik login page
3. User authenticates with Authentik
4. Authentik redirects back with OAuth tokens
5. Frontend stores tokens, backends verify JWT signature
## Option 1: Self-Hosted Authentik (Same Server)
This setup runs Authentik on the same server as Reflector, with Caddy proxying to both.
### Deploy Authentik
```bash
# Create directory for Authentik
mkdir -p ~/authentik && cd ~/authentik
# Download docker-compose file
curl -O https://goauthentik.io/docker-compose.yml
# Generate secrets and bootstrap credentials
cat > .env << EOF
PG_PASS=$(openssl rand -base64 36 | tr -d '\n')
AUTHENTIK_SECRET_KEY=$(openssl rand -base64 60 | tr -d '\n')
# Privacy-focused choice for self-hosted deployments
AUTHENTIK_ERROR_REPORTING__ENABLED=false
AUTHENTIK_BOOTSTRAP_PASSWORD=YourSecurePassword123
AUTHENTIK_BOOTSTRAP_EMAIL=admin@example.com
EOF
# Start Authentik
sudo docker compose up -d
```
Authentik takes ~2 minutes to run migrations and apply blueprints on first start.
### Connect Authentik to Reflector's Network
If Authentik runs in a separate Docker Compose project, connect it to Reflector's network so Caddy can proxy to it:
```bash
# Wait for Authentik to be healthy
# Connect Authentik server to Reflector's network
sudo docker network connect reflector_default authentik-server-1
```
**Important:** This step must be repeated if you restart Authentik with `docker compose down`. Add it to your deployment scripts or use `docker compose up -d` (which preserves containers) instead of down/up.
### Add Authentik to Caddy
Uncomment the Authentik section in your `Caddyfile` and set your domain:
```bash
nano Caddyfile
```
Uncomment and edit:
```
{$AUTHENTIK_DOMAIN:authentik.example.com} {
reverse_proxy authentik-server-1:9000
}
```
Reload Caddy:
```bash
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
```
### Create OAuth2 Provider in Authentik
**Option A: Automated Setup (Recommended)**
**Location: Reflector server**
Run the setup script from the Reflector repository:
```bash
ssh user@your-server-ip
cd ~/reflector
./scripts/setup-authentik-oauth.sh https://authentik.example.com YourSecurePassword123 https://app.example.com
```
**Important:** The script must be run from the `~/reflector` directory on your server, as it creates files using relative paths.
The script will output the configuration values to add to your `.env` files. Skip to "Update docker-compose.prod.yml".
**Option B: Manual Setup**
1. **Login to Authentik Admin** at `https://authentik.example.com/`
- Username: `akadmin`
- Password: The `AUTHENTIK_BOOTSTRAP_PASSWORD` you set in .env
2. **Create OAuth2 Provider:**
- Go to **Applications > Providers > Create**
- Select **OAuth2/OpenID Provider**
- Configure:
- **Name**: `Reflector`
- **Authorization flow**: `default-provider-authorization-implicit-consent`
- **Client type**: `Confidential`
- **Client ID**: Note this value (auto-generated)
- **Client Secret**: Note this value (auto-generated)
- **Redirect URIs**: Add entry with:
```
https://app.example.com/api/auth/callback/authentik
```
- Scroll down to **Advanced protocol settings**
- In **Scopes**, add these three mappings:
- `authentik default OAuth Mapping: OpenID 'email'`
- `authentik default OAuth Mapping: OpenID 'openid'`
- `authentik default OAuth Mapping: OpenID 'profile'`
- Click **Finish**
3. **Create Application:**
- Go to **Applications > Applications > Create**
- Configure:
- **Name**: `Reflector`
- **Slug**: `reflector` (auto-filled)
- **Provider**: Select the `Reflector` provider you just created
- Click **Create**
### Get Public Key for JWT Verification
**Location: Reflector server**
Extract the public key from Authentik's JWKS endpoint:
```bash
mkdir -p ~/reflector/server/reflector/auth/jwt/keys
curl -s https://authentik.example.com/application/o/reflector/jwks/ | \
jq -r '.keys[0].x5c[0]' | base64 -d | openssl x509 -pubkey -noout \
> ~/reflector/server/reflector/auth/jwt/keys/authentik_public.pem
```
### Update docker-compose.prod.yml
**Location: Reflector server**
**Note:** This step is already done in the current `docker-compose.prod.yml`. Verify the volume mounts exist:
```yaml
server:
image: monadicalsas/reflector-backend:latest
# ... other config ...
volumes:
- server_data:/app/data
- ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro
worker:
image: monadicalsas/reflector-backend:latest
# ... other config ...
volumes:
- server_data:/app/data
- ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro
```
### Configure Reflector Backend
**Location: Reflector server**
Update `server/.env`:
```env
# Authentication
AUTH_BACKEND=jwt
AUTH_JWT_PUBLIC_KEY=authentik_public.pem
AUTH_JWT_AUDIENCE=<your-client-id>
CORS_ALLOW_CREDENTIALS=true
```
Replace `<your-client-id>` with the Client ID from previous steps.
### Configure Reflector Frontend
**Location: Reflector server**
Update `www/.env`:
```env
# Authentication
FEATURE_REQUIRE_LOGIN=true
# Authentik OAuth
AUTHENTIK_ISSUER=https://authentik.example.com/application/o/reflector
AUTHENTIK_REFRESH_TOKEN_URL=https://authentik.example.com/application/o/token/
AUTHENTIK_CLIENT_ID=<your-client-id>
AUTHENTIK_CLIENT_SECRET=<your-client-secret>
# NextAuth
NEXTAUTH_SECRET=<generate-with-openssl-rand-hex-32>
```
### Restart Services
**Location: Reflector server**
```bash
cd ~/reflector
sudo docker compose -f docker-compose.prod.yml up -d --force-recreate server worker web
```
### Verify Authentication
1. Visit `https://app.example.com`
2. Click "Log in" or navigate to `/api/auth/signin`
3. Click "Sign in with Authentik"
4. Login with your Authentik credentials
5. You should be redirected back and see "Log out" in the header
## Option 2: Disable Authentication
For testing or internal deployments where authentication isn't needed:
**Backend `server/.env`:**
```env
AUTH_BACKEND=none
```
**Frontend `www/.env`:**
```env
FEATURE_REQUIRE_LOGIN=false
```
**Note:** The pre-built Docker images have `FEATURE_REQUIRE_LOGIN=true` baked in. To disable auth, you'll need to rebuild the frontend image with the env var set at build time, or set up Authentik.
## Troubleshooting
### "Invalid redirect URI" error
- Verify the redirect URI in Authentik matches exactly:
```
https://app.example.com/api/auth/callback/authentik
```
- Check for trailing slashes - they must match exactly
### "Invalid audience" JWT error
- Ensure `AUTH_JWT_AUDIENCE` in `server/.env` matches the Client ID from Authentik
- The audience value is the OAuth Client ID, not the issuer URL
### "JWT verification failed" error
- Verify the public key file is mounted in the container
- Check `AUTH_JWT_PUBLIC_KEY` points to the correct filename
- Ensure the key was extracted from the correct provider's JWKS endpoint
### Caddy returns 503 for Authentik
- Verify Authentik container is connected to Reflector's network:
```bash
sudo docker network connect reflector_default authentik-server-1
```
- Check Authentik is healthy: `cd ~/authentik && sudo docker compose ps`
### Users can't access protected pages
- Verify `FEATURE_REQUIRE_LOGIN=true` in frontend
- Check `AUTH_BACKEND=jwt` in backend
- Verify CORS settings allow credentials
### Token refresh errors
- Ensure Redis is running (frontend uses Redis for token caching)
- Verify `KV_URL` is set correctly in frontend env
- Check `AUTHENTIK_REFRESH_TOKEN_URL` is correct
## API Key Authentication
For programmatic access (scripts, integrations), users can generate API keys:
1. Login to Reflector
2. Go to Settings > API Keys
3. Click "Generate New Key"
4. Use the key in requests:
```bash
curl -H "X-API-Key: your-api-key" https://api.example.com/v1/transcripts
```
API keys are stored hashed and can be revoked at any time.

View File

@@ -1,165 +0,0 @@
---
sidebar_position: 6
title: Daily.co Setup
---
# Daily.co Setup
This page covers Daily.co video platform setup for live meeting rooms. For the complete deployment guide, see [Deployment Guide](./overview).
Daily.co enables live video meetings with automatic recording and transcription.
## What You'll Set Up
```
User joins meeting → Daily.co video room → Recording to S3 → [Webhook] → Reflector transcribes
```
## Prerequisites
- [ ] **Daily.co account** - Free tier at https://dashboard.daily.co
- [ ] **AWS account** - For S3 storage
- [ ] **Reflector deployed** - Complete steps from [Deployment Guide](./overview)
---
## Create Daily.co Account
1. Visit https://dashboard.daily.co and sign up
2. Verify your email
3. Note your subdomain (e.g., `yourname.daily.co` → subdomain is `yourname`)
---
## Get Daily.co API Key
1. In Daily.co dashboard, go to **Developers**
2. Click **API Keys**
3. Click **Create API Key**
4. Copy the key (starts with a long string)
Save this for later.
---
## Create AWS S3 Bucket
Daily.co needs somewhere to store recordings before Reflector processes them.
```bash
# Choose a unique bucket name
BUCKET_NAME="reflector-dailyco-yourname" # -yourname is not a requirement, you can name the bucket as you wish
AWS_REGION="us-east-1"
# Create bucket
aws s3 mb s3://$BUCKET_NAME --region $AWS_REGION
# Enable versioning (required)
aws s3api put-bucket-versioning \
--bucket $BUCKET_NAME \
--versioning-configuration Status=Enabled
```
---
## Create IAM Role for Daily.co
Daily.co needs permission to write recordings to your S3 bucket.
Follow the guide https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket
Save the role ARN - you'll need it soon.
It looks like: `arn:aws:iam::123456789012:role/DailyCo`
In short, you'll need to set up a role and grant it access to your S3 bucket.
No additional setup is required from Daily.co settings website side: the app code takes care of letting Daily know where to save the recordings.
---
## Configure Reflector
**Location: Reflector server**
Add to `server/.env`:
```env
# Daily.co Configuration
DEFAULT_VIDEO_PLATFORM=daily
DAILY_API_KEY=<your-api-key-from-daily-setup>
DAILY_SUBDOMAIN=<your-subdomain-from-daily-setup>
# S3 Storage for Daily.co recordings
DAILYCO_STORAGE_AWS_BUCKET_NAME=<your-bucket-from-daily-setup>
DAILYCO_STORAGE_AWS_REGION=us-east-1
DAILYCO_STORAGE_AWS_ROLE_ARN=<your-role-arn-from-daily-setup>
# Transcript storage (should already be configured from main setup)
# TRANSCRIPT_STORAGE_BACKEND=aws
# TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=<your-key>
# TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=<your-secret>
# TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=<your-bucket-name>
# TRANSCRIPT_STORAGE_AWS_REGION=<your-bucket-region>
```
---
## Restart Services
After changing `.env` files, reload with `up -d`:
```bash
sudo docker compose -f docker-compose.prod.yml up -d server worker
```
**Note**: `docker compose up -d` detects env changes and recreates containers automatically.
---
## Test Live Room
1. Visit your Reflector frontend: `https://app.example.com`
2. Go to **Rooms**
3. Click **Create Room**
4. Select **Daily** as the platform
5. Allow camera/microphone access
6. You should see Daily.co video interface
7. Speak for 10-20 seconds
8. Leave the meeting
9. Recording should appear in **Transcripts** within 5 minutes (if webhooks aren't set up yet, see [Webhook Configuration](#webhook-configuration-optional) below)
---
## Troubleshooting
### Recording doesn't appear in S3
1. Check Daily.co dashboard → **Logs** for errors
2. Verify IAM role trust policy has correct Daily.co account ID and your Daily.co subdomain
3. Verify that the bucket has versioning enabled (required, see [Create AWS S3 Bucket](#create-aws-s3-bucket))
### Recording in S3 but not transcribed
1. Check webhook is configured (Reflector should auto-create it)
2. Check worker logs:
```bash
docker compose -f docker-compose.prod.yml logs worker --tail 50
```
3. Verify `DAILYCO_STORAGE_AWS_*` vars in `server/.env`
### "Access Denied" when Daily.co tries to write to S3
1. Double-check IAM role ARN in Daily.co settings
2. Verify bucket name matches exactly
3. Check IAM policy has `s3:PutObject` permission
---
## Webhook Configuration [optional]
`manage_daily_webhook.py` script guides you through creating a webhook for Daily recordings.
The webhook isn't required - polling mechanism is the default and performed automatically.
This guide won't go deep into webhook setup.

View File

@@ -1,192 +0,0 @@
---
sidebar_position: 3
title: Docker Reference
---
# Docker Reference
This page documents the Docker Compose configuration for Reflector. For the complete deployment guide, see [Deployment Guide](./overview).
## Services
The `docker-compose.prod.yml` includes these services:
| Service | Image | Purpose |
|---------|-------|---------|
| `web` | `monadicalsas/reflector-frontend` | Next.js frontend |
| `server` | `monadicalsas/reflector-backend` | FastAPI backend |
| `worker` | `monadicalsas/reflector-backend` | Celery worker for background tasks |
| `beat` | `monadicalsas/reflector-backend` | Celery beat scheduler |
| `redis` | `redis:7.2-alpine` | Message broker and cache |
| `postgres` | `postgres:17-alpine` | Primary database |
| `caddy` | `caddy:2-alpine` | Reverse proxy with auto-SSL |
## Environment Files
Reflector uses two separate environment files:
### Backend (`server/.env`)
Used by: `server`, `worker`, `beat`
Key variables:
```env
# Database connection
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
# Redis
REDIS_HOST=redis
CELERY_BROKER_URL=redis://redis:6379/1
CELERY_RESULT_BACKEND=redis://redis:6379/1
# API domain and CORS
BASE_URL=https://api.example.com
CORS_ORIGIN=https://app.example.com
# Modal GPU processing
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://...
TRANSCRIPT_MODAL_API_KEY=...
```
### Frontend (`www/.env`)
Used by: `web`
Key variables:
```env
# Domain configuration
SITE_URL=https://app.example.com
API_URL=https://api.example.com
WEBSOCKET_URL=wss://api.example.com
SERVER_API_URL=http://server:1250
# Authentication
NEXTAUTH_URL=https://app.example.com
NEXTAUTH_SECRET=...
```
Note: `API_URL` is used client-side (browser), `SERVER_API_URL` is used server-side (SSR).
## Volumes
| Volume | Purpose |
|--------|---------|
| `redis_data` | Redis persistence |
| `postgres_data` | PostgreSQL data |
| `server_data` | Uploaded files, local storage |
| `caddy_data` | SSL certificates |
| `caddy_config` | Caddy configuration |
## Network
All services share the default network. The network is marked `attachable: true` to allow external containers (like Authentik) to join.
## Common Commands
### Start all services
```bash
docker compose -f docker-compose.prod.yml up -d
```
### View logs
```bash
# All services
docker compose -f docker-compose.prod.yml logs -f
# Specific service
docker compose -f docker-compose.prod.yml logs server --tail 50
```
### Restart a service
```bash
# Quick restart (doesn't reload .env changes)
docker compose -f docker-compose.prod.yml restart server
# Reload .env and restart
docker compose -f docker-compose.prod.yml up -d server
```
### Run database migrations
```bash
docker compose -f docker-compose.prod.yml exec server uv run alembic upgrade head
```
### Access database
```bash
docker compose -f docker-compose.prod.yml exec postgres psql -U reflector
```
### Pull latest images
```bash
docker compose -f docker-compose.prod.yml pull
docker compose -f docker-compose.prod.yml up -d
```
### Stop all services
```bash
docker compose -f docker-compose.prod.yml down
```
### Full reset (WARNING: deletes data)
```bash
docker compose -f docker-compose.prod.yml down -v
```
## Customization
### Using a different database
To use an external PostgreSQL:
1. Remove `postgres` service from compose file
2. Update `DATABASE_URL` in `server/.env`:
```env
DATABASE_URL=postgresql+asyncpg://user:pass@external-host:5432/reflector
```
### Using external Redis
1. Remove `redis` service from compose file
2. Update Redis settings in `server/.env`:
```env
REDIS_HOST=external-redis-host
CELERY_BROKER_URL=redis://external-redis-host:6379/1
```
### Adding Authentik
To add Authentik for authentication, see [Authentication Setup](./auth-setup). Quick steps:
1. Deploy Authentik separately
2. Connect to Reflector's network:
```bash
docker network connect reflector_default authentik-server-1
```
3. Add to Caddyfile:
```
authentik.example.com {
reverse_proxy authentik-server-1:9000
}
```
## Caddyfile Reference
The Caddyfile supports environment variable substitution:
```
{$FRONTEND_DOMAIN:app.example.com} {
reverse_proxy web:3000
}
{$API_DOMAIN:api.example.com} {
reverse_proxy server:1250
}
```
Set `FRONTEND_DOMAIN` and `API_DOMAIN` environment variables, or edit the file directly.
### Reload Caddy after changes
```bash
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
```

View File

@@ -1,139 +0,0 @@
---
sidebar_position: 10
title: Docs Website Deployment
---
# Docs Website Deployment
This guide covers deploying the Reflector documentation website. **This is optional and intended for internal/experimental use only.**
## Overview
The documentation is built using Docusaurus and deployed as a static nginx-served site.
## Prerequisites
- Reflector already deployed (Steps 1-7 from [Deployment Guide](./overview))
- DNS A record for docs subdomain (e.g., `docs.example.com`)
## Deployment Steps
### Step 1: Pre-fetch OpenAPI Spec
The docs site includes API reference from your running backend. Fetch it before building:
```bash
cd ~/reflector
docker compose -f docker-compose.prod.yml exec server curl -s http://localhost:1250/openapi.json > docs/static/openapi.json
```
This creates `docs/static/openapi.json` (should be ~70KB) which will be copied during Docker build.
**Why not fetch during build?** Docker build containers are network-isolated and can't access the running backend services.
### Step 2: Verify Dockerfile
The Dockerfile is already in `docs/Dockerfile`:
```dockerfile
FROM node:18-alpine AS builder
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install dependencies
RUN npm ci
# Copy source (includes static/openapi.json if pre-fetched)
COPY . .
# Fix docusaurus config: change onBrokenLinks to 'warn' for Docker build
RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts
# Build static site
RUN npx docusaurus build
FROM nginx:alpine
COPY --from=builder /app/build /usr/share/nginx/html
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
```
### Step 3: Add Docs Service to docker-compose.prod.yml
Add this service to `docker-compose.prod.yml`:
```yaml
docs:
build: ./docs
restart: unless-stopped
networks:
- default
```
### Step 4: Add Caddy Route
Add to `Caddyfile`:
```
{$DOCS_DOMAIN:docs.example.com} {
reverse_proxy docs:80
}
```
### Step 5: Build and Deploy
```bash
cd ~/reflector
docker compose -f docker-compose.prod.yml up -d --build docs
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
```
### Step 6: Verify
```bash
# Check container status
docker compose -f docker-compose.prod.yml ps docs
# Should show "Up"
# Test URL
curl -I https://docs.example.com
# Should return HTTP/2 200
```
Visit `https://docs.example.com` in your browser
## Updating Documentation
When docs are updated:
```bash
cd ~/reflector
git pull
# Refresh OpenAPI spec from backend
docker compose -f docker-compose.prod.yml exec server curl -s http://localhost:1250/openapi.json > docs/static/openapi.json
# Rebuild docs
docker compose -f docker-compose.prod.yml up -d --build docs
```
## Troubleshooting
### Missing openapi.json during build
- Make sure you ran the pre-fetch step first (Step 1)
- Verify `docs/static/openapi.json` exists and is ~70KB
- Re-run: `docker compose exec server curl -s http://localhost:1250/openapi.json > docs/static/openapi.json`
### Build fails with "Docusaurus found broken links"
- This happens if `onBrokenLinks: 'throw'` is set in docusaurus.config.ts
- Solution is already in Dockerfile: uses `sed` to change to `'warn'` during build
### 404 on all pages
- Docusaurus baseUrl might be wrong - should be `/` for custom domain
- Check `docs/docusaurus.config.ts`: `baseUrl: '/'`
### Docs not updating after rebuild
- Force rebuild: `docker compose -f docker-compose.prod.yml build --no-cache docs`
- Then: `docker compose -f docker-compose.prod.yml up -d docs`

View File

@@ -1,171 +0,0 @@
---
sidebar_position: 4
title: Modal.com Setup
---
# Modal.com Setup
This page covers Modal.com GPU setup in detail. For the complete deployment guide, see [Deployment Guide](./overview).
Reflector uses [Modal.com](https://modal.com) for GPU-accelerated audio processing. This guide walks you through deploying the required GPU functions.
## What is Modal.com?
Modal is a serverless GPU platform. You deploy Python code that runs on their GPUs, and pay only for actual compute time. Reflector uses Modal for:
- **Transcription**: Whisper model for speech-to-text
- **Diarization**: Pyannote model for speaker identification
## Prerequisites
1. **Modal.com account** - Sign up at https://modal.com (free tier available)
2. **HuggingFace account** - Required for Pyannote diarization models:
- Create account at https://huggingface.co
- Accept **both** Pyannote licenses:
- https://huggingface.co/pyannote/speaker-diarization-3.1
- https://huggingface.co/pyannote/segmentation-3.0
- Generate access token at https://huggingface.co/settings/tokens
## Deployment
**Location: YOUR LOCAL COMPUTER (laptop/desktop)**
Modal CLI requires browser authentication, so this must run on a machine with a browser - not on a headless server.
### Install Modal CLI
```bash
uv tool install modal
```
### Authenticate with Modal
```bash
modal setup
```
This opens your browser for authentication. Complete the login flow.
### Clone Repository and Deploy
```bash
git clone https://github.com/monadical-sas/reflector.git
cd reflector/gpu/modal_deployments
./deploy-all.sh --hf-token YOUR_HUGGINGFACE_TOKEN
```
Or run interactively (script will prompt for token):
```bash
./deploy-all.sh
```
### What the Script Does
1. **Prompts for HuggingFace token** - Needed to download the Pyannote diarization model
2. **Generates API key** - Creates a secure random key for authenticating requests to GPU functions
3. **Creates Modal secrets**:
- `hf_token` - Your HuggingFace token
- `reflector-gpu` - The generated API key
4. **Deploys GPU functions** - Transcriber (Whisper) and Diarizer (Pyannote)
5. **Outputs configuration** - Prints URLs and API key to console
### Example Output
```
==========================================
Reflector GPU Functions Deployment
==========================================
Generating API key for GPU services...
Creating Modal secrets...
-> Creating secret: hf_token
-> Creating secret: reflector-gpu
Deploying transcriber (Whisper)...
-> https://yourname--reflector-transcriber-web.modal.run
Deploying diarizer (Pyannote)...
-> https://yourname--reflector-diarizer-web.modal.run
==========================================
Deployment complete!
==========================================
Copy these values to your server's server/.env file:
# --- Modal GPU Configuration ---
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://yourname--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=abc123...
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://yourname--reflector-diarizer-web.modal.run
DIARIZATION_MODAL_API_KEY=abc123...
# --- End Modal Configuration ---
```
Copy the output and paste it into your `server/.env` file on your server.
## Costs
Modal charges based on GPU compute time:
- Functions scale to zero when not in use (no cost when idle)
- You only pay for actual processing time
- Free tier includes $30/month of credits
Typical costs for audio processing:
- Transcription: ~$0.01-0.05 per minute of audio
- Diarization: ~$0.02-0.10 per minute of audio
## Troubleshooting
### "Modal CLI not installed"
```bash
uv tool install modal
```
### "Not authenticated with Modal"
```bash
modal setup
# Complete browser authentication
```
### "Failed to create secret hf_token"
- Verify your HuggingFace token is valid
- Ensure you've accepted the Pyannote license
- Token needs `read` permission
### Deployment fails
Check the Modal dashboard for detailed error logs:
- Visit https://modal.com/apps
- Click on the failed function
- View build and runtime logs
### Re-running deployment
The script is safe to re-run. It will:
- Update existing secrets if they exist
- Redeploy functions with latest code
- Output new configuration (API key stays the same if secret exists)
## Manual Deployment (Advanced)
If you prefer to deploy functions individually:
```bash
cd gpu/modal_deployments
# Create secrets manually
modal secret create hf_token HF_TOKEN=your-hf-token
modal secret create reflector-gpu REFLECTOR_GPU_APIKEY=$(openssl rand -hex 32)
# Deploy each function
modal deploy reflector_transcriber.py
modal deploy reflector_diarizer.py
```
## Monitoring
View your deployed functions and their usage:
- **Modal Dashboard**: https://modal.com/apps
- **Function logs**: Click on any function to view logs
- **Usage**: View compute time and costs in the dashboard

View File

@@ -1,411 +0,0 @@
---
sidebar_position: 1
title: Deployment Guide
---
# Deployment Guide
This guide walks you through deploying Reflector from scratch. Follow these steps in order.
## What You'll Set Up
```mermaid
flowchart LR
User --> Caddy["Caddy (auto-SSL)"]
Caddy --> Frontend["Frontend (Next.js)"]
Caddy --> Backend["Backend (FastAPI)"]
Backend --> PostgreSQL
Backend --> Redis
Backend --> Workers["Celery Workers"]
Workers --> PostgreSQL
Workers --> Redis
Workers --> GPU["GPU Processing<br/>(Modal.com OR Self-hosted)"]
```
## Prerequisites
Before starting, you need:
- **Production server** - 4+ cores, 8GB+ RAM, public IP
- **Two domain names** - e.g., `app.example.com` (frontend) and `api.example.com` (backend)
- **GPU processing** - Choose one:
- Modal.com account, OR
- GPU server with NVIDIA GPU (8GB+ VRAM)
- **HuggingFace account** - Free at https://huggingface.co
- Accept both Pyannote licenses (required for speaker diarization):
- https://huggingface.co/pyannote/speaker-diarization-3.1
- https://huggingface.co/pyannote/segmentation-3.0
- **LLM API** - For summaries and topic detection. Choose one:
- OpenAI API key at https://platform.openai.com/account/api-keys, OR
- Any OpenAI-compatible endpoint (vLLM, LiteLLM, Ollama, etc.)
- **AWS S3 bucket** - For storing audio files and transcripts (see [S3 Setup](#create-s3-bucket-for-transcript-storage) below)
### Optional (for live meeting rooms)
- [ ] **Daily.co account** - Free tier at https://dashboard.daily.co
- [ ] **AWS S3 bucket + IAM Role** - For Daily.co recording storage (separate from transcript storage)
---
## Configure DNS
```
Type: A Name: app Value: <your-server-ip>
Type: A Name: api Value: <your-server-ip>
```
---
## Deploy GPU Processing
Reflector requires GPU processing for transcription and speaker diarization. Choose one option:
| | **Modal.com (Cloud)** | **Self-Hosted GPU** |
|---|---|---|
| **Best for** | No GPU hardware, zero maintenance | Own GPU server, full control |
| **Pricing** | Pay-per-use | Fixed infrastructure cost |
### Option A: Modal.com (Serverless Cloud GPU)
#### Accept HuggingFace Licenses
Visit both pages and click "Accept":
- https://huggingface.co/pyannote/speaker-diarization-3.1
- https://huggingface.co/pyannote/segmentation-3.0
Generate a token at https://huggingface.co/settings/tokens
#### Deploy to Modal
There's an install script to help with this setup. It uses the Modal API to configure all the necessary pieces.
Alternatively, every operation the script performs can be done manually through the Modal dashboard UI.
```bash
uv tool install modal
modal setup # opens browser for authentication
git clone https://github.com/monadical-sas/reflector.git
cd reflector/gpu/modal_deployments
./deploy-all.sh --hf-token YOUR_HUGGINGFACE_TOKEN
```
**Save the output** - copy the configuration block, you'll need it soon.
See [Modal Setup](./modal-setup) for troubleshooting and details.
### Option B: Self-Hosted GPU
**Location: YOUR GPU SERVER**
Requires: NVIDIA GPU with 8GB+ VRAM, Ubuntu 22.04+, 40-50GB disk.
See [Self-Hosted GPU Setup](./self-hosted-gpu-setup) for complete instructions. Quick summary:
1. Install NVIDIA drivers and Docker
2. Clone repository: `git clone https://github.com/monadical-sas/reflector.git`
3. Configure `.env` with HuggingFace token
4. Start service with Docker compose
5. Set up Caddy reverse proxy for HTTPS
**Save your API key and HTTPS URL** - you'll need them soon.
---
## Prepare Server
**Location: dedicated reflector server**
### Install Docker
```bash
ssh user@your-server-ip
curl -fsSL https://get.docker.com | sh
sudo usermod -aG docker $USER
# Log out and back in for group changes
exit
ssh user@your-server-ip
docker --version # verify
```
### Firewall
Ensure ports 80 (HTTP) and 443 (HTTPS) are open for inbound traffic. The method varies by cloud provider and OS configuration.
**For live transcription without Daily/Whereby rooms**: WebRTC requires UDP port range 49152-65535 for media traffic.
### Clone Repository
The Docker images contain all application code. You clone the repository for configuration files and the compose definition:
```bash
git clone https://github.com/monadical-sas/reflector.git
cd reflector
```
---
## Create S3 Bucket for Transcript Storage
Reflector requires AWS S3 to store audio files during processing.
### Create Bucket
```bash
# Choose a unique bucket name
BUCKET_NAME="reflector-transcripts-yourname"
AWS_REGION="us-east-1"
# Create bucket
aws s3 mb s3://$BUCKET_NAME --region $AWS_REGION
```
### Create IAM User
Create an IAM user with S3 access for Reflector:
1. Go to AWS IAM Console → Users → Create User
2. Name: `reflector-transcripts`
3. Attach policy: `AmazonS3FullAccess` (or create a custom policy for just your bucket)
4. Create access key (Access key ID + Secret access key)
Save these credentials - you'll need them in the next step.
---
## Configure Environment
Reflector has two env files:
- `server/.env` - Backend configuration
- `www/.env` - Frontend configuration
### Backend Configuration
```bash
cp server/.env.example server/.env
nano server/.env
```
**Required settings:**
```env
# Database (defaults work with docker-compose.prod.yml)
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
# Redis
REDIS_HOST=redis
CELERY_BROKER_URL=redis://redis:6379/1
CELERY_RESULT_BACKEND=redis://redis:6379/1
# Your domains
BASE_URL=https://api.example.com
CORS_ORIGIN=https://app.example.com
CORS_ALLOW_CREDENTIALS=true
# Secret key - generate with: openssl rand -hex 32
SECRET_KEY=<your-generated-secret>
# GPU Processing - choose ONE option:
# Option A: Modal.com (paste from deploy-all.sh output)
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://yourname--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=<from-deploy-all.sh-output>
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://yourname--reflector-diarizer-web.modal.run
DIARIZATION_MODAL_API_KEY=<from-deploy-all.sh-output>
# Option B: Self-hosted GPU (use your GPU server URL and API key)
# TRANSCRIPT_BACKEND=modal
# TRANSCRIPT_URL=https://gpu.example.com
# TRANSCRIPT_MODAL_API_KEY=<your-generated-api-key>
# DIARIZATION_BACKEND=modal
# DIARIZATION_URL=https://gpu.example.com
# DIARIZATION_MODAL_API_KEY=<your-generated-api-key>
# Storage - where to store audio files and transcripts (requires AWS S3)
TRANSCRIPT_STORAGE_BACKEND=aws
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
# LLM - for generating titles, summaries, and topics
LLM_API_KEY=sk-your-openai-api-key
LLM_MODEL=gpt-4o-mini
# LLM_URL=https://api.openai.com/v1 # Optional: custom endpoint (vLLM, LiteLLM, Ollama, etc.)
# Auth - disable for initial setup (see a dedicated step for authentication)
AUTH_BACKEND=none
```
### Frontend Configuration
```bash
cp www/.env.example www/.env
nano www/.env
```
**Required settings:**
```env
# Your domains
SITE_URL=https://app.example.com
API_URL=https://api.example.com
WEBSOCKET_URL=wss://api.example.com
SERVER_API_URL=http://server:1250
# NextAuth
NEXTAUTH_URL=https://app.example.com
NEXTAUTH_SECRET=<generate-with-openssl-rand-hex-32>
# Disable login requirement for initial setup
FEATURE_REQUIRE_LOGIN=false
```
---
## Configure Caddy
```bash
cp Caddyfile.example Caddyfile
nano Caddyfile
```
Replace `example.com` with your domains. The `{$VAR:default}` syntax uses Caddy's env var substitution - you can either edit the file directly or set `FRONTEND_DOMAIN` and `API_DOMAIN` environment variables.
```
{$FRONTEND_DOMAIN:app.example.com} {
reverse_proxy web:3000
}
{$API_DOMAIN:api.example.com} {
reverse_proxy server:1250
}
```
---
## Start Services
```bash
docker compose -f docker-compose.prod.yml up -d
```
Wait for containers to start (first run may take 1-2 minutes to pull images and initialize).
---
## Verify Deployment
### Check services
```bash
docker compose -f docker-compose.prod.yml ps
# All should show "Up"
```
### Test API
```bash
curl https://api.example.com/health
# Should return: {"status":"healthy"}
```
### Test Frontend
- Visit https://app.example.com
- You should see the Reflector interface
- Try uploading an audio file to test transcription
If any verification fails, see [Troubleshooting](#troubleshooting) below.
---
## Enable Authentication (Required for Live Rooms)
By default, Reflector is open (no login required). **Authentication is required if you want to use Live Meeting Rooms.**
See [Authentication Setup](./auth-setup) for full Authentik OAuth configuration.
Quick summary:
1. Deploy Authentik on your server
2. Create OAuth provider in Authentik
3. Extract public key for JWT verification
4. Update `server/.env`: `AUTH_BACKEND=jwt` + `AUTH_JWT_AUDIENCE`
5. Update `www/.env`: `FEATURE_REQUIRE_LOGIN=true` + Authentik credentials
6. Mount JWT keys volume and restart services
---
## Enable Live Meeting Rooms
**Requires: Authentication Step**
Live rooms require Daily.co and AWS S3. See [Daily.co Setup](./daily-setup) for complete S3/IAM configuration instructions.
Note that Reflector also supports Whereby as a call provider - this doc doesn't cover its setup yet.
Quick config - Add to `server/.env`:
```env
DEFAULT_VIDEO_PLATFORM=daily
DAILY_API_KEY=<from-daily.co-dashboard>
DAILY_SUBDOMAIN=<your-daily-subdomain>
# S3 for recording storage
DAILYCO_STORAGE_AWS_BUCKET_NAME=<your-bucket>
DAILYCO_STORAGE_AWS_REGION=us-east-1
DAILYCO_STORAGE_AWS_ROLE_ARN=<arn:aws:iam::ACCOUNT:role/DailyCo>
```
Reload env and restart:
```bash
docker compose -f docker-compose.prod.yml up -d server worker
```
---
## Troubleshooting
### Check logs for errors
```bash
docker compose -f docker-compose.prod.yml logs server --tail 20
docker compose -f docker-compose.prod.yml logs worker --tail 20
```
### Services won't start
```bash
docker compose -f docker-compose.prod.yml logs
```
### CORS errors in browser
- Verify `CORS_ORIGIN` in `server/.env` matches your frontend domain exactly (including `https://`)
- Reload env: `docker compose -f docker-compose.prod.yml up -d server`
### SSL certificate errors
- Caddy auto-provisions Let's Encrypt certificates
- Ensure ports 80 and 443 are open
- Check: `docker compose -f docker-compose.prod.yml logs caddy`
### Transcription not working
- Check Modal dashboard: https://modal.com/apps
- Verify URLs in `server/.env` match deployed functions
- Check worker logs: `docker compose -f docker-compose.prod.yml logs worker`
### "Login required" but auth not configured
- Set `FEATURE_REQUIRE_LOGIN=false` in `www/.env`
- Rebuild frontend: `docker compose -f docker-compose.prod.yml up -d --force-recreate web`
### Database migrations or connectivity issues
Migrations run automatically on server startup. To check database connectivity or debug migration failures:
```bash
# Check server logs for migration errors
docker compose -f docker-compose.prod.yml logs server | grep -i -E "(alembic|migration|database|postgres)"
# Verify database connectivity
docker compose -f docker-compose.prod.yml exec server uv run python -c "from reflector.db import engine; print('DB connected')"
# Manually run migrations (if needed)
docker compose -f docker-compose.prod.yml exec server uv run alembic upgrade head
```

View File

@@ -1,63 +0,0 @@
---
sidebar_position: 2
title: System Requirements
---
# System Requirements
This page lists hardware and software requirements. For the complete deployment guide, see [Deployment Guide](./overview).
## Server Requirements
### Minimum Requirements
- **CPU**: 4 cores
- **RAM**: 8 GB
- **Storage**: 50 GB SSD
- **OS**: Ubuntu 22.04+ or compatible Linux
- **Network**: Public IP address
### Recommended Requirements
- **CPU**: 8+ cores
- **RAM**: 16 GB
- **Storage**: 100 GB SSD
- **Network**: 1 Gbps connection
## Software Requirements
- Docker Engine 20.10+
- Docker Compose 2.0+
## External Services
### Required
- **Two domain names** - One for frontend (e.g., `app.example.com`), one for API (e.g., `api.example.com`)
- **Modal.com account** - For GPU-accelerated transcription and diarization (free tier available)
- **HuggingFace account** - For Pyannote diarization model access
- **LLM API** - For generating summaries and topic detection. Options:
- OpenAI API (https://platform.openai.com/account/api-keys)
- Any OpenAI-compatible endpoint (vLLM, LiteLLM, Ollama)
- Self-hosted: Phi-4 14B 4-bit recommended (~8GB VRAM)
### Required for Live Meeting Rooms
- **Daily.co account** - For video conferencing (free tier available at https://dashboard.daily.co)
- **AWS S3 bucket + IAM Role** - For Daily.co to store recordings
- **Another AWS S3 bucket (optional, can reuse the one above)** - For Reflector to store "compiled" mp3 files and transient diarization process temporary files
### Optional
- **AWS S3** - For cloud storage of recordings and transcripts
- **Authentik** - For SSO/OIDC authentication
- **Sentry** - For error tracking
## Development Requirements
For local development only (not required for production deployment):
- Node.js 22+ (for frontend development)
- Python 3.12+ (for backend development)
- pnpm (for frontend package management)
- uv (for Python package management)

View File

@@ -1,307 +0,0 @@
---
sidebar_position: 5
title: Self-Hosted GPU Setup
---
# Self-Hosted GPU Setup
This guide covers deploying Reflector's GPU processing on your own server instead of Modal.com. For the complete deployment guide, see [Deployment Guide](./overview).
## When to Use Self-Hosted GPU
**Choose self-hosted GPU if you:**
- Have GPU hardware available (NVIDIA required)
- Want full control over processing
- Prefer fixed infrastructure costs over pay-per-use
- Have privacy or data locality requirements
- Need to process audio without external API calls
**Choose Modal.com instead if you:**
- Don't have GPU hardware
- Want zero infrastructure management
- Prefer pay-per-use pricing
- Need instant scaling for variable workloads
See [Modal.com Setup](./modal-setup) for cloud GPU deployment.
## What Gets Deployed
The self-hosted GPU service provides the same API endpoints as Modal:
- `POST /v1/audio/transcriptions` - Whisper transcription
- `POST /v1/audio/transcriptions-from-url` - Transcribe from URL
- `POST /diarize` - Pyannote speaker diarization
- `POST /translate` - Audio translation
Your main Reflector server connects to this service exactly like it connects to Modal - only the URL changes.
## Prerequisites
### Hardware
- **GPU**: NVIDIA GPU with 8GB+ VRAM (tested on Tesla T4 with 15GB)
- **CPU**: 4+ cores recommended
- **RAM**: 8GB minimum, 16GB recommended
- **Disk**: 40-50GB minimum
### Software
- Public IP address
- Domain name with DNS A record pointing to server
### Accounts
- **HuggingFace account** with accepted Pyannote licenses:
- https://huggingface.co/pyannote/speaker-diarization-3.1
- https://huggingface.co/pyannote/segmentation-3.0
- **HuggingFace access token** from https://huggingface.co/settings/tokens
## Docker Deployment
### Step 1: Install NVIDIA Driver
```bash
sudo apt update
sudo apt install -y nvidia-driver-535
sudo reboot
# After reboot, verify installation
nvidia-smi
```
Expected output: GPU details with driver version and CUDA version.
### Step 2: Install Docker
Follow the [official Docker installation guide](https://docs.docker.com/engine/install/ubuntu/) for your distribution.
After installation, add your user to the docker group:
```bash
sudo usermod -aG docker $USER
# Log out and back in for group changes
exit
# SSH back in
```
### Step 3: Install NVIDIA Container Toolkit
```bash
# Add NVIDIA repository and install toolkit
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \
sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
```
### Step 4: Clone Repository and Configure
```bash
git clone https://github.com/monadical-sas/reflector.git
cd reflector/gpu/self_hosted
# Create environment file
cat > .env << EOF
REFLECTOR_GPU_APIKEY=$(openssl rand -hex 16)
HF_TOKEN=your_huggingface_token_here
EOF
# Note the generated API key - you'll need it for main server config
cat .env
```
### Step 5: Build and Start
The repository includes a `compose.yml` file. Build and start:
```bash
# Build image (takes ~5 minutes, downloads ~10GB)
sudo docker compose build
# Start service
sudo docker compose up -d
# Wait for startup and verify
sleep 30
sudo docker compose logs
```
Look for: `INFO: Application startup complete. Uvicorn running on http://0.0.0.0:8000`
### Step 6: Verify GPU Access
```bash
# Check GPU is accessible from container
sudo docker exec $(sudo docker ps -q) nvidia-smi
```
Should show GPU with ~3GB VRAM used (models loaded).
---
## Configure HTTPS with Caddy
Caddy handles SSL automatically.
### Install Caddy
```bash
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https curl
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | \
sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | \
sudo tee /etc/apt/sources.list.d/caddy-stable.list
sudo apt update
sudo apt install -y caddy
```
### Configure Reverse Proxy
Edit the Caddyfile with your domain:
```bash
sudo nano /etc/caddy/Caddyfile
```
Add (replace `gpu.example.com` with your domain):
```
gpu.example.com {
reverse_proxy localhost:8000
}
```
Reload Caddy (auto-provisions SSL certificate):
```bash
sudo systemctl reload caddy
```
### Verify HTTPS
```bash
curl -I https://gpu.example.com/docs
# Should return HTTP/2 200
```
---
## Configure Main Reflector Server
On your main Reflector server, update `server/.env`:
```env
# GPU Processing - Self-hosted
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://gpu.example.com
TRANSCRIPT_MODAL_API_KEY=<your-generated-api-key>
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://gpu.example.com
DIARIZATION_MODAL_API_KEY=<your-generated-api-key>
```
**Note:** The backend type is `modal` because the self-hosted GPU service implements the same API contract as Modal.com. This allows you to switch between cloud and self-hosted GPU processing by only changing the URL and API key.
Restart services to apply:
```bash
docker compose -f docker-compose.prod.yml restart server worker
```
---
## Service Management
All commands in this section assume you're in `~/reflector/gpu/self_hosted/`.
```bash
# View logs
sudo docker compose logs -f
# Restart service
sudo docker compose restart
# Stop service
sudo docker compose down
# Check status
sudo docker compose ps
```
### Monitor GPU
```bash
# Check GPU usage
nvidia-smi
# Watch in real-time
watch -n 1 nvidia-smi
```
**Typical GPU memory usage:**
- Idle (models loaded): ~3GB VRAM
- During transcription: ~4-5GB VRAM
---
## Troubleshooting
### nvidia-smi fails after driver install
```bash
# Manually load kernel modules
sudo modprobe nvidia
nvidia-smi
```
### Service fails with "Could not download pyannote pipeline"
1. Verify HF_TOKEN is valid: `echo $HF_TOKEN`
2. Check model access at https://huggingface.co/pyannote/speaker-diarization-3.1
3. Update .env with correct token
4. Restart service: `sudo docker compose restart`
### Cannot connect to HTTPS endpoint
1. Verify DNS resolves: `dig +short gpu.example.com`
2. Check firewall: `sudo ufw status` (ports 80, 443 must be open)
3. Check Caddy: `sudo systemctl status caddy`
4. View Caddy logs: `sudo journalctl -u caddy -n 50`
### SSL certificate not provisioning
Requirements for Let's Encrypt:
- Ports 80 and 443 publicly accessible
- DNS resolves to server's public IP
- Valid domain (not localhost or private IP)
### Docker container won't start
```bash
# Check logs
sudo docker compose logs
# Common issues:
# - Port 8000 already in use
# - GPU not accessible (nvidia-ctk not configured)
# - Missing .env file
```
---
## Updating
```bash
cd ~/reflector/gpu/self_hosted
git pull
sudo docker compose build
sudo docker compose up -d
```

View File

@@ -1,61 +0,0 @@
---
sidebar_position: 1
title: Introduction
---
# Welcome to Reflector
Reflector is a privacy-focused, self-hosted AI-powered audio transcription and meeting analysis platform that provides real-time transcription, speaker diarization, and summarization for audio content and live meetings. With complete control over your data and infrastructure, you can run models on your own hardware (see the Self-Hosted GPU Setup guide) or use Modal.com for GPU processing.
## What is Reflector?
Reflector is a web application that utilizes AI to process audio content, providing:
- **Real-time Transcription**: Convert speech to text using [Whisper](https://github.com/openai/whisper) (multi-language) or [Parakeet](https://github.com/NVIDIA/NeMo) (English) models
- **Speaker Diarization**: Identify and label different speakers using [Pyannote](https://github.com/pyannote/pyannote-audio) 3.1
- **Topic Detection & Summarization**: Extract key topics and generate concise summaries using LLMs
- **Meeting Recording**: Create permanent records of meetings with searchable transcripts
![Reflector Transcript View](/img/reflector-transcript-view.png)
## Features
| Feature | Public Mode | Private Mode |
|--------------------------------------------|------------|--------------|
| **Authentication** | None required | Required |
| **Audio Upload** | ✅ | ✅ |
| **Live Microphone Streaming** | ✅ | ✅ |
| **Transcription** | ✅ | ✅ |
| **Speaker Diarization** | ✅ | ✅ |
| **Topic Detection** | ✅ | ✅ |
| **Summarization** | ✅ | ✅ |
| **Virtual Meeting Rooms (Whereby, Daily)** | ❌ | ✅ |
| **Browse Transcripts Page** | ❌ | ✅ |
| **Search Functionality** | ❌ | ✅ |
| **Persistent Storage** | ❌ | ✅ |
## Architecture Overview
Reflector consists of three main components:
- **Frontend**: React application built with Next.js
- **Backend**: Python server using FastAPI
- **Processing**: Scalable GPU workers for ML inference (Modal.com or local)
## Getting Started
Ready to deploy Reflector? Head over to our [Installation Guide](./installation/overview) to set up your own instance.
For a quick overview of how Reflector processes audio, check out our [Pipeline Documentation](./pipelines/overview).
## Open Source
Reflector is open source software developed by [Monadical](https://monadical.com) and licensed under the **MIT License**. We welcome contributions from the community!
- [GitHub Repository](https://github.com/monadical-sas/reflector)
- [Issue Tracker](https://github.com/monadical-sas/reflector/issues)
- [Pull Requests](https://github.com/monadical-sas/reflector/pulls)
## Support
Need help? Reach out to the community through GitHub Discussions.

View File

@@ -1,83 +0,0 @@
---
sidebar_position: 2
title: File Processing Pipeline
---
# File Processing Pipeline
The file processing pipeline handles uploaded audio files, optimizing for accuracy and throughput.
## Pipeline Stages
### 1. Input Stage
**Accepted Formats:**
- MP3 (most common)
- WAV (uncompressed)
- M4A (Apple format)
- WebM (browser recordings)
- MP4 (video with audio track)
**File Validation:**
- Sample rate: Any (will be resampled to 16kHz)
### 2. Pre-processing
**Audio Normalization:**
```yaml
# Convert to standard format
- Sample rate: 16kHz (Whisper requirement)
- Channels: Mono
- Bit depth: 16-bit
- Format: WAV
```
**Noise Reduction (Optional):**
- Background noise removal
- Echo cancellation
- High-pass filter for rumble
### 3. Chunking Strategy
Audio is split into segments for processing:
- Configurable chunk sizes
- Optional silence detection for natural breaks
- Parallel processing of chunks
### 4. Transcription Processing
Transcription uses OpenAI Whisper models via Modal.com or self-hosted GPU:
- Automatic language detection
- Word-level timestamps
### 5. Diarization (Speaker Identification)
Speaker diarization uses Pyannote 3.1:
1. **Voice Activity Detection (VAD)** - Identifies speech segments
2. **Speaker Embedding** - Extracts voice characteristics
3. **Clustering** - Groups similar voices
4. **Segmentation** - Assigns speaker labels to time segments
### 6. Alignment & Merging
- Combines transcription with speaker diarization
- Maps speaker labels to transcript segments
- Resolves timing overlaps
- Validates timeline consistency
### 7. Post-processing Chain
- **Text Formatting**: Punctuation, capitalization
- **Topic Detection**: LLM-based topic extraction
- **Summarization**: AI-generated summaries and action items
### 8. Storage & Delivery
**File Storage:**
- Original audio: S3 (optional)
- Transcript exports: JSON, VTT, TXT
**Notifications:**
- WebSocket updates during processing
- Webhook notifications on completion (optional)

View File

@@ -1,28 +0,0 @@
---
title: API Reference
---
# API Reference
The complete API documentation is auto-generated from the OpenAPI specification.
## Interactive Documentation
When running Reflector, interactive API docs are available at:
- **Swagger UI**: `https://your-api-domain/docs`
- **ReDoc**: `https://your-api-domain/redoc`
## OpenAPI Specification
The raw OpenAPI 3.0 specification can be downloaded from:
```
https://your-api-domain/openapi.json
```
A static copy is also available: [openapi.json](/openapi.json)
## Authentication
See [Authentication Setup](../installation/auth-setup) for configuring API authentication.

View File

@@ -1,112 +0,0 @@
---
sidebar_position: 100
title: Roadmap
---
# Product Roadmap
Our development roadmap for Reflector, focusing on expanding capabilities while maintaining privacy and performance.
## Planned Features
### 🌍 Multi-Language Support Enhancement
**Current State:**
- Whisper supports multi-language transcription
- Parakeet supports English only with high accuracy
**Planned Improvements:**
- Default language selection per room/user
- Automatic language detection improvements
- Multi-language diarization support
- RTL (Right-to-Left) language UI support
- Language-specific post-processing rules
### 🏠 Self-Hosted Room Providers
**Jitsi Integration**
Moving beyond Whereby to support self-hosted video conferencing:
- No API keys required
- Complete control over video infrastructure
- Custom branding and configuration
- Lower operational costs
- Enhanced privacy with self-hosted video
**Implementation Plan:**
- WebRTC bridge for Jitsi Meet
- Room management API integration
- Recording synchronization
- Participant tracking
### 📅 Calendar Integration
**Planned Capabilities:**
- Google Calendar synchronization
- Microsoft Outlook integration
- Automatic meeting room creation
- Pre-meeting document preparation
- Post-meeting transcript delivery
- Recurring meeting support
**Features:**
- Auto-join scheduled meetings
- Calendar-based access control
- Meeting agenda import
- Action item export to calendar
## Future Considerations
### Enhanced Analytics
- Meeting insights dashboard
- Speaker participation metrics
- Topic trends over time
- Team collaboration patterns
### Advanced AI Features
- Real-time sentiment analysis
- Emotion detection
- Meeting quality scores
- Automated coaching suggestions
### Integration Ecosystem
- Slack/Teams notifications
- CRM integration (Salesforce, HubSpot)
- Project management tools (Jira, Asana)
- Knowledge bases (Notion, Confluence)
### Performance Improvements
- WebAssembly for client-side processing
- Edge computing support
- 5G network optimization
- Blockchain for transcript verification
## Contributing
We welcome community contributions! Areas where you can help:
1. **Language Support**: Add support for your language
2. **Integrations**: Connect with your favorite tools
3. **Models**: Fine-tune models for specific domains
4. **Documentation**: Improve guides and examples
See our [Contributing Guide](https://github.com/monadical-sas/reflector/blob/main/CONTRIBUTING.md) for details.
## Timeline
We don't provide specific dates as development depends on community contributions and priorities. Features are generally released when they're ready and properly tested.
## Feature Requests
Have an idea for Reflector? We'd love to hear it!
- [Open a GitHub Issue](https://github.com/monadical-sas/reflector/issues/new)
- [Join our Discord](#)
- [Email us](mailto:reflector@monadical.com)
## Stay Updated
- Watch our [GitHub repository](https://github.com/monadical-sas/reflector)
- Follow our [blog](#)
- Subscribe to our [newsletter](#)

View File

@@ -1,163 +0,0 @@
import {themes as prismThemes} from 'prism-react-renderer';
import type {Config} from '@docusaurus/types';
import type * as Preset from '@docusaurus/preset-classic';
import type * as OpenApiPlugin from 'docusaurus-plugin-openapi-docs';

// Docusaurus configuration for the Reflector documentation site.
const config: Config = {
  title: 'Reflector',
  tagline: 'AI-powered audio transcription and meeting analysis platform',
  favicon: 'img/favicon.ico',

  // Deployment target (GitHub Pages).
  // NOTE(review): baseUrl '/' with an org *.github.io URL implies a custom
  // domain or an organization site; a project site would normally need
  // baseUrl '/reflector/' — confirm against the actual Pages setup.
  url: 'https://monadical-sas.github.io',
  baseUrl: '/',
  organizationName: 'monadical-sas',
  projectName: 'reflector',

  onBrokenLinks: 'throw',
  onBrokenMarkdownLinks: 'warn',

  markdown: {
    mermaid: true,  // enable mermaid code blocks (theme registered below)
  },

  i18n: {
    defaultLocale: 'en',
    locales: ['en'],
  },

  presets: [
    [
      'classic',
      {
        docs: {
          sidebarPath: './sidebars.ts',
          editUrl: 'https://github.com/monadical-sas/reflector/tree/main/docs/',
        },
        blog: false,  // docs-only site, no blog
        theme: {
          customCss: './src/css/custom.css',
        },
      } satisfies Preset.Options,
    ],
  ],

  plugins: [
    [
      'docusaurus-plugin-openapi-docs',
      {
        id: 'openapi',
        docsPluginId: 'classic',
        config: {
          // Spec instance key; this is the id passed to the
          // `docusaurus gen-api-docs <id>` CLI.
          reflectorapi: {
            specPath: 'static/openapi.json', // Use local file fetched by script
            outputDir: 'docs/reference/api-generated',
            sidebarOptions: {
              groupPathsBy: 'tag',
              categoryLinkSource: 'tag',
            },
            downloadUrl: '/openapi.json',
            hideSendButton: false,
            showExtensions: true,
          },
        // NOTE(review): the plugin's examples apply `satisfies
        // OpenApiPlugin.Options` to each spec entry rather than the whole
        // `config` map — verify this type assertion placement compiles.
        } satisfies OpenApiPlugin.Options,
      },
    ],
  ],

  themes: ['docusaurus-theme-openapi-docs', '@docusaurus/theme-mermaid'],

  themeConfig: {
    image: 'img/reflector-social-card.jpg',
    colorMode: {
      defaultMode: 'light',
      disableSwitch: false,
      respectPrefersColorScheme: true,
    },
    navbar: {
      title: 'Reflector',
      logo: {
        alt: 'Reflector Logo',
        src: 'img/reflector-logo.svg',
      },
      items: [
        {
          type: 'docSidebar',
          sidebarId: 'tutorialSidebar',
          position: 'left',
          label: 'Documentation',
        },
        {
          to: '/docs/reference/api',
          label: 'API',
          position: 'left',
        },
        {
          href: 'https://github.com/monadical-sas/reflector',
          label: 'GitHub',
          position: 'right',
        },
      ],
    },
    footer: {
      style: 'dark',
      links: [
        {
          title: 'Documentation',
          items: [
            {
              label: 'Introduction',
              to: '/docs/intro',
            },
            {
              label: 'Installation',
              to: '/docs/installation/overview',
            },
            {
              label: 'API Reference',
              to: '/docs/reference/api',
            },
          ],
        },
        {
          title: 'Resources',
          items: [
            {
              label: 'Architecture',
              to: '/docs/reference/architecture/overview',
            },
            {
              label: 'Pipelines',
              to: '/docs/pipelines/overview',
            },
            {
              label: 'Roadmap',
              to: '/docs/roadmap',
            },
          ],
        },
        {
          title: 'More',
          items: [
            {
              label: 'GitHub',
              href: 'https://github.com/monadical-sas/reflector',
            },
            {
              label: 'Docker Hub',
              href: 'https://hub.docker.com/r/reflector/backend',
            },
          ],
        },
      ],
    },
    copyright: `Copyright © ${new Date().getFullYear()} <a href="https://monadical.com" target="_blank" rel="noopener noreferrer">Monadical</a>. Licensed under MIT. Built with Docusaurus.`,
    prism: {
      theme: prismThemes.github,
      darkTheme: prismThemes.dracula,
      additionalLanguages: ['python', 'bash', 'docker', 'yaml'],
    },
  } satisfies Preset.ThemeConfig,
};

export default config;

23526
docs/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,53 +0,0 @@
{
"name": "docs",
"version": "0.0.0",
"private": true,
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start",
"build": "docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids",
"typecheck": "tsc",
"fetch-openapi": "./scripts/fetch-openapi.sh",
"gen-api-docs": "npm run fetch-openapi && docusaurus gen-api-docs reflectorapi",
"prebuild": "npm run fetch-openapi"
},
"dependencies": {
"@docusaurus/core": "3.6.3",
"@docusaurus/preset-classic": "3.6.3",
"@mdx-js/react": "^3.0.0",
"clsx": "^2.0.0",
"docusaurus-plugin-openapi-docs": "^4.5.1",
"docusaurus-theme-openapi-docs": "^4.5.1",
"@docusaurus/theme-mermaid": "3.6.3",
"prism-react-renderer": "^2.3.0",
"react": "^18.0.0",
"react-dom": "^18.0.0"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "3.6.3",
"@docusaurus/tsconfig": "3.6.3",
"@docusaurus/types": "3.6.3",
"typescript": "~5.6.2"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 3 chrome version",
"last 3 firefox version",
"last 5 safari version"
]
},
"engines": {
"node": ">=18.0"
}
}

View File

@@ -1,115 +0,0 @@
#!/bin/bash
# Fetch the OpenAPI specification from the FastAPI backend.
# Boots the server in the background, downloads /openapi.json, validates and
# pretty-prints it, then shuts the server down. Used during the docs build.
set -e

echo "📡 Fetching OpenAPI specification from FastAPI backend..."

# ANSI colors for status output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
BACKEND_DIR="../server"
OPENAPI_OUTPUT="./static/openapi.json"
SERVER_PORT=1250 # Reflector uses port 1250 by default
MAX_WAIT=30

# Check if backend directory exists
if [ ! -d "$BACKEND_DIR" ]; then
    echo -e "${RED}Error: Backend directory not found at $BACKEND_DIR${NC}"
    exit 1
fi

# Print the HTTP status of the openapi.json endpoint ("000" while down).
check_server() {
    curl -s -o /dev/null -w "%{http_code}" "http://localhost:${SERVER_PORT}/openapi.json" 2>/dev/null
}

# Stop the background server on any exit path.
# NOTE(review): kill targets the wrapper PID started below; if `uv run`
# spawns a child process it may outlive this handler — confirm in CI.
cleanup() {
    if [ ! -z "$SERVER_PID" ]; then
        echo -e "\n${YELLOW}Stopping FastAPI server (PID: $SERVER_PID)...${NC}"
        kill $SERVER_PID 2>/dev/null || true
        wait $SERVER_PID 2>/dev/null || true
    fi
}

# Ensure cleanup runs on normal exit and on interruption
trap cleanup EXIT INT TERM

# Change to backend directory
cd "$BACKEND_DIR"

# Prefer uv; fall back to a plain python interpreter
if ! command -v uv &> /dev/null; then
    echo -e "${YELLOW}uv not found, checking for python...${NC}"
    if command -v python3 &> /dev/null; then
        PYTHON_CMD="python3"
    elif command -v python &> /dev/null; then
        PYTHON_CMD="python"
    else
        echo -e "${RED}Error: Neither uv nor python found${NC}"
        exit 1
    fi
    RUN_CMD="$PYTHON_CMD -m"
else
    RUN_CMD="uv run -m"
fi

# Start the FastAPI server in the background (let it use default port 1250)
echo -e "${YELLOW}Starting FastAPI server...${NC}"
$RUN_CMD reflector.app > /dev/null 2>&1 &
SERVER_PID=$!

# Poll once per second until the server answers or MAX_WAIT seconds elapse
echo -n "Waiting for server to be ready"
WAITED=0
while [ $WAITED -lt $MAX_WAIT ]; do
    if [ "$(check_server)" = "200" ]; then
        echo -e " ${GREEN}✓${NC}"
        break
    fi
    echo -n "."
    sleep 1
    WAITED=$((WAITED + 1))
done

if [ $WAITED -ge $MAX_WAIT ]; then
    echo -e " ${RED}✗${NC}"
    echo -e "${RED}Error: Server failed to start within ${MAX_WAIT} seconds${NC}"
    exit 1
fi

# Change back to docs directory
cd - > /dev/null

# Create static directory if it doesn't exist
mkdir -p "$(dirname "$OPENAPI_OUTPUT")"

# Fetch the OpenAPI specification
echo -e "${YELLOW}Fetching OpenAPI specification...${NC}"
if curl -s "http://localhost:${SERVER_PORT}/openapi.json" -o "$OPENAPI_OUTPUT"; then
    echo -e "${GREEN}✓ OpenAPI specification saved to $OPENAPI_OUTPUT${NC}"
    # Validate and pretty-print when jq is available
    if command -v jq &> /dev/null; then
        if jq empty "$OPENAPI_OUTPUT" 2>/dev/null; then
            echo -e "${GREEN}✓ OpenAPI specification is valid JSON${NC}"
            # Pretty print the JSON (write to temp file, then replace)
            jq . "$OPENAPI_OUTPUT" > "${OPENAPI_OUTPUT}.tmp" && mv "${OPENAPI_OUTPUT}.tmp" "$OPENAPI_OUTPUT"
        else
            echo -e "${RED}Error: Invalid JSON in OpenAPI specification${NC}"
            exit 1
        fi
    fi
else
    echo -e "${RED}Error: Failed to fetch OpenAPI specification${NC}"
    exit 1
fi

echo -e "${GREEN}✅ OpenAPI specification successfully fetched!${NC}"

View File

@@ -1,58 +0,0 @@
import type {SidebarsConfig} from '@docusaurus/plugin-content-docs';

// Manually curated sidebar for the Reflector docs.
// Each string is a doc id that must match a markdown file under docs/.
const sidebars: SidebarsConfig = {
  tutorialSidebar: [
    'intro',
    {
      type: 'category',
      label: 'Concepts',
      collapsed: false,
      items: [
        'concepts/overview',
        'concepts/modes',
        'concepts/pipeline',
      ],
    },
    {
      type: 'category',
      label: 'Installation',
      collapsed: false,
      items: [
        'installation/overview',
        'installation/requirements',
        'installation/docker-setup',
        'installation/modal-setup',
        'installation/self-hosted-gpu-setup',
        'installation/auth-setup',
        'installation/daily-setup',
      ],
    },
    {
      type: 'category',
      label: 'Pipelines',
      items: [
        'pipelines/file-pipeline',
      ],
    },
    {
      type: 'category',
      label: 'Reference',
      items: [
        {
          type: 'category',
          label: 'API',
          items: [
            {
              // Plain link: the API pages are generated separately by
              // docusaurus-plugin-openapi-docs, not listed as doc ids here.
              type: 'link',
              label: 'OpenAPI Reference',
              href: '/docs/reference/api',
            },
          ],
        },
      ],
    },
    'roadmap',
  ],
};

export default sidebars;

View File

@@ -1,70 +0,0 @@
import clsx from 'clsx';
import Heading from '@theme/Heading';
import styles from './styles.module.css';

// Data shape for one feature card on the homepage.
type FeatureItem = {
  title: string;
  Svg: React.ComponentType<React.ComponentProps<'svg'>>;
  description: JSX.Element;
};

// NOTE(review): this is the stock Docusaurus scaffold content — presumably a
// placeholder to be replaced with Reflector-specific features; confirm.
const FeatureList: FeatureItem[] = [
  {
    title: 'Easy to Use',
    Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
    description: (
      <>
        Docusaurus was designed from the ground up to be easily installed and
        used to get your website up and running quickly.
      </>
    ),
  },
  {
    title: 'Focus on What Matters',
    Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
    description: (
      <>
        Docusaurus lets you focus on your docs, and we&apos;ll do the chores. Go
        ahead and move your docs into the <code>docs</code> directory.
      </>
    ),
  },
  {
    title: 'Powered by React',
    Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
    description: (
      <>
        Extend or customize your website layout by reusing React. Docusaurus can
        be extended while reusing the same header and footer.
      </>
    ),
  },
];

// Renders a single feature card: centered SVG illustration, title, blurb.
function Feature({title, Svg, description}: FeatureItem) {
  return (
    <div className={clsx('col col--4')}>
      <div className="text--center">
        <Svg className={styles.featureSvg} role="img" />
      </div>
      <div className="text--center padding-horiz--md">
        <Heading as="h3">{title}</Heading>
        <p>{description}</p>
      </div>
    </div>
  );
}

// Three-column feature section shown on the landing page.
export default function HomepageFeatures(): JSX.Element {
  return (
    <section className={styles.features}>
      <div className="container">
        <div className="row">
          {FeatureList.map((props, idx) => (
            <Feature key={idx} {...props} />
          ))}
        </div>
      </div>
    </section>
  );
}

View File

@@ -1,11 +0,0 @@
/* Horizontal band hosting the homepage feature cards. */
.features {
  display: flex;
  align-items: center;
  width: 100%;
  padding: 2rem 0;
}

/* Fixed square footprint for each feature illustration. */
.featureSvg {
  width: 200px;
  height: 200px;
}

View File

@@ -1,46 +0,0 @@
/**
 * Reflector Documentation Theme
 * Based on frontend colors from www/app/styles/theme.ts
 */

@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap');

/* Light-mode palette and typography (Infima CSS-variable overrides). */
:root {
  /* Primary brand blue plus derived shades used by Infima components */
  --ifm-color-primary: #3158E2;
  --ifm-color-primary-dark: #2847C9;
  --ifm-color-primary-darker: #2442BF;
  --ifm-color-primary-darkest: #1D369C;
  --ifm-color-primary-light: #4A6FE5;
  --ifm-color-primary-lighter: #5F81E8;
  --ifm-color-primary-lightest: #8DA6F0;
  /* Surfaces and text */
  --ifm-background-color: #FFFFFF;
  --ifm-background-surface-color: #F4F4F4;
  --ifm-font-color-base: #1A202C;
  --ifm-font-color-secondary: #838383;
  /* Code and typography */
  --ifm-code-font-size: 95%;
  --docusaurus-highlighted-code-line-bg: rgba(49, 88, 226, 0.1);
  --ifm-font-family-base: 'Poppins', system-ui, -apple-system, sans-serif;
  --ifm-font-family-monospace: 'Fira Code', 'Monaco', 'Consolas', monospace;
  --ifm-navbar-background-color: #FFFFFF;
  --ifm-heading-font-weight: 600;
}

/* Dark-mode palette: lighter blues for contrast on dark surfaces. */
[data-theme='dark'] {
  --ifm-color-primary: #B1CBFF;
  --ifm-color-primary-dark: #91B3FF;
  --ifm-color-primary-darker: #81A7FF;
  --ifm-color-primary-darkest: #5189FF;
  --ifm-color-primary-light: #D1DFFF;
  --ifm-color-primary-lighter: #E1EBFF;
  --ifm-color-primary-lightest: #F0F5FF;
  --ifm-background-color: #0C0D0E;
  --ifm-background-surface-color: #1A202C;
  --ifm-font-color-base: #E2E8F0;
  --ifm-font-color-secondary: #A0AEC0;
  --docusaurus-highlighted-code-line-bg: rgba(177, 203, 255, 0.1);
  --ifm-navbar-background-color: #1A202C;
}

View File

@@ -1,23 +0,0 @@
/* Scoped styles for the landing hero (CSS module: class names are localized). */

/* Full-width hero strip; relative positioning anchors decorative children. */
.heroBanner {
  position: relative;
  overflow: hidden;
  text-align: center;
  padding: 4rem 0;
}

/* Tighter padding on narrow viewports. */
@media screen and (max-width: 996px) {
  .heroBanner {
    padding: 2rem;
  }
}

/* Horizontally centered call-to-action row. */
.buttons {
  display: flex;
  justify-content: center;
  align-items: center;
}

View File

@@ -1,7 +0,0 @@
import React from 'react';
import { Redirect } from '@docusaurus/router';
import useBaseUrl from '@docusaurus/useBaseUrl';

/** Site root: immediately forwards visitors to the documentation intro page. */
export default function Home(): JSX.Element {
  const introPath = useBaseUrl('/docs/intro');
  return <Redirect to={introPath} />;
}

View File

@@ -1,7 +0,0 @@
---
title: Markdown page example
---
# Markdown page example
You don't need React to write simple standalone pages.

View File

View File

View File

View File

@@ -1,17 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.9.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!-- Cleaned: removed stray <image> references to local Ref/*.jpg files left by
     the Illustrator export; they pointed outside the 500x500 viewBox and to
     assets that do not exist in the repository. -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" x="0px" y="0px"
	 viewBox="0 0 500 500" style="enable-background:new 0 0 500 500;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#B6B6B6;}
	.st1{fill:#4A4A4A;}
</style>
<g>
	<polygon class="st0" points="227.5,51.5 86.5,150.1 100.8,383.9 244.3,249.8 "/>
	<polygon class="st1" points="305.4,421.4 423.9,286 244.3,249.8 100.8,383.9 "/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 965 B

View File

@@ -1,17 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.9.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!-- Cleaned: removed stray <image> references to local Ref/*.jpg files left by
     the Illustrator export; they pointed outside the 500x500 viewBox and to
     assets that do not exist in the repository. -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" x="0px" y="0px"
	 viewBox="0 0 500 500" style="enable-background:new 0 0 500 500;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#B6B6B6;}
	.st1{fill:#4A4A4A;}
</style>
<g>
	<polygon class="st0" points="227.5,51.5 86.5,150.1 100.8,383.9 244.3,249.8 "/>
	<polygon class="st1" points="305.4,421.4 423.9,286 244.3,249.8 100.8,383.9 "/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 965 B

Binary file not shown.

Before

Width:  |  Height:  |  Size: 525 KiB

View File

@@ -1,171 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1088" height="687.962" viewBox="0 0 1088 687.962">
<title>Easy to Use</title>
<g id="Group_12" data-name="Group 12" transform="translate(-57 -56)">
<g id="Group_11" data-name="Group 11" transform="translate(57 56)">
<path id="Path_83" data-name="Path 83" d="M1017.81,560.461c-5.27,45.15-16.22,81.4-31.25,110.31-20,38.52-54.21,54.04-84.77,70.28a193.275,193.275,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.282,657.282,0,0,0-104.09-13.16q-14.97-.675-29.97-.67c-15.42.02-293.07,5.29-360.67-131.57-16.69-33.76-28.13-75-32.24-125.27-11.63-142.12,52.29-235.46,134.74-296.47,155.97-115.41,369.76-110.57,523.43,7.88C941.15,276.621,1036.99,396.031,1017.81,560.461Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
<path id="Path_84" data-name="Path 84" d="M986.56,670.771c-20,38.52-47.21,64.04-77.77,80.28a193.272,193.272,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.3,657.3,0,0,0-104.09-13.16q-14.97-.675-29.97-.67-23.13.03-46.25,1.72c-100.17,7.36-253.82-6.43-321.42-143.29L382,283.981,444.95,445.6l20.09,51.59,55.37-75.98L549,381.981l130.2,149.27,36.8-81.27L970.78,657.9l14.21,11.59Z" transform="translate(-56 -106.019)" fill="#f2f2f2"/>
<path id="Path_85" data-name="Path 85" d="M302,282.962l26-57,36,83-31-60Z" opacity="0.1"/>
<path id="Path_86" data-name="Path 86" d="M610.5,753.821q-14.97-.675-29.97-.67L465.04,497.191Z" transform="translate(-56 -106.019)" opacity="0.1"/>
<path id="Path_87" data-name="Path 87" d="M464.411,315.191,493,292.962l130,150-132-128Z" opacity="0.1"/>
<path id="Path_88" data-name="Path 88" d="M908.79,751.051a193.265,193.265,0,0,1-27.46,11.94L679.2,531.251Z" transform="translate(-56 -106.019)" opacity="0.1"/>
<circle id="Ellipse_11" data-name="Ellipse 11" cx="3" cy="3" r="3" transform="translate(479 98.962)" fill="#f2f2f2"/>
<circle id="Ellipse_12" data-name="Ellipse 12" cx="3" cy="3" r="3" transform="translate(396 201.962)" fill="#f2f2f2"/>
<circle id="Ellipse_13" data-name="Ellipse 13" cx="2" cy="2" r="2" transform="translate(600 220.962)" fill="#f2f2f2"/>
<circle id="Ellipse_14" data-name="Ellipse 14" cx="2" cy="2" r="2" transform="translate(180 265.962)" fill="#f2f2f2"/>
<circle id="Ellipse_15" data-name="Ellipse 15" cx="2" cy="2" r="2" transform="translate(612 96.962)" fill="#f2f2f2"/>
<circle id="Ellipse_16" data-name="Ellipse 16" cx="2" cy="2" r="2" transform="translate(736 192.962)" fill="#f2f2f2"/>
<circle id="Ellipse_17" data-name="Ellipse 17" cx="2" cy="2" r="2" transform="translate(858 344.962)" fill="#f2f2f2"/>
<path id="Path_89" data-name="Path 89" d="M306,121.222h-2.76v-2.76h-1.48v2.76H299V122.7h2.76v2.759h1.48V122.7H306Z" fill="#f2f2f2"/>
<path id="Path_90" data-name="Path 90" d="M848,424.222h-2.76v-2.76h-1.48v2.76H841V425.7h2.76v2.759h1.48V425.7H848Z" fill="#f2f2f2"/>
<path id="Path_91" data-name="Path 91" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
<path id="Path_92" data-name="Path 92" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" opacity="0.1"/>
<ellipse id="Ellipse_18" data-name="Ellipse 18" cx="544" cy="30" rx="544" ry="30" transform="translate(0 583.962)" fill="#3f3d56"/>
<path id="Path_93" data-name="Path 93" d="M624,677.981c0,33.137-14.775,24-33,24s-33,9.137-33-24,33-96,33-96S624,644.844,624,677.981Z" transform="translate(-56 -106.019)" fill="#ff6584"/>
<path id="Path_94" data-name="Path 94" d="M606,690.66c0,15.062-6.716,10.909-15,10.909s-15,4.153-15-10.909,15-43.636,15-43.636S606,675.6,606,690.66Z" transform="translate(-56 -106.019)" opacity="0.1"/>
<rect id="Rectangle_97" data-name="Rectangle 97" width="92" height="18" rx="9" transform="translate(489 604.962)" fill="#2f2e41"/>
<rect id="Rectangle_98" data-name="Rectangle 98" width="92" height="18" rx="9" transform="translate(489 586.962)" fill="#2f2e41"/>
<path id="Path_95" data-name="Path 95" d="M193,596.547c0,55.343,34.719,100.126,77.626,100.126" transform="translate(-56 -106.019)" fill="#3f3d56"/>
<path id="Path_96" data-name="Path 96" d="M270.626,696.673c0-55.965,38.745-101.251,86.626-101.251" transform="translate(-56 -106.019)" fill="#6c63ff"/>
<path id="Path_97" data-name="Path 97" d="M221.125,601.564c0,52.57,22.14,95.109,49.5,95.109" transform="translate(-56 -106.019)" fill="#6c63ff"/>
<path id="Path_98" data-name="Path 98" d="M270.626,696.673c0-71.511,44.783-129.377,100.126-129.377" transform="translate(-56 -106.019)" fill="#3f3d56"/>
<path id="Path_99" data-name="Path 99" d="M254.3,697.379s11.009-.339,14.326-2.7,16.934-5.183,17.757-1.395,16.544,18.844,4.115,18.945-28.879-1.936-32.19-3.953S254.3,697.379,254.3,697.379Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
<path id="Path_100" data-name="Path 100" d="M290.716,710.909c-12.429.1-28.879-1.936-32.19-3.953-2.522-1.536-3.527-7.048-3.863-9.591l-.368.014s.7,8.879,4.009,10.9,19.761,4.053,32.19,3.953c3.588-.029,4.827-1.305,4.759-3.2C294.755,710.174,293.386,710.887,290.716,710.909Z" transform="translate(-56 -106.019)" opacity="0.2"/>
<path id="Path_101" data-name="Path 101" d="M777.429,633.081c0,38.029,23.857,68.8,53.341,68.8" transform="translate(-56 -106.019)" fill="#3f3d56"/>
<path id="Path_102" data-name="Path 102" d="M830.769,701.882c0-38.456,26.623-69.575,59.525-69.575" transform="translate(-56 -106.019)" fill="#6c63ff"/>
<path id="Path_103" data-name="Path 103" d="M796.755,636.528c0,36.124,15.213,65.354,34.014,65.354" transform="translate(-56 -106.019)" fill="#6c63ff"/>
<path id="Path_104" data-name="Path 104" d="M830.769,701.882c0-49.139,30.773-88.9,68.8-88.9" transform="translate(-56 -106.019)" fill="#3f3d56"/>
<path id="Path_105" data-name="Path 105" d="M819.548,702.367s7.565-.233,9.844-1.856,11.636-3.562,12.2-.958,11.368,12.949,2.828,13.018-19.844-1.33-22.119-2.716S819.548,702.367,819.548,702.367Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
<path id="Path_106" data-name="Path 106" d="M844.574,711.664c-8.54.069-19.844-1.33-22.119-2.716-1.733-1.056-2.423-4.843-2.654-6.59l-.253.01s.479,6.1,2.755,7.487,13.579,2.785,22.119,2.716c2.465-.02,3.317-.9,3.27-2.2C847.349,711.159,846.409,711.649,844.574,711.664Z" transform="translate(-56 -106.019)" opacity="0.2"/>
<path id="Path_107" data-name="Path 107" d="M949.813,724.718s11.36-1.729,14.5-4.591,16.89-7.488,18.217-3.667,19.494,17.447,6.633,19.107-30.153,1.609-33.835-.065S949.813,724.718,949.813,724.718Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
<path id="Path_108" data-name="Path 108" d="M989.228,734.173c-12.86,1.659-30.153,1.609-33.835-.065-2.8-1.275-4.535-6.858-5.2-9.45l-.379.061s1.833,9.109,5.516,10.783,20.975,1.725,33.835.065c3.712-.479,4.836-1.956,4.529-3.906C993.319,732.907,991.991,733.817,989.228,734.173Z" transform="translate(-56 -106.019)" opacity="0.2"/>
<path id="Path_109" data-name="Path 109" d="M670.26,723.9s9.587-1.459,12.237-3.875,14.255-6.32,15.374-3.095,16.452,14.725,5.6,16.125-25.448,1.358-28.555-.055S670.26,723.9,670.26,723.9Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
<path id="Path_110" data-name="Path 110" d="M703.524,731.875c-10.853,1.4-25.448,1.358-28.555-.055-2.367-1.076-3.827-5.788-4.39-7.976l-.32.051s1.547,7.687,4.655,9.1,17.7,1.456,28.555.055c3.133-.4,4.081-1.651,3.822-3.3C706.977,730.807,705.856,731.575,703.524,731.875Z" transform="translate(-56 -106.019)" opacity="0.2"/>
<path id="Path_111" data-name="Path 111" d="M178.389,719.109s7.463-1.136,9.527-3.016,11.1-4.92,11.969-2.409,12.808,11.463,4.358,12.553-19.811,1.057-22.23-.043S178.389,719.109,178.389,719.109Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
<path id="Path_112" data-name="Path 112" d="M204.285,725.321c-8.449,1.09-19.811,1.057-22.23-.043-1.842-.838-2.979-4.506-3.417-6.209l-.249.04s1.2,5.984,3.624,7.085,13.781,1.133,22.23.043c2.439-.315,3.177-1.285,2.976-2.566C206.973,724.489,206.1,725.087,204.285,725.321Z" transform="translate(-56 -106.019)" opacity="0.2"/>
<path id="Path_113" data-name="Path 113" d="M439.7,707.337c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873,42.118-36.793,93.694-36.793S439.7,677.117,439.7,707.337Z" transform="translate(-56 -106.019)" opacity="0.1"/>
<path id="Path_114" data-name="Path 114" d="M439.7,699.9c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873S295.04,663.1,346.616,663.1,439.7,669.676,439.7,699.9Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
</g>
<g id="docusaurus_keytar" transform="translate(312.271 493.733)">
<path id="Path_40" data-name="Path 40" d="M99,52h91.791V89.153H99Z" transform="translate(5.904 -14.001)" fill="#fff" fill-rule="evenodd"/>
<path id="Path_41" data-name="Path 41" d="M24.855,163.927A21.828,21.828,0,0,1,5.947,153a21.829,21.829,0,0,0,18.908,32.782H46.71V163.927Z" transform="translate(-3 -4.634)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_42" data-name="Path 42" d="M121.861,61.1l76.514-4.782V45.39A21.854,21.854,0,0,0,176.52,23.535H78.173L75.441,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L64.513,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L53.586,18.8a3.154,3.154,0,0,0-5.464,0L45.39,23.535c-.024,0-.046,0-.071,0l-4.526-4.525a3.153,3.153,0,0,0-5.276,1.414l-1.5,5.577-5.674-1.521a3.154,3.154,0,0,0-3.863,3.864L26,34.023l-5.575,1.494a3.155,3.155,0,0,0-1.416,5.278l4.526,4.526c0,.023,0,.046,0,.07L18.8,48.122a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,59.05a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,69.977a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,80.9a3.154,3.154,0,0,0,0,5.464L23.535,89.1,18.8,91.832a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,102.76a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,113.687a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,124.615a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,135.542a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,146.469a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,157.4a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,168.324a3.154,3.154,0,0,0,0,5.464l4.732,2.732A21.854,21.854,0,0,0,45.39,198.375H176.52a21.854,21.854,0,0,0,21.855-21.855V89.1l-76.514-4.782a11.632,11.632,0,0,1,0-23.219" transform="translate(-1.681 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_43" data-name="Path 43" d="M143,186.71h32.782V143H143Z" transform="translate(9.984 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_44" data-name="Path 44" d="M196.71,159.855a5.438,5.438,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(10.912 -6.025)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_45" data-name="Path 45" d="M153,124.855h32.782V103H153Z" transform="translate(10.912 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_46" data-name="Path 46" d="M194.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.814,2.814,0,0,0,.349.035" transform="translate(12.767 -9.377)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_47" data-name="Path 47" d="M65.087,56.891a2.732,2.732,0,0,1-2.732-2.732,8.2,8.2,0,0,0-16.391,0,2.732,2.732,0,0,1-5.464,0,13.659,13.659,0,0,1,27.319,0,2.732,2.732,0,0,1-2.732,2.732" transform="translate(0.478 -15.068)" fill-rule="evenodd"/>
<path id="Path_48" data-name="Path 48" d="M103,191.347h65.565a21.854,21.854,0,0,0,21.855-21.855V93H124.855A21.854,21.854,0,0,0,103,114.855Z" transform="translate(6.275 -10.199)" fill="#ffff50" fill-rule="evenodd"/>
<path id="Path_49" data-name="Path 49" d="M173.216,129.787H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0-54.434H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.652H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186M189.585,61.611c-.013,0-.024-.007-.037-.005-3.377.115-4.974,3.492-6.384,6.472-1.471,3.114-2.608,5.139-4.473,5.078-2.064-.074-3.244-2.406-4.494-4.874-1.436-2.835-3.075-6.049-6.516-5.929-3.329.114-4.932,3.053-6.346,5.646-1.5,2.762-2.529,4.442-4.5,4.364-2.106-.076-3.225-1.972-4.52-4.167-1.444-2.443-3.112-5.191-6.487-5.1-3.272.113-4.879,2.606-6.3,4.808-1.5,2.328-2.552,3.746-4.551,3.662-2.156-.076-3.27-1.65-4.558-3.472-1.447-2.047-3.077-4.363-6.442-4.251-3.2.109-4.807,2.153-6.224,3.954-1.346,1.709-2.4,3.062-4.621,2.977a1.093,1.093,0,0,0-.079,2.186c3.3.11,4.967-1.967,6.417-3.81,1.286-1.635,2.4-3.045,4.582-3.12,2.1-.09,3.091,1.218,4.584,3.327,1.417,2,3.026,4.277,6.263,4.394,3.391.114,5.022-2.42,6.467-4.663,1.292-2,2.406-3.734,4.535-3.807,1.959-.073,3.026,1.475,4.529,4.022,1.417,2.4,3.023,5.121,6.324,5.241,3.415.118,5.064-2.863,6.5-5.5,1.245-2.282,2.419-4.437,4.5-4.509,1.959-.046,2.981,1.743,4.492,4.732,1.412,2.79,3.013,5.95,6.365,6.071l.185,0c3.348,0,4.937-3.36,6.343-6.331,1.245-2.634,2.423-5.114,4.444-5.216Z" transform="translate(7.109 -13.11)" fill-rule="evenodd"/>
<path id="Path_50" data-name="Path 50" d="M83,186.71h43.71V143H83Z" transform="translate(4.42 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/>
<g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 109.327, 91.085)">
<rect id="Rectangle_3" data-name="Rectangle 3" width="92.361" height="36.462" rx="2" transform="translate(0 0)" fill="#d8d8d8"/>
<g id="Group_2" data-name="Group 2" transform="translate(1.531 23.03)">
<rect id="Rectangle_4" data-name="Rectangle 4" width="5.336" height="5.336" rx="1" transform="translate(16.797 0)" fill="#4a4a4a"/>
<rect id="Rectangle_5" data-name="Rectangle 5" width="5.336" height="5.336" rx="1" transform="translate(23.12 0)" fill="#4a4a4a"/>
<rect id="Rectangle_6" data-name="Rectangle 6" width="5.336" height="5.336" rx="1" transform="translate(29.444 0)" fill="#4a4a4a"/>
<rect id="Rectangle_7" data-name="Rectangle 7" width="5.336" height="5.336" rx="1" transform="translate(35.768 0)" fill="#4a4a4a"/>
<rect id="Rectangle_8" data-name="Rectangle 8" width="5.336" height="5.336" rx="1" transform="translate(42.091 0)" fill="#4a4a4a"/>
<rect id="Rectangle_9" data-name="Rectangle 9" width="5.336" height="5.336" rx="1" transform="translate(48.415 0)" fill="#4a4a4a"/>
<rect id="Rectangle_10" data-name="Rectangle 10" width="5.336" height="5.336" rx="1" transform="translate(54.739 0)" fill="#4a4a4a"/>
<rect id="Rectangle_11" data-name="Rectangle 11" width="5.336" height="5.336" rx="1" transform="translate(61.063 0)" fill="#4a4a4a"/>
<rect id="Rectangle_12" data-name="Rectangle 12" width="5.336" height="5.336" rx="1" transform="translate(67.386 0)" fill="#4a4a4a"/>
<path id="Path_51" data-name="Path 51" d="M1.093,0H14.518a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0ZM75,0H88.426a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H75a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,75,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_3" data-name="Group 3" transform="translate(1.531 10.261)">
<path id="Path_52" data-name="Path 52" d="M1.093,0H6.218A1.093,1.093,0,0,1,7.31,1.093V4.242A1.093,1.093,0,0,1,6.218,5.335H1.093A1.093,1.093,0,0,1,0,4.242V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_13" data-name="Rectangle 13" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
<rect id="Rectangle_14" data-name="Rectangle 14" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
<rect id="Rectangle_15" data-name="Rectangle 15" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
<rect id="Rectangle_16" data-name="Rectangle 16" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
<rect id="Rectangle_17" data-name="Rectangle 17" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
<rect id="Rectangle_18" data-name="Rectangle 18" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
<rect id="Rectangle_19" data-name="Rectangle 19" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
<rect id="Rectangle_20" data-name="Rectangle 20" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
<rect id="Rectangle_21" data-name="Rectangle 21" width="5.336" height="5.336" rx="1" transform="translate(58.888 0)" fill="#4a4a4a"/>
<rect id="Rectangle_22" data-name="Rectangle 22" width="5.336" height="5.336" rx="1" transform="translate(65.212 0)" fill="#4a4a4a"/>
<rect id="Rectangle_23" data-name="Rectangle 23" width="5.336" height="5.336" rx="1" transform="translate(71.536 0)" fill="#4a4a4a"/>
<rect id="Rectangle_24" data-name="Rectangle 24" width="5.336" height="5.336" rx="1" transform="translate(77.859 0)" fill="#4a4a4a"/>
<rect id="Rectangle_25" data-name="Rectangle 25" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
</g>
<g id="Group_4" data-name="Group 4" transform="translate(91.05 9.546) rotate(180)">
<path id="Path_53" data-name="Path 53" d="M1.093,0H6.219A1.093,1.093,0,0,1,7.312,1.093v3.15A1.093,1.093,0,0,1,6.219,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_26" data-name="Rectangle 26" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
<rect id="Rectangle_27" data-name="Rectangle 27" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
<rect id="Rectangle_28" data-name="Rectangle 28" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
<rect id="Rectangle_29" data-name="Rectangle 29" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
<rect id="Rectangle_30" data-name="Rectangle 30" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
<rect id="Rectangle_31" data-name="Rectangle 31" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
<rect id="Rectangle_32" data-name="Rectangle 32" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
<rect id="Rectangle_33" data-name="Rectangle 33" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
<rect id="Rectangle_34" data-name="Rectangle 34" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/>
<rect id="Rectangle_35" data-name="Rectangle 35" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/>
<rect id="Rectangle_36" data-name="Rectangle 36" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/>
<rect id="Rectangle_37" data-name="Rectangle 37" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/>
<rect id="Rectangle_38" data-name="Rectangle 38" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
<rect id="Rectangle_39" data-name="Rectangle 39" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
<rect id="Rectangle_40" data-name="Rectangle 40" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
<rect id="Rectangle_41" data-name="Rectangle 41" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
<rect id="Rectangle_42" data-name="Rectangle 42" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
<rect id="Rectangle_43" data-name="Rectangle 43" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
<rect id="Rectangle_44" data-name="Rectangle 44" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
<rect id="Rectangle_45" data-name="Rectangle 45" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
<rect id="Rectangle_46" data-name="Rectangle 46" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
<rect id="Rectangle_47" data-name="Rectangle 47" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/>
<rect id="Rectangle_48" data-name="Rectangle 48" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/>
<rect id="Rectangle_49" data-name="Rectangle 49" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/>
<rect id="Rectangle_50" data-name="Rectangle 50" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/>
<rect id="Rectangle_51" data-name="Rectangle 51" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
</g>
<g id="Group_6" data-name="Group 6" transform="translate(1.531 16.584)">
<path id="Path_54" data-name="Path 54" d="M1.093,0h7.3A1.093,1.093,0,0,1,9.485,1.093v3.15A1.093,1.093,0,0,1,8.392,5.336h-7.3A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<g id="Group_5" data-name="Group 5" transform="translate(10.671 0)">
<rect id="Rectangle_52" data-name="Rectangle 52" width="5.336" height="5.336" rx="1" fill="#4a4a4a"/>
<rect id="Rectangle_53" data-name="Rectangle 53" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/>
<rect id="Rectangle_54" data-name="Rectangle 54" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/>
<rect id="Rectangle_55" data-name="Rectangle 55" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/>
<rect id="Rectangle_56" data-name="Rectangle 56" width="5.336" height="5.336" rx="1" transform="translate(25.295 0)" fill="#4a4a4a"/>
<rect id="Rectangle_57" data-name="Rectangle 57" width="5.336" height="5.336" rx="1" transform="translate(31.619 0)" fill="#4a4a4a"/>
<rect id="Rectangle_58" data-name="Rectangle 58" width="5.336" height="5.336" rx="1" transform="translate(37.942 0)" fill="#4a4a4a"/>
<rect id="Rectangle_59" data-name="Rectangle 59" width="5.336" height="5.336" rx="1" transform="translate(44.265 0)" fill="#4a4a4a"/>
<rect id="Rectangle_60" data-name="Rectangle 60" width="5.336" height="5.336" rx="1" transform="translate(50.589 0)" fill="#4a4a4a"/>
<rect id="Rectangle_61" data-name="Rectangle 61" width="5.336" height="5.336" rx="1" transform="translate(56.912 0)" fill="#4a4a4a"/>
<rect id="Rectangle_62" data-name="Rectangle 62" width="5.336" height="5.336" rx="1" transform="translate(63.236 0)" fill="#4a4a4a"/>
</g>
<path id="Path_55" data-name="Path 55" d="M1.094,0H8A1.093,1.093,0,0,1,9.091,1.093v3.15A1.093,1.093,0,0,1,8,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(80.428 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_7" data-name="Group 7" transform="translate(1.531 29.627)">
<rect id="Rectangle_63" data-name="Rectangle 63" width="5.336" height="5.336" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_64" data-name="Rectangle 64" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/>
<rect id="Rectangle_65" data-name="Rectangle 65" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/>
<rect id="Rectangle_66" data-name="Rectangle 66" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/>
<path id="Path_56" data-name="Path 56" d="M1.093,0H31.515a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.244V1.093A1.093,1.093,0,0,1,1.093,0ZM34.687,0h3.942a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H34.687a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,34.687,0Z" transform="translate(25.294 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_67" data-name="Rectangle 67" width="5.336" height="5.336" rx="1" transform="translate(66.003 0)" fill="#4a4a4a"/>
<rect id="Rectangle_68" data-name="Rectangle 68" width="5.336" height="5.336" rx="1" transform="translate(72.327 0)" fill="#4a4a4a"/>
<rect id="Rectangle_69" data-name="Rectangle 69" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
<path id="Path_57" data-name="Path 57" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(83.59 2.273) rotate(180)" fill="#4a4a4a"/>
<path id="Path_58" data-name="Path 58" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(78.255 3.063)" fill="#4a4a4a"/>
</g>
<rect id="Rectangle_70" data-name="Rectangle 70" width="88.927" height="2.371" rx="1.085" transform="translate(1.925 1.17)" fill="#4a4a4a"/>
<rect id="Rectangle_71" data-name="Rectangle 71" width="4.986" height="1.581" rx="0.723" transform="translate(4.1 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_72" data-name="Rectangle 72" width="4.986" height="1.581" rx="0.723" transform="translate(10.923 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_73" data-name="Rectangle 73" width="4.986" height="1.581" rx="0.723" transform="translate(16.173 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_74" data-name="Rectangle 74" width="4.986" height="1.581" rx="0.723" transform="translate(21.421 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_75" data-name="Rectangle 75" width="4.986" height="1.581" rx="0.723" transform="translate(26.671 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_76" data-name="Rectangle 76" width="4.986" height="1.581" rx="0.723" transform="translate(33.232 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_77" data-name="Rectangle 77" width="4.986" height="1.581" rx="0.723" transform="translate(38.48 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_78" data-name="Rectangle 78" width="4.986" height="1.581" rx="0.723" transform="translate(43.73 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_79" data-name="Rectangle 79" width="4.986" height="1.581" rx="0.723" transform="translate(48.978 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_80" data-name="Rectangle 80" width="4.986" height="1.581" rx="0.723" transform="translate(55.54 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_81" data-name="Rectangle 81" width="4.986" height="1.581" rx="0.723" transform="translate(60.788 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_82" data-name="Rectangle 82" width="4.986" height="1.581" rx="0.723" transform="translate(66.038 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_83" data-name="Rectangle 83" width="4.986" height="1.581" rx="0.723" transform="translate(72.599 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_84" data-name="Rectangle 84" width="4.986" height="1.581" rx="0.723" transform="translate(77.847 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_85" data-name="Rectangle 85" width="4.986" height="1.581" rx="0.723" transform="translate(83.097 1.566)" fill="#d8d8d8" opacity="0.136"/>
</g>
<path id="Path_59" data-name="Path 59" d="M146.71,159.855a5.439,5.439,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(6.275 -6.025)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_60" data-name="Path 60" d="M83,124.855h43.71V103H83Z" transform="translate(4.42 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_61" data-name="Path 61" d="M134.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.811,2.811,0,0,0,.349.035" transform="translate(7.202 -9.377)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_62" data-name="Path 62" d="M143.232,42.33a2.967,2.967,0,0,1-.535-.055,2.754,2.754,0,0,1-.514-.153,2.838,2.838,0,0,1-.471-.251,4.139,4.139,0,0,1-.415-.339,3.2,3.2,0,0,1-.338-.415A2.7,2.7,0,0,1,140.5,39.6a2.968,2.968,0,0,1,.055-.535,3.152,3.152,0,0,1,.152-.514,2.874,2.874,0,0,1,.252-.47,2.633,2.633,0,0,1,.753-.754,2.837,2.837,0,0,1,.471-.251,2.753,2.753,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,4.019,4.019,0,0,1,.339.415,2.786,2.786,0,0,1,.251.47,2.864,2.864,0,0,1,.208,1.049,2.77,2.77,0,0,1-.8,1.934,4.139,4.139,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459m21.855-1.366a2.789,2.789,0,0,1-1.935-.8,4.162,4.162,0,0,1-.338-.415,2.7,2.7,0,0,1-.459-1.519,2.789,2.789,0,0,1,.8-1.934,4.139,4.139,0,0,1,.415-.339,2.838,2.838,0,0,1,.471-.251,2.752,2.752,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,2.79,2.79,0,0,1,.8,1.934,3.069,3.069,0,0,1-.055.535,2.779,2.779,0,0,1-.153.514,3.885,3.885,0,0,1-.251.47,4.02,4.02,0,0,1-.339.415,4.138,4.138,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459" transform="translate(9.753 -15.532)" fill-rule="evenodd"/>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 31 KiB

View File

@@ -1,170 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1041.277" height="554.141" viewBox="0 0 1041.277 554.141">
<title>Powered by React</title>
<g id="Group_24" data-name="Group 24" transform="translate(-440 -263)">
<g id="Group_23" data-name="Group 23" transform="translate(439.989 262.965)">
<path id="Path_299" data-name="Path 299" d="M1040.82,611.12q-1.74,3.75-3.47,7.4-2.7,5.67-5.33,11.12c-.78,1.61-1.56,3.19-2.32,4.77-8.6,17.57-16.63,33.11-23.45,45.89A73.21,73.21,0,0,1,942.44,719l-151.65,1.65h-1.6l-13,.14-11.12.12-34.1.37h-1.38l-17.36.19h-.53l-107,1.16-95.51,1-11.11.12-69,.75H429l-44.75.48h-.48l-141.5,1.53-42.33.46a87.991,87.991,0,0,1-10.79-.54h0c-1.22-.14-2.44-.3-3.65-.49a87.38,87.38,0,0,1-51.29-27.54C116,678.37,102.75,655,93.85,629.64q-1.93-5.49-3.6-11.12C59.44,514.37,97,380,164.6,290.08q4.25-5.64,8.64-11l.07-.08c20.79-25.52,44.1-46.84,68.93-62,44-26.91,92.75-34.49,140.7-11.9,40.57,19.12,78.45,28.11,115.17,30.55,3.71.24,7.42.42,11.11.53,84.23,2.65,163.17-27.7,255.87-47.29,3.69-.78,7.39-1.55,11.12-2.28,66.13-13.16,139.49-20.1,226.73-5.51a189.089,189.089,0,0,1,26.76,6.4q5.77,1.86,11.12,4c41.64,16.94,64.35,48.24,74,87.46q1.37,5.46,2.37,11.11C1134.3,384.41,1084.19,518.23,1040.82,611.12Z" transform="translate(-79.34 -172.91)" fill="#f2f2f2"/>
<path id="Path_300" data-name="Path 300" d="M576.36,618.52a95.21,95.21,0,0,1-1.87,11.12h93.7V618.52Zm-78.25,62.81,11.11-.09V653.77c-3.81-.17-7.52-.34-11.11-.52ZM265.19,618.52v11.12h198.5V618.52ZM1114.87,279h-74V191.51q-5.35-2.17-11.12-4V279H776.21V186.58c-3.73.73-7.43,1.5-11.12,2.28V279H509.22V236.15c-3.69-.11-7.4-.29-11.11-.53V279H242.24V217c-24.83,15.16-48.14,36.48-68.93,62h-.07v.08q-4.4,5.4-8.64,11h8.64V618.52h-83q1.66,5.63,3.6,11.12h79.39v93.62a87,87,0,0,0,12.2,2.79c1.21.19,2.43.35,3.65.49h0a87.991,87.991,0,0,0,10.79.54l42.33-.46v-97H498.11v94.21l11.11-.12V629.64H765.09V721l11.12-.12V629.64H1029.7v4.77c.76-1.58,1.54-3.16,2.32-4.77q2.63-5.45,5.33-11.12,1.73-3.64,3.47-7.4v-321h76.42Q1116.23,284.43,1114.87,279ZM242.24,618.52V290.08H498.11V618.52Zm267,0V290.08H765.09V618.52Zm520.48,0H776.21V290.08H1029.7Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_301" data-name="Path 301" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" fill="#65617d"/>
<path id="Path_302" data-name="Path 302" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" opacity="0.2"/>
<path id="Path_303" data-name="Path 303" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<path id="Path_304" data-name="Path 304" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_305" data-name="Path 305" d="M377.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<rect id="Rectangle_137" data-name="Rectangle 137" width="47.17" height="31.5" transform="translate(680.92 483.65)" fill="#3f3d56"/>
<rect id="Rectangle_138" data-name="Rectangle 138" width="47.17" height="31.5" transform="translate(680.92 483.65)" opacity="0.1"/>
<rect id="Rectangle_139" data-name="Rectangle 139" width="47.17" height="31.5" transform="translate(678.92 483.65)" fill="#3f3d56"/>
<path id="Path_306" data-name="Path 306" d="M298.09,483.65v4.97l-47.17,1.26v-6.23Z" opacity="0.1"/>
<path id="Path_307" data-name="Path 307" d="M460.69,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6a4,4,0,0,1,3.95,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_308" data-name="Path 308" d="M265.19,481.32v181.2h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_309" data-name="Path 309" d="M194.59,319.15h177.5V467.4l-177.5,4Z" fill="#39374d"/>
<path id="Path_310" data-name="Path 310" d="M726.09,483.65v6.41l-47.17-1.26v-5.15Z" opacity="0.1"/>
<path id="Path_311" data-name="Path 311" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0L672,657.42a4,4,0,0,1-3.85-3.95V485.27a4,4,0,0,1,3.95-3.95H863.7a4,4,0,0,1,3.99,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_312" data-name="Path 312" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0V481.32h0a4,4,0,0,1,4,3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_313" data-name="Path 313" d="M775.59,319.15H598.09V467.4l177.5,4Z" fill="#39374d"/>
<path id="Path_314" data-name="Path 314" d="M663.19,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h0a4,4,0,0,1-4-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6A4,4,0,0,1,663.19,485.27Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_315" data-name="Path 315" d="M397.09,319.15h177.5V467.4l-177.5,4Z" fill="#4267b2"/>
<path id="Path_316" data-name="Path 316" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l202.51-1.33h.48l40.99-.28h.19l283.08-1.87h.29l.17-.01h.47l4.79-.03h1.46l74.49-.5,4.4-.02.98-.01Z" opacity="0.1"/>
<circle id="Ellipse_111" data-name="Ellipse 111" cx="51.33" cy="51.33" r="51.33" transform="translate(435.93 246.82)" fill="#fbbebe"/>
<path id="Path_317" data-name="Path 317" d="M617.94,550.07s-99.5,12-90,0c3.44-4.34,4.39-17.2,4.2-31.85-.06-4.45-.22-9.06-.45-13.65-1.1-22-3.75-43.5-3.75-43.5s87-41,77-8.5c-4,13.13-2.69,31.57.35,48.88.89,5.05,1.92,10,3,14.7a344.66,344.66,0,0,0,9.65,33.92Z" transform="translate(-79.34 -172.91)" fill="#fbbebe"/>
<path id="Path_318" data-name="Path 318" d="M585.47,546c11.51-2.13,23.7-6,34.53-1.54,2.85,1.17,5.47,2.88,8.39,3.86s6.12,1.22,9.16,1.91c10.68,2.42,19.34,10.55,24.9,20s8.44,20.14,11.26,30.72l6.9,25.83c6,22.45,12,45.09,13.39,68.3a2437.506,2437.506,0,0,1-250.84,1.43c5.44-10.34,11-21.31,10.54-33s-7.19-23.22-4.76-34.74c1.55-7.34,6.57-13.39,9.64-20.22,8.75-19.52,1.94-45.79,17.32-60.65,6.92-6.68,17-9.21,26.63-8.89,12.28.41,24.85,4.24,37,6.11C555.09,547.48,569.79,548.88,585.47,546Z" transform="translate(-79.34 -172.91)" fill="#ff6584"/>
<path id="Path_319" data-name="Path 319" d="M716.37,657.17l-.1,1.43v.1l-.17,2.3-1.33,18.51-1.61,22.3-.46,6.28-1,13.44v.17l-107,1-175.59,1.9v.84h-.14v-1.12l.45-14.36.86-28.06.74-23.79.07-2.37a10.53,10.53,0,0,1,11.42-10.17c4.72.4,10.85.89,18.18,1.41l3,.22c42.33,2.94,120.56,6.74,199.5,2,1.66-.09,3.33-.19,5-.31,12.24-.77,24.47-1.76,36.58-3a10.53,10.53,0,0,1,11.6,11.23Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_320" data-name="Path 320" d="M429.08,725.44v-.84l175.62-1.91,107-1h.3v-.17l1-13.44.43-6,1.64-22.61,1.29-17.9v-.44a10.617,10.617,0,0,0-.11-2.47.3.3,0,0,0,0-.1,10.391,10.391,0,0,0-2-4.64,10.54,10.54,0,0,0-9.42-4c-12.11,1.24-24.34,2.23-36.58,3-1.67.12-3.34.22-5,.31-78.94,4.69-157.17.89-199.5-2l-3-.22c-7.33-.52-13.46-1-18.18-1.41a10.54,10.54,0,0,0-11.24,8.53,11,11,0,0,0-.18,1.64l-.68,22.16L429.54,710l-.44,14.36v1.12Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<path id="Path_321" data-name="Path 321" d="M716.67,664.18l-1.23,15.33-1.83,22.85-.46,5.72-1,12.81-.06.64v.17h0l-.15,1.48.11-1.48h-.29l-107,1-175.65,1.9v-.28l.49-14.36,1-28.06.64-18.65A6.36,6.36,0,0,1,434.3,658a6.25,6.25,0,0,1,3.78-.9c2.1.17,4.68.37,7.69.59,4.89.36,10.92.78,17.94,1.22,13,.82,29.31,1.7,48,2.42,52,2,122.2,2.67,188.88-3.17,3-.26,6.1-.55,9.13-.84a6.26,6.26,0,0,1,3.48.66,5.159,5.159,0,0,1,.86.54,6.14,6.14,0,0,1,2,2.46,3.564,3.564,0,0,1,.25.61A6.279,6.279,0,0,1,716.67,664.18Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_322" data-name="Path 322" d="M377.44,677.87v3.19a6.13,6.13,0,0,1-3.5,5.54l-40.1.77a6.12,6.12,0,0,1-3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_323" data-name="Path 323" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/>
<path id="Path_324" data-name="Path 324" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" opacity="0.1"/>
<path id="Path_325" data-name="Path 325" d="M300.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/>
<path id="Path_326" data-name="Path 326" d="M758.56,679.87v3.19a6.13,6.13,0,0,0,3.5,5.54l40.1.77a6.12,6.12,0,0,0,3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_327" data-name="Path 327" d="M678.72,517.57l52.25,1V509.9l-52.25-1Z" opacity="0.1"/>
<path id="Path_328" data-name="Path 328" d="M676.72,517.57l52.25,1V509.9l-52.25-1Z" fill="#3f3d56"/>
<path id="Path_329" data-name="Path 329" d="M534.13,486.79c.08,7-3.16,13.6-5.91,20.07a163.491,163.491,0,0,0-12.66,74.71c.73,11,2.58,22,.73,32.9s-8.43,21.77-19,24.9c17.53,10.45,41.26,9.35,57.76-2.66,8.79-6.4,15.34-15.33,21.75-24.11a97.86,97.86,0,0,1-13.31,44.75A103.43,103.43,0,0,0,637,616.53c4.31-5.81,8.06-12.19,9.72-19.23,3.09-13-1.22-26.51-4.51-39.5a266.055,266.055,0,0,1-6.17-33c-.43-3.56-.78-7.22.1-10.7,1-4.07,3.67-7.51,5.64-11.22,5.6-10.54,5.73-23.3,2.86-34.88s-8.49-22.26-14.06-32.81c-4.46-8.46-9.3-17.31-17.46-22.28-5.1-3.1-11-4.39-16.88-5.64l-25.37-5.43c-5.55-1.19-11.26-2.38-16.87-1.51-9.47,1.48-16.14,8.32-22,15.34-4.59,5.46-15.81,15.71-16.6,22.86-.72,6.59,5.1,17.63,6.09,24.58,1.3,9,2.22,6,7.3,11.52C532,478.05,534.07,482,534.13,486.79Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
</g>
<g id="docusaurus_keytar" transform="translate(670.271 615.768)">
<path id="Path_40" data-name="Path 40" d="M99,52h43.635V69.662H99Z" transform="translate(-49.132 -33.936)" fill="#fff" fill-rule="evenodd"/>
<path id="Path_41" data-name="Path 41" d="M13.389,158.195A10.377,10.377,0,0,1,4.4,153a10.377,10.377,0,0,0,8.988,15.584H23.779V158.195Z" transform="translate(-3 -82.47)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_42" data-name="Path 42" d="M66.967,38.083l36.373-2.273V30.615A10.389,10.389,0,0,0,92.95,20.226H46.2l-1.3-2.249a1.5,1.5,0,0,0-2.6,0L41,20.226l-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-.034,0-2.152-2.151a1.5,1.5,0,0,0-2.508.672L25.21,21.4l-2.7-.723a1.5,1.5,0,0,0-1.836,1.837l.722,2.7-2.65.71a1.5,1.5,0,0,0-.673,2.509l2.152,2.152c0,.011,0,.022,0,.033l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6L20.226,41l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3A10.389,10.389,0,0,0,30.615,103.34H92.95A10.389,10.389,0,0,0,103.34,92.95V51.393L66.967,49.12a5.53,5.53,0,0,1,0-11.038" transform="translate(-9.836 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_43" data-name="Path 43" d="M143,163.779h15.584V143H143Z" transform="translate(-70.275 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_44" data-name="Path 44" d="M173.779,148.389a2.582,2.582,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-75.08 -75.262)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_45" data-name="Path 45" d="M153,113.389h15.584V103H153Z" transform="translate(-75.08 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_46" data-name="Path 46" d="M183.389,108.944a1.3,1.3,0,1,0,0-2.6,1.336,1.336,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.337,1.337,0,0,0,.166.017" transform="translate(-84.691 -57.894)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_47" data-name="Path 47" d="M52.188,48.292a1.3,1.3,0,0,1-1.3-1.3,3.9,3.9,0,0,0-7.792,0,1.3,1.3,0,1,1-2.6,0,6.493,6.493,0,0,1,12.987,0,1.3,1.3,0,0,1-1.3,1.3" transform="translate(-21.02 -28.41)" fill-rule="evenodd"/>
<path id="Path_48" data-name="Path 48" d="M103,139.752h31.168a10.389,10.389,0,0,0,10.389-10.389V93H113.389A10.389,10.389,0,0,0,103,103.389Z" transform="translate(-51.054 -53.638)" fill="#ffff50" fill-rule="evenodd"/>
<path id="Path_49" data-name="Path 49" d="M141.1,94.017H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0-25.877H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.293H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m7.782-47.993c-.006,0-.011,0-.018,0-1.605.055-2.365,1.66-3.035,3.077-.7,1.48-1.24,2.443-2.126,2.414-.981-.035-1.542-1.144-2.137-2.317-.683-1.347-1.462-2.876-3.1-2.819-1.582.054-2.344,1.451-3.017,2.684-.715,1.313-1.2,2.112-2.141,2.075-1-.036-1.533-.938-2.149-1.981-.686-1.162-1.479-2.467-3.084-2.423-1.555.053-2.319,1.239-2.994,2.286-.713,1.106-1.213,1.781-2.164,1.741-1.025-.036-1.554-.784-2.167-1.65-.688-.973-1.463-2.074-3.062-2.021a3.815,3.815,0,0,0-2.959,1.879c-.64.812-1.14,1.456-2.2,1.415a.52.52,0,0,0-.037,1.039,3.588,3.588,0,0,0,3.05-1.811c.611-.777,1.139-1.448,2.178-1.483,1-.043,1.47.579,2.179,1.582.674.953,1.438,2.033,2.977,2.089,1.612.054,2.387-1.151,3.074-2.217.614-.953,1.144-1.775,2.156-1.81.931-.035,1.438.7,2.153,1.912.674,1.141,1.437,2.434,3.006,2.491,1.623.056,2.407-1.361,3.09-2.616.592-1.085,1.15-2.109,2.14-2.143.931-.022,1.417.829,2.135,2.249.671,1.326,1.432,2.828,3.026,2.886l.088,0c1.592,0,2.347-1.6,3.015-3.01.592-1.252,1.152-2.431,2.113-2.479Z" transform="translate(-55.378 -38.552)" fill-rule="evenodd"/>
<path id="Path_50" data-name="Path 50" d="M83,163.779h20.779V143H83Z" transform="translate(-41.443 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/>
<g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 51.971, 43.3)">
<rect id="Rectangle_3" data-name="Rectangle 3" width="43.906" height="17.333" rx="2" transform="translate(0 0)" fill="#d8d8d8"/>
<g id="Group_2" data-name="Group 2" transform="translate(0.728 10.948)">
<rect id="Rectangle_4" data-name="Rectangle 4" width="2.537" height="2.537" rx="1" transform="translate(7.985 0)" fill="#4a4a4a"/>
<rect id="Rectangle_5" data-name="Rectangle 5" width="2.537" height="2.537" rx="1" transform="translate(10.991 0)" fill="#4a4a4a"/>
<rect id="Rectangle_6" data-name="Rectangle 6" width="2.537" height="2.537" rx="1" transform="translate(13.997 0)" fill="#4a4a4a"/>
<rect id="Rectangle_7" data-name="Rectangle 7" width="2.537" height="2.537" rx="1" transform="translate(17.003 0)" fill="#4a4a4a"/>
<rect id="Rectangle_8" data-name="Rectangle 8" width="2.537" height="2.537" rx="1" transform="translate(20.009 0)" fill="#4a4a4a"/>
<rect id="Rectangle_9" data-name="Rectangle 9" width="2.537" height="2.537" rx="1" transform="translate(23.015 0)" fill="#4a4a4a"/>
<rect id="Rectangle_10" data-name="Rectangle 10" width="2.537" height="2.537" rx="1" transform="translate(26.021 0)" fill="#4a4a4a"/>
<rect id="Rectangle_11" data-name="Rectangle 11" width="2.537" height="2.537" rx="1" transform="translate(29.028 0)" fill="#4a4a4a"/>
<rect id="Rectangle_12" data-name="Rectangle 12" width="2.537" height="2.537" rx="1" transform="translate(32.034 0)" fill="#4a4a4a"/>
<path id="Path_51" data-name="Path 51" d="M.519,0H6.9A.519.519,0,0,1,7.421.52v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0ZM35.653,0h6.383a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H35.652a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,35.652,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_3" data-name="Group 3" transform="translate(0.728 4.878)">
<path id="Path_52" data-name="Path 52" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_13" data-name="Rectangle 13" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_14" data-name="Rectangle 14" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_15" data-name="Rectangle 15" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_16" data-name="Rectangle 16" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_17" data-name="Rectangle 17" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_18" data-name="Rectangle 18" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_19" data-name="Rectangle 19" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_20" data-name="Rectangle 20" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_21" data-name="Rectangle 21" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_22" data-name="Rectangle 22" width="2.537" height="2.537" rx="1" transform="translate(31 0)" fill="#4a4a4a"/>
<rect id="Rectangle_23" data-name="Rectangle 23" width="2.537" height="2.537" rx="1" transform="translate(34.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_24" data-name="Rectangle 24" width="2.537" height="2.537" rx="1" transform="translate(37.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_25" data-name="Rectangle 25" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
</g>
<g id="Group_4" data-name="Group 4" transform="translate(43.283 4.538) rotate(180)">
<path id="Path_53" data-name="Path 53" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_26" data-name="Rectangle 26" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_27" data-name="Rectangle 27" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_28" data-name="Rectangle 28" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_29" data-name="Rectangle 29" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_30" data-name="Rectangle 30" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_31" data-name="Rectangle 31" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_32" data-name="Rectangle 32" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_33" data-name="Rectangle 33" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_34" data-name="Rectangle 34" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_35" data-name="Rectangle 35" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/>
<rect id="Rectangle_36" data-name="Rectangle 36" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/>
<rect id="Rectangle_37" data-name="Rectangle 37" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/>
<rect id="Rectangle_38" data-name="Rectangle 38" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
<rect id="Rectangle_39" data-name="Rectangle 39" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_40" data-name="Rectangle 40" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_41" data-name="Rectangle 41" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_42" data-name="Rectangle 42" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_43" data-name="Rectangle 43" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_44" data-name="Rectangle 44" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_45" data-name="Rectangle 45" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_46" data-name="Rectangle 46" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_47" data-name="Rectangle 47" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_48" data-name="Rectangle 48" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/>
<rect id="Rectangle_49" data-name="Rectangle 49" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/>
<rect id="Rectangle_50" data-name="Rectangle 50" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/>
<rect id="Rectangle_51" data-name="Rectangle 51" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
</g>
<g id="Group_6" data-name="Group 6" transform="translate(0.728 7.883)">
<path id="Path_54" data-name="Path 54" d="M.519,0h3.47a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<g id="Group_5" data-name="Group 5" transform="translate(5.073 0)">
<rect id="Rectangle_52" data-name="Rectangle 52" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_53" data-name="Rectangle 53" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_54" data-name="Rectangle 54" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_55" data-name="Rectangle 55" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/>
<rect id="Rectangle_56" data-name="Rectangle 56" width="2.537" height="2.537" rx="1" transform="translate(12.025 0)" fill="#4a4a4a"/>
<rect id="Rectangle_57" data-name="Rectangle 57" width="2.537" height="2.537" rx="1" transform="translate(15.031 0)" fill="#4a4a4a"/>
<rect id="Rectangle_58" data-name="Rectangle 58" width="2.537" height="2.537" rx="1" transform="translate(18.037 0)" fill="#4a4a4a"/>
<rect id="Rectangle_59" data-name="Rectangle 59" width="2.537" height="2.537" rx="1" transform="translate(21.042 0)" fill="#4a4a4a"/>
<rect id="Rectangle_60" data-name="Rectangle 60" width="2.537" height="2.537" rx="1" transform="translate(24.049 0)" fill="#4a4a4a"/>
<rect id="Rectangle_61" data-name="Rectangle 61" width="2.537" height="2.537" rx="1" transform="translate(27.055 0)" fill="#4a4a4a"/>
<rect id="Rectangle_62" data-name="Rectangle 62" width="2.537" height="2.537" rx="1" transform="translate(30.061 0)" fill="#4a4a4a"/>
</g>
<path id="Path_55" data-name="Path 55" d="M.52,0H3.8a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(38.234 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_7" data-name="Group 7" transform="translate(0.728 14.084)">
<rect id="Rectangle_63" data-name="Rectangle 63" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_64" data-name="Rectangle 64" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_65" data-name="Rectangle 65" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_66" data-name="Rectangle 66" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/>
<path id="Path_56" data-name="Path 56" d="M.519,0H14.981A.519.519,0,0,1,15.5.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.018V.519A.519.519,0,0,1,.519,0Zm15.97,0h1.874a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H16.489a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,16.489,0Z" transform="translate(12.024 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_67" data-name="Rectangle 67" width="2.537" height="2.537" rx="1" transform="translate(31.376 0)" fill="#4a4a4a"/>
<rect id="Rectangle_68" data-name="Rectangle 68" width="2.537" height="2.537" rx="1" transform="translate(34.382 0)" fill="#4a4a4a"/>
<rect id="Rectangle_69" data-name="Rectangle 69" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
<path id="Path_57" data-name="Path 57" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(39.736 1.08) rotate(180)" fill="#4a4a4a"/>
<path id="Path_58" data-name="Path 58" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(37.2 1.456)" fill="#4a4a4a"/>
</g>
<rect id="Rectangle_70" data-name="Rectangle 70" width="42.273" height="1.127" rx="0.564" transform="translate(0.915 0.556)" fill="#4a4a4a"/>
<rect id="Rectangle_71" data-name="Rectangle 71" width="2.37" height="0.752" rx="0.376" transform="translate(1.949 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_72" data-name="Rectangle 72" width="2.37" height="0.752" rx="0.376" transform="translate(5.193 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_73" data-name="Rectangle 73" width="2.37" height="0.752" rx="0.376" transform="translate(7.688 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_74" data-name="Rectangle 74" width="2.37" height="0.752" rx="0.376" transform="translate(10.183 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_75" data-name="Rectangle 75" width="2.37" height="0.752" rx="0.376" transform="translate(12.679 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_76" data-name="Rectangle 76" width="2.37" height="0.752" rx="0.376" transform="translate(15.797 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_77" data-name="Rectangle 77" width="2.37" height="0.752" rx="0.376" transform="translate(18.292 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_78" data-name="Rectangle 78" width="2.37" height="0.752" rx="0.376" transform="translate(20.788 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_79" data-name="Rectangle 79" width="2.37" height="0.752" rx="0.376" transform="translate(23.283 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_80" data-name="Rectangle 80" width="2.37" height="0.752" rx="0.376" transform="translate(26.402 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_81" data-name="Rectangle 81" width="2.37" height="0.752" rx="0.376" transform="translate(28.897 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_82" data-name="Rectangle 82" width="2.37" height="0.752" rx="0.376" transform="translate(31.393 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_83" data-name="Rectangle 83" width="2.37" height="0.752" rx="0.376" transform="translate(34.512 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_84" data-name="Rectangle 84" width="2.37" height="0.752" rx="0.376" transform="translate(37.007 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_85" data-name="Rectangle 85" width="2.37" height="0.752" rx="0.376" transform="translate(39.502 0.744)" fill="#d8d8d8" opacity="0.136"/>
</g>
<path id="Path_59" data-name="Path 59" d="M123.779,148.389a2.583,2.583,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-51.054 -75.262)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_60" data-name="Path 60" d="M83,113.389h20.779V103H83Z" transform="translate(-41.443 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_61" data-name="Path 61" d="M123.389,108.944a1.3,1.3,0,1,0,0-2.6,1.338,1.338,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.335,1.335,0,0,0,.166.017" transform="translate(-55.859 -57.894)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_62" data-name="Path 62" d="M141.8,38.745a1.41,1.41,0,0,1-.255-.026,1.309,1.309,0,0,1-.244-.073,1.349,1.349,0,0,1-.224-.119,1.967,1.967,0,0,1-.2-.161,1.52,1.52,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.41,1.41,0,0,1,.026-.255,1.5,1.5,0,0,1,.072-.244,1.364,1.364,0,0,1,.12-.223,1.252,1.252,0,0,1,.358-.358,1.349,1.349,0,0,1,.224-.119,1.309,1.309,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.968,1.968,0,0,1,.2.161,1.908,1.908,0,0,1,.161.2,1.322,1.322,0,0,1,.12.223,1.361,1.361,0,0,1,.1.5,1.317,1.317,0,0,1-.379.919,1.968,1.968,0,0,1-.2.161,1.346,1.346,0,0,1-.223.119,1.332,1.332,0,0,1-.5.1m10.389-.649a1.326,1.326,0,0,1-.92-.379,1.979,1.979,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.326,1.326,0,0,1,.379-.919,1.967,1.967,0,0,1,.2-.161,1.351,1.351,0,0,1,.224-.119,1.308,1.308,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.967,1.967,0,0,1,.2.161,1.326,1.326,0,0,1,.379.919,1.461,1.461,0,0,1-.026.255,1.323,1.323,0,0,1-.073.244,1.847,1.847,0,0,1-.119.223,1.911,1.911,0,0,1-.161.2,1.967,1.967,0,0,1-.2.161,1.294,1.294,0,0,1-.722.218" transform="translate(-69.074 -26.006)" fill-rule="evenodd"/>
</g>
<g id="React-icon" transform="translate(906.3 541.56)">
<path id="Path_330" data-name="Path 330" d="M263.668,117.179c0-5.827-7.3-11.35-18.487-14.775,2.582-11.4,1.434-20.477-3.622-23.382a7.861,7.861,0,0,0-4.016-1v4a4.152,4.152,0,0,1,2.044.466c2.439,1.4,3.5,6.724,2.672,13.574-.2,1.685-.52,3.461-.914,5.272a86.9,86.9,0,0,0-11.386-1.954,87.469,87.469,0,0,0-7.459-8.965c5.845-5.433,11.332-8.41,15.062-8.41V78h0c-4.931,0-11.386,3.514-17.913,9.611-6.527-6.061-12.982-9.539-17.913-9.539v4c3.712,0,9.216,2.959,15.062,8.356a84.687,84.687,0,0,0-7.405,8.947,83.732,83.732,0,0,0-11.4,1.972c-.412-1.793-.717-3.532-.932-5.2-.843-6.85.2-12.175,2.618-13.592a3.991,3.991,0,0,1,2.062-.466v-4h0a8,8,0,0,0-4.052,1c-5.039,2.9-6.168,11.96-3.568,23.328-11.153,3.443-18.415,8.947-18.415,14.757,0,5.828,7.3,11.35,18.487,14.775-2.582,11.4-1.434,20.477,3.622,23.382a7.882,7.882,0,0,0,4.034,1c4.931,0,11.386-3.514,17.913-9.611,6.527,6.061,12.982,9.539,17.913,9.539a8,8,0,0,0,4.052-1c5.039-2.9,6.168-11.96,3.568-23.328C256.406,128.511,263.668,122.988,263.668,117.179Zm-23.346-11.96c-.663,2.313-1.488,4.7-2.421,7.083-.735-1.434-1.506-2.869-2.349-4.3-.825-1.434-1.7-2.833-2.582-4.2C235.517,104.179,237.974,104.645,240.323,105.219Zm-8.212,19.1c-1.4,2.421-2.833,4.716-4.321,6.85-2.672.233-5.379.359-8.1.359-2.708,0-5.415-.126-8.069-.341q-2.232-3.2-4.339-6.814-2.044-3.523-3.73-7.136c1.112-2.4,2.367-4.805,3.712-7.154,1.4-2.421,2.833-4.716,4.321-6.85,2.672-.233,5.379-.359,8.1-.359,2.708,0,5.415.126,8.069.341q2.232,3.2,4.339,6.814,2.044,3.523,3.73,7.136C234.692,119.564,233.455,121.966,232.11,124.315Zm5.792-2.331c.968,2.4,1.793,4.805,2.474,7.136-2.349.574-4.823,1.058-7.387,1.434.879-1.381,1.757-2.8,2.582-4.25C236.4,124.871,237.167,123.419,237.9,121.984ZM219.72,141.116a73.921,73.921,0,0,1-4.985-5.738c1.614.072,3.263.126,4.931.126,1.685,0,3.353-.036,4.985-.126A69.993,69.993,0,0,1,219.72,141.116ZM206.38,130.555c-2.546-.377-5-.843-7.352-1.417.663-2.313,1.488-4.7,2.421-7.083.735,1.434,1.506,2.869,2.349,4.3S205.5,129.192,206.38,130.555ZM219.63,93.241a73.924,73.924,0,0,1,4.985,5.738c-1.
614-.072-3.263-.126-4.931-.126-1.686,0-3.353.036-4.985.126A69.993,69.993,0,0,1,219.63,93.241ZM206.362,103.8c-.879,1.381-1.757,2.8-2.582,4.25-.825,1.434-1.6,2.869-2.331,4.3-.968-2.4-1.793-4.805-2.474-7.136C201.323,104.663,203.8,104.179,206.362,103.8Zm-16.227,22.449c-6.348-2.708-10.454-6.258-10.454-9.073s4.106-6.383,10.454-9.073c1.542-.663,3.228-1.255,4.967-1.811a86.122,86.122,0,0,0,4.034,10.92,84.9,84.9,0,0,0-3.981,10.866C193.38,127.525,191.694,126.915,190.134,126.252Zm9.647,25.623c-2.439-1.4-3.5-6.724-2.672-13.574.2-1.686.52-3.461.914-5.272a86.9,86.9,0,0,0,11.386,1.954,87.465,87.465,0,0,0,7.459,8.965c-5.845,5.433-11.332,8.41-15.062,8.41A4.279,4.279,0,0,1,199.781,151.875Zm42.532-13.663c.843,6.85-.2,12.175-2.618,13.592a3.99,3.99,0,0,1-2.062.466c-3.712,0-9.216-2.959-15.062-8.356a84.689,84.689,0,0,0,7.405-8.947,83.731,83.731,0,0,0,11.4-1.972A50.194,50.194,0,0,1,242.313,138.212Zm6.9-11.96c-1.542.663-3.228,1.255-4.967,1.811a86.12,86.12,0,0,0-4.034-10.92,84.9,84.9,0,0,0,3.981-10.866c1.775.556,3.461,1.165,5.039,1.829,6.348,2.708,10.454,6.258,10.454,9.073C259.67,119.994,255.564,123.562,249.216,126.252Z" fill="#61dafb"/>
<path id="Path_331" data-name="Path 331" d="M320.8,78.4Z" transform="translate(-119.082 -0.328)" fill="#61dafb"/>
<circle id="Ellipse_112" data-name="Ellipse 112" cx="8.194" cy="8.194" r="8.194" transform="translate(211.472 108.984)" fill="#61dafb"/>
<path id="Path_332" data-name="Path 332" d="M520.5,78.1Z" transform="translate(-282.975 -0.082)" fill="#61dafb"/>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 35 KiB

View File

@@ -1,40 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1129" height="663" viewBox="0 0 1129 663">
<title>Focus on What Matters</title>
<circle cx="321" cy="321" r="321" fill="#f2f2f2" />
<ellipse cx="559" cy="635.49998" rx="514" ry="27.50002" fill="#3f3d56" />
<ellipse cx="558" cy="627" rx="460" ry="22" opacity="0.2" />
<rect x="131" y="152.5" width="840" height="50" fill="#3f3d56" />
<path d="M166.5,727.3299A21.67009,21.67009,0,0,0,188.1701,749H984.8299A21.67009,21.67009,0,0,0,1006.5,727.3299V296h-840Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<path d="M984.8299,236H188.1701A21.67009,21.67009,0,0,0,166.5,257.6701V296h840V257.6701A21.67009,21.67009,0,0,0,984.8299,236Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<path d="M984.8299,236H188.1701A21.67009,21.67009,0,0,0,166.5,257.6701V296h840V257.6701A21.67009,21.67009,0,0,0,984.8299,236Z" transform="translate(-35.5 -118.5)" opacity="0.2" />
<circle cx="181" cy="147.5" r="13" fill="#3f3d56" />
<circle cx="217" cy="147.5" r="13" fill="#3f3d56" />
<circle cx="253" cy="147.5" r="13" fill="#3f3d56" />
<rect x="168" y="213.5" width="337" height="386" rx="5.33505" fill="#606060" />
<rect x="603" y="272.5" width="284" height="22" rx="5.47638" fill="#2e8555" />
<rect x="537" y="352.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="396.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="440.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="484.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="865" y="552.5" width="88" height="26" rx="7.02756" fill="#3ecc5f" />
<path d="M1088.60287,624.61594a30.11371,30.11371,0,0,0,3.98291-15.266c0-13.79652-8.54358-24.98081-19.08256-24.98081s-19.08256,11.18429-19.08256,24.98081a30.11411,30.11411,0,0,0,3.98291,15.266,31.248,31.248,0,0,0,0,30.53213,31.248,31.248,0,0,0,0,30.53208,31.248,31.248,0,0,0,0,30.53208,30.11408,30.11408,0,0,0-3.98291,15.266c0,13.79652,8.54353,24.98081,19.08256,24.98081s19.08256-11.18429,19.08256-24.98081a30.11368,30.11368,0,0,0-3.98291-15.266,31.248,31.248,0,0,0,0-30.53208,31.248,31.248,0,0,0,0-30.53208,31.248,31.248,0,0,0,0-30.53213Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<ellipse cx="1038.00321" cy="460.31783" rx="19.08256" ry="24.9808" fill="#3f3d56" />
<ellipse cx="1038.00321" cy="429.78574" rx="19.08256" ry="24.9808" fill="#3f3d56" />
<path d="M1144.93871,339.34489a91.61081,91.61081,0,0,0,7.10658-10.46092l-50.141-8.23491,54.22885.4033a91.566,91.566,0,0,0,1.74556-72.42605l-72.75449,37.74139,67.09658-49.32086a91.41255,91.41255,0,1,0-150.971,102.29805,91.45842,91.45842,0,0,0-10.42451,16.66946l65.0866,33.81447-69.40046-23.292a91.46011,91.46011,0,0,0,14.73837,85.83669,91.40575,91.40575,0,1,0,143.68892,0,91.41808,91.41808,0,0,0,0-113.02862Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M981.6885,395.8592a91.01343,91.01343,0,0,0,19.56129,56.51431,91.40575,91.40575,0,1,0,143.68892,0C1157.18982,436.82067,981.6885,385.60008,981.6885,395.8592Z" transform="translate(-35.5 -118.5)" opacity="0.1" />
<path d="M365.62,461.43628H477.094v45.12043H365.62Z" transform="translate(-35.5 -118.5)" fill="#fff" fill-rule="evenodd" />
<path d="M264.76252,608.74122a26.50931,26.50931,0,0,1-22.96231-13.27072,26.50976,26.50976,0,0,0,22.96231,39.81215H291.304V608.74122Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M384.17242,468.57061l92.92155-5.80726V449.49263a26.54091,26.54091,0,0,0-26.54143-26.54143H331.1161l-3.31768-5.74622a3.83043,3.83043,0,0,0-6.63536,0l-3.31768,5.74622-3.31767-5.74622a3.83043,3.83043,0,0,0-6.63536,0l-3.31768,5.74622L301.257,417.205a3.83043,3.83043,0,0,0-6.63536,0L291.304,422.9512c-.02919,0-.05573.004-.08625.004l-5.49674-5.49541a3.8293,3.8293,0,0,0-6.4071,1.71723l-1.81676,6.77338L270.607,424.1031a3.82993,3.82993,0,0,0-4.6912,4.69253l1.84463,6.89148-6.77072,1.81411a3.8315,3.8315,0,0,0-1.71988,6.40975l5.49673,5.49673c0,.02787-.004.05574-.004.08493l-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74621,3.31768L259.0163,466.081a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31767a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31767a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768L259.0163,558.976a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768A26.54091,26.54091,0,0,0,291.304,635.28265H450.55254A26.5409,26.5409,0,0,0,477.094,608.74122V502.5755l-92.92155-5.80727a14.12639,14.12639,0,0,1,0-28.19762" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M424.01111,635.28265h39.81214V582.19979H424.01111Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M490.36468,602.10586a6.60242,6.60242,0,0,0-.848.08493c-.05042-.19906-.09821-.39945-.15393-.59852A6.62668,6.62668,0,1,0,482.80568,590.21q-.2203-.22491-.44457-.44589a6.62391,6.62391,0,1,0-11.39689-6.56369c-.1964-.05575-.39414-.10218-.59056-.15262a6.63957,6.63957,0,1,0-13.10086,0c-.1964.05042-.39414.09687-.59056.15262a6.62767,6.62767,0,1,0-11.39688,6.56369,26.52754,26.52754,0,1,0,44.23127,25.52756,6.6211,6.6211,0,1,0,.848-13.18579" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M437.28182,555.65836H477.094V529.11693H437.28182Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M490.36468,545.70532a3.31768,3.31768,0,0,0,0-6.63536,3.41133,3.41133,0,0,0-.42333.04247c-.02655-.09953-.04911-.19907-.077-.29859a3.319,3.319,0,0,0-1.278-6.37923,3.28174,3.28174,0,0,0-2.00122.68742q-.10947-.11346-.22294-.22295a3.282,3.282,0,0,0,.67149-1.98265,3.31768,3.31768,0,0,0-6.37-1.2992,13.27078,13.27078,0,1,0,0,25.54082,3.31768,3.31768,0,0,0,6.37-1.2992,3.282,3.282,0,0,0-.67149-1.98265q.11347-.10947.22294-.22294a3.28174,3.28174,0,0,0,2.00122.68742,3.31768,3.31768,0,0,0,1.278-6.37923c.02786-.0982.05042-.19907.077-.29859a3.41325,3.41325,0,0,0,.42333.04246" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M317.84538,466.081a3.31768,3.31768,0,0,1-3.31767-3.31768,9.953,9.953,0,1,0-19.90608,0,3.31768,3.31768,0,1,1-6.63535,0,16.58839,16.58839,0,1,1,33.17678,0,3.31768,3.31768,0,0,1-3.31768,3.31768" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
<path d="M370.92825,635.28265h79.62429A26.5409,26.5409,0,0,0,477.094,608.74122v-92.895H397.46968a26.54091,26.54091,0,0,0-26.54143,26.54143Z" transform="translate(-35.5 -118.5)" fill="#ffff50" fill-rule="evenodd" />
<path d="M457.21444,556.98543H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,1,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,1,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0-66.10674H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.29459H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414M477.094,474.19076c-.01592,0-.0292-.008-.04512-.00663-4.10064.13934-6.04083,4.24132-7.75274,7.86024-1.78623,3.78215-3.16771,6.24122-5.43171,6.16691-2.50685-.09024-3.94007-2.92222-5.45825-5.91874-1.74377-3.44243-3.73438-7.34667-7.91333-7.20069-4.04227.138-5.98907,3.70784-7.70631,6.857-1.82738,3.35484-3.07084,5.39455-5.46887,5.30033-2.55727-.09289-3.91619-2.39536-5.48877-5.06013-1.75306-2.96733-3.77951-6.30359-7.8775-6.18946-3.97326.13669-5.92537,3.16507-7.64791,5.83912-1.82207,2.82666-3.09872,4.5492-5.52725,4.447-2.61832-.09289-3.9706-2.00388-5.53522-4.21611-1.757-2.4856-3.737-5.299-7.82308-5.16231-3.88567.13271-5.83779,2.61434-7.559,4.80135-1.635,2.07555-2.9116,3.71846-5.61218,3.615a1.32793,1.32793,0,1,0-.09555,2.65414c4.00377.134,6.03154-2.38873,7.79257-4.6275,1.562-1.9853,2.91027-3.69855,5.56441-3.78879,2.55594-.10882,3.75429,1.47968,5.56707,4.04093,1.7212,2.43385,3.67465,5.19416,7.60545,5.33616,4.11789.138,6.09921-2.93946,7.8536-5.66261,1.56861-2.43385,2.92221-4.53461,5.50734-4.62352,2.37944-.08892,3.67466,1.79154,5.50072,4.885,1.72121,2.91557,3.67069,6.21865,7.67977,6.36463,4.14709.14332,6.14965-3.47693,7.89475-6.68181,1.51155-2.77092,2.93814-5.38791,5.46621-5.4755,2.37944-.05573,3.62025,2.11668,5.45558,5.74622,1.71459,3.388,3.65875,7.22591,7.73019,7.37321l.22429.004c4.06614,0,5.99571-4.08074,7.70364-7.68905,1.51154-3.19825,2.94211-6.21069,5.3972-6.33411Z" transform="translate(-35.5 
-118.5)" fill-rule="evenodd" />
<path d="M344.38682,635.28265h53.08286V582.19979H344.38682Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M424.01111,602.10586a6.60242,6.60242,0,0,0-.848.08493c-.05042-.19906-.09821-.39945-.15394-.59852A6.62667,6.62667,0,1,0,416.45211,590.21q-.2203-.22491-.44458-.44589a6.62391,6.62391,0,1,0-11.39689-6.56369c-.1964-.05575-.39413-.10218-.59054-.15262a6.63957,6.63957,0,1,0-13.10084,0c-.19641.05042-.39414.09687-.59055.15262a6.62767,6.62767,0,1,0-11.39689,6.56369,26.52755,26.52755,0,1,0,44.2313,25.52756,6.6211,6.6211,0,1,0,.848-13.18579" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M344.38682,555.65836h53.08286V529.11693H344.38682Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M410.74039,545.70532a3.31768,3.31768,0,1,0,0-6.63536,3.41133,3.41133,0,0,0-.42333.04247c-.02655-.09953-.04911-.19907-.077-.29859a3.319,3.319,0,0,0-1.278-6.37923,3.28174,3.28174,0,0,0-2.00122.68742q-.10947-.11346-.22294-.22295a3.282,3.282,0,0,0,.67149-1.98265,3.31768,3.31768,0,0,0-6.37-1.2992,13.27078,13.27078,0,1,0,0,25.54082,3.31768,3.31768,0,0,0,6.37-1.2992,3.282,3.282,0,0,0-.67149-1.98265q.11347-.10947.22294-.22294a3.28174,3.28174,0,0,0,2.00122.68742,3.31768,3.31768,0,0,0,1.278-6.37923c.02786-.0982.05042-.19907.077-.29859a3.41325,3.41325,0,0,0,.42333.04246" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M424.01111,447.8338a3.60349,3.60349,0,0,1-.65028-.06636,3.34415,3.34415,0,0,1-.62372-.18579,3.44679,3.44679,0,0,1-.572-.30522,5.02708,5.02708,0,0,1-.50429-.4114,3.88726,3.88726,0,0,1-.41007-.50428,3.27532,3.27532,0,0,1-.55737-1.84463,3.60248,3.60248,0,0,1,.06636-.65027,3.82638,3.82638,0,0,1,.18447-.62373,3.48858,3.48858,0,0,1,.30656-.57064,3.197,3.197,0,0,1,.91436-.91568,3.44685,3.44685,0,0,1,.572-.30523,3.344,3.344,0,0,1,.62372-.18578,3.06907,3.06907,0,0,1,1.30053,0,3.22332,3.22332,0,0,1,1.19436.491,5.02835,5.02835,0,0,1,.50429.41139,4.8801,4.8801,0,0,1,.41139.50429,3.38246,3.38246,0,0,1,.30522.57064,3.47806,3.47806,0,0,1,.25215,1.274A3.36394,3.36394,0,0,1,426.36,446.865a5.02708,5.02708,0,0,1-.50429.4114,3.3057,3.3057,0,0,1-1.84463.55737m26.54143-1.65884a3.38754,3.38754,0,0,1-2.35024-.96877,5.04185,5.04185,0,0,1-.41007-.50428,3.27532,3.27532,0,0,1-.55737-1.84463,3.38659,3.38659,0,0,1,.96744-2.34892,5.02559,5.02559,0,0,1,.50429-.41139,3.44685,3.44685,0,0,1,.572-.30523,3.3432,3.3432,0,0,1,.62373-.18579,3.06952,3.06952,0,0,1,1.30052,0,3.22356,3.22356,0,0,1,1.19436.491,5.02559,5.02559,0,0,1,.50429.41139,3.38792,3.38792,0,0,1,.96876,2.34892,3.72635,3.72635,0,0,1-.06636.65026,3.37387,3.37387,0,0,1-.18579.62373,4.71469,4.71469,0,0,1-.30522.57064,4.8801,4.8801,0,0,1-.41139.50429,5.02559,5.02559,0,0,1-.50429.41139,3.30547,3.30547,0,0,1-1.84463.55737" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
</svg>

Before

Width:  |  Height:  |  Size: 12 KiB

3456
docs/static/openapi.json vendored

File diff suppressed because it is too large Load Diff

View File

@@ -1,241 +0,0 @@
# Transcript Formats
The Reflector API provides multiple output formats for transcript data through the `transcript_format` query parameter on the GET `/v1/transcripts/{id}` endpoint.
## Overview
When retrieving a transcript, you can specify the desired format using the `transcript_format` query parameter. The API supports four formats optimized for different use cases:
- **text** - Plain text with speaker names (default)
- **text-timestamped** - Timestamped text with speaker names
- **webvtt-named** - WebVTT subtitle format with participant names
- **json** - Structured JSON segments with full metadata
All formats include participant information when available, resolving speaker IDs to actual names.
## Query Parameter Usage
```
GET /v1/transcripts/{id}?transcript_format={format}
```
### Parameters
- `transcript_format` (optional): The desired output format
- Type: `"text" | "text-timestamped" | "webvtt-named" | "json"`
- Default: `"text"`
## Format Descriptions
### Text Format (`text`)
**Use case:** Simple, human-readable transcript for display or export.
**Format:** Speaker names followed by their dialogue, one line per segment.
**Example:**
```
John Smith: Hello everyone
Jane Doe: Hi there
John Smith: How are you today?
```
**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text
```
**Response:**
```json
{
"id": "transcript_123",
"name": "Meeting Recording",
"transcript_format": "text",
"transcript": "John Smith: Hello everyone\nJane Doe: Hi there\nJohn Smith: How are you today?",
"participants": [
{"id": "p1", "speaker": 0, "name": "John Smith"},
{"id": "p2", "speaker": 1, "name": "Jane Doe"}
],
...
}
```
### Text Timestamped Format (`text-timestamped`)
**Use case:** Transcript with timing information for navigation or reference.
**Format:** `[MM:SS]` timestamp prefix before each speaker and dialogue.
**Example:**
```
[00:00] John Smith: Hello everyone
[00:05] Jane Doe: Hi there
[00:12] John Smith: How are you today?
```
**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text-timestamped
```
**Response:**
```json
{
"id": "transcript_123",
"name": "Meeting Recording",
"transcript_format": "text-timestamped",
"transcript": "[00:00] John Smith: Hello everyone\n[00:05] Jane Doe: Hi there\n[00:12] John Smith: How are you today?",
"participants": [
{"id": "p1", "speaker": 0, "name": "John Smith"},
{"id": "p2", "speaker": 1, "name": "Jane Doe"}
],
...
}
```
### WebVTT Named Format (`webvtt-named`)
**Use case:** Subtitle files for video players, accessibility tools, or video editing.
**Format:** Standard WebVTT subtitle format with voice tags using participant names.
**Example:**
```
WEBVTT
00:00:00.000 --> 00:00:05.000
<v John Smith>Hello everyone
00:00:05.000 --> 00:00:12.000
<v Jane Doe>Hi there
00:00:12.000 --> 00:00:18.000
<v John Smith>How are you today?
```
**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=webvtt-named
```
**Response:**
```json
{
"id": "transcript_123",
"name": "Meeting Recording",
"transcript_format": "webvtt-named",
"transcript": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v John Smith>Hello everyone\n\n...",
"participants": [
{"id": "p1", "speaker": 0, "name": "John Smith"},
{"id": "p2", "speaker": 1, "name": "Jane Doe"}
],
...
}
```
### JSON Format (`json`)
**Use case:** Programmatic access with full timing and speaker metadata.
**Format:** Array of segment objects with speaker information, text content, and precise timing.
**Example:**
```json
[
{
"speaker": 0,
"speaker_name": "John Smith",
"text": "Hello everyone",
"start": 0.0,
"end": 5.0
},
{
"speaker": 1,
"speaker_name": "Jane Doe",
"text": "Hi there",
"start": 5.0,
"end": 12.0
},
{
"speaker": 0,
"speaker_name": "John Smith",
"text": "How are you today?",
"start": 12.0,
"end": 18.0
}
]
```
**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=json
```
**Response:**
```json
{
"id": "transcript_123",
"name": "Meeting Recording",
"transcript_format": "json",
"transcript": [
{
"speaker": 0,
"speaker_name": "John Smith",
"text": "Hello everyone",
"start": 0.0,
"end": 5.0
},
{
"speaker": 1,
"speaker_name": "Jane Doe",
"text": "Hi there",
"start": 5.0,
"end": 12.0
}
],
"participants": [
{"id": "p1", "speaker": 0, "name": "John Smith"},
{"id": "p2", "speaker": 1, "name": "Jane Doe"}
],
...
}
```
## Response Structure
All formats return the same base transcript metadata with an additional `transcript_format` field and format-specific `transcript` field:
### Common Fields
- `id`: Transcript identifier
- `user_id`: Owner user ID (if authenticated)
- `name`: Transcript name
- `status`: Processing status
- `locked`: Whether transcript is locked for editing
- `duration`: Total duration in seconds
- `title`: Auto-generated or custom title
- `short_summary`: Brief summary
- `long_summary`: Detailed summary
- `created_at`: Creation timestamp
- `share_mode`: Access control setting
- `source_language`: Original audio language
- `target_language`: Translation target language
- `reviewed`: Whether transcript has been reviewed
- `meeting_id`: Associated meeting ID (if applicable)
- `source_kind`: Source type (live, file, room)
- `room_id`: Associated room ID (if applicable)
- `audio_deleted`: Whether audio has been deleted
- `participants`: Array of participant objects with speaker mappings
### Format-Specific Fields
- `transcript_format`: The format identifier (discriminator field)
- `transcript`: The formatted transcript content (string for text/webvtt formats, array for json format)
## Speaker Name Resolution
All formats resolve speaker IDs to participant names when available:
- If a participant exists for the speaker ID, their name is used
- If no participant exists, a default name like "Speaker 0" is generated
- Speaker IDs are integers (0, 1, 2, etc.) assigned during diarization

View File

@@ -1,8 +0,0 @@
{
// This file is not used in compilation. It is here just for a nice editor experience.
"extends": "@docusaurus/tsconfig",
"compilerOptions": {
"baseUrl": "."
},
"exclude": [".docusaurus", "build"]
}

View File

@@ -1,150 +0,0 @@
#!/bin/bash
# Deploys the Reflector GPU functions (Whisper transcriber + Pyannote diarizer)
# to Modal. Creates/rotates the required Modal secrets and prints the env-var
# block to paste into server/.env.
# Requires: modal CLI (authenticated), openssl, a HuggingFace token.
set -e
# --- Usage ---
# Print CLI help and exit 0. Also invoked on unknown options (which therefore
# exit 0 as well — NOTE(review): consider exiting non-zero in that case).
usage() {
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
echo " --hf-token TOKEN HuggingFace token"
echo " --help Show this help message"
echo ""
echo "Examples:"
echo " $0 # Interactive mode"
echo " $0 --hf-token hf_xxxxx # Non-interactive mode"
echo ""
exit 0
}
# --- Parse Arguments ---
HF_TOKEN=""
while [[ $# -gt 0 ]]; do
case $1 in
--hf-token)
HF_TOKEN="$2"
shift 2
;;
--help)
usage
;;
*)
echo "Unknown option: $1"
usage
;;
esac
done
echo "=========================================="
echo "Reflector GPU Functions Deployment"
echo "=========================================="
echo ""
# --- Check Dependencies ---
# Fail fast with install hints if required tools are missing.
if ! command -v modal &> /dev/null; then
echo "Error: Modal CLI not installed."
echo " Install with: pip install modal"
exit 1
fi
if ! command -v openssl &> /dev/null; then
echo "Error: openssl not found."
echo " Mac: brew install openssl"
echo " Ubuntu: sudo apt-get install openssl"
exit 1
fi
# Check Modal authentication
if ! modal profile current &> /dev/null; then
echo "Error: Not authenticated with Modal."
echo " Run: modal setup"
exit 1
fi
# --- HuggingFace Token Setup ---
# Prompt interactively only when --hf-token was not supplied.
if [ -z "$HF_TOKEN" ]; then
echo "HuggingFace token required for Pyannote diarization model."
echo "1. Create account at https://huggingface.co"
echo "2. Accept license at https://huggingface.co/pyannote/speaker-diarization-3.1"
echo "3. Generate token at https://huggingface.co/settings/tokens"
echo ""
read -p "Enter your HuggingFace token: " HF_TOKEN
fi
if [ -z "$HF_TOKEN" ]; then
echo "Error: HuggingFace token is required for diarization"
exit 1
fi
# Basic token format validation
# Warn (don't block) on unusual prefixes; only ask for confirmation when
# stdin is a TTY so CI/non-interactive runs proceed.
if [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
echo "Warning: HuggingFace tokens usually start with 'hf_'"
if [ -t 0 ]; then
read -p "Continue anyway? (y/n): " confirm
if [ "$confirm" != "y" ]; then
exit 1
fi
else
echo "Non-interactive mode: proceeding anyway"
fi
fi
# --- Auto-generate reflector<->GPU API Key ---
# Shared bearer key used by the Reflector server to call the GPU endpoints.
echo ""
echo "Generating API key for GPU services..."
API_KEY=$(openssl rand -hex 32)
# --- Create Modal Secrets ---
# Modal has no "upsert", so delete-then-create to rotate existing secrets.
# NOTE(review): `grep -q "hf_token"` matches substrings, so a secret named
# e.g. "my_hf_token" would also trigger the delete path — confirm acceptable.
echo "Creating Modal secrets..."
# Create or update hf_token secret (delete first if exists)
if modal secret list 2>/dev/null | grep -q "hf_token"; then
echo " -> Recreating secret: hf_token"
modal secret delete hf_token --yes 2>/dev/null || true
fi
echo " -> Creating secret: hf_token"
modal secret create hf_token HF_TOKEN="$HF_TOKEN"
# Create or update reflector-gpu secret (delete first if exists)
if modal secret list 2>/dev/null | grep -q "reflector-gpu"; then
echo " -> Recreating secret: reflector-gpu"
modal secret delete reflector-gpu --yes 2>/dev/null || true
fi
echo " -> Creating secret: reflector-gpu"
modal secret create reflector-gpu REFLECTOR_GPU_APIKEY="$API_KEY"
# --- Deploy Functions ---
# Each deploy's output is scraped for the first *web.modal.run URL; an empty
# result means the deploy failed (or Modal changed its output format).
echo ""
echo "Deploying transcriber (Whisper)..."
TRANSCRIBER_URL=$(modal deploy reflector_transcriber.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
if [ -z "$TRANSCRIBER_URL" ]; then
echo "Error: Failed to deploy transcriber. Check Modal dashboard for details."
exit 1
fi
echo " -> $TRANSCRIBER_URL"
echo ""
echo "Deploying diarizer (Pyannote)..."
DIARIZER_URL=$(modal deploy reflector_diarizer.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
if [ -z "$DIARIZER_URL" ]; then
echo "Error: Failed to deploy diarizer. Check Modal dashboard for details."
exit 1
fi
echo " -> $DIARIZER_URL"
# --- Output Configuration ---
# Print the ready-to-paste env block; the API key is shown in plaintext here,
# so run this in a trusted terminal.
echo ""
echo "=========================================="
echo "Deployment complete!"
echo "=========================================="
echo ""
echo "Copy these values to your server's server/.env file:"
echo ""
echo "# --- Modal GPU Configuration ---"
echo "TRANSCRIPT_BACKEND=modal"
echo "TRANSCRIPT_URL=$TRANSCRIBER_URL"
echo "TRANSCRIPT_MODAL_API_KEY=$API_KEY"
echo ""
echo "DIARIZATION_BACKEND=modal"
echo "DIARIZATION_URL=$DIARIZER_URL"
echo "DIARIZATION_MODAL_API_KEY=$API_KEY"
echo "# --- End Modal Configuration ---"

View File

@@ -24,12 +24,6 @@ app = modal.App(name="reflector-diarizer")
upload_volume = modal.Volume.from_name("diarizer-uploads", create_if_missing=True) upload_volume = modal.Volume.from_name("diarizer-uploads", create_if_missing=True)
# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
# If you modify the audio format detection logic, you MUST update all copies:
# - gpu/self_hosted/app/utils.py
# - gpu/modal_deployments/reflector_transcriber.py (2 copies)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/modal_deployments/reflector_diarizer.py (this file)
def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension: def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
parsed_url = urlparse(url) parsed_url = urlparse(url)
url_path = parsed_url.path url_path = parsed_url.path
@@ -45,8 +39,6 @@ def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtens
return AudioFileExtension("wav") return AudioFileExtension("wav")
if "audio/mp4" in content_type: if "audio/mp4" in content_type:
return AudioFileExtension("mp4") return AudioFileExtension("mp4")
if "audio/webm" in content_type or "video/webm" in content_type:
return AudioFileExtension("webm")
raise ValueError( raise ValueError(
f"Unsupported audio format for URL: {url}. " f"Unsupported audio format for URL: {url}. "
@@ -113,7 +105,7 @@ def download_pyannote_audio():
diarizer_image = ( diarizer_image = (
modal.Image.debian_slim(python_version="3.10") modal.Image.debian_slim(python_version="3.10.8")
.pip_install( .pip_install(
"pyannote.audio==3.1.0", "pyannote.audio==3.1.0",
"requests", "requests",
@@ -124,7 +116,7 @@ diarizer_image = (
"transformers==4.34.0", "transformers==4.34.0",
"sentencepiece", "sentencepiece",
"protobuf", "protobuf",
"numpy<2", "numpy",
"huggingface_hub", "huggingface_hub",
"hf-transfer", "hf-transfer",
) )

View File

@@ -89,7 +89,6 @@ image = (
"torch==2.5.1", "torch==2.5.1",
"faster-whisper==1.1.1", "faster-whisper==1.1.1",
"fastapi==0.115.12", "fastapi==0.115.12",
"python-multipart",
"requests", "requests",
"librosa==0.10.1", "librosa==0.10.1",
"numpy<2", "numpy<2",
@@ -99,12 +98,6 @@ image = (
) )
# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
# If you modify the audio format detection logic, you MUST update all copies:
# - gpu/self_hosted/app/utils.py
# - gpu/modal_deployments/reflector_transcriber.py (this file - 2 copies!)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/modal_deployments/reflector_diarizer.py
def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension: def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
parsed_url = urlparse(url) parsed_url = urlparse(url)
url_path = parsed_url.path url_path = parsed_url.path
@@ -120,8 +113,6 @@ def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtens
return AudioFileExtension("wav") return AudioFileExtension("wav")
if "audio/mp4" in content_type: if "audio/mp4" in content_type:
return AudioFileExtension("mp4") return AudioFileExtension("mp4")
if "audio/webm" in content_type or "video/webm" in content_type:
return AudioFileExtension("webm")
raise ValueError( raise ValueError(
f"Unsupported audio format for URL: {url}. " f"Unsupported audio format for URL: {url}. "
@@ -324,11 +315,6 @@ class TranscriberWhisperFile:
import numpy as np import numpy as np
from silero_vad import VADIterator from silero_vad import VADIterator
# IMPORTANT: This VAD segment logic is duplicated in multiple files for deployment isolation.
# If you modify this function, you MUST update all copies:
# - gpu/modal_deployments/reflector_transcriber.py (this file)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/self_hosted/app/services/transcriber.py
def vad_segments( def vad_segments(
audio_array, audio_array,
sample_rate: int = SAMPLERATE, sample_rate: int = SAMPLERATE,
@@ -336,7 +322,6 @@ class TranscriberWhisperFile:
) -> Generator[TimeSegment, None, None]: ) -> Generator[TimeSegment, None, None]:
"""Generate speech segments as TimeSegment using Silero VAD.""" """Generate speech segments as TimeSegment using Silero VAD."""
iterator = VADIterator(self.vad_model, sampling_rate=sample_rate) iterator = VADIterator(self.vad_model, sampling_rate=sample_rate)
audio_duration = len(audio_array) / float(SAMPLERATE)
start = None start = None
for i in range(0, len(audio_array), window_size): for i in range(0, len(audio_array), window_size):
chunk = audio_array[i : i + window_size] chunk = audio_array[i : i + window_size]
@@ -356,9 +341,6 @@ class TranscriberWhisperFile:
start / float(SAMPLERATE), end / float(SAMPLERATE) start / float(SAMPLERATE), end / float(SAMPLERATE)
) )
start = None start = None
# Handle case where audio ends while speech is still active
if start is not None:
yield TimeSegment(start / float(SAMPLERATE), audio_duration)
iterator.reset_states() iterator.reset_states()
upload_volume.reload() upload_volume.reload()
@@ -424,12 +406,6 @@ class TranscriberWhisperFile:
return {"text": " ".join(all_text), "words": all_words} return {"text": " ".join(all_text), "words": all_words}
# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
# If you modify the audio format detection logic, you MUST update all copies:
# - gpu/self_hosted/app/utils.py
# - gpu/modal_deployments/reflector_transcriber.py (this file - 2 copies!)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/modal_deployments/reflector_diarizer.py
def detect_audio_format(url: str, headers: dict) -> str: def detect_audio_format(url: str, headers: dict) -> str:
from urllib.parse import urlparse from urllib.parse import urlparse
@@ -447,8 +423,6 @@ def detect_audio_format(url: str, headers: dict) -> str:
return "wav" return "wav"
if "audio/mp4" in content_type: if "audio/mp4" in content_type:
return "mp4" return "mp4"
if "audio/webm" in content_type or "video/webm" in content_type:
return "webm"
raise HTTPException( raise HTTPException(
status_code=400, status_code=400,

View File

@@ -81,21 +81,15 @@ image = (
"cuda-python==12.8.0", "cuda-python==12.8.0",
"fastapi==0.115.12", "fastapi==0.115.12",
"numpy<2", "numpy<2",
"librosa==0.11.0", "librosa==0.10.1",
"requests", "requests",
"silero-vad==6.2.0", "silero-vad==5.1.0",
"torch", "torch",
) )
.entrypoint([]) # silence chatty logs by container on start .entrypoint([]) # silence chatty logs by container on start
) )
# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
# If you modify the audio format detection logic, you MUST update all copies:
# - gpu/self_hosted/app/utils.py
# - gpu/modal_deployments/reflector_transcriber.py (2 copies)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py (this file)
# - gpu/modal_deployments/reflector_diarizer.py
def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension: def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
parsed_url = urlparse(url) parsed_url = urlparse(url)
url_path = parsed_url.path url_path = parsed_url.path
@@ -111,8 +105,6 @@ def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtens
return AudioFileExtension("wav") return AudioFileExtension("wav")
if "audio/mp4" in content_type: if "audio/mp4" in content_type:
return AudioFileExtension("mp4") return AudioFileExtension("mp4")
if "audio/webm" in content_type or "video/webm" in content_type:
return AudioFileExtension("webm")
raise ValueError( raise ValueError(
f"Unsupported audio format for URL: {url}. " f"Unsupported audio format for URL: {url}. "
@@ -309,17 +301,11 @@ class TranscriberParakeetFile:
audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True) audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
return audio_array return audio_array
# IMPORTANT: This VAD segment logic is duplicated in multiple files for deployment isolation.
# If you modify this function, you MUST update all copies:
# - gpu/modal_deployments/reflector_transcriber.py
# - gpu/modal_deployments/reflector_transcriber_parakeet.py (this file)
# - gpu/self_hosted/app/services/transcriber.py
def vad_segment_generator( def vad_segment_generator(
audio_array, audio_array,
) -> Generator[TimeSegment, None, None]: ) -> Generator[TimeSegment, None, None]:
"""Generate speech segments using VAD with start/end sample indices""" """Generate speech segments using VAD with start/end sample indices"""
vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE) vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
audio_duration = len(audio_array) / float(SAMPLERATE)
window_size = VAD_CONFIG["window_size"] window_size = VAD_CONFIG["window_size"]
start = None start = None
@@ -346,10 +332,6 @@ class TranscriberParakeetFile:
yield TimeSegment(start_time, end_time) yield TimeSegment(start_time, end_time)
start = None start = None
if start is not None:
start_time = start / float(SAMPLERATE)
yield TimeSegment(start_time, audio_duration)
vad_iterator.reset_states() vad_iterator.reset_states()
def batch_speech_segments( def batch_speech_segments(

View File

@@ -103,7 +103,7 @@ def configure_seamless_m4t():
transcriber_image = ( transcriber_image = (
Image.debian_slim(python_version="3.10") Image.debian_slim(python_version="3.10.8")
.apt_install("git") .apt_install("git")
.apt_install("wget") .apt_install("wget")
.apt_install("libsndfile-dev") .apt_install("libsndfile-dev")
@@ -119,7 +119,6 @@ transcriber_image = (
"fairseq2", "fairseq2",
"pyyaml", "pyyaml",
"hf-transfer~=0.1", "hf-transfer~=0.1",
"pydantic",
) )
.run_function(install_seamless_communication) .run_function(install_seamless_communication)
.run_function(download_seamlessm4t_model) .run_function(download_seamlessm4t_model)

View File

@@ -1,137 +0,0 @@
# Local Development GPU Setup
Run transcription and diarization locally for development/testing.
> **For production deployment**, see the [Self-Hosted GPU Setup Guide](../../docs/docs/installation/self-hosted-gpu-setup.md).
## Prerequisites
1. **Python 3.12+** and **uv** package manager
2. **FFmpeg** installed and on PATH
3. **HuggingFace account** with access to pyannote models
### Accept Pyannote Licenses (Required)
Before first run, accept licenses for these gated models (logged into HuggingFace):
- https://hf.co/pyannote/speaker-diarization-3.1
- https://hf.co/pyannote/segmentation-3.0
## Quick Start
### 1. Install dependencies
```bash
cd gpu/self_hosted
uv sync
```
### 2. Start the GPU service
```bash
cd gpu/self_hosted
HF_TOKEN=<your-huggingface-token> \
REFLECTOR_GPU_APIKEY=dev-key-12345 \
.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
```
Note: The `.env` file is NOT auto-loaded. Pass env vars explicitly or use:
```bash
export HF_TOKEN=<your-token>
export REFLECTOR_GPU_APIKEY=dev-key-12345
.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
```
### 3. Configure Reflector to use local GPU
Edit `server/.env`:
```bash
# Transcription - local GPU service
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=http://host.docker.internal:8000
TRANSCRIPT_MODAL_API_KEY=dev-key-12345
# Diarization - local GPU service
DIARIZATION_BACKEND=modal
DIARIZATION_URL=http://host.docker.internal:8000
DIARIZATION_MODAL_API_KEY=dev-key-12345
```
Note: Use `host.docker.internal` because Reflector server runs in Docker.
### 4. Restart Reflector server
```bash
cd server
docker compose restart server worker
```
## Testing
### Test transcription
```bash
curl -s -X POST http://localhost:8000/v1/audio/transcriptions \
-H "Authorization: Bearer dev-key-12345" \
-F "file=@/path/to/audio.wav" \
-F "language=en"
```
### Test diarization
```bash
curl -s -X POST "http://localhost:8000/diarize?audio_file_url=<audio-url>" \
-H "Authorization: Bearer dev-key-12345"
```
## Platform Notes
### macOS (ARM)
Docker build fails - CUDA packages are x86_64 only. Use local Python instead:
```bash
uv sync
HF_TOKEN=xxx REFLECTOR_GPU_APIKEY=xxx .venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
```
### Linux with NVIDIA GPU
Docker works with CUDA acceleration:
```bash
docker compose up -d
```
### CPU-only
Works on any platform, just slower. PyTorch auto-detects and falls back to CPU.
## Switching Back to Modal.com
Edit `server/.env`:
```bash
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-parakeet-web.modal.run
TRANSCRIPT_MODAL_API_KEY=<modal-api-key>
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
DIARIZATION_MODAL_API_KEY=<modal-api-key>
```
## Troubleshooting
### "Could not download pyannote pipeline"
- Accept model licenses at HuggingFace (see Prerequisites)
- Verify HF_TOKEN is set and valid
### Service won't start
- Check port 8000 is free: `lsof -i :8000`
- Kill orphan processes if needed
### Transcription returns empty text
- Ensure audio contains speech (not just tones/silence)
- Check audio format is supported (wav, mp3, etc.)
### Deprecation warnings from torchaudio/pyannote
- Safe to ignore - doesn't affect functionality

View File

@@ -56,13 +56,9 @@ Docker
- Not yet provided in this directory. A Dockerfile will be added later. For now, use Local run above - Not yet provided in this directory. A Dockerfile will be added later. For now, use Local run above
# Setup Conformance tests
[SETUP.md](SETUP.md) # From this directory
# Conformance tests
## From this directory
TRANSCRIPT_URL=http://localhost:8000 \ TRANSCRIPT_URL=http://localhost:8000 \
TRANSCRIPT_API_KEY=dev-key \ TRANSCRIPT_API_KEY=dev-key \

View File

@@ -129,11 +129,6 @@ class WhisperService:
audio = np.frombuffer(proc.stdout, dtype=np.float32) audio = np.frombuffer(proc.stdout, dtype=np.float32)
return audio return audio
# IMPORTANT: This VAD segment logic is duplicated in multiple files for deployment isolation.
# If you modify this function, you MUST update all copies:
# - gpu/modal_deployments/reflector_transcriber.py
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/self_hosted/app/services/transcriber.py (this file)
def vad_segments( def vad_segments(
audio_array, audio_array,
sample_rate: int = SAMPLE_RATE, sample_rate: int = SAMPLE_RATE,
@@ -158,10 +153,6 @@ class WhisperService:
end = speech["end"] end = speech["end"]
yield (start / float(SAMPLE_RATE), end / float(SAMPLE_RATE)) yield (start / float(SAMPLE_RATE), end / float(SAMPLE_RATE))
start = None start = None
# Handle case where audio ends while speech is still active
if start is not None:
audio_duration = len(audio_array) / float(sample_rate)
yield (start / float(SAMPLE_RATE), audio_duration)
iterator.reset_states() iterator.reset_states()
audio_array = load_audio_via_ffmpeg(file_path, SAMPLE_RATE) audio_array = load_audio_via_ffmpeg(file_path, SAMPLE_RATE)

View File

@@ -34,12 +34,6 @@ def ensure_dirs():
UPLOADS_PATH.mkdir(parents=True, exist_ok=True) UPLOADS_PATH.mkdir(parents=True, exist_ok=True)
# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
# If you modify the audio format detection logic, you MUST update all copies:
# - gpu/self_hosted/app/utils.py (this file)
# - gpu/modal_deployments/reflector_transcriber.py (2 copies)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/modal_deployments/reflector_diarizer.py
def detect_audio_format(url: str, headers: Mapping[str, str]) -> str: def detect_audio_format(url: str, headers: Mapping[str, str]) -> str:
url_path = urlparse(url).path url_path = urlparse(url).path
for ext in SUPPORTED_FILE_EXTENSIONS: for ext in SUPPORTED_FILE_EXTENSIONS:
@@ -53,8 +47,6 @@ def detect_audio_format(url: str, headers: Mapping[str, str]) -> str:
return "wav" return "wav"
if "audio/mp4" in content_type: if "audio/mp4" in content_type:
return "mp4" return "mp4"
if "audio/webm" in content_type or "video/webm" in content_type:
return "webm"
raise HTTPException( raise HTTPException(
status_code=400, status_code=400,

View File

@@ -8,11 +8,3 @@ services:
- .env - .env
volumes: volumes:
- ./cache:/root/.cache - ./cache:/root/.cache
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
restart: unless-stopped

View File

@@ -1,258 +0,0 @@
#!/bin/bash
set -e
# Setup Authentik OAuth provider for Reflector
#
# IMPORTANT: Run this script from your Reflector repository directory (cd ~/reflector)
# The script creates files using relative paths: server/reflector/auth/jwt/keys/
#
# Usage: ./setup-authentik-oauth.sh <authentik-url> <admin-password> <frontend-url>
# Example: ./setup-authentik-oauth.sh https://authentik.example.com MyPassword123 https://app.example.com
#
# Flow: mint an Authentik API token via the Django shell, look up the default
# flows/scope mappings/signing key, create (or update) the "Reflector" OAuth2
# provider and application, export the JWT public key, append config to both
# .env files, then restart the Reflector services.
#
# NOTE(review): requires curl, jq, openssl and docker on PATH, and assumes the
# Authentik stack lives in ~/authentik — confirm on the target host.
# NOTE(review): <admin-password> ($2) is validated but never used below (the
# token is minted via `docker compose exec`, not the login API) — presumably
# kept for documentation/symmetry; TODO confirm before relying on it.
AUTHENTIK_URL="${1:-}"
ADMIN_PASSWORD="${2:-}"
FRONTEND_URL="${3:-}"
if [ -z "$AUTHENTIK_URL" ] || [ -z "$ADMIN_PASSWORD" ] || [ -z "$FRONTEND_URL" ]; then
echo "Usage: $0 <authentik-url> <admin-password> <frontend-url>"
echo "Example: $0 https://authentik.example.com MyPassword123 https://app.example.com"
exit 1
fi
# Remove trailing slash from URLs
AUTHENTIK_URL="${AUTHENTIK_URL%/}"
FRONTEND_URL="${FRONTEND_URL%/}"
echo "==========================================="
echo "Authentik OAuth Setup for Reflector"
echo "==========================================="
echo ""
echo "Authentik URL: $AUTHENTIK_URL"
echo "Frontend URL: $FRONTEND_URL"
echo ""
# Step 1: Create API token via Django shell
# The heredoc delimiter is quoted ('PYTHON'), so the Python source is passed
# verbatim with no shell expansion. The pipeline keeps only the "TOKEN:<key>"
# line and takes the field after the first colon (token keys are hex, so no
# embedded ':'). update_or_create makes this step idempotent across re-runs.
# NOTE(review): uses `sudo docker compose` here but plain `docker compose` in
# Step 11 — confirm which invocation the target host actually needs.
echo "Creating API token..."
cd ~/authentik || { echo "Error: ~/authentik directory not found"; exit 1; }
API_TOKEN=$(sudo docker compose exec -T server python -m manage shell 2>&1 << 'PYTHON' | grep "^TOKEN:" | cut -d: -f2
from authentik.core.models import User, Token, TokenIntents
user = User.objects.get(username='akadmin')
token, created = Token.objects.update_or_create(
identifier='reflector-setup',
defaults={
'user': user,
'intent': TokenIntents.INTENT_API,
'description': 'Reflector setup token',
'expiring': False
}
)
print(f"TOKEN:{token.key}")
PYTHON
)
cd - > /dev/null
if [ -z "$API_TOKEN" ] || [ "$API_TOKEN" = "null" ]; then
echo "Error: Failed to create API token"
echo "Make sure Authentik is fully started and akadmin user exists"
exit 1
fi
echo " -> Got API token"
# Step 2: Get authorization flow UUID
# `jq -r` prints the literal string "null" when the path is missing, hence the
# explicit "null" comparison next to the empty-string check (same pattern below).
echo "Getting authorization flow..."
FLOW_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/flows/instances/?slug=default-provider-authorization-implicit-consent" \
-H "Authorization: Bearer $API_TOKEN")
FLOW_UUID=$(echo "$FLOW_RESPONSE" | jq -r '.results[0].pk')
if [ -z "$FLOW_UUID" ] || [ "$FLOW_UUID" = "null" ]; then
echo "Error: Could not find authorization flow"
echo "Response: $FLOW_RESPONSE"
exit 1
fi
echo " -> Flow UUID: $FLOW_UUID"
# Step 3: Get invalidation flow UUID
# Missing invalidation flow is non-fatal: fall back to the authorization flow.
echo "Getting invalidation flow..."
INVALIDATION_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/flows/instances/?slug=default-provider-invalidation-flow" \
-H "Authorization: Bearer $API_TOKEN")
INVALIDATION_UUID=$(echo "$INVALIDATION_RESPONSE" | jq -r '.results[0].pk')
if [ -z "$INVALIDATION_UUID" ] || [ "$INVALIDATION_UUID" = "null" ]; then
echo "Warning: Could not find invalidation flow, using authorization flow"
INVALIDATION_UUID="$FLOW_UUID"
fi
echo " -> Invalidation UUID: $INVALIDATION_UUID"
# Step 4: Get scope mappings (email, openid, profile)
# The '\'' sequences close/reopen the single-quoted jq program to embed literal
# single quotes, matching names like: authentik default OAuth Mapping: OpenID 'email'
echo "Getting scope mappings..."
SCOPE_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/propertymappings/all/" \
-H "Authorization: Bearer $API_TOKEN")
EMAIL_SCOPE=$(echo "$SCOPE_RESPONSE" | jq -r '.results[] | select(.name == "authentik default OAuth Mapping: OpenID '\''email'\''") | .pk')
OPENID_SCOPE=$(echo "$SCOPE_RESPONSE" | jq -r '.results[] | select(.name == "authentik default OAuth Mapping: OpenID '\''openid'\''") | .pk')
PROFILE_SCOPE=$(echo "$SCOPE_RESPONSE" | jq -r '.results[] | select(.name == "authentik default OAuth Mapping: OpenID '\''profile'\''") | .pk')
echo " -> email: $EMAIL_SCOPE"
echo " -> openid: $OPENID_SCOPE"
echo " -> profile: $PROFILE_SCOPE"
# Step 5: Get signing key
# NOTE(review): takes the first certificate keypair returned — assumes it is
# the default signing key; verify on instances with more than one certificate.
echo "Getting signing key..."
CERT_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/crypto/certificatekeypairs/" \
-H "Authorization: Bearer $API_TOKEN")
SIGNING_KEY=$(echo "$CERT_RESPONSE" | jq -r '.results[0].pk')
echo " -> Signing key: $SIGNING_KEY"
# Step 6: Generate client credentials
# Fixed client_id; fresh 64-hex-char secret on every run.
CLIENT_ID="reflector"
CLIENT_SECRET=$(openssl rand -hex 32)
# Step 7: Create OAuth2 provider
# On success the POST body contains .pk; on a name collision Authentik returns
# an error body (no .pk), handled below by re-reading and PATCHing the
# existing provider with the new secret/mappings/signing key.
echo "Creating OAuth2 provider..."
PROVIDER_RESPONSE=$(curl -s -X POST "$AUTHENTIK_URL/api/v3/providers/oauth2/" \
-H "Authorization: Bearer $API_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"name\": \"Reflector\",
\"authorization_flow\": \"$FLOW_UUID\",
\"invalidation_flow\": \"$INVALIDATION_UUID\",
\"client_type\": \"confidential\",
\"client_id\": \"$CLIENT_ID\",
\"client_secret\": \"$CLIENT_SECRET\",
\"redirect_uris\": [{
\"matching_mode\": \"strict\",
\"url\": \"$FRONTEND_URL/api/auth/callback/authentik\"
}],
\"property_mappings\": [\"$EMAIL_SCOPE\", \"$OPENID_SCOPE\", \"$PROFILE_SCOPE\"],
\"signing_key\": \"$SIGNING_KEY\",
\"access_token_validity\": \"hours=1\",
\"refresh_token_validity\": \"days=30\"
}")
PROVIDER_ID=$(echo "$PROVIDER_RESPONSE" | jq -r '.pk')
if [ -z "$PROVIDER_ID" ] || [ "$PROVIDER_ID" = "null" ]; then
# Check if provider already exists
if echo "$PROVIDER_RESPONSE" | grep -q "already exists"; then
echo " -> Provider already exists, updating..."
EXISTING=$(curl -s "$AUTHENTIK_URL/api/v3/providers/oauth2/?name=Reflector" \
-H "Authorization: Bearer $API_TOKEN")
PROVIDER_ID=$(echo "$EXISTING" | jq -r '.results[0].pk')
CLIENT_ID=$(echo "$EXISTING" | jq -r '.results[0].client_id')
# Update secret and scopes
curl -s -X PATCH "$AUTHENTIK_URL/api/v3/providers/oauth2/$PROVIDER_ID/" \
-H "Authorization: Bearer $API_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"client_secret\": \"$CLIENT_SECRET\",
\"property_mappings\": [\"$EMAIL_SCOPE\", \"$OPENID_SCOPE\", \"$PROFILE_SCOPE\"],
\"signing_key\": \"$SIGNING_KEY\"
}" > /dev/null
else
echo "Error: Failed to create provider"
echo "Response: $PROVIDER_RESPONSE"
exit 1
fi
fi
echo " -> Provider ID: $PROVIDER_ID"
# Step 8: Create application
# Links the provider to a user-visible Authentik application with slug
# "reflector"; an "already exists" response is treated as success.
echo "Creating application..."
APP_RESPONSE=$(curl -s -X POST "$AUTHENTIK_URL/api/v3/core/applications/" \
-H "Authorization: Bearer $API_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"name\": \"Reflector\",
\"slug\": \"reflector\",
\"provider\": $PROVIDER_ID
}")
if echo "$APP_RESPONSE" | grep -q "already exists"; then
echo " -> Application already exists"
else
APP_SLUG=$(echo "$APP_RESPONSE" | jq -r '.slug')
if [ -z "$APP_SLUG" ] || [ "$APP_SLUG" = "null" ]; then
echo "Error: Failed to create application"
echo "Response: $APP_RESPONSE"
exit 1
fi
echo " -> Application created: $APP_SLUG"
fi
# Step 9: Extract public key for JWT verification
# jwks .x5c[0] is the base64 DER certificate; decode it and let openssl emit
# the PEM public key Reflector uses to verify access tokens. Writes under the
# current directory, which is why the script must run from the repo root.
echo "Extracting public key for JWT verification..."
mkdir -p server/reflector/auth/jwt/keys
curl -s "$AUTHENTIK_URL/application/o/reflector/jwks/" | \
jq -r '.keys[0].x5c[0]' | \
base64 -d | \
openssl x509 -pubkey -noout > server/reflector/auth/jwt/keys/authentik_public.pem
if [ ! -s server/reflector/auth/jwt/keys/authentik_public.pem ]; then
echo "Error: Failed to extract public key"
exit 1
fi
echo " -> Saved to server/reflector/auth/jwt/keys/authentik_public.pem"
# Step 10: Update environment files automatically
# NOTE(review): appends unconditionally — re-running the script duplicates
# these blocks in server/.env and www/.env (with a new CLIENT_SECRET each
# time). TODO confirm whether that is acceptable for repeated runs.
echo "Updating environment files..."
# Update server/.env
cat >> server/.env << EOF
# --- Authentik OAuth (added by setup script) ---
AUTH_BACKEND=jwt
AUTH_JWT_AUDIENCE=$CLIENT_ID
AUTH_JWT_PUBLIC_KEY=authentik_public.pem
# --- End JWT Configuration ---
EOF
echo " -> Updated server/.env"
# Update www/.env
cat >> www/.env << EOF
# --- Authentik OAuth (added by setup script) ---
FEATURE_REQUIRE_LOGIN=true
AUTHENTIK_ISSUER=$AUTHENTIK_URL/application/o/reflector
AUTHENTIK_REFRESH_TOKEN_URL=$AUTHENTIK_URL/application/o/token/
AUTHENTIK_CLIENT_ID=$CLIENT_ID
AUTHENTIK_CLIENT_SECRET=$CLIENT_SECRET
# --- End Authentik Configuration ---
EOF
echo " -> Updated www/.env"
# Step 11: Restart Reflector services
echo "Restarting Reflector services..."
docker compose -f docker-compose.prod.yml up -d server worker web
echo ""
echo "==========================================="
echo "Setup complete!"
echo "==========================================="
echo ""
echo "Authentik admin: $AUTHENTIK_URL"
echo " Username: akadmin"
echo " Password: (provided as argument)"
echo ""
echo "Frontend: $FRONTEND_URL"
echo " Authentication is now required"
echo ""
echo "Note: Public key saved to server/reflector/auth/jwt/keys/authentik_public.pem"
echo " and mounted via docker-compose volume."
echo ""
echo "==========================================="
echo "Configuration values (for reference):"
echo "==========================================="
echo ""
echo "# server/.env"
echo "AUTH_BACKEND=jwt"
echo "AUTH_JWT_AUDIENCE=$CLIENT_ID"
echo "AUTH_JWT_PUBLIC_KEY=authentik_public.pem"
echo ""
echo "# www/.env"
echo "FEATURE_REQUIRE_LOGIN=true"
echo "AUTHENTIK_ISSUER=$AUTHENTIK_URL/application/o/reflector"
echo "AUTHENTIK_REFRESH_TOKEN_URL=$AUTHENTIK_URL/application/o/token/"
echo "AUTHENTIK_CLIENT_ID=$CLIENT_ID"
echo "AUTHENTIK_CLIENT_SECRET=$CLIENT_SECRET"
echo ""

613
server/DAILYCO_TEST.md Normal file
View File

@@ -0,0 +1,613 @@
# Daily.co Integration Test Plan
## ✅ IMPLEMENTATION STATUS: Real Transcription Active
**This test validates Daily.co multitrack recording integration with REAL transcription/diarization.**
The implementation includes complete audio processing pipeline:
- **Multitrack recordings** from Daily.co S3 (separate audio stream per participant)
- **PyAV-based audio mixdown** with PTS-based track alignment
- **Real transcription** via Modal GPU backend (Whisper)
- **Real diarization** via Modal GPU backend (speaker identification)
- **Per-track transcription** with timestamp synchronization
- **Complete database entities** (recording, transcript, topics, participants, words)
**Processing pipeline** (`PipelineMainMultitrack`):
1. Download all audio tracks from Daily.co S3
2. Align tracks by PTS (presentation timestamp) to handle late joiners
3. Mix tracks into single audio file for unified playback
4. Transcribe each track individually with proper offset handling
5. Perform diarization on mixed audio
6. Generate topics, summaries, and word-level timestamps
7. Convert audio to MP3 and generate waveform visualization
**Note:** A stub processor (`process_daily_recording`) exists for testing webhook flow without GPU costs, but the production code path uses `process_multitrack_recording` with full ML pipeline.
---
## Prerequisites
**1. Environment Variables** (check in `.env.development.local`):
```bash
# Daily.co API Configuration
DAILY_API_KEY=<key>
DAILY_SUBDOMAIN=monadical
DAILY_WEBHOOK_SECRET=<base64-encoded-secret>
AWS_DAILY_S3_BUCKET=reflector-dailyco-local
AWS_DAILY_S3_REGION=us-east-1
AWS_DAILY_ROLE_ARN=arn:aws:iam::950402358378:role/DailyCo
DAILY_MIGRATION_ENABLED=true
DAILY_MIGRATION_ROOM_IDS=["552640fd-16f2-4162-9526-8cf40cd2357e"]
# Transcription/Diarization Backend (Required for real processing)
DIARIZATION_BACKEND=modal
DIARIZATION_MODAL_API_KEY=<modal-api-key>
# TRANSCRIPT_BACKEND is not explicitly set (uses default/modal)
```
**2. Services Running:**
```bash
docker compose ps # server, postgres, redis, worker, beat should be UP
```
**IMPORTANT:** Worker and beat services MUST be running for transcription processing:
```bash
docker compose up -d worker beat
```
**3. ngrok Tunnel for Webhooks:**
```bash
# Start ngrok (if not already running)
ngrok http 1250 --log=stdout > /tmp/ngrok.log 2>&1 &
# Get public URL
curl -s http://localhost:4040/api/tunnels | python3 -c "import sys, json; data=json.load(sys.stdin); print(data['tunnels'][0]['public_url'])"
```
**Current ngrok URL:** `https://0503947384a3.ngrok-free.app` (as of last registration)
**4. Webhook Created:**
```bash
cd server
uv run python scripts/recreate_daily_webhook.py https://0503947384a3.ngrok-free.app/v1/daily/webhook
# Verify: "Created webhook <uuid> (state: ACTIVE)"
```
**Current webhook status:** ✅ ACTIVE (webhook ID: dad5ad16-ceca-488e-8fc5-dae8650b51d0)
---
## Test 1: Database Configuration
**Check room platform:**
```bash
docker-compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT id, name, platform, recording_type FROM room WHERE name = 'test2';"
```
**Expected:**
```
id: 552640fd-16f2-4162-9526-8cf40cd2357e
name: test2
platform: whereby # DB value (overridden by env var DAILY_MIGRATION_ROOM_IDS)
recording_type: cloud
```
**Clear old meetings:**
```bash
docker-compose exec -T postgres psql -U reflector -d reflector -c \
"UPDATE meeting SET is_active = false WHERE room_id = '552640fd-16f2-4162-9526-8cf40cd2357e';"
```
---
## Test 2: Meeting Creation with Auto-Recording
**Create meeting:**
```bash
curl -s -X POST http://localhost:1250/v1/rooms/test2/meeting \
-H "Content-Type: application/json" \
-d '{"allow_duplicated":false}' | python3 -m json.tool
```
**Expected Response:**
```json
{
"room_name": "test2-YYYYMMDDHHMMSS", // Includes "test2" prefix!
"room_url": "https://monadical.daily.co/test2-...?t=<JWT_TOKEN>", // Has token!
"platform": "daily",
"recording_type": "cloud" // DB value (Whereby-specific)
}
```
**Decode token to verify auto-recording:**
```bash
# Extract token from room_url, decode JWT payload
echo "<token>" | python3 -c "
import sys, json, base64
token = sys.stdin.read().strip()
seg = token.split('.')[1]
payload = seg + '=' * (-len(seg) % 4)
print(json.dumps(json.loads(base64.urlsafe_b64decode(payload)), indent=2))
"
```
**Expected token payload:**
```json
{
"r": "test2-YYYYMMDDHHMMSS", // Room name
"sr": true, // start_recording: true ✅
"d": "...", // Domain ID
"iat": 1234567890
}
```
---
## Test 3: Daily.co API Verification
**Check room configuration:**
```bash
ROOM_NAME="<from previous step>"
curl -s -X GET "https://api.daily.co/v1/rooms/$ROOM_NAME" \
-H "Authorization: Bearer $DAILY_API_KEY" | python3 -m json.tool
```
**Expected config:**
```json
{
"config": {
"enable_recording": "raw-tracks", // ✅
"recordings_bucket": {
"bucket_name": "reflector-dailyco-local",
"bucket_region": "us-east-1",
"assume_role_arn": "arn:aws:iam::950402358378:role/DailyCo"
}
}
}
```
---
## Test 4: Browser UI Test (Playwright MCP)
**Using Claude Code MCP tools:**
**Load room:**
```
Use: mcp__playwright__browser_navigate
Input: {"url": "http://localhost:3000/test2"}
Then wait 12 seconds for iframe to load
```
**Verify Daily.co iframe loaded:**
```
Use: mcp__playwright__browser_snapshot
Expected in snapshot:
- iframe element with src containing "monadical.daily.co"
- Daily.co pre-call UI visible
```
**Take screenshot:**
```
Use: mcp__playwright__browser_take_screenshot
Input: {"filename": "test2-before-join.png"}
Expected: Daily.co pre-call UI with "Join" button visible
```
**Join meeting:**
```
Note: Daily.co iframe interaction requires clicking inside iframe.
Use: mcp__playwright__browser_click
Input: {"element": "Join button in Daily.co iframe", "ref": "<ref-from-snapshot>"}
Then wait 5 seconds for call to connect
```
**Verify in-call:**
```
Use: mcp__playwright__browser_take_screenshot
Input: {"filename": "test2-in-call.png"}
Expected: "Waiting for others to join" or participant video visible
```
**Leave meeting:**
```
Use: mcp__playwright__browser_click
Input: {"element": "Leave button in Daily.co iframe", "ref": "<ref-from-snapshot>"}
```
---
**Alternative: JavaScript snippets (for manual testing):**
```javascript
await page.goto('http://localhost:3000/test2');
await new Promise(f => setTimeout(f, 12000)); // Wait for load
// Verify iframe
const iframes = document.querySelectorAll('iframe');
// Expected: 1 iframe with src containing "monadical.daily.co"
// Screenshot
await page.screenshot({ path: 'test2-before-join.png' });
// Join
await page.locator('iframe').contentFrame().getByRole('button', { name: 'Join' }).click();
await new Promise(f => setTimeout(f, 5000));
// In-call screenshot
await page.screenshot({ path: 'test2-in-call.png' });
// Leave
await page.locator('iframe').contentFrame().getByRole('button', { name: 'Leave' }).click();
```
---
## Test 5: Webhook Verification
**Check server logs for webhooks:**
```bash
docker-compose logs --since 15m server 2>&1 | grep -i "participant joined\|recording started"
```
**Expected logs:**
```
[info] Participant joined | meeting_id=... | num_clients=1 | recording_type=cloud | recording_trigger=automatic-2nd-participant
[info] Recording started | meeting_id=... | recording_id=... | platform=daily
```
**Check Daily.co webhook delivery logs:**
```bash
curl -s -X GET "https://api.daily.co/v1/logs/webhooks?limit=20" \
-H "Authorization: Bearer $DAILY_API_KEY" | python3 -c "
import sys, json
logs = json.load(sys.stdin)
for log in logs[:10]:
req = json.loads(log['request'])
room = req.get('payload', {}).get('room') or req.get('payload', {}).get('room_name', 'N/A')
print(f\"{req['type']:30s} | room: {room:30s} | status: {log['status']}\")
"
```
**Expected output:**
```
participant.joined | room: test2-YYYYMMDDHHMMSS | status: 200
recording.started | room: test2-YYYYMMDDHHMMSS | status: 200
participant.left | room: test2-YYYYMMDDHHMMSS | status: 200
recording.ready-to-download | room: test2-YYYYMMDDHHMMSS | status: 200
```
**Check database updated:**
```bash
docker-compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT room_name, num_clients FROM meeting WHERE room_name LIKE 'test2-%' ORDER BY end_date DESC LIMIT 1;"
```
**Expected:**
```
room_name: test2-YYYYMMDDHHMMSS
num_clients: 0 // After participant left
```
---
## Test 6: Recording in S3
**List recent recordings:**
```bash
curl -s -X GET "https://api.daily.co/v1/recordings" \
-H "Authorization: Bearer $DAILY_API_KEY" | python3 -c "
import sys, json
data = json.load(sys.stdin)
for rec in data.get('data', [])[:5]:
if 'test2-' in rec.get('room_name', ''):
print(f\"Room: {rec['room_name']}\")
print(f\"Status: {rec['status']}\")
print(f\"Duration: {rec.get('duration', 0)}s\")
print(f\"S3 key: {rec.get('s3key', 'N/A')}\")
print(f\"Tracks: {len(rec.get('tracks', []))} files\")
for track in rec.get('tracks', []):
print(f\" - {track['type']}: {track['s3Key'].split('/')[-1]} ({track['size']} bytes)\")
print()
"
```
**Expected output:**
```
Room: test2-20251009192341
Status: finished
Duration: ~30-120s
S3 key: monadical/test2-20251009192341/1760037914930
Tracks: 2 files
- audio: 1760037914930-<uuid>-cam-audio-1760037915265 (~400 KB)
- video: 1760037914930-<uuid>-cam-video-1760037915269 (~10-30 MB)
```
**Verify S3 path structure:**
- `monadical/` - Daily.co subdomain
- `test2-20251009192341/` - Reflector room name + timestamp
- `<timestamp>-<participant-uuid>-<media-type>-<track-start>.webm` - Individual track files
---
## Test 7: Database Check - Recording and Transcript
**Check recording created:**
```bash
docker-compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT id, bucket_name, object_key, status, meeting_id, recorded_at
FROM recording
ORDER BY recorded_at DESC LIMIT 1;"
```
**Expected:**
```
id: <recording-id-from-webhook>
bucket_name: reflector-dailyco-local
object_key: monadical/test2-<timestamp>/<recording-timestamp>-<uuid>-cam-audio-<track-start>.webm
status: completed
meeting_id: <meeting-id>
recorded_at: <recent-timestamp>
```
**Check transcript created:**
```bash
docker compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT id, title, status, duration, recording_id, meeting_id, room_id
FROM transcript
ORDER BY created_at DESC LIMIT 1;"
```
**Expected (REAL transcription):**
```
id: <transcript-id>
title: <AI-generated title based on actual conversation content>
status: uploaded (audio file processed and available)
duration: <actual meeting duration in seconds>
recording_id: <same-as-recording-id-above>
meeting_id: <meeting-id>
room_id: 552640fd-16f2-4162-9526-8cf40cd2357e
```
**Note:** Title and content will reflect the ACTUAL conversation, not mock data. Processing time depends on recording length and GPU backend availability (Modal).
**Verify audio file exists:**
```bash
ls -lh data/<transcript-id>/upload.webm
```
**Expected:**
```
-rw-r--r-- 1 user staff ~100-200K Oct 10 18:48 upload.webm
```
**Check transcript topics (REAL transcription):**
```bash
TRANSCRIPT_ID=$(docker compose exec -T postgres psql -U reflector -d reflector -t -c \
"SELECT id FROM transcript ORDER BY created_at DESC LIMIT 1;")
docker compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT
jsonb_array_length(topics) as num_topics,
jsonb_array_length(participants) as num_participants,
short_summary,
title
FROM transcript
WHERE id = '$TRANSCRIPT_ID';"
```
**Expected (REAL data):**
```
num_topics: <varies based on conversation>
num_participants: <actual number of participants who spoke>
short_summary: <AI-generated summary of actual conversation>
title: <AI-generated title based on content>
```
**Check topics contain actual transcription:**
```bash
docker compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT topics->0->'title', topics->0->'summary', topics->0->'transcript'
FROM transcript
ORDER BY created_at DESC LIMIT 1;" | head -20
```
**Expected output:** Will contain the ACTUAL transcribed conversation from the Daily.co meeting, not mock data.
**Check participants:**
```bash
docker compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT participants FROM transcript ORDER BY created_at DESC LIMIT 1;" \
| python3 -c "import sys, json; data=json.loads(sys.stdin.read()); print(json.dumps(data, indent=2))"
```
**Expected (REAL diarization):**
```json
[
{
"id": "<uuid>",
"speaker": 0,
"name": "Speaker 1"
},
{
"id": "<uuid>",
"speaker": 1,
"name": "Speaker 2"
}
]
```
**Note:** Speaker names will be generic ("Speaker 1", "Speaker 2", etc.) as determined by the diarization backend. Number of participants depends on how many actually spoke during the meeting.
**Check word-level data:**
```bash
docker compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT jsonb_array_length(topics->0->'words') as num_words_first_topic
FROM transcript
ORDER BY created_at DESC LIMIT 1;"
```
**Expected:**
```
num_words_first_topic: <varies based on actual conversation length and topic chunking>
```
**Verify speaker diarization in words:**
```bash
docker compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT
topics->0->'words'->0->>'text' as first_word,
topics->0->'words'->0->>'speaker' as speaker,
topics->0->'words'->0->>'start' as start_time,
topics->0->'words'->0->>'end' as end_time
FROM transcript
ORDER BY created_at DESC LIMIT 1;"
```
**Expected (REAL transcription):**
```
first_word: <actual first word from transcription>
speaker: 0, 1, 2, ... (actual speaker ID from diarization)
start_time: <actual timestamp in seconds>
end_time: <actual end timestamp>
```
**Note:** All timestamps and speaker IDs are from real transcription/diarization, synchronized across tracks.
---
## Test 8: Recording Type Verification
**Check what Daily.co received:**
```bash
curl -s -X GET "https://api.daily.co/v1/rooms/test2-<timestamp>" \
-H "Authorization: Bearer $DAILY_API_KEY" | python3 -m json.tool | grep "enable_recording"
```
**Expected:**
```json
"enable_recording": "raw-tracks"
```
**NOT:** `"enable_recording": "cloud"` (that would be wrong - we want raw tracks)
---
## Troubleshooting
### Issue: No webhooks received
**Check webhook state:**
```bash
curl -s -X GET "https://api.daily.co/v1/webhooks" \
-H "Authorization: Bearer $DAILY_API_KEY" | python3 -m json.tool
```
**If state is FAILED:**
```bash
cd server
uv run python scripts/recreate_daily_webhook.py https://<ngrok-url>/v1/daily/webhook
```
### Issue: Webhooks return 422
**Check server logs:**
```bash
docker-compose logs --tail=50 server | grep "Failed to parse webhook event"
```
**Common cause:** Event structure mismatch. Daily.co events use:
```json
{
"version": "1.0.0",
"type": "participant.joined",
"payload": {...}, // NOT "data"
"event_ts": 123.456 // NOT "ts"
}
```
### Issue: Recording not starting
1. **Check token has `sr: true`:**
- Decode JWT token from room_url query param
- Should contain `"sr": true`
2. **Check Daily.co room config:**
- `enable_recording` must be set (not false)
- For raw-tracks: must be exactly `"raw-tracks"`
3. **Check participant actually joined:**
- Logs should show "Participant joined"
- Must click "Join" button, not just pre-call screen
### Issue: Recording in S3 but wrong format
**Daily.co recording types:**
- `"cloud"` → Single MP4 file (`download_link` in webhook)
- `"raw-tracks"` → Multiple WebM files (`tracks` array in webhook)
- `"raw-tracks-audio-only"` → Only audio WebM files
**Current implementation:** Always uses `"raw-tracks"` (better for transcription)
---
## Quick Validation Commands
**One-liner to verify everything:**
```bash
# 1. Check room exists
docker-compose exec -T postgres psql -U reflector -d reflector -c \
"SELECT name, platform FROM room WHERE name = 'test2';" && \
# 2. Create meeting
MEETING=$(curl -s -X POST http://localhost:1250/v1/rooms/test2/meeting \
-H "Content-Type: application/json" -d '{"allow_duplicated":false}') && \
echo "$MEETING" | python3 -c "import sys,json; m=json.load(sys.stdin); print(f'Room: {m[\"room_name\"]}\nURL: {m[\"room_url\"][:80]}...')" && \
# 3. Check Daily.co config
ROOM_NAME=$(echo "$MEETING" | python3 -c "import sys,json; print(json.load(sys.stdin)['room_name'])") && \
curl -s -X GET "https://api.daily.co/v1/rooms/$ROOM_NAME" \
-H "Authorization: Bearer $DAILY_API_KEY" | python3 -c "import sys,json; print(f'Recording: {json.load(sys.stdin)[\"config\"][\"enable_recording\"]}')"
```
**Expected output:**
```
name: test2, platform: whereby
Room: test2-20251009192341
URL: https://monadical.daily.co/test2-20251009192341?t=eyJhbGc...
Recording: raw-tracks
```
---
## Success Criteria Checklist
- [x] Room name includes Reflector room prefix (`test2-...`)
- [x] Meeting URL contains JWT token (`?t=...`)
- [x] Token has `sr: true` (auto-recording enabled)
- [x] Daily.co room config: `enable_recording: "raw-tracks"`
- [x] Browser loads Daily.co interface (not Whereby)
- [x] Recording auto-starts when participant joins
- [x] Webhooks received: participant.joined, recording.started, participant.left, recording.ready-to-download
- [x] Recording status: `finished`
- [x] S3 contains 2 files: audio (.webm) and video (.webm)
- [x] S3 path: `monadical/test2-{timestamp}/{recording-start-ts}-{participant-uuid}-cam-{audio|video}-{track-start-ts}`
- [x] Database `num_clients` increments/decrements correctly
- [x] **Database recording entry created** with correct S3 path and status `completed`
- [ ] **Database transcript entry created** with status `uploaded`
- [ ] **Audio file downloaded** to `data/{transcript_id}/upload.webm`
- [ ] **Transcript has REAL data**: AI-generated title based on conversation
- [ ] **Transcript has topics** generated from actual content
- [ ] **Transcript has participants** with proper speaker diarization
- [ ] **Topics contain word-level data** with accurate timestamps and speaker IDs
- [ ] **Total duration** matches actual meeting length
- [ ] **MP3 and waveform files generated** by file processing pipeline
- [ ] **Frontend transcript page loads** without "Failed to load audio" error
- [ ] **Audio player functional** with working playback and waveform visualization
- [ ] **Multitrack processing completed** without errors in worker logs
- [ ] **Modal GPU backends accessible** (transcription and diarization)

View File

@@ -6,7 +6,7 @@ ENV PYTHONUNBUFFERED=1 \
# builder install base dependencies # builder install base dependencies
WORKDIR /tmp WORKDIR /tmp
RUN apt-get update && apt-get install -y curl && apt-get clean RUN apt-get update && apt-get install -y curl ffmpeg && apt-get clean
ADD https://astral.sh/uv/install.sh /uv-installer.sh ADD https://astral.sh/uv/install.sh /uv-installer.sh
RUN sh /uv-installer.sh && rm /uv-installer.sh RUN sh /uv-installer.sh && rm /uv-installer.sh
ENV PATH="/root/.local/bin/:$PATH" ENV PATH="/root/.local/bin/:$PATH"

View File

@@ -1,29 +1,3 @@
## API Key Management
### Finding Your User ID
```bash
# Get your OAuth sub (user ID) - requires authentication
curl -H "Authorization: Bearer <your_jwt>" http://localhost:1250/v1/me
# Returns: {"sub": "your-oauth-sub-here", "email": "...", ...}
```
### Creating API Keys
```bash
curl -X POST http://localhost:1250/v1/user/api-keys \
-H "Authorization: Bearer <your_jwt>" \
-H "Content-Type: application/json" \
-d '{"name": "My API Key"}'
```
### Using API Keys
```bash
# Use X-API-Key header instead of Authorization
curl -H "X-API-Key: <your_api_key>" http://localhost:1250/v1/transcripts
```
## AWS S3/SQS usage clarification ## AWS S3/SQS usage clarification
Whereby.com uploads recordings directly to our S3 bucket when meetings end. Whereby.com uploads recordings directly to our S3 bucket when meetings end.
@@ -53,36 +27,6 @@ response = sqs.receive_message(QueueUrl=queue_url, ...)
uv run /app/requeue_uploaded_file.py TRANSCRIPT_ID uv run /app/requeue_uploaded_file.py TRANSCRIPT_ID
``` ```
## Hatchet Setup (Fresh DB)
After resetting the Hatchet database:
### Option A: Automatic (CLI)
```bash
# Get default tenant ID and create token in one command
TENANT_ID=$(docker compose exec -T postgres psql -U reflector -d hatchet -t -c \
"SELECT id FROM \"Tenant\" WHERE slug = 'default';" | tr -d ' \n') && \
TOKEN=$(docker compose exec -T hatchet /hatchet-admin token create \
--config /config --tenant-id "$TENANT_ID" 2>/dev/null | tr -d '\n') && \
echo "HATCHET_CLIENT_TOKEN=$TOKEN"
```
Copy the output to `server/.env`.
### Option B: Manual (UI)
1. Create API token at http://localhost:8889 → Settings → API Tokens
2. Update `server/.env`: `HATCHET_CLIENT_TOKEN=<new-token>`
### Then restart workers
```bash
docker compose restart server hatchet-worker
```
Workflows register automatically when hatchet-worker starts.
## Pipeline Management ## Pipeline Management
### Continue stuck pipeline from final summaries (identify_participants) step: ### Continue stuck pipeline from final summaries (identify_participants) step:

View File

@@ -1,2 +0,0 @@
-- Create hatchet database for Hatchet workflow engine
CREATE DATABASE hatchet;

View File

@@ -1,421 +0,0 @@
# Daily.co pipeline
This document details every external call, storage operation, and database write that occurs when a new Daily.co recording is discovered.
It includes a bunch of common logic that other pipelines use, therefore not everything is Daily-oriented.
**The doc was generated on 2025-12-12 and things may have changed since.**
## Trigger
Two entry points, both converging to the same handler:
1. **Webhook**: Daily.co sends `POST /v1/daily/webhook` with `recording.ready-to-download`
2. **Polling**: `GET /recordings` (paginated, max 100/call) → filter new → convert to same payload format
Both produce `RecordingReadyPayload` and call `handleRecordingReady(payload)`.
```
┌─────────────────┐ ┌──────────────────────────┐
│ Daily Webhook │────▶│ RecordingReadyPayload │
│ (push) │ │ {room_name, recording_id│
└─────────────────┘ │ tracks[], ...} │
└────────────┬─────────────┘
┌─────────────────┐ │
│ GET /recordings│ ▼
│ (poll) │────▶ convert ──▶ handleRecordingReady()
└─────────────────┘ │
┌────────────────────────┐
│ process_multitrack_ │
│ recording pipeline │
└────────────────────────┘
```
**Polling API**: `GET https://api.daily.co/v1/recordings`
- Pagination: `limit` (max 100), `starting_after`, `ending_before`
- Rate limit: ~2 req/sec
- Response: `{total_count, data: Recording[]}`
```mermaid
flowchart TB
subgraph Trigger["1. Recording Discovery - Daily.co Webhook"]
DAILY_WEBHOOK["Daily.co sends POST /v1/daily/webhook<br/>type: recording.ready-to-download"]
VERIFY["Verify X-Webhook-Signature (HMAC)"]
PARSE["Parse DailyWebhookEvent<br/>Extract tracks[], room_name, recording_id"]
FILTER["Filter audio tracks only<br/>track_keys = [t.s3Key for t in tracks if t.type == 'audio']"]
DISPATCH["process_multitrack_recording.delay()"]
DAILY_WEBHOOK --> VERIFY --> PARSE --> FILTER --> DISPATCH
end
subgraph Init["2. Recording Initialization"]
FETCH_MEETING[DB READ: meetings_controller.get_by_room_name]
FETCH_ROOM[DB READ: rooms_controller.get_by_name]
DAILY_API_REC[Daily API: GET /recordings/recording_id]
DAILY_API_PART[Daily API: GET /meetings/mtgSessionId/participants]
CREATE_RECORDING[DB WRITE: recordings_controller.create]
CREATE_TRANSCRIPT[DB WRITE: transcripts_controller.add]
MAP_PARTICIPANTS[DB WRITE: transcript.participants upsert]
end
subgraph Pipeline["3. Processing Pipeline"]
direction TB
PAD[Track Padding & Mixdown]
TRANSCRIBE[GPU: Transcription per track]
TOPICS[LLM: Topic Detection]
TITLE[LLM: Title Generation]
SUMMARY[LLM: Summary Generation]
end
subgraph Storage["4. S3 Operations"]
S3_PRESIGN[S3: generate_presigned_url for tracks]
S3_UPLOAD_PADDED[S3 UPLOAD: padded tracks temp]
S3_UPLOAD_MP3[S3 UPLOAD: audio.mp3]
S3_DELETE_TEMP[S3 DELETE: cleanup temp files]
end
subgraph PostProcess["5. Post-Processing"]
CONSENT[Consent check & cleanup]
ZULIP[Zulip: send/update message]
WEBHOOK_OUT[Webhook: POST to room.webhook_url]
end
Trigger --> Init --> Pipeline
Pipeline --> Storage
Pipeline --> PostProcess
```
## Detailed Sequence: Daily.co Multitrack Recording
```mermaid
sequenceDiagram
participant DailyCo as Daily.co
participant API as FastAPI /v1/daily/webhook
participant Worker as Celery Worker
participant DB as PostgreSQL
participant DailyAPI as Daily.co REST API
participant S3 as AWS S3
participant GPU as Modal.com GPU
participant LLM as LLM Service
participant WS as WebSocket
participant Zulip as Zulip
participant ExtWH as External Webhook
Note over DailyCo,API: Phase 0: Webhook Receipt
DailyCo->>API: POST /v1/daily/webhook
Note right of DailyCo: X-Webhook-Signature, X-Webhook-Timestamp
API->>API: verify_webhook_signature()
API->>API: Extract audio track s3Keys from payload.tracks[]
API->>Worker: process_multitrack_recording.delay()
API-->>DailyCo: 200 OK
Note over Worker,DailyAPI: Phase 1: Recording Initialization
Worker->>DB: SELECT meeting WHERE room_name=?
Worker->>DB: SELECT room WHERE name=?
Worker->>DailyAPI: GET /recordings/{recording_id}
DailyAPI-->>Worker: {mtgSessionId, ...}
Worker->>DailyAPI: GET /meetings/{mtgSessionId}/participants
DailyAPI-->>Worker: [{participant_id, user_name}, ...]
Worker->>DB: INSERT INTO recording
Worker->>DB: INSERT INTO transcript (status='idle')
loop For each track_key (parse participant_id from filename)
Worker->>DB: UPSERT transcript.participants[speaker=idx, name=X]
end
Note over Worker,S3: Phase 2: Track Padding
Worker->>DB: UPDATE transcript SET status='processing'
Worker->>WS: broadcast STATUS='processing'
loop For each track in track_keys (N tracks)
Worker->>S3: generate_presigned_url(track_key, DAILYCO_BUCKET)
S3-->>Worker: presigned_url (2hr)
Note over Worker: PyAV: read WebM, extract start_time
Note over Worker: PyAV: adelay filter (pad silence)
Worker->>S3: PUT file_pipeline/{id}/tracks/padded_{idx}.webm
Worker->>S3: generate_presigned_url(padded_{idx}.webm)
end
Note over Worker,S3: Phase 3: Audio Mixdown
Note over Worker: PyAV: amix filter → stereo MP3
Worker->>DB: UPDATE transcript SET duration=X
Worker->>WS: broadcast DURATION
Worker->>S3: PUT {transcript_id}/audio.mp3
Worker->>DB: UPDATE transcript SET audio_location='storage'
Note over Worker: Phase 4: Waveform
Note over Worker: Generate peaks from MP3
Worker->>DB: UPDATE events+=WAVEFORM
Worker->>WS: broadcast WAVEFORM
Note over Worker,GPU: Phase 5: Transcription (N GPU calls)
loop For each padded track URL (N tracks)
Worker->>GPU: POST /v1/audio/transcriptions-from-url
Note right of GPU: {audio_file_url, language, batch:true}
GPU-->>Worker: {words: [{word, start, end}, ...]}
Note over Worker: Assign speaker=track_idx to words
end
Note over Worker: Merge all words, sort by start time
Worker->>DB: UPDATE events+=TRANSCRIPT
Worker->>WS: broadcast TRANSCRIPT
Note over Worker,S3: Cleanup temp files
loop For each padded file
Worker->>S3: DELETE padded_{idx}.webm
end
Note over Worker,LLM: Phase 6: Topic Detection (C LLM calls)
Note over Worker: C = ceil(total_words / 300)
loop For each 300-word chunk (C chunks)
Worker->>LLM: TOPIC_PROMPT + words[i:i+300]
Note right of LLM: "Extract main topic title + 2-sentence summary"
LLM-->>Worker: TitleSummary{title, summary}
Worker->>DB: UPSERT topics[]
Worker->>DB: UPDATE events+=TOPIC
Worker->>WS: broadcast TOPIC
end
Note over Worker,LLM: Phase 7a: Title Generation (1 LLM call)
Note over Worker: Input: all TitleSummary[].title joined
Worker->>LLM: TITLE_PROMPT
Note right of LLM: "Generate concise title from topic titles"
LLM-->>Worker: "Meeting Title"
Worker->>DB: UPDATE transcript SET title=X
Worker->>DB: UPDATE events+=FINAL_TITLE
Worker->>WS: broadcast FINAL_TITLE
Note over Worker,LLM: Phase 7b: Summary Generation (2+2M LLM calls)
Note over Worker: Reconstruct full transcript from TitleSummary[].transcript
opt If participants unknown
Worker->>LLM: PARTICIPANTS_PROMPT
LLM-->>Worker: ParticipantsResponse
end
Worker->>LLM: SUBJECTS_PROMPT (call #1)
Note right of LLM: "Main high-level topics? Max 6"
LLM-->>Worker: SubjectsResponse{subjects: ["A", "B", ...]}
loop For each subject (M subjects, max 6)
Worker->>LLM: DETAILED_SUBJECT_PROMPT (call #2..#1+M)
Note right of LLM: "Info about 'A': decisions, actions, deadlines"
LLM-->>Worker: detailed_response (discarded after next call)
Worker->>LLM: PARAGRAPH_SUMMARY_PROMPT (call #2+M..#1+2M)
Note right of LLM: "Summarize in 1 paragraph"
LLM-->>Worker: paragraph → summaries[]
end
Worker->>LLM: RECAP_PROMPT (call #2+2M)
Note right of LLM: "High-level quick recap, 1 paragraph"
LLM-->>Worker: recap
Note over Worker: long_summary = "# Quick recap\n{recap}\n# Summary\n**A**\n{para1}..."
Note over Worker: short_summary = recap only
Worker->>DB: UPDATE long_summary, short_summary
Worker->>DB: UPDATE events+=FINAL_LONG_SUMMARY
Worker->>WS: broadcast FINAL_LONG_SUMMARY
Worker->>DB: UPDATE events+=FINAL_SHORT_SUMMARY
Worker->>WS: broadcast FINAL_SHORT_SUMMARY
Note over Worker,DB: Phase 8: Finalize
Worker->>DB: UPDATE transcript SET status='ended'
Worker->>DB: UPDATE events+=STATUS
Worker->>WS: broadcast STATUS='ended'
Note over Worker,ExtWH: Phase 9: Post-Processing Chain
Worker->>DB: SELECT meeting_consent WHERE meeting_id=?
alt Any consent denied
Worker->>S3: DELETE tracks from DAILYCO_BUCKET
Worker->>S3: DELETE audio.mp3 from TRANSCRIPT_BUCKET
Worker->>DB: UPDATE transcript SET audio_deleted=true
end
opt Room has zulip_auto_post=true
alt Existing zulip_message_id
Worker->>Zulip: PATCH /api/v1/messages/{id}
else New
Worker->>Zulip: POST /api/v1/messages
Zulip-->>Worker: {id}
Worker->>DB: UPDATE transcript SET zulip_message_id=X
end
end
opt Room has webhook_url
Worker->>ExtWH: POST {webhook_url}
Note right of ExtWH: X-Webhook-Signature: HMAC-SHA256
Note right of ExtWH: Body: {transcript_id, room_id, ...}
end
```
## Title & Summary Generation Data Flow
```mermaid
flowchart TB
subgraph Input["Input: TitleSummary[] from Topic Detection"]
TS1["TitleSummary 1<br/>title: 'Q1 Budget'<br/>transcript: words[0:300]"]
TS2["TitleSummary 2<br/>title: 'Product Launch'<br/>transcript: words[300:600]"]
TS3["TitleSummary N..."]
end
subgraph TitleGen["Title Generation"]
T1["Extract .title from each TitleSummary"]
T2["Concatenate: '- Q1 Budget\n- Product Launch\n...'"]
T3["LLM: TITLE_PROMPT\n'Generate concise title from topic titles'"]
T4["Output: FinalTitle"]
T1 --> T2 --> T3 --> T4
end
subgraph SummaryGen["Summary Generation"]
direction TB
subgraph Reconstruct["1. Reconstruct Full Transcript"]
S1["For each TitleSummary.transcript.as_segments()"]
S2["Map speaker ID → name"]
S3["Build: 'Alice: hello\nBob: hi\n...'"]
S1 --> S2 --> S3
end
subgraph Subjects["2. Extract Subjects - LLM call #1"]
S4["LLM: SUBJECTS_PROMPT\n'Main high-level topics? Max 6'"]
S5["subjects[] = ['Budget Review', ...]"]
S4 --> S5
end
subgraph DetailedSum["3. Per-Subject Summary - LLM calls #2 to #(1+2M)"]
S6["For each subject:"]
S7["LLM: DETAILED_SUBJECT_PROMPT\n'Info about subject: decisions, actions...'"]
S8["detailed_response - NOT STORED"]
S9["LLM: PARAGRAPH_SUMMARY_PROMPT\n'Summarize in 1 paragraph'"]
S10["paragraph → summaries[]"]
S6 --> S7 --> S8 --> S9 --> S10
end
subgraph Recap["4. Generate Recap - LLM call #(2+2M)"]
S11["Concatenate paragraph summaries"]
S12["LLM: RECAP_PROMPT\n'High-level recap, 1 paragraph'"]
S13["recap"]
S11 --> S12 --> S13
end
subgraph Output["5. Output"]
S14["long_summary = markdown:\n# Quick recap\n[recap]\n# Summary\n**Subject 1**\n[para1]..."]
S15["short_summary = recap only"]
S14 --> S15
end
Reconstruct --> Subjects --> DetailedSum --> Recap --> Output
end
Input --> TitleGen
Input --> SummaryGen
```
### topics[] vs subjects[]
| | topics[] | subjects[] |
|-|----------|------------|
| **Source** | 300-word chunk splitting | LLM extraction from full text |
| **Count** | Variable (words / 300) | Max 6 |
| **Purpose** | Timeline segmentation | Summary structure |
| **Has timestamp?** | Yes | No |
## External API Calls Summary
### 1. Daily.co REST API (called during initialization)
| Endpoint | Method | When | Purpose |
|----------|--------|------|---------|
| `GET /recordings/{recording_id}` | GET | After webhook | Get mtgSessionId for participant lookup |
| `GET /meetings/{mtgSessionId}/participants` | GET | After above | Map participant_id → user_name |
### 2. GPU Service (Modal.com or Self-Hosted)
| Endpoint | Method | Count | Request |
|----------|--------|-------|---------|
| `{TRANSCRIPT_URL}/v1/audio/transcriptions-from-url` | POST | **N** (N = num tracks) | `{audio_file_url, language, batch: true}` |
**Note**: Diarization is NOT called for multitrack - speaker identification comes from separate tracks.
### 3. LLM Service (OpenAI-compatible via LlamaIndex)
| Phase | Operation | Input | LLM Calls | Output |
|-------|-----------|-------|-----------|--------|
| Topic Detection | TOPIC_PROMPT per 300-word chunk | words[i:i+300] | **C** = ceil(words/300) | TitleSummary{title, summary, timestamp} |
| Title Generation | TITLE_PROMPT | All topic titles joined | **1** | FinalTitle |
| Participant ID | PARTICIPANTS_PROMPT | Full transcript | **0-1** (skipped if known) | ParticipantsResponse |
| Subject Extraction | SUBJECTS_PROMPT | Full transcript | **1** | SubjectsResponse{subjects[]} |
| Subject Detail | DETAILED_SUBJECT_PROMPT | Full transcript + subject name | **M** (M = subjects, max 6) | detailed text (discarded) |
| Subject Paragraph | PARAGRAPH_SUMMARY_PROMPT | Detailed text | **M** | paragraph text → summaries[] |
| Recap | RECAP_PROMPT | All paragraph summaries | **1** | recap text |
**Total LLM calls**: C + 2M + 3 (+ 1 if participants unknown)
- Short meeting (1000 words, 3 subjects): ~4 + 6 + 3 = **13 calls**
- Long meeting (5000 words, 6 subjects): ~17 + 12 + 3 = **32 calls**
## S3 Operations Summary
### Source Bucket: `DAILYCO_STORAGE_AWS_BUCKET_NAME`
Daily.co uploads raw-tracks recordings here.
| Operation | Key Pattern | When |
|-----------|-------------|------|
| **READ** (presign) | `{domain}/{room_name}/{ts}/{participant_id}-cam-audio-{ts}.webm` | Track acquisition |
| **DELETE** | Same as above | Consent denied cleanup |
### Transcript Storage Bucket: `TRANSCRIPT_STORAGE_AWS_BUCKET_NAME`
Reflector's own storage.
| Operation | Key Pattern | When |
|-----------|-------------|------|
| **PUT** | `file_pipeline/{transcript_id}/tracks/padded_{idx}.webm` | After track padding |
| **READ** (presign) | Same | For GPU transcription |
| **DELETE** | Same | After transcription complete |
| **PUT** | `{transcript_id}/audio.mp3` | After mixdown |
| **DELETE** | Same | Consent denied cleanup |
## Database Operations
### Tables Written
| Table | Operation | When |
|-------|-----------|------|
| `recording` | INSERT | Initialization |
| `transcript` | INSERT | Initialization |
| `transcript` | UPDATE (participants) | After Daily API participant fetch |
| `transcript` | UPDATE (status, events, duration, topics, title, summaries, etc.) | Throughout pipeline |
### Transcript Update Sequence
```
1. INSERT: id, name, status='idle', source_kind='room', user_id, recording_id, room_id, meeting_id
2. UPDATE: participants[] (speaker index → participant name mapping)
3. UPDATE: status='processing', events+=[{event:'STATUS', data:{value:'processing'}}]
4. UPDATE: duration=X, events+=[{event:'DURATION', data:{duration:X}}]
5. UPDATE: audio_location='storage'
6. UPDATE: events+=[{event:'WAVEFORM', data:{waveform:[...]}}]
7. UPDATE: events+=[{event:'TRANSCRIPT', data:{text, translation}}]
8. UPDATE: topics[]+=topic, events+=[{event:'TOPIC'}] -- repeated per chunk
9. UPDATE: title=X, events+=[{event:'FINAL_TITLE'}]
10. UPDATE: long_summary=X, events+=[{event:'FINAL_LONG_SUMMARY'}]
11. UPDATE: short_summary=X, events+=[{event:'FINAL_SHORT_SUMMARY'}]
12. UPDATE: status='ended', events+=[{event:'STATUS', data:{value:'ended'}}]
13. UPDATE: zulip_message_id=X -- if Zulip enabled
14. UPDATE: audio_deleted=true -- if consent denied
```
## WebSocket Events
All broadcast to room `ts:{transcript_id}`:
| Event | Payload | Trigger |
|-------|---------|---------|
| STATUS | `{value: "processing"\|"ended"\|"error"}` | Status transitions |
| DURATION | `{duration: float}` | After audio processing |
| WAVEFORM | `{waveform: float[]}` | After waveform generation |
| TRANSCRIPT | `{text: string, translation: string\|null}` | After transcription merge |
| TOPIC | `{id, title, summary, timestamp, duration, transcript, words}` | Per topic detected |
| FINAL_TITLE | `{title: string}` | After LLM title generation |
| FINAL_LONG_SUMMARY | `{long_summary: string}` | After LLM summary |
| FINAL_SHORT_SUMMARY | `{short_summary: string}` | After LLM recap |
User-room broadcasts to `user:{user_id}`:
- `TRANSCRIPT_STATUS`
- `TRANSCRIPT_FINAL_TITLE`
- `TRANSCRIPT_DURATION`

View File

@@ -1,238 +0,0 @@
# Reflector Architecture: Whereby + Daily.co Recording Storage
## System Overview
```mermaid
graph TB
subgraph "Actors"
APP[Our App<br/>Reflector]
WHEREBY[Whereby Service<br/>External]
DAILY[Daily.co Service<br/>External]
end
subgraph "AWS S3 Buckets"
TRANSCRIPT_BUCKET[Transcript Bucket<br/>reflector-transcripts<br/>Output: Processed MP3s]
WHEREBY_BUCKET[Whereby Bucket<br/>reflector-whereby-recordings<br/>Input: Raw MP4s]
DAILY_BUCKET[Daily.co Bucket<br/>reflector-dailyco-recordings<br/>Input: Raw WebM tracks]
end
subgraph "AWS Infrastructure"
SQS[SQS Queue<br/>Whereby notifications]
end
subgraph "Database"
DB[(PostgreSQL<br/>Recordings, Transcripts, Meetings)]
end
APP -->|Write processed| TRANSCRIPT_BUCKET
APP -->|Read/Delete| WHEREBY_BUCKET
APP -->|Read/Delete| DAILY_BUCKET
APP -->|Poll| SQS
APP -->|Store metadata| DB
WHEREBY -->|Write recordings| WHEREBY_BUCKET
WHEREBY_BUCKET -->|S3 Event| SQS
WHEREBY -->|Participant webhooks<br/>room.client.joined/left| APP
DAILY -->|Write recordings| DAILY_BUCKET
DAILY -->|Recording webhook<br/>recording.ready-to-download| APP
```
**Note on Webhook vs S3 Event for Recording Processing:**
- **Whereby**: Uses S3 Events → SQS for recording availability (S3 as source of truth, no race conditions)
- **Daily.co**: Uses webhooks for recording availability (more immediate, built-in reliability)
- **Both**: Use webhooks for participant tracking (real-time updates)
## Credentials & Permissions
```mermaid
graph LR
subgraph "Master Credentials"
MASTER[TRANSCRIPT_STORAGE_AWS_*<br/>Access Key ID + Secret]
end
subgraph "Whereby Upload Credentials"
WHEREBY_CREDS[AWS_WHEREBY_ACCESS_KEY_*<br/>Access Key ID + Secret]
end
subgraph "Daily.co Upload Role"
DAILY_ROLE[DAILY_STORAGE_AWS_ROLE_ARN<br/>IAM Role ARN]
end
subgraph "Our App Uses"
MASTER -->|Read/Write/Delete| TRANSCRIPT_BUCKET[Transcript Bucket]
MASTER -->|Read/Delete| WHEREBY_BUCKET[Whereby Bucket]
MASTER -->|Read/Delete| DAILY_BUCKET[Daily.co Bucket]
MASTER -->|Poll/Delete| SQS[SQS Queue]
end
subgraph "We Give To Services"
WHEREBY_CREDS -->|Passed in API call| WHEREBY_SERVICE[Whereby Service]
WHEREBY_SERVICE -->|Write Only| WHEREBY_BUCKET
DAILY_ROLE -->|Passed in API call| DAILY_SERVICE[Daily.co Service]
DAILY_SERVICE -->|Assume Role| DAILY_ROLE
DAILY_SERVICE -->|Write Only| DAILY_BUCKET
end
```
# Video Platform Recording Integration
This document explains how Reflector receives and identifies multitrack audio recordings from different video platforms.
## Platform Comparison
| Platform | Delivery Method | Track Identification |
|----------|----------------|---------------------|
| **Daily.co** | Webhook | Explicit track list in payload |
| **Whereby** | SQS (S3 notifications) | Single file per notification |
---
## Daily.co
**Note:** Primary discovery via polling (`poll_daily_recordings`), webhooks as backup.
Daily.co uses **webhooks** to notify Reflector when recordings are ready.
### How It Works
1. **Daily.co sends webhook** when recording is ready
- Event type: `recording.ready-to-download`
- Endpoint: `/v1/daily/webhook` (`reflector/views/daily.py:46-102`)
2. **Webhook payload explicitly includes track list**:
```json
{
"recording_id": "7443ee0a-dab1-40eb-b316-33d6c0d5ff88",
"room_name": "daily-20251020193458",
"tracks": [
{
"type": "audio",
"s3Key": "monadical/daily-20251020193458/1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922",
"size": 831843
},
{
"type": "audio",
"s3Key": "monadical/daily-20251020193458/1760988935484-a37c35e3-6f8e-4274-a482-e9d0f102a732-cam-audio-1760988943823",
"size": 408438
},
{
"type": "video",
"s3Key": "monadical/daily-20251020193458/...-video.webm",
"size": 30000000
}
]
}
```
3. **System extracts audio tracks** (`daily.py:211`):
```python
track_keys = [t.s3Key for t in tracks if t.type == "audio"]
```
4. **Triggers multitrack processing** (`daily.py:213-218`):
```python
process_multitrack_recording.delay(
bucket_name=bucket_name, # reflector-dailyco-local
room_name=room_name, # daily-20251020193458
recording_id=recording_id, # 7443ee0a-dab1-40eb-b316-33d6c0d5ff88
track_keys=track_keys # Only audio s3Keys
)
```
### Key Advantage: No Ambiguity
Even though multiple meetings may share the same S3 bucket/folder (`monadical/`), **there's no ambiguity** because:
- Each webhook payload contains the exact `s3Key` list for that specific `recording_id`
- No need to scan folders or guess which files belong together
- Each track's s3Key includes the room timestamp subfolder (e.g., `daily-20251020193458/`)
The room name includes timestamp (`daily-20251020193458`) to keep recordings organized, but **the webhook's explicit track list is what prevents mixing files from different meetings**.
### Track Timeline Extraction
Daily.co provides timing information in two places:
**1. PyAV WebM Metadata (current approach)**:
```python
# Read from WebM container stream metadata
stream.start_time = 8.130s # Meeting-relative timing
```
**2. Filename Timestamps (alternative approach, commit 3bae9076)**:
```
Filename format: {recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm
Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm
Parse timestamps:
- recording_start_ts: 1760988935484 (Unix ms)
- track_start_ts: 1760988935922 (Unix ms)
- offset: (1760988935922 - 1760988935484) / 1000 = 0.438s
```
**Time Difference (PyAV vs Filename)**:
```
Track 0:
Filename offset: 438ms
PyAV metadata: 229ms
Difference: ~200ms
Track 1:
Filename offset: 8339ms
PyAV metadata: 8130ms
Difference: ~200ms
```
**Consistent ~200ms delta** suggests network/encoding delay between file upload initiation (filename) and actual audio stream start (metadata).
**Note:** The ~200ms difference observed in this test recording is not crucial for timing accuracy. Either method (filename timestamps or PyAV metadata) works well for multi-track alignment. Filename timestamps are preferable because they are officially documented by Daily.co.
**Current implementation uses PyAV metadata** because:
- More accurate (represents when audio actually started)
- Padding BEFORE transcription produces correct Whisper timestamps automatically
- No manual offset adjustment needed during transcript merge
### Why Re-encoding During Padding
Padding involves re-encoding as a side effect, which turns out to be important for Daily.co + Whisper:
**Problem:** Daily.co skips frames in recordings when microphone is muted or paused
- WebM containers have gaps where audio frames should be
- Whisper doesn't understand these gaps and produces incorrect timestamps
- Example: 5s of audio with 2s muted → file has frames only for 3s, Whisper thinks duration is 3s
**Solution:** Re-encoding via PyAV filter graph (`adelay` + `aresample`)
- Restores missing frames as silence
- Produces continuous audio stream without gaps
- Whisper now sees correct duration and produces accurate timestamps
**Why combined with padding:**
- Already re-encoding for padding (adding initial silence)
- More performant to do both operations in single PyAV pipeline
- Padded values needed for mixdown anyway (creating final MP3)
Implementation: `main_multitrack_pipeline.py:_apply_audio_padding_streaming()`
---
## Whereby (SQS-based)
Whereby uses **AWS SQS** (via S3 notifications) to notify Reflector when files are uploaded.
### How It Works
1. **Whereby uploads recording** to S3
2. **S3 sends notification** to SQS queue (one notification per file)
3. **Reflector polls SQS queue** (`worker/process.py:process_messages()`)
4. **System processes single file** (`worker/process.py:process_recording()`)
### Key Difference from Daily.co
**Whereby (SQS):** The system receives an S3 notification that "file X was created" — it only knows about one file at a time and would need to scan the folder to find related files
**Daily.co (Webhook):** Daily explicitly tells system which files belong together in the webhook payload
---

View File

@@ -3,29 +3,6 @@
# All the settings are described here: reflector/settings.py # All the settings are described here: reflector/settings.py
# #
## =======================================================
## Core Configuration (Required for Production)
## =======================================================
## Database (for docker-compose.prod.yml, use postgres hostname)
#DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
## Redis (for docker-compose.prod.yml, use redis hostname)
#REDIS_HOST=redis
#REDIS_PORT=6379
#CELERY_BROKER_URL=redis://redis:6379/1
#CELERY_RESULT_BACKEND=redis://redis:6379/1
## Base URL - your API domain with https
#BASE_URL=https://api.example.com
## CORS - required when frontend and API are on different domains
#CORS_ORIGIN=https://app.example.com
#CORS_ALLOW_CREDENTIALS=true
## Secret key - generate with: openssl rand -hex 32
#SECRET_KEY=changeme-generate-a-secure-random-string
## ======================================================= ## =======================================================
## User authentication ## User authentication
## ======================================================= ## =======================================================
@@ -63,21 +40,18 @@ TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
#TRANSLATION_MODAL_API_KEY=xxxxx #TRANSLATION_MODAL_API_KEY=xxxxx
## ======================================================= ## =======================================================
## LLM backend (Required) ## LLM backend
## ##
## Responsible for generating titles, summaries, and topic detection ## Responsible for titles and short summary
## Requires OpenAI API key ## Check reflector/llm/* for the full list of available
## llm backend implementation
## ======================================================= ## =======================================================
## OpenAI API key - get from https://platform.openai.com/account/api-keys
LLM_API_KEY=sk-your-openai-api-key
LLM_MODEL=gpt-4o-mini
## Optional: Custom endpoint (defaults to OpenAI)
# LLM_URL=https://api.openai.com/v1
## Context size for summary generation (tokens) ## Context size for summary generation (tokens)
# LLM_MODEL=microsoft/phi-4
LLM_CONTEXT_WINDOW=16000 LLM_CONTEXT_WINDOW=16000
LLM_URL=
LLM_API_KEY=sk-
## ======================================================= ## =======================================================
## Diarization ## Diarization
@@ -91,19 +65,6 @@ DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
#DIARIZATION_MODAL_API_KEY=xxxxx #DIARIZATION_MODAL_API_KEY=xxxxx
## =======================================================
## Transcript Storage
##
## Where to store audio files and transcripts
## AWS S3 is required for production
## =======================================================
TRANSCRIPT_STORAGE_BACKEND=aws
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
## ======================================================= ## =======================================================
## Sentry ## Sentry
## ======================================================= ## =======================================================
@@ -118,22 +79,19 @@ TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
## Whereby ## Whereby
#WHEREBY_API_KEY=your-whereby-api-key #WHEREBY_API_KEY=your-whereby-api-key
#WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret #WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret
#WHEREBY_STORAGE_AWS_ACCESS_KEY_ID=your-aws-key #AWS_WHEREBY_ACCESS_KEY_ID=your-aws-key
#WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret #AWS_WHEREBY_ACCESS_KEY_SECRET=your-aws-secret
#AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/... #AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/...
## Daily.co ## Daily.co
#DAILY_API_KEY=your-daily-api-key #DAILY_API_KEY=your-daily-api-key
#DAILY_WEBHOOK_SECRET=your-daily-webhook-secret #DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
#DAILY_SUBDOMAIN=your-subdomain #DAILY_SUBDOMAIN=your-subdomain
#DAILY_WEBHOOK_UUID= # Auto-populated by recreate_daily_webhook.py script #AWS_DAILY_S3_BUCKET=your-daily-bucket
#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access #AWS_DAILY_S3_REGION=us-west-2
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco #AWS_DAILY_ROLE_ARN=arn:aws:iam::ACCOUNT:role/DailyRecording
#DAILYCO_STORAGE_AWS_REGION=us-west-2
## Whereby (optional separate bucket) ## Platform Selection
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby #DAILY_MIGRATION_ENABLED=false # Enable Daily.co support
#WHEREBY_STORAGE_AWS_REGION=us-east-1 #DAILY_MIGRATION_ROOM_IDS=[] # Specific rooms to use Daily
## Platform Configuration
#DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms #DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms

View File

@@ -1,26 +0,0 @@
"""add_action_items
Revision ID: 05f8688d6895
Revises: bbafedfa510c
Create Date: 2025-12-12 11:57:50.209658
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "05f8688d6895"
down_revision: Union[str, None] = "bbafedfa510c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable JSON ``action_items`` column to the ``transcript`` table."""
    op.add_column("transcript", sa.Column("action_items", sa.JSON(), nullable=True))
def downgrade() -> None:
    """Drop the ``action_items`` column added in :func:`upgrade`."""
    op.drop_column("transcript", "action_items")

View File

@@ -1,28 +0,0 @@
"""add workflow_run_id to transcript
Revision ID: 0f943fede0e0
Revises: 20251217000000
Create Date: 2025-12-16 01:54:13.855106
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "0f943fede0e0"
down_revision: Union[str, None] = "20251217000000"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
with op.batch_alter_table("transcript", schema=None) as batch_op:
batch_op.add_column(sa.Column("workflow_run_id", sa.String(), nullable=True))
def downgrade() -> None:
with op.batch_alter_table("transcript", schema=None) as batch_op:
batch_op.drop_column("workflow_run_id")

View File

@@ -1,7 +1,7 @@
"""add_platform_support """add_platform_support
Revision ID: 1e49625677e4 Revision ID: 1e49625677e4
Revises: 9e3f7b2a4c8e Revises: dc035ff72fd5
Create Date: 2025-10-08 13:17:29.943612 Create Date: 2025-10-08 13:17:29.943612
""" """
@@ -13,7 +13,7 @@ from alembic import op
# revision identifiers, used by Alembic. # revision identifiers, used by Alembic.
revision: str = "1e49625677e4" revision: str = "1e49625677e4"
down_revision: Union[str, None] = "9e3f7b2a4c8e" down_revision: Union[str, None] = "dc035ff72fd5"
branch_labels: Union[str, Sequence[str], None] = None branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None depends_on: Union[str, Sequence[str], None] = None
@@ -25,8 +25,8 @@ def upgrade() -> None:
sa.Column( sa.Column(
"platform", "platform",
sa.String(), sa.String(),
nullable=True, nullable=False,
server_default=None, server_default="whereby",
) )
) )

View File

@@ -1,35 +0,0 @@
"""add skip_consent to room
Revision ID: 20251217000000
Revises: 05f8688d6895
Create Date: 2025-12-17 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "20251217000000"
down_revision: Union[str, None] = "05f8688d6895"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"skip_consent",
sa.Boolean(),
nullable=False,
server_default=sa.text("false"),
)
)
def downgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.drop_column("skip_consent")

View File

@@ -1,79 +0,0 @@
"""add daily participant session table with immutable left_at
Revision ID: 2b92a1b03caa
Revises: f8294b31f022
Create Date: 2025-11-13 20:29:30.486577
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "2b92a1b03caa"
down_revision: Union[str, None] = "f8294b31f022"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# Create table
op.create_table(
"daily_participant_session",
sa.Column("id", sa.String(), nullable=False),
sa.Column("meeting_id", sa.String(), nullable=False),
sa.Column("room_id", sa.String(), nullable=False),
sa.Column("session_id", sa.String(), nullable=False),
sa.Column("user_id", sa.String(), nullable=True),
sa.Column("user_name", sa.String(), nullable=False),
sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
sa.ForeignKeyConstraint(["meeting_id"], ["meeting.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["room_id"], ["room.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
batch_op.create_index(
"idx_daily_session_meeting_left", ["meeting_id", "left_at"], unique=False
)
batch_op.create_index("idx_daily_session_room", ["room_id"], unique=False)
# Create trigger function to prevent left_at from being updated once set
op.execute("""
CREATE OR REPLACE FUNCTION prevent_left_at_update()
RETURNS TRIGGER AS $$
BEGIN
IF OLD.left_at IS NOT NULL THEN
RAISE EXCEPTION 'left_at is immutable once set';
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
""")
# Create trigger
op.execute("""
CREATE TRIGGER prevent_left_at_update_trigger
BEFORE UPDATE ON daily_participant_session
FOR EACH ROW
EXECUTE FUNCTION prevent_left_at_update();
""")
def downgrade() -> None:
# Drop trigger
op.execute(
"DROP TRIGGER IF EXISTS prevent_left_at_update_trigger ON daily_participant_session;"
)
# Drop trigger function
op.execute("DROP FUNCTION IF EXISTS prevent_left_at_update();")
# Drop indexes and table
with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
batch_op.drop_index("idx_daily_session_room")
batch_op.drop_index("idx_daily_session_meeting_left")
op.drop_table("daily_participant_session")

View File

@@ -1,30 +0,0 @@
"""Make room platform non-nullable with dynamic default
Revision ID: 5d6b9df9b045
Revises: 2b92a1b03caa
Create Date: 2025-11-21 13:22:25.756584
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5d6b9df9b045"
down_revision: Union[str, None] = "2b92a1b03caa"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
op.execute("UPDATE room SET platform = 'whereby' WHERE platform IS NULL")
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.alter_column("platform", existing_type=sa.String(), nullable=False)
def downgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.alter_column("platform", existing_type=sa.String(), nullable=True)

View File

@@ -1,38 +0,0 @@
"""add user api keys
Revision ID: 9e3f7b2a4c8e
Revises: dc035ff72fd5
Create Date: 2025-10-17 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "9e3f7b2a4c8e"
down_revision: Union[str, None] = "dc035ff72fd5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
op.create_table(
"user_api_key",
sa.Column("id", sa.String(), nullable=False),
sa.Column("user_id", sa.String(), nullable=False),
sa.Column("key_hash", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
with op.batch_alter_table("user_api_key", schema=None) as batch_op:
batch_op.create_index("idx_user_api_key_hash", ["key_hash"], unique=True)
batch_op.create_index("idx_user_api_key_user_id", ["user_id"], unique=False)
def downgrade() -> None:
op.drop_table("user_api_key")

View File

@@ -1,38 +0,0 @@
"""add user table
Revision ID: bbafedfa510c
Revises: 5d6b9df9b045
Create Date: 2025-11-19 21:06:30.543262
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "bbafedfa510c"
down_revision: Union[str, None] = "5d6b9df9b045"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
op.create_table(
"user",
sa.Column("id", sa.String(), nullable=False),
sa.Column("email", sa.String(), nullable=False),
sa.Column("authentik_uid", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.create_index("idx_user_authentik_uid", ["authentik_uid"], unique=True)
batch_op.create_index("idx_user_email", ["email"], unique=False)
def downgrade() -> None:
op.drop_table("user")

View File

@@ -1,35 +0,0 @@
"""add use_hatchet to room
Revision ID: bd3a729bb379
Revises: 0f943fede0e0
Create Date: 2025-12-16 16:34:03.594231
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "bd3a729bb379"
down_revision: Union[str, None] = "0f943fede0e0"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"use_hatchet",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
)
)
def downgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.drop_column("use_hatchet")

View File

@@ -1,28 +0,0 @@
"""add_track_keys
Revision ID: f8294b31f022
Revises: 1e49625677e4
Create Date: 2025-10-27 18:52:17.589167
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "f8294b31f022"
down_revision: Union[str, None] = "1e49625677e4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
with op.batch_alter_table("recording", schema=None) as batch_op:
batch_op.add_column(sa.Column("track_keys", sa.JSON(), nullable=True))
def downgrade() -> None:
with op.batch_alter_table("recording", schema=None) as batch_op:
batch_op.drop_column("track_keys")

View File

@@ -39,7 +39,6 @@ dependencies = [
"pytest-env>=1.1.5", "pytest-env>=1.1.5",
"webvtt-py>=0.5.0", "webvtt-py>=0.5.0",
"icalendar>=6.0.0", "icalendar>=6.0.0",
"hatchet-sdk>=0.47.0",
] ]
[dependency-groups] [dependency-groups]
@@ -127,7 +126,6 @@ markers = [
select = [ select = [
"I", # isort - import sorting "I", # isort - import sorting
"F401", # unused imports "F401", # unused imports
"E402", # module level import not at top of file
"PLC0415", # import-outside-top-level - detect inline imports "PLC0415", # import-outside-top-level - detect inline imports
] ]

View File

@@ -18,7 +18,6 @@ from reflector.views.rooms import router as rooms_router
from reflector.views.rtc_offer import router as rtc_offer_router from reflector.views.rtc_offer import router as rtc_offer_router
from reflector.views.transcripts import router as transcripts_router from reflector.views.transcripts import router as transcripts_router
from reflector.views.transcripts_audio import router as transcripts_audio_router from reflector.views.transcripts_audio import router as transcripts_audio_router
from reflector.views.transcripts_chat import router as transcripts_chat_router
from reflector.views.transcripts_participants import ( from reflector.views.transcripts_participants import (
router as transcripts_participants_router, router as transcripts_participants_router,
) )
@@ -28,7 +27,6 @@ from reflector.views.transcripts_upload import router as transcripts_upload_rout
from reflector.views.transcripts_webrtc import router as transcripts_webrtc_router from reflector.views.transcripts_webrtc import router as transcripts_webrtc_router
from reflector.views.transcripts_websocket import router as transcripts_websocket_router from reflector.views.transcripts_websocket import router as transcripts_websocket_router
from reflector.views.user import router as user_router from reflector.views.user import router as user_router
from reflector.views.user_api_keys import router as user_api_keys_router
from reflector.views.user_websocket import router as user_ws_router from reflector.views.user_websocket import router as user_ws_router
from reflector.views.whereby import router as whereby_router from reflector.views.whereby import router as whereby_router
from reflector.views.zulip import router as zulip_router from reflector.views.zulip import router as zulip_router
@@ -91,11 +89,9 @@ app.include_router(transcripts_participants_router, prefix="/v1")
app.include_router(transcripts_speaker_router, prefix="/v1") app.include_router(transcripts_speaker_router, prefix="/v1")
app.include_router(transcripts_upload_router, prefix="/v1") app.include_router(transcripts_upload_router, prefix="/v1")
app.include_router(transcripts_websocket_router, prefix="/v1") app.include_router(transcripts_websocket_router, prefix="/v1")
app.include_router(transcripts_chat_router, prefix="/v1")
app.include_router(transcripts_webrtc_router, prefix="/v1") app.include_router(transcripts_webrtc_router, prefix="/v1")
app.include_router(transcripts_process_router, prefix="/v1") app.include_router(transcripts_process_router, prefix="/v1")
app.include_router(user_router, prefix="/v1") app.include_router(user_router, prefix="/v1")
app.include_router(user_api_keys_router, prefix="/v1")
app.include_router(user_ws_router, prefix="/v1") app.include_router(user_ws_router, prefix="/v1")
app.include_router(zulip_router, prefix="/v1") app.include_router(zulip_router, prefix="/v1")
app.include_router(whereby_router, prefix="/v1") app.include_router(whereby_router, prefix="/v1")

View File

@@ -1,19 +1,13 @@
import asyncio import asyncio
import functools import functools
from uuid import uuid4
from celery import current_task
from reflector.db import get_database from reflector.db import get_database
from reflector.llm import llm_session_id
def asynctask(f): def asynctask(f):
@functools.wraps(f) @functools.wraps(f)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
async def run_with_db(): async def run_with_db():
task_id = current_task.request.id if current_task else None
llm_session_id.set(task_id or f"random-{uuid4().hex}")
database = get_database() database = get_database()
await database.connect() await database.connect()
try: try:

View File

@@ -1,18 +1,14 @@
from typing import Annotated, List, Optional from typing import Annotated, Optional
from fastapi import Depends, HTTPException from fastapi import Depends, HTTPException
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt from jose import JWTError, jwt
from pydantic import BaseModel from pydantic import BaseModel
from reflector.db.user_api_keys import user_api_keys_controller
from reflector.db.users import user_controller
from reflector.logger import logger from reflector.logger import logger
from reflector.settings import settings from reflector.settings import settings
from reflector.utils import generate_uuid4
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False) oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
jwt_public_key = open(f"reflector/auth/jwt/keys/{settings.AUTH_JWT_PUBLIC_KEY}").read() jwt_public_key = open(f"reflector/auth/jwt/keys/{settings.AUTH_JWT_PUBLIC_KEY}").read()
jwt_algorithm = settings.AUTH_JWT_ALGORITHM jwt_algorithm = settings.AUTH_JWT_ALGORITHM
@@ -30,7 +26,7 @@ class JWTException(Exception):
class UserInfo(BaseModel): class UserInfo(BaseModel):
sub: str sub: str
email: Optional[str] = None email: str
def __getitem__(self, key): def __getitem__(self, key):
return getattr(self, key) return getattr(self, key)
@@ -62,65 +58,34 @@ def authenticated(token: Annotated[str, Depends(oauth2_scheme)]):
return None return None
async def _authenticate_user( def current_user(
jwt_token: Optional[str], token: Annotated[Optional[str], Depends(oauth2_scheme)],
api_key: Optional[str],
jwtauth: JWTAuth,
) -> UserInfo | None:
user_infos: List[UserInfo] = []
if api_key:
user_api_key = await user_api_keys_controller.verify_key(api_key)
if user_api_key:
user_infos.append(UserInfo(sub=user_api_key.user_id, email=None))
if jwt_token:
try:
payload = jwtauth.verify_token(jwt_token)
authentik_uid = payload["sub"]
email = payload["email"]
user = await user_controller.get_by_authentik_uid(authentik_uid)
if not user:
logger.info(
f"Creating new user on first login: {authentik_uid} ({email})"
)
user = await user_controller.create_or_update(
id=generate_uuid4(),
authentik_uid=authentik_uid,
email=email,
)
user_infos.append(UserInfo(sub=user.id, email=email))
except JWTError as e:
logger.error(f"JWT error: {e}")
raise HTTPException(status_code=401, detail="Invalid authentication")
if len(user_infos) == 0:
return None
if len(set([x.sub for x in user_infos])) > 1:
raise JWTException(
status_code=401,
detail="Invalid authentication: more than one user provided",
)
return user_infos[0]
async def current_user(
jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
api_key: Annotated[Optional[str], Depends(api_key_header)],
jwtauth: JWTAuth = Depends(), jwtauth: JWTAuth = Depends(),
): ):
user = await _authenticate_user(jwt_token, api_key, jwtauth) if token is None:
if user is None:
raise HTTPException(status_code=401, detail="Not authenticated") raise HTTPException(status_code=401, detail="Not authenticated")
return user try:
payload = jwtauth.verify_token(token)
sub = payload["sub"]
email = payload["email"]
return UserInfo(sub=sub, email=email)
except JWTError as e:
logger.error(f"JWT error: {e}")
raise HTTPException(status_code=401, detail="Invalid authentication")
async def current_user_optional( def current_user_optional(
jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)], token: Annotated[Optional[str], Depends(oauth2_scheme)],
api_key: Annotated[Optional[str], Depends(api_key_header)],
jwtauth: JWTAuth = Depends(), jwtauth: JWTAuth = Depends(),
): ):
return await _authenticate_user(jwt_token, api_key, jwtauth) # we accept no token, but if one is provided, it must be a valid one.
if token is None:
return None
try:
payload = jwtauth.verify_token(token)
sub = payload["sub"]
email = payload["email"]
return UserInfo(sub=sub, email=email)
except JWTError as e:
logger.error(f"JWT error: {e}")
raise HTTPException(status_code=401, detail="Invalid authentication")

View File

@@ -1,6 +0,0 @@
anything about Daily.co api interaction
- webhook event shapes
- REST api client
No REST api client existing found in the wild; the official lib is about working with videocall as a bot

View File

@@ -1,110 +0,0 @@
"""
Daily.co API Module
"""
# Client
from .client import DailyApiClient, DailyApiError
# Request models
from .requests import (
CreateMeetingTokenRequest,
CreateRoomRequest,
CreateWebhookRequest,
MeetingTokenProperties,
RecordingsBucketConfig,
RoomProperties,
UpdateWebhookRequest,
)
# Response models
from .responses import (
FinishedRecordingResponse,
MeetingParticipant,
MeetingParticipantsResponse,
MeetingResponse,
MeetingTokenResponse,
RecordingResponse,
RecordingS3Info,
RoomPresenceParticipant,
RoomPresenceResponse,
RoomResponse,
WebhookResponse,
)
# Webhook utilities
from .webhook_utils import (
extract_room_name,
parse_participant_joined,
parse_participant_left,
parse_recording_error,
parse_recording_ready,
parse_recording_started,
parse_webhook_payload,
verify_webhook_signature,
)
# Webhook models
from .webhooks import (
DailyTrack,
DailyWebhookEvent,
DailyWebhookEventUnion,
ParticipantJoinedEvent,
ParticipantJoinedPayload,
ParticipantLeftEvent,
ParticipantLeftPayload,
RecordingErrorEvent,
RecordingErrorPayload,
RecordingReadyEvent,
RecordingReadyToDownloadPayload,
RecordingStartedEvent,
RecordingStartedPayload,
)
__all__ = [
# Client
"DailyApiClient",
"DailyApiError",
# Requests
"CreateRoomRequest",
"RoomProperties",
"RecordingsBucketConfig",
"CreateMeetingTokenRequest",
"MeetingTokenProperties",
"CreateWebhookRequest",
"UpdateWebhookRequest",
# Responses
"RoomResponse",
"RoomPresenceResponse",
"RoomPresenceParticipant",
"MeetingParticipantsResponse",
"MeetingParticipant",
"MeetingResponse",
"RecordingResponse",
"FinishedRecordingResponse",
"RecordingS3Info",
"MeetingTokenResponse",
"WebhookResponse",
# Webhooks
"DailyWebhookEvent",
"DailyWebhookEventUnion",
"DailyTrack",
"ParticipantJoinedEvent",
"ParticipantJoinedPayload",
"ParticipantLeftEvent",
"ParticipantLeftPayload",
"RecordingStartedEvent",
"RecordingStartedPayload",
"RecordingReadyEvent",
"RecordingReadyToDownloadPayload",
"RecordingErrorEvent",
"RecordingErrorPayload",
# Webhook utilities
"verify_webhook_signature",
"extract_room_name",
"parse_webhook_payload",
"parse_participant_joined",
"parse_participant_left",
"parse_recording_started",
"parse_recording_ready",
"parse_recording_error",
]

View File

@@ -1,573 +0,0 @@
"""
Daily.co API Client
Complete async client for Daily.co REST API with Pydantic models.
Reference: https://docs.daily.co/reference/rest-api
"""
from http import HTTPStatus
from typing import Any
import httpx
import structlog
from reflector.utils.string import NonEmptyString
from .requests import (
CreateMeetingTokenRequest,
CreateRoomRequest,
CreateWebhookRequest,
UpdateWebhookRequest,
)
from .responses import (
MeetingParticipantsResponse,
MeetingResponse,
MeetingTokenResponse,
RecordingResponse,
RoomPresenceResponse,
RoomResponse,
WebhookResponse,
)
logger = structlog.get_logger(__name__)
class DailyApiError(Exception):
"""Daily.co API error with full request/response context."""
def __init__(self, operation: str, response: httpx.Response):
self.operation = operation
self.response = response
self.status_code = response.status_code
self.response_body = response.text
self.url = str(response.url)
self.request_body = (
response.request.content.decode() if response.request.content else None
)
super().__init__(
f"Daily.co API error: {operation} failed with status {self.status_code}: {response.text}"
)
class DailyApiClient:
"""
Complete async client for Daily.co REST API.
Usage:
# Direct usage
client = DailyApiClient(api_key="your_api_key")
room = await client.create_room(CreateRoomRequest(name="my-room"))
await client.close() # Clean up when done
# Context manager (recommended)
async with DailyApiClient(api_key="your_api_key") as client:
room = await client.create_room(CreateRoomRequest(name="my-room"))
"""
BASE_URL = "https://api.daily.co/v1"
DEFAULT_TIMEOUT = 10.0
def __init__(
self,
api_key: NonEmptyString,
webhook_secret: NonEmptyString | None = None,
timeout: float = DEFAULT_TIMEOUT,
base_url: NonEmptyString | None = None,
):
"""
Initialize Daily.co API client.
Args:
api_key: Daily.co API key (Bearer token)
webhook_secret: Base64-encoded HMAC secret for webhook verification.
Must match the 'hmac' value provided when creating webhooks.
Generate with: base64.b64encode(os.urandom(32)).decode()
timeout: Default request timeout in seconds
base_url: Override base URL (for testing)
"""
self.api_key = api_key
self.webhook_secret = webhook_secret
self.timeout = timeout
self.base_url = base_url or self.BASE_URL
self.headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
self._client: httpx.AsyncClient | None = None
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def _get_client(self) -> httpx.AsyncClient:
if self._client is None:
self._client = httpx.AsyncClient(timeout=self.timeout)
return self._client
async def close(self):
if self._client is not None:
await self._client.aclose()
self._client = None
async def _handle_response(
self, response: httpx.Response, operation: str
) -> dict[str, Any]:
"""
Handle API response with error logging.
Args:
response: HTTP response
operation: Operation name for logging (e.g., "create_room")
Returns:
Parsed JSON response
Raises:
DailyApiError: If request failed with full context
"""
if response.status_code >= 400:
logger.error(
f"Daily.co API error: {operation}",
status_code=response.status_code,
response_body=response.text,
request_body=response.request.content.decode()
if response.request.content
else None,
url=str(response.url),
)
raise DailyApiError(operation, response)
return response.json()
# ============================================================================
# ROOMS
# ============================================================================
async def create_room(self, request: CreateRoomRequest) -> RoomResponse:
"""
Create a new Daily.co room.
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
Args:
request: Room creation request with name, privacy, and properties
Returns:
Created room data including URL and ID
Raises:
httpx.HTTPStatusError: If API request fails
"""
client = await self._get_client()
response = await client.post(
f"{self.base_url}/rooms",
headers=self.headers,
json=request.model_dump(exclude_none=True),
)
data = await self._handle_response(response, "create_room")
return RoomResponse(**data)
async def get_room(self, room_name: NonEmptyString) -> RoomResponse:
"""
Get room configuration.
Args:
room_name: Daily.co room name
Returns:
Room configuration data
Raises:
httpx.HTTPStatusError: If API request fails
"""
client = await self._get_client()
response = await client.get(
f"{self.base_url}/rooms/{room_name}",
headers=self.headers,
)
data = await self._handle_response(response, "get_room")
return RoomResponse(**data)
async def get_room_presence(
self, room_name: NonEmptyString
) -> RoomPresenceResponse:
"""
Get current participants in a room (real-time presence).
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
Args:
room_name: Daily.co room name
Returns:
List of currently present participants with join time and duration
Raises:
httpx.HTTPStatusError: If API request fails
"""
client = await self._get_client()
response = await client.get(
f"{self.base_url}/rooms/{room_name}/presence",
headers=self.headers,
)
data = await self._handle_response(response, "get_room_presence")
return RoomPresenceResponse(**data)
async def delete_room(self, room_name: NonEmptyString) -> None:
"""
Delete a room (idempotent - succeeds even if room doesn't exist).
Reference: https://docs.daily.co/reference/rest-api/rooms/delete-room
Args:
room_name: Daily.co room name
Raises:
httpx.HTTPStatusError: If API request fails (except 404)
"""
client = await self._get_client()
response = await client.delete(
f"{self.base_url}/rooms/{room_name}",
headers=self.headers,
)
# Idempotent delete - 404 means already deleted
if response.status_code == HTTPStatus.NOT_FOUND:
logger.debug("Room not found (already deleted)", room_name=room_name)
return
await self._handle_response(response, "delete_room")
# ============================================================================
# MEETINGS
# ============================================================================
async def get_meeting(self, meeting_id: NonEmptyString) -> MeetingResponse:
"""
Get full meeting information including participants.
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
Args:
meeting_id: Daily.co meeting/session ID
Returns:
Meeting metadata including room, duration, participants, and status
Raises:
httpx.HTTPStatusError: If API request fails
"""
client = await self._get_client()
response = await client.get(
f"{self.base_url}/meetings/{meeting_id}",
headers=self.headers,
)
data = await self._handle_response(response, "get_meeting")
return MeetingResponse(**data)
async def get_meeting_participants(
self,
meeting_id: NonEmptyString,
limit: int | None = None,
joined_after: NonEmptyString | None = None,
joined_before: NonEmptyString | None = None,
) -> MeetingParticipantsResponse:
"""
Get historical participant data from a completed meeting (paginated).
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
Args:
meeting_id: Daily.co meeting/session ID
limit: Maximum number of participant records to return
joined_after: Return participants who joined after this participant_id
joined_before: Return participants who joined before this participant_id
Returns:
List of participants with join times and duration
Raises:
httpx.HTTPStatusError: If API request fails (404 when no more participants)
Note:
For pagination, use joined_after with the last participant_id from previous response.
Returns 404 when no more participants remain.
"""
params = {}
if limit is not None:
params["limit"] = limit
if joined_after is not None:
params["joined_after"] = joined_after
if joined_before is not None:
params["joined_before"] = joined_before
client = await self._get_client()
response = await client.get(
f"{self.base_url}/meetings/{meeting_id}/participants",
headers=self.headers,
params=params,
)
data = await self._handle_response(response, "get_meeting_participants")
return MeetingParticipantsResponse(**data)
# ============================================================================
# RECORDINGS
# ============================================================================
async def get_recording(self, recording_id: NonEmptyString) -> RecordingResponse:
"""
https://docs.daily.co/reference/rest-api/recordings/get-recording-information
Get recording metadata and status.
"""
client = await self._get_client()
response = await client.get(
f"{self.base_url}/recordings/{recording_id}",
headers=self.headers,
)
data = await self._handle_response(response, "get_recording")
return RecordingResponse(**data)
async def list_recordings(
self,
room_name: NonEmptyString | None = None,
starting_after: str | None = None,
ending_before: str | None = None,
limit: int = 100,
) -> list[RecordingResponse]:
"""
List recordings with optional filters.
Reference: https://docs.daily.co/reference/rest-api/recordings
Args:
room_name: Filter by room name
starting_after: Pagination cursor - recording ID to start after
ending_before: Pagination cursor - recording ID to end before
limit: Max results per page (default 100, max 100)
Note: starting_after/ending_before are pagination cursors (recording IDs),
NOT time filters. API returns recordings in reverse chronological order.
"""
client = await self._get_client()
params = {"limit": limit}
if room_name:
params["room_name"] = room_name
if starting_after:
params["starting_after"] = starting_after
if ending_before:
params["ending_before"] = ending_before
response = await client.get(
f"{self.base_url}/recordings",
headers=self.headers,
params=params,
)
data = await self._handle_response(response, "list_recordings")
if not isinstance(data, dict) or "data" not in data:
logger.error(
"Daily.co API returned unexpected format for list_recordings",
data_type=type(data).__name__,
data_keys=list(data.keys()) if isinstance(data, dict) else None,
data_sample=str(data)[:500],
room_name=room_name,
operation="list_recordings",
)
raise httpx.HTTPStatusError(
message=f"Unexpected response format from list_recordings: {type(data).__name__}",
request=response.request,
response=response,
)
return [RecordingResponse(**r) for r in data["data"]]
# ============================================================================
# MEETING TOKENS
# ============================================================================
async def create_meeting_token(
self, request: CreateMeetingTokenRequest
) -> MeetingTokenResponse:
"""
Create a meeting token for participant authentication.
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
Args:
request: Token properties including room name, user_id, permissions
Returns:
JWT meeting token
Raises:
httpx.HTTPStatusError: If API request fails
"""
client = await self._get_client()
response = await client.post(
f"{self.base_url}/meeting-tokens",
headers=self.headers,
json=request.model_dump(exclude_none=True),
)
data = await self._handle_response(response, "create_meeting_token")
return MeetingTokenResponse(**data)
# ============================================================================
# WEBHOOKS
# ============================================================================
async def list_webhooks(self) -> list[WebhookResponse]:
"""
List all configured webhooks for this account.
Reference: https://docs.daily.co/reference/rest-api/webhooks
Returns:
List of webhook configurations
Raises:
httpx.HTTPStatusError: If API request fails
"""
client = await self._get_client()
response = await client.get(
f"{self.base_url}/webhooks",
headers=self.headers,
)
data = await self._handle_response(response, "list_webhooks")
# Daily.co returns array directly (not paginated)
if isinstance(data, list):
return [WebhookResponse(**wh) for wh in data]
# Future-proof: handle potential pagination envelope
if isinstance(data, dict) and "data" in data:
return [WebhookResponse(**wh) for wh in data["data"]]
logger.warning("Unexpected webhook list response format", data=data)
return []
async def create_webhook(self, request: CreateWebhookRequest) -> WebhookResponse:
    """Register a new webhook subscription with Daily.co.

    Reference: https://docs.daily.co/reference/rest-api/webhooks

    Args:
        request: Webhook configuration with URL, event types, and HMAC secret

    Returns:
        Created webhook with UUID and state

    Raises:
        httpx.HTTPStatusError: If API request fails
    """
    url = f"{self.base_url}/webhooks"
    body = request.model_dump(exclude_none=True)
    http_client = await self._get_client()
    response = await http_client.post(url, headers=self.headers, json=body)
    data = await self._handle_response(response, "create_webhook")
    return WebhookResponse(**data)
async def update_webhook(
    self, webhook_uuid: NonEmptyString, request: UpdateWebhookRequest
) -> WebhookResponse:
    """Update an existing webhook's configuration.

    Note: Daily.co may not support PATCH for all fields.
    Common pattern is delete + recreate.

    Reference: https://docs.daily.co/reference/rest-api/webhooks

    Args:
        webhook_uuid: Webhook UUID to update
        request: Updated webhook configuration

    Returns:
        Updated webhook configuration

    Raises:
        httpx.HTTPStatusError: If API request fails
    """
    url = f"{self.base_url}/webhooks/{webhook_uuid}"
    body = request.model_dump(exclude_none=True)
    http_client = await self._get_client()
    response = await http_client.patch(url, headers=self.headers, json=body)
    data = await self._handle_response(response, "update_webhook")
    return WebhookResponse(**data)
async def delete_webhook(self, webhook_uuid: NonEmptyString) -> None:
    """Remove a webhook subscription.

    Reference: https://docs.daily.co/reference/rest-api/webhooks

    Args:
        webhook_uuid: Webhook UUID to delete

    Raises:
        httpx.HTTPStatusError: If webhook not found or deletion fails
    """
    http_client = await self._get_client()
    response = await http_client.delete(
        f"{self.base_url}/webhooks/{webhook_uuid}", headers=self.headers
    )
    # Nothing useful in the body; we only care that the call succeeded.
    await self._handle_response(response, "delete_webhook")
# ============================================================================
# HELPER METHODS
# ============================================================================
async def find_webhook_by_url(self, url: NonEmptyString) -> WebhookResponse | None:
    """Look up a webhook by its exact endpoint URL.

    Args:
        url: Webhook endpoint URL to search for

    Returns:
        First webhook whose URL matches exactly, or None if none matches
    """
    webhooks = await self.list_webhooks()
    return next((wh for wh in webhooks if wh.url == url), None)
async def find_webhooks_by_pattern(
    self, pattern: NonEmptyString
) -> list[WebhookResponse]:
    """Collect webhooks whose URL contains a substring (e.g. 'ngrok').

    Args:
        pattern: String to match in webhook URLs

    Returns:
        List of matching webhooks (possibly empty)
    """
    matches = []
    for webhook in await self.list_webhooks():
        if pattern in webhook.url:
            matches.append(webhook)
    return matches

View File

@@ -1,166 +0,0 @@
"""
Daily.co API Request Models
Reference: https://docs.daily.co/reference/rest-api
"""
from typing import List, Literal
from pydantic import BaseModel, Field
from reflector.utils.string import NonEmptyString
class RecordingsBucketConfig(BaseModel):
    """
    S3 bucket configuration for raw-tracks recordings.

    Sent as part of room creation so Daily.co writes track files into our
    bucket instead of its own storage.

    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
    """

    bucket_name: NonEmptyString = Field(description="S3 bucket name")
    bucket_region: NonEmptyString = Field(description="AWS region (e.g., 'us-east-1')")
    # Daily.co assumes this role cross-account to get write access to the bucket.
    assume_role_arn: NonEmptyString = Field(
        description="AWS IAM role ARN that Daily.co will assume to write recordings"
    )
    allow_api_access: bool = Field(
        default=True,
        description="Whether to allow API access to recording metadata",
    )
class RoomProperties(BaseModel):
    """
    Room configuration properties.

    Defaults mirror Daily.co's own room defaults where we do not care;
    fields left as None are omitted from the create-room payload.
    """

    # 'raw-tracks' is what enables multitrack recordings written to our S3 bucket.
    enable_recording: Literal["cloud", "local", "raw-tracks"] | None = Field(
        default=None,
        description="Recording mode: 'cloud' for mixed, 'local' for local recording, 'raw-tracks' for multitrack, None to disable",
    )
    enable_chat: bool = Field(default=True, description="Enable in-meeting chat")
    enable_screenshare: bool = Field(default=True, description="Enable screen sharing")
    enable_knocking: bool = Field(
        default=False,
        description="Enable knocking for private rooms (allows participants to request access)",
    )
    start_video_off: bool = Field(
        default=False, description="Start with video off for all participants"
    )
    start_audio_off: bool = Field(
        default=False, description="Start with audio muted for all participants"
    )
    exp: int | None = Field(
        None, description="Room expiration timestamp (Unix epoch seconds)"
    )
    # Only meaningful when enable_recording == "raw-tracks".
    recordings_bucket: RecordingsBucketConfig | None = Field(
        None, description="S3 bucket configuration for raw-tracks recordings"
    )
class CreateRoomRequest(BaseModel):
    """
    Request to create a new Daily.co room.

    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
    """

    name: NonEmptyString = Field(description="Room name (must be unique within domain)")
    privacy: Literal["public", "private"] = Field(
        default="public", description="Room privacy setting"
    )
    # default_factory gives every request its own RoomProperties instance
    # (avoids a shared mutable default).
    properties: RoomProperties = Field(
        default_factory=RoomProperties, description="Room configuration properties"
    )
class MeetingTokenProperties(BaseModel):
    """
    Properties for meeting token creation.

    Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
    """

    room_name: NonEmptyString = Field(description="Room name this token is valid for")
    user_id: NonEmptyString | None = Field(
        None, description="User identifier to associate with token"
    )
    is_owner: bool = Field(
        default=False, description="Grant owner privileges to token holder"
    )
    start_cloud_recording: bool = Field(
        default=False, description="Automatically start cloud recording on join"
    )
    # Kept as a loose dict: Daily.co's startRecording options are an open set.
    start_cloud_recording_opts: dict | None = Field(
        default=None,
        description="Options for startRecording when start_cloud_recording is true (e.g., maxDuration)",
    )
    enable_recording_ui: bool = Field(
        default=True, description="Show recording controls in UI"
    )
    eject_at_token_exp: bool = Field(
        default=False, description="Eject participant when token expires"
    )
    nbf: int | None = Field(
        None, description="Not-before timestamp (Unix epoch seconds)"
    )
    exp: int | None = Field(
        None, description="Expiration timestamp (Unix epoch seconds)"
    )
class CreateMeetingTokenRequest(BaseModel):
    """
    Request to create a meeting token for participant authentication.

    The API expects the token settings nested under a "properties" key,
    hence the single-field wrapper.

    Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
    """

    properties: MeetingTokenProperties = Field(description="Token properties")
class CreateWebhookRequest(BaseModel):
    """
    Request to create a webhook subscription.

    Field names are camelCase to match Daily.co's API payload exactly.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    url: NonEmptyString = Field(description="Webhook endpoint URL (must be HTTPS)")
    # Restricted to the event types this application actually parses.
    eventTypes: List[
        Literal[
            "participant.joined",
            "participant.left",
            "recording.started",
            "recording.ready-to-download",
            "recording.error",
        ]
    ] = Field(
        description="Array of event types to subscribe to (only events we handle)"
    )
    hmac: NonEmptyString = Field(
        description="Base64-encoded HMAC secret for webhook signature verification"
    )
    basicAuth: NonEmptyString | None = Field(
        None, description="Optional basic auth credentials for webhook endpoint"
    )
class UpdateWebhookRequest(BaseModel):
    """
    Request to update an existing webhook.

    All fields optional: only the fields provided are changed.

    Note: Daily.co API may not support PATCH for webhooks.
    Common pattern is to delete and recreate.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    url: NonEmptyString | None = Field(None, description="New webhook endpoint URL")
    eventTypes: List[NonEmptyString] | None = Field(
        None, description="New array of event types"
    )
    hmac: NonEmptyString | None = Field(None, description="New HMAC secret")
    basicAuth: NonEmptyString | None = Field(
        None, description="New basic auth credentials"
    )

View File

@@ -1,217 +0,0 @@
"""
Daily.co API Response Models
"""
from typing import Any, Dict, List, Literal
from pydantic import BaseModel, Field
from reflector.dailyco_api.webhooks import DailyTrack
from reflector.utils.string import NonEmptyString
# not documented in daily; we fill it according to observations
RecordingStatus = Literal["in-progress", "finished"]
class RoomResponse(BaseModel):
    """
    Response from room creation or retrieval.

    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
    """

    id: NonEmptyString = Field(description="Unique room identifier (UUID)")
    name: NonEmptyString = Field(description="Room name used in URLs")
    api_created: bool = Field(description="Whether room was created via API")
    privacy: Literal["public", "private"] = Field(description="Room privacy setting")
    url: NonEmptyString = Field(description="Full room URL")
    created_at: NonEmptyString = Field(description="ISO 8601 creation timestamp")
    # Left untyped: mirrors the RoomProperties we sent, but shape is Daily's.
    config: Dict[NonEmptyString, Any] = Field(
        default_factory=dict, description="Room configuration properties"
    )
class RoomPresenceParticipant(BaseModel):
    """
    Participant presence information in a room (current participants only).

    Field names are camelCase to match the API payload.

    Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
    """

    room: NonEmptyString = Field(description="Room name")
    id: NonEmptyString = Field(description="Participant session ID")
    userId: NonEmptyString | None = Field(None, description="User ID if provided")
    userName: NonEmptyString | None = Field(None, description="User display name")
    joinTime: NonEmptyString = Field(description="ISO 8601 join timestamp")
    duration: int = Field(description="Duration in room (seconds)")
class RoomPresenceResponse(BaseModel):
    """
    Response from room presence endpoint.

    Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
    """

    total_count: int = Field(
        description="Total number of participants currently in room"
    )
    data: List[RoomPresenceParticipant] = Field(
        default_factory=list, description="Array of participant presence data"
    )
class MeetingParticipant(BaseModel):
    """
    Historical participant data from a meeting.

    Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
    """

    user_id: NonEmptyString | None = Field(None, description="User identifier")
    participant_id: NonEmptyString = Field(description="Participant session identifier")
    user_name: NonEmptyString | None = Field(None, description="User display name")
    join_time: int = Field(description="Join timestamp (Unix epoch seconds)")
    duration: int = Field(description="Duration in meeting (seconds)")
class MeetingParticipantsResponse(BaseModel):
    """
    Response from meeting participants endpoint.

    Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
    """

    data: List[MeetingParticipant] = Field(
        default_factory=list, description="Array of participant data"
    )
class MeetingResponse(BaseModel):
    """
    Response from meeting information endpoint.

    Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
    """

    id: NonEmptyString = Field(description="Meeting session identifier (UUID)")
    room: NonEmptyString = Field(description="Room name where meeting occurred")
    # Daily documents ~15s granularity on this value; do not treat it as exact.
    start_time: int = Field(
        description="Meeting start Unix timestamp (~15s granularity)"
    )
    duration: int = Field(description="Total meeting duration in seconds")
    ongoing: bool = Field(description="Whether meeting is currently active")
    max_participants: int = Field(description="Peak concurrent participant count")
    participants: List[MeetingParticipant] = Field(
        default_factory=list, description="Array of participant session data"
    )
class RecordingS3Info(BaseModel):
    """
    S3 bucket information for a recording.

    Reference: https://docs.daily.co/reference/rest-api/recordings
    """

    bucket_name: NonEmptyString
    bucket_region: NonEmptyString
    # Present only for custom/non-AWS S3 endpoints.
    endpoint: NonEmptyString | None = None
class RecordingResponse(BaseModel):
    """
    Response from recording retrieval endpoint (network layer).

    Duration may be None for recordings still being processed by Daily.
    Use FinishedRecordingResponse for recordings ready for processing.

    Reference: https://docs.daily.co/reference/rest-api/recordings
    """

    id: NonEmptyString = Field(description="Recording identifier")
    room_name: NonEmptyString = Field(description="Room where recording occurred")
    start_ts: int = Field(description="Recording start timestamp (Unix epoch seconds)")
    status: RecordingStatus = Field(
        description="Recording status ('in-progress' or 'finished')"
    )
    max_participants: int | None = Field(
        None, description="Maximum participants during recording (may be missing)"
    )
    duration: int | None = Field(
        None, description="Recording duration in seconds (None if still processing)"
    )
    share_token: NonEmptyString | None = Field(
        None, description="Token for sharing recording"
    )
    s3: RecordingS3Info | None = Field(None, description="S3 bucket information")
    tracks: list[DailyTrack] = Field(
        default_factory=list,
        description="Track list for raw-tracks recordings (always array, never null)",
    )
    # this is not a mistake but a deliberate Daily.co naming decision
    mtgSessionId: NonEmptyString | None = Field(
        None, description="Meeting session identifier (may be missing)"
    )

    def to_finished(self) -> "FinishedRecordingResponse | None":
        """Convert to FinishedRecordingResponse if duration is available and status is finished.

        Returns None otherwise, letting callers branch without re-checking
        both conditions themselves.
        """
        if self.duration is None or self.status != "finished":
            return None
        return FinishedRecordingResponse(**self.model_dump())
class FinishedRecordingResponse(RecordingResponse):
    """
    Recording with confirmed duration - ready for processing.

    This model guarantees duration is present and status is finished.
    Obtain instances via RecordingResponse.to_finished() rather than
    constructing directly.
    """

    # Narrow the parent's fields: status fixed to "finished", duration required.
    status: Literal["finished"] = Field(
        description="Recording status (always 'finished')"
    )
    duration: int = Field(description="Recording duration in seconds")
class MeetingTokenResponse(BaseModel):
    """
    Response from meeting token creation.

    Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
    """

    token: NonEmptyString = Field(
        description="JWT meeting token for participant authentication"
    )
class WebhookResponse(BaseModel):
    """
    Response from webhook creation or retrieval.

    Field names are camelCase to match the API payload.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    uuid: NonEmptyString = Field(description="Unique webhook identifier")
    url: NonEmptyString = Field(description="Webhook endpoint URL")
    hmac: NonEmptyString | None = Field(
        None, description="Base64-encoded HMAC secret for signature verification"
    )
    basicAuth: NonEmptyString | None = Field(
        None, description="Basic auth credentials if configured"
    )
    eventTypes: List[NonEmptyString] = Field(
        default_factory=list,
        description="Array of event types (e.g., ['recording.started', 'participant.joined'])",
    )
    state: Literal["ACTIVE", "FAILED"] = Field(
        description="Webhook state - FAILED after 3+ consecutive failures"
    )
    failedCount: int = Field(default=0, description="Number of consecutive failures")
    lastMomentPushed: NonEmptyString | None = Field(
        None, description="ISO 8601 timestamp of last successful push"
    )
    domainId: NonEmptyString = Field(description="Daily.co domain/account identifier")
    createdAt: NonEmptyString = Field(description="ISO 8601 creation timestamp")
    updatedAt: NonEmptyString = Field(description="ISO 8601 last update timestamp")

View File

@@ -1,228 +0,0 @@
"""
Daily.co Webhook Utilities
Utilities for verifying and parsing Daily.co webhook events.
Reference: https://docs.daily.co/reference/rest-api/webhooks
"""
import base64
import hmac
from hashlib import sha256
import structlog
from .webhooks import (
DailyWebhookEvent,
ParticipantJoinedPayload,
ParticipantLeftPayload,
RecordingErrorPayload,
RecordingReadyToDownloadPayload,
RecordingStartedPayload,
)
logger = structlog.get_logger(__name__)
def verify_webhook_signature(
    body: bytes,
    signature: str,
    timestamp: str,
    webhook_secret: str,
) -> bool:
    """
    Verify Daily.co webhook signature using HMAC-SHA256.

    Daily.co signature verification:
    1. Base64-decode the webhook secret
    2. Create signed content: timestamp + '.' + body
    3. Compute HMAC-SHA256(secret, signed_content)
    4. Base64-encode the result
    5. Compare with provided signature using constant-time comparison

    Reference: https://docs.daily.co/reference/rest-api/webhooks

    Args:
        body: Raw request body bytes
        signature: X-Webhook-Signature header value
        timestamp: X-Webhook-Timestamp header value
        webhook_secret: Base64-encoded HMAC secret

    Returns:
        True if signature is valid, False otherwise

    Example:
        >>> body = b'{"version":"1.0.0","type":"participant.joined",...}'
        >>> signature = "abc123..."
        >>> timestamp = "1234567890"
        >>> secret = "your-base64-secret"
        >>> is_valid = verify_webhook_signature(body, signature, timestamp, secret)
    """
    if not signature or not timestamp or not webhook_secret:
        logger.warning(
            "Missing required data for webhook verification",
            has_signature=bool(signature),
            has_timestamp=bool(timestamp),
            has_secret=bool(webhook_secret),
        )
        return False
    try:
        secret_bytes = base64.b64decode(webhook_secret)
        signed_content = timestamp.encode() + b"." + body
        expected = hmac.new(secret_bytes, signed_content, sha256).digest()
        expected_b64 = base64.b64encode(expected).decode()
        # Constant-time comparison to prevent timing attacks
        return hmac.compare_digest(expected_b64, signature)
    except (ValueError, TypeError) as e:
        # Fix: the previous clause caught `base64.binascii.Error`, which relies
        # on the undocumented fact that the base64 module re-exports binascii.
        # binascii.Error and UnicodeDecodeError are both ValueError subclasses,
        # so (ValueError, TypeError) covers bad base64 input, bad types, and
        # decode failures without reaching into another module's internals.
        logger.error(
            "Webhook signature verification failed",
            error=str(e),
            error_type=type(e).__name__,
        )
        return False
def extract_room_name(event: DailyWebhookEvent) -> str | None:
    """
    Pull the room name out of a webhook event payload, if present.

    Args:
        event: Parsed webhook event

    Returns:
        Room name if present and is a string, None otherwise

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> room_name = extract_room_name(event)
    """
    candidate = event.payload.get("room_name")
    if isinstance(candidate, str):
        return candidate
    # Anything non-string (including absent/None/odd payload values) is
    # treated as "no room name".
    return None
def parse_participant_joined(event: DailyWebhookEvent) -> ParticipantJoinedPayload:
    """
    Validate a participant.joined webhook payload into its typed model.

    Args:
        event: Webhook event with type "participant.joined"

    Returns:
        Parsed participant joined payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    payload = event.payload
    return ParticipantJoinedPayload(**payload)
def parse_participant_left(event: DailyWebhookEvent) -> ParticipantLeftPayload:
    """
    Validate a participant.left webhook payload into its typed model.

    Args:
        event: Webhook event with type "participant.left"

    Returns:
        Parsed participant left payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    payload = event.payload
    return ParticipantLeftPayload(**payload)
def parse_recording_started(event: DailyWebhookEvent) -> RecordingStartedPayload:
    """
    Validate a recording.started webhook payload into its typed model.

    Args:
        event: Webhook event with type "recording.started"

    Returns:
        Parsed recording started payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    payload = event.payload
    return RecordingStartedPayload(**payload)
def parse_recording_ready(
    event: DailyWebhookEvent,
) -> RecordingReadyToDownloadPayload:
    """
    Validate a recording.ready-to-download webhook payload into its typed model.

    This event is sent when raw-tracks recordings are complete and uploaded to
    S3; the payload includes a 'tracks' array with individual audio/video files.

    Args:
        event: Webhook event with type "recording.ready-to-download"

    Returns:
        Parsed recording ready payload with tracks array

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> if event.type == "recording.ready-to-download":
        ...     payload = parse_recording_ready(event)
        ...     audio_tracks = [t for t in payload.tracks if t.type == "audio"]
    """
    payload = event.payload
    return RecordingReadyToDownloadPayload(**payload)
def parse_recording_error(event: DailyWebhookEvent) -> RecordingErrorPayload:
    """
    Validate a recording.error webhook payload into its typed model.

    Args:
        event: Webhook event with type "recording.error"

    Returns:
        Parsed recording error payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    payload = event.payload
    return RecordingErrorPayload(**payload)
# Dispatch table mapping webhook event type -> typed payload parser.
# Consumed by parse_webhook_payload; unknown event types fall through to the
# raw payload dict there.
WEBHOOK_PARSERS = {
    "participant.joined": parse_participant_joined,
    "participant.left": parse_participant_left,
    "recording.started": parse_recording_started,
    "recording.ready-to-download": parse_recording_ready,
    "recording.error": parse_recording_error,
}
def parse_webhook_payload(event: DailyWebhookEvent):
    """
    Dispatch a webhook event to its typed payload parser by event type.

    Args:
        event: Webhook event

    Returns:
        Typed payload model based on event type, or raw dict if unknown

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> payload = parse_webhook_payload(event)
        >>> if isinstance(payload, ParticipantJoinedPayload):
        ...     print(f"User {payload.user_name} joined")
    """
    parser = WEBHOOK_PARSERS.get(event.type)
    if parser is None:
        # Unknown type: log and hand back the untyped payload dict.
        logger.warning("Unknown webhook event type", event_type=event.type)
        return event.payload
    return parser(event)

View File

@@ -1,271 +0,0 @@
"""
Daily.co Webhook Event Models
Reference: https://docs.daily.co/reference/rest-api/webhooks
"""
from typing import Annotated, Any, Dict, Literal, Union
from pydantic import BaseModel, Field, field_validator
from reflector.utils.string import NonEmptyString
def normalize_timestamp_to_int(v):
    """
    Coerce float Unix timestamps to int by truncating the fractional part.

    Daily.co sometimes delivers timestamps as floats (e.g. 1708972279.96),
    while our models declare those fields as `int`. Non-float values
    (including None) are passed through unchanged.
    """
    return int(v) if isinstance(v, float) else v
# The closed set of Daily.co webhook event types this application subscribes
# to and knows how to parse (mirrors the parser table in webhook_utils and the
# eventTypes allowed by CreateWebhookRequest).
WebhookEventType = Literal[
    "participant.joined",
    "participant.left",
    "recording.started",
    "recording.ready-to-download",
    "recording.error",
]
class DailyTrack(BaseModel):
    """
    Individual audio or video track from a multitrack recording.

    Reference: https://docs.daily.co/reference/rest-api/recordings
    """

    type: Literal["audio", "video"]
    # camelCase matches the Daily.co API payload.
    s3Key: NonEmptyString = Field(description="S3 object key for the track file")
    size: int = Field(description="File size in bytes")
class DailyWebhookEvent(BaseModel):
    """
    Base structure for all Daily.co webhook events.

    All events share five common fields documented below. The payload is kept
    as an untyped dict here; use the typed *Event classes below (or the
    parse_* helpers) for schema validation.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    version: NonEmptyString = Field(
        description="Represents the version of the event. This uses semantic versioning to inform a consumer if the payload has introduced any breaking changes"
    )
    type: WebhookEventType = Field(
        description="Represents the type of the event described in the payload"
    )
    id: NonEmptyString = Field(
        description="An identifier representing this specific event"
    )
    payload: Dict[NonEmptyString, Any] = Field(
        description="An object representing the event, whose fields are described in the corresponding payload class"
    )
    event_ts: int = Field(
        description="Documenting when the webhook itself was sent. This timestamp is different than the time of the event the webhook describes. For example, a recording.started event will contain a start_ts timestamp of when the actual recording started, and a slightly later event_ts timestamp indicating when the webhook event was sent"
    )
    # Daily sometimes sends float timestamps; truncate before int validation.
    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )
class ParticipantJoinedPayload(BaseModel):
    """
    Payload for participant.joined webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-joined
    """

    room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
    session_id: NonEmptyString = Field(description="Daily.co session identifier")
    user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
    user_name: NonEmptyString | None = Field(None, description="User display name")
    joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
    # Daily sometimes sends float timestamps; truncate before int validation.
    _normalize_joined_at = field_validator("joined_at", mode="before")(
        normalize_timestamp_to_int
    )
class ParticipantLeftPayload(BaseModel):
    """
    Payload for participant.left webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-left
    """

    room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
    session_id: NonEmptyString = Field(description="Daily.co session identifier")
    user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
    user_name: NonEmptyString | None = Field(None, description="User display name")
    joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
    duration: int | None = Field(
        None, description="Duration of participation in seconds"
    )
    # Daily sometimes sends float timestamps; truncate before int validation.
    _normalize_joined_at = field_validator("joined_at", mode="before")(
        normalize_timestamp_to_int
    )
class RecordingStartedPayload(BaseModel):
    """
    Payload for recording.started webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-started
    """

    room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
    recording_id: NonEmptyString = Field(description="Recording identifier")
    start_ts: int | None = Field(None, description="Recording start timestamp")
    # Daily sometimes sends float timestamps; truncate before int validation.
    _normalize_start_ts = field_validator("start_ts", mode="before")(
        normalize_timestamp_to_int
    )
class RecordingReadyToDownloadPayload(BaseModel):
    """
    Payload for recording.ready-to-download webhook event.

    This is sent when raw-tracks recordings are complete and uploaded to S3.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-ready-to-download
    """

    type: Literal["cloud", "raw-tracks"] = Field(
        description="The type of recording that was generated"
    )
    recording_id: NonEmptyString = Field(
        description="An ID identifying the recording that was generated"
    )
    room_name: NonEmptyString = Field(
        description="The name of the room where the recording was made"
    )
    start_ts: int = Field(
        description="The Unix epoch time in seconds representing when the recording started"
    )
    status: Literal["finished"] = Field(
        description="The status of the given recording (always 'finished' in ready-to-download webhook, see RecordingStatus in responses.py for full API statuses)"
    )
    max_participants: int = Field(
        description="The number of participants on the call that were recorded"
    )
    duration: int = Field(description="The duration in seconds of the call")
    s3_key: NonEmptyString = Field(
        description="The location of the recording in the provided S3 bucket"
    )
    share_token: NonEmptyString | None = Field(
        None, description="undocumented documented secret field"
    )
    tracks: list[DailyTrack] | None = Field(
        None,
        description="If the recording is a raw-tracks recording, a tracks field will be provided. If role permissions have been removed, the tracks field may be null",
    )
    # Daily sometimes sends float timestamps; truncate before int validation.
    _normalize_start_ts = field_validator("start_ts", mode="before")(
        normalize_timestamp_to_int
    )
class RecordingErrorPayload(BaseModel):
    """
    Payload for recording.error webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-error
    """

    # "clourd-recording-err" is Daily.co's own typo, preserved deliberately.
    action: Literal["clourd-recording-err", "cloud-recording-error"] = Field(
        description="A string describing the event that was emitted (both variants are documented)"
    )
    error_msg: NonEmptyString = Field(description="The error message returned")
    instance_id: NonEmptyString = Field(
        description="The recording instance ID that was passed into the start recording command"
    )
    room_name: NonEmptyString = Field(
        description="The name of the room where the recording was made"
    )
    timestamp: int = Field(
        description="The Unix epoch time in seconds representing when the error was emitted"
    )
    # Daily sometimes sends float timestamps; truncate before int validation.
    _normalize_timestamp = field_validator("timestamp", mode="before")(
        normalize_timestamp_to_int
    )
class ParticipantJoinedEvent(BaseModel):
    # Fully-typed variant of DailyWebhookEvent with a concrete payload model;
    # member of the DailyWebhookEventUnion discriminated union (on `type`).
    version: NonEmptyString
    type: Literal["participant.joined"]
    id: NonEmptyString
    payload: ParticipantJoinedPayload
    event_ts: int
    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )
class ParticipantLeftEvent(BaseModel):
    # Fully-typed variant of DailyWebhookEvent with a concrete payload model;
    # member of the DailyWebhookEventUnion discriminated union (on `type`).
    version: NonEmptyString
    type: Literal["participant.left"]
    id: NonEmptyString
    payload: ParticipantLeftPayload
    event_ts: int
    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )
class RecordingStartedEvent(BaseModel):
    # Fully-typed variant of DailyWebhookEvent with a concrete payload model;
    # member of the DailyWebhookEventUnion discriminated union (on `type`).
    version: NonEmptyString
    type: Literal["recording.started"]
    id: NonEmptyString
    payload: RecordingStartedPayload
    event_ts: int
    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )
class RecordingReadyEvent(BaseModel):
    # Fully-typed variant of DailyWebhookEvent with a concrete payload model;
    # member of the DailyWebhookEventUnion discriminated union (on `type`).
    version: NonEmptyString
    type: Literal["recording.ready-to-download"]
    id: NonEmptyString
    payload: RecordingReadyToDownloadPayload
    event_ts: int
    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )
class RecordingErrorEvent(BaseModel):
    # Fully-typed variant of DailyWebhookEvent with a concrete payload model;
    # member of the DailyWebhookEventUnion discriminated union (on `type`).
    version: NonEmptyString
    type: Literal["recording.error"]
    id: NonEmptyString
    payload: RecordingErrorPayload
    event_ts: int
    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )
# Discriminated union over all typed webhook events: pydantic picks the right
# member by the `type` literal, so validation errors name the correct schema.
DailyWebhookEventUnion = Annotated[
    Union[
        ParticipantJoinedEvent,
        ParticipantLeftEvent,
        RecordingStartedEvent,
        RecordingReadyEvent,
        RecordingErrorEvent,
    ],
    Field(discriminator="type"),
]

View File

@@ -25,13 +25,10 @@ def get_database() -> databases.Database:
# import models # import models
import reflector.db.calendar_events # noqa import reflector.db.calendar_events # noqa
import reflector.db.daily_participant_sessions # noqa
import reflector.db.meetings # noqa import reflector.db.meetings # noqa
import reflector.db.recordings # noqa import reflector.db.recordings # noqa
import reflector.db.rooms # noqa import reflector.db.rooms # noqa
import reflector.db.transcripts # noqa import reflector.db.transcripts # noqa
import reflector.db.user_api_keys # noqa
import reflector.db.users # noqa
kwargs = {} kwargs = {}
if "postgres" not in settings.DATABASE_URL: if "postgres" not in settings.DATABASE_URL:

View File

@@ -1,229 +0,0 @@
"""Daily.co participant session tracking.
Stores webhook data for participant.joined and participant.left events to provide
historical session information (Daily.co API only returns current participants).
"""
from datetime import datetime
import sqlalchemy as sa
from pydantic import BaseModel
from sqlalchemy.dialects.postgresql import insert
from reflector.db import get_database, metadata
from reflector.utils.string import NonEmptyString
# One row per participant session (join -> leave) in a Daily.co meeting,
# populated from participant.joined / participant.left webhooks.
daily_participant_sessions = sa.Table(
    "daily_participant_session",
    metadata,
    # ID format: {meeting_id}:{user_id}:{joined_at_ms} (see model docstring).
    sa.Column("id", sa.String, primary_key=True),
    # Cascade deletes: removing a meeting or room removes its session rows.
    sa.Column(
        "meeting_id",
        sa.String,
        sa.ForeignKey("meeting.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column(
        "room_id",
        sa.String,
        sa.ForeignKey("room.id", ondelete="CASCADE"),
        nullable=False,
    ),
    # Daily.co's session identifier from the webhook payload.
    sa.Column("session_id", sa.String, nullable=False),
    sa.Column("user_id", sa.String, nullable=True),
    sa.Column("user_name", sa.String, nullable=False),
    sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
    # NULL while the participant is still in the meeting.
    sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
    # Supports "open sessions in meeting" lookups (left_at IS NULL filter).
    sa.Index("idx_daily_session_meeting_left", "meeting_id", "left_at"),
    sa.Index("idx_daily_session_room", "room_id"),
)
class DailyParticipantSession(BaseModel):
    """Daily.co participant session record.

    Tracks when a participant joined and left a meeting. Populated from webhooks:
    - participant.joined: Creates record with left_at=None
    - participant.left: Updates record with left_at

    ID format: {meeting_id}:{user_id}:{joined_at_ms}
    - Ensures idempotency (duplicate webhooks don't create duplicates)
    - Allows same user to rejoin (different joined_at = different session)

    Duration is calculated as: left_at - joined_at (not stored)
    """

    id: NonEmptyString
    meeting_id: NonEmptyString
    room_id: NonEmptyString
    session_id: NonEmptyString  # Daily.co's session_id (identifies room session)
    user_id: NonEmptyString | None = None
    user_name: str
    joined_at: datetime
    # None while the participant is still in the meeting.
    left_at: datetime | None = None
class DailyParticipantSessionController:
"""Controller for Daily.co participant session persistence."""
async def get_by_id(self, id: str) -> DailyParticipantSession | None:
    """Fetch a single session row by primary key, or None if absent."""
    row = await get_database().fetch_one(
        daily_participant_sessions.select().where(
            daily_participant_sessions.c.id == id
        )
    )
    if not row:
        return None
    return DailyParticipantSession(**row)
async def get_open_session(
self, meeting_id: NonEmptyString, session_id: NonEmptyString
) -> DailyParticipantSession | None:
"""Get the open (not left) session for a user in a meeting."""
query = daily_participant_sessions.select().where(
sa.and_(
daily_participant_sessions.c.meeting_id == meeting_id,
daily_participant_sessions.c.session_id == session_id,
daily_participant_sessions.c.left_at.is_(None),
)
)
results = await get_database().fetch_all(query)
if len(results) > 1:
raise ValueError(
f"Multiple open sessions for daily session {session_id} in meeting {meeting_id}: "
f"found {len(results)} sessions"
)
return DailyParticipantSession(**results[0]) if results else None
async def upsert_joined(self, session: DailyParticipantSession) -> None:
"""Insert or update when participant.joined webhook arrives.
Idempotent: Duplicate webhooks with same ID are safely ignored.
Out-of-order: If left webhook arrived first, preserves left_at.
"""
query = insert(daily_participant_sessions).values(**session.model_dump())
query = query.on_conflict_do_update(
index_elements=["id"],
set_={"user_name": session.user_name},
)
await get_database().execute(query)
async def upsert_left(self, session: DailyParticipantSession) -> None:
"""Update session when participant.left webhook arrives.
Finds the open session for this user in this meeting and updates left_at.
Works around Daily.co webhook timestamp inconsistency (joined_at differs by ~4ms between webhooks).
Handles three cases:
1. Normal flow: open session exists → updates left_at
2. Out-of-order: left arrives first → creates new record with left data
3. Duplicate: left arrives again → idempotent (DB trigger prevents left_at modification)
"""
if session.left_at is None:
raise ValueError("left_at is required for upsert_left")
if session.left_at <= session.joined_at:
raise ValueError(
f"left_at ({session.left_at}) must be after joined_at ({session.joined_at})"
)
# Find existing open session (works around timestamp mismatch in webhooks)
existing = await self.get_open_session(session.meeting_id, session.session_id)
if existing:
# Update existing open session
query = (
daily_participant_sessions.update()
.where(daily_participant_sessions.c.id == existing.id)
.values(left_at=session.left_at)
)
await get_database().execute(query)
else:
# Out-of-order or first webhook: insert new record
query = insert(daily_participant_sessions).values(**session.model_dump())
query = query.on_conflict_do_nothing(index_elements=["id"])
await get_database().execute(query)
async def get_by_meeting(self, meeting_id: str) -> list[DailyParticipantSession]:
"""Get all participant sessions for a meeting (active and ended)."""
query = daily_participant_sessions.select().where(
daily_participant_sessions.c.meeting_id == meeting_id
)
results = await get_database().fetch_all(query)
return [DailyParticipantSession(**result) for result in results]
async def get_active_by_meeting(
self, meeting_id: str
) -> list[DailyParticipantSession]:
"""Get only active (not left) participant sessions for a meeting."""
query = daily_participant_sessions.select().where(
sa.and_(
daily_participant_sessions.c.meeting_id == meeting_id,
daily_participant_sessions.c.left_at.is_(None),
)
)
results = await get_database().fetch_all(query)
return [DailyParticipantSession(**result) for result in results]
async def get_all_sessions_for_meeting(
self, meeting_id: NonEmptyString
) -> dict[NonEmptyString, DailyParticipantSession]:
query = daily_participant_sessions.select().where(
daily_participant_sessions.c.meeting_id == meeting_id
)
results = await get_database().fetch_all(query)
# TODO DailySessionId custom type
return {row["session_id"]: DailyParticipantSession(**row) for row in results}
async def batch_upsert_sessions(
self, sessions: list[DailyParticipantSession]
) -> None:
"""Upsert multiple sessions in single query.
Uses ON CONFLICT for idempotency. Updates user_name on conflict since they may change it during a meeting.
"""
if not sessions:
return
values = [session.model_dump() for session in sessions]
query = insert(daily_participant_sessions).values(values)
query = query.on_conflict_do_update(
index_elements=["id"],
set_={
# Preserve existing left_at to prevent race conditions
"left_at": sa.func.coalesce(
daily_participant_sessions.c.left_at,
query.excluded.left_at,
),
"user_name": query.excluded.user_name,
},
)
await get_database().execute(query)
async def batch_close_sessions(
self, session_ids: list[NonEmptyString], left_at: datetime
) -> None:
"""Mark multiple sessions as left in single query.
Only updates sessions where left_at is NULL (protects already-closed sessions).
Left_at mismatch for existing sessions is ignored, assumed to be not important issue if ever happens.
"""
if not session_ids:
return
query = (
daily_participant_sessions.update()
.where(
sa.and_(
daily_participant_sessions.c.id.in_(session_ids),
daily_participant_sessions.c.left_at.is_(None),
)
)
.values(left_at=left_at)
)
await get_database().execute(query)
daily_participant_sessions_controller = DailyParticipantSessionController()

View File

@@ -7,9 +7,8 @@ from sqlalchemy.dialects.postgresql import JSONB
from reflector.db import get_database, metadata from reflector.db import get_database, metadata
from reflector.db.rooms import Room from reflector.db.rooms import Room
from reflector.schemas.platform import WHEREBY_PLATFORM, Platform from reflector.platform_types import Platform
from reflector.utils import generate_uuid4 from reflector.utils import generate_uuid4
from reflector.utils.string import assert_equal
meetings = sa.Table( meetings = sa.Table(
"meeting", "meeting",
@@ -61,7 +60,7 @@ meetings = sa.Table(
"platform", "platform",
sa.String, sa.String,
nullable=False, nullable=False,
server_default=assert_equal(WHEREBY_PLATFORM, "whereby"), server_default="whereby",
), ),
sa.Index("idx_meeting_room_id", "room_id"), sa.Index("idx_meeting_room_id", "room_id"),
sa.Index("idx_meeting_calendar_event", "calendar_event_id"), sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
@@ -109,7 +108,7 @@ class Meeting(BaseModel):
is_active: bool = True is_active: bool = True
calendar_event_id: str | None = None calendar_event_id: str | None = None
calendar_metadata: dict[str, Any] | None = None calendar_metadata: dict[str, Any] | None = None
platform: Platform = WHEREBY_PLATFORM platform: Platform = "whereby"
class MeetingController: class MeetingController:
@@ -124,6 +123,7 @@ class MeetingController:
room: Room, room: Room,
calendar_event_id: str | None = None, calendar_event_id: str | None = None,
calendar_metadata: dict[str, Any] | None = None, calendar_metadata: dict[str, Any] | None = None,
platform: Platform = "whereby",
): ):
meeting = Meeting( meeting = Meeting(
id=id, id=id,
@@ -139,19 +139,15 @@ class MeetingController:
recording_trigger=room.recording_trigger, recording_trigger=room.recording_trigger,
calendar_event_id=calendar_event_id, calendar_event_id=calendar_event_id,
calendar_metadata=calendar_metadata, calendar_metadata=calendar_metadata,
platform=room.platform, platform=platform,
) )
query = meetings.insert().values(**meeting.model_dump()) query = meetings.insert().values(**meeting.model_dump())
await get_database().execute(query) await get_database().execute(query)
return meeting return meeting
async def get_all_active(self, platform: str | None = None) -> list[Meeting]: async def get_all_active(self) -> list[Meeting]:
conditions = [meetings.c.is_active] query = meetings.select().where(meetings.c.is_active)
if platform is not None: return await get_database().fetch_all(query)
conditions.append(meetings.c.platform == platform)
query = meetings.select().where(sa.and_(*conditions))
results = await get_database().fetch_all(query)
return [Meeting(**result) for result in results]
async def get_by_room_name( async def get_by_room_name(
self, self,
@@ -161,14 +157,16 @@ class MeetingController:
Get a meeting by room name. Get a meeting by room name.
For backward compatibility, returns the most recent meeting. For backward compatibility, returns the most recent meeting.
""" """
end_date = getattr(meetings.c, "end_date")
query = ( query = (
meetings.select() meetings.select()
.where(meetings.c.room_name == room_name) .where(meetings.c.room_name == room_name)
.order_by(meetings.c.end_date.desc()) .order_by(end_date.desc())
) )
result = await get_database().fetch_one(query) result = await get_database().fetch_one(query)
if not result: if not result:
return None return None
return Meeting(**result) return Meeting(**result)
async def get_active(self, room: Room, current_time: datetime) -> Meeting | None: async def get_active(self, room: Room, current_time: datetime) -> Meeting | None:
@@ -191,6 +189,7 @@ class MeetingController:
result = await get_database().fetch_one(query) result = await get_database().fetch_one(query)
if not result: if not result:
return None return None
return Meeting(**result) return Meeting(**result)
async def get_all_active_for_room( async def get_all_active_for_room(
@@ -230,27 +229,17 @@ class MeetingController:
return None return None
return Meeting(**result) return Meeting(**result)
async def get_by_id( async def get_by_id(self, meeting_id: str, **kwargs) -> Meeting | None:
self, meeting_id: str, room: Room | None = None
) -> Meeting | None:
query = meetings.select().where(meetings.c.id == meeting_id) query = meetings.select().where(meetings.c.id == meeting_id)
if room:
query = query.where(meetings.c.room_id == room.id)
result = await get_database().fetch_one(query) result = await get_database().fetch_one(query)
if not result: if not result:
return None return None
return Meeting(**result) return Meeting(**result)
async def get_by_calendar_event( async def get_by_calendar_event(self, calendar_event_id: str) -> Meeting | None:
self, calendar_event_id: str, room: Room
) -> Meeting | None:
query = meetings.select().where( query = meetings.select().where(
meetings.c.calendar_event_id == calendar_event_id meetings.c.calendar_event_id == calendar_event_id
) )
if room:
query = query.where(meetings.c.room_id == room.id)
result = await get_database().fetch_one(query) result = await get_database().fetch_one(query)
if not result: if not result:
return None return None
@@ -260,7 +249,7 @@ class MeetingController:
query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs) query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
await get_database().execute(query) await get_database().execute(query)
async def increment_num_clients(self, meeting_id: str) -> None: async def increment_num_clients(self, meeting_id: str):
"""Atomically increment participant count.""" """Atomically increment participant count."""
query = ( query = (
meetings.update() meetings.update()
@@ -269,7 +258,7 @@ class MeetingController:
) )
await get_database().execute(query) await get_database().execute(query)
async def decrement_num_clients(self, meeting_id: str) -> None: async def decrement_num_clients(self, meeting_id: str):
"""Atomically decrement participant count (min 0).""" """Atomically decrement participant count (min 0)."""
query = ( query = (
meetings.update() meetings.update()

Some files were not shown because too many files have changed in this diff Show More