Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-21 04:39:06 +00:00)

Compare commits: mathieu/re...mathieu/ji (27 commits)
| SHA1 |
|---|
| e91979abbc |
| 95e8011975 |
| 293f7d4f1f |
| 41224a424c |
| dd0089906f |
| fa559b1970 |
| c26ce65083 |
| 52eff2acc0 |
| 7875ec3432 |
| 398be06fad |
| da700069d9 |
| 51229a1790 |
| 2d2c23f7cc |
| 0acb9cac79 |
| d861d92cc2 |
| 24ff83a2ec |
| 249234238c |
| 42a603d5c3 |
| 6d2092f950 |
| f2bb6aaecb |
| 2b136ac7b0 |
| 3f4fc26483 |
| 8e5ef5bca6 |
| d49fdcb38d |
| d42380abf1 |
| cf64e1a3d9 |
| ea53ca7000 |
.github/workflows/deploy.yml (vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
name: Deploy to Amazon ECS

on: [workflow_dispatch]

env:
  # 950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector
  AWS_REGION: us-east-1
  ECR_REPOSITORY: reflector

jobs:
  build:
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: linux-amd64
            arch: amd64
          - platform: linux/arm64
            runner: linux-arm64
            arch: arm64

    runs-on: ${{ matrix.runner }}

    permissions:
      contents: read

    outputs:
      registry: ${{ steps.login-ecr.outputs.registry }}

    steps:
      - uses: actions/checkout@v4

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push ${{ matrix.arch }}
        uses: docker/build-push-action@v5
        with:
          context: server
          platforms: ${{ matrix.platform }}
          push: true
          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
          provenance: false

  create-manifest:
    runs-on: ubuntu-latest
    needs: [build]

    permissions:
      deployments: write
      contents: read

    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        uses: aws-actions/amazon-ecr-login@v2

      - name: Create and push multi-arch manifest
        run: |
          # Get the registry URL (since we can't easily access job outputs in matrix)
          ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com

          docker manifest create \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64

          docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest

          echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"
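The workflow above runs only on `workflow_dispatch`, so it is started by hand. A minimal sketch of dispatching it and then checking that both architecture images ended up behind the `:latest` manifest (assuming the `gh` CLI is authenticated against this repo and Docker can reach the ECR registry named in the comment above):

```bash
# Kick off the deploy workflow manually (workflow_dispatch is its only trigger)
gh workflow run deploy.yml

# Once it finishes, confirm the manifest references both amd64 and arm64
docker buildx imagetools inspect \
  950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector:latest
```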
.github/workflows/dockerhub-backend.yml (vendored, deleted, 53 lines)
@@ -1,53 +0,0 @@
name: Build and Push Backend Docker Image (Docker Hub)

on:
  push:
    tags:
      - "v*"
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: monadicalsas/reflector-backend

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: monadicalsas
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: ./server
          file: ./server/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64
.github/workflows/dockerhub-frontend.yml (vendored, deleted, 70 lines)
@@ -1,70 +0,0 @@
name: Build and Push Frontend Docker Image

on:
  push:
    tags:
      - "v*"
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: monadicalsas/reflector-frontend

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: monadicalsas
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=raw,value=latest,enable={{is_default_branch}}
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: ./www
          file: ./www/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64

  deploy:
    needs: build-and-push
    runs-on: ubuntu-latest
    if: success()
    strategy:
      matrix:
        environment: [reflector-monadical, reflector-media]
    environment: ${{ matrix.environment }}
    steps:
      - name: Trigger Coolify deployment
        run: |
          curl -X POST "${{ secrets.COOLIFY_WEBHOOK_URL }}" \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer ${{ secrets.COOLIFY_WEBHOOK_TOKEN }}" \
            -f || (echo "Failed to trigger Coolify deployment for ${{ matrix.environment }}" && exit 1)
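The deleted deploy job drove Coolify through a bare webhook. A minimal sketch for exercising such a webhook by hand, outside CI (the URL and token below are placeholders standing in for the `COOLIFY_WEBHOOK_URL` and `COOLIFY_WEBHOOK_TOKEN` secrets):

```bash
# Placeholders for the values stored as GitHub environment secrets
COOLIFY_WEBHOOK_URL="https://coolify.example.com/webhook"
COOLIFY_WEBHOOK_TOKEN="changeme"

# -f makes curl exit non-zero on HTTP errors, which is what the workflow's error branch keys off
curl -f -X POST "$COOLIFY_WEBHOOK_URL" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $COOLIFY_WEBHOOK_TOKEN"
```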
.gitignore (vendored, 3 changes)
@@ -1,7 +1,6 @@
.DS_Store
server/.env
.env
Caddyfile
server/exportdanswer
.vercel
.env*.local
@@ -19,5 +18,3 @@ CLAUDE.local.md
www/.env.development
www/.env.production
.playwright-mcp
docs/pnpm-lock.yaml
.secrets
@@ -1,5 +1 @@
b9d891d3424f371642cb032ecfd0e2564470a72c:server/tests/test_transcripts_recording_deletion.py:generic-api-key:15
docs/docs/installation/auth-setup.md:curl-auth-header:250
docs/docs/installation/daily-setup.md:curl-auth-header:277
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:74
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:83
@@ -1,24 +0,0 @@
# Example secrets file for GitHub Actions workflows
# Copy this to .secrets and fill in your values
# These secrets should be configured in GitHub repository settings:
# Settings > Secrets and variables > Actions

# DockerHub Configuration (required for frontend and backend deployment)
# Create a Docker Hub access token at https://hub.docker.com/settings/security
# Username: monadicalsas
DOCKERHUB_TOKEN=your-dockerhub-access-token

# GitHub Token (required for frontend and backend deployment)
# Used by docker/metadata-action for extracting image metadata
# Can use the default GITHUB_TOKEN or create a personal access token
GITHUB_TOKEN=your-github-token-or-use-default-GITHUB_TOKEN

# Coolify Deployment Webhook (required for frontend deployment)
# Used to trigger automatic deployment after image push
# Configure these secrets in GitHub Environments:
# Each environment should have:
# - COOLIFY_WEBHOOK_URL: The webhook URL for that specific deployment
# - COOLIFY_WEBHOOK_TOKEN: The webhook token (can be the same for both if using same token)

# Optional: GitHub Actions Cache Token (for local testing with act)
GHA_CACHE_TOKEN=your-github-token-or-empty
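As the last comment hints, these values also feed local workflow runs under act. A minimal sketch (assumes act and Docker are installed; act's `--secret-file` flag reads KEY=value pairs in exactly the format of the file above):

```bash
# Run the workflow_dispatch-triggered workflows locally, feeding secrets from .secrets
act workflow_dispatch --secret-file .secrets
```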
CHANGELOG.md (201 changes)
@@ -1,206 +1,5 @@
# Changelog

## [0.23.2](https://github.com/Monadical-SAS/reflector/compare/v0.23.1...v0.23.2) (2025-12-11)

### Bug Fixes

* build on push tags ([#785](https://github.com/Monadical-SAS/reflector/issues/785)) ([d7f140b](https://github.com/Monadical-SAS/reflector/commit/d7f140b7d1f4660d5da7a0da1357f68869e0b5cd))

## [0.23.1](https://github.com/Monadical-SAS/reflector/compare/v0.23.0...v0.23.1) (2025-12-11)

### Bug Fixes

* populate room_name in transcript GET endpoint ([#783](https://github.com/Monadical-SAS/reflector/issues/783)) ([0eba147](https://github.com/Monadical-SAS/reflector/commit/0eba1470181c7b9e0a79964a1ef28c09bcbdd9d7))

## [0.23.0](https://github.com/Monadical-SAS/reflector/compare/v0.22.4...v0.23.0) (2025-12-10)

### Features

* dockerhub ci ([#772](https://github.com/Monadical-SAS/reflector/issues/772)) ([00549f1](https://github.com/Monadical-SAS/reflector/commit/00549f153ade922cf4cb6c5358a7d11a39c426d2))
* llm retries ([#739](https://github.com/Monadical-SAS/reflector/issues/739)) ([61f0e29](https://github.com/Monadical-SAS/reflector/commit/61f0e29d4c51eab54ee67af92141fbb171e8ccaa))

### Bug Fixes

* celery inspect bug sidestep in restart script ([#766](https://github.com/Monadical-SAS/reflector/issues/766)) ([ec17ed7](https://github.com/Monadical-SAS/reflector/commit/ec17ed7b587cf6ee143646baaee67a7c017044d4))
* deploy frontend to coolify ([#779](https://github.com/Monadical-SAS/reflector/issues/779)) ([91650ec](https://github.com/Monadical-SAS/reflector/commit/91650ec65f65713faa7ee0dcfb75af427b7c4ba0))
* hide rooms settings instead of disabling ([#763](https://github.com/Monadical-SAS/reflector/issues/763)) ([3ad78be](https://github.com/Monadical-SAS/reflector/commit/3ad78be7628c0d029296b301a0e87236c76b7598))
* return participant emails from transcript endpoint ([#769](https://github.com/Monadical-SAS/reflector/issues/769)) ([d3a5cd1](https://github.com/Monadical-SAS/reflector/commit/d3a5cd12d2d0d9c32af2d5bd9322e030ef69b85d))

## [0.22.4](https://github.com/Monadical-SAS/reflector/compare/v0.22.3...v0.22.4) (2025-12-02)

### Bug Fixes

* Multitrack mixdown optimisation 2 ([#764](https://github.com/Monadical-SAS/reflector/issues/764)) ([bd5df1c](https://github.com/Monadical-SAS/reflector/commit/bd5df1ce2ebf35d7f3413b295e56937a9a28ef7b))

## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02)

### Bug Fixes

* align daily room settings ([#759](https://github.com/Monadical-SAS/reflector/issues/759)) ([28f87c0](https://github.com/Monadical-SAS/reflector/commit/28f87c09dc459846873d0dde65b03e3d7b2b9399))

## [0.22.2](https://github.com/Monadical-SAS/reflector/compare/v0.22.1...v0.22.2) (2025-12-02)

### Bug Fixes

* daily auto refresh fix ([#755](https://github.com/Monadical-SAS/reflector/issues/755)) ([fe47c46](https://github.com/Monadical-SAS/reflector/commit/fe47c46489c5aa0cc538109f7559cc9accb35c01))
* Skip mixdown for multitrack ([#760](https://github.com/Monadical-SAS/reflector/issues/760)) ([b51b7aa](https://github.com/Monadical-SAS/reflector/commit/b51b7aa9176c1a53ba57ad99f5e976c804a1e80c))

## [0.22.1](https://github.com/Monadical-SAS/reflector/compare/v0.22.0...v0.22.1) (2025-11-27)

### Bug Fixes

* participants update from daily ([#749](https://github.com/Monadical-SAS/reflector/issues/749)) ([7f0b728](https://github.com/Monadical-SAS/reflector/commit/7f0b728991c1b9f9aae702c96297eae63b561ef5))

## [0.22.0](https://github.com/Monadical-SAS/reflector/compare/v0.21.0...v0.22.0) (2025-11-26)

### Features

* Multitrack segmentation ([#747](https://github.com/Monadical-SAS/reflector/issues/747)) ([d63040e](https://github.com/Monadical-SAS/reflector/commit/d63040e2fdc07e7b272e85a39eb2411cd6a14798))

## [0.21.0](https://github.com/Monadical-SAS/reflector/compare/v0.20.0...v0.21.0) (2025-11-26)

### Features

* add transcript format parameter to GET endpoint ([#709](https://github.com/Monadical-SAS/reflector/issues/709)) ([f6ca075](https://github.com/Monadical-SAS/reflector/commit/f6ca07505f34483b02270a2ef3bd809e9d2e1045))

## [0.20.0](https://github.com/Monadical-SAS/reflector/compare/v0.19.0...v0.20.0) (2025-11-25)

### Features

* link transcript participants ([#737](https://github.com/Monadical-SAS/reflector/issues/737)) ([9bec398](https://github.com/Monadical-SAS/reflector/commit/9bec39808fc6322612d8b87e922a6f7901fc01c1))
* transcript restart script ([#742](https://github.com/Monadical-SAS/reflector/issues/742)) ([86d5e26](https://github.com/Monadical-SAS/reflector/commit/86d5e26224bb55a0f1cc785aeda52065bb92ee6f))

## [0.19.0](https://github.com/Monadical-SAS/reflector/compare/v0.18.0...v0.19.0) (2025-11-25)

### Features

* dailyco api module ([#725](https://github.com/Monadical-SAS/reflector/issues/725)) ([4287f8b](https://github.com/Monadical-SAS/reflector/commit/4287f8b8aeee60e51db7539f4dcbda5f6e696bd8))
* dailyco poll ([#730](https://github.com/Monadical-SAS/reflector/issues/730)) ([8e438ca](https://github.com/Monadical-SAS/reflector/commit/8e438ca285152bd48fdc42767e706fb448d3525c))
* multitrack cli ([#735](https://github.com/Monadical-SAS/reflector/issues/735)) ([11731c9](https://github.com/Monadical-SAS/reflector/commit/11731c9d38439b04e93b1c3afbd7090bad11a11f))

### Bug Fixes

* default platform fix ([#736](https://github.com/Monadical-SAS/reflector/issues/736)) ([c442a62](https://github.com/Monadical-SAS/reflector/commit/c442a627873ca667656eeaefb63e54ab10b8d19e))
* parakeet vad not getting the end timestamp ([#728](https://github.com/Monadical-SAS/reflector/issues/728)) ([18ed713](https://github.com/Monadical-SAS/reflector/commit/18ed7133693653ef4ddac6c659a8c14b320d1657))
* start raw tracks recording ([#729](https://github.com/Monadical-SAS/reflector/issues/729)) ([3e47c2c](https://github.com/Monadical-SAS/reflector/commit/3e47c2c0573504858e0d2e1798b6ed31f16b4a5d))

## [0.18.0](https://github.com/Monadical-SAS/reflector/compare/v0.17.0...v0.18.0) (2025-11-14)

### Features

* daily QOL: participants dictionary ([#721](https://github.com/Monadical-SAS/reflector/issues/721)) ([b20cad7](https://github.com/Monadical-SAS/reflector/commit/b20cad76e69fb6a76405af299a005f1ddcf60eae))

### Bug Fixes

* add proccessing page to file upload and reprocessing ([#650](https://github.com/Monadical-SAS/reflector/issues/650)) ([28a7258](https://github.com/Monadical-SAS/reflector/commit/28a7258e45317b78e60e6397be2bc503647eaace))
* copy transcript ([#674](https://github.com/Monadical-SAS/reflector/issues/674)) ([a9a4f32](https://github.com/Monadical-SAS/reflector/commit/a9a4f32324f66c838e081eee42bb9502f38c1db1))

## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13)

### Features

* add API key management UI ([#716](https://github.com/Monadical-SAS/reflector/issues/716)) ([372202b](https://github.com/Monadical-SAS/reflector/commit/372202b0e1a86823900b0aa77be1bfbc2893d8a1))
* daily.co support as alternative to whereby ([#691](https://github.com/Monadical-SAS/reflector/issues/691)) ([1473fd8](https://github.com/Monadical-SAS/reflector/commit/1473fd82dc472c394cbaa2987212ad662a74bcac))

## [0.16.0](https://github.com/Monadical-SAS/reflector/compare/v0.15.0...v0.16.0) (2025-10-24)

### Features

* search date filter ([#710](https://github.com/Monadical-SAS/reflector/issues/710)) ([962c40e](https://github.com/Monadical-SAS/reflector/commit/962c40e2b6428ac42fd10aea926782d7a6f3f902))

## [0.15.0](https://github.com/Monadical-SAS/reflector/compare/v0.14.0...v0.15.0) (2025-10-20)

### Features

* api tokens ([#705](https://github.com/Monadical-SAS/reflector/issues/705)) ([9a258ab](https://github.com/Monadical-SAS/reflector/commit/9a258abc0209b0ac3799532a507ea6a9125d703a))

## [0.14.0](https://github.com/Monadical-SAS/reflector/compare/v0.13.1...v0.14.0) (2025-10-08)

### Features

* Add calendar event data to transcript webhook payload ([#689](https://github.com/Monadical-SAS/reflector/issues/689)) ([5f6910e](https://github.com/Monadical-SAS/reflector/commit/5f6910e5131b7f28f86c9ecdcc57fed8412ee3cd))
* container build for www / github ([#672](https://github.com/Monadical-SAS/reflector/issues/672)) ([969bd84](https://github.com/Monadical-SAS/reflector/commit/969bd84fcc14851d1a101412a0ba115f1b7cde82))
* docker-compose for production frontend ([#664](https://github.com/Monadical-SAS/reflector/issues/664)) ([5bf64b5](https://github.com/Monadical-SAS/reflector/commit/5bf64b5a41f64535e22849b4bb11734d4dbb4aae))

### Bug Fixes

* restore feature boolean logic ([#671](https://github.com/Monadical-SAS/reflector/issues/671)) ([3660884](https://github.com/Monadical-SAS/reflector/commit/36608849ec64e953e3be456172502762e3c33df9))
* security review ([#656](https://github.com/Monadical-SAS/reflector/issues/656)) ([5d98754](https://github.com/Monadical-SAS/reflector/commit/5d98754305c6c540dd194dda268544f6d88bfaf8))
* update transcript list on reprocess ([#676](https://github.com/Monadical-SAS/reflector/issues/676)) ([9a71af1](https://github.com/Monadical-SAS/reflector/commit/9a71af145ee9b833078c78d0c684590ab12e9f0e))
* upgrade nemo toolkit ([#678](https://github.com/Monadical-SAS/reflector/issues/678)) ([eef6dc3](https://github.com/Monadical-SAS/reflector/commit/eef6dc39037329b65804297786d852dddb0557f9))

## [0.13.1](https://github.com/Monadical-SAS/reflector/compare/v0.13.0...v0.13.1) (2025-09-22)

### Bug Fixes

* TypeError on not all arguments converted during string formatting in logger ([#667](https://github.com/Monadical-SAS/reflector/issues/667)) ([565a629](https://github.com/Monadical-SAS/reflector/commit/565a62900f5a02fc946b68f9269a42190ed70ab6))

## [0.13.0](https://github.com/Monadical-SAS/reflector/compare/v0.12.1...v0.13.0) (2025-09-19)

### Features

* room form edit with enter ([#662](https://github.com/Monadical-SAS/reflector/issues/662)) ([47716f6](https://github.com/Monadical-SAS/reflector/commit/47716f6e5ddee952609d2fa0ffabdfa865286796))

### Bug Fixes

* invalid cleanup call ([#660](https://github.com/Monadical-SAS/reflector/issues/660)) ([0abcebf](https://github.com/Monadical-SAS/reflector/commit/0abcebfc9491f87f605f21faa3e53996fafedd9a))

## [0.12.1](https://github.com/Monadical-SAS/reflector/compare/v0.12.0...v0.12.1) (2025-09-17)

### Bug Fixes

* production blocked because having existing meeting with room_id null ([#657](https://github.com/Monadical-SAS/reflector/issues/657)) ([870e860](https://github.com/Monadical-SAS/reflector/commit/870e8605171a27155a9cbee215eeccb9a8d6c0a2))

## [0.12.0](https://github.com/Monadical-SAS/reflector/compare/v0.11.0...v0.12.0) (2025-09-17)

### Features

* calendar integration ([#608](https://github.com/Monadical-SAS/reflector/issues/608)) ([6f680b5](https://github.com/Monadical-SAS/reflector/commit/6f680b57954c688882c4ed49f40f161c52a00a24))
* self-hosted gpu api ([#636](https://github.com/Monadical-SAS/reflector/issues/636)) ([ab859d6](https://github.com/Monadical-SAS/reflector/commit/ab859d65a6bded904133a163a081a651b3938d42))

### Bug Fixes

* ignore player hotkeys for text inputs ([#646](https://github.com/Monadical-SAS/reflector/issues/646)) ([fa049e8](https://github.com/Monadical-SAS/reflector/commit/fa049e8d068190ce7ea015fd9fcccb8543f54a3f))

## [0.11.0](https://github.com/Monadical-SAS/reflector/compare/v0.10.0...v0.11.0) (2025-09-16)

### Features

* remove profanity filter that was there for conference ([#652](https://github.com/Monadical-SAS/reflector/issues/652)) ([b42f7cf](https://github.com/Monadical-SAS/reflector/commit/b42f7cfc606783afcee792590efcc78b507468ab))

### Bug Fixes

* zulip and consent handler on the file pipeline ([#645](https://github.com/Monadical-SAS/reflector/issues/645)) ([5f143fe](https://github.com/Monadical-SAS/reflector/commit/5f143fe3640875dcb56c26694254a93189281d17))
* zulip stream and topic selection in share dialog ([#644](https://github.com/Monadical-SAS/reflector/issues/644)) ([c546e69](https://github.com/Monadical-SAS/reflector/commit/c546e69739e68bb74fbc877eb62609928e5b8de6))

## [0.10.0](https://github.com/Monadical-SAS/reflector/compare/v0.9.0...v0.10.0) (2025-09-11)
@@ -151,7 +151,7 @@ All endpoints prefixed `/v1/`:
**Frontend** (`www/.env`):
- `NEXTAUTH_URL`, `NEXTAUTH_SECRET` - Authentication configuration
- `REFLECTOR_API_URL` - Backend API endpoint
- `NEXT_PUBLIC_REFLECTOR_API_URL` - Backend API endpoint
- `REFLECTOR_DOMAIN_CONFIG` - Feature flags and domain settings

## Testing Strategy
@@ -1,22 +0,0 @@
# Reflector Caddyfile
# Replace example.com with your actual domains
# CORS is handled by the backend - Caddy just proxies
#
# For environment variable substitution, set:
#   FRONTEND_DOMAIN=app.example.com
#   API_DOMAIN=api.example.com
#   AUTHENTIK_DOMAIN=authentik.example.com (optional, for authentication)
# Or edit this file directly with your domains.

{$FRONTEND_DOMAIN:app.example.com} {
	reverse_proxy web:3000
}

{$API_DOMAIN:api.example.com} {
	reverse_proxy server:1250
}

# Uncomment if using Authentik for authentication (see auth-setup.md)
# {$AUTHENTIK_DOMAIN:authentik.example.com} {
#   reverse_proxy authentik-server-1:9000
# }
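Once the domains are filled in, the file can be sanity-checked before serving traffic. A minimal sketch (assumes a local caddy binary; the env vars mirror the substitution defaults documented above):

```bash
# Validate the Caddyfile without starting the server
FRONTEND_DOMAIN=app.example.com API_DOMAIN=api.example.com \
  caddy validate --config Caddyfile
```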
README.md (31 changes)
@@ -168,19 +168,6 @@ You can manually process an audio file by calling the process tool:
uv run python -m reflector.tools.process path/to/audio.wav
```

## Reprocessing any transcription

```bash
uv run -m reflector.tools.process_transcript 81ec38d1-9dd7-43d2-b3f8-51f4d34a07cd --sync
```

## Build-time env variables

Next.js projects typically rely on NEXT_PUBLIC_-prefixed build-time variables. We don't use those, because we need to serve a customizable prebuilt Docker container.

Instead, all variables are runtime. Variables needed by the frontend are served to the frontend app at initial render.

It also means there's no static prebuild and no static JS/HTML files to serve.
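Since configuration is resolved at runtime, one prebuilt image can serve every environment. A minimal sketch (the image name is taken from the production compose file further down; the variable values are illustrative only):

```bash
# Same prebuilt frontend image, reconfigured purely via runtime env vars
docker run --rm -p 3000:3000 \
  -e NEXT_PUBLIC_FEATURE_ROOMS=true \
  -e NEXT_PUBLIC_REFLECTOR_API_URL=https://api.example.com \
  monadicalsas/reflector-frontend:latest
```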
## Feature Flags
@@ -190,24 +177,24 @@ Reflector uses environment variable-based feature flags to control application f
| Feature Flag | Environment Variable |
|-------------|---------------------|
| `requireLogin` | `FEATURE_REQUIRE_LOGIN` |
| `privacy` | `FEATURE_PRIVACY` |
| `browse` | `FEATURE_BROWSE` |
| `sendToZulip` | `FEATURE_SEND_TO_ZULIP` |
| `rooms` | `FEATURE_ROOMS` |
| `requireLogin` | `NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN` |
| `privacy` | `NEXT_PUBLIC_FEATURE_PRIVACY` |
| `browse` | `NEXT_PUBLIC_FEATURE_BROWSE` |
| `sendToZulip` | `NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP` |
| `rooms` | `NEXT_PUBLIC_FEATURE_ROOMS` |

### Setting Feature Flags

Feature flags are controlled via environment variables using the pattern `FEATURE_{FEATURE_NAME}` where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.
Feature flags are controlled via environment variables using the pattern `NEXT_PUBLIC_FEATURE_{FEATURE_NAME}` where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.

**Examples:**
```bash
# Enable user authentication requirement
FEATURE_REQUIRE_LOGIN=true
NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN=true

# Disable browse functionality
FEATURE_BROWSE=false
NEXT_PUBLIC_FEATURE_BROWSE=false

# Enable Zulip integration
FEATURE_SEND_TO_ZULIP=true
NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP=true
```
@@ -39,7 +39,7 @@ services:
    ports:
      - 6379:6379
  web:
    image: node:22-alpine
    image: node:18
    ports:
      - "3000:3000"
    command: sh -c "corepack enable && pnpm install && pnpm dev"

@@ -50,8 +50,6 @@ services:

      - /app/node_modules
    env_file:
      - ./www/.env.local
    environment:
      - NODE_ENV=development

  postgres:
    image: postgres:17
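With corepack and pnpm handled inside the container, the dev frontend can be brought up in isolation. A minimal sketch (assumes `./www/.env.local` exists, as the `env_file` entry requires):

```bash
# Start only the web dev service (and anything it depends_on)
docker compose up web
```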
@@ -1,113 +0,0 @@
# Production Docker Compose configuration
# Usage: docker compose -f docker-compose.prod.yml up -d
#
# Prerequisites:
# 1. Copy .env.example to .env and configure for both server/ and www/
# 2. Copy Caddyfile.example to Caddyfile and edit with your domains
# 3. Deploy Modal GPU functions (see gpu/modal_deployments/deploy-all.sh)

services:
  web:
    image: monadicalsas/reflector-frontend:latest
    restart: unless-stopped
    env_file:
      - ./www/.env
    pull_policy: always
    environment:
      - KV_URL=redis://redis:6379
    depends_on:
      - redis

  server:
    image: monadicalsas/reflector-backend:latest
    restart: unless-stopped
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: server
    depends_on:
      - postgres
      - redis
    volumes:
      - server_data:/app/data
      - ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro

  worker:
    image: monadicalsas/reflector-backend:latest
    restart: unless-stopped
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: worker
    depends_on:
      - postgres
      - redis
    volumes:
      - server_data:/app/data
      - ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro

  beat:
    image: monadicalsas/reflector-backend:latest
    restart: unless-stopped
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: beat
    depends_on:
      - postgres
      - redis

  redis:
    image: redis:7.2-alpine
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 3s
      retries: 3
    volumes:
      - redis_data:/data

  postgres:
    image: postgres:17-alpine
    restart: unless-stopped
    environment:
      POSTGRES_USER: reflector
      POSTGRES_PASSWORD: reflector
      POSTGRES_DB: reflector
    volumes:
      - postgres_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U reflector"]
      interval: 30s
      timeout: 3s
      retries: 3

  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy_data:/data
      - caddy_config:/config
    depends_on:
      - web
      - server

  docs:
    build: ./docs
    restart: unless-stopped

volumes:
  redis_data:
  postgres_data:
  server_data:
  caddy_data:
  caddy_config:

networks:
  default:
    attachable: true
docs/.gitignore (vendored, deleted, 20 lines)
@@ -1,20 +0,0 @@
# Dependencies
/node_modules

# Production
/build

# Generated files
.docusaurus
.cache-loader

# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*
@@ -1,39 +0,0 @@
FROM node:18-alpine AS builder
WORKDIR /app

# Install curl for fetching OpenAPI spec
RUN apk add --no-cache curl

# Copy package files
COPY package*.json ./

# Install dependencies
RUN npm ci

# Copy source
COPY . .

# Fetch OpenAPI spec from production API
ARG OPENAPI_URL=https://api-reflector.monadical.com/openapi.json
RUN mkdir -p ./static && curl -sf "${OPENAPI_URL}" -o ./static/openapi.json || echo '{}' > ./static/openapi.json

# Fix docusaurus config: change onBrokenLinks to 'warn' for Docker build
RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts

# Build static site (skip prebuild hook by calling docusaurus directly)
RUN npx docusaurus build

# Production image
FROM nginx:alpine

# Copy built static files
COPY --from=builder /app/build /usr/share/nginx/html

# Healthcheck for container orchestration
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD wget --no-verbose --tries=1 --spider http://localhost/ || exit 1

# Expose port
EXPOSE 80

CMD ["nginx", "-g", "daemon off;"]
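The deleted docs image was self-contained, so it can still be built and smoke-tested on its own. A minimal sketch (the tag name is arbitrary):

```bash
# Build the Docusaurus site image and serve it on localhost:8080
docker build -t reflector-docs ./docs
docker run --rm -p 8080:80 reflector-docs
```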
@@ -1,41 +0,0 @@
# Website

This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.

### Installation

```
$ yarn
```

### Local Development

```
$ yarn start
```

This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.

### Build

```
$ yarn build
```

This command generates static content into the `build` directory and can be served using any static contents hosting service.

### Deployment

Using SSH:

```
$ USE_SSH=true yarn deploy
```

Not using SSH:

```
$ GIT_USER=<Your GitHub username> yarn deploy
```

If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
docs/TODO.md (deleted, 170 lines)
@@ -1,170 +0,0 @@
# Documentation TODO List

This file tracks information needed from the user to complete the documentation.

## Required Information

### Processing Times & Costs

Please provide actual numbers for:

- [ ] **Modal.com GPU Costs**
  - Cost per hour of audio for Whisper transcription
  - Cost per hour of audio for Pyannote diarization
  - Cost per hour of audio for Seamless-M4T translation
  - Typical GPU instance used (T4, A10, etc.)

- [ ] **RunPod LLM Costs**
  - Cost per 1000 tokens for summarization
  - Model used (phi-4-unsloth-bnb-4bit)
  - RTX 4000 Ada instance cost per hour

- [ ] **AWS S3 Storage**
  - Cost per GB per month
  - Data transfer costs
  - Typical storage requirements per hour of audio

- [ ] **Whereby API**
  - Monthly cost structure
  - API call limits
  - Room participant limits

- [ ] **Actual Processing Times**
  - Whisper tiny model: X minutes per hour of audio
  - Whisper base model: X minutes per hour of audio
  - Whisper large-v3 model: X minutes per hour of audio
  - Diarization: X minutes per hour of audio
  - Translation: X minutes per hour of audio

### Screenshots Needed

Location: `/docs/static/screenshots/`

Please provide screenshots of:

- [ ] **Dashboard Overview** - Main dashboard showing recent transcripts
- [ ] **Live Transcription** - Active transcription in progress
- [ ] **Meeting Room Interface** - Whereby room with participants
- [ ] **Transcript with Diarization** - Showing speaker labels
- [ ] **Settings Page** - Configuration options
- [ ] **API Documentation** - OpenAPI/Swagger interface
- [ ] **File Upload Interface** - Drag and drop upload
- [ ] **Translation View** - Showing original and translated text
- [ ] **Summary View** - Generated summary and topics

### Setup Screenshots

Please provide step-by-step screenshots for:

- [ ] **Modal.com Setup**
  - Creating account
  - Getting API keys
  - Deploying functions

- [ ] **Whereby Configuration**
  - Creating developer account
  - Getting API credentials
  - Setting up rooms

- [ ] **AWS S3 Setup**
  - Creating bucket
  - Setting permissions
  - Getting access keys

- [ ] **Authentik Integration**
  - Adding application
  - Configuring OAuth
  - Setting up users

### Technical Details

Please provide specific values for:

- [ ] **WebRTC Configuration**
  - Exact UDP port range used (e.g., 10000-20000)
  - STUN server configuration (if any)
  - ICE candidate gathering timeout
  - https://docs.daily.co/guides/privacy-and-security/corporate-firewalls-nats-allowed-ip-list

- [ ] **Worker Configuration**
  - Default Celery worker count
  - Worker memory limits
  - Queue priorities

- [ ] **Redis Requirements**
  - Typical memory usage
  - Persistence configuration
  - Eviction policies

- [ ] **PostgreSQL**
  - Expected database growth (MB per hour of audio)
  - Recommended connection pool size
  - Backup strategy

- [ ] **Performance Metrics**
  - Average transcription accuracy (WER)
  - Average diarization accuracy (DER)
  - Translation quality scores
  - Typical latency for live streaming

### Configuration Examples

Please provide real-world examples for:

- [ ] **Production .env file** (sanitized)
- [ ] **Caddy configuration** for production
- [ ] **Docker compose** for production deployment
- [ ] **Nginx configuration** (if alternative to Caddy)

### API Examples

Please provide:

- [ ] **Sample API requests** for common operations
- [ ] **WebSocket message examples**
- [ ] **Webhook payload examples**
- [ ] **Error response examples**

## How to Add Information

1. **For text information**: Edit the relevant markdown files in `/docs/docs/`
2. **For screenshots**: Add to `/docs/static/screenshots/` and reference in docs
3. **For code examples**: Add to documentation with proper syntax highlighting

## Priority Items

High priority (blocks documentation completeness):
1. Modal.com costs and setup steps
2. Basic screenshots (dashboard, transcription)
3. Docker deployment configuration

Medium priority (enhances documentation):
1. Performance metrics
2. Advanced configuration examples
3. Troubleshooting scenarios

Low priority (nice to have):
1. Video tutorials
2. Architecture diagrams
3. Benchmark comparisons

## Documentation Structure

Once information is provided, update these files:
- `/docs/docs/installation/modal-setup.md` - Add Modal.com setup screenshots
- `/docs/docs/installation/whereby-setup.md` - Add Whereby configuration steps
- `/docs/docs/reference/configuration.md` - Add environment variable details
- `/docs/docs/pipelines/file-pipeline.md` - Add actual processing times
- `/docs/docs/pipelines/live-pipeline.md` - Add latency measurements

## Notes

- Replace placeholder values with actual data
- Ensure all sensitive information is sanitized
- Test all configuration examples before documenting
- Verify all costs are up-to-date

---

Last updated: 2025-08-20
Contact: [Your Email]
@@ -1,777 +0,0 @@
#!/bin/bash

# Create directory structure
mkdir -p docs/concepts
mkdir -p docs/installation
mkdir -p docs/pipelines
mkdir -p docs/reference/architecture
mkdir -p docs/reference/processors
mkdir -p docs/reference/api

# Create all documentation files with content
echo "Creating documentation files..."

# Concepts - Modes
cat > docs/concepts/modes.md << 'EOF'
---
sidebar_position: 2
title: Operating Modes
---

# Operating Modes

Reflector operates in two distinct modes to accommodate different use cases and security requirements.

## Public Mode

Public mode provides immediate access to core transcription features without requiring authentication.

### Features Available
- **File Upload**: Process audio files up to 2GB
- **Live Transcription**: Stream audio from microphone
- **Basic Processing**: Transcription and diarization
- **Temporary Storage**: Results available for 24 hours

### Limitations
- No persistent storage
- No meeting rooms
- Limited to single-user sessions
- No team collaboration features

### Use Cases
- Quick transcription needs
- Testing and evaluation
- Individual users
- Public demonstrations

## Private Mode

Private mode unlocks the full potential of Reflector with authentication and persistent storage.

### Additional Features
- **Virtual Meeting Rooms**: Whereby integration
- **Team Collaboration**: Share transcripts with team
- **Persistent Storage**: Long-term transcript archive
- **Advanced Analytics**: Meeting insights and trends
- **Custom Integration**: Webhooks and API access
- **User Management**: Role-based access control

### Authentication Options

#### Authentik Integration
Enterprise-grade SSO with support for:
- SAML 2.0
- OAuth 2.0 / OIDC
- LDAP / Active Directory
- Multi-factor authentication

#### JWT Authentication
Stateless token-based auth for:
- API access
- Service-to-service communication
- Mobile applications

### Room Management

Virtual rooms provide dedicated spaces for meetings:
- **Persistent URLs**: Same link for recurring meetings
- **Access Control**: Invite-only or open rooms
- **Recording Consent**: Automatic consent management
- **Custom Settings**: Per-room configuration

## Mode Selection

The mode is determined by your deployment configuration:

```yaml
# Public Mode (no authentication)
REFLECTOR_AUTH_BACKEND=none

# Private Mode (with authentication)
REFLECTOR_AUTH_BACKEND=jwt
# or
REFLECTOR_AUTH_BACKEND=authentik
```

## Feature Comparison

| Feature | Public Mode | Private Mode |
|---------|------------|--------------|
| File Upload | ✅ | ✅ |
| Live Transcription | ✅ | ✅ |
| Speaker Diarization | ✅ | ✅ |
| Translation | ✅ | ✅ |
| Summarization | ✅ | ✅ |
| Meeting Rooms | ❌ | ✅ |
| Persistent Storage | ❌ | ✅ |
| Team Collaboration | ❌ | ✅ |
| API Access | Limited | Full |
| User Management | ❌ | ✅ |
| Custom Branding | ❌ | ✅ |
| Analytics | ❌ | ✅ |
| Webhooks | ❌ | ✅ |

## Security Considerations

### Public Mode Security
- Rate limiting to prevent abuse
- File size restrictions
- Automatic cleanup of old data
- No PII storage

### Private Mode Security
- Encrypted data storage
- Audit logging
- Session management
- Access control lists
- Data retention policies

## Choosing the Right Mode

### Choose Public Mode if:
- You need quick, one-time transcriptions
- You're evaluating Reflector
- You don't need persistent storage
- You're processing non-sensitive content

### Choose Private Mode if:
- You need team collaboration
- You require persistent storage
- You're processing sensitive content
- You need meeting room functionality
- You want advanced analytics
EOF
# Concepts - Independence
cat > docs/concepts/independence.md << 'EOF'
---
sidebar_position: 3
title: Data Independence
---

# Data Independence & Privacy

Reflector is designed with privacy and data independence as core principles, giving you complete control over your data and processing.

## Privacy by Design

### No Third-Party Data Sharing

Your audio and transcripts are never shared with third parties:
- **Local Processing**: All ML models can run on your infrastructure
- **No Training on User Data**: Your content is never used to improve models
- **Isolated Processing**: Each transcript is processed in isolation
- **No Analytics Tracking**: No usage analytics sent to external services

### Data Ownership

You maintain complete ownership of all data:
- **Export Anytime**: Download all your transcripts and audio
- **Delete on Demand**: Permanent deletion with no recovery
- **API Access**: Full programmatic access to your data
- **No Vendor Lock-in**: Standard formats for easy migration

## Processing Transparency

### What Happens to Your Audio

1. **Upload/Stream**: Audio received by your server
2. **Temporary Storage**: Stored only for processing duration
3. **Processing**: ML models process audio locally or on Modal
4. **Results Storage**: Transcripts stored in your database
5. **Cleanup**: Original audio deleted (unless configured otherwise)

### Local vs Cloud Processing

#### Local Processing
When configured for local processing:
- All models run on your hardware
- No data leaves your infrastructure
- Complete air-gap capability
- Higher hardware requirements

#### Modal.com Processing
When using Modal for GPU acceleration:
- Audio chunks sent to Modal for processing
- Processed immediately and deleted
- No long-term storage on Modal
- Modal's security: SOC 2 Type II compliant

### Data Retention

Default retention policies:
- **Public Mode**: 24 hours then automatic deletion
- **Private Mode**: Configurable (default: indefinite)
- **Audio Files**: Deleted after processing (configurable)
- **Transcripts**: Retained based on policy

## Compliance Features

### GDPR Compliance

- **Right to Access**: Export all user data
- **Right to Deletion**: Permanent data removal
- **Data Portability**: Standard export formats
- **Privacy by Default**: Minimal data collection

### HIPAA Considerations

For healthcare deployments:
- **Self-hosted Option**: Complete infrastructure control
- **Encryption**: At rest and in transit
- **Audit Logging**: Complete access trail
- **Access Controls**: Role-based permissions

### Industry Standards

- **TLS 1.3**: Modern encryption for data in transit
- **AES-256**: Encryption for data at rest
- **JWT Tokens**: Secure, stateless authentication
- **OWASP Guidelines**: Security best practices

## Self-Hosted Deployment

### Complete Independence

Self-hosting provides maximum control:
- **Your Infrastructure**: Run on your servers
- **Your Network**: No external connections required
- **Your Policies**: Implement custom retention
- **Your Compliance**: Meet specific requirements

### Air-Gap Capability

Reflector can run completely offline:
1. Download all models during setup
2. Configure for local processing only
3. Disable all external integrations
4. Run in isolated network environment

## Data Flow Control

### Configurable Processing

Control where each step happens:

```yaml
# All local processing
TRANSCRIPT_BACKEND=local
DIARIZATION_BACKEND=local
TRANSLATION_BACKEND=local

# Hybrid approach
TRANSCRIPT_BACKEND=modal    # Fast GPU processing
DIARIZATION_BACKEND=local   # Sensitive speaker data
TRANSLATION_BACKEND=modal   # Non-sensitive translation
```

### Storage Options

Choose where data is stored:
- **Local Filesystem**: Complete control
- **PostgreSQL**: Self-hosted database
- **S3-Compatible**: MinIO or AWS with encryption
- **Hybrid**: Different storage for different data types

## Security Architecture

### Defense in Depth

Multiple layers of security:
1. **Network Security**: Firewalls and VPNs
2. **Application Security**: Input validation and sanitization
3. **Data Security**: Encryption and access controls
4. **Operational Security**: Logging and monitoring

### Zero Trust Principles

- **Verify Everything**: All requests authenticated
- **Least Privilege**: Minimal permissions granted
- **Assume Breach**: Design for compromise containment
- **Encrypt Everything**: No plaintext transmission

## Audit and Compliance

### Audit Logging

Comprehensive logging of:
- **Access Events**: Who accessed what and when
- **Processing Events**: What was processed and how
- **Configuration Changes**: System modifications
- **Security Events**: Failed authentication attempts

### Compliance Reporting

Generate reports for:
- **Data Processing**: What data was processed
- **Data Access**: Who accessed the data
- **Data Retention**: What was retained or deleted
- **Security Events**: Security-related incidents

## Best Practices

### For Maximum Privacy

1. **Self-host** all components
2. **Use local processing** for all models
3. **Implement short retention** periods
4. **Encrypt all storage** at rest
5. **Use VPN** for all connections
6. **Regular audits** of access logs

### For Balanced Approach

1. **Self-host core services** (database, API)
2. **Use Modal for processing** (faster, cost-effective)
3. **Implement encryption** everywhere
4. **Regular backups** with encryption
5. **Monitor access** patterns
EOF
# Concepts - Pipeline
cat > docs/concepts/pipeline.md << 'EOF'
---
sidebar_position: 4
title: Processing Pipeline
---

# Processing Pipeline

Reflector uses a sophisticated pipeline architecture to process audio efficiently and accurately.

## Pipeline Overview

The processing pipeline consists of modular components that can be combined and configured based on your needs:

```mermaid
graph LR
    A[Audio Input] --> B[Pre-processing]
    B --> C[Chunking]
    C --> D[Transcription]
    D --> E[Diarization]
    E --> F[Alignment]
    F --> G[Post-processing]
    G --> H[Output]
```

## Pipeline Components

### Audio Input

Accepts various input sources:
- **File Upload**: MP3, WAV, M4A, WebM, MP4
- **WebRTC Stream**: Live browser audio
- **Recording Integration**: Whereby recordings
- **API Upload**: Direct API submission

### Pre-processing

Prepares audio for optimal processing:
- **Format Conversion**: Convert to 16kHz mono WAV
- **Normalization**: Adjust volume to -23 LUFS
- **Noise Reduction**: Optional background noise removal
- **Validation**: Check duration and quality

### Chunking

Splits audio for parallel processing:
- **Fixed Size**: 30-second chunks by default
- **Overlap**: 1-second overlap for continuity
- **Smart Boundaries**: Attempt to split at silence
- **Metadata**: Track chunk positions

### Transcription

Converts speech to text:
- **Model Selection**: Whisper or Parakeet
- **Language Detection**: Automatic or specified
- **Timestamp Generation**: Word-level timing
- **Confidence Scores**: Quality indicators

### Diarization

Identifies different speakers:
- **Voice Activity Detection**: Find speech segments
- **Speaker Embedding**: Extract voice characteristics
- **Clustering**: Group similar voices
- **Label Assignment**: Assign speaker IDs

### Alignment

Merges all processing results:
- **Chunk Assembly**: Combine transcription chunks
- **Speaker Mapping**: Align speakers with text
- **Overlap Resolution**: Handle chunk boundaries
- **Timeline Creation**: Build unified timeline

### Post-processing

Enhances the final output:
- **Formatting**: Apply punctuation and capitalization
- **Translation**: Convert to target languages
- **Summarization**: Generate concise summaries
- **Topic Extraction**: Identify key themes
- **Action Items**: Extract tasks and decisions

## Processing Modes

### Batch Processing

For uploaded files:
- Optimized for throughput
- Parallel chunk processing
- Higher accuracy models
- Complete file analysis

### Stream Processing

For live audio:
- Optimized for latency
- Sequential processing
- Real-time feedback
- Progressive results

### Hybrid Processing

For meetings:
- Stream during meeting
- Batch after completion
- Best of both modes
- Maximum accuracy

## Pipeline Configuration

### Model Selection

Choose models based on requirements:

```python
# High accuracy (slower)
config = {
    "transcription_model": "whisper-large-v3",
    "diarization_model": "pyannote-3.1",
    "translation_model": "seamless-m4t-large"
}

# Balanced (default)
config = {
    "transcription_model": "whisper-base",
    "diarization_model": "pyannote-3.1",
    "translation_model": "seamless-m4t-medium"
}

# Fast processing
config = {
    "transcription_model": "whisper-tiny",
    "diarization_model": "pyannote-3.1-fast",
    "translation_model": "seamless-m4t-small"
}
```

### Processing Options

Customize pipeline behavior:

```yaml
# Parallel processing
max_parallel_chunks: 10
chunk_size_seconds: 30
chunk_overlap_seconds: 1

# Quality settings
enable_noise_reduction: true
enable_normalization: true
min_speech_confidence: 0.5

# Post-processing
enable_translation: true
target_languages: ["es", "fr", "de"]
enable_summarization: true
summary_length: "medium"
```

## Performance Characteristics

### Processing Times

For 1 hour of audio:

| Pipeline Config | Processing Time | Accuracy |
|----------------|-----------------|----------|
| Fast | 2-3 minutes | 85-90% |
| Balanced | 5-8 minutes | 92-95% |
| High Accuracy | 15-20 minutes | 95-98% |

### Resource Usage

| Component | CPU Usage | Memory | GPU |
|-----------|-----------|---------|-----|
| Transcription | Medium | 2-4 GB | Required |
| Diarization | High | 4-8 GB | Required |
| Translation | Low | 2-3 GB | Optional |
| Post-processing | Low | 1-2 GB | Not needed |

## Pipeline Orchestration

### Celery Task Chain

The pipeline is orchestrated using Celery:

```python
chain = (
    chunk_audio.s(audio_id) |
    group(transcribe_chunk.s(chunk) for chunk in chunks) |
    merge_transcriptions.s() |
    diarize_audio.s() |
    align_speakers.s() |
    post_process.s()
)
```

### Error Handling

Robust error recovery:
- **Automatic Retry**: Failed tasks retry up to 3 times
- **Partial Recovery**: Continue with successful chunks
- **Fallback Models**: Use alternative models on failure
- **Error Reporting**: Detailed error messages

### Progress Tracking

Real-time progress updates:
- **Chunk Progress**: Track individual chunk processing
- **Overall Progress**: Percentage completion
- **ETA Calculation**: Estimated completion time
- **WebSocket Updates**: Live progress to clients

## Optimization Strategies

### GPU Utilization

Maximize GPU efficiency:
- **Batch Processing**: Process multiple chunks together
- **Model Caching**: Keep models loaded in memory
- **Dynamic Batching**: Adjust batch size based on GPU memory
- **Multi-GPU Support**: Distribute across available GPUs

### Memory Management

Efficient memory usage:
- **Streaming Processing**: Process large files in chunks
- **Garbage Collection**: Clean up after each chunk
- **Memory Limits**: Prevent out-of-memory errors
- **Disk Caching**: Use disk for large intermediate results

### Network Optimization

Minimize network overhead:
- **Compression**: Compress audio before transfer
- **CDN Integration**: Use CDN for static assets
- **Connection Pooling**: Reuse network connections
- **Parallel Uploads**: Multiple concurrent uploads

## Quality Assurance

### Accuracy Metrics

Monitor processing quality:
- **Word Error Rate (WER)**: Transcription accuracy
- **Diarization Error Rate (DER)**: Speaker identification accuracy
- **Translation BLEU Score**: Translation quality
- **Summary Coherence**: Summary quality metrics

### Validation Steps

Ensure output quality:
- **Confidence Thresholds**: Filter low-confidence segments
- **Consistency Checks**: Verify timeline consistency
- **Language Validation**: Ensure correct language detection
- **Format Validation**: Check output format compliance

## Advanced Features

### Custom Models

Use your own models:
- **Fine-tuned Whisper**: Domain-specific models
- **Custom Diarization**: Trained on your speakers
- **Specialized Post-processing**: Industry-specific formatting

### Pipeline Extensions

Add custom processing steps:
- **Sentiment Analysis**: Analyze emotional tone
- **Entity Extraction**: Identify people, places, organizations
- **Custom Metrics**: Calculate domain-specific metrics
- **Integration Hooks**: Call external services
EOF
|
||||
# Create installation documentation
|
||||
cat > docs/installation/overview.md << 'EOF'
|
||||
---
|
||||
sidebar_position: 1
|
||||
title: Installation Overview
|
||||
---
|
||||
|
||||
# Installation Overview
|
||||
|
||||
Reflector is designed for self-hosted deployment, giving you complete control over your infrastructure and data.
|
||||
|
||||
## Deployment Options
|
||||
|
||||
### Docker Deployment (Recommended)
|
||||
|
||||
The easiest way to deploy Reflector:
|
||||
- Pre-configured containers
|
||||
- Automated dependency management
|
||||
- Consistent environment
|
||||
- Easy updates
|
||||
|
||||
### Manual Installation
|
||||
|
||||
For custom deployments:
|
||||
- Greater control over configuration
|
||||
- Integration with existing infrastructure
|
||||
- Custom optimization options
|
||||
- Development environments
|
||||
|
||||
## Requirements
|
||||
|
||||
### System Requirements
|
||||
|
||||
**Minimum Requirements:**
|
||||
- CPU: 4 cores
|
||||
- RAM: 8 GB
|
||||
- Storage: 50 GB
|
||||
- OS: Ubuntu 20.04+ or similar Linux
|
||||
|
||||
**Recommended Requirements:**
|
||||
- CPU: 8+ cores
|
||||
- RAM: 16 GB
|
||||
- Storage: 100 GB SSD
|
||||
- GPU: NVIDIA GPU with 8GB+ VRAM (for local processing)
|
||||
|
||||
### Network Requirements
|
||||
|
||||
- Public IP address (for WebRTC)
|
||||
- Ports: 80, 443, 8000, 3000
|
||||
- Domain name (for SSL)
|
||||
- SSL certificate (Let's Encrypt supported)
|
||||
|
||||
## Required Services
|
||||
|
||||
### Core Services
|
||||
|
||||
These services are required for basic operation:
|
||||
|
||||
1. **PostgreSQL** - Primary database
|
||||
2. **Redis** - Message broker and cache
|
||||
3. **Docker** - Container runtime
|
||||
|
||||
### GPU Processing
|
||||
|
||||
Choose one:
|
||||
- **Modal.com** - Serverless GPU (recommended)
|
||||
- **Local GPU** - Self-hosted GPU processing
|
||||
|
||||
### Optional Services
|
||||
|
||||
Enhance functionality with:
|
||||
- **AWS S3** - Long-term storage
|
||||
- **Whereby** - Video conferencing rooms
|
||||
- **Authentik** - Enterprise authentication
|
||||
- **Zulip** - Chat integration
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
1. Clone the repository:
|
||||
```bash
|
||||
git clone https://github.com/monadical-sas/reflector.git
|
||||
cd reflector
|
||||
```
|
||||
|
||||
2. Navigate to docker directory:
|
||||
```bash
|
||||
cd docker
|
||||
```
|
||||
|
||||
3. Copy and configure environment:
|
||||
```bash
|
||||
cp .env.example .env
|
||||
# Edit .env with your settings
|
||||
```
|
||||
|
||||
4. Start services:
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
5. Access Reflector:
|
||||
- Frontend: https://your-domain.com
|
||||
- API: https://your-domain.com/api
|
||||
|
||||
## Configuration Overview
|
||||
|
||||
### Essential Configuration
|
||||
|
||||
```env
|
||||
# Database
|
||||
DATABASE_URL=postgresql://user:pass@localhost/reflector
|
||||
|
||||
# Redis
|
||||
REDIS_URL=redis://localhost:6379
|
||||
|
||||
# Modal.com (for GPU processing)
|
||||
TRANSCRIPT_MODAL_API_KEY=your-key
|
||||
DIARIZATION_MODAL_API_KEY=your-key
|
||||
|
||||
# Domain
|
||||
DOMAIN=your-domain.com
|
||||
```
|
||||
|
||||
### Security Configuration
|
||||
|
||||
```env
|
||||
# Authentication
|
||||
REFLECTOR_AUTH_BACKEND=jwt
|
||||
NEXTAUTH_SECRET=generate-strong-secret
|
||||
|
||||
# SSL (handled by Caddy)
|
||||
# Automatic with Let's Encrypt
|
||||
```
|
||||
|
||||
## Service Architecture
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Caddy Reverse Proxy] --> B[Frontend - Next.js]
|
||||
A --> C[Backend - FastAPI]
|
||||
C --> D[PostgreSQL]
|
||||
C --> E[Redis]
|
||||
C --> F[Celery Workers]
|
||||
F --> G[Modal.com GPU]
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. **Review Requirements**: [System Requirements](./requirements)
|
||||
2. **Docker Setup**: [Docker Deployment Guide](./docker-setup)
|
||||
3. **Configure Services**:
|
||||
- [Modal.com Setup](./modal-setup)
|
||||
- [Whereby Setup](./whereby-setup)
|
||||
- [AWS S3 Setup](./aws-setup)
|
||||
4. **Optional Services**:
|
||||
- [Authentik Setup](./authentik-setup)
|
||||
- [Zulip Setup](./zulip-setup)
|
||||
|
||||
## Getting Help
|
||||
|
||||
- [Troubleshooting Guide](../reference/troubleshooting)
|
||||
- [GitHub Issues](https://github.com/monadical-sas/reflector/issues)
|
||||
- [Community Discord](#)
|
||||
EOF
|
||||
|
||||
chmod +x create-docs.sh
|
||||
echo "Documentation creation script ready. Run ./create-docs.sh to generate all docs."
|
||||
@@ -1,125 +0,0 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
title: Operating Modes
|
||||
---
|
||||
|
||||
# Operating Modes
|
||||
|
||||
Reflector operates in two distinct modes to accommodate different use cases and security requirements.
|
||||
|
||||
## Public Mode
|
||||
|
||||
Public mode provides immediate access to core transcription features without requiring authentication.
|
||||
|
||||
### Features Available
|
||||
- **File Upload**: Process audio files up to 2GB
|
||||
- **Live Transcription**: Stream audio from microphone
|
||||
- **Basic Processing**: Transcription and diarization
|
||||
- **Temporary Storage**: Results available for 24 hours
|
||||
|
||||
### Limitations
|
||||
- No persistent storage
|
||||
- No meeting rooms
|
||||
- Limited to single-user sessions
|
||||
- No team collaboration features
|
||||
|
||||
### Use Cases
|
||||
- Quick transcription needs
|
||||
- Testing and evaluation
|
||||
- Individual users
|
||||
- Public demonstrations
|
||||
|
||||
## Private Mode
|
||||
|
||||
Private mode unlocks the full potential of Reflector with authentication and persistent storage.
|
||||
|
||||
### Additional Features
|
||||
- **Virtual Meeting Rooms**: Whereby and Daily.co integration
|
||||
- **Team Collaboration**: Share transcripts with team
|
||||
- **Persistent Storage**: Long-term transcript archive
|
||||
- **Advanced Analytics**: Meeting insights and trends
|
||||
- **Custom Integration**: Webhooks and API access
|
||||
- **User Management**: Role-based access control
|
||||
|
||||
### Authentication Options
|
||||
|
||||
#### Authentik Integration
|
||||
Enterprise-grade SSO with support for:
|
||||
- SAML 2.0
|
||||
- OAuth 2.0 / OIDC
|
||||
- LDAP / Active Directory
|
||||
- Multi-factor authentication
|
||||
|
||||
#### JWT Authentication
|
||||
Stateless token-based auth (a verification sketch follows the list) for:
|
||||
- API access
|
||||
- Service-to-service communication
|
||||
- Mobile applications
|
||||
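On the backend, verification boils down to checking the token's signature and audience against the provider's public key. A minimal sketch using PyJWT; the function and argument names are illustrative:

```python
import jwt  # PyJWT

def verify_token(token: str, public_key_pem: str, client_id: str) -> dict:
    """Decode and verify an RS256 token; raises jwt.InvalidTokenError on failure."""
    # RS256 is asymmetric: the backend needs only the provider's public key.
    # The audience claim must match the OAuth client ID.
    return jwt.decode(token, public_key_pem, algorithms=["RS256"], audience=client_id)
```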
|
||||
### Room Management
|
||||
|
||||
Virtual rooms provide dedicated spaces for meetings:
|
||||
- **Persistent URLs**: Same link for recurring meetings
|
||||
- **Access Control**: Invite-only or open rooms
|
||||
- **Recording Consent**: Automatic consent management
|
||||
- **Custom Settings**: Per-room configuration
|
||||
|
||||
## Mode Selection
|
||||
|
||||
The mode is determined by your deployment configuration:
|
||||
|
||||
```yaml
|
||||
# Public Mode (no authentication)
|
||||
REFLECTOR_AUTH_BACKEND=none
|
||||
|
||||
# Private Mode (with authentication)
|
||||
REFLECTOR_AUTH_BACKEND=jwt
|
||||
# or
|
||||
REFLECTOR_AUTH_BACKEND=authentik
|
||||
```
|
||||
|
||||
## Feature Comparison
|
||||
|
||||
| Feature | Public Mode | Private Mode |
|
||||
|---------|------------|--------------|
|
||||
| File Upload | ✅ | ✅ |
|
||||
| Live Transcription | ✅ | ✅ |
|
||||
| Speaker Diarization | ✅ | ✅ |
|
||||
| Translation | ✅ | ✅ |
|
||||
| Summarization | ✅ | ✅ |
|
||||
| Meeting Rooms | ❌ | ✅ |
|
||||
| Persistent Storage | ❌ | ✅ |
|
||||
| Team Collaboration | ❌ | ✅ |
|
||||
| API Access | Limited | Full |
|
||||
| User Management | ❌ | ✅ |
|
||||
| Custom Branding | ❌ | ✅ |
|
||||
| Analytics | ❌ | ✅ |
|
||||
| Webhooks | ❌ | ✅ |
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Public Mode Security
|
||||
- File size restrictions
|
||||
- Automatic cleanup of old data
|
||||
|
||||
### Private Mode Security
|
||||
- Encrypted data storage
|
||||
- Audit logging
|
||||
- Session management
|
||||
- Access control lists
|
||||
- Data retention policies
|
||||
|
||||
## Choosing the Right Mode
|
||||
|
||||
### Choose Public Mode if:
|
||||
- You need quick, one-time transcriptions
|
||||
- You're evaluating Reflector
|
||||
- You don't need persistent storage
|
||||
- You're processing non-sensitive content
|
||||
|
||||
### Choose Private Mode if:
|
||||
- You need team collaboration
|
||||
- You require persistent storage
|
||||
- You're processing sensitive content
|
||||
- You need meeting room functionality
|
||||
- You want advanced analytics
|
||||
@@ -1,194 +0,0 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
title: Architecture Overview
|
||||
---
|
||||
|
||||
# Architecture Overview
|
||||
|
||||
Reflector is built as a modern, scalable, microservices-based application designed to handle audio processing workloads efficiently while maintaining data privacy and control.
|
||||
|
||||
## System Components
|
||||
|
||||
### Frontend Application
|
||||
|
||||
The user interface is built with **Next.js 14** using the App Router pattern, providing:
|
||||
|
||||
- Server-side rendering for optimal performance
|
||||
- Real-time WebSocket connections for live transcription
|
||||
- WebRTC support for audio streaming and live meetings
|
||||
- Responsive design with Chakra UI components
|
||||
|
||||
### Backend API Server
|
||||
|
||||
The core API is powered by **FastAPI**, a modern Python framework that provides:
|
||||
|
||||
- High-performance async request handling
|
||||
- Automatic OpenAPI documentation generation
|
||||
- Type safety with Pydantic models
|
||||
- WebSocket support for real-time updates
|
||||
|
||||
### Processing Pipeline
|
||||
|
||||
Audio processing is handled through a modular pipeline architecture:
|
||||
|
||||
```
|
||||
Audio Input → Chunking → Transcription → Diarization → Post-Processing → Storage
|
||||
```
|
||||
|
||||
Each step can run independently and in parallel, allowing for:
|
||||
- Scalable processing of large files
|
||||
- Real-time streaming capabilities
|
||||
- Fault tolerance and retry mechanisms
|
||||
|
||||
### Worker Architecture
|
||||
|
||||
Background tasks are managed by **Celery** workers with **Redis** as the message broker:
|
||||
|
||||
- Distributed task processing
|
||||
- Priority queues for time-sensitive operations
|
||||
- Automatic retry on failure
|
||||
- Progress tracking and notifications
|
||||
|
||||
### GPU Acceleration
|
||||
|
||||
ML models run on GPU-accelerated infrastructure:
|
||||
|
||||
- **Modal.com** for serverless GPU processing
|
||||
- Support for local GPU deployment (coming soon)
|
||||
- Automatic scaling based on demand
|
||||
- Cost-effective pay-per-use model
|
||||
|
||||
## Data Flow
|
||||
|
||||
### File Processing Flow
|
||||
|
||||
1. **Upload**: User uploads audio file through web interface
|
||||
2. **Storage**: File stored temporarily or in S3
|
||||
3. **Queue**: Processing job added to Celery queue
|
||||
4. **Chunking**: Audio split into 30-second segments
|
||||
5. **Parallel Processing**: Chunks processed simultaneously
|
||||
6. **Assembly**: Results merged and aligned
|
||||
7. **Post-Processing**: Summary, topics, translation
|
||||
8. **Delivery**: Results stored and user notified
|
||||
|
||||
### Live Streaming Flow
|
||||
|
||||
1. **WebRTC Connection**: Browser establishes peer connection
|
||||
2. **Audio Capture**: Microphone audio streamed to server
|
||||
3. **Buffering**: Audio buffered for processing
|
||||
4. **VAD**: Voice activity detection segments speech
|
||||
5. **Real-time Processing**: Segments transcribed immediately
|
||||
6. **WebSocket Updates**: Results streamed back to client
|
||||
7. **Continuous Assembly**: Full transcript built progressively
|
||||
|
||||
## Deployment Architecture
|
||||
|
||||
### Container-Based Deployment
|
||||
|
||||
All components are containerized for consistent deployment:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
frontend: # Next.js application
|
||||
backend: # FastAPI server
|
||||
worker: # Celery workers
|
||||
redis: # Message broker
|
||||
postgres: # Database
|
||||
caddy: # Reverse proxy
|
||||
```
|
||||
|
||||
### Networking
|
||||
|
||||
- **Host Network Mode**: Required for WebRTC/ICE compatibility
|
||||
- **Caddy Reverse Proxy**: Handles SSL termination and routing
|
||||
- **WebSocket Upgrade**: Supports real-time connections
|
||||
|
||||
## Scalability Considerations
|
||||
|
||||
### Horizontal Scaling
|
||||
|
||||
- **Stateless Backend**: Multiple API server instances
|
||||
- **Worker Pools**: Add workers based on queue depth
|
||||
- **Database Pooling**: Connection management for concurrent access
|
||||
|
||||
### Vertical Scaling
|
||||
|
||||
- **GPU Workers**: Scale up for faster model inference
|
||||
- **Memory Optimization**: Efficient audio buffering
|
||||
- **CPU Optimization**: Multi-threaded processing where applicable
|
||||
|
||||
## Security Architecture
|
||||
|
||||
### Authentication & Authorization
|
||||
|
||||
- **JWT Tokens**: Stateless authentication
|
||||
- **Authentik Integration**: Enterprise SSO support
|
||||
- **Role-Based Access**: Granular permissions
|
||||
|
||||
### Data Protection
|
||||
|
||||
- **Encryption at Rest**: Database and S3 encryption
|
||||
- **Encryption in Transit**: TLS for all connections
|
||||
- **Temporary Storage**: Automatic cleanup of processed files
|
||||
|
||||
### Privacy by Design
|
||||
|
||||
- **Local Processing**: Option to process entirely on-premises
|
||||
- **No Training on User Data**: Models are pre-trained
|
||||
- **Data Isolation**: Multi-tenant data separation
|
||||
|
||||
## Integration Points
|
||||
|
||||
### External Services
|
||||
|
||||
- **Modal.com**: GPU processing
|
||||
- **AWS S3**: Long-term storage
|
||||
- **Whereby**: Video conferencing rooms
|
||||
- **Zulip**: Chat integration (optional)
|
||||
|
||||
### APIs and Webhooks
|
||||
|
||||
- **RESTful API**: Standard CRUD operations
|
||||
- **WebSocket API**: Real-time updates
|
||||
- **Webhook Notifications**: Processing completion events
|
||||
- **OpenAPI Specification**: Machine-readable API definition
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Caching Strategy
|
||||
|
||||
- **Redis Cache**: Frequently accessed data
|
||||
- **CDN**: Static asset delivery
|
||||
- **Browser Cache**: Client-side optimization
|
||||
|
||||
### Database Optimization
|
||||
|
||||
- **Indexed Queries**: Fast search and retrieval
|
||||
- **Connection Pooling**: Efficient resource usage
|
||||
- **Query Optimization**: N+1 query prevention
|
||||
|
||||
### Processing Optimization
|
||||
|
||||
- **Batch Processing**: Efficient GPU utilization
|
||||
- **Parallel Execution**: Multi-core CPU usage
|
||||
- **Stream Processing**: Reduced memory footprint
|
||||
|
||||
## Monitoring and Observability
|
||||
|
||||
### Metrics Collection
|
||||
|
||||
- **Application Metrics**: Request rates, response times
|
||||
- **System Metrics**: CPU, memory, disk usage
|
||||
- **Business Metrics**: Transcription accuracy, processing times
|
||||
|
||||
### Logging
|
||||
|
||||
- **Structured Logging**: JSON format for analysis
|
||||
- **Log Aggregation**: Centralized log management
|
||||
- **Error Tracking**: Sentry integration
|
||||
|
||||
### Health Checks
|
||||
|
||||
- **Liveness Probes**: Component availability
|
||||
- **Readiness Probes**: Service readiness
|
||||
- **Dependency Checks**: External service status
|
||||
@@ -1,272 +0,0 @@
|
||||
---
|
||||
sidebar_position: 4
|
||||
title: Processing Pipeline
|
||||
---
|
||||
|
||||
# Processing Pipeline
|
||||
|
||||
Reflector uses a modular pipeline architecture to process audio efficiently and accurately.
|
||||
|
||||
## Pipeline Overview
|
||||
|
||||
The processing pipeline consists of modular components that can be combined and configured based on your needs:
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
A[Audio Input] --> B[Pre-processing]
|
||||
B --> C[Chunking]
|
||||
C --> D[Transcription]
|
||||
D --> E[Diarization]
|
||||
E --> F[Alignment]
|
||||
F --> G[Post-processing]
|
||||
G --> H[Output]
|
||||
```
|
||||
|
||||
## Pipeline Components
|
||||
|
||||
### Audio Input
|
||||
|
||||
Accepts various input sources:
|
||||
- **File Upload**: MP3, WAV, M4A, WebM, MP4
|
||||
- **WebRTC Stream**: Live browser audio
|
||||
- **Recording Integration**: Whereby recordings
|
||||
- **API Upload**: Direct API submission
|
||||
|
||||
### Pre-processing
|
||||
|
||||
Prepares audio for optimal processing (a conversion sketch follows the list):
|
||||
- **Format Conversion**: Convert to 16kHz mono WAV
|
||||
- **Noise Reduction**: Optional background noise removal
|
||||
- **Validation**: Check duration and quality
|
||||
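The format conversion step is essentially one ffmpeg invocation. A sketch that assumes ffmpeg is on the PATH:

```python
import subprocess

def to_wav_16k_mono(src: str, dst: str) -> None:
    """Convert any supported input to 16 kHz mono WAV (-ac 1 = mono, -ar 16000 = 16 kHz)."""
    subprocess.run(["ffmpeg", "-y", "-i", src, "-ac", "1", "-ar", "16000", dst], check=True)
```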
|
||||
### Chunking
|
||||
|
||||
Splits audio for parallel processing (a windowing sketch follows the list):
|
||||
- **Fixed Size**: 30-second chunks by default
|
||||
- **Overlap**: 1-second overlap for continuity
|
||||
- **Silence Detection**: Attempt to split at silence
|
||||
- **Metadata**: Track chunk positions
|
||||
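A minimal sketch of the windowing logic, using the `chunk_size_seconds` and `chunk_overlap_seconds` defaults from the configuration section below; the real pipeline additionally tries to cut at silence:

```python
def chunk_spans(duration: float, size: float = 30.0, overlap: float = 1.0):
    """Yield (start, end) windows covering `duration` seconds of audio."""
    start = 0.0
    while start < duration:
        end = min(start + size, duration)
        yield (start, end)
        if end >= duration:
            break
        start = end - overlap  # back up slightly so boundaries overlap

# Example: list(chunk_spans(75)) -> [(0.0, 30.0), (29.0, 59.0), (58.0, 75.0)]
```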
|
||||
### Transcription
|
||||
|
||||
Converts speech to text:
|
||||
- **Model Selection**: Whisper or Parakeet
|
||||
- **Language Detection**: Automatic or specified
|
||||
- **Timestamp Generation**: Word-level timing
|
||||
- **Confidence Scores**: Quality indicators
|
||||
|
||||
### Diarization
|
||||
|
||||
Identifies different speakers (a toy clustering sketch follows the list):
|
||||
- **Voice Activity Detection**: Find speech segments
|
||||
- **Speaker Embedding**: Extract voice characteristics
|
||||
- **Clustering**: Group similar voices
|
||||
- **Label Assignment**: Assign speaker IDs
|
||||
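The embed-then-cluster idea can be illustrated with a generic agglomerative clusterer; pyannote's actual pipeline is more sophisticated, and the distance threshold here is an arbitrary stand-in:

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering  # assumed dependency

def assign_speakers(embeddings: np.ndarray) -> np.ndarray:
    """Group per-segment voice embeddings; returns one speaker label per segment."""
    clusterer = AgglomerativeClustering(
        n_clusters=None,         # let the threshold decide how many speakers
        distance_threshold=1.0,  # illustrative value, not a tuned one
        linkage="average",
    )
    return clusterer.fit_predict(embeddings)
```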
|
||||
### Alignment
|
||||
|
||||
Merges all processing results:
|
||||
- **Chunk Assembly**: Combine transcription chunks
|
||||
- **Speaker Mapping**: Align speakers with text
|
||||
- **Overlap Resolution**: Handle chunk boundaries
|
||||
- **Timeline Creation**: Build unified timeline
|
||||
|
||||
### Post-processing
|
||||
|
||||
Enhances the final output:
|
||||
- **Formatting**: Apply punctuation and capitalization
|
||||
- **Translation**: Convert to target languages
|
||||
- **Summarization**: Generate concise summaries
|
||||
- **Topic Extraction**: Identify key themes
|
||||
- **Action Items**: Extract tasks and decisions
|
||||
|
||||
## Processing Modes
|
||||
|
||||
### Batch Processing
|
||||
|
||||
For uploaded files:
|
||||
- Optimized for throughput
|
||||
- Parallel chunk processing
|
||||
- Higher accuracy models
|
||||
- Complete file analysis
|
||||
|
||||
### Stream Processing
|
||||
|
||||
For live audio:
|
||||
- Optimized for latency
|
||||
- Sequential processing
|
||||
- Real-time feedback
|
||||
- Progressive results
|
||||
|
||||
### Hybrid Processing
|
||||
|
||||
For meetings:
|
||||
- Stream during meeting
|
||||
- Batch after completion
|
||||
- Best of both modes
|
||||
- Maximum accuracy
|
||||
|
||||
## Pipeline Configuration
|
||||
|
||||
### Model Selection
|
||||
|
||||
Choose models based on requirements:
|
||||
|
||||
```python
|
||||
# High accuracy (slower)
|
||||
config = {
|
||||
"transcription_model": "whisper-large-v3",
|
||||
"diarization_model": "pyannote-3.1",
|
||||
"translation_model": "seamless-m4t-large"
|
||||
}
|
||||
|
||||
# Balanced (default)
|
||||
config = {
|
||||
"transcription_model": "whisper-base",
|
||||
"diarization_model": "pyannote-3.1",
|
||||
"translation_model": "seamless-m4t-medium"
|
||||
}
|
||||
|
||||
# Fast processing
|
||||
config = {
|
||||
"transcription_model": "whisper-tiny",
|
||||
"diarization_model": "pyannote-3.1-fast",
|
||||
"translation_model": "seamless-m4t-small"
|
||||
}
|
||||
```
|
||||
|
||||
### Processing Options
|
||||
|
||||
Customize pipeline behavior:
|
||||
|
||||
```yaml
|
||||
# Parallel processing
|
||||
max_parallel_chunks: 10
|
||||
chunk_size_seconds: 30
|
||||
chunk_overlap_seconds: 1
|
||||
|
||||
# Quality settings
|
||||
enable_noise_reduction: true
|
||||
min_speech_confidence: 0.5
|
||||
|
||||
# Post-processing
|
||||
enable_translation: true
|
||||
target_languages: ["es", "fr", "de"]
|
||||
enable_summarization: true
|
||||
summary_length: "medium"
|
||||
```
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
### Processing Times
|
||||
|
||||
For 1 hour of audio:
|
||||
|
||||
| Pipeline Config | Processing Time | Accuracy |
|
||||
|----------------|-----------------|----------|
|
||||
| Fast | 2-3 minutes | 85-90% |
|
||||
| Balanced | 5-8 minutes | 92-95% |
|
||||
| High Accuracy | 15-20 minutes | 95-98% |
|
||||
|
||||
### Resource Usage
|
||||
|
||||
| Component | CPU Usage | Memory | GPU |
|
||||
|-----------|-----------|---------|-----|
|
||||
| Transcription | Medium | 2-4 GB | Required |
|
||||
| Diarization | High | 4-8 GB | Required |
|
||||
| Translation | Low | 2-3 GB | Optional |
|
||||
| Post-processing | Low | 1-2 GB | Not needed |
|
||||
|
||||
## Pipeline Orchestration
|
||||
|
||||
### Celery Task Chain
|
||||
|
||||
The pipeline is orchestrated using Celery:
|
||||
|
||||
```python
|
||||
chain = (
|
||||
chunk_audio.s(audio_id) |
|
||||
group(transcribe_chunk.s(chunk) for chunk in chunks) |
|
||||
merge_transcriptions.s() |
|
||||
diarize_audio.s() |
|
||||
align_speakers.s() |
|
||||
post_process.s()
|
||||
)
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
Error recovery:
|
||||
- **Automatic Retry**: Failed tasks retry up to 3 times
|
||||
- **Partial Recovery**: Continue with successful chunks
|
||||
- **Fallback Models**: Use alternative models on failure
|
||||
- **Error Reporting**: Detailed error messages
|
||||
|
||||
### Progress Tracking
|
||||
|
||||
Real-time progress updates:
|
||||
- **Chunk Progress**: Track individual chunk processing
|
||||
- **Overall Progress**: Percentage completion
|
||||
- **ETA Calculation**: Estimated completion time
|
||||
- **WebSocket Updates**: Live progress to clients
|
||||
|
||||
## Optimization Strategies
|
||||
|
||||
### GPU Utilization
|
||||
|
||||
Maximize GPU efficiency (a batch-sizing sketch follows the list):
|
||||
- **Batch Processing**: Process multiple chunks together
|
||||
- **Model Caching**: Keep models loaded in memory
|
||||
- **Dynamic Batching**: Adjust batch size based on GPU memory
|
||||
- **Multi-GPU Support**: Distribute across available GPUs
|
||||
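Dynamic batching can be as simple as dividing free GPU memory by an empirically measured per-item cost. A sketch with PyTorch; the headroom and limits are illustrative:

```python
import torch

def pick_batch_size(per_item_mb: float, headroom: float = 0.8, max_batch: int = 32) -> int:
    """Choose a batch size that fits in currently free GPU memory."""
    free_bytes, _total_bytes = torch.cuda.mem_get_info()
    budget_mb = free_bytes / 2**20 * headroom  # keep slack for activations
    return max(1, min(max_batch, int(budget_mb // per_item_mb)))
```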
|
||||
### Memory Management
|
||||
|
||||
Efficient memory usage:
|
||||
- **Streaming Processing**: Process large files in chunks
|
||||
- **Garbage Collection**: Clean up after each chunk
|
||||
- **Memory Limits**: Prevent out-of-memory errors
|
||||
- **Disk Caching**: Use disk for large intermediate results
|
||||
|
||||
### Network Optimization
|
||||
|
||||
Minimize network overhead:
|
||||
- **Compression**: Compress audio before transfer
|
||||
- **CDN Integration**: Use CDN for static assets
|
||||
- **Connection Pooling**: Reuse network connections
|
||||
- **Parallel Uploads**: Multiple concurrent uploads
|
||||
|
||||
## Quality Assurance
|
||||
|
||||
### Accuracy Metrics
|
||||
|
||||
Monitor processing quality (a reference WER implementation follows the list):
|
||||
- **Word Error Rate (WER)**: Transcription accuracy
|
||||
- **Diarization Error Rate (DER)**: Speaker identification accuracy
|
||||
- **Translation BLEU Score**: Translation quality
|
||||
- **Summary Coherence**: Summary quality metrics
|
||||
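WER is the classic edit-distance metric: substitutions, deletions, and insertions divided by the number of reference words. A self-contained implementation for reference:

```python
def wer(reference: list[str], hypothesis: list[str]) -> float:
    """Word error rate: (substitutions + deletions + insertions) / reference length."""
    rows, cols = len(reference) + 1, len(hypothesis) + 1
    d = [[0] * cols for _ in range(rows)]
    for i in range(rows):
        d[i][0] = i  # deleting all reference words
    for j in range(cols):
        d[0][j] = j  # inserting all hypothesis words
    for i in range(1, rows):
        for j in range(1, cols):
            sub = 0 if reference[i - 1] == hypothesis[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + sub)
    return d[-1][-1] / max(1, len(reference))
```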
|
||||
### Validation Steps
|
||||
|
||||
Ensure output quality (a threshold-filter sketch follows the list):
|
||||
- **Confidence Thresholds**: Filter low-confidence segments
|
||||
- **Consistency Checks**: Verify timeline consistency
|
||||
- **Language Validation**: Ensure correct language detection
|
||||
- **Format Validation**: Check output format compliance
|
||||
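The confidence-threshold check is the simplest of these. A sketch assuming each segment carries a `confidence` field, using the 0.5 default from `min_speech_confidence` above:

```python
def filter_segments(segments: list[dict], min_confidence: float = 0.5) -> list[dict]:
    """Keep only segments whose confidence meets the threshold."""
    return [s for s in segments if s.get("confidence", 0.0) >= min_confidence]
```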
|
||||
## Advanced Features
|
||||
|
||||
### Custom Models
|
||||
|
||||
Use your own models:
|
||||
- **Fine-tuned Whisper**: Domain-specific models
|
||||
- **Custom Diarization**: Trained on your speakers
|
||||
- **Specialized Post-processing**: Industry-specific formatting
|
||||
|
||||
### Pipeline Extensions
|
||||
|
||||
Add custom processing steps:
|
||||
- **Sentiment Analysis**: Analyze emotional tone
|
||||
- **Entity Extraction**: Identify people, places, organizations
|
||||
- **Custom Metrics**: Calculate domain-specific metrics
|
||||
- **Integration Hooks**: Call external services
|
||||
@@ -1,285 +0,0 @@
|
||||
---
|
||||
sidebar_position: 5
|
||||
title: Authentication Setup
|
||||
---
|
||||
|
||||
# Authentication Setup
|
||||
|
||||
This page covers authentication setup in detail. For the complete deployment guide, see [Deployment Guide](./overview).
|
||||
|
||||
Reflector uses [Authentik](https://goauthentik.io/) for OAuth/OIDC authentication. This guide walks you through setting up Authentik and connecting it to Reflector.
|
||||
|
||||
For simplicity, this guide sets up Authentik on the same server as Reflector. You can use your own Authentik instance instead.
|
||||
|
||||
## Overview
|
||||
|
||||
Reflector's authentication flow:
|
||||
1. User clicks "Sign In" on frontend
|
||||
2. Frontend redirects to Authentik login page
|
||||
3. User authenticates with Authentik
|
||||
4. Authentik redirects back with OAuth tokens
|
||||
5. Frontend stores tokens, backends verify JWT signature
|
||||
|
||||
## Option 1: Self-Hosted Authentik (Same Server)
|
||||
|
||||
This setup runs Authentik on the same server as Reflector, with Caddy proxying to both.
|
||||
|
||||
### Deploy Authentik
|
||||
|
||||
```bash
|
||||
# Create directory for Authentik
|
||||
mkdir -p ~/authentik && cd ~/authentik
|
||||
|
||||
# Download docker-compose file
|
||||
curl -O https://goauthentik.io/docker-compose.yml
|
||||
|
||||
# Generate secrets and bootstrap credentials
|
||||
# Use an unquoted delimiter so the $(openssl ...) substitutions below expand
cat > .env << EOF
|
||||
PG_PASS=$(openssl rand -base64 36 | tr -d '\n')
|
||||
AUTHENTIK_SECRET_KEY=$(openssl rand -base64 60 | tr -d '\n')
|
||||
# Privacy-focused choice for self-hosted deployments
|
||||
AUTHENTIK_ERROR_REPORTING__ENABLED=false
|
||||
AUTHENTIK_BOOTSTRAP_PASSWORD=YourSecurePassword123
|
||||
AUTHENTIK_BOOTSTRAP_EMAIL=admin@example.com
|
||||
EOF
|
||||
|
||||
# Start Authentik
|
||||
sudo docker compose up -d
|
||||
```
|
||||
|
||||
Authentik takes ~2 minutes to run migrations and apply blueprints on first start.
|
||||
|
||||
### Connect Authentik to Reflector's Network
|
||||
|
||||
If Authentik runs in a separate Docker Compose project, connect it to Reflector's network so Caddy can proxy to it:
|
||||
|
||||
```bash
|
||||
# Wait for Authentik to be healthy
|
||||
# Connect Authentik server to Reflector's network
|
||||
sudo docker network connect reflector_default authentik-server-1
|
||||
```
|
||||
|
||||
**Important:** This step must be repeated if you restart Authentik with `docker compose down`. Add it to your deployment scripts or use `docker compose up -d` (which preserves containers) instead of down/up.
|
||||
|
||||
### Add Authentik to Caddy
|
||||
|
||||
Uncomment the Authentik section in your `Caddyfile` and set your domain:
|
||||
|
||||
```bash
|
||||
nano Caddyfile
|
||||
```
|
||||
|
||||
Uncomment and edit:
|
||||
```
|
||||
{$AUTHENTIK_DOMAIN:authentik.example.com} {
|
||||
reverse_proxy authentik-server-1:9000
|
||||
}
|
||||
```
|
||||
|
||||
Reload Caddy:
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
|
||||
```
|
||||
|
||||
### Create OAuth2 Provider in Authentik
|
||||
|
||||
**Option A: Automated Setup (Recommended)**
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
Run the setup script from the Reflector repository:
|
||||
|
||||
```bash
|
||||
ssh user@your-server-ip
|
||||
cd ~/reflector
|
||||
./scripts/setup-authentik-oauth.sh https://authentik.example.com YourSecurePassword123 https://app.example.com
|
||||
```
|
||||
|
||||
**Important:** The script must be run from the `~/reflector` directory on your server, as it creates files using relative paths.
|
||||
|
||||
The script will output the configuration values to add to your `.env` files. Skip to "Update docker-compose.prod.yml".
|
||||
|
||||
**Option B: Manual Setup**
|
||||
|
||||
1. **Login to Authentik Admin** at `https://authentik.example.com/`
|
||||
- Username: `akadmin`
|
||||
- Password: The `AUTHENTIK_BOOTSTRAP_PASSWORD` you set in .env
|
||||
|
||||
2. **Create OAuth2 Provider:**
|
||||
- Go to **Applications > Providers > Create**
|
||||
- Select **OAuth2/OpenID Provider**
|
||||
- Configure:
|
||||
- **Name**: `Reflector`
|
||||
- **Authorization flow**: `default-provider-authorization-implicit-consent`
|
||||
- **Client type**: `Confidential`
|
||||
- **Client ID**: Note this value (auto-generated)
|
||||
- **Client Secret**: Note this value (auto-generated)
|
||||
- **Redirect URIs**: Add entry with:
|
||||
```
|
||||
https://app.example.com/api/auth/callback/authentik
|
||||
```
|
||||
- Scroll down to **Advanced protocol settings**
|
||||
- In **Scopes**, add these three mappings:
|
||||
- `authentik default OAuth Mapping: OpenID 'email'`
|
||||
- `authentik default OAuth Mapping: OpenID 'openid'`
|
||||
- `authentik default OAuth Mapping: OpenID 'profile'`
|
||||
- Click **Finish**
|
||||
|
||||
3. **Create Application:**
|
||||
- Go to **Applications > Applications > Create**
|
||||
- Configure:
|
||||
- **Name**: `Reflector`
|
||||
- **Slug**: `reflector` (auto-filled)
|
||||
- **Provider**: Select the `Reflector` provider you just created
|
||||
- Click **Create**
|
||||
|
||||
### Get Public Key for JWT Verification
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
Extract the public key from Authentik's JWKS endpoint:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/reflector/server/reflector/auth/jwt/keys
|
||||
curl -s https://authentik.example.com/application/o/reflector/jwks/ | \
|
||||
jq -r '.keys[0].x5c[0]' | base64 -d | openssl x509 -inform der -pubkey -noout \
|
||||
> ~/reflector/server/reflector/auth/jwt/keys/authentik_public.pem
|
||||
```
|
||||
|
||||
### Update docker-compose.prod.yml
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
**Note:** This step is already done in the current `docker-compose.prod.yml`. Verify the volume mounts exist:
|
||||
|
||||
```yaml
|
||||
server:
|
||||
image: monadicalsas/reflector-backend:latest
|
||||
# ... other config ...
|
||||
volumes:
|
||||
- server_data:/app/data
|
||||
- ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro
|
||||
|
||||
worker:
|
||||
image: monadicalsas/reflector-backend:latest
|
||||
# ... other config ...
|
||||
volumes:
|
||||
- server_data:/app/data
|
||||
- ./server/reflector/auth/jwt/keys:/app/reflector/auth/jwt/keys:ro
|
||||
```
|
||||
|
||||
### Configure Reflector Backend
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
Update `server/.env`:
|
||||
```env
|
||||
# Authentication
|
||||
AUTH_BACKEND=jwt
|
||||
AUTH_JWT_PUBLIC_KEY=authentik_public.pem
|
||||
AUTH_JWT_AUDIENCE=<your-client-id>
|
||||
CORS_ALLOW_CREDENTIALS=true
|
||||
```
|
||||
|
||||
Replace `<your-client-id>` with the Client ID from previous steps.
|
||||
|
||||
### Configure Reflector Frontend
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
Update `www/.env`:
|
||||
```env
|
||||
# Authentication
|
||||
FEATURE_REQUIRE_LOGIN=true
|
||||
|
||||
# Authentik OAuth
|
||||
AUTHENTIK_ISSUER=https://authentik.example.com/application/o/reflector
|
||||
AUTHENTIK_REFRESH_TOKEN_URL=https://authentik.example.com/application/o/token/
|
||||
AUTHENTIK_CLIENT_ID=<your-client-id>
|
||||
AUTHENTIK_CLIENT_SECRET=<your-client-secret>
|
||||
|
||||
# NextAuth
|
||||
NEXTAUTH_SECRET=<generate-with-openssl-rand-hex-32>
|
||||
```
|
||||
|
||||
### Restart Services
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
```bash
|
||||
cd ~/reflector
|
||||
sudo docker compose -f docker-compose.prod.yml up -d --force-recreate server worker web
|
||||
```
|
||||
|
||||
### Verify Authentication
|
||||
|
||||
1. Visit `https://app.example.com`
|
||||
2. Click "Log in" or navigate to `/api/auth/signin`
|
||||
3. Click "Sign in with Authentik"
|
||||
4. Login with your Authentik credentials
|
||||
5. You should be redirected back and see "Log out" in the header
|
||||
|
||||
## Option 2: Disable Authentication
|
||||
|
||||
For testing or internal deployments where authentication isn't needed:
|
||||
|
||||
**Backend `server/.env`:**
|
||||
```env
|
||||
AUTH_BACKEND=none
|
||||
```
|
||||
|
||||
**Frontend `www/.env`:**
|
||||
```env
|
||||
FEATURE_REQUIRE_LOGIN=false
|
||||
```
|
||||
|
||||
**Note:** The pre-built Docker images have `FEATURE_REQUIRE_LOGIN=true` baked in. To disable auth, you'll need to rebuild the frontend image with the env var set at build time, or set up Authentik.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Invalid redirect URI" error
|
||||
- Verify the redirect URI in Authentik matches exactly:
|
||||
```
|
||||
https://app.example.com/api/auth/callback/authentik
|
||||
```
|
||||
- Check for trailing slashes - they must match exactly
|
||||
|
||||
### "Invalid audience" JWT error
|
||||
- Ensure `AUTH_JWT_AUDIENCE` in `server/.env` matches the Client ID from Authentik
|
||||
- The audience value is the OAuth Client ID, not the issuer URL
|
||||
|
||||
### "JWT verification failed" error
|
||||
- Verify the public key file is mounted in the container
|
||||
- Check `AUTH_JWT_PUBLIC_KEY` points to the correct filename
|
||||
- Ensure the key was extracted from the correct provider's JWKS endpoint
|
||||
|
||||
### Caddy returns 503 for Authentik
|
||||
- Verify Authentik container is connected to Reflector's network:
|
||||
```bash
|
||||
sudo docker network connect reflector_default authentik-server-1
|
||||
```
|
||||
- Check Authentik is healthy: `cd ~/authentik && sudo docker compose ps`
|
||||
|
||||
### Users can't access protected pages
|
||||
- Verify `FEATURE_REQUIRE_LOGIN=true` in frontend
|
||||
- Check `AUTH_BACKEND=jwt` in backend
|
||||
- Verify CORS settings allow credentials
|
||||
|
||||
### Token refresh errors
|
||||
- Ensure Redis is running (frontend uses Redis for token caching)
|
||||
- Verify `KV_URL` is set correctly in frontend env
|
||||
- Check `AUTHENTIK_REFRESH_TOKEN_URL` is correct
|
||||
|
||||
## API Key Authentication
|
||||
|
||||
For programmatic access (scripts, integrations), users can generate API keys:
|
||||
|
||||
1. Login to Reflector
|
||||
2. Go to Settings > API Keys
|
||||
3. Click "Generate New Key"
|
||||
4. Use the key in requests:
|
||||
```bash
|
||||
curl -H "X-API-Key: your-api-key" https://api.example.com/v1/transcripts
|
||||
```
|
||||
|
||||
API keys are stored hashed and can be revoked at any time.
|
||||
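"Stored hashed" means the server keeps only a digest of each key, never the key itself. A sketch of one possible scheme; Reflector's actual format may differ:

```python
import hashlib
import hmac
import secrets

def new_api_key() -> tuple[str, str]:
    """Return (key to show the user once, hash to store server-side)."""
    key = secrets.token_urlsafe(32)
    return key, hashlib.sha256(key.encode()).hexdigest()

def check_api_key(presented: str, stored_hash: str) -> bool:
    digest = hashlib.sha256(presented.encode()).hexdigest()
    return hmac.compare_digest(digest, stored_hash)  # constant-time comparison
```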
@@ -1,165 +0,0 @@
|
||||
---
|
||||
sidebar_position: 6
|
||||
title: Daily.co Setup
|
||||
---
|
||||
|
||||
# Daily.co Setup
|
||||
|
||||
This page covers Daily.co video platform setup for live meeting rooms. For the complete deployment guide, see [Deployment Guide](./overview).
|
||||
|
||||
Daily.co enables live video meetings with automatic recording and transcription.
|
||||
|
||||
## What You'll Set Up
|
||||
|
||||
```
|
||||
User joins meeting → Daily.co video room → Recording to S3 → [Webhook] → Reflector transcribes
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [ ] **Daily.co account** - Free tier at https://dashboard.daily.co
|
||||
- [ ] **AWS account** - For S3 storage
|
||||
- [ ] **Reflector deployed** - Complete steps from [Deployment Guide](./overview)
|
||||
|
||||
---
|
||||
|
||||
## Create Daily.co Account
|
||||
|
||||
1. Visit https://dashboard.daily.co and sign up
|
||||
2. Verify your email
|
||||
3. Note your subdomain (e.g., `yourname.daily.co` → subdomain is `yourname`)
|
||||
|
||||
---
|
||||
|
||||
## Get Daily.co API Key
|
||||
|
||||
1. In Daily.co dashboard, go to **Developers**
|
||||
2. Click **API Keys**
|
||||
3. Click **Create API Key**
|
||||
4. Copy the generated API key
|
||||
|
||||
Save this for later.
|
||||
|
||||
---
|
||||
|
||||
## Create AWS S3 Bucket
|
||||
|
||||
Daily.co needs somewhere to store recordings before Reflector processes them.
|
||||
|
||||
```bash
|
||||
# Choose a unique bucket name
|
||||
BUCKET_NAME="reflector-dailyco-yourname" # -yourname is not a requirement, you can name the bucket as you wish
|
||||
AWS_REGION="us-east-1"
|
||||
|
||||
# Create bucket
|
||||
aws s3 mb s3://$BUCKET_NAME --region $AWS_REGION
|
||||
|
||||
# Enable versioning (required)
|
||||
aws s3api put-bucket-versioning \
|
||||
--bucket $BUCKET_NAME \
|
||||
--versioning-configuration Status=Enabled
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Create IAM Role for Daily.co
|
||||
|
||||
Daily.co needs permission to write recordings to your S3 bucket.
|
||||
|
||||
Follow the guide https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket
|
||||
|
||||
Save the role ARN - you'll need it soon.
|
||||
|
||||
It looks like: `arn:aws:iam::123456789012:role/DailyCo`
|
||||
|
||||
The role you create must grant write access to the S3 bucket from the previous step.
|
||||
|
||||
No additional setup is needed in the Daily.co dashboard: Reflector tells Daily.co where to store recordings via the API.
|
||||
|
||||
---
|
||||
|
||||
## Configure Reflector
|
||||
|
||||
**Location: Reflector server**
|
||||
|
||||
Add to `server/.env`:
|
||||
|
||||
```env
|
||||
# Daily.co Configuration
|
||||
DEFAULT_VIDEO_PLATFORM=daily
|
||||
DAILY_API_KEY=<your-api-key-from-daily-setup>
|
||||
DAILY_SUBDOMAIN=<your-subdomain-from-daily-setup>
|
||||
|
||||
# S3 Storage for Daily.co recordings
|
||||
DAILYCO_STORAGE_AWS_BUCKET_NAME=<your-bucket-from-daily-setup>
|
||||
DAILYCO_STORAGE_AWS_REGION=us-east-1
|
||||
DAILYCO_STORAGE_AWS_ROLE_ARN=<your-role-arn-from-daily-setup>
|
||||
|
||||
# Transcript storage (should already be configured from main setup)
|
||||
# TRANSCRIPT_STORAGE_BACKEND=aws
|
||||
# TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=<your-key>
|
||||
# TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=<your-secret>
|
||||
# TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=<your-bucket-name>
|
||||
# TRANSCRIPT_STORAGE_AWS_REGION=<your-bucket-region>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Restart Services
|
||||
|
||||
After changing `.env` files, reload with `up -d`:
|
||||
|
||||
```bash
|
||||
sudo docker compose -f docker-compose.prod.yml up -d server worker
|
||||
```
|
||||
|
||||
**Note**: `docker compose up -d` detects env changes and recreates containers automatically.
|
||||
|
||||
---
|
||||
|
||||
## Test Live Room
|
||||
|
||||
1. Visit your Reflector frontend: `https://app.example.com`
|
||||
2. Go to **Rooms**
|
||||
3. Click **Create Room**
|
||||
4. Select **Daily** as the platform
|
||||
5. Allow camera/microphone access
|
||||
6. You should see Daily.co video interface
|
||||
7. Speak for 10-20 seconds
|
||||
8. Leave the meeting
|
||||
9. The recording should appear in **Transcripts** within about 5 minutes (Reflector polls for new recordings by default; for push-based updates, see [Webhook Configuration](#webhook-configuration-optional) below)
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Recording doesn't appear in S3
|
||||
|
||||
1. Check Daily.co dashboard → **Logs** for errors
|
||||
2. Verify the IAM role trust policy includes Daily.co's account ID and your Daily.co subdomain
|
||||
3. Verify that the bucket has versioning enabled (Daily.co requires it)
|
||||
|
||||
### Recording in S3 but not transcribed
|
||||
|
||||
1. Check webhook is configured (Reflector should auto-create it)
|
||||
2. Check worker logs:
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml logs worker --tail 50
|
||||
```
|
||||
3. Verify `DAILYCO_STORAGE_AWS_*` vars in `server/.env`
|
||||
|
||||
### "Access Denied" when Daily.co tries to write to S3
|
||||
|
||||
1. Double-check IAM role ARN in Daily.co settings
|
||||
2. Verify bucket name matches exactly
|
||||
3. Check IAM policy has `s3:PutObject` permission
|
||||
|
||||
---
|
||||
|
||||
## Webhook Configuration [optional]
|
||||
|
||||
The `manage_daily_webhook.py` script guides you through creating a webhook for Daily recordings.
|
||||
|
||||
The webhook isn't required: a polling mechanism is used by default and runs automatically.
|
||||
|
||||
This guide doesn't cover webhook setup in depth.
|
||||
@@ -1,192 +0,0 @@
|
||||
---
|
||||
sidebar_position: 3
|
||||
title: Docker Reference
|
||||
---
|
||||
|
||||
# Docker Reference
|
||||
|
||||
This page documents the Docker Compose configuration for Reflector. For the complete deployment guide, see [Deployment Guide](./overview).
|
||||
|
||||
## Services
|
||||
|
||||
The `docker-compose.prod.yml` includes these services:
|
||||
|
||||
| Service | Image | Purpose |
|
||||
|---------|-------|---------|
|
||||
| `web` | `monadicalsas/reflector-frontend` | Next.js frontend |
|
||||
| `server` | `monadicalsas/reflector-backend` | FastAPI backend |
|
||||
| `worker` | `monadicalsas/reflector-backend` | Celery worker for background tasks |
|
||||
| `beat` | `monadicalsas/reflector-backend` | Celery beat scheduler |
|
||||
| `redis` | `redis:7.2-alpine` | Message broker and cache |
|
||||
| `postgres` | `postgres:17-alpine` | Primary database |
|
||||
| `caddy` | `caddy:2-alpine` | Reverse proxy with auto-SSL |
|
||||
|
||||
## Environment Files
|
||||
|
||||
Reflector uses two separate environment files:
|
||||
|
||||
### Backend (`server/.env`)
|
||||
|
||||
Used by: `server`, `worker`, `beat`
|
||||
|
||||
Key variables:
|
||||
```env
|
||||
# Database connection
|
||||
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
||||
|
||||
# Redis
|
||||
REDIS_HOST=redis
|
||||
CELERY_BROKER_URL=redis://redis:6379/1
|
||||
CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
|
||||
# API domain and CORS
|
||||
BASE_URL=https://api.example.com
|
||||
CORS_ORIGIN=https://app.example.com
|
||||
|
||||
# Modal GPU processing
|
||||
TRANSCRIPT_BACKEND=modal
|
||||
TRANSCRIPT_URL=https://...
|
||||
TRANSCRIPT_MODAL_API_KEY=...
|
||||
```
|
||||
|
||||
### Frontend (`www/.env`)
|
||||
|
||||
Used by: `web`
|
||||
|
||||
Key variables:
|
||||
```env
|
||||
# Domain configuration
|
||||
SITE_URL=https://app.example.com
|
||||
API_URL=https://api.example.com
|
||||
WEBSOCKET_URL=wss://api.example.com
|
||||
SERVER_API_URL=http://server:1250
|
||||
|
||||
# Authentication
|
||||
NEXTAUTH_URL=https://app.example.com
|
||||
NEXTAUTH_SECRET=...
|
||||
```
|
||||
|
||||
Note: `API_URL` is used client-side (browser), `SERVER_API_URL` is used server-side (SSR).
|
||||
|
||||
## Volumes
|
||||
|
||||
| Volume | Purpose |
|
||||
|--------|---------|
|
||||
| `redis_data` | Redis persistence |
|
||||
| `postgres_data` | PostgreSQL data |
|
||||
| `server_data` | Uploaded files, local storage |
|
||||
| `caddy_data` | SSL certificates |
|
||||
| `caddy_config` | Caddy configuration |
|
||||
|
||||
## Network
|
||||
|
||||
All services share the default network. The network is marked `attachable: true` to allow external containers (like Authentik) to join.
|
||||
|
||||
## Common Commands
|
||||
|
||||
### Start all services
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml up -d
|
||||
```
|
||||
|
||||
### View logs
|
||||
```bash
|
||||
# All services
|
||||
docker compose -f docker-compose.prod.yml logs -f
|
||||
|
||||
# Specific service
|
||||
docker compose -f docker-compose.prod.yml logs server --tail 50
|
||||
```
|
||||
|
||||
### Restart a service
|
||||
```bash
|
||||
# Quick restart (doesn't reload .env changes)
|
||||
docker compose -f docker-compose.prod.yml restart server
|
||||
|
||||
# Reload .env and restart
|
||||
docker compose -f docker-compose.prod.yml up -d server
|
||||
```
|
||||
|
||||
### Run database migrations
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml exec server uv run alembic upgrade head
|
||||
```
|
||||
|
||||
### Access database
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml exec postgres psql -U reflector
|
||||
```
|
||||
|
||||
### Pull latest images
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml pull
|
||||
docker compose -f docker-compose.prod.yml up -d
|
||||
```
|
||||
|
||||
### Stop all services
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml down
|
||||
```
|
||||
|
||||
### Full reset (WARNING: deletes data)
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml down -v
|
||||
```
|
||||
|
||||
## Customization
|
||||
|
||||
### Using a different database
|
||||
|
||||
To use an external PostgreSQL:
|
||||
|
||||
1. Remove `postgres` service from compose file
|
||||
2. Update `DATABASE_URL` in `server/.env`:
|
||||
```env
|
||||
DATABASE_URL=postgresql+asyncpg://user:pass@external-host:5432/reflector
|
||||
```
|
||||
|
||||
### Using external Redis
|
||||
|
||||
1. Remove `redis` service from compose file
|
||||
2. Update Redis settings in `server/.env`:
|
||||
```env
|
||||
REDIS_HOST=external-redis-host
|
||||
CELERY_BROKER_URL=redis://external-redis-host:6379/1
|
||||
```
|
||||
|
||||
### Adding Authentik
|
||||
|
||||
To add Authentik for authentication, see [Authentication Setup](./auth-setup). Quick steps:
|
||||
|
||||
1. Deploy Authentik separately
|
||||
2. Connect to Reflector's network:
|
||||
```bash
|
||||
docker network connect reflector_default authentik-server-1
|
||||
```
|
||||
3. Add to Caddyfile:
|
||||
```
|
||||
authentik.example.com {
|
||||
reverse_proxy authentik-server-1:9000
|
||||
}
|
||||
```
|
||||
|
||||
## Caddyfile Reference
|
||||
|
||||
The Caddyfile supports environment variable substitution:
|
||||
|
||||
```
|
||||
{$FRONTEND_DOMAIN:app.example.com} {
|
||||
reverse_proxy web:3000
|
||||
}
|
||||
|
||||
{$API_DOMAIN:api.example.com} {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
```
|
||||
|
||||
Set `FRONTEND_DOMAIN` and `API_DOMAIN` environment variables, or edit the file directly.
|
||||
|
||||
### Reload Caddy after changes
|
||||
```bash
|
||||
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
|
||||
```
|
||||
@@ -1,139 +0,0 @@
|
||||
---
|
||||
sidebar_position: 10
|
||||
title: Docs Website Deployment
|
||||
---
|
||||
|
||||
# Docs Website Deployment
|
||||
|
||||
This guide covers deploying the Reflector documentation website. **This is optional and intended for internal/experimental use only.**
|
||||
|
||||
## Overview
|
||||
|
||||
The documentation is built using Docusaurus and deployed as a static nginx-served site.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Reflector already deployed (Steps 1-7 from [Deployment Guide](./overview))
|
||||
- DNS A record for docs subdomain (e.g., `docs.example.com`)
|
||||
|
||||
## Deployment Steps
|
||||
|
||||
### Step 1: Pre-fetch OpenAPI Spec
|
||||
|
||||
The docs site includes API reference from your running backend. Fetch it before building:
|
||||
|
||||
```bash
|
||||
cd ~/reflector
|
||||
docker compose -f docker-compose.prod.yml exec server curl -s http://localhost:1250/openapi.json > docs/static/openapi.json
|
||||
```
|
||||
|
||||
This creates `docs/static/openapi.json` (should be ~70KB) which will be copied during Docker build.
|
||||
|
||||
**Why not fetch during build?** Docker build containers are network-isolated and can't access the running backend services.
|
||||
|
||||
### Step 2: Verify Dockerfile
|
||||
|
||||
The Dockerfile is already in `docs/Dockerfile`:
|
||||
|
||||
```dockerfile
|
||||
FROM node:18-alpine AS builder
|
||||
WORKDIR /app
|
||||
|
||||
# Copy package files
|
||||
COPY package*.json ./
|
||||
|
||||
# Install dependencies
|
||||
RUN npm ci
|
||||
|
||||
# Copy source (includes static/openapi.json if pre-fetched)
|
||||
COPY . .
|
||||
|
||||
# Fix docusaurus config: change onBrokenLinks to 'warn' for Docker build
|
||||
RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts
|
||||
|
||||
# Build static site
|
||||
RUN npx docusaurus build
|
||||
|
||||
FROM nginx:alpine
|
||||
COPY --from=builder /app/build /usr/share/nginx/html
|
||||
EXPOSE 80
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
||||
```
|
||||
|
||||
### Step 3: Add Docs Service to docker-compose.prod.yml
|
||||
|
||||
Add this service to `docker-compose.prod.yml`:
|
||||
|
||||
```yaml
|
||||
docs:
|
||||
build: ./docs
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- default
|
||||
```
|
||||
|
||||
### Step 4: Add Caddy Route
|
||||
|
||||
Add to `Caddyfile`:
|
||||
|
||||
```
|
||||
{$DOCS_DOMAIN:docs.example.com} {
|
||||
reverse_proxy docs:80
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5: Build and Deploy
|
||||
|
||||
```bash
|
||||
cd ~/reflector
|
||||
docker compose -f docker-compose.prod.yml up -d --build docs
|
||||
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
|
||||
```
|
||||
|
||||
### Step 6: Verify
|
||||
|
||||
```bash
|
||||
# Check container status
|
||||
docker compose -f docker-compose.prod.yml ps docs
|
||||
# Should show "Up"
|
||||
|
||||
# Test URL
|
||||
curl -I https://docs.example.com
|
||||
# Should return HTTP/2 200
|
||||
```
|
||||
|
||||
Visit `https://docs.example.com` in your browser.
|
||||
|
||||
## Updating Documentation
|
||||
|
||||
When docs are updated:
|
||||
|
||||
```bash
|
||||
cd ~/reflector
|
||||
git pull
|
||||
|
||||
# Refresh OpenAPI spec from backend
|
||||
docker compose -f docker-compose.prod.yml exec server curl -s http://localhost:1250/openapi.json > docs/static/openapi.json
|
||||
|
||||
# Rebuild docs
|
||||
docker compose -f docker-compose.prod.yml up -d --build docs
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Missing openapi.json during build
|
||||
- Make sure you ran the pre-fetch step first (Step 1)
|
||||
- Verify `docs/static/openapi.json` exists and is ~70KB
|
||||
- Re-run: `docker compose exec server curl -s http://localhost:1250/openapi.json > docs/static/openapi.json`
|
||||
|
||||
### Build fails with "Docusaurus found broken links"
|
||||
- This happens if `onBrokenLinks: 'throw'` is set in docusaurus.config.ts
|
||||
- Solution is already in Dockerfile: uses `sed` to change to `'warn'` during build
|
||||
|
||||
### 404 on all pages
|
||||
- Docusaurus baseUrl might be wrong - should be `/` for custom domain
|
||||
- Check `docs/docusaurus.config.ts`: `baseUrl: '/'`
|
||||
|
||||
### Docs not updating after rebuild
|
||||
- Force rebuild: `docker compose -f docker-compose.prod.yml build --no-cache docs`
|
||||
- Then: `docker compose -f docker-compose.prod.yml up -d docs`
|
||||
@@ -1,171 +0,0 @@
|
||||
---
|
||||
sidebar_position: 4
|
||||
title: Modal.com Setup
|
||||
---
|
||||
|
||||
# Modal.com Setup
|
||||
|
||||
This page covers Modal.com GPU setup in detail. For the complete deployment guide, see [Deployment Guide](./overview).
|
||||
|
||||
Reflector uses [Modal.com](https://modal.com) for GPU-accelerated audio processing. This guide walks you through deploying the required GPU functions.
|
||||
|
||||
## What is Modal.com?
|
||||
|
||||
Modal is a serverless GPU platform. You deploy Python code that runs on their GPUs, and pay only for actual compute time. Reflector uses Modal for:
|
||||
|
||||
- **Transcription**: Whisper model for speech-to-text
|
||||
- **Diarization**: Pyannote model for speaker identification
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Modal.com account** - Sign up at https://modal.com (free tier available)
|
||||
2. **HuggingFace account** - Required for Pyannote diarization models:
|
||||
- Create account at https://huggingface.co
|
||||
- Accept **both** Pyannote licenses:
|
||||
- https://huggingface.co/pyannote/speaker-diarization-3.1
|
||||
- https://huggingface.co/pyannote/segmentation-3.0
|
||||
- Generate access token at https://huggingface.co/settings/tokens
|
||||
|
||||
## Deployment
|
||||
|
||||
**Location: YOUR LOCAL COMPUTER (laptop/desktop)**
|
||||
|
||||
Modal CLI requires browser authentication, so this must run on a machine with a browser - not on a headless server.
|
||||
|
||||
### Install Modal CLI
|
||||
|
||||
```bash
|
||||
pip install modal
|
||||
```
|
||||
|
||||
### Authenticate with Modal
|
||||
|
||||
```bash
|
||||
modal setup
|
||||
```
|
||||
|
||||
This opens your browser for authentication. Complete the login flow.
|
||||
|
||||
### Clone Repository and Deploy
|
||||
|
||||
```bash
|
||||
git clone https://github.com/monadical-sas/reflector.git
|
||||
cd reflector/gpu/modal_deployments
|
||||
./deploy-all.sh --hf-token YOUR_HUGGINGFACE_TOKEN
|
||||
```
|
||||
|
||||
Or run interactively (script will prompt for token):
|
||||
```bash
|
||||
./deploy-all.sh
|
||||
```
|
||||
|
||||
### What the Script Does
|
||||
|
||||
1. **Prompts for HuggingFace token** - Needed to download the Pyannote diarization model
|
||||
2. **Generates API key** - Creates a secure random key for authenticating requests to GPU functions
|
||||
3. **Creates Modal secrets**:
|
||||
- `hf_token` - Your HuggingFace token
|
||||
- `reflector-gpu` - The generated API key
|
||||
4. **Deploys GPU functions** - Transcriber (Whisper) and Diarizer (Pyannote)
|
||||
5. **Outputs configuration** - Prints URLs and API key to console
|
||||
|
||||
### Example Output
|
||||
|
||||
```
|
||||
==========================================
|
||||
Reflector GPU Functions Deployment
|
||||
==========================================
|
||||
|
||||
Generating API key for GPU services...
|
||||
Creating Modal secrets...
|
||||
-> Creating secret: hf_token
|
||||
-> Creating secret: reflector-gpu
|
||||
|
||||
Deploying transcriber (Whisper)...
|
||||
-> https://yourname--reflector-transcriber-web.modal.run
|
||||
|
||||
Deploying diarizer (Pyannote)...
|
||||
-> https://yourname--reflector-diarizer-web.modal.run
|
||||
|
||||
==========================================
|
||||
Deployment complete!
|
||||
==========================================
|
||||
|
||||
Copy these values to your server's server/.env file:
|
||||
|
||||
# --- Modal GPU Configuration ---
|
||||
TRANSCRIPT_BACKEND=modal
|
||||
TRANSCRIPT_URL=https://yourname--reflector-transcriber-web.modal.run
|
||||
TRANSCRIPT_MODAL_API_KEY=abc123...
|
||||
|
||||
DIARIZATION_BACKEND=modal
|
||||
DIARIZATION_URL=https://yourname--reflector-diarizer-web.modal.run
|
||||
DIARIZATION_MODAL_API_KEY=abc123...
|
||||
# --- End Modal Configuration ---
|
||||
```
|
||||
|
||||
Copy the output and paste it into your `server/.env` file on your server.

## Costs

Modal charges based on GPU compute time:
- Functions scale to zero when not in use (no cost when idle)
- You only pay for actual processing time
- Free tier includes $30/month of credits

Typical costs for audio processing:
- Transcription: ~$0.01-0.05 per minute of audio
- Diarization: ~$0.02-0.10 per minute of audio

## Troubleshooting

### "Modal CLI not installed"
```bash
pip install modal
```

### "Not authenticated with Modal"
```bash
modal setup
# Complete browser authentication
```

### "Failed to create secret hf_token"
- Verify your HuggingFace token is valid
- Ensure you've accepted both Pyannote licenses
- The token needs `read` permission

### Deployment fails
Check the Modal dashboard for detailed error logs:
- Visit https://modal.com/apps
- Click on the failed function
- View build and runtime logs

### Re-running deployment
The script is safe to re-run. It will:
- Update existing secrets if they exist
- Redeploy functions with the latest code
- Output new configuration (the API key stays the same if the secret exists)

## Manual Deployment (Advanced)

If you prefer to deploy the functions individually:

```bash
cd gpu/modal_deployments

# Create secrets manually
modal secret create hf_token HF_TOKEN=your-hf-token
modal secret create reflector-gpu REFLECTOR_GPU_APIKEY=$(openssl rand -hex 32)

# Deploy each function
modal deploy reflector_transcriber.py
modal deploy reflector_diarizer.py
```

## Monitoring

View your deployed functions and their usage:
- **Modal Dashboard**: https://modal.com/apps
- **Function logs**: Click on any function to view logs
- **Usage**: View compute time and costs in the dashboard

---
sidebar_position: 1
title: Deployment Guide
---

# Deployment Guide

This guide walks you through deploying Reflector from scratch. Follow these steps in order.

## What You'll Set Up

```mermaid
flowchart LR
    User --> Caddy["Caddy (auto-SSL)"]
    Caddy --> Frontend["Frontend (Next.js)"]
    Caddy --> Backend["Backend (FastAPI)"]
    Backend --> PostgreSQL
    Backend --> Redis
    Backend --> Workers["Celery Workers"]
    Workers --> PostgreSQL
    Workers --> Redis
    Workers --> GPU["GPU Processing<br/>(Modal.com OR Self-hosted)"]
```

## Prerequisites

Before starting, you need:

- **Production server** - 4+ cores, 8GB+ RAM, public IP
- **Two domain names** - e.g., `app.example.com` (frontend) and `api.example.com` (backend)
- **GPU processing** - Choose one:
  - Modal.com account, OR
  - GPU server with NVIDIA GPU (8GB+ VRAM)
- **HuggingFace account** - Free at https://huggingface.co
  - Accept both Pyannote licenses (required for speaker diarization):
    - https://huggingface.co/pyannote/speaker-diarization-3.1
    - https://huggingface.co/pyannote/segmentation-3.0
- **LLM API** - For summaries and topic detection. Choose one:
  - OpenAI API key at https://platform.openai.com/account/api-keys, OR
  - Any OpenAI-compatible endpoint (vLLM, LiteLLM, Ollama, etc.)
- **AWS S3 bucket** - For storing audio files and transcripts (see [S3 Setup](#create-s3-bucket-for-transcript-storage) below)

### Optional (for live meeting rooms)

- [ ] **Daily.co account** - Free tier at https://dashboard.daily.co
- [ ] **AWS S3 bucket + IAM Role** - For Daily.co recording storage (separate from transcript storage)

---

## Configure DNS

```
Type: A    Name: app    Value: <your-server-ip>
Type: A    Name: api    Value: <your-server-ip>
```
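
Give the records time to propagate, then confirm both resolve to your server before continuing:

```bash
# Both should print your server's public IP
dig +short app.example.com
dig +short api.example.com
```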

---

## Deploy GPU Processing

Reflector requires GPU processing for transcription and speaker diarization. Choose one option:

| | **Modal.com (Cloud)** | **Self-Hosted GPU** |
|---|---|---|
| **Best for** | No GPU hardware, zero maintenance | Own GPU server, full control |
| **Pricing** | Pay-per-use | Fixed infrastructure cost |

### Option A: Modal.com (Serverless Cloud GPU)

#### Accept HuggingFace Licenses

Visit both pages and click "Accept":
- https://huggingface.co/pyannote/speaker-diarization-3.1
- https://huggingface.co/pyannote/segmentation-3.0

Generate a token at https://huggingface.co/settings/tokens

#### Deploy to Modal

An install script handles this setup. It uses the Modal API to configure all the necessary moving parts.

Alternatively, every operation the script performs can be done manually in the Modal UI.

```bash
pip install modal
modal setup  # opens browser for authentication

git clone https://github.com/monadical-sas/reflector.git
cd reflector/gpu/modal_deployments
./deploy-all.sh --hf-token YOUR_HUGGINGFACE_TOKEN
```

**Save the output** - copy the configuration block, you'll need it soon.

See [Modal Setup](./modal-setup) for troubleshooting and details.

### Option B: Self-Hosted GPU

**Location: YOUR GPU SERVER**

Requires: NVIDIA GPU with 8GB+ VRAM, Ubuntu 22.04+, 40-50GB disk (Docker) or 25-30GB (Systemd).

See [Self-Hosted GPU Setup](./self-hosted-gpu-setup) for complete instructions. Quick summary:

1. Install NVIDIA drivers and Docker (or uv for systemd)
2. Clone repository: `git clone https://github.com/monadical-sas/reflector.git`
3. Configure `.env` with HuggingFace token
4. Start service (Docker Compose or systemd)
5. Set up Caddy reverse proxy for HTTPS

**Save your API key and HTTPS URL** - you'll need them soon.

---

## Prepare Server

**Location: dedicated Reflector server**

### Install Docker

```bash
ssh user@your-server-ip

curl -fsSL https://get.docker.com | sh
sudo usermod -aG docker $USER

# Log out and back in for group changes
exit
ssh user@your-server-ip

docker --version  # verify
```

### Firewall

Ensure ports 80 (HTTP) and 443 (HTTPS) are open for inbound traffic. The method varies by cloud provider and OS configuration.

### Clone Repository

The Docker images contain all application code. You clone the repository for configuration files and the compose definition:

```bash
git clone https://github.com/monadical-sas/reflector.git
cd reflector
```

---

## Create S3 Bucket for Transcript Storage

Reflector requires AWS S3 to store audio files during processing.

### Create Bucket

```bash
# Choose a unique bucket name
BUCKET_NAME="reflector-transcripts-yourname"
AWS_REGION="us-east-1"

# Create bucket
aws s3 mb s3://$BUCKET_NAME --region $AWS_REGION
```

### Create IAM User

Create an IAM user with S3 access for Reflector:

1. Go to AWS IAM Console → Users → Create User
2. Name: `reflector-transcripts`
3. Attach policy: `AmazonS3FullAccess` (or create a custom policy for just your bucket - see the sketch below)
4. Create access key (Access key ID + Secret access key)

Save these credentials - you'll need them in the next step.
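
If you prefer least privilege over `AmazonS3FullAccess`, a bucket-scoped policy can be created from the CLI. A sketch using standard AWS policy syntax - the exact set of actions Reflector needs is an assumption, so widen it if uploads fail:

```bash
cat > reflector-s3-policy.json << 'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::reflector-transcripts-yourname",
        "arn:aws:s3:::reflector-transcripts-yourname/*"
      ]
    }
  ]
}
EOF

aws iam create-policy \
  --policy-name reflector-transcripts-s3 \
  --policy-document file://reflector-s3-policy.json
```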

---

## Configure Environment

Reflector has two env files:
- `server/.env` - Backend configuration
- `www/.env` - Frontend configuration

### Backend Configuration

```bash
cp server/.env.example server/.env
nano server/.env
```

**Required settings:**
```env
# Database (defaults work with docker-compose.prod.yml)
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector

# Redis
REDIS_HOST=redis
CELERY_BROKER_URL=redis://redis:6379/1
CELERY_RESULT_BACKEND=redis://redis:6379/1

# Your domains
BASE_URL=https://api.example.com
CORS_ORIGIN=https://app.example.com
CORS_ALLOW_CREDENTIALS=true

# Secret key - generate with: openssl rand -hex 32
SECRET_KEY=<your-generated-secret>

# GPU Processing - choose ONE option:

# Option A: Modal.com (paste from deploy-all.sh output)
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://yourname--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=<from-deploy-all.sh-output>
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://yourname--reflector-diarizer-web.modal.run
DIARIZATION_MODAL_API_KEY=<from-deploy-all.sh-output>

# Option B: Self-hosted GPU (use your GPU server URL and API key)
# TRANSCRIPT_BACKEND=modal
# TRANSCRIPT_URL=https://gpu.example.com
# TRANSCRIPT_MODAL_API_KEY=<your-generated-api-key>
# DIARIZATION_BACKEND=modal
# DIARIZATION_URL=https://gpu.example.com
# DIARIZATION_MODAL_API_KEY=<your-generated-api-key>

# Storage - where to store audio files and transcripts (requires AWS S3)
TRANSCRIPT_STORAGE_BACKEND=aws
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1

# LLM - for generating titles, summaries, and topics
LLM_API_KEY=sk-your-openai-api-key
LLM_MODEL=gpt-4o-mini
# LLM_URL=https://api.openai.com/v1  # Optional: custom endpoint (vLLM, LiteLLM, Ollama, etc.)

# Auth - disable for initial setup (see the dedicated authentication step below)
AUTH_BACKEND=none
```

### Frontend Configuration

```bash
cp www/.env.example www/.env
nano www/.env
```

**Required settings:**
```env
# Your domains
SITE_URL=https://app.example.com
API_URL=https://api.example.com
WEBSOCKET_URL=wss://api.example.com
SERVER_API_URL=http://server:1250

# NextAuth
NEXTAUTH_URL=https://app.example.com
NEXTAUTH_SECRET=<generate-with-openssl-rand-hex-32>

# Disable login requirement for initial setup
FEATURE_REQUIRE_LOGIN=false
```

---

## Configure Caddy

```bash
cp Caddyfile.example Caddyfile
nano Caddyfile
```

Replace `example.com` with your domains. The `{$VAR:default}` syntax uses Caddy's env var substitution - you can either edit the file directly or set the `FRONTEND_DOMAIN` and `API_DOMAIN` environment variables.

```
{$FRONTEND_DOMAIN:app.example.com} {
    reverse_proxy web:3000
}

{$API_DOMAIN:api.example.com} {
    reverse_proxy server:1250
}
```

---

## Start Services

```bash
docker compose -f docker-compose.prod.yml up -d
```

Wait for PostgreSQL to be ready, then run migrations:

```bash
# Wait for postgres to be healthy (may take 30-60 seconds on first run)
docker compose -f docker-compose.prod.yml exec postgres pg_isready -U reflector

# Run database migrations
docker compose -f docker-compose.prod.yml exec server uv run alembic upgrade head
```
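
To confirm the migrations applied, you can ask Alembic for the current revision (output format depends on your migration history):

```bash
docker compose -f docker-compose.prod.yml exec server uv run alembic current
# Should print the head revision id, e.g. "abc123def456 (head)"
```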

---

## Verify Deployment

### Check services
```bash
docker compose -f docker-compose.prod.yml ps
# All should show "Up"
```

### Test API
```bash
curl https://api.example.com/health
# Should return: {"status":"healthy"}
```

### Test Frontend
- Visit https://app.example.com
- You should see the Reflector interface
- Try uploading an audio file to test transcription

---

## Enable Authentication (Required for Live Rooms)

By default, Reflector is open (no login required). **Authentication is required if you want to use Live Meeting Rooms.**

See [Authentication Setup](./auth-setup) for full Authentik OAuth configuration.

Quick summary:
1. Deploy Authentik on your server
2. Create OAuth provider in Authentik
3. Extract public key for JWT verification
4. Update `server/.env`: `AUTH_BACKEND=jwt` + `AUTH_JWT_AUDIENCE`
5. Update `www/.env`: `FEATURE_REQUIRE_LOGIN=true` + Authentik credentials
6. Mount JWT keys volume and restart services

---

## Enable Live Meeting Rooms

**Requires: Authentication Step**

Live rooms require Daily.co and AWS S3. See [Daily.co Setup](./daily-setup) for complete S3/IAM configuration instructions.

Note that Reflector also supports Whereby as a call provider - this doc doesn't cover its setup yet.

Quick config - add to `server/.env`:

```env
DEFAULT_VIDEO_PLATFORM=daily
DAILY_API_KEY=<from-daily.co-dashboard>
DAILY_SUBDOMAIN=<your-daily-subdomain>

# S3 for recording storage
DAILYCO_STORAGE_AWS_BUCKET_NAME=<your-bucket>
DAILYCO_STORAGE_AWS_REGION=us-east-1
DAILYCO_STORAGE_AWS_ROLE_ARN=<arn:aws:iam::ACCOUNT:role/DailyCo>
```

Reload env and restart:
```bash
docker compose -f docker-compose.prod.yml up -d server worker
```

---

## Troubleshooting

### Check logs for errors
```bash
docker compose -f docker-compose.prod.yml logs server --tail 20
docker compose -f docker-compose.prod.yml logs worker --tail 20
```

### Services won't start
```bash
docker compose -f docker-compose.prod.yml logs
```

### CORS errors in browser
- Verify `CORS_ORIGIN` in `server/.env` matches your frontend domain exactly (including `https://`)
- Reload env: `docker compose -f docker-compose.prod.yml up -d server`

### SSL certificate errors
- Caddy auto-provisions Let's Encrypt certificates
- Ensure ports 80 and 443 are open
- Check: `docker compose -f docker-compose.prod.yml logs caddy`

### Transcription not working
- Check Modal dashboard: https://modal.com/apps
- Verify URLs in `server/.env` match deployed functions
- Check worker logs: `docker compose -f docker-compose.prod.yml logs worker`

### "Login required" but auth not configured
- Set `FEATURE_REQUIRE_LOGIN=false` in `www/.env`
- Rebuild frontend: `docker compose -f docker-compose.prod.yml up -d --force-recreate web`

---
sidebar_position: 2
title: System Requirements
---

# System Requirements

This page lists hardware and software requirements. For the complete deployment guide, see [Deployment Guide](./overview).

## Server Requirements

### Minimum Requirements

- **CPU**: 4 cores
- **RAM**: 8 GB
- **Storage**: 50 GB SSD
- **OS**: Ubuntu 22.04+ or compatible Linux
- **Network**: Public IP address

### Recommended Requirements

- **CPU**: 8+ cores
- **RAM**: 16 GB
- **Storage**: 100 GB SSD
- **Network**: 1 Gbps connection

## Software Requirements

- Docker Engine 20.10+
- Docker Compose 2.0+

## External Services

### Required

- **Two domain names** - One for frontend (e.g., `app.example.com`), one for API (e.g., `api.example.com`)
- **Modal.com account** - For GPU-accelerated transcription and diarization (free tier available)
- **HuggingFace account** - For Pyannote diarization model access
- **OpenAI API key** - For generating summaries and topic detection (https://platform.openai.com/account/api-keys)

### Required for Live Meeting Rooms

- **Daily.co account** - For video conferencing (free tier available at https://dashboard.daily.co)
- **AWS S3 bucket + IAM Role** - For Daily.co to store recordings
- **Another AWS S3 bucket (optional, can reuse the one above)** - For Reflector to store compiled MP3 files and temporary files produced during diarization

### Optional

- **AWS S3** - For cloud storage of recordings and transcripts
- **Authentik** - For SSO/OIDC authentication
- **Sentry** - For error tracking

## Development Requirements

For local development only (not required for production deployment):

- Node.js 22+ (for frontend development)
- Python 3.12+ (for backend development)
- pnpm (for frontend package management)
- uv (for Python package management)

---
sidebar_position: 5
title: Self-Hosted GPU Setup
---

# Self-Hosted GPU Setup

This guide covers deploying Reflector's GPU processing on your own server instead of Modal.com. For the complete deployment guide, see [Deployment Guide](./overview).

## When to Use Self-Hosted GPU

**Choose self-hosted GPU if you:**
- Have GPU hardware available (NVIDIA required)
- Want full control over processing
- Prefer fixed infrastructure costs over pay-per-use
- Have privacy or data locality requirements
- Need to process audio without external API calls

**Choose Modal.com instead if you:**
- Don't have GPU hardware
- Want zero infrastructure management
- Prefer pay-per-use pricing
- Need instant scaling for variable workloads

See [Modal.com Setup](./modal-setup) for cloud GPU deployment.

## What Gets Deployed

The self-hosted GPU service provides the same API endpoints as Modal:
- `POST /v1/audio/transcriptions` - Whisper transcription
- `POST /diarize` - Pyannote speaker diarization

Your main Reflector server connects to this service exactly like it connects to Modal - only the URL changes.

## Prerequisites

### Hardware
- **GPU**: NVIDIA GPU with 8GB+ VRAM (tested on Tesla T4 with 15GB)
- **CPU**: 4+ cores recommended
- **RAM**: 8GB minimum, 16GB recommended
- **Disk**:
  - Docker method: 40-50GB minimum
  - Systemd method: 25-30GB minimum

### Software
- Public IP address
- Domain name with DNS A record pointing to the server

### Accounts
- **HuggingFace account** with accepted Pyannote licenses:
  - https://huggingface.co/pyannote/speaker-diarization-3.1
  - https://huggingface.co/pyannote/segmentation-3.0
- **HuggingFace access token** from https://huggingface.co/settings/tokens

## Choose Deployment Method

Two methods are covered below: **Docker** (containerized, larger disk footprint) and **Systemd** (runs directly on the host via uv). Pick one, then continue to the HTTPS section.

---

## Docker Deployment

### Step 1: Install NVIDIA Driver

```bash
sudo apt update
sudo apt install -y nvidia-driver-535

# Load kernel modules
sudo modprobe nvidia

# Verify installation
nvidia-smi
```

Expected output: GPU details with driver version and CUDA version.

### Step 2: Install Docker

```bash
curl -fsSL https://get.docker.com | sudo sh
sudo usermod -aG docker $USER

# Log out and back in for group changes
exit
# SSH back in
```

### Step 3: Install NVIDIA Container Toolkit

```bash
# Add NVIDIA repository
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \
  sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg

curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
  sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
  sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list

# Install toolkit
sudo apt-get update
sudo apt-get install -y nvidia-container-toolkit

# Configure Docker runtime
sudo nvidia-ctk runtime configure --runtime=docker
sudo systemctl restart docker
```

### Step 4: Clone Repository and Configure

```bash
git clone https://github.com/monadical-sas/reflector.git
cd reflector/gpu/self_hosted

# Create environment file
cat > .env << EOF
REFLECTOR_GPU_APIKEY=$(openssl rand -hex 16)
HF_TOKEN=your_huggingface_token_here
EOF

# Note the generated API key - you'll need it for the main server config
cat .env
```

### Step 5: Create Docker Compose File

```bash
cat > compose.yml << 'EOF'
services:
  reflector_gpu:
    build:
      context: .
    ports:
      - "8000:8000"
    env_file:
      - .env
    volumes:
      - ./cache:/root/.cache
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: unless-stopped
EOF
```

### Step 6: Build and Start

```bash
# Build image (takes ~5 minutes, downloads ~10GB)
sudo docker compose build

# Start service
sudo docker compose up -d

# Wait for startup and verify
sleep 30
sudo docker compose logs
```

Look for: `INFO: Application startup complete. Uvicorn running on http://0.0.0.0:8000`

### Step 7: Verify GPU Access

```bash
# Check GPU is accessible from the container
sudo docker exec $(sudo docker ps -q) nvidia-smi
```

Should show GPU with ~3GB VRAM used (models loaded).
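
You can also exercise the API itself with the generated key. A minimal sketch - the bearer-token header is an assumption (check the service code if you get a 401), and any short audio file can stand in for `sample.wav`:

```bash
source .env
curl -X POST http://localhost:8000/v1/audio/transcriptions \
  -H "Authorization: Bearer $REFLECTOR_GPU_APIKEY" \
  -F "file=@sample.wav"
# A JSON transcription payload indicates the pipeline works end to end
```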

---

## Systemd Deployment

### Step 1: Install NVIDIA Driver

```bash
sudo apt update
sudo apt install -y nvidia-driver-535

# Load kernel modules
sudo modprobe nvidia

# Verify installation
nvidia-smi
```

### Step 2: Install Dependencies

```bash
# Install ffmpeg
sudo apt install -y ffmpeg

# Install uv package manager
curl -LsSf https://astral.sh/uv/install.sh | sh
source ~/.local/bin/env

# Clone repository
git clone https://github.com/monadical-sas/reflector.git
cd reflector/gpu/self_hosted
```

### Step 3: Configure Environment

```bash
# Create environment file
cat > .env << EOF
REFLECTOR_GPU_APIKEY=$(openssl rand -hex 16)
HF_TOKEN=your_huggingface_token_here
EOF

# Note the generated API key
cat .env
```

### Step 4: Install Python Packages

```bash
# Install dependencies (~3GB download)
uv sync
```

### Step 5: Create Systemd Service

```bash
# Generate library paths for NVIDIA packages
export NVIDIA_LIBS=$(find ~/reflector/gpu/self_hosted/.venv/lib/python3.12/site-packages/nvidia -name lib -type d | tr '\n' ':')

# Load environment variables
source ~/reflector/gpu/self_hosted/.env

# Create service file
sudo tee /etc/systemd/system/reflector-gpu.service << EOFSVC
[Unit]
Description=Reflector GPU Service (Transcription & Diarization)
After=network.target

[Service]
Type=simple
User=$USER
WorkingDirectory=$HOME/reflector/gpu/self_hosted
Environment="PATH=$HOME/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Environment="HF_TOKEN=${HF_TOKEN}"
Environment="REFLECTOR_GPU_APIKEY=${REFLECTOR_GPU_APIKEY}"
Environment="LD_LIBRARY_PATH=${NVIDIA_LIBS}"
ExecStart=$HOME/reflector/gpu/self_hosted/.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOFSVC

# Enable and start
sudo systemctl daemon-reload
sudo systemctl enable reflector-gpu
sudo systemctl start reflector-gpu
```

### Step 6: Verify Service

```bash
# Check status
sudo systemctl status reflector-gpu

# View logs
sudo journalctl -u reflector-gpu -f
```

Look for: `INFO: Application startup complete.`
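
A quick local check that the service answers HTTP before putting Caddy in front of it:

```bash
curl -I http://localhost:8000/docs
# Expect an HTTP 200 from the API docs page
```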

---

## Configure HTTPS with Caddy

Both deployment methods need HTTPS for production. Caddy handles SSL automatically.

### Install Caddy

```bash
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https curl

curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | \
  sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg

curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | \
  sudo tee /etc/apt/sources.list.d/caddy-stable.list

sudo apt update
sudo apt install -y caddy
```

### Configure Reverse Proxy

```bash
sudo tee /etc/caddy/Caddyfile << 'EOF'
gpu.example.com {
    reverse_proxy localhost:8000
}
EOF

# Reload Caddy (auto-provisions SSL certificate)
sudo systemctl reload caddy
```

Replace `gpu.example.com` with your domain.

### Verify HTTPS

```bash
curl -I https://gpu.example.com/docs
# Should return HTTP/2 200
```

---

## Configure Main Reflector Server

On your main Reflector server, update `server/.env`:

```env
# GPU Processing - Self-hosted
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://gpu.example.com
TRANSCRIPT_MODAL_API_KEY=<your-generated-api-key>

DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://gpu.example.com
DIARIZATION_MODAL_API_KEY=<your-generated-api-key>
```

**Note:** The backend type is `modal` because the self-hosted GPU service implements the same API contract as Modal.com. This lets you switch between cloud and self-hosted GPU processing by changing only the URL and API key.

Restart services to apply:

```bash
docker compose -f docker-compose.prod.yml restart server worker
```

---

## Service Management

All commands in this section assume you're in `~/reflector/gpu/self_hosted/`.

### Docker

```bash
# View logs
sudo docker compose logs -f

# Restart service
sudo docker compose restart

# Stop service
sudo docker compose down

# Check status
sudo docker compose ps
```

### Systemd

```bash
# View logs
sudo journalctl -u reflector-gpu -f

# Restart service
sudo systemctl restart reflector-gpu

# Stop service
sudo systemctl stop reflector-gpu

# Check status
sudo systemctl status reflector-gpu
```

### Monitor GPU

```bash
# Check GPU usage
nvidia-smi

# Watch in real-time
watch -n 1 nvidia-smi
```

**Typical GPU memory usage:**
- Idle (models loaded): ~3GB VRAM
- During transcription: ~4-5GB VRAM

---

## Troubleshooting

### nvidia-smi fails after driver install

```bash
# Manually load kernel modules
sudo modprobe nvidia
nvidia-smi
```

### Service fails with "Could not download pyannote pipeline"

1. Verify `HF_TOKEN` is set and valid: `echo $HF_TOKEN`
2. Check model access at https://huggingface.co/pyannote/speaker-diarization-3.1
3. Regenerate the service/compose with the correct token
4. Restart the service

### cuDNN library loading errors (Systemd only)

Symptom: `Unable to load libcudnn_cnn.so`

Regenerate the systemd service file - the `LD_LIBRARY_PATH` must include all NVIDIA package directories.
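
To see what the running unit actually has in its environment (and confirm `LD_LIBRARY_PATH` was populated), inspect the unit properties:

```bash
systemctl show reflector-gpu --property=Environment
# LD_LIBRARY_PATH should list the .venv/.../nvidia/*/lib directories
```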

### Cannot connect to HTTPS endpoint

1. Verify DNS resolves: `dig +short gpu.example.com`
2. Check firewall: `sudo ufw status` (ports 80, 443 must be open)
3. Check Caddy: `sudo systemctl status caddy`
4. View Caddy logs: `sudo journalctl -u caddy -n 50`

### SSL certificate not provisioning

Requirements for Let's Encrypt:
- Ports 80 and 443 publicly accessible
- DNS resolves to the server's public IP
- Valid domain (not localhost or a private IP)

### Docker container won't start

```bash
# Check logs
sudo docker compose logs

# Common issues:
# - Port 8000 already in use
# - GPU not accessible (nvidia-ctk not configured)
# - Missing .env file
```

---

## Updating

### Docker

```bash
cd ~/reflector/gpu/self_hosted
git pull
sudo docker compose build
sudo docker compose up -d
```

### Systemd

```bash
cd ~/reflector/gpu/self_hosted
git pull
uv sync
sudo systemctl restart reflector-gpu
```

---
title: whereby setup
---

# whereby setup

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.

---
title: zulip setup
---

# zulip setup

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.

---
sidebar_position: 1
title: Introduction
---

# Welcome to Reflector

Reflector is a privacy-focused, self-hosted, AI-powered audio transcription and meeting analysis platform that provides real-time transcription, speaker diarization, translation, and summarization for audio content and live meetings. You keep complete control over your data and infrastructure, with GPU processing on Modal.com or on your own hardware.

## What is Reflector?

Reflector is a web application that utilizes AI to process audio content, providing:

- **Real-time Transcription**: Convert speech to text using [Whisper](https://github.com/openai/whisper) (multi-language) or [Parakeet](https://github.com/NVIDIA/NeMo) (English) models
- **Speaker Diarization**: Identify and label different speakers using [Pyannote](https://github.com/pyannote/pyannote-audio) 3.1
- **Live Translation**: Translate audio content in real-time to 100+ languages with [Facebook Seamless-M4T](https://github.com/facebookresearch/seamless_communication)
- **Topic Detection & Summarization**: Extract key topics and generate concise summaries using LLMs
- **Meeting Recording**: Create permanent records of meetings with searchable transcripts

## Features

| Feature | Public Mode | Private Mode |
|---------|------------|--------------|
| **Authentication** | None required | Required |
| **Audio Upload** | ✅ | ✅ |
| **Live Microphone Streaming** | ✅ | ✅ |
| **Transcription** | ✅ | ✅ |
| **Speaker Diarization** | ✅ | ✅ |
| **Translation** | ✅ | ✅ |
| **Topic Detection** | ✅ | ✅ |
| **Summarization** | ✅ | ✅ |
| **Virtual Meeting Rooms (Whereby)** | ❌ | ✅ |
| **Browse Transcripts Page** | ❌ | ✅ |
| **Search Functionality** | ❌ | ✅ |
| **Persistent Storage** | ❌ | ✅ |

## Architecture Overview

Reflector consists of three main components:

- **Frontend**: React application built with Next.js 14
- **Backend**: Python server using FastAPI
- **Processing**: Scalable GPU workers for ML inference (Modal.com or local)

## Getting Started

Ready to deploy Reflector? Head over to our [Installation Guide](./installation/overview) to set up your own instance.

For a quick overview of how Reflector processes audio, check out our [Pipeline Documentation](./pipelines/overview).

## Open Source

Reflector is open source software developed by [Monadical](https://monadical.com) and licensed under the **MIT License**. We welcome contributions from the community!

- [GitHub Repository](https://github.com/monadical-sas/reflector)
- [Issue Tracker](https://github.com/monadical-sas/reflector/issues)
- [Pull Requests](https://github.com/monadical-sas/reflector/pulls)

## Support

Need help? Reach out to the community through GitHub Discussions.

---
sidebar_position: 2
title: File Processing Pipeline
---

# File Processing Pipeline

The file processing pipeline handles uploaded audio files, optimizing for accuracy and throughput.

## Pipeline Stages

### 1. Input Stage

**Accepted Formats:**
- MP3 (most common)
- WAV (uncompressed)
- M4A (Apple format)
- WebM (browser recordings)
- MP4 (video with audio track)

**File Validation:**
- Maximum size: 2GB (configurable)
- Minimum duration: 5 seconds
- Maximum duration: 6 hours
- Sample rate: Any (will be resampled)

### 2. Pre-processing

**Audio Normalization:**
```yaml
# Convert to standard format
- Sample rate: 16kHz (Whisper requirement)
- Channels: Mono
- Bit depth: 16-bit
- Format: WAV
```

**Noise Reduction (Optional):**
- Background noise removal
- Echo cancellation
- High-pass filter for rumble

### 3. Chunking Strategy

**Default Configuration:**
```yaml
chunk_size: 30  # seconds
overlap: 1  # seconds
max_parallel: 10
silence_detection: true
```

**Chunking with Silence Detection:**
- Detects silence periods
- Attempts to break at natural pauses
- Maintains context with overlap
- Preserves sentence boundaries

**Chunk Metadata:**
```json
{
  "chunk_id": "chunk_001",
  "start_time": 0.0,
  "end_time": 30.0,
  "duration": 30.0,
  "has_speech": true,
  "audio_hash": "sha256:..."
}
```

### 4. Transcription Processing

**Whisper Models:**

| Model | Size | Speed | Accuracy | Use Case |
|-------|------|-------|----------|----------|
| tiny | 39M | Very Fast | 85% | Quick drafts |
| base | 74M | Fast | 89% | Good balance |
| small | 244M | Medium | 91% | Better accuracy |
| medium | 769M | Slow | 93% | High quality |
| large-v3 | 1550M | Very Slow | 96% | Best quality |

**Processing Configuration:**
```python
transcription_config = {
    "model": "whisper-base",
    "language": "auto",  # or specify: "en", "es", etc.
    "task": "transcribe",  # or "translate"
    "temperature": 0,  # deterministic
    "compression_ratio_threshold": 2.4,
    "no_speech_threshold": 0.6,
    "condition_on_previous_text": True,
    "initial_prompt": None,  # optional context
}
```

**Parallel Processing:**
- Each chunk processed independently
- GPU batching for efficiency
- Automatic load balancing
- Failure isolation

### 5. Diarization (Speaker Identification)

**Pyannote 3.1 Pipeline:**

1. **Voice Activity Detection (VAD)**
   - Identifies speech segments
   - Filters out silence and noise
   - Precision: 95%+

2. **Speaker Embedding**
   - Extracts voice characteristics
   - 256-dimensional vectors
   - Speaker-invariant features

3. **Clustering**
   - Groups similar voice embeddings
   - Agglomerative clustering
   - Automatic speaker count detection

4. **Segmentation**
   - Assigns speaker labels to time segments
   - Handles overlapping speech
   - Minimum segment duration: 0.5s

**Configuration:**
```python
diarization_config = {
    "min_speakers": 1,
    "max_speakers": 10,
    "min_duration": 0.5,
    "clustering": "AgglomerativeClustering",
    "embedding_model": "speechbrain/spkrec-ecapa-voxceleb",
}
```

### 6. Alignment & Merging

**Chunk Assembly:**
```python
# Merge overlapping chunks into a single timeline
merged_transcript = []
previous = None
for chunk in chunks:
    # Drop text duplicated in the overlap region
    if previous is not None and chunk.start < previous.end:
        chunk.text = resolve_overlap(previous, chunk)
    merged_transcript.append(chunk)
    previous = chunk
```

**Speaker Alignment:**
- Map the diarization timeline to the transcript
- Resolve speaker changes mid-sentence
- Handle multiple speakers per segment

**Quality Checks:**
- Timeline consistency
- No gaps in transcript
- Speaker label continuity
- Confidence score validation

### 7. Post-processing Chain

**Text Formatting:**
- Sentence capitalization
- Punctuation restoration
- Number formatting
- Acronym detection

**Translation (Optional):**
```python
translation_config = {
    "model": "facebook/seamless-m4t-medium",
    "source_lang": "auto",
    "target_langs": ["es", "fr", "de"],
    "preserve_formatting": True
}
```

**Topic Detection:**
- LLM-based analysis
- Extract 3-5 key topics
- Keyword extraction
- Entity recognition

**Summarization:**
```python
summary_config = {
    "model": "openai-compatible",
    "max_length": 500,
    "style": "bullets",  # or "paragraph"
    "include_action_items": True,
    "include_decisions": True
}
```

### 8. Storage & Delivery

**Database Storage:**
```sql
-- Main transcript record
INSERT INTO transcripts (
    id, title, duration, language,
    transcript_text, transcript_json,
    speakers, topics, summary,
    created_at, processing_time
) VALUES (...);

-- Processing metadata
INSERT INTO processing_metadata (
    transcript_id, model_versions,
    chunk_count, total_chunks,
    error_count, warnings
) VALUES (...);
```

**File Storage:**
- Original audio: S3 (optional)
- Processed chunks: Temporary (24h)
- Transcript exports: JSON, SRT, VTT, TXT

**Notification:**
```json
{
  "type": "webhook",
  "url": "https://your-app.com/webhook",
  "payload": {
    "transcript_id": "...",
    "status": "completed",
    "duration": 3600,
    "processing_time": 180
  }
}
```

## Processing Times

**Estimated times for 1 hour of audio:**

| Component | Fast Mode | Balanced | High Quality |
|-----------|-----------|----------|--------------|
| Pre-processing | 10s | 10s | 10s |
| Transcription | 60s | 180s | 600s |
| Diarization | 30s | 60s | 120s |
| Post-processing | 20s | 30s | 60s |
| **Total** | **2 min** | **5 min** | **13 min** |

## Error Handling

### Retry Strategy

```python
@celery.task(
    bind=True,
    max_retries=3,
    default_retry_delay=60,
    retry_backoff=True
)
def process_chunk(self, chunk_id):
    try:
        # Process the chunk; the return value goes to the result backend
        return transcribe(chunk_id)
    except Exception as exc:
        # Retry with exponential backoff (retry_backoff=True)
        raise self.retry(exc=exc)
```

### Partial Recovery

- Continue with successful chunks
- Mark failed chunks in output
- Provide partial transcript
- Report processing issues

### Fallback Options

1. **Model Fallback:**
   - If the large model fails, try medium
   - If GPU fails, try CPU
   - If Modal fails, try local

2. **Quality Degradation:**
   - Reduce chunk size
   - Disable post-processing
   - Skip diarization if needed

## Optimization Tips

### For Speed

1. Use smaller models (tiny/base)
2. Increase parallel chunks
3. Disable diarization
4. Skip post-processing
5. Use GPU acceleration

### For Accuracy

1. Use larger models (medium/large)
2. Enable all pre-processing
3. Reduce chunk size
4. Enable silence detection
5. Multiple-pass processing

### For Cost

1. Use Modal spot instances
2. Batch multiple files
3. Cache common phrases
4. Optimize chunk size
5. Selective post-processing

## Monitoring

### Metrics to Track

```python
metrics = {
    "processing_time": histogram,
    "chunk_success_rate": gauge,
    "model_accuracy": histogram,
    "queue_depth": gauge,
    "gpu_utilization": gauge,
    "cost_per_hour": counter
}
```

### Quality Metrics

- Word Error Rate (WER)
- Diarization Error Rate (DER)
- Confidence scores
- Processing speed
- User feedback

### Alerts

- Processing time > 30 minutes
- Error rate > 5%
- Queue depth > 100
- GPU memory > 90%
- Cost spike detected

---
title: Live pipeline
---

# Live pipeline

Documentation coming soon.

---
title: overview
---

# overview

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.

---
title: API Reference
---

# API Reference

The Reflector API provides a comprehensive RESTful interface for audio transcription, meeting management, and real-time streaming capabilities.

## Base URL

```
http://localhost:8000/v1
```

All API endpoints are prefixed with `/v1/` for versioning.

## Authentication

Reflector supports multiple authentication modes:

- **No Authentication** (Public Mode): Basic transcription and upload functionality
- **JWT Authentication** (Private Mode): Full feature access including meeting rooms and persistent storage
- **OAuth/OIDC via Authentik**: Enterprise single sign-on integration

## Core Endpoints

### Transcripts

Manage audio transcriptions and their associated metadata.

#### List Transcripts
```http
GET /v1/transcripts/
```

Returns a paginated list of transcripts with filtering options.

#### Create Transcript
```http
POST /v1/transcripts/
```

Create a new transcript from uploaded audio or initialize for streaming.

#### Get Transcript
```http
GET /v1/transcripts/{transcript_id}
```

Retrieve detailed information about a specific transcript.

#### Update Transcript
```http
PATCH /v1/transcripts/{transcript_id}
```

Update transcript metadata, summary, or processing status.

#### Delete Transcript
```http
DELETE /v1/transcripts/{transcript_id}
```

Remove a transcript and its associated data.

### Audio Processing

#### Upload Audio
```http
POST /v1/transcripts_audio/{transcript_id}/upload
```

Upload an audio file for transcription processing.

**Supported formats:**
- WAV, MP3, M4A, FLAC, OGG
- Maximum file size: 500MB
- Sample rates: 8kHz - 48kHz
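
A minimal curl sketch of the upload flow - the multipart field name `file` mirrors the Python example at the end of this page, and the bearer token is only needed in private mode:

```bash
curl -X POST \
  "http://localhost:8000/v1/transcripts_audio/$TRANSCRIPT_ID/upload" \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -F "file=@meeting.mp3"
```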

#### Download Audio
```http
GET /v1/transcripts_audio/{transcript_id}/download
```

Download the original or processed audio file.

#### Stream Audio
```http
GET /v1/transcripts_audio/{transcript_id}/stream
```

Stream audio content with range support for progressive playback.
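
Because the endpoint honors HTTP range requests, a client can fetch a byte window for seeking - for example:

```bash
# Request only the first 64KB of the audio stream
curl -H "Range: bytes=0-65535" \
  "http://localhost:8000/v1/transcripts_audio/$TRANSCRIPT_ID/stream" \
  -o head.bin
# Expect a 206 Partial Content response
```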

### WebRTC Streaming

Real-time audio streaming via WebRTC for live transcription.

#### Initialize WebRTC Session
```http
POST /v1/transcripts_webrtc/{transcript_id}/offer
```

Create a WebRTC offer for establishing a peer connection.

#### Complete WebRTC Handshake
```http
POST /v1/transcripts_webrtc/{transcript_id}/answer
```

Submit the WebRTC answer to complete connection setup.

### WebSocket Streaming

Real-time updates and live transcription via WebSocket.

#### WebSocket Endpoint
```ws
ws://localhost:8000/v1/transcripts_websocket/{transcript_id}
```

Receive real-time transcription updates, speaker changes, and processing status.

**Message Types:**
- `transcription`: New transcribed text segments
- `diarization`: Speaker identification updates
- `status`: Processing status changes
- `error`: Error notifications

### Meetings

Manage virtual meeting rooms and recordings.

#### List Meetings
```http
GET /v1/meetings/
```

Get all meetings for the authenticated user.

#### Create Meeting
```http
POST /v1/meetings/
```

Initialize a new meeting room with Whereby integration.

#### Join Meeting
```http
POST /v1/meetings/{meeting_id}/join
```

Join an existing meeting and start recording.

#### End Meeting
```http
POST /v1/meetings/{meeting_id}/end
```

End the meeting and finalize the recording.

### Rooms

Virtual meeting room configuration and management.

#### List Rooms
```http
GET /v1/rooms/
```

Get available meeting rooms.

#### Create Room
```http
POST /v1/rooms/
```

Create a new persistent meeting room.

#### Update Room Settings
```http
PATCH /v1/rooms/{room_id}
```

Modify room configuration and permissions.

## Response Formats

### Success Response
```json
{
  "id": "uuid",
  "created_at": "2025-01-20T10:00:00Z",
  "updated_at": "2025-01-20T10:30:00Z",
  "data": {...}
}
```

### Error Response
```json
{
  "error": {
    "code": "ERROR_CODE",
    "message": "Human-readable error message",
    "details": {...}
  }
}
```

### Status Codes

- `200 OK`: Successful request
- `201 Created`: Resource created successfully
- `204 No Content`: Successful deletion
- `400 Bad Request`: Invalid request parameters
- `401 Unauthorized`: Authentication required
- `403 Forbidden`: Insufficient permissions
- `404 Not Found`: Resource not found
- `409 Conflict`: Resource conflict
- `422 Unprocessable Entity`: Validation error
- `429 Too Many Requests`: Rate limit exceeded
- `500 Internal Server Error`: Server error

## WebSocket Protocol

The WebSocket connection provides real-time updates during transcription processing. The server sends structured messages to communicate different events and data updates.

### Connection
```javascript
const ws = new WebSocket('ws://localhost:8000/v1/transcripts_websocket/{transcript_id}');
```

### Message Types and Formats

#### Transcription Update
Sent when new text is transcribed from the audio stream.
```json
{
  "type": "transcription",
  "data": {
    "text": "The transcribed text segment",
    "speaker": "Speaker 1",
    "timestamp": 1705745623.456,
    "confidence": 0.95,
    "segment_id": "seg_001",
    "is_final": true
  }
}
```

#### Diarization Update
Sent when speaker changes are detected or speaker labels are updated.
```json
{
  "type": "diarization",
  "data": {
    "speaker": "Speaker 2",
    "speaker_id": "spk_002",
    "start_time": 1705745620.123,
    "end_time": 1705745625.456,
    "confidence": 0.87
  }
}
```

#### Processing Status
Sent to indicate changes in the processing pipeline status.
```json
{
  "type": "status",
  "data": {
    "status": "processing",
    "stage": "transcription",
    "progress": 45.5,
    "message": "Processing audio chunk 12 of 26"
  }
}
```

Status values:
- `initializing`: Setting up processing pipeline
- `processing`: Active transcription/diarization
- `completed`: Processing finished successfully
- `failed`: Processing encountered an error
- `paused`: Processing temporarily suspended

#### Summary Update
Sent when AI-generated summaries or topics are available.
```json
{
  "type": "summary",
  "data": {
    "summary": "Brief summary of the conversation",
    "topics": ["topic1", "topic2", "topic3"],
    "action_items": ["action 1", "action 2"],
    "key_points": ["point 1", "point 2"]
  }
}
```

#### Error Messages
Sent when errors occur during processing.
```json
{
  "type": "error",
  "data": {
    "code": "AUDIO_FORMAT_ERROR",
    "message": "Unsupported audio format",
    "details": {
      "format": "unknown",
      "sample_rate": 0
    },
    "recoverable": false
  }
}
```

#### Heartbeat/Keepalive
Sent periodically to maintain the connection.
```json
{
  "type": "ping",
  "data": {
    "timestamp": 1705745630.000
  }
}
```

### Client-to-Server Messages

Clients can send control messages to the server:

#### Start/Resume Processing
```json
{
  "action": "start",
  "params": {}
}
```

#### Pause Processing
```json
{
  "action": "pause",
  "params": {}
}
```

#### Request Status
```json
{
  "action": "get_status",
  "params": {}
}
```

## OpenAPI Specification

The complete OpenAPI 3.0 specification is available at:

```
http://localhost:8000/v1/openapi.json
```

You can import this specification into tools like:
- Postman
- Insomnia
- Swagger UI
- OpenAPI Generator (for client SDK generation)

## SDK Support

While Reflector doesn't provide official SDKs, you can generate client libraries using the OpenAPI specification with tools like:

- **Python**: `openapi-python-client`
- **TypeScript**: `openapi-typescript-codegen`
- **Go**: `oapi-codegen`
- **Java**: `openapi-generator`
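
For example, generating a typed Python client from a running instance with `openapi-python-client` (invocation per that tool's documented CLI; adjust the URL to your deployment):

```bash
pip install openapi-python-client
openapi-python-client generate --url http://localhost:8000/v1/openapi.json
# Produces a client package in the current directory
```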
|
||||
|
||||
## Example Usage
|
||||
|
||||
### Python Example
|
||||
```python
|
||||
import requests
|
||||
|
||||
# Upload and transcribe audio
|
||||
with open('meeting.mp3', 'rb') as f:
|
||||
response = requests.post(
|
||||
'http://localhost:8000/v1/transcripts/',
|
||||
files={'file': f}
|
||||
)
|
||||
transcript_id = response.json()['id']
|
||||
|
||||
# Check transcription status
|
||||
status = requests.get(
|
||||
f'http://localhost:8000/v1/transcripts/{transcript_id}'
|
||||
).json()
|
||||
|
||||
print(f"Transcription status: {status['status']}")
|
||||
```
|
||||
|
||||
### JavaScript WebSocket Example
|
||||
```javascript
|
||||
// Connect to WebSocket for real-time transcription updates
|
||||
const ws = new WebSocket(`ws://localhost:8000/v1/transcripts_websocket/${transcriptId}`);
|
||||
|
||||
ws.onopen = () => {
|
||||
console.log('Connected to transcription WebSocket');
|
||||
};
|
||||
|
||||
ws.onmessage = (event) => {
|
||||
const message = JSON.parse(event.data);
|
||||
|
||||
switch(message.type) {
|
||||
case 'transcription':
|
||||
console.log(`[${message.data.speaker}]: ${message.data.text}`);
|
||||
break;
|
||||
case 'diarization':
|
||||
console.log(`Speaker change: ${message.data.speaker}`);
|
||||
break;
|
||||
case 'status':
|
||||
console.log(`Status: ${message.data.status}`);
|
||||
break;
|
||||
case 'error':
|
||||
console.error(`Error: ${message.data.message}`);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
ws.onerror = (error) => {
|
||||
console.error('WebSocket error:', error);
|
||||
};
|
||||
|
||||
ws.onclose = () => {
|
||||
console.log('WebSocket connection closed');
|
||||
};
|
||||
```
|
||||
|
||||
## Need Help?
|
||||
|
||||
- Review [example implementations](https://github.com/monadical-sas/reflector/tree/main/examples)
|
||||
- Open an issue on [GitHub](https://github.com/monadical-sas/reflector/issues)
|
||||
@@ -1,7 +0,0 @@
---
title: overview
---

# overview

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: backend
---

# backend

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: database
---

# database

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: frontend
---

# frontend

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: overview
---

# overview

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: workers
---

# workers

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: configuration
---

# configuration

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: analysis
---

# analysis

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: diarization
---

# diarization

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: transcription
---

# transcription

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,7 +0,0 @@
---
title: translation
---

# translation

Documentation coming soon. See [TODO.md](/docs/TODO) for required information.
@@ -1,139 +0,0 @@
---
sidebar_position: 100
title: Roadmap
---

# Product Roadmap

Our development roadmap for Reflector, focusing on expanding capabilities while maintaining privacy and performance.

## Planned Features

### 🌍 Multi-Language Support Enhancement

**Current State:**
- Whisper supports 99+ languages for transcription
- Parakeet supports English only with high accuracy
- Translation available to 100+ languages

**Planned Improvements:**
- Default language selection per room/user
- Automatic language detection improvements
- Multi-language diarization support
- RTL (Right-to-Left) language UI support
- Language-specific post-processing rules

### 🏠 Self-Hosted Room Providers

**Jitsi Integration**

Moving beyond Whereby to support self-hosted video conferencing:

- No API keys required
- Complete control over video infrastructure
- Custom branding and configuration
- Lower operational costs
- Enhanced privacy with self-hosted video

**Implementation Plan:**
- WebRTC bridge for Jitsi Meet
- Room management API integration
- Recording synchronization
- Participant tracking

### 📅 Calendar Integration

**Planned Capabilities:**
- Google Calendar synchronization
- Microsoft Outlook integration
- Automatic meeting room creation
- Pre-meeting document preparation
- Post-meeting transcript delivery
- Recurring meeting support

**Features:**
- Auto-join scheduled meetings
- Calendar-based access control
- Meeting agenda import
- Action item export to calendar

### 🖥️ Self-Hosted GPU Service

**For organizations with dedicated GPU hardware (H100, A100, RTX 4090):**

**Docker GPU Worker Image:**
- Self-contained processing service
- CUDA 11/12 support
- Pre-loaded models:
  - Whisper (all sizes)
  - Pyannote diarization
  - Seamless-M4T translation
- Automatic model management

**Deployment Options:**
- Kubernetes GPU operators
- Docker Compose with nvidia-docker
- Bare metal installation
- Hybrid cloud/on-premise

**Benefits:**
- No Modal.com dependency
- Complete data isolation
- Predictable costs
- Maximum performance
- Custom model support

## Future Considerations

### Enhanced Analytics
- Meeting insights dashboard
- Speaker participation metrics
- Topic trends over time
- Team collaboration patterns

### Advanced AI Features
- Real-time sentiment analysis
- Emotion detection
- Meeting quality scores
- Automated coaching suggestions

### Integration Ecosystem
- Slack/Teams notifications
- CRM integration (Salesforce, HubSpot)
- Project management tools (Jira, Asana)
- Knowledge bases (Notion, Confluence)

### Performance Improvements
- WebAssembly for client-side processing
- Edge computing support
- 5G network optimization
- Blockchain for transcript verification

## Contributing

We welcome community contributions! Areas where you can help:

1. **Language Support**: Add support for your language
2. **Integrations**: Connect with your favorite tools
3. **Models**: Fine-tune models for specific domains
4. **Documentation**: Improve guides and examples

See our [Contributing Guide](https://github.com/monadical-sas/reflector/blob/main/CONTRIBUTING.md) for details.

## Timeline

We don't provide specific dates, as development depends on community contributions and priorities. Features are generally released when they're ready and properly tested.

## Feature Requests

Have an idea for Reflector? We'd love to hear it!

- [Open a GitHub Issue](https://github.com/monadical-sas/reflector/issues/new)
- [Join our Discord](#)
- [Email us](mailto:reflector@monadical.com)

## Stay Updated

- Watch our [GitHub repository](https://github.com/monadical-sas/reflector)
- Follow our [blog](#)
- Subscribe to our [newsletter](#)
@@ -1,163 +0,0 @@
import {themes as prismThemes} from 'prism-react-renderer';
import type {Config} from '@docusaurus/types';
import type * as Preset from '@docusaurus/preset-classic';
import type * as OpenApiPlugin from 'docusaurus-plugin-openapi-docs';

const config: Config = {
  title: 'Reflector',
  tagline: 'AI-powered audio transcription and meeting analysis platform',
  favicon: 'img/favicon.ico',

  url: 'https://monadical-sas.github.io',
  baseUrl: '/',

  organizationName: 'monadical-sas',
  projectName: 'reflector',

  onBrokenLinks: 'throw',
  onBrokenMarkdownLinks: 'warn',

  markdown: {
    mermaid: true,
  },

  i18n: {
    defaultLocale: 'en',
    locales: ['en'],
  },

  presets: [
    [
      'classic',
      {
        docs: {
          sidebarPath: './sidebars.ts',
          editUrl: 'https://github.com/monadical-sas/reflector/tree/main/docs/',
        },
        blog: false,
        theme: {
          customCss: './src/css/custom.css',
        },
      } satisfies Preset.Options,
    ],
  ],

  plugins: [
    [
      'docusaurus-plugin-openapi-docs',
      {
        id: 'openapi',
        docsPluginId: 'classic',
        config: {
          reflectorapi: {
            specPath: 'static/openapi.json', // Use local file fetched by script
            outputDir: 'docs/reference/api-generated',
            sidebarOptions: {
              groupPathsBy: 'tag',
              categoryLinkSource: 'tag',
            },
            downloadUrl: '/openapi.json',
            hideSendButton: false,
            showExtensions: true,
          },
        } satisfies OpenApiPlugin.Options,
      },
    ],
  ],

  themes: ['docusaurus-theme-openapi-docs', '@docusaurus/theme-mermaid'],

  themeConfig: {
    image: 'img/reflector-social-card.jpg',
    colorMode: {
      defaultMode: 'light',
      disableSwitch: false,
      respectPrefersColorScheme: true,
    },
    navbar: {
      title: 'Reflector',
      logo: {
        alt: 'Reflector Logo',
        src: 'img/reflector-logo.svg',
      },
      items: [
        {
          type: 'docSidebar',
          sidebarId: 'tutorialSidebar',
          position: 'left',
          label: 'Documentation',
        },
        {
          to: '/docs/reference/api',
          label: 'API',
          position: 'left',
        },
        {
          href: 'https://github.com/monadical-sas/reflector',
          label: 'GitHub',
          position: 'right',
        },
      ],
    },
    footer: {
      style: 'dark',
      links: [
        {
          title: 'Documentation',
          items: [
            {
              label: 'Introduction',
              to: '/docs/intro',
            },
            {
              label: 'Installation',
              to: '/docs/installation/overview',
            },
            {
              label: 'API Reference',
              to: '/docs/reference/api',
            },
          ],
        },
        {
          title: 'Resources',
          items: [
            {
              label: 'Architecture',
              to: '/docs/reference/architecture/overview',
            },
            {
              label: 'Pipelines',
              to: '/docs/pipelines/overview',
            },
            {
              label: 'Roadmap',
              to: '/docs/roadmap',
            },
          ],
        },
        {
          title: 'More',
          items: [
            {
              label: 'GitHub',
              href: 'https://github.com/monadical-sas/reflector',
            },
            {
              label: 'Docker Hub',
              href: 'https://hub.docker.com/r/reflector/backend',
            },
          ],
        },
      ],
    },
    copyright: `Copyright © ${new Date().getFullYear()} <a href="https://monadical.com" target="_blank" rel="noopener noreferrer">Monadical</a>. Licensed under MIT. Built with Docusaurus.`,
    prism: {
      theme: prismThemes.github,
      darkTheme: prismThemes.dracula,
      additionalLanguages: ['python', 'bash', 'docker', 'yaml'],
    },
  } satisfies Preset.ThemeConfig,
};

export default config;
369
docs/jitsi.md
Normal file
@@ -0,0 +1,369 @@
# Jitsi Integration for Reflector

This document contains research and planning notes for integrating Jitsi Meet as a replacement for Whereby in Reflector.

## Overview

Jitsi Meet is an open-source video conferencing solution that can replace Whereby in Reflector, providing:
- Cost reduction (no per-minute charges)
- Direct recording access via Jibri
- Real-time event webhooks
- Full customization and control

## Current Whereby Integration Analysis

### Architecture
1. **Room Creation**: User creates a "room" template in the Reflector DB with settings
2. **Meeting Creation**: The `/rooms/{room_name}/meeting` endpoint calls the Whereby API to create a meeting
3. **Recording**: Whereby handles recording automatically to an S3 bucket
4. **Webhooks**: Whereby sends events for participant tracking

### Database Structure
```python
# Room = Template/Configuration
class Room:
    id, name, user_id
    recording_type, recording_trigger  # cloud, automatic-2nd-participant
    webhook_url, webhook_secret

# Meeting = Actual Whereby Meeting Instance
class Meeting:
    id                       # Whereby meetingId
    room_name                # Generated by Whereby
    room_url, host_room_url  # Whereby URLs
    num_clients              # Updated via webhooks
```

## Jitsi Components

### Core Architecture
- **Jitsi Meet**: Web frontend (Next.js + React)
- **Prosody**: XMPP server for messaging/rooms
- **Jicofo**: Conference focus (orchestration)
- **JVB**: Videobridge (media routing)
- **Jibri**: Recording service
- **Jigasi**: SIP gateway (optional, for phone dial-in)

### Exposure Requirements
- **Web service**: 443/80 (frontend)
- **JVB**: 10000/UDP (media streams) - **MUST EXPOSE**
- **Prosody**: 5280 (BOSH/WebSocket) - can proxy via web
- **Jicofo, Jibri, Jigasi**: Internal only

## Recording with Jibri

### How Jibri Works
- Each Jibri instance handles **one recording at a time**
- Records mixed audio/video to MP4 format
- Uses headless Chrome + ffmpeg for capture
- Supports finalize scripts for post-processing

### Jibri Pool for Scaling
- Multiple Jibri instances join the "jibribrewery" MUC
- Jicofo distributes recording requests to available instances
- Automatic load balancing and failover

```yaml
# Multiple Jibri instances
jibri1:
  environment:
    - JIBRI_INSTANCE_ID=jibri1
    - JIBRI_BREWERY_MUC=jibribrewery

jibri2:
  environment:
    - JIBRI_INSTANCE_ID=jibri2
    - JIBRI_BREWERY_MUC=jibribrewery
```

### Recording Automation Options
1. **Environment Variables**: `ENABLE_RECORDING=1`, `AUTO_RECORDING=1`
2. **URL Parameters**: `?config.autoRecord=true` (combined with a JWT in the sketch below)
3. **JWT Token**: Include recording permissions in JWT
4. **API Control**: `api.executeCommand('startRecording')`
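
Options 2 and 3 compose: Reflector can mint a JWT carrying the recording permission and append the auto-record flag to the join URL it hands out. A sketch; the placeholder domain, the secret handling, and the way the two query parameters are combined are assumptions, and `generate_jitsi_jwt` in the JWT section below covers the full claim set:

```python
import time

import jwt  # PyJWT

JITSI_DOMAIN = "jitsi.domain"         # placeholder domain used throughout these notes
JITSI_JWT_SECRET = "your-secret-key"  # must match JWT_APP_SECRET on the Jitsi side

def auto_record_join_url(room: str, user_name: str) -> str:
    """Join URL combining option 3 (JWT with recording permission)
    with option 2 (the autoRecord URL parameter)."""
    token = jwt.encode(
        {
            "aud": "jitsi",
            "iss": "reflector",
            "room": room,
            "exp": int(time.time()) + 2 * 3600,  # arbitrary 2h expiry
            "context": {
                "user": {"name": user_name},
                "features": {"recording": True},
            },
        },
        JITSI_JWT_SECRET,
    )
    return f"https://{JITSI_DOMAIN}/{room}?jwt={token}&config.autoRecord=true"
```
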
### Post-Processing Integration
```bash
#!/bin/bash
# finalize.sh - runs after recording completion
RECORDING_FILE=$1
MEETING_METADATA=$2
ROOM_NAME=$3

# Copy to a Reflector-accessible location
cp "$RECORDING_FILE" /shared/reflector-uploads/

# Trigger Reflector processing
curl -X POST "http://reflector-api:8000/v1/transcripts/process" \
  -H "Content-Type: application/json" \
  -d "{
    \"file_path\": \"/shared/reflector-uploads/$(basename $RECORDING_FILE)\",
    \"room_name\": \"$ROOM_NAME\",
    \"source\": \"jitsi\"
  }"
```
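
Note that `/v1/transcripts/process` is an endpoint these notes propose rather than one that exists today. A minimal FastAPI sketch of the receiving side, mirroring the curl payload above (the model and handler names are hypothetical):

```python
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter()

class JitsiRecordingIn(BaseModel):
    file_path: str
    room_name: str
    source: str  # "jitsi"

@router.post("/v1/transcripts/process")
async def process_jitsi_recording(payload: JitsiRecordingIn):
    # Hand off to the existing file pipeline; the task name is hypothetical
    # and mirrors the webhook handlers later in this document.
    # await process_recording_for_transcription(payload.room_name, payload.file_path)
    return {"status": "accepted", "file_path": payload.file_path}
```
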
## React Integration

### Official React SDK
```bash
npm i @jitsi/react-sdk
```

```jsx
import { JitsiMeeting } from '@jitsi/react-sdk'

<JitsiMeeting
  room="meeting-room"
  serverURL="https://your-jitsi.domain"
  jwt="your-jwt-token"
  config={{
    startWithAudioMuted: true,
    fileRecordingsEnabled: true,
    autoRecord: true
  }}
  onParticipantJoined={(participant) => {
    // Track participant events
  }}
  onRecordingStatusChanged={(status) => {
    // Handle recording events
  }}
/>
```

## Authentication & Room Control

### JWT-Based Access Control
```python
import jwt  # PyJWT

def generate_jitsi_jwt(payload):
    return jwt.encode({
        "aud": "jitsi",
        "iss": "reflector",
        "sub": "reflector-user",
        "room": payload["room"],
        "exp": int(payload["exp"].timestamp()),
        "context": {
            "user": {
                "name": payload["user_name"],
                "moderator": payload.get("moderator", False)
            },
            "features": {
                "recording": payload.get("recording", True)
            }
        }
    }, JITSI_JWT_SECRET)
```

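For example, issuing a moderator link for a generated room with the function above (a usage sketch; the room name and two-hour expiry are arbitrary):

```python
from datetime import datetime, timedelta, timezone

host_token = generate_jitsi_jwt({
    "room": "reflector-standup-1700000000",
    "user_name": "Meeting Host",
    "moderator": True,
    "exp": datetime.now(timezone.utc) + timedelta(hours=2),
})
print(f"https://jitsi.domain/reflector-standup-1700000000?jwt={host_token}")
```
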
### Prevent Anonymous Room Creation
```bash
# Environment configuration
ENABLE_AUTH=1
ENABLE_GUESTS=0
AUTH_TYPE=jwt
JWT_APP_ID=reflector
JWT_APP_SECRET=your-secret-key
```

## Webhook Integration

### Real-time Events via Prosody
A custom event-sync module can send webhooks for:
- Participant join/leave
- Recording start/stop
- Room creation/destruction
- Mute/unmute events

```lua
-- mod_event_sync.lua
module:hook("muc-occupant-joined", function(event)
    send_event({
        type = "participant_joined",
        room = event.room.jid,
        participant = {
            nick = event.occupant.nick,
            jid = event.occupant.jid,
        },
        timestamp = os.time(),
    });
end);
```

### Jibri Recording Webhooks
```bash
# Environment variable
JIBRI_WEBHOOK_SUBSCRIBERS=https://your-reflector.com/webhooks/jibri
```

## Proposed Reflector Integration

### Modified Database Schema
```python
from typing import Optional

from pydantic import BaseModel

class Meeting(BaseModel):
    id: str            # Our generated meeting ID
    room_name: str     # Generated: reflector-{room.name}-{timestamp}
    room_url: str      # https://jitsi.domain/room_name?jwt=token
    host_room_url: str # Same but with moderator JWT
    # Add Jitsi-specific fields
    jitsi_jwt: str     # JWT token
    jitsi_room_id: str # Internal room identifier
    recording_status: str  # pending, recording, completed
    recording_file_path: Optional[str]
```

### API Replacement
```python
# Replace whereby.py with jitsi.py
async def create_meeting(room_name_prefix: str, end_date: datetime, room: Room):
    # Generate a unique room name
    jitsi_room = f"reflector-{room.name}-{int(time.time())}"

    # Generate JWT tokens (generate_jwt / generate_uuid4 are helpers
    # sketched elsewhere in these notes)
    user_jwt = generate_jwt(room=jitsi_room, moderator=False, exp=end_date)
    host_jwt = generate_jwt(room=jitsi_room, moderator=True, exp=end_date)

    return {
        "meetingId": generate_uuid4(),  # Our ID
        "roomName": jitsi_room,
        "roomUrl": f"https://jitsi.domain/{jitsi_room}?jwt={user_jwt}",
        "hostRoomUrl": f"https://jitsi.domain/{jitsi_room}?jwt={host_jwt}",
        "startDate": datetime.now().isoformat(),
        "endDate": end_date.isoformat(),
    }
```

### Webhook Endpoints
```python
# Replace the Whereby webhook with Jitsi webhooks
@router.post("/jitsi/events")
async def jitsi_events_webhook(event_data: dict):
    event_type = event_data.get("event")
    room_name = event_data.get("room", "").split("@")[0]

    meeting = await Meeting.get_by_room(room_name)

    if event_type == "muc-occupant-joined":
        # Update participant count
        meeting.num_clients += 1

    elif event_type == "jibri-recording-on":
        meeting.recording_status = "recording"

    elif event_type == "jibri-recording-off":
        meeting.recording_status = "processing"
        await process_meeting_recording.delay(meeting.id)

@router.post("/jibri/recording-complete")
async def recording_complete(data: dict):
    # Handle the finalize-script webhook
    room_name = data.get("room_name")
    file_path = data.get("file_path")

    meeting = await Meeting.get_by_room(room_name)
    meeting.recording_file_path = file_path
    meeting.recording_status = "completed"

    # Start Reflector processing
    await process_recording_for_transcription(meeting.id, file_path)
```

## Deployment with Docker

### Official docker-jitsi-meet
```bash
# Download the official release
wget $(wget -q -O - https://api.github.com/repos/jitsi/docker-jitsi-meet/releases/latest | grep zip | cut -d\" -f4)

# Setup
mkdir -p ~/.jitsi-meet-cfg/{web,transcripts,prosody/config,prosody/prosody-plugins-custom,jicofo,jvb,jigasi,jibri}
./gen-passwords.sh  # Generate secure passwords
docker compose up -d
```

### Coolify Integration
```yaml
services:
  web:
    ports: ["80:80", "443:443"]
  jvb:
    ports: ["10000:10000/udp"]  # Must expose for media
  jibri1:
    environment:
      - JIBRI_INSTANCE_ID=jibri1
      - JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
  jibri2:
    environment:
      - JIBRI_INSTANCE_ID=jibri2
```

## Benefits vs Whereby

### Cost & Control
✅ **No per-minute charges** - significant cost savings
✅ **Full recording control** - direct file access
✅ **Custom branding** - complete UI control
✅ **Self-hosted** - no vendor lock-in

### Technical Advantages
✅ **Real-time events** - immediate webhook notifications
✅ **Rich participant metadata** - detailed tracking
✅ **JWT security** - token-based access with expiration
✅ **Multiple recording formats** - audio-only options
✅ **Scalable architecture** - horizontal Jibri scaling

### Integration Benefits
✅ **Same API surface** - minimal changes to existing code
✅ **React SDK** - better frontend integration
✅ **Direct processing** - no S3 download delays
✅ **Event-driven architecture** - better real-time capabilities

## Implementation Plan

1. **Deploy Jitsi Stack** - Set up docker-jitsi-meet with multiple Jibri instances
2. **Create jitsi.py** - Replace whereby.py with Jitsi API functions
3. **Update Database** - Add Jitsi-specific fields to the Meeting model
4. **Webhook Integration** - Replace Whereby webhooks with Jitsi events
5. **Frontend Updates** - Replace the Whereby embed with the Jitsi React SDK
6. **Testing & Migration** - Gradual rollout with fallback to Whereby

## Recording Limitations & Considerations

### Current Limitations
- **Mixed audio only** - Jibri doesn't separate participant tracks natively
- **One recording per Jibri** - requires multiple instances for concurrent recordings
- **Chrome dependency** - Jibri uses headless Chrome for recording

### Metadata Capabilities
✅ **Participant join/leave timestamps** - via webhooks
✅ **Speaking time tracking** - via audio level events
✅ **Meeting duration** - precise timing
✅ **Room-specific data** - custom metadata in JWT

### Alternative Recording Methods
- **Local recording** - browser-based, per-participant
- **Custom recording** - lib-jitsi-meet for individual streams
- **Third-party solutions** - Recall.ai, Otter.ai integrations

## Security Considerations

### JWT Configuration
- **Room-specific tokens** - limit access to specific rooms
- **Time-based expiration** - automatic cleanup
- **Feature permissions** - control recording and moderation rights
- **User identification** - embed user metadata in tokens

### Access Control
- **No anonymous rooms** - all rooms require a valid JWT
- **API-only creation** - prevent direct room access
- **Webhook verification** - HMAC signature validation (see the sketch below)
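
A sketch of that verification on the Reflector side. It assumes the sender signs the raw request body with a shared secret and sends the hex digest in an `X-Signature` header; both conventions would need to be agreed with the Prosody event-sync module, as this is not built-in Jitsi behavior:

```python
import hashlib
import hmac

from fastapi import HTTPException, Request

WEBHOOK_SECRET = b"change-me"  # assumption: shared with the event-sync module

async def verify_webhook(request: Request) -> bytes:
    """Reject webhook calls whose HMAC-SHA256 signature doesn't match the body."""
    body = await request.body()
    expected = hmac.new(WEBHOOK_SECRET, body, hashlib.sha256).hexdigest()
    provided = request.headers.get("X-Signature", "")
    if not hmac.compare_digest(expected, provided):
        raise HTTPException(status_code=401, detail="invalid webhook signature")
    return body
```
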
## Next Steps

1. **Deploy a test Jitsi instance** - validate the recording pipeline
2. **Prototype jitsi.py** - create equivalent API functions
3. **Test webhook integration** - ensure event delivery works
4. **Performance testing** - validate multiple concurrent recordings
5. **Migration strategy** - plan a gradual transition from Whereby

---

*This document serves as the comprehensive planning and research notes for Jitsi integration in Reflector. It should be updated as implementation progresses and new insights are discovered.*

23526
docs/package-lock.json
generated
File diff suppressed because it is too large
@@ -1,53 +0,0 @@
{
  "name": "docs",
  "version": "0.0.0",
  "private": true,
  "scripts": {
    "docusaurus": "docusaurus",
    "start": "docusaurus start",
    "build": "docusaurus build",
    "swizzle": "docusaurus swizzle",
    "deploy": "docusaurus deploy",
    "clear": "docusaurus clear",
    "serve": "docusaurus serve",
    "write-translations": "docusaurus write-translations",
    "write-heading-ids": "docusaurus write-heading-ids",
    "typecheck": "tsc",
    "fetch-openapi": "./scripts/fetch-openapi.sh",
    "gen-api-docs": "npm run fetch-openapi && docusaurus gen-api-docs reflector",
    "prebuild": "npm run fetch-openapi"
  },
  "dependencies": {
    "@docusaurus/core": "3.6.3",
    "@docusaurus/preset-classic": "3.6.3",
    "@mdx-js/react": "^3.0.0",
    "clsx": "^2.0.0",
    "docusaurus-plugin-openapi-docs": "^4.5.1",
    "docusaurus-theme-openapi-docs": "^4.5.1",
    "@docusaurus/theme-mermaid": "3.6.3",
    "prism-react-renderer": "^2.3.0",
    "react": "^18.0.0",
    "react-dom": "^18.0.0"
  },
  "devDependencies": {
    "@docusaurus/module-type-aliases": "3.6.3",
    "@docusaurus/tsconfig": "3.6.3",
    "@docusaurus/types": "3.6.3",
    "typescript": "~5.6.2"
  },
  "browserslist": {
    "production": [
      ">0.5%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 3 chrome version",
      "last 3 firefox version",
      "last 5 safari version"
    ]
  },
  "engines": {
    "node": ">=18.0"
  }
}
@@ -1,115 +0,0 @@
#!/bin/bash

# Script to fetch OpenAPI specification from FastAPI backend
# Used during documentation build process

set -e

echo "📡 Fetching OpenAPI specification from FastAPI backend..."

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
BACKEND_DIR="../server"
OPENAPI_OUTPUT="./static/openapi.json"
SERVER_PORT=1250  # Reflector uses port 1250 by default
MAX_WAIT=30

# Check if backend directory exists
if [ ! -d "$BACKEND_DIR" ]; then
    echo -e "${RED}Error: Backend directory not found at $BACKEND_DIR${NC}"
    exit 1
fi

# Function to check if the server is running
check_server() {
    curl -s -o /dev/null -w "%{http_code}" "http://localhost:${SERVER_PORT}/openapi.json" 2>/dev/null
}

# Function to clean up on exit
cleanup() {
    if [ ! -z "$SERVER_PID" ]; then
        echo -e "\n${YELLOW}Stopping FastAPI server (PID: $SERVER_PID)...${NC}"
        kill $SERVER_PID 2>/dev/null || true
        wait $SERVER_PID 2>/dev/null || true
    fi
}

# Set trap to clean up on exit
trap cleanup EXIT INT TERM

# Change to the backend directory
cd "$BACKEND_DIR"

# Check if uv is installed
if ! command -v uv &> /dev/null; then
    echo -e "${YELLOW}uv not found, checking for python...${NC}"
    if command -v python3 &> /dev/null; then
        PYTHON_CMD="python3"
    elif command -v python &> /dev/null; then
        PYTHON_CMD="python"
    else
        echo -e "${RED}Error: Neither uv nor python found${NC}"
        exit 1
    fi
    RUN_CMD="$PYTHON_CMD -m"
else
    RUN_CMD="uv run -m"
fi

# Start the FastAPI server in the background (let it use default port 1250)
echo -e "${YELLOW}Starting FastAPI server...${NC}"
$RUN_CMD reflector.app > /dev/null 2>&1 &
SERVER_PID=$!

# Wait for the server to be ready
echo -n "Waiting for server to be ready"
WAITED=0
while [ $WAITED -lt $MAX_WAIT ]; do
    if [ "$(check_server)" = "200" ]; then
        echo -e " ${GREEN}✓${NC}"
        break
    fi
    echo -n "."
    sleep 1
    WAITED=$((WAITED + 1))
done

if [ $WAITED -ge $MAX_WAIT ]; then
    echo -e " ${RED}✗${NC}"
    echo -e "${RED}Error: Server failed to start within ${MAX_WAIT} seconds${NC}"
    exit 1
fi

# Change back to the docs directory
cd - > /dev/null

# Create the static directory if it doesn't exist
mkdir -p "$(dirname "$OPENAPI_OUTPUT")"

# Fetch the OpenAPI specification
echo -e "${YELLOW}Fetching OpenAPI specification...${NC}"
if curl -s "http://localhost:${SERVER_PORT}/openapi.json" -o "$OPENAPI_OUTPUT"; then
    echo -e "${GREEN}✓ OpenAPI specification saved to $OPENAPI_OUTPUT${NC}"

    # Validate JSON
    if command -v jq &> /dev/null; then
        if jq empty "$OPENAPI_OUTPUT" 2>/dev/null; then
            echo -e "${GREEN}✓ OpenAPI specification is valid JSON${NC}"
            # Pretty-print the JSON
            jq . "$OPENAPI_OUTPUT" > "${OPENAPI_OUTPUT}.tmp" && mv "${OPENAPI_OUTPUT}.tmp" "$OPENAPI_OUTPUT"
        else
            echo -e "${RED}Error: Invalid JSON in OpenAPI specification${NC}"
            exit 1
        fi
    fi
else
    echo -e "${RED}Error: Failed to fetch OpenAPI specification${NC}"
    exit 1
fi

echo -e "${GREEN}✅ OpenAPI specification successfully fetched!${NC}"
@@ -1,95 +0,0 @@
import type {SidebarsConfig} from '@docusaurus/plugin-content-docs';

const sidebars: SidebarsConfig = {
  tutorialSidebar: [
    'intro',
    {
      type: 'category',
      label: 'Concepts',
      collapsed: false,
      items: [
        'concepts/overview',
        'concepts/modes',
        'concepts/pipeline',
      ],
    },
    {
      type: 'category',
      label: 'Installation',
      collapsed: false,
      items: [
        'installation/overview',
        'installation/requirements',
        'installation/docker-setup',
        'installation/modal-setup',
        'installation/self-hosted-gpu-setup',
        'installation/auth-setup',
        'installation/daily-setup',
        {
          type: 'category',
          label: 'Other Integrations',
          collapsed: true,
          items: [
            'installation/whereby-setup',
            'installation/zulip-setup',
          ],
        },
      ],
    },
    {
      type: 'category',
      label: 'Pipelines',
      items: [
        'pipelines/overview',
        'pipelines/file-pipeline',
        'pipelines/live-pipeline',
      ],
    },
    {
      type: 'category',
      label: 'Reference',
      items: [
        {
          type: 'category',
          label: 'Architecture',
          items: [
            'reference/architecture/overview',
            'reference/architecture/backend',
            'reference/architecture/frontend',
            'reference/architecture/workers',
            'reference/architecture/database',
          ],
        },
        {
          type: 'category',
          label: 'Processors',
          items: [
            'reference/processors/transcription',
            'reference/processors/diarization',
            'reference/processors/translation',
            'reference/processors/analysis',
          ],
        },
        {
          type: 'category',
          label: 'API',
          items: [
            {
              type: 'doc',
              id: 'reference/api/overview',
            },
            {
              type: 'link',
              label: 'OpenAPI Reference',
              href: '/docs/reference/api',
            },
          ],
        },
        'reference/configuration',
      ],
    },
    'roadmap',
  ],
};

export default sidebars;
@@ -1,70 +0,0 @@
import clsx from 'clsx';
import Heading from '@theme/Heading';
import styles from './styles.module.css';

type FeatureItem = {
  title: string;
  Svg: React.ComponentType<React.ComponentProps<'svg'>>;
  description: JSX.Element;
};

const FeatureList: FeatureItem[] = [
  {
    title: 'Easy to Use',
    Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
    description: (
      <>
        Docusaurus was designed from the ground up to be easily installed and
        used to get your website up and running quickly.
      </>
    ),
  },
  {
    title: 'Focus on What Matters',
    Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
    description: (
      <>
        Docusaurus lets you focus on your docs, and we'll do the chores. Go
        ahead and move your docs into the <code>docs</code> directory.
      </>
    ),
  },
  {
    title: 'Powered by React',
    Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
    description: (
      <>
        Extend or customize your website layout by reusing React. Docusaurus can
        be extended while reusing the same header and footer.
      </>
    ),
  },
];

function Feature({title, Svg, description}: FeatureItem) {
  return (
    <div className={clsx('col col--4')}>
      <div className="text--center">
        <Svg className={styles.featureSvg} role="img" />
      </div>
      <div className="text--center padding-horiz--md">
        <Heading as="h3">{title}</Heading>
        <p>{description}</p>
      </div>
    </div>
  );
}

export default function HomepageFeatures(): JSX.Element {
  return (
    <section className={styles.features}>
      <div className="container">
        <div className="row">
          {FeatureList.map((props, idx) => (
            <Feature key={idx} {...props} />
          ))}
        </div>
      </div>
    </section>
  );
}
@@ -1,11 +0,0 @@
.features {
  display: flex;
  align-items: center;
  padding: 2rem 0;
  width: 100%;
}

.featureSvg {
  height: 200px;
  width: 200px;
}
@@ -1,46 +0,0 @@
/**
 * Reflector Documentation Theme
 * Based on frontend colors from www/app/styles/theme.ts
 */

@import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap');

:root {
  --ifm-color-primary: #3158E2;
  --ifm-color-primary-dark: #2847C9;
  --ifm-color-primary-darker: #2442BF;
  --ifm-color-primary-darkest: #1D369C;
  --ifm-color-primary-light: #4A6FE5;
  --ifm-color-primary-lighter: #5F81E8;
  --ifm-color-primary-lightest: #8DA6F0;

  --ifm-background-color: #FFFFFF;
  --ifm-background-surface-color: #F4F4F4;
  --ifm-font-color-base: #1A202C;
  --ifm-font-color-secondary: #838383;

  --ifm-code-font-size: 95%;
  --docusaurus-highlighted-code-line-bg: rgba(49, 88, 226, 0.1);

  --ifm-font-family-base: 'Poppins', system-ui, -apple-system, sans-serif;
  --ifm-font-family-monospace: 'Fira Code', 'Monaco', 'Consolas', monospace;
  --ifm-navbar-background-color: #FFFFFF;
  --ifm-heading-font-weight: 600;
}

[data-theme='dark'] {
  --ifm-color-primary: #B1CBFF;
  --ifm-color-primary-dark: #91B3FF;
  --ifm-color-primary-darker: #81A7FF;
  --ifm-color-primary-darkest: #5189FF;
  --ifm-color-primary-light: #D1DFFF;
  --ifm-color-primary-lighter: #E1EBFF;
  --ifm-color-primary-lightest: #F0F5FF;

  --ifm-background-color: #0C0D0E;
  --ifm-background-surface-color: #1A202C;
  --ifm-font-color-base: #E2E8F0;
  --ifm-font-color-secondary: #A0AEC0;
  --docusaurus-highlighted-code-line-bg: rgba(177, 203, 255, 0.1);
  --ifm-navbar-background-color: #1A202C;
}
@@ -1,23 +0,0 @@
/**
 * CSS files with the .module.css suffix will be treated as CSS modules
 * and scoped locally.
 */

.heroBanner {
  padding: 4rem 0;
  text-align: center;
  position: relative;
  overflow: hidden;
}

@media screen and (max-width: 996px) {
  .heroBanner {
    padding: 2rem;
  }
}

.buttons {
  display: flex;
  align-items: center;
  justify-content: center;
}
@@ -1,7 +0,0 @@
import React from 'react';
import { Redirect } from '@docusaurus/router';
import useBaseUrl from '@docusaurus/useBaseUrl';

export default function Home(): JSX.Element {
  return <Redirect to={useBaseUrl('/docs/intro')} />;
}
@@ -1,7 +0,0 @@
---
title: Markdown page example
---

# Markdown page example

You don't need React to write simple standalone pages.
0
docs/static/img/favicon.ico
vendored
17
docs/static/img/logo.svg
vendored
@@ -1,17 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.9.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 viewBox="0 0 500 500" style="enable-background:new 0 0 500 500;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#B6B6B6;}
	.st1{fill:#4A4A4A;}
</style>
<g>
	<polygon class="st0" points="227.5,51.5 86.5,150.1 100.8,383.9 244.3,249.8 "/>
	<polygon class="st1" points="305.4,421.4 423.9,286 244.3,249.8 100.8,383.9 "/>
</g>
<image style="overflow:visible;" width="1504" height="1128" xlink:href="Ref/original-12843059d855efa50c3a12db8586ced7.jpg" transform="matrix(1 0 0 1 1857.8739 723.9433)">
</image>
<image style="overflow:visible;" width="1504" height="1128" xlink:href="Ref/original-f72ce8039f760337a51b47d045b477b8.jpg" transform="matrix(1 0 0 1 1857.8739 -512.4843)">
</image>
</svg>
17
docs/static/img/reflector-logo.svg
vendored
@@ -1,17 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 27.9.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 viewBox="0 0 500 500" style="enable-background:new 0 0 500 500;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#B6B6B6;}
	.st1{fill:#4A4A4A;}
</style>
<g>
	<polygon class="st0" points="227.5,51.5 86.5,150.1 100.8,383.9 244.3,249.8 "/>
	<polygon class="st1" points="305.4,421.4 423.9,286 244.3,249.8 100.8,383.9 "/>
</g>
<image style="overflow:visible;" width="1504" height="1128" xlink:href="Ref/original-12843059d855efa50c3a12db8586ced7.jpg" transform="matrix(1 0 0 1 1857.8739 723.9433)">
</image>
<image style="overflow:visible;" width="1504" height="1128" xlink:href="Ref/original-f72ce8039f760337a51b47d045b477b8.jpg" transform="matrix(1 0 0 1 1857.8739 -512.4843)">
</image>
</svg>
171
docs/static/img/undraw_docusaurus_mountain.svg
vendored
171
docs/static/img/undraw_docusaurus_mountain.svg
vendored
@@ -1,171 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="1088" height="687.962" viewBox="0 0 1088 687.962">
|
||||
<title>Easy to Use</title>
|
||||
<g id="Group_12" data-name="Group 12" transform="translate(-57 -56)">
|
||||
<g id="Group_11" data-name="Group 11" transform="translate(57 56)">
|
||||
<path id="Path_83" data-name="Path 83" d="M1017.81,560.461c-5.27,45.15-16.22,81.4-31.25,110.31-20,38.52-54.21,54.04-84.77,70.28a193.275,193.275,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.282,657.282,0,0,0-104.09-13.16q-14.97-.675-29.97-.67c-15.42.02-293.07,5.29-360.67-131.57-16.69-33.76-28.13-75-32.24-125.27-11.63-142.12,52.29-235.46,134.74-296.47,155.97-115.41,369.76-110.57,523.43,7.88C941.15,276.621,1036.99,396.031,1017.81,560.461Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_84" data-name="Path 84" d="M986.56,670.771c-20,38.52-47.21,64.04-77.77,80.28a193.272,193.272,0,0,1-27.46,11.94c-55.61,19.3-117.85,14.18-166.74,3.99a657.3,657.3,0,0,0-104.09-13.16q-14.97-.675-29.97-.67-23.13.03-46.25,1.72c-100.17,7.36-253.82-6.43-321.42-143.29L382,283.981,444.95,445.6l20.09,51.59,55.37-75.98L549,381.981l130.2,149.27,36.8-81.27L970.78,657.9l14.21,11.59Z" transform="translate(-56 -106.019)" fill="#f2f2f2"/>
|
||||
<path id="Path_85" data-name="Path 85" d="M302,282.962l26-57,36,83-31-60Z" opacity="0.1"/>
|
||||
<path id="Path_86" data-name="Path 86" d="M610.5,753.821q-14.97-.675-29.97-.67L465.04,497.191Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<path id="Path_87" data-name="Path 87" d="M464.411,315.191,493,292.962l130,150-132-128Z" opacity="0.1"/>
|
||||
<path id="Path_88" data-name="Path 88" d="M908.79,751.051a193.265,193.265,0,0,1-27.46,11.94L679.2,531.251Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<circle id="Ellipse_11" data-name="Ellipse 11" cx="3" cy="3" r="3" transform="translate(479 98.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_12" data-name="Ellipse 12" cx="3" cy="3" r="3" transform="translate(396 201.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_13" data-name="Ellipse 13" cx="2" cy="2" r="2" transform="translate(600 220.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_14" data-name="Ellipse 14" cx="2" cy="2" r="2" transform="translate(180 265.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_15" data-name="Ellipse 15" cx="2" cy="2" r="2" transform="translate(612 96.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_16" data-name="Ellipse 16" cx="2" cy="2" r="2" transform="translate(736 192.962)" fill="#f2f2f2"/>
|
||||
<circle id="Ellipse_17" data-name="Ellipse 17" cx="2" cy="2" r="2" transform="translate(858 344.962)" fill="#f2f2f2"/>
|
||||
<path id="Path_89" data-name="Path 89" d="M306,121.222h-2.76v-2.76h-1.48v2.76H299V122.7h2.76v2.759h1.48V122.7H306Z" fill="#f2f2f2"/>
|
||||
<path id="Path_90" data-name="Path 90" d="M848,424.222h-2.76v-2.76h-1.48v2.76H841V425.7h2.76v2.759h1.48V425.7H848Z" fill="#f2f2f2"/>
|
||||
<path id="Path_91" data-name="Path 91" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_92" data-name="Path 92" d="M1144,719.981c0,16.569-243.557,74-544,74s-544-57.431-544-74,243.557,14,544,14S1144,703.413,1144,719.981Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<ellipse id="Ellipse_18" data-name="Ellipse 18" cx="544" cy="30" rx="544" ry="30" transform="translate(0 583.962)" fill="#3f3d56"/>
|
||||
<path id="Path_93" data-name="Path 93" d="M624,677.981c0,33.137-14.775,24-33,24s-33,9.137-33-24,33-96,33-96S624,644.844,624,677.981Z" transform="translate(-56 -106.019)" fill="#ff6584"/>
|
||||
<path id="Path_94" data-name="Path 94" d="M606,690.66c0,15.062-6.716,10.909-15,10.909s-15,4.153-15-10.909,15-43.636,15-43.636S606,675.6,606,690.66Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<rect id="Rectangle_97" data-name="Rectangle 97" width="92" height="18" rx="9" transform="translate(489 604.962)" fill="#2f2e41"/>
|
||||
<rect id="Rectangle_98" data-name="Rectangle 98" width="92" height="18" rx="9" transform="translate(489 586.962)" fill="#2f2e41"/>
|
||||
<path id="Path_95" data-name="Path 95" d="M193,596.547c0,55.343,34.719,100.126,77.626,100.126" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_96" data-name="Path 96" d="M270.626,696.673c0-55.965,38.745-101.251,86.626-101.251" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_97" data-name="Path 97" d="M221.125,601.564c0,52.57,22.14,95.109,49.5,95.109" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_98" data-name="Path 98" d="M270.626,696.673c0-71.511,44.783-129.377,100.126-129.377" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_99" data-name="Path 99" d="M254.3,697.379s11.009-.339,14.326-2.7,16.934-5.183,17.757-1.395,16.544,18.844,4.115,18.945-28.879-1.936-32.19-3.953S254.3,697.379,254.3,697.379Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_100" data-name="Path 100" d="M290.716,710.909c-12.429.1-28.879-1.936-32.19-3.953-2.522-1.536-3.527-7.048-3.863-9.591l-.368.014s.7,8.879,4.009,10.9,19.761,4.053,32.19,3.953c3.588-.029,4.827-1.305,4.759-3.2C294.755,710.174,293.386,710.887,290.716,710.909Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_101" data-name="Path 101" d="M777.429,633.081c0,38.029,23.857,68.8,53.341,68.8" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_102" data-name="Path 102" d="M830.769,701.882c0-38.456,26.623-69.575,59.525-69.575" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_103" data-name="Path 103" d="M796.755,636.528c0,36.124,15.213,65.354,34.014,65.354" transform="translate(-56 -106.019)" fill="#6c63ff"/>
|
||||
<path id="Path_104" data-name="Path 104" d="M830.769,701.882c0-49.139,30.773-88.9,68.8-88.9" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
<path id="Path_105" data-name="Path 105" d="M819.548,702.367s7.565-.233,9.844-1.856,11.636-3.562,12.2-.958,11.368,12.949,2.828,13.018-19.844-1.33-22.119-2.716S819.548,702.367,819.548,702.367Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_106" data-name="Path 106" d="M844.574,711.664c-8.54.069-19.844-1.33-22.119-2.716-1.733-1.056-2.423-4.843-2.654-6.59l-.253.01s.479,6.1,2.755,7.487,13.579,2.785,22.119,2.716c2.465-.02,3.317-.9,3.27-2.2C847.349,711.159,846.409,711.649,844.574,711.664Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_107" data-name="Path 107" d="M949.813,724.718s11.36-1.729,14.5-4.591,16.89-7.488,18.217-3.667,19.494,17.447,6.633,19.107-30.153,1.609-33.835-.065S949.813,724.718,949.813,724.718Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_108" data-name="Path 108" d="M989.228,734.173c-12.86,1.659-30.153,1.609-33.835-.065-2.8-1.275-4.535-6.858-5.2-9.45l-.379.061s1.833,9.109,5.516,10.783,20.975,1.725,33.835.065c3.712-.479,4.836-1.956,4.529-3.906C993.319,732.907,991.991,733.817,989.228,734.173Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_109" data-name="Path 109" d="M670.26,723.9s9.587-1.459,12.237-3.875,14.255-6.32,15.374-3.095,16.452,14.725,5.6,16.125-25.448,1.358-28.555-.055S670.26,723.9,670.26,723.9Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_110" data-name="Path 110" d="M703.524,731.875c-10.853,1.4-25.448,1.358-28.555-.055-2.367-1.076-3.827-5.788-4.39-7.976l-.32.051s1.547,7.687,4.655,9.1,17.7,1.456,28.555.055c3.133-.4,4.081-1.651,3.822-3.3C706.977,730.807,705.856,731.575,703.524,731.875Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_111" data-name="Path 111" d="M178.389,719.109s7.463-1.136,9.527-3.016,11.1-4.92,11.969-2.409,12.808,11.463,4.358,12.553-19.811,1.057-22.23-.043S178.389,719.109,178.389,719.109Z" transform="translate(-56 -106.019)" fill="#a8a8a8"/>
|
||||
<path id="Path_112" data-name="Path 112" d="M204.285,725.321c-8.449,1.09-19.811,1.057-22.23-.043-1.842-.838-2.979-4.506-3.417-6.209l-.249.04s1.2,5.984,3.624,7.085,13.781,1.133,22.23.043c2.439-.315,3.177-1.285,2.976-2.566C206.973,724.489,206.1,725.087,204.285,725.321Z" transform="translate(-56 -106.019)" opacity="0.2"/>
|
||||
<path id="Path_113" data-name="Path 113" d="M439.7,707.337c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873,42.118-36.793,93.694-36.793S439.7,677.117,439.7,707.337Z" transform="translate(-56 -106.019)" opacity="0.1"/>
|
||||
<path id="Path_114" data-name="Path 114" d="M439.7,699.9c0,30.22-42.124,20.873-93.7,20.873s-93.074,9.347-93.074-20.873S295.04,663.1,346.616,663.1,439.7,669.676,439.7,699.9Z" transform="translate(-56 -106.019)" fill="#3f3d56"/>
|
||||
</g>
|
||||
<g id="docusaurus_keytar" transform="translate(312.271 493.733)">
|
||||
<path id="Path_40" data-name="Path 40" d="M99,52h91.791V89.153H99Z" transform="translate(5.904 -14.001)" fill="#fff" fill-rule="evenodd"/>
|
||||
<path id="Path_41" data-name="Path 41" d="M24.855,163.927A21.828,21.828,0,0,1,5.947,153a21.829,21.829,0,0,0,18.908,32.782H46.71V163.927Z" transform="translate(-3 -4.634)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_42" data-name="Path 42" d="M121.861,61.1l76.514-4.782V45.39A21.854,21.854,0,0,0,176.52,23.535H78.173L75.441,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L64.513,18.8a3.154,3.154,0,0,0-5.464,0l-2.732,4.732L53.586,18.8a3.154,3.154,0,0,0-5.464,0L45.39,23.535c-.024,0-.046,0-.071,0l-4.526-4.525a3.153,3.153,0,0,0-5.276,1.414l-1.5,5.577-5.674-1.521a3.154,3.154,0,0,0-3.863,3.864L26,34.023l-5.575,1.494a3.155,3.155,0,0,0-1.416,5.278l4.526,4.526c0,.023,0,.046,0,.07L18.8,48.122a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,59.05a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,69.977a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,80.9a3.154,3.154,0,0,0,0,5.464L23.535,89.1,18.8,91.832a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,102.76a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,113.687a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,124.615a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,135.542a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,146.469a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,157.4a3.154,3.154,0,0,0,0,5.464l4.732,2.732L18.8,168.324a3.154,3.154,0,0,0,0,5.464l4.732,2.732A21.854,21.854,0,0,0,45.39,198.375H176.52a21.854,21.854,0,0,0,21.855-21.855V89.1l-76.514-4.782a11.632,11.632,0,0,1,0-23.219" transform="translate(-1.681 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_43" data-name="Path 43" d="M143,186.71h32.782V143H143Z" transform="translate(9.984 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_44" data-name="Path 44" d="M196.71,159.855a5.438,5.438,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(10.912 -6.025)" fill="#44d860" fill-rule="evenodd"/>
|
||||
<path id="Path_45" data-name="Path 45" d="M153,124.855h32.782V103H153Z" transform="translate(10.912 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<path id="Path_46" data-name="Path 46" d="M194.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.814,2.814,0,0,0,.349.035" transform="translate(12.767 -9.377)" fill="#44d860" fill-rule="evenodd"/>
|
||||
<path id="Path_47" data-name="Path 47" d="M65.087,56.891a2.732,2.732,0,0,1-2.732-2.732,8.2,8.2,0,0,0-16.391,0,2.732,2.732,0,0,1-5.464,0,13.659,13.659,0,0,1,27.319,0,2.732,2.732,0,0,1-2.732,2.732" transform="translate(0.478 -15.068)" fill-rule="evenodd"/>
|
||||
<path id="Path_48" data-name="Path 48" d="M103,191.347h65.565a21.854,21.854,0,0,0,21.855-21.855V93H124.855A21.854,21.854,0,0,0,103,114.855Z" transform="translate(6.275 -10.199)" fill="#ffff50" fill-rule="evenodd"/>
|
||||
<path id="Path_49" data-name="Path 49" d="M173.216,129.787H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0-54.434H118.535a1.093,1.093,0,1,1,0-2.185h54.681a1.093,1.093,0,0,1,0,2.185m0,21.652H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186m0,21.855H118.535a1.093,1.093,0,1,1,0-2.186h54.681a1.093,1.093,0,0,1,0,2.186M189.585,61.611c-.013,0-.024-.007-.037-.005-3.377.115-4.974,3.492-6.384,6.472-1.471,3.114-2.608,5.139-4.473,5.078-2.064-.074-3.244-2.406-4.494-4.874-1.436-2.835-3.075-6.049-6.516-5.929-3.329.114-4.932,3.053-6.346,5.646-1.5,2.762-2.529,4.442-4.5,4.364-2.106-.076-3.225-1.972-4.52-4.167-1.444-2.443-3.112-5.191-6.487-5.1-3.272.113-4.879,2.606-6.3,4.808-1.5,2.328-2.552,3.746-4.551,3.662-2.156-.076-3.27-1.65-4.558-3.472-1.447-2.047-3.077-4.363-6.442-4.251-3.2.109-4.807,2.153-6.224,3.954-1.346,1.709-2.4,3.062-4.621,2.977a1.093,1.093,0,0,0-.079,2.186c3.3.11,4.967-1.967,6.417-3.81,1.286-1.635,2.4-3.045,4.582-3.12,2.1-.09,3.091,1.218,4.584,3.327,1.417,2,3.026,4.277,6.263,4.394,3.391.114,5.022-2.42,6.467-4.663,1.292-2,2.406-3.734,4.535-3.807,1.959-.073,3.026,1.475,4.529,4.022,1.417,2.4,3.023,5.121,6.324,5.241,3.415.118,5.064-2.863,6.5-5.5,1.245-2.282,2.419-4.437,4.5-4.509,1.959-.046,2.981,1.743,4.492,4.732,1.412,2.79,3.013,5.95,6.365,6.071l.185,0c3.348,0,4.937-3.36,6.343-6.331,1.245-2.634,2.423-5.114,4.444-5.216Z" transform="translate(7.109 -13.11)" fill-rule="evenodd"/>
|
||||
<path id="Path_50" data-name="Path 50" d="M83,186.71h43.71V143H83Z" transform="translate(4.42 -5.561)" fill="#3ecc5f" fill-rule="evenodd"/>
|
||||
<g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 109.327, 91.085)">
|
||||
<rect id="Rectangle_3" data-name="Rectangle 3" width="92.361" height="36.462" rx="2" transform="translate(0 0)" fill="#d8d8d8"/>
|
||||
<g id="Group_2" data-name="Group 2" transform="translate(1.531 23.03)">
|
||||
<rect id="Rectangle_4" data-name="Rectangle 4" width="5.336" height="5.336" rx="1" transform="translate(16.797 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_5" data-name="Rectangle 5" width="5.336" height="5.336" rx="1" transform="translate(23.12 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_6" data-name="Rectangle 6" width="5.336" height="5.336" rx="1" transform="translate(29.444 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_7" data-name="Rectangle 7" width="5.336" height="5.336" rx="1" transform="translate(35.768 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_8" data-name="Rectangle 8" width="5.336" height="5.336" rx="1" transform="translate(42.091 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_9" data-name="Rectangle 9" width="5.336" height="5.336" rx="1" transform="translate(48.415 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_10" data-name="Rectangle 10" width="5.336" height="5.336" rx="1" transform="translate(54.739 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_11" data-name="Rectangle 11" width="5.336" height="5.336" rx="1" transform="translate(61.063 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_12" data-name="Rectangle 12" width="5.336" height="5.336" rx="1" transform="translate(67.386 0)" fill="#4a4a4a"/>
|
||||
<path id="Path_51" data-name="Path 51" d="M1.093,0H14.518a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0ZM75,0H88.426a1.093,1.093,0,0,1,1.093,1.093V4.243a1.093,1.093,0,0,1-1.093,1.093H75a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,75,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
|
||||
</g>
|
||||
<g id="Group_3" data-name="Group 3" transform="translate(1.531 10.261)">
|
||||
<path id="Path_52" data-name="Path 52" d="M1.093,0H6.218A1.093,1.093,0,0,1,7.31,1.093V4.242A1.093,1.093,0,0,1,6.218,5.335H1.093A1.093,1.093,0,0,1,0,4.242V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
|
||||
<rect id="Rectangle_13" data-name="Rectangle 13" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_14" data-name="Rectangle 14" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_15" data-name="Rectangle 15" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_16" data-name="Rectangle 16" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_17" data-name="Rectangle 17" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_18" data-name="Rectangle 18" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_19" data-name="Rectangle 19" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_20" data-name="Rectangle 20" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_21" data-name="Rectangle 21" width="5.336" height="5.336" rx="1" transform="translate(58.888 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_22" data-name="Rectangle 22" width="5.336" height="5.336" rx="1" transform="translate(65.212 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_23" data-name="Rectangle 23" width="5.336" height="5.336" rx="1" transform="translate(71.536 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_24" data-name="Rectangle 24" width="5.336" height="5.336" rx="1" transform="translate(77.859 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_25" data-name="Rectangle 25" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
|
||||
</g>
|
||||
<g id="Group_4" data-name="Group 4" transform="translate(91.05 9.546) rotate(180)">
|
||||
<path id="Path_53" data-name="Path 53" d="M1.093,0H6.219A1.093,1.093,0,0,1,7.312,1.093v3.15A1.093,1.093,0,0,1,6.219,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.093A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
|
||||
<rect id="Rectangle_26" data-name="Rectangle 26" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
|
||||
<rect id="Rectangle_27" data-name="Rectangle 27" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
<rect id="Rectangle_28" data-name="Rectangle 28" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
<rect id="Rectangle_29" data-name="Rectangle 29" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
<rect id="Rectangle_30" data-name="Rectangle 30" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
<rect id="Rectangle_31" data-name="Rectangle 31" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
<rect id="Rectangle_32" data-name="Rectangle 32" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
<rect id="Rectangle_33" data-name="Rectangle 33" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
<rect id="Rectangle_34" data-name="Rectangle 34" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/>
<rect id="Rectangle_35" data-name="Rectangle 35" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/>
<rect id="Rectangle_36" data-name="Rectangle 36" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/>
<rect id="Rectangle_37" data-name="Rectangle 37" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/>
<rect id="Rectangle_38" data-name="Rectangle 38" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
<rect id="Rectangle_39" data-name="Rectangle 39" width="5.336" height="5.336" rx="1" transform="translate(8.299 0)" fill="#4a4a4a"/>
<rect id="Rectangle_40" data-name="Rectangle 40" width="5.336" height="5.336" rx="1" transform="translate(14.623 0)" fill="#4a4a4a"/>
<rect id="Rectangle_41" data-name="Rectangle 41" width="5.336" height="5.336" rx="1" transform="translate(20.947 0)" fill="#4a4a4a"/>
<rect id="Rectangle_42" data-name="Rectangle 42" width="5.336" height="5.336" rx="1" transform="translate(27.271 0)" fill="#4a4a4a"/>
<rect id="Rectangle_43" data-name="Rectangle 43" width="5.336" height="5.336" rx="1" transform="translate(33.594 0)" fill="#4a4a4a"/>
<rect id="Rectangle_44" data-name="Rectangle 44" width="5.336" height="5.336" rx="1" transform="translate(39.918 0)" fill="#4a4a4a"/>
<rect id="Rectangle_45" data-name="Rectangle 45" width="5.336" height="5.336" rx="1" transform="translate(46.242 0)" fill="#4a4a4a"/>
<rect id="Rectangle_46" data-name="Rectangle 46" width="5.336" height="5.336" rx="1" transform="translate(52.565 0)" fill="#4a4a4a"/>
<rect id="Rectangle_47" data-name="Rectangle 47" width="5.336" height="5.336" rx="1" transform="translate(58.889 0)" fill="#4a4a4a"/>
<rect id="Rectangle_48" data-name="Rectangle 48" width="5.336" height="5.336" rx="1" transform="translate(65.213 0)" fill="#4a4a4a"/>
<rect id="Rectangle_49" data-name="Rectangle 49" width="5.336" height="5.336" rx="1" transform="translate(71.537 0)" fill="#4a4a4a"/>
<rect id="Rectangle_50" data-name="Rectangle 50" width="5.336" height="5.336" rx="1" transform="translate(77.86 0)" fill="#4a4a4a"/>
<rect id="Rectangle_51" data-name="Rectangle 51" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
</g>
<g id="Group_6" data-name="Group 6" transform="translate(1.531 16.584)">
<path id="Path_54" data-name="Path 54" d="M1.093,0h7.3A1.093,1.093,0,0,1,9.485,1.093v3.15A1.093,1.093,0,0,1,8.392,5.336h-7.3A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<g id="Group_5" data-name="Group 5" transform="translate(10.671 0)">
<rect id="Rectangle_52" data-name="Rectangle 52" width="5.336" height="5.336" rx="1" fill="#4a4a4a"/>
<rect id="Rectangle_53" data-name="Rectangle 53" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/>
<rect id="Rectangle_54" data-name="Rectangle 54" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/>
<rect id="Rectangle_55" data-name="Rectangle 55" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/>
<rect id="Rectangle_56" data-name="Rectangle 56" width="5.336" height="5.336" rx="1" transform="translate(25.295 0)" fill="#4a4a4a"/>
<rect id="Rectangle_57" data-name="Rectangle 57" width="5.336" height="5.336" rx="1" transform="translate(31.619 0)" fill="#4a4a4a"/>
<rect id="Rectangle_58" data-name="Rectangle 58" width="5.336" height="5.336" rx="1" transform="translate(37.942 0)" fill="#4a4a4a"/>
<rect id="Rectangle_59" data-name="Rectangle 59" width="5.336" height="5.336" rx="1" transform="translate(44.265 0)" fill="#4a4a4a"/>
<rect id="Rectangle_60" data-name="Rectangle 60" width="5.336" height="5.336" rx="1" transform="translate(50.589 0)" fill="#4a4a4a"/>
<rect id="Rectangle_61" data-name="Rectangle 61" width="5.336" height="5.336" rx="1" transform="translate(56.912 0)" fill="#4a4a4a"/>
<rect id="Rectangle_62" data-name="Rectangle 62" width="5.336" height="5.336" rx="1" transform="translate(63.236 0)" fill="#4a4a4a"/>
</g>
<path id="Path_55" data-name="Path 55" d="M1.094,0H8A1.093,1.093,0,0,1,9.091,1.093v3.15A1.093,1.093,0,0,1,8,5.336H1.093A1.093,1.093,0,0,1,0,4.243V1.094A1.093,1.093,0,0,1,1.093,0Z" transform="translate(80.428 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_7" data-name="Group 7" transform="translate(1.531 29.627)">
<rect id="Rectangle_63" data-name="Rectangle 63" width="5.336" height="5.336" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_64" data-name="Rectangle 64" width="5.336" height="5.336" rx="1" transform="translate(6.324 0)" fill="#4a4a4a"/>
<rect id="Rectangle_65" data-name="Rectangle 65" width="5.336" height="5.336" rx="1" transform="translate(12.647 0)" fill="#4a4a4a"/>
<rect id="Rectangle_66" data-name="Rectangle 66" width="5.336" height="5.336" rx="1" transform="translate(18.971 0)" fill="#4a4a4a"/>
<path id="Path_56" data-name="Path 56" d="M1.093,0H31.515a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H1.093A1.093,1.093,0,0,1,0,4.244V1.093A1.093,1.093,0,0,1,1.093,0ZM34.687,0h3.942a1.093,1.093,0,0,1,1.093,1.093V4.244a1.093,1.093,0,0,1-1.093,1.093H34.687a1.093,1.093,0,0,1-1.093-1.093V1.093A1.093,1.093,0,0,1,34.687,0Z" transform="translate(25.294 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_67" data-name="Rectangle 67" width="5.336" height="5.336" rx="1" transform="translate(66.003 0)" fill="#4a4a4a"/>
<rect id="Rectangle_68" data-name="Rectangle 68" width="5.336" height="5.336" rx="1" transform="translate(72.327 0)" fill="#4a4a4a"/>
<rect id="Rectangle_69" data-name="Rectangle 69" width="5.336" height="5.336" rx="1" transform="translate(84.183 0)" fill="#4a4a4a"/>
<path id="Path_57" data-name="Path 57" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(83.59 2.273) rotate(180)" fill="#4a4a4a"/>
<path id="Path_58" data-name="Path 58" d="M5.336,0V1.18A1.093,1.093,0,0,1,4.243,2.273H1.093A1.093,1.093,0,0,1,0,1.18V0Z" transform="translate(78.255 3.063)" fill="#4a4a4a"/>
</g>
<rect id="Rectangle_70" data-name="Rectangle 70" width="88.927" height="2.371" rx="1.085" transform="translate(1.925 1.17)" fill="#4a4a4a"/>
<rect id="Rectangle_71" data-name="Rectangle 71" width="4.986" height="1.581" rx="0.723" transform="translate(4.1 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_72" data-name="Rectangle 72" width="4.986" height="1.581" rx="0.723" transform="translate(10.923 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_73" data-name="Rectangle 73" width="4.986" height="1.581" rx="0.723" transform="translate(16.173 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_74" data-name="Rectangle 74" width="4.986" height="1.581" rx="0.723" transform="translate(21.421 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_75" data-name="Rectangle 75" width="4.986" height="1.581" rx="0.723" transform="translate(26.671 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_76" data-name="Rectangle 76" width="4.986" height="1.581" rx="0.723" transform="translate(33.232 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_77" data-name="Rectangle 77" width="4.986" height="1.581" rx="0.723" transform="translate(38.48 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_78" data-name="Rectangle 78" width="4.986" height="1.581" rx="0.723" transform="translate(43.73 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_79" data-name="Rectangle 79" width="4.986" height="1.581" rx="0.723" transform="translate(48.978 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_80" data-name="Rectangle 80" width="4.986" height="1.581" rx="0.723" transform="translate(55.54 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_81" data-name="Rectangle 81" width="4.986" height="1.581" rx="0.723" transform="translate(60.788 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_82" data-name="Rectangle 82" width="4.986" height="1.581" rx="0.723" transform="translate(66.038 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_83" data-name="Rectangle 83" width="4.986" height="1.581" rx="0.723" transform="translate(72.599 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_84" data-name="Rectangle 84" width="4.986" height="1.581" rx="0.723" transform="translate(77.847 1.566)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_85" data-name="Rectangle 85" width="4.986" height="1.581" rx="0.723" transform="translate(83.097 1.566)" fill="#d8d8d8" opacity="0.136"/>
</g>
<path id="Path_59" data-name="Path 59" d="M146.71,159.855a5.439,5.439,0,0,0-.7.07c-.042-.164-.081-.329-.127-.493a5.457,5.457,0,1,0-5.4-9.372q-.181-.185-.366-.367a5.454,5.454,0,1,0-9.384-5.4c-.162-.046-.325-.084-.486-.126a5.467,5.467,0,1,0-10.788,0c-.162.042-.325.08-.486.126a5.457,5.457,0,1,0-9.384,5.4,21.843,21.843,0,1,0,36.421,21.02,5.452,5.452,0,1,0,.7-10.858" transform="translate(6.275 -6.025)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_60" data-name="Path 60" d="M83,124.855h43.71V103H83Z" transform="translate(4.42 -9.271)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_61" data-name="Path 61" d="M134.855,116.765a2.732,2.732,0,1,0,0-5.464,2.811,2.811,0,0,0-.349.035c-.022-.082-.04-.164-.063-.246a2.733,2.733,0,0,0-1.052-5.253,2.7,2.7,0,0,0-1.648.566q-.09-.093-.184-.184a2.7,2.7,0,0,0,.553-1.633,2.732,2.732,0,0,0-5.245-1.07,10.928,10.928,0,1,0,0,21.031,2.732,2.732,0,0,0,5.245-1.07,2.7,2.7,0,0,0-.553-1.633q.093-.09.184-.184a2.7,2.7,0,0,0,1.648.566,2.732,2.732,0,0,0,1.052-5.253c.023-.081.042-.164.063-.246a2.811,2.811,0,0,0,.349.035" transform="translate(7.202 -9.377)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_62" data-name="Path 62" d="M143.232,42.33a2.967,2.967,0,0,1-.535-.055,2.754,2.754,0,0,1-.514-.153,2.838,2.838,0,0,1-.471-.251,4.139,4.139,0,0,1-.415-.339,3.2,3.2,0,0,1-.338-.415A2.7,2.7,0,0,1,140.5,39.6a2.968,2.968,0,0,1,.055-.535,3.152,3.152,0,0,1,.152-.514,2.874,2.874,0,0,1,.252-.47,2.633,2.633,0,0,1,.753-.754,2.837,2.837,0,0,1,.471-.251,2.753,2.753,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,4.019,4.019,0,0,1,.339.415,2.786,2.786,0,0,1,.251.47,2.864,2.864,0,0,1,.208,1.049,2.77,2.77,0,0,1-.8,1.934,4.139,4.139,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459m21.855-1.366a2.789,2.789,0,0,1-1.935-.8,4.162,4.162,0,0,1-.338-.415,2.7,2.7,0,0,1-.459-1.519,2.789,2.789,0,0,1,.8-1.934,4.139,4.139,0,0,1,.415-.339,2.838,2.838,0,0,1,.471-.251,2.752,2.752,0,0,1,.514-.153,2.527,2.527,0,0,1,1.071,0,2.654,2.654,0,0,1,.983.4,4.139,4.139,0,0,1,.415.339,2.79,2.79,0,0,1,.8,1.934,3.069,3.069,0,0,1-.055.535,2.779,2.779,0,0,1-.153.514,3.885,3.885,0,0,1-.251.47,4.02,4.02,0,0,1-.339.415,4.138,4.138,0,0,1-.415.339,2.722,2.722,0,0,1-1.519.459" transform="translate(9.753 -15.532)" fill-rule="evenodd"/>
</g>
</g>
</svg>
170
docs/static/img/undraw_docusaurus_react.svg
vendored
@@ -1,170 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1041.277" height="554.141" viewBox="0 0 1041.277 554.141">
<title>Powered by React</title>
<g id="Group_24" data-name="Group 24" transform="translate(-440 -263)">
<g id="Group_23" data-name="Group 23" transform="translate(439.989 262.965)">
<path id="Path_299" data-name="Path 299" d="M1040.82,611.12q-1.74,3.75-3.47,7.4-2.7,5.67-5.33,11.12c-.78,1.61-1.56,3.19-2.32,4.77-8.6,17.57-16.63,33.11-23.45,45.89A73.21,73.21,0,0,1,942.44,719l-151.65,1.65h-1.6l-13,.14-11.12.12-34.1.37h-1.38l-17.36.19h-.53l-107,1.16-95.51,1-11.11.12-69,.75H429l-44.75.48h-.48l-141.5,1.53-42.33.46a87.991,87.991,0,0,1-10.79-.54h0c-1.22-.14-2.44-.3-3.65-.49a87.38,87.38,0,0,1-51.29-27.54C116,678.37,102.75,655,93.85,629.64q-1.93-5.49-3.6-11.12C59.44,514.37,97,380,164.6,290.08q4.25-5.64,8.64-11l.07-.08c20.79-25.52,44.1-46.84,68.93-62,44-26.91,92.75-34.49,140.7-11.9,40.57,19.12,78.45,28.11,115.17,30.55,3.71.24,7.42.42,11.11.53,84.23,2.65,163.17-27.7,255.87-47.29,3.69-.78,7.39-1.55,11.12-2.28,66.13-13.16,139.49-20.1,226.73-5.51a189.089,189.089,0,0,1,26.76,6.4q5.77,1.86,11.12,4c41.64,16.94,64.35,48.24,74,87.46q1.37,5.46,2.37,11.11C1134.3,384.41,1084.19,518.23,1040.82,611.12Z" transform="translate(-79.34 -172.91)" fill="#f2f2f2"/>
<path id="Path_300" data-name="Path 300" d="M576.36,618.52a95.21,95.21,0,0,1-1.87,11.12h93.7V618.52Zm-78.25,62.81,11.11-.09V653.77c-3.81-.17-7.52-.34-11.11-.52ZM265.19,618.52v11.12h198.5V618.52ZM1114.87,279h-74V191.51q-5.35-2.17-11.12-4V279H776.21V186.58c-3.73.73-7.43,1.5-11.12,2.28V279H509.22V236.15c-3.69-.11-7.4-.29-11.11-.53V279H242.24V217c-24.83,15.16-48.14,36.48-68.93,62h-.07v.08q-4.4,5.4-8.64,11h8.64V618.52h-83q1.66,5.63,3.6,11.12h79.39v93.62a87,87,0,0,0,12.2,2.79c1.21.19,2.43.35,3.65.49h0a87.991,87.991,0,0,0,10.79.54l42.33-.46v-97H498.11v94.21l11.11-.12V629.64H765.09V721l11.12-.12V629.64H1029.7v4.77c.76-1.58,1.54-3.16,2.32-4.77q2.63-5.45,5.33-11.12,1.73-3.64,3.47-7.4v-321h76.42Q1116.23,284.43,1114.87,279ZM242.24,618.52V290.08H498.11V618.52Zm267,0V290.08H765.09V618.52Zm520.48,0H776.21V290.08H1029.7Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_301" data-name="Path 301" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" fill="#65617d"/>
<path id="Path_302" data-name="Path 302" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l46.65-28,93.6-.78,2-.01.66-.01,2-.03,44.94-.37,2.01-.01.64-.01,2-.01L315,509.3l.38-.01,35.55-.3h.29l277.4-2.34,6.79-.05h.68l5.18-.05,37.65-.31,2-.03,1.85-.02h.96l11.71-.09,2.32-.03,3.11-.02,9.75-.09,15.47-.13,2-.02,3.48-.02h.65l74.71-.64Z" opacity="0.2"/>
<path id="Path_303" data-name="Path 303" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<path id="Path_304" data-name="Path 304" d="M375.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_305" data-name="Path 305" d="M377.44,656.57v24.49a6.13,6.13,0,0,1-3.5,5.54,6,6,0,0,1-2.5.6l-34.9.74a6,6,0,0,1-2.7-.57,6.12,6.12,0,0,1-3.57-5.57V656.57Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<rect id="Rectangle_137" data-name="Rectangle 137" width="47.17" height="31.5" transform="translate(680.92 483.65)" fill="#3f3d56"/>
<rect id="Rectangle_138" data-name="Rectangle 138" width="47.17" height="31.5" transform="translate(680.92 483.65)" opacity="0.1"/>
<rect id="Rectangle_139" data-name="Rectangle 139" width="47.17" height="31.5" transform="translate(678.92 483.65)" fill="#3f3d56"/>
<path id="Path_306" data-name="Path 306" d="M298.09,483.65v4.97l-47.17,1.26v-6.23Z" opacity="0.1"/>
<path id="Path_307" data-name="Path 307" d="M460.69,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6a4,4,0,0,1,3.95,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_308" data-name="Path 308" d="M265.19,481.32v181.2h-.05a4,4,0,0,1-3.95-3.95V485.27a4,4,0,0,1,3.95-3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_309" data-name="Path 309" d="M194.59,319.15h177.5V467.4l-177.5,4Z" fill="#39374d"/>
<path id="Path_310" data-name="Path 310" d="M726.09,483.65v6.41l-47.17-1.26v-5.15Z" opacity="0.1"/>
<path id="Path_311" data-name="Path 311" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0L672,657.42a4,4,0,0,1-3.85-3.95V485.27a4,4,0,0,1,3.95-3.95H863.7a4,4,0,0,1,3.99,3.95Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_312" data-name="Path 312" d="M867.69,485.27v173.3a4,4,0,0,1-4,3.95h0V481.32h0a4,4,0,0,1,4,3.95Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_313" data-name="Path 313" d="M775.59,319.15H598.09V467.4l177.5,4Z" fill="#39374d"/>
<path id="Path_314" data-name="Path 314" d="M663.19,485.27v168.2a4,4,0,0,1-3.85,3.95l-191.65,5.1h0a4,4,0,0,1-4-3.95V485.27a4,4,0,0,1,3.95-3.95h191.6A4,4,0,0,1,663.19,485.27Z" transform="translate(-79.34 -172.91)" fill="#65617d"/>
<path id="Path_315" data-name="Path 315" d="M397.09,319.15h177.5V467.4l-177.5,4Z" fill="#4267b2"/>
<path id="Path_316" data-name="Path 316" d="M863.09,533.65v13l-151.92,1.4-1.62.03-57.74.53-1.38.02-17.55.15h-.52l-106.98.99L349.77,551.4h-.15l-44.65.42-.48.01-198.4,1.82v-15l202.51-1.33h.48l40.99-.28h.19l283.08-1.87h.29l.17-.01h.47l4.79-.03h1.46l74.49-.5,4.4-.02.98-.01Z" opacity="0.1"/>
<circle id="Ellipse_111" data-name="Ellipse 111" cx="51.33" cy="51.33" r="51.33" transform="translate(435.93 246.82)" fill="#fbbebe"/>
<path id="Path_317" data-name="Path 317" d="M617.94,550.07s-99.5,12-90,0c3.44-4.34,4.39-17.2,4.2-31.85-.06-4.45-.22-9.06-.45-13.65-1.1-22-3.75-43.5-3.75-43.5s87-41,77-8.5c-4,13.13-2.69,31.57.35,48.88.89,5.05,1.92,10,3,14.7a344.66,344.66,0,0,0,9.65,33.92Z" transform="translate(-79.34 -172.91)" fill="#fbbebe"/>
<path id="Path_318" data-name="Path 318" d="M585.47,546c11.51-2.13,23.7-6,34.53-1.54,2.85,1.17,5.47,2.88,8.39,3.86s6.12,1.22,9.16,1.91c10.68,2.42,19.34,10.55,24.9,20s8.44,20.14,11.26,30.72l6.9,25.83c6,22.45,12,45.09,13.39,68.3a2437.506,2437.506,0,0,1-250.84,1.43c5.44-10.34,11-21.31,10.54-33s-7.19-23.22-4.76-34.74c1.55-7.34,6.57-13.39,9.64-20.22,8.75-19.52,1.94-45.79,17.32-60.65,6.92-6.68,17-9.21,26.63-8.89,12.28.41,24.85,4.24,37,6.11C555.09,547.48,569.79,548.88,585.47,546Z" transform="translate(-79.34 -172.91)" fill="#ff6584"/>
<path id="Path_319" data-name="Path 319" d="M716.37,657.17l-.1,1.43v.1l-.17,2.3-1.33,18.51-1.61,22.3-.46,6.28-1,13.44v.17l-107,1-175.59,1.9v.84h-.14v-1.12l.45-14.36.86-28.06.74-23.79.07-2.37a10.53,10.53,0,0,1,11.42-10.17c4.72.4,10.85.89,18.18,1.41l3,.22c42.33,2.94,120.56,6.74,199.5,2,1.66-.09,3.33-.19,5-.31,12.24-.77,24.47-1.76,36.58-3a10.53,10.53,0,0,1,11.6,11.23Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_320" data-name="Path 320" d="M429.08,725.44v-.84l175.62-1.91,107-1h.3v-.17l1-13.44.43-6,1.64-22.61,1.29-17.9v-.44a10.617,10.617,0,0,0-.11-2.47.3.3,0,0,0,0-.1,10.391,10.391,0,0,0-2-4.64,10.54,10.54,0,0,0-9.42-4c-12.11,1.24-24.34,2.23-36.58,3-1.67.12-3.34.22-5,.31-78.94,4.69-157.17.89-199.5-2l-3-.22c-7.33-.52-13.46-1-18.18-1.41a10.54,10.54,0,0,0-11.24,8.53,11,11,0,0,0-.18,1.64l-.68,22.16L429.54,710l-.44,14.36v1.12Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
<path id="Path_321" data-name="Path 321" d="M716.67,664.18l-1.23,15.33-1.83,22.85-.46,5.72-1,12.81-.06.64v.17h0l-.15,1.48.11-1.48h-.29l-107,1-175.65,1.9v-.28l.49-14.36,1-28.06.64-18.65A6.36,6.36,0,0,1,434.3,658a6.25,6.25,0,0,1,3.78-.9c2.1.17,4.68.37,7.69.59,4.89.36,10.92.78,17.94,1.22,13,.82,29.31,1.7,48,2.42,52,2,122.2,2.67,188.88-3.17,3-.26,6.1-.55,9.13-.84a6.26,6.26,0,0,1,3.48.66,5.159,5.159,0,0,1,.86.54,6.14,6.14,0,0,1,2,2.46,3.564,3.564,0,0,1,.25.61A6.279,6.279,0,0,1,716.67,664.18Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_322" data-name="Path 322" d="M377.44,677.87v3.19a6.13,6.13,0,0,1-3.5,5.54l-40.1.77a6.12,6.12,0,0,1-3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_323" data-name="Path 323" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/>
<path id="Path_324" data-name="Path 324" d="M298.59,515.57l-52.25,1V507.9l52.25-1Z" opacity="0.1"/>
<path id="Path_325" data-name="Path 325" d="M300.59,515.57l-52.25,1V507.9l52.25-1Z" fill="#3f3d56"/>
<path id="Path_326" data-name="Path 326" d="M758.56,679.87v3.19a6.13,6.13,0,0,0,3.5,5.54l40.1.77a6.12,6.12,0,0,0,3.57-5.57v-3Z" transform="translate(-79.34 -172.91)" opacity="0.1"/>
<path id="Path_327" data-name="Path 327" d="M678.72,517.57l52.25,1V509.9l-52.25-1Z" opacity="0.1"/>
<path id="Path_328" data-name="Path 328" d="M676.72,517.57l52.25,1V509.9l-52.25-1Z" fill="#3f3d56"/>
<path id="Path_329" data-name="Path 329" d="M534.13,486.79c.08,7-3.16,13.6-5.91,20.07a163.491,163.491,0,0,0-12.66,74.71c.73,11,2.58,22,.73,32.9s-8.43,21.77-19,24.9c17.53,10.45,41.26,9.35,57.76-2.66,8.79-6.4,15.34-15.33,21.75-24.11a97.86,97.86,0,0,1-13.31,44.75A103.43,103.43,0,0,0,637,616.53c4.31-5.81,8.06-12.19,9.72-19.23,3.09-13-1.22-26.51-4.51-39.5a266.055,266.055,0,0,1-6.17-33c-.43-3.56-.78-7.22.1-10.7,1-4.07,3.67-7.51,5.64-11.22,5.6-10.54,5.73-23.3,2.86-34.88s-8.49-22.26-14.06-32.81c-4.46-8.46-9.3-17.31-17.46-22.28-5.1-3.1-11-4.39-16.88-5.64l-25.37-5.43c-5.55-1.19-11.26-2.38-16.87-1.51-9.47,1.48-16.14,8.32-22,15.34-4.59,5.46-15.81,15.71-16.6,22.86-.72,6.59,5.1,17.63,6.09,24.58,1.3,9,2.22,6,7.3,11.52C532,478.05,534.07,482,534.13,486.79Z" transform="translate(-79.34 -172.91)" fill="#3f3d56"/>
</g>
<g id="docusaurus_keytar" transform="translate(670.271 615.768)">
<path id="Path_40" data-name="Path 40" d="M99,52h43.635V69.662H99Z" transform="translate(-49.132 -33.936)" fill="#fff" fill-rule="evenodd"/>
<path id="Path_41" data-name="Path 41" d="M13.389,158.195A10.377,10.377,0,0,1,4.4,153a10.377,10.377,0,0,0,8.988,15.584H23.779V158.195Z" transform="translate(-3 -82.47)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_42" data-name="Path 42" d="M66.967,38.083l36.373-2.273V30.615A10.389,10.389,0,0,0,92.95,20.226H46.2l-1.3-2.249a1.5,1.5,0,0,0-2.6,0L41,20.226l-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-1.3-2.249a1.5,1.5,0,0,0-2.6,0l-1.3,2.249-.034,0-2.152-2.151a1.5,1.5,0,0,0-2.508.672L25.21,21.4l-2.7-.723a1.5,1.5,0,0,0-1.836,1.837l.722,2.7-2.65.71a1.5,1.5,0,0,0-.673,2.509l2.152,2.152c0,.011,0,.022,0,.033l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6L20.226,41l-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3-2.249,1.3a1.5,1.5,0,0,0,0,2.6l2.249,1.3A10.389,10.389,0,0,0,30.615,103.34H92.95A10.389,10.389,0,0,0,103.34,92.95V51.393L66.967,49.12a5.53,5.53,0,0,1,0-11.038" transform="translate(-9.836 -17.226)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_43" data-name="Path 43" d="M143,163.779h15.584V143H143Z" transform="translate(-70.275 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_44" data-name="Path 44" d="M173.779,148.389a2.582,2.582,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-75.08 -75.262)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_45" data-name="Path 45" d="M153,113.389h15.584V103H153Z" transform="translate(-75.08 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_46" data-name="Path 46" d="M183.389,108.944a1.3,1.3,0,1,0,0-2.6,1.336,1.336,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.337,1.337,0,0,0,.166.017" transform="translate(-84.691 -57.894)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_47" data-name="Path 47" d="M52.188,48.292a1.3,1.3,0,0,1-1.3-1.3,3.9,3.9,0,0,0-7.792,0,1.3,1.3,0,1,1-2.6,0,6.493,6.493,0,0,1,12.987,0,1.3,1.3,0,0,1-1.3,1.3" transform="translate(-21.02 -28.41)" fill-rule="evenodd"/>
<path id="Path_48" data-name="Path 48" d="M103,139.752h31.168a10.389,10.389,0,0,0,10.389-10.389V93H113.389A10.389,10.389,0,0,0,103,103.389Z" transform="translate(-51.054 -53.638)" fill="#ffff50" fill-rule="evenodd"/>
<path id="Path_49" data-name="Path 49" d="M141.1,94.017H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0-25.877H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.293H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m0,10.389H115.106a.519.519,0,1,1,0-1.039H141.1a.519.519,0,0,1,0,1.039m7.782-47.993c-.006,0-.011,0-.018,0-1.605.055-2.365,1.66-3.035,3.077-.7,1.48-1.24,2.443-2.126,2.414-.981-.035-1.542-1.144-2.137-2.317-.683-1.347-1.462-2.876-3.1-2.819-1.582.054-2.344,1.451-3.017,2.684-.715,1.313-1.2,2.112-2.141,2.075-1-.036-1.533-.938-2.149-1.981-.686-1.162-1.479-2.467-3.084-2.423-1.555.053-2.319,1.239-2.994,2.286-.713,1.106-1.213,1.781-2.164,1.741-1.025-.036-1.554-.784-2.167-1.65-.688-.973-1.463-2.074-3.062-2.021a3.815,3.815,0,0,0-2.959,1.879c-.64.812-1.14,1.456-2.2,1.415a.52.52,0,0,0-.037,1.039,3.588,3.588,0,0,0,3.05-1.811c.611-.777,1.139-1.448,2.178-1.483,1-.043,1.47.579,2.179,1.582.674.953,1.438,2.033,2.977,2.089,1.612.054,2.387-1.151,3.074-2.217.614-.953,1.144-1.775,2.156-1.81.931-.035,1.438.7,2.153,1.912.674,1.141,1.437,2.434,3.006,2.491,1.623.056,2.407-1.361,3.09-2.616.592-1.085,1.15-2.109,2.14-2.143.931-.022,1.417.829,2.135,2.249.671,1.326,1.432,2.828,3.026,2.886l.088,0c1.592,0,2.347-1.6,3.015-3.01.592-1.252,1.152-2.431,2.113-2.479Z" transform="translate(-55.378 -38.552)" fill-rule="evenodd"/>
<path id="Path_50" data-name="Path 50" d="M83,163.779h20.779V143H83Z" transform="translate(-41.443 -77.665)" fill="#3ecc5f" fill-rule="evenodd"/>
<g id="Group_8" data-name="Group 8" transform="matrix(0.966, -0.259, 0.259, 0.966, 51.971, 43.3)">
<rect id="Rectangle_3" data-name="Rectangle 3" width="43.906" height="17.333" rx="2" transform="translate(0 0)" fill="#d8d8d8"/>
<g id="Group_2" data-name="Group 2" transform="translate(0.728 10.948)">
<rect id="Rectangle_4" data-name="Rectangle 4" width="2.537" height="2.537" rx="1" transform="translate(7.985 0)" fill="#4a4a4a"/>
<rect id="Rectangle_5" data-name="Rectangle 5" width="2.537" height="2.537" rx="1" transform="translate(10.991 0)" fill="#4a4a4a"/>
<rect id="Rectangle_6" data-name="Rectangle 6" width="2.537" height="2.537" rx="1" transform="translate(13.997 0)" fill="#4a4a4a"/>
<rect id="Rectangle_7" data-name="Rectangle 7" width="2.537" height="2.537" rx="1" transform="translate(17.003 0)" fill="#4a4a4a"/>
<rect id="Rectangle_8" data-name="Rectangle 8" width="2.537" height="2.537" rx="1" transform="translate(20.009 0)" fill="#4a4a4a"/>
<rect id="Rectangle_9" data-name="Rectangle 9" width="2.537" height="2.537" rx="1" transform="translate(23.015 0)" fill="#4a4a4a"/>
<rect id="Rectangle_10" data-name="Rectangle 10" width="2.537" height="2.537" rx="1" transform="translate(26.021 0)" fill="#4a4a4a"/>
<rect id="Rectangle_11" data-name="Rectangle 11" width="2.537" height="2.537" rx="1" transform="translate(29.028 0)" fill="#4a4a4a"/>
<rect id="Rectangle_12" data-name="Rectangle 12" width="2.537" height="2.537" rx="1" transform="translate(32.034 0)" fill="#4a4a4a"/>
<path id="Path_51" data-name="Path 51" d="M.519,0H6.9A.519.519,0,0,1,7.421.52v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0ZM35.653,0h6.383a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H35.652a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,35.652,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_3" data-name="Group 3" transform="translate(0.728 4.878)">
<path id="Path_52" data-name="Path 52" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_13" data-name="Rectangle 13" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_14" data-name="Rectangle 14" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_15" data-name="Rectangle 15" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_16" data-name="Rectangle 16" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_17" data-name="Rectangle 17" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_18" data-name="Rectangle 18" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_19" data-name="Rectangle 19" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_20" data-name="Rectangle 20" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_21" data-name="Rectangle 21" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_22" data-name="Rectangle 22" width="2.537" height="2.537" rx="1" transform="translate(31 0)" fill="#4a4a4a"/>
<rect id="Rectangle_23" data-name="Rectangle 23" width="2.537" height="2.537" rx="1" transform="translate(34.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_24" data-name="Rectangle 24" width="2.537" height="2.537" rx="1" transform="translate(37.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_25" data-name="Rectangle 25" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
</g>
<g id="Group_4" data-name="Group 4" transform="translate(43.283 4.538) rotate(180)">
<path id="Path_53" data-name="Path 53" d="M.519,0H2.956a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.519A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_26" data-name="Rectangle 26" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_27" data-name="Rectangle 27" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_28" data-name="Rectangle 28" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_29" data-name="Rectangle 29" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_30" data-name="Rectangle 30" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_31" data-name="Rectangle 31" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_32" data-name="Rectangle 32" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_33" data-name="Rectangle 33" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_34" data-name="Rectangle 34" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_35" data-name="Rectangle 35" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/>
<rect id="Rectangle_36" data-name="Rectangle 36" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/>
<rect id="Rectangle_37" data-name="Rectangle 37" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/>
<rect id="Rectangle_38" data-name="Rectangle 38" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
<rect id="Rectangle_39" data-name="Rectangle 39" width="2.537" height="2.537" rx="1" transform="translate(3.945 0)" fill="#4a4a4a"/>
<rect id="Rectangle_40" data-name="Rectangle 40" width="2.537" height="2.537" rx="1" transform="translate(6.951 0)" fill="#4a4a4a"/>
<rect id="Rectangle_41" data-name="Rectangle 41" width="2.537" height="2.537" rx="1" transform="translate(9.958 0)" fill="#4a4a4a"/>
<rect id="Rectangle_42" data-name="Rectangle 42" width="2.537" height="2.537" rx="1" transform="translate(12.964 0)" fill="#4a4a4a"/>
<rect id="Rectangle_43" data-name="Rectangle 43" width="2.537" height="2.537" rx="1" transform="translate(15.97 0)" fill="#4a4a4a"/>
<rect id="Rectangle_44" data-name="Rectangle 44" width="2.537" height="2.537" rx="1" transform="translate(18.976 0)" fill="#4a4a4a"/>
<rect id="Rectangle_45" data-name="Rectangle 45" width="2.537" height="2.537" rx="1" transform="translate(21.982 0)" fill="#4a4a4a"/>
<rect id="Rectangle_46" data-name="Rectangle 46" width="2.537" height="2.537" rx="1" transform="translate(24.988 0)" fill="#4a4a4a"/>
<rect id="Rectangle_47" data-name="Rectangle 47" width="2.537" height="2.537" rx="1" transform="translate(27.994 0)" fill="#4a4a4a"/>
<rect id="Rectangle_48" data-name="Rectangle 48" width="2.537" height="2.537" rx="1" transform="translate(31.001 0)" fill="#4a4a4a"/>
<rect id="Rectangle_49" data-name="Rectangle 49" width="2.537" height="2.537" rx="1" transform="translate(34.007 0)" fill="#4a4a4a"/>
<rect id="Rectangle_50" data-name="Rectangle 50" width="2.537" height="2.537" rx="1" transform="translate(37.013 0)" fill="#4a4a4a"/>
<rect id="Rectangle_51" data-name="Rectangle 51" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
</g>
<g id="Group_6" data-name="Group 6" transform="translate(0.728 7.883)">
<path id="Path_54" data-name="Path 54" d="M.519,0h3.47a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(0 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<g id="Group_5" data-name="Group 5" transform="translate(5.073 0)">
<rect id="Rectangle_52" data-name="Rectangle 52" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_53" data-name="Rectangle 53" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_54" data-name="Rectangle 54" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_55" data-name="Rectangle 55" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/>
<rect id="Rectangle_56" data-name="Rectangle 56" width="2.537" height="2.537" rx="1" transform="translate(12.025 0)" fill="#4a4a4a"/>
<rect id="Rectangle_57" data-name="Rectangle 57" width="2.537" height="2.537" rx="1" transform="translate(15.031 0)" fill="#4a4a4a"/>
<rect id="Rectangle_58" data-name="Rectangle 58" width="2.537" height="2.537" rx="1" transform="translate(18.037 0)" fill="#4a4a4a"/>
<rect id="Rectangle_59" data-name="Rectangle 59" width="2.537" height="2.537" rx="1" transform="translate(21.042 0)" fill="#4a4a4a"/>
<rect id="Rectangle_60" data-name="Rectangle 60" width="2.537" height="2.537" rx="1" transform="translate(24.049 0)" fill="#4a4a4a"/>
<rect id="Rectangle_61" data-name="Rectangle 61" width="2.537" height="2.537" rx="1" transform="translate(27.055 0)" fill="#4a4a4a"/>
<rect id="Rectangle_62" data-name="Rectangle 62" width="2.537" height="2.537" rx="1" transform="translate(30.061 0)" fill="#4a4a4a"/>
</g>
<path id="Path_55" data-name="Path 55" d="M.52,0H3.8a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.017V.52A.519.519,0,0,1,.519,0Z" transform="translate(38.234 0)" fill="#4a4a4a" fill-rule="evenodd"/>
</g>
<g id="Group_7" data-name="Group 7" transform="translate(0.728 14.084)">
<rect id="Rectangle_63" data-name="Rectangle 63" width="2.537" height="2.537" rx="1" transform="translate(0 0)" fill="#4a4a4a"/>
<rect id="Rectangle_64" data-name="Rectangle 64" width="2.537" height="2.537" rx="1" transform="translate(3.006 0)" fill="#4a4a4a"/>
<rect id="Rectangle_65" data-name="Rectangle 65" width="2.537" height="2.537" rx="1" transform="translate(6.012 0)" fill="#4a4a4a"/>
<rect id="Rectangle_66" data-name="Rectangle 66" width="2.537" height="2.537" rx="1" transform="translate(9.018 0)" fill="#4a4a4a"/>
<path id="Path_56" data-name="Path 56" d="M.519,0H14.981A.519.519,0,0,1,15.5.519v1.5a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,2.018V.519A.519.519,0,0,1,.519,0Zm15.97,0h1.874a.519.519,0,0,1,.519.519v1.5a.519.519,0,0,1-.519.519H16.489a.519.519,0,0,1-.519-.519V.519A.519.519,0,0,1,16.489,0Z" transform="translate(12.024 0)" fill="#4a4a4a" fill-rule="evenodd"/>
<rect id="Rectangle_67" data-name="Rectangle 67" width="2.537" height="2.537" rx="1" transform="translate(31.376 0)" fill="#4a4a4a"/>
<rect id="Rectangle_68" data-name="Rectangle 68" width="2.537" height="2.537" rx="1" transform="translate(34.382 0)" fill="#4a4a4a"/>
<rect id="Rectangle_69" data-name="Rectangle 69" width="2.537" height="2.537" rx="1" transform="translate(40.018 0)" fill="#4a4a4a"/>
<path id="Path_57" data-name="Path 57" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(39.736 1.08) rotate(180)" fill="#4a4a4a"/>
<path id="Path_58" data-name="Path 58" d="M2.537,0V.561a.519.519,0,0,1-.519.519H.519A.519.519,0,0,1,0,.561V0Z" transform="translate(37.2 1.456)" fill="#4a4a4a"/>
</g>
<rect id="Rectangle_70" data-name="Rectangle 70" width="42.273" height="1.127" rx="0.564" transform="translate(0.915 0.556)" fill="#4a4a4a"/>
<rect id="Rectangle_71" data-name="Rectangle 71" width="2.37" height="0.752" rx="0.376" transform="translate(1.949 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_72" data-name="Rectangle 72" width="2.37" height="0.752" rx="0.376" transform="translate(5.193 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_73" data-name="Rectangle 73" width="2.37" height="0.752" rx="0.376" transform="translate(7.688 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_74" data-name="Rectangle 74" width="2.37" height="0.752" rx="0.376" transform="translate(10.183 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_75" data-name="Rectangle 75" width="2.37" height="0.752" rx="0.376" transform="translate(12.679 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_76" data-name="Rectangle 76" width="2.37" height="0.752" rx="0.376" transform="translate(15.797 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_77" data-name="Rectangle 77" width="2.37" height="0.752" rx="0.376" transform="translate(18.292 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_78" data-name="Rectangle 78" width="2.37" height="0.752" rx="0.376" transform="translate(20.788 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_79" data-name="Rectangle 79" width="2.37" height="0.752" rx="0.376" transform="translate(23.283 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_80" data-name="Rectangle 80" width="2.37" height="0.752" rx="0.376" transform="translate(26.402 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_81" data-name="Rectangle 81" width="2.37" height="0.752" rx="0.376" transform="translate(28.897 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_82" data-name="Rectangle 82" width="2.37" height="0.752" rx="0.376" transform="translate(31.393 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_83" data-name="Rectangle 83" width="2.37" height="0.752" rx="0.376" transform="translate(34.512 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_84" data-name="Rectangle 84" width="2.37" height="0.752" rx="0.376" transform="translate(37.007 0.744)" fill="#d8d8d8" opacity="0.136"/>
<rect id="Rectangle_85" data-name="Rectangle 85" width="2.37" height="0.752" rx="0.376" transform="translate(39.502 0.744)" fill="#d8d8d8" opacity="0.136"/>
</g>
<path id="Path_59" data-name="Path 59" d="M123.779,148.389a2.583,2.583,0,0,0-.332.033c-.02-.078-.038-.156-.06-.234a2.594,2.594,0,1,0-2.567-4.455q-.086-.088-.174-.175a2.593,2.593,0,1,0-4.461-2.569c-.077-.022-.154-.04-.231-.06a2.6,2.6,0,1,0-5.128,0c-.077.02-.154.038-.231.06a2.594,2.594,0,1,0-4.461,2.569,10.384,10.384,0,1,0,17.314,9.992,2.592,2.592,0,1,0,.332-5.161" transform="translate(-51.054 -75.262)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_60" data-name="Path 60" d="M83,113.389h20.779V103H83Z" transform="translate(-41.443 -58.444)" fill="#3ecc5f" fill-rule="evenodd"/>
<path id="Path_61" data-name="Path 61" d="M123.389,108.944a1.3,1.3,0,1,0,0-2.6,1.338,1.338,0,0,0-.166.017c-.01-.039-.019-.078-.03-.117a1.3,1.3,0,0,0-.5-2.5,1.285,1.285,0,0,0-.783.269q-.043-.044-.087-.087a1.285,1.285,0,0,0,.263-.776,1.3,1.3,0,0,0-2.493-.509,5.195,5.195,0,1,0,0,10,1.3,1.3,0,0,0,2.493-.509,1.285,1.285,0,0,0-.263-.776q.044-.043.087-.087a1.285,1.285,0,0,0,.783.269,1.3,1.3,0,0,0,.5-2.5c.011-.038.02-.078.03-.117a1.335,1.335,0,0,0,.166.017" transform="translate(-55.859 -57.894)" fill="#44d860" fill-rule="evenodd"/>
<path id="Path_62" data-name="Path 62" d="M141.8,38.745a1.41,1.41,0,0,1-.255-.026,1.309,1.309,0,0,1-.244-.073,1.349,1.349,0,0,1-.224-.119,1.967,1.967,0,0,1-.2-.161,1.52,1.52,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.41,1.41,0,0,1,.026-.255,1.5,1.5,0,0,1,.072-.244,1.364,1.364,0,0,1,.12-.223,1.252,1.252,0,0,1,.358-.358,1.349,1.349,0,0,1,.224-.119,1.309,1.309,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.968,1.968,0,0,1,.2.161,1.908,1.908,0,0,1,.161.2,1.322,1.322,0,0,1,.12.223,1.361,1.361,0,0,1,.1.5,1.317,1.317,0,0,1-.379.919,1.968,1.968,0,0,1-.2.161,1.346,1.346,0,0,1-.223.119,1.332,1.332,0,0,1-.5.1m10.389-.649a1.326,1.326,0,0,1-.92-.379,1.979,1.979,0,0,1-.161-.2,1.282,1.282,0,0,1-.218-.722,1.326,1.326,0,0,1,.379-.919,1.967,1.967,0,0,1,.2-.161,1.351,1.351,0,0,1,.224-.119,1.308,1.308,0,0,1,.244-.073,1.2,1.2,0,0,1,.509,0,1.262,1.262,0,0,1,.468.192,1.967,1.967,0,0,1,.2.161,1.326,1.326,0,0,1,.379.919,1.461,1.461,0,0,1-.026.255,1.323,1.323,0,0,1-.073.244,1.847,1.847,0,0,1-.119.223,1.911,1.911,0,0,1-.161.2,1.967,1.967,0,0,1-.2.161,1.294,1.294,0,0,1-.722.218" transform="translate(-69.074 -26.006)" fill-rule="evenodd"/>
</g>
<g id="React-icon" transform="translate(906.3 541.56)">
<path id="Path_330" data-name="Path 330" d="M263.668,117.179c0-5.827-7.3-11.35-18.487-14.775,2.582-11.4,1.434-20.477-3.622-23.382a7.861,7.861,0,0,0-4.016-1v4a4.152,4.152,0,0,1,2.044.466c2.439,1.4,3.5,6.724,2.672,13.574-.2,1.685-.52,3.461-.914,5.272a86.9,86.9,0,0,0-11.386-1.954,87.469,87.469,0,0,0-7.459-8.965c5.845-5.433,11.332-8.41,15.062-8.41V78h0c-4.931,0-11.386,3.514-17.913,9.611-6.527-6.061-12.982-9.539-17.913-9.539v4c3.712,0,9.216,2.959,15.062,8.356a84.687,84.687,0,0,0-7.405,8.947,83.732,83.732,0,0,0-11.4,1.972c-.412-1.793-.717-3.532-.932-5.2-.843-6.85.2-12.175,2.618-13.592a3.991,3.991,0,0,1,2.062-.466v-4h0a8,8,0,0,0-4.052,1c-5.039,2.9-6.168,11.96-3.568,23.328-11.153,3.443-18.415,8.947-18.415,14.757,0,5.828,7.3,11.35,18.487,14.775-2.582,11.4-1.434,20.477,3.622,23.382a7.882,7.882,0,0,0,4.034,1c4.931,0,11.386-3.514,17.913-9.611,6.527,6.061,12.982,9.539,17.913,9.539a8,8,0,0,0,4.052-1c5.039-2.9,6.168-11.96,3.568-23.328C256.406,128.511,263.668,122.988,263.668,117.179Zm-23.346-11.96c-.663,2.313-1.488,4.7-2.421,7.083-.735-1.434-1.506-2.869-2.349-4.3-.825-1.434-1.7-2.833-2.582-4.2C235.517,104.179,237.974,104.645,240.323,105.219Zm-8.212,19.1c-1.4,2.421-2.833,4.716-4.321,6.85-2.672.233-5.379.359-8.1.359-2.708,0-5.415-.126-8.069-.341q-2.232-3.2-4.339-6.814-2.044-3.523-3.73-7.136c1.112-2.4,2.367-4.805,3.712-7.154,1.4-2.421,2.833-4.716,4.321-6.85,2.672-.233,5.379-.359,8.1-.359,2.708,0,5.415.126,8.069.341q2.232,3.2,4.339,6.814,2.044,3.523,3.73,7.136C234.692,119.564,233.455,121.966,232.11,124.315Zm5.792-2.331c.968,2.4,1.793,4.805,2.474,7.136-2.349.574-4.823,1.058-7.387,1.434.879-1.381,1.757-2.8,2.582-4.25C236.4,124.871,237.167,123.419,237.9,121.984ZM219.72,141.116a73.921,73.921,0,0,1-4.985-5.738c1.614.072,3.263.126,4.931.126,1.685,0,3.353-.036,4.985-.126A69.993,69.993,0,0,1,219.72,141.116ZM206.38,130.555c-2.546-.377-5-.843-7.352-1.417.663-2.313,1.488-4.7,2.421-7.083.735,1.434,1.506,2.869,2.349,4.3S205.5,129.192,206.38,130.555ZM219.63,93.241a73.924,73.924,0,0,1,4.985,5.738c-1.614-.072-3.263-.126-4.931-.126-1.686,0-3.353.036-4.985.126A69.993,69.993,0,0,1,219.63,93.241ZM206.362,103.8c-.879,1.381-1.757,2.8-2.582,4.25-.825,1.434-1.6,2.869-2.331,4.3-.968-2.4-1.793-4.805-2.474-7.136C201.323,104.663,203.8,104.179,206.362,103.8Zm-16.227,22.449c-6.348-2.708-10.454-6.258-10.454-9.073s4.106-6.383,10.454-9.073c1.542-.663,3.228-1.255,4.967-1.811a86.122,86.122,0,0,0,4.034,10.92,84.9,84.9,0,0,0-3.981,10.866C193.38,127.525,191.694,126.915,190.134,126.252Zm9.647,25.623c-2.439-1.4-3.5-6.724-2.672-13.574.2-1.686.52-3.461.914-5.272a86.9,86.9,0,0,0,11.386,1.954,87.465,87.465,0,0,0,7.459,8.965c-5.845,5.433-11.332,8.41-15.062,8.41A4.279,4.279,0,0,1,199.781,151.875Zm42.532-13.663c.843,6.85-.2,12.175-2.618,13.592a3.99,3.99,0,0,1-2.062.466c-3.712,0-9.216-2.959-15.062-8.356a84.689,84.689,0,0,0,7.405-8.947,83.731,83.731,0,0,0,11.4-1.972A50.194,50.194,0,0,1,242.313,138.212Zm6.9-11.96c-1.542.663-3.228,1.255-4.967,1.811a86.12,86.12,0,0,0-4.034-10.92,84.9,84.9,0,0,0,3.981-10.866c1.775.556,3.461,1.165,5.039,1.829,6.348,2.708,10.454,6.258,10.454,9.073C259.67,119.994,255.564,123.562,249.216,126.252Z" fill="#61dafb"/>
<path id="Path_331" data-name="Path 331" d="M320.8,78.4Z" transform="translate(-119.082 -0.328)" fill="#61dafb"/>
<circle id="Ellipse_112" data-name="Ellipse 112" cx="8.194" cy="8.194" r="8.194" transform="translate(211.472 108.984)" fill="#61dafb"/>
<path id="Path_332" data-name="Path 332" d="M520.5,78.1Z" transform="translate(-282.975 -0.082)" fill="#61dafb"/>
</g>
</g>
</svg>
40
docs/static/img/undraw_docusaurus_tree.svg
vendored
@@ -1,40 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1129" height="663" viewBox="0 0 1129 663">
<title>Focus on What Matters</title>
<circle cx="321" cy="321" r="321" fill="#f2f2f2" />
<ellipse cx="559" cy="635.49998" rx="514" ry="27.50002" fill="#3f3d56" />
<ellipse cx="558" cy="627" rx="460" ry="22" opacity="0.2" />
<rect x="131" y="152.5" width="840" height="50" fill="#3f3d56" />
<path d="M166.5,727.3299A21.67009,21.67009,0,0,0,188.1701,749H984.8299A21.67009,21.67009,0,0,0,1006.5,727.3299V296h-840Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<path d="M984.8299,236H188.1701A21.67009,21.67009,0,0,0,166.5,257.6701V296h840V257.6701A21.67009,21.67009,0,0,0,984.8299,236Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<path d="M984.8299,236H188.1701A21.67009,21.67009,0,0,0,166.5,257.6701V296h840V257.6701A21.67009,21.67009,0,0,0,984.8299,236Z" transform="translate(-35.5 -118.5)" opacity="0.2" />
<circle cx="181" cy="147.5" r="13" fill="#3f3d56" />
<circle cx="217" cy="147.5" r="13" fill="#3f3d56" />
<circle cx="253" cy="147.5" r="13" fill="#3f3d56" />
<rect x="168" y="213.5" width="337" height="386" rx="5.33505" fill="#606060" />
<rect x="603" y="272.5" width="284" height="22" rx="5.47638" fill="#2e8555" />
<rect x="537" y="352.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="396.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="440.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="537" y="484.5" width="416" height="15" rx="5.47638" fill="#2e8555" />
<rect x="865" y="552.5" width="88" height="26" rx="7.02756" fill="#3ecc5f" />
<path d="M1088.60287,624.61594a30.11371,30.11371,0,0,0,3.98291-15.266c0-13.79652-8.54358-24.98081-19.08256-24.98081s-19.08256,11.18429-19.08256,24.98081a30.11411,30.11411,0,0,0,3.98291,15.266,31.248,31.248,0,0,0,0,30.53213,31.248,31.248,0,0,0,0,30.53208,31.248,31.248,0,0,0,0,30.53208,30.11408,30.11408,0,0,0-3.98291,15.266c0,13.79652,8.54353,24.98081,19.08256,24.98081s19.08256-11.18429,19.08256-24.98081a30.11368,30.11368,0,0,0-3.98291-15.266,31.248,31.248,0,0,0,0-30.53208,31.248,31.248,0,0,0,0-30.53208,31.248,31.248,0,0,0,0-30.53213Z" transform="translate(-35.5 -118.5)" fill="#3f3d56" />
<ellipse cx="1038.00321" cy="460.31783" rx="19.08256" ry="24.9808" fill="#3f3d56" />
<ellipse cx="1038.00321" cy="429.78574" rx="19.08256" ry="24.9808" fill="#3f3d56" />
<path d="M1144.93871,339.34489a91.61081,91.61081,0,0,0,7.10658-10.46092l-50.141-8.23491,54.22885.4033a91.566,91.566,0,0,0,1.74556-72.42605l-72.75449,37.74139,67.09658-49.32086a91.41255,91.41255,0,1,0-150.971,102.29805,91.45842,91.45842,0,0,0-10.42451,16.66946l65.0866,33.81447-69.40046-23.292a91.46011,91.46011,0,0,0,14.73837,85.83669,91.40575,91.40575,0,1,0,143.68892,0,91.41808,91.41808,0,0,0,0-113.02862Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M981.6885,395.8592a91.01343,91.01343,0,0,0,19.56129,56.51431,91.40575,91.40575,0,1,0,143.68892,0C1157.18982,436.82067,981.6885,385.60008,981.6885,395.8592Z" transform="translate(-35.5 -118.5)" opacity="0.1" />
<path d="M365.62,461.43628H477.094v45.12043H365.62Z" transform="translate(-35.5 -118.5)" fill="#fff" fill-rule="evenodd" />
<path d="M264.76252,608.74122a26.50931,26.50931,0,0,1-22.96231-13.27072,26.50976,26.50976,0,0,0,22.96231,39.81215H291.304V608.74122Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M384.17242,468.57061l92.92155-5.80726V449.49263a26.54091,26.54091,0,0,0-26.54143-26.54143H331.1161l-3.31768-5.74622a3.83043,3.83043,0,0,0-6.63536,0l-3.31768,5.74622-3.31767-5.74622a3.83043,3.83043,0,0,0-6.63536,0l-3.31768,5.74622L301.257,417.205a3.83043,3.83043,0,0,0-6.63536,0L291.304,422.9512c-.02919,0-.05573.004-.08625.004l-5.49674-5.49541a3.8293,3.8293,0,0,0-6.4071,1.71723l-1.81676,6.77338L270.607,424.1031a3.82993,3.82993,0,0,0-4.6912,4.69253l1.84463,6.89148-6.77072,1.81411a3.8315,3.8315,0,0,0-1.71988,6.40975l5.49673,5.49673c0,.02787-.004.05574-.004.08493l-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74621,3.31768L259.0163,466.081a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31767a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31767a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768L259.0163,558.976a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768-5.74622,3.31768a3.83042,3.83042,0,0,0,0,6.63535l5.74622,3.31768-5.74622,3.31768a3.83043,3.83043,0,0,0,0,6.63536l5.74622,3.31768A26.54091,26.54091,0,0,0,291.304,635.28265H450.55254A26.5409,26.5409,0,0,0,477.094,608.74122V502.5755l-92.92155-5.80727a14.12639,14.12639,0,0,1,0-28.19762" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M424.01111,635.28265h39.81214V582.19979H424.01111Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M490.36468,602.10586a6.60242,6.60242,0,0,0-.848.08493c-.05042-.19906-.09821-.39945-.15393-.59852A6.62668,6.62668,0,1,0,482.80568,590.21q-.2203-.22491-.44457-.44589a6.62391,6.62391,0,1,0-11.39689-6.56369c-.1964-.05575-.39414-.10218-.59056-.15262a6.63957,6.63957,0,1,0-13.10086,0c-.1964.05042-.39414.09687-.59056.15262a6.62767,6.62767,0,1,0-11.39688,6.56369,26.52754,26.52754,0,1,0,44.23127,25.52756,6.6211,6.6211,0,1,0,.848-13.18579" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M437.28182,555.65836H477.094V529.11693H437.28182Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
<path d="M490.36468,545.70532a3.31768,3.31768,0,0,0,0-6.63536,3.41133,3.41133,0,0,0-.42333.04247c-.02655-.09953-.04911-.19907-.077-.29859a3.319,3.319,0,0,0-1.278-6.37923,3.28174,3.28174,0,0,0-2.00122.68742q-.10947-.11346-.22294-.22295a3.282,3.282,0,0,0,.67149-1.98265,3.31768,3.31768,0,0,0-6.37-1.2992,13.27078,13.27078,0,1,0,0,25.54082,3.31768,3.31768,0,0,0,6.37-1.2992,3.282,3.282,0,0,0-.67149-1.98265q.11347-.10947.22294-.22294a3.28174,3.28174,0,0,0,2.00122.68742,3.31768,3.31768,0,0,0,1.278-6.37923c.02786-.0982.05042-.19907.077-.29859a3.41325,3.41325,0,0,0,.42333.04246" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
<path d="M317.84538,466.081a3.31768,3.31768,0,0,1-3.31767-3.31768,9.953,9.953,0,1,0-19.90608,0,3.31768,3.31768,0,1,1-6.63535,0,16.58839,16.58839,0,1,1,33.17678,0,3.31768,3.31768,0,0,1-3.31768,3.31768" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
<path d="M370.92825,635.28265h79.62429A26.5409,26.5409,0,0,0,477.094,608.74122v-92.895H397.46968a26.54091,26.54091,0,0,0-26.54143,26.54143Z" transform="translate(-35.5 -118.5)" fill="#ffff50" fill-rule="evenodd" />
<path d="M457.21444,556.98543H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,1,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,1,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0-66.10674H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.29459H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414m0,26.54143H390.80778a1.32707,1.32707,0,0,1,0-2.65414h66.40666a1.32707,1.32707,0,0,1,0,2.65414M477.094,474.19076c-.01592,0-.0292-.008-.04512-.00663-4.10064.13934-6.04083,4.24132-7.75274,7.86024-1.78623,3.78215-3.16771,6.24122-5.43171,6.16691-2.50685-.09024-3.94007-2.92222-5.45825-5.91874-1.74377-3.44243-3.73438-7.34667-7.91333-7.20069-4.04227.138-5.98907,3.70784-7.70631,6.857-1.82738,3.35484-3.07084,5.39455-5.46887,5.30033-2.55727-.09289-3.91619-2.39536-5.48877-5.06013-1.75306-2.96733-3.77951-6.30359-7.8775-6.18946-3.97326.13669-5.92537,3.16507-7.64791,5.83912-1.82207,2.82666-3.09872,4.5492-5.52725,4.447-2.61832-.09289-3.9706-2.00388-5.53522-4.21611-1.757-2.4856-3.737-5.299-7.82308-5.16231-3.88567.13271-5.83779,2.61434-7.559,4.80135-1.635,2.07555-2.9116,3.71846-5.61218,3.615a1.32793,1.32793,0,1,0-.09555,2.65414c4.00377.134,6.03154-2.38873,7.79257-4.6275,1.562-1.9853,2.91027-3.69855,5.56441-3.78879,2.55594-.10882,3.75429,1.47968,5.56707,4.04093,1.7212,2.43385,3.67465,5.19416,7.60545,5.33616,4.11789.138,6.09921-2.93946,7.8536-5.66261,1.56861-2.43385,2.92221-4.53461,5.50734-4.62352,2.37944-.08892,3.67466,1.79154,5.50072,4.885,1.72121,2.91557,3.67069,6.21865,7.67977,6.36463,4.14709.14332,6.14965-3.47693,7.89475-6.68181,1.51155-2.77092,2.93814-5.38791,5.46621-5.4755,2.37944-.05573,3.62025,2.11668,5.45558,5.74622,1.71459,3.388,3.65875,7.22591,7.73019,7.37321l.22429.004c4.06614,0,5.99571-4.08074,7.70364-7.68905,1.51154-3.19825,2.94211-6.21069,5.3972-6.33411Z" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
|
||||
<path d="M344.38682,635.28265h53.08286V582.19979H344.38682Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
|
||||
<path d="M424.01111,602.10586a6.60242,6.60242,0,0,0-.848.08493c-.05042-.19906-.09821-.39945-.15394-.59852A6.62667,6.62667,0,1,0,416.45211,590.21q-.2203-.22491-.44458-.44589a6.62391,6.62391,0,1,0-11.39689-6.56369c-.1964-.05575-.39413-.10218-.59054-.15262a6.63957,6.63957,0,1,0-13.10084,0c-.19641.05042-.39414.09687-.59055.15262a6.62767,6.62767,0,1,0-11.39689,6.56369,26.52755,26.52755,0,1,0,44.2313,25.52756,6.6211,6.6211,0,1,0,.848-13.18579" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
|
||||
<path d="M344.38682,555.65836h53.08286V529.11693H344.38682Z" transform="translate(-35.5 -118.5)" fill="#3ecc5f" fill-rule="evenodd" />
|
||||
<path d="M410.74039,545.70532a3.31768,3.31768,0,1,0,0-6.63536,3.41133,3.41133,0,0,0-.42333.04247c-.02655-.09953-.04911-.19907-.077-.29859a3.319,3.319,0,0,0-1.278-6.37923,3.28174,3.28174,0,0,0-2.00122.68742q-.10947-.11346-.22294-.22295a3.282,3.282,0,0,0,.67149-1.98265,3.31768,3.31768,0,0,0-6.37-1.2992,13.27078,13.27078,0,1,0,0,25.54082,3.31768,3.31768,0,0,0,6.37-1.2992,3.282,3.282,0,0,0-.67149-1.98265q.11347-.10947.22294-.22294a3.28174,3.28174,0,0,0,2.00122.68742,3.31768,3.31768,0,0,0,1.278-6.37923c.02786-.0982.05042-.19907.077-.29859a3.41325,3.41325,0,0,0,.42333.04246" transform="translate(-35.5 -118.5)" fill="#44d860" fill-rule="evenodd" />
|
||||
<path d="M424.01111,447.8338a3.60349,3.60349,0,0,1-.65028-.06636,3.34415,3.34415,0,0,1-.62372-.18579,3.44679,3.44679,0,0,1-.572-.30522,5.02708,5.02708,0,0,1-.50429-.4114,3.88726,3.88726,0,0,1-.41007-.50428,3.27532,3.27532,0,0,1-.55737-1.84463,3.60248,3.60248,0,0,1,.06636-.65027,3.82638,3.82638,0,0,1,.18447-.62373,3.48858,3.48858,0,0,1,.30656-.57064,3.197,3.197,0,0,1,.91436-.91568,3.44685,3.44685,0,0,1,.572-.30523,3.344,3.344,0,0,1,.62372-.18578,3.06907,3.06907,0,0,1,1.30053,0,3.22332,3.22332,0,0,1,1.19436.491,5.02835,5.02835,0,0,1,.50429.41139,4.8801,4.8801,0,0,1,.41139.50429,3.38246,3.38246,0,0,1,.30522.57064,3.47806,3.47806,0,0,1,.25215,1.274A3.36394,3.36394,0,0,1,426.36,446.865a5.02708,5.02708,0,0,1-.50429.4114,3.3057,3.3057,0,0,1-1.84463.55737m26.54143-1.65884a3.38754,3.38754,0,0,1-2.35024-.96877,5.04185,5.04185,0,0,1-.41007-.50428,3.27532,3.27532,0,0,1-.55737-1.84463,3.38659,3.38659,0,0,1,.96744-2.34892,5.02559,5.02559,0,0,1,.50429-.41139,3.44685,3.44685,0,0,1,.572-.30523,3.3432,3.3432,0,0,1,.62373-.18579,3.06952,3.06952,0,0,1,1.30052,0,3.22356,3.22356,0,0,1,1.19436.491,5.02559,5.02559,0,0,1,.50429.41139,3.38792,3.38792,0,0,1,.96876,2.34892,3.72635,3.72635,0,0,1-.06636.65026,3.37387,3.37387,0,0,1-.18579.62373,4.71469,4.71469,0,0,1-.30522.57064,4.8801,4.8801,0,0,1-.41139.50429,5.02559,5.02559,0,0,1-.50429.41139,3.30547,3.30547,0,0,1-1.84463.55737" transform="translate(-35.5 -118.5)" fill-rule="evenodd" />
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 12 KiB |
3456
docs/static/openapi.json
vendored
3456
docs/static/openapi.json
vendored
File diff suppressed because it is too large
Load Diff
@@ -1,241 +0,0 @@
# Transcript Formats

The Reflector API provides multiple output formats for transcript data through the `transcript_format` query parameter on the GET `/v1/transcripts/{id}` endpoint.

## Overview

When retrieving a transcript, you can specify the desired format using the `transcript_format` query parameter. The API supports four formats optimized for different use cases:

- **text** - Plain text with speaker names (default)
- **text-timestamped** - Timestamped text with speaker names
- **webvtt-named** - WebVTT subtitle format with participant names
- **json** - Structured JSON segments with full metadata

All formats include participant information when available, resolving speaker IDs to actual names.

## Query Parameter Usage

```
GET /v1/transcripts/{id}?transcript_format={format}
```

### Parameters

- `transcript_format` (optional): The desired output format
  - Type: `"text" | "text-timestamped" | "webvtt-named" | "json"`
  - Default: `"text"`

## Format Descriptions

### Text Format (`text`)

**Use case:** Simple, human-readable transcript for display or export.

**Format:** Speaker names followed by their dialogue, one line per segment.

**Example:**
```
John Smith: Hello everyone
Jane Doe: Hi there
John Smith: How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "text",
  "transcript": "John Smith: Hello everyone\nJane Doe: Hi there\nJohn Smith: How are you today?",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### Text Timestamped Format (`text-timestamped`)

**Use case:** Transcript with timing information for navigation or reference.

**Format:** `[MM:SS]` timestamp prefix before each speaker and dialogue.

**Example:**
```
[00:00] John Smith: Hello everyone
[00:05] Jane Doe: Hi there
[00:12] John Smith: How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text-timestamped
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "text-timestamped",
  "transcript": "[00:00] John Smith: Hello everyone\n[00:05] Jane Doe: Hi there\n[00:12] John Smith: How are you today?",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### WebVTT Named Format (`webvtt-named`)

**Use case:** Subtitle files for video players, accessibility tools, or video editing.

**Format:** Standard WebVTT subtitle format with voice tags using participant names.

**Example:**
```
WEBVTT

00:00:00.000 --> 00:00:05.000
<v John Smith>Hello everyone

00:00:05.000 --> 00:00:12.000
<v Jane Doe>Hi there

00:00:12.000 --> 00:00:18.000
<v John Smith>How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=webvtt-named
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "webvtt-named",
  "transcript": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v John Smith>Hello everyone\n\n...",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### JSON Format (`json`)

**Use case:** Programmatic access with full timing and speaker metadata.

**Format:** Array of segment objects with speaker information, text content, and precise timing.

**Example:**
```json
[
  {
    "speaker": 0,
    "speaker_name": "John Smith",
    "text": "Hello everyone",
    "start": 0.0,
    "end": 5.0
  },
  {
    "speaker": 1,
    "speaker_name": "Jane Doe",
    "text": "Hi there",
    "start": 5.0,
    "end": 12.0
  },
  {
    "speaker": 0,
    "speaker_name": "John Smith",
    "text": "How are you today?",
    "start": 12.0,
    "end": 18.0
  }
]
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=json
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "json",
  "transcript": [
    {
      "speaker": 0,
      "speaker_name": "John Smith",
      "text": "Hello everyone",
      "start": 0.0,
      "end": 5.0
    },
    {
      "speaker": 1,
      "speaker_name": "Jane Doe",
      "text": "Hi there",
      "start": 5.0,
      "end": 12.0
    }
  ],
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

## Response Structure

All formats return the same base transcript metadata, plus a `transcript_format` discriminator field and a format-specific `transcript` field:

### Common Fields

- `id`: Transcript identifier
- `user_id`: Owner user ID (if authenticated)
- `name`: Transcript name
- `status`: Processing status
- `locked`: Whether the transcript is locked for editing
- `duration`: Total duration in seconds
- `title`: Auto-generated or custom title
- `short_summary`: Brief summary
- `long_summary`: Detailed summary
- `created_at`: Creation timestamp
- `share_mode`: Access control setting
- `source_language`: Original audio language
- `target_language`: Translation target language
- `reviewed`: Whether the transcript has been reviewed
- `meeting_id`: Associated meeting ID (if applicable)
- `source_kind`: Source type (live, file, room)
- `room_id`: Associated room ID (if applicable)
- `audio_deleted`: Whether the audio has been deleted
- `participants`: Array of participant objects with speaker mappings

### Format-Specific Fields

- `transcript_format`: The format identifier (discriminator field)
- `transcript`: The formatted transcript content (string for the text/webvtt formats, array for the json format)

## Speaker Name Resolution

All formats resolve speaker IDs to participant names when available:

- If a participant exists for the speaker ID, their name is used
- If no participant exists, a default name like "Speaker 0" is generated
- Speaker IDs are integers (0, 1, 2, etc.) assigned during diarization
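For programmatic use, the endpoint is easy to script against. A minimal Python sketch (the base URL, transcript ID, and token are placeholders) that requests the `json` format and prints speaker-attributed lines:

```python
import requests

BASE_URL = "https://your-reflector-domain.com"  # placeholder
TRANSCRIPT_ID = "transcript_123"                # placeholder
TOKEN = "your-auth-token"                       # placeholder

resp = requests.get(
    f"{BASE_URL}/v1/transcripts/{TRANSCRIPT_ID}",
    params={"transcript_format": "json"},
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=30,
)
resp.raise_for_status()

# With transcript_format=json, "transcript" is a list of segment objects.
for segment in resp.json()["transcript"]:
    print(f'[{segment["start"]:>7.1f}s] {segment["speaker_name"]}: {segment["text"]}')
```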
@@ -1,8 +0,0 @@
{
  // This file is not used in compilation. It is here just for a nice editor experience.
  "extends": "@docusaurus/tsconfig",
  "compilerOptions": {
    "baseUrl": "."
  },
  "exclude": [".docusaurus", "build"]
}
720
docs/video-jitsi.md
Normal file
@@ -0,0 +1,720 @@
# Jitsi Meet Integration Configuration Guide

This guide explains how to configure Reflector to use your self-hosted Jitsi Meet installation for video meetings, recording, and participant tracking.

## Overview

Jitsi Meet is an open-source video conferencing platform that can be self-hosted. Reflector integrates with Jitsi Meet to:

- Create secure meeting rooms with JWT authentication
- Track participant join/leave events via Prosody webhooks
- Record meetings using the Jibri recording service
- Process recordings for transcription and analysis

## Requirements

### Self-Hosted Jitsi Meet

You need a complete Jitsi Meet installation, including:

1. **Jitsi Meet Web Interface** - The main meeting interface
2. **Prosody XMPP Server** - Handles room management and authentication
3. **Jicofo (JItsi COnference FOcus)** - Manages media sessions
4. **Jitsi Videobridge (JVB)** - Handles WebRTC media routing
5. **Jibri Recording Service** - Records meetings (optional but recommended)

### System Requirements

- **Domain with SSL Certificate** - Required for WebRTC functionality
- **Prosody mod_event_sync** - For webhook event handling
- **JWT Authentication** - For secure room access control
- **Storage Solution** - For recording files (local or cloud)

## Configuration Variables

Add the following environment variables to your Reflector `.env` file:

### Required Variables

```bash
# Jitsi Meet domain (without https://)
JITSI_DOMAIN=meet.example.com

# JWT secret for room authentication (generate with: openssl rand -hex 32)
JITSI_JWT_SECRET=your-64-character-hex-secret-here

# Webhook secret for event handling (generate with: openssl rand -hex 16)
JITSI_WEBHOOK_SECRET=your-32-character-hex-secret-here
```

### Optional Variables

```bash
# Application identifier (should match Jitsi configuration)
JITSI_APP_ID=reflector

# JWT issuer and audience (should match Jitsi configuration)
JITSI_JWT_ISSUER=reflector
JITSI_JWT_AUDIENCE=jitsi
```

## Installation Steps

### 1. Jitsi Meet Server Installation

#### Quick Installation (Ubuntu/Debian)

```bash
# Add Jitsi repository
curl -fsSL https://download.jitsi.org/jitsi-key.gpg.key | sudo gpg --dearmor -o /usr/share/keyrings/jitsi-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/jitsi-keyring.gpg] https://download.jitsi.org stable/" | sudo tee /etc/apt/sources.list.d/jitsi-stable.list

# Install Jitsi Meet
sudo apt update
sudo apt install jitsi-meet

# Configure SSL certificate
sudo /usr/share/jitsi-meet/scripts/install-letsencrypt-cert.sh
```

#### Docker Installation

```bash
# Clone Jitsi Docker repository
git clone https://github.com/jitsi/docker-jitsi-meet
cd docker-jitsi-meet

# Copy environment template
cp env.example .env

# Edit configuration
nano .env

# Start services
docker-compose up -d
```

### 2. JWT Authentication Setup

#### Update Prosody Configuration

Edit `/etc/prosody/conf.d/your-domain.cfg.lua`:

```lua
VirtualHost "meet.example.com"
    authentication = "token"
    app_id = "reflector"
    app_secret = "your-jwt-secret-here"

    -- Do not require TLS on client-to-server connections
    c2s_require_encryption = false
    admins = { "focusUser@auth.meet.example.com" }

    modules_enabled = {
        "bosh";
        "pubsub";
        "ping";
        "roster";
        "saslauth";
        "tls";
        "dialback";
        "disco";
        "carbons";
        "pep";
        "private";
        "blocklist";
        "vcard";
        "version";
        "uptime";
        "time";
        "register";
        "admin_adhoc";
        "token_verification";
        "event_sync"; -- Required for webhooks
    }
```

#### Configure Jitsi Meet Interface

Edit `/etc/jitsi/meet/your-domain-config.js`:

```javascript
var config = {
    hosts: {
        domain: 'meet.example.com',
        muc: 'conference.meet.example.com'
    },

    // Enable JWT authentication
    enableUserRolesBasedOnToken: true,

    // Recording configuration
    fileRecordingsEnabled: true,
    liveStreamingEnabled: false,

    // Reflector integration settings
    prejoinPageEnabled: true,
    requireDisplayName: true
};
```

### 3. Webhook Event Configuration

#### Install Event Sync Module

```bash
# Download the module
cd /usr/share/jitsi-meet/prosody-plugins/
wget https://raw.githubusercontent.com/jitsi-contrib/prosody-plugins/main/mod_event_sync.lua
```

#### Configure Event Sync

Add to your Prosody configuration:

```lua
Component "conference.meet.example.com" "muc"
    storage = "memory"
    modules_enabled = {
        "muc_meeting_id";
        "muc_domain_mapper";
        "polls";
        "event_sync"; -- Enable event sync
    }

    -- Event sync webhook configuration
    event_sync_url = "https://your-reflector-domain.com/v1/jitsi/events"
    event_sync_secret = "your-webhook-secret-here"

    -- Events to track
    event_sync_events = {
        "muc-occupant-joined",
        "muc-occupant-left",
        "jibri-recording-on",
        "jibri-recording-off"
    }
```

#### Webhook Event Payload Examples

**Participant Joined Event:**
```json
{
  "event": "muc-occupant-joined",
  "room": "reflector-my-room-uuid123",
  "timestamp": "2025-01-15T10:30:00.000Z",
  "data": {
    "occupant_id": "participant-456",
    "nick": "John Doe",
    "role": "participant",
    "affiliation": "none"
  }
}
```

**Recording Started Event:**
```json
{
  "event": "jibri-recording-on",
  "room": "reflector-my-room-uuid123",
  "timestamp": "2025-01-15T10:32:00.000Z",
  "data": {
    "recording_id": "rec-789",
    "initiator": "moderator-123"
  }
}
```

**Recording Completed Event:**
```json
{
  "room_name": "reflector-my-room-uuid123",
  "recording_file": "/var/recordings/rec-789.mp4",
  "recording_status": "completed",
  "timestamp": "2025-01-15T11:15:00.000Z"
}
```

### 4. Jibri Recording Setup (Optional)

#### Install Jibri

```bash
# Install Jibri package
sudo apt install jibri

# Create recording directory
sudo mkdir -p /var/recordings
sudo chown jibri:jibri /var/recordings
```

#### Configure Jibri

Edit `/etc/jitsi/jibri/jibri.conf`:

```hocon
jibri {
  recording {
    recordings-directory = "/var/recordings"
    finalize-script = "/opt/jitsi/jibri/finalize.sh"
  }

  api {
    xmpp {
      environments = [{
        name = "prod environment"
        xmpp-server-hosts = ["meet.example.com"]
        xmpp-domain = "meet.example.com"

        control-muc {
          domain = "internal.auth.meet.example.com"
          room-name = "JibriBrewery"
          nickname = "jibri-nickname"
        }

        control-login {
          domain = "auth.meet.example.com"
          username = "jibri"
          password = "jibri-password"
        }
      }]
    }
  }
}
```

#### Create Finalize Script

Create `/opt/jitsi/jibri/finalize.sh`:

```bash
#!/bin/bash
# Jibri finalize script for Reflector integration
# JITSI_WEBHOOK_SECRET must be exported in Jibri's environment

RECORDING_FILE="$1"
ROOM_NAME="$2"
REFLECTOR_API_URL="${REFLECTOR_API_URL:-http://localhost:1250}"

# Prepare webhook payload
TIMESTAMP=$(date -u +%Y-%m-%dT%H:%M:%S.%3NZ)
PAYLOAD=$(cat <<EOF
{
  "room_name": "$ROOM_NAME",
  "recording_file": "$RECORDING_FILE",
  "recording_status": "completed",
  "timestamp": "$TIMESTAMP"
}
EOF
)

# Generate HMAC-SHA256 signature over the payload
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$JITSI_WEBHOOK_SECRET" | cut -d' ' -f2)

# Send webhook to Reflector
curl -X POST "$REFLECTOR_API_URL/v1/jibri/recording-complete" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $SIGNATURE" \
  -d "$PAYLOAD"

echo "Recording finalization webhook sent for room: $ROOM_NAME"
```

Make it executable:
```bash
sudo chmod +x /opt/jitsi/jibri/finalize.sh
```

### 5. Restart Services

After configuration changes:

```bash
sudo systemctl restart prosody
sudo systemctl restart jicofo
sudo systemctl restart jitsi-videobridge2
sudo systemctl restart jibri
sudo systemctl restart nginx
```

## Room Configuration

### Creating Jitsi Rooms

Create rooms that use the Jitsi platform in Reflector:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $AUTH_TOKEN" \
  -d '{
    "name": "my-jitsi-room",
    "platform": "jitsi",
    "recording_type": "cloud",
    "recording_trigger": "automatic-2nd-participant",
    "is_locked": false,
    "room_mode": "normal"
  }'
```

### Meeting Creation

Meetings automatically use JWT authentication:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms/my-jitsi-room/meeting" \
  -H "Authorization: Bearer $AUTH_TOKEN"
```

The response includes JWT-authenticated URLs:
```json
{
  "id": "meeting-uuid",
  "room_name": "reflector-my-jitsi-room-123456",
  "room_url": "https://meet.example.com/room?jwt=user-token",
  "host_room_url": "https://meet.example.com/room?jwt=moderator-token"
}
```

## Features and Capabilities

### JWT Authentication

Reflector automatically generates JWT tokens with:

- **Room Access Control** - Secure room entry
- **User Roles** - Moderator vs participant permissions
- **Expiration** - Configurable token lifetime (default 8 hours)
- **Custom Claims** - Room-specific metadata (see the token sketch below)
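For reference, minting a compatible test token by hand is straightforward. A minimal sketch with PyJWT, assuming the issuer/audience values from this guide and the common convention of carrying user identity in a `context.user` claim; the exact claim set Reflector emits may differ, and the top-level `moderator` flag is an assumption:

```python
import time

import jwt  # PyJWT

JITSI_JWT_SECRET = "your-64-character-hex-secret-here"  # placeholder

payload = {
    "iss": "reflector",            # must match JITSI_JWT_ISSUER / Prosody app_id
    "aud": "jitsi",                # must match JITSI_JWT_AUDIENCE
    "sub": "meet.example.com",     # your JITSI_DOMAIN
    "room": "reflector-my-jitsi-room-123456",
    "exp": int(time.time()) + 8 * 3600,  # default 8-hour lifetime
    "context": {
        "user": {"name": "Test User"},
    },
    "moderator": True,  # assumption: moderator role conveyed as a top-level claim
}

token = jwt.encode(payload, JITSI_JWT_SECRET, algorithm="HS256")
print(f"https://meet.example.com/reflector-my-jitsi-room-123456?jwt={token}")
```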
### Recording Options

**Recording Types:**
- `"none"` - No recording
- `"local"` - Local Jibri recording
- `"cloud"` - Cloud recording (requires external storage)

**Recording Triggers:**
- `"none"` - Manual recording only
- `"prompt"` - Prompt users to start
- `"automatic"` - Start immediately
- `"automatic-2nd-participant"` - Start when the 2nd person joins

### Event Tracking and Storage

Reflector automatically stores all webhook events on the meeting record for comprehensive meeting analytics:

**Supported Event Types:**
- `muc-occupant-joined` - Participant joined the meeting
- `muc-occupant-left` - Participant left the meeting
- `jibri-recording-on` - Recording started
- `jibri-recording-off` - Recording stopped
- `recording_completed` - Recording file ready for processing

**Event Storage Structure:**
Each webhook event is stored as a JSON object in the meeting's `events` column:
```json
{
  "type": "muc-occupant-joined",
  "timestamp": "2025-01-15T10:30:00.123456Z",
  "data": {
    "timestamp": "2025-01-15T10:30:00Z",
    "user_id": "participant-123",
    "display_name": "John Doe"
  }
}
```

**Querying Stored Events:**
```sql
-- Get all events for a meeting
SELECT events FROM meeting WHERE id = 'meeting-uuid';

-- Count participant joins (PostgreSQL 12+ jsonb path syntax)
SELECT jsonb_array_length(
  jsonb_path_query_array(events::jsonb, '$[*] ? (@.type == "muc-occupant-joined")')
) AS total_joins FROM meeting WHERE id = 'meeting-uuid';
```

## Testing and Verification

### Health Check

Test the Jitsi webhook integration:

```bash
curl "https://your-reflector-domain.com/v1/jitsi/health"
```

Expected response:
```json
{
  "status": "ok",
  "service": "jitsi-webhooks",
  "timestamp": "2025-01-15T10:30:00.000Z",
  "webhook_secret_configured": true
}
```

### JWT Token Testing

Verify JWT generation works:
```bash
# Create a test meeting
MEETING=$(curl -s -X POST "https://your-reflector-domain.com/v1/rooms/test-room/meeting" \
  -H "Authorization: Bearer $AUTH_TOKEN" | jq -r '.room_url')

echo "Test meeting URL: $MEETING"
```

### Webhook Testing

#### Manual Webhook Event Testing

Test a participant join event:
```bash
# Generate a proper signature
PAYLOAD='{"event":"muc-occupant-joined","room":"reflector-test-room-uuid","timestamp":"2025-01-15T10:30:00.000Z","data":{"user_id":"test-user","display_name":"Test User"}}'
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$JITSI_WEBHOOK_SECRET" | cut -d' ' -f2)

curl -X POST "https://your-reflector-domain.com/v1/jitsi/events" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $SIGNATURE" \
  -d "$PAYLOAD"
```

Expected response:
```json
{
  "status": "ok",
  "event": "muc-occupant-joined",
  "room": "reflector-test-room-uuid"
}
```

#### Recording Webhook Testing

Test a recording completion event:
```bash
PAYLOAD='{"room_name":"reflector-test-room-uuid","recording_file":"/recordings/test.mp4","recording_status":"completed","timestamp":"2025-01-15T10:30:00.000Z"}'
SIGNATURE=$(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$JITSI_WEBHOOK_SECRET" | cut -d' ' -f2)

curl -X POST "https://your-reflector-domain.com/v1/jibri/recording-complete" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $SIGNATURE" \
  -d "$PAYLOAD"
```

#### Event Storage Verification

Verify events were stored:
```bash
# Check meeting events via API (requires authentication)
curl -H "Authorization: Bearer $AUTH_TOKEN" \
  "https://your-reflector-domain.com/v1/meetings/{meeting-id}"
```

## Troubleshooting

### Common Issues

#### JWT Authentication Failures

**Symptoms**: Users cannot join rooms, "Authentication failed" errors

**Solutions**:
1. Verify `JITSI_JWT_SECRET` matches the Prosody configuration
2. Check the JWT token hasn't expired (default 8 hours)
3. Ensure system clocks are synchronized between servers
4. Validate that the JWT issuer/audience configuration matches

**Debug JWT tokens**:
```bash
# Decode the JWT payload
echo "JWT_TOKEN_HERE" | cut -d'.' -f2 | base64 -d | jq
```

#### Webhook Events Not Received

**Symptoms**: Participant counts not updating, no recording events

**Solutions**:
1. Verify `mod_event_sync` is loaded in Prosody
2. Check the webhook URL is accessible from the Jitsi server
3. Validate webhook signature generation
4. Review Prosody and Reflector logs

**Debug webhook connectivity**:
```bash
# Test from the Jitsi server
curl -v "https://your-reflector-domain.com/v1/jitsi/health"

# Check Prosody logs
sudo tail -f /var/log/prosody/prosody.log
```

#### Webhook Signature Verification Issues

**Symptoms**: HTTP 401 "Invalid webhook signature" errors

**Solutions**:
1. Verify the webhook secret matches between Jitsi and Reflector
2. Check payload encoding (no extra whitespace)
3. Ensure proper HMAC-SHA256 signature generation (see the verification sketch below)

**Debug signature generation**:
```bash
# Test signature manually
PAYLOAD='{"event":"test","room":"test","timestamp":"2025-01-15T10:30:00.000Z","data":{}}'
SECRET="your-webhook-secret-here"

# Generate signature (should match the X-Jitsi-Signature header)
echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2

# Test with curl
curl -X POST "https://your-reflector-domain.com/v1/jitsi/events" \
  -H "Content-Type: application/json" \
  -H "X-Jitsi-Signature: $(echo -n "$PAYLOAD" | openssl dgst -sha256 -hmac "$SECRET" | cut -d' ' -f2)" \
  -d "$PAYLOAD" -v
```
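To see what the receiving side has to do, here is a minimal sketch of constant-time HMAC verification in plain Python (illustrative only; Reflector's actual handler may differ):

```python
import hashlib
import hmac


def verify_signature(raw_body: bytes, header_signature: str, secret: str) -> bool:
    """Recompute HMAC-SHA256 over the raw request body and compare in constant time."""
    expected = hmac.new(secret.encode(), raw_body, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, header_signature)


# Example: the payload from the debug commands above
payload = b'{"event":"test","room":"test","timestamp":"2025-01-15T10:30:00.000Z","data":{}}'
secret = "your-webhook-secret-here"
signature = hmac.new(secret.encode(), payload, hashlib.sha256).hexdigest()
assert verify_signature(payload, signature, secret)
```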
#### Event Storage Problems

**Symptoms**: Events received but not stored in the database

**Solutions**:
1. Check database connectivity and permissions
2. Verify the meeting exists before event processing
3. Review Reflector application logs
4. Ensure JSON column support in the database

**Debug event storage**:
```bash
# Check the meeting exists
curl -H "Authorization: Bearer $TOKEN" \
  "https://your-reflector-domain.com/v1/meetings/{meeting-id}"

# Monitor database queries (if using PostgreSQL)
sudo -u postgres psql -c "SELECT * FROM pg_stat_activity WHERE query LIKE '%meeting%';"

# Check Reflector logs for event processing
sudo journalctl -u reflector -f | grep -E "(event|webhook|jitsi)"
```

#### Recording Issues

**Symptoms**: Recordings not starting, finalize script errors

**Solutions**:
1. Verify Jibri service status: `sudo systemctl status jibri`
2. Check recording directory permissions: `/var/recordings`
3. Validate finalize script execution permissions
4. Monitor Jibri logs: `sudo journalctl -u jibri -f`

**Test the finalize script**:
```bash
sudo -u jibri /opt/jitsi/jibri/finalize.sh "/test/recording.mp4" "test-room"
```

#### Meeting Creation Failures

**Symptoms**: HTTP 500 errors when creating meetings

**Solutions**:
1. Check Reflector logs for JWT generation errors
2. Verify all required environment variables are set
3. Ensure the Jitsi domain is accessible from Reflector
4. Test the JWT secret configuration

### Debug Commands

```bash
# Verify Prosody configuration
sudo prosodyctl check config

# Check Jitsi services status
sudo systemctl status prosody jicofo jitsi-videobridge2

# Test JWT generation
curl -X POST "https://your-reflector-domain.com/v1/rooms/test/meeting" \
  -H "Authorization: Bearer $TOKEN" -v

# Monitor webhook events
sudo tail -f /var/log/reflector/app.log | grep jitsi

# Check SSL certificates
sudo certbot certificates
```

### Performance Optimization

#### Scaling Considerations

**Single Server Limits:**
- ~50 concurrent participants per JVB instance
- ~10 concurrent Jibri recordings
- CPU and bandwidth become bottlenecks

**Multi-Server Setup:**
- Multiple JVB instances for scaling
- Dedicated Jibri recording servers
- Load balancing for high availability

#### Resource Monitoring

```bash
# Monitor JVB performance
sudo systemctl status jitsi-videobridge2
sudo journalctl -u jitsi-videobridge2 -f

# Check Prosody connections (requires mod_admin_telnet)
telnet localhost 5582
> c2s:show()
> muc:rooms()
```

## Security Best Practices

### JWT Security
- Use strong, unique secrets (32+ characters)
- Rotate JWT secrets regularly
- Implement proper token expiration
- Never log or expose JWT tokens

### Network Security
- Use HTTPS/WSS for all communications
- Implement proper firewall rules
- Consider a VPN for server-to-server communication
- Monitor for unauthorized access attempts

### Recording Security
- Encrypt recordings at rest
- Implement access controls for recording files
- Audit file permissions regularly
- Comply with data protection regulations

## Migration from Whereby

If migrating from Whereby to Jitsi:

1. **Parallel Setup** - Configure Jitsi alongside the existing Whereby integration
2. **Room Migration** - Update the room platform field to "jitsi"
3. **Test Integration** - Verify meeting creation and webhooks
4. **User Training** - Prepare users for a different UI and feature set
5. **Monitor Performance** - Watch for issues during the transition
6. **Cleanup** - Remove the Whereby configuration once stable

## Support and Resources

### Jitsi Community Resources
- **Documentation**: [jitsi.github.io/handbook](https://jitsi.github.io/handbook/)
- **Community Forum**: [community.jitsi.org](https://community.jitsi.org/)
- **GitHub Issues**: [github.com/jitsi/jitsi-meet](https://github.com/jitsi/jitsi-meet)

### Professional Support
- **8x8 Commercial Support** - Professional Jitsi hosting and support
- **Community Consulting** - Third-party Jitsi implementation services

### Monitoring and Maintenance
- Monitor system resources (CPU, memory, bandwidth)
- Apply regular security updates to all components
- Back up configuration files and certificates
- Test disaster recovery procedures
276
docs/video-whereby.md
Normal file
@@ -0,0 +1,276 @@
# Whereby Integration Configuration Guide

This guide explains how to configure Reflector to use Whereby as your video meeting platform for room creation, recording, and participant tracking.

## Overview

Whereby is a browser-based video meeting platform that provides hosted meeting rooms with recording capabilities. Reflector integrates with Whereby's API to:

- Create secure meeting rooms with custom branding
- Handle participant join/leave events via webhooks
- Automatically record meetings to AWS S3 storage
- Track meeting sessions and participant counts

## Requirements

### Whereby Account Setup

1. **Whereby Account**: Sign up for a Whereby business account at [whereby.com](https://whereby.com/business)
2. **API Access**: Request API access from Whereby support (required for programmatic room creation)
3. **Webhook Configuration**: Configure webhooks in your Whereby dashboard to point to your Reflector instance

### AWS S3 Storage

Whereby requires AWS S3 for recording storage. You need:
- An AWS account with S3 access
- A dedicated S3 bucket for Whereby recordings
- AWS IAM credentials with S3 write permissions

## Configuration Variables

Add the following environment variables to your Reflector `.env` file:

### Required Variables

```bash
# Whereby API Configuration
WHEREBY_API_KEY=your-whereby-jwt-api-key
WHEREBY_WEBHOOK_SECRET=your-webhook-secret-from-whereby

# AWS S3 Storage for Recordings
AWS_WHEREBY_ACCESS_KEY_ID=your-aws-access-key
AWS_WHEREBY_ACCESS_KEY_SECRET=your-aws-secret-key
RECORDING_STORAGE_AWS_BUCKET_NAME=your-s3-bucket-name
```

### Optional Variables

```bash
# Whereby API URL (defaults to production)
WHEREBY_API_URL=https://api.whereby.dev/v1

# SQS Configuration (for recording processing)
AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.region.amazonaws.com/account/queue
SQS_POLLING_TIMEOUT_SECONDS=60
```

## Configuration Steps

### 1. Whereby API Key Setup

1. **Contact Whereby Support** to request API access for your account
2. **Generate a JWT Token** in your Whereby dashboard under API settings
3. **Copy the JWT token** and set it as `WHEREBY_API_KEY` in your environment

The API key is a JWT token that looks like:
```
eyJ[...truncated JWT token...]
```
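To confirm the key works before wiring up Reflector, you can create a meeting directly against Whereby's API. A short Python sketch (field names follow Whereby's public meetings API; the `endDate` value is a placeholder):

```python
import requests

WHEREBY_API_KEY = "your-whereby-jwt-api-key"  # placeholder

resp = requests.post(
    "https://api.whereby.dev/v1/meetings",
    headers={
        "Authorization": f"Bearer {WHEREBY_API_KEY}",
        "Content-Type": "application/json",
    },
    json={
        "roomNamePrefix": "reflector-test",
        "roomMode": "normal",
        "endDate": "2025-12-31T23:59:59.000Z",
        "fields": ["hostRoomUrl"],  # ask Whereby to include the host URL
    },
    timeout=30,
)
resp.raise_for_status()
meeting = resp.json()
print(meeting["roomUrl"], meeting.get("hostRoomUrl"))
```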
### 2. Webhook Configuration

1. **Access the Whereby Dashboard** and navigate to webhook settings
2. **Set the Webhook URL** to your Reflector instance:
   ```
   https://your-reflector-domain.com/v1/whereby
   ```
3. **Configure Events** to send the following event types:
   - `room.client.joined` - When participants join
   - `room.client.left` - When participants leave
4. **Generate a Webhook Secret** and set it as `WHEREBY_WEBHOOK_SECRET`
5. **Save the Configuration** in your Whereby dashboard

### 3. AWS S3 Storage Setup

1. **Create an S3 Bucket** dedicated to Whereby recordings
2. **Create an IAM User** with programmatic access
3. **Attach an S3 Policy** with the following permissions:
   ```json
   {
     "Version": "2012-10-17",
     "Statement": [
       {
         "Effect": "Allow",
         "Action": [
           "s3:PutObject",
           "s3:PutObjectAcl",
           "s3:GetObject"
         ],
         "Resource": "arn:aws:s3:::your-bucket-name/*"
       }
     ]
   }
   ```
4. **Configure Environment Variables** with the IAM credentials

### 4. Room Configuration

When creating rooms in Reflector, set the platform to use Whereby:

```bash
curl -X POST "https://your-reflector-domain.com/v1/rooms" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $AUTH_TOKEN" \
  -d '{
    "name": "my-whereby-room",
    "platform": "whereby",
    "recording_type": "cloud",
    "recording_trigger": "automatic-2nd-participant",
    "is_locked": false,
    "room_mode": "normal"
  }'
```

## Meeting Features

### Recording Options

Whereby supports three recording types:
- **`none`**: No recording
- **`local`**: Local recording (not recommended for production)
- **`cloud`**: Cloud recording to S3 (recommended)

### Recording Triggers

Control when recordings start:
- **`none`**: No automatic recording
- **`prompt`**: Prompt users to start recording
- **`automatic`**: Start immediately when the meeting begins
- **`automatic-2nd-participant`**: Start when the second participant joins

### Room Modes

- **`normal`**: Standard meeting room
- **`group`**: Group meeting with advanced features

## Webhook Event Handling

Reflector automatically handles these Whereby webhook events:

### Participant Tracking
```json
{
  "type": "room.client.joined",
  "data": {
    "meetingId": "room-uuid",
    "numClients": 2
  }
}
```

### Recording Events
Whereby sends recording completion events that trigger Reflector's processing pipeline:
- Audio transcription
- Speaker diarization
- Summary generation

## Troubleshooting

### Common Issues

#### API Authentication Errors
**Symptoms**: 401 Unauthorized errors when creating meetings

**Solutions**:
1. Verify your `WHEREBY_API_KEY` is correct and not expired
2. Ensure you have API access enabled on your Whereby account
3. Contact Whereby support if API access is not available

#### Webhook Signature Validation Failed
**Symptoms**: Webhook events rejected with 401 errors

**Solutions**:
1. Verify `WHEREBY_WEBHOOK_SECRET` matches your Whereby dashboard configuration
2. Check the webhook URL is correctly configured in the Whereby dashboard
3. Ensure the webhook endpoint is accessible from Whereby's servers

#### Recording Upload Failures
**Symptoms**: Recordings not appearing in the S3 bucket

**Solutions**:
1. Verify the AWS credentials have S3 write permissions
2. Check the S3 bucket name is correct and accessible
3. Ensure AWS region settings match your bucket location
4. Review AWS CloudTrail logs for permission issues

#### Participant Count Not Updating
**Symptoms**: Meeting participant counts remain at 0

**Solutions**:
1. Verify webhook events are being received at `/v1/whereby`
2. Check webhook signature validation is passing
3. Ensure meeting IDs match between Whereby and the Reflector database

### Debug Commands

```bash
# Test Whereby API connectivity
curl -H "Authorization: Bearer $WHEREBY_API_KEY" \
  https://api.whereby.dev/v1/meetings

# Check webhook endpoint health
curl https://your-reflector-domain.com/v1/whereby/health

# Verify S3 bucket access
aws s3 ls s3://your-bucket-name --profile whereby-user
```

## Security Considerations

### API Key Security
- Store API keys securely using environment variables
- Rotate API keys regularly
- Never commit API keys to version control
- Use separate keys for development and production

### Webhook Security
- Always validate webhook signatures using HMAC-SHA256 (see the sketch below)
- Use HTTPS for all webhook endpoints
- Implement rate limiting on webhook endpoints
- Monitor webhook events for suspicious activity
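Whereby delivers the signature in a `Whereby-Signature` header of the form `t=<timestamp>,v1=<hex digest>`, where the digest is an HMAC-SHA256 over `"<timestamp>.<raw body>"` (scheme as publicly documented by Whereby; verify against their current docs). A minimal verification sketch:

```python
import hashlib
import hmac
import time


def verify_whereby_signature(raw_body: bytes, header: str, secret: str, tolerance: int = 300) -> bool:
    """Parse 't=...,v1=...' and compare an HMAC over '<timestamp>.<body>' in constant time."""
    parts = dict(item.split("=", 1) for item in header.split(","))
    timestamp, received = parts["t"], parts["v1"]
    if abs(time.time() - int(timestamp)) > tolerance:
        return False  # stale event, possible replay
    signed = f"{timestamp}.".encode() + raw_body
    expected = hmac.new(secret.encode(), signed, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, received)
```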
### Recording Privacy
- Ensure S3 bucket access is restricted to authorized users
- Consider encryption at rest for sensitive recordings
- Implement retention policies for recorded content
- Comply with data protection regulations (GDPR, etc.)

## Performance Optimization

### Meeting Scaling
- Monitor concurrent meeting limits on your Whereby plan
- Implement meeting cleanup for expired sessions
- Use appropriate room modes for different use cases

### Recording Processing
- Configure SQS for asynchronous recording processing
- Monitor S3 storage usage and costs
- Implement automatic cleanup of processed recordings

### Webhook Reliability
- Implement webhook retry mechanisms
- Monitor webhook delivery success rates
- Log webhook events for debugging and auditing

## Migration from Other Platforms

If migrating from another video platform:

1. **Update Room Configuration**: Change existing rooms to use `"platform": "whereby"`
2. **Configure Webhooks**: Set up Whereby webhook endpoints
3. **Test Integration**: Verify meeting creation and event handling
4. **Monitor Performance**: Watch for any issues during the transition
5. **Update Documentation**: Inform users of any workflow changes

## Support

For Whereby-specific issues:
- **Whereby Support**: [whereby.com/support](https://whereby.com/support)
- **API Documentation**: [whereby.dev](https://whereby.dev)
- **Status Page**: [status.whereby.com](https://status.whereby.com)

For Reflector integration issues:
- Check application logs for error details
- Verify environment variable configuration
- Test webhook connectivity and authentication
- Review AWS permissions and S3 access
474
docs/video_platforms.md
Normal file
@@ -0,0 +1,474 @@
# Video Platforms Architecture (PR #529 Analysis)

This document analyzes the video platforms refactoring implemented in PR #529 for daily.co integration, providing a blueprint for extending support to Jitsi and other video conferencing platforms.

## Overview

The video platforms refactoring introduces a clean abstraction layer that allows Reflector to support multiple video conferencing providers (Whereby, Daily.co, etc.) without changing core application logic. This architecture enables:

- Seamless switching between video platforms
- Platform-specific feature support
- Isolated platform code organization
- A consistent API surface across platforms
- Feature flags for gradual migration

## Architecture Components

### 1. **Directory Structure**

```
server/reflector/video_platforms/
├── __init__.py      # Public API exports
├── base.py          # Abstract base classes
├── factory.py       # Platform client factory
├── registry.py      # Platform registration system
├── whereby.py       # Whereby implementation
├── daily.py         # Daily.co implementation
└── mock.py          # Testing implementation
```

### 2. **Core Abstract Classes**

#### `VideoPlatformClient` (base.py)
Abstract base class defining the interface all platforms must implement:

```python
class VideoPlatformClient(ABC):
    PLATFORM_NAME: str = ""

    @abstractmethod
    async def create_meeting(self, room_name_prefix: str, end_date: datetime, room: Room) -> MeetingData: ...

    @abstractmethod
    async def get_room_sessions(self, room_name: str) -> Dict[str, Any]: ...

    @abstractmethod
    async def delete_room(self, room_name: str) -> bool: ...

    @abstractmethod
    async def upload_logo(self, room_name: str, logo_path: str) -> bool: ...

    @abstractmethod
    def verify_webhook_signature(self, body: bytes, signature: str, timestamp: Optional[str] = None) -> bool: ...
```

#### `MeetingData` (base.py)
Standardized meeting data structure returned by all platforms:

```python
class MeetingData(BaseModel):
    meeting_id: str
    room_name: str
    room_url: str
    host_room_url: str
    platform: str
    extra_data: Dict[str, Any] = {}  # Platform-specific data
```

#### `VideoPlatformConfig` (base.py)
Unified configuration structure for all platforms:

```python
class VideoPlatformConfig(BaseModel):
    api_key: str
    webhook_secret: str
    api_url: Optional[str] = None
    subdomain: Optional[str] = None
    s3_bucket: Optional[str] = None
    s3_region: Optional[str] = None
    aws_role_arn: Optional[str] = None
    aws_access_key_id: Optional[str] = None
    aws_access_key_secret: Optional[str] = None
```

### 3. **Platform Registration System**

#### Registry Pattern (registry.py)
- Automatic registration of built-in platforms
- Runtime platform discovery
- Type-safe client instantiation

```python
# Auto-registration of platforms
_PLATFORMS: Dict[str, Type[VideoPlatformClient]] = {}

def register_platform(name: str, client_class: Type[VideoPlatformClient]) -> None: ...
def get_platform_client(platform: str, config: VideoPlatformConfig) -> VideoPlatformClient: ...
```
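Filled in, the registry is only a few lines. A sketch of how these two functions plausibly work, assuming client constructors accept the config (the actual registry.py may differ in details such as error types):

```python
def register_platform(name: str, client_class: Type[VideoPlatformClient]) -> None:
    """Register a platform client class under a short name."""
    _PLATFORMS[name] = client_class


def get_platform_client(platform: str, config: VideoPlatformConfig) -> VideoPlatformClient:
    """Instantiate the registered client for a platform, failing loudly on unknown names."""
    try:
        client_class = _PLATFORMS[platform]
    except KeyError:
        raise ValueError(f"Unknown video platform: {platform!r}") from None
    return client_class(config)
```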
#### Factory System (factory.py)
- Configuration management per platform
- Platform selection logic
- Feature flag integration

```python
def get_platform_for_room(room_id: Optional[str] = None) -> str:
    """Determine which platform to use based on feature flags."""
    if not settings.DAILY_MIGRATION_ENABLED:
        return "whereby"

    if room_id and room_id in settings.DAILY_MIGRATION_ROOM_IDS:
        return "daily"

    return settings.DEFAULT_VIDEO_PLATFORM
```

### 4. **Database Schema Changes**

#### Room Model Updates
Added a `platform` field to track which video platform each room uses:

```python
# Database schema
platform_column = sqlalchemy.Column(
    "platform",
    sqlalchemy.String,
    nullable=False,
    server_default="whereby",
)

# Pydantic model
class Room(BaseModel):
    platform: Literal["whereby", "daily"] = "whereby"
```

#### Meeting Model Updates
Added a `platform` field to meetings for tracking and debugging:

```python
# Database schema
platform_column = sqlalchemy.Column(
    "platform",
    sqlalchemy.String,
    nullable=False,
    server_default="whereby",
)

# Pydantic model
class Meeting(BaseModel):
    platform: Literal["whereby", "daily"] = "whereby"
```

**Key Decision**: No platform-specific fields were added to the models. Instead, the `extra_data` field in `MeetingData` handles platform-specific information, following the user's rule of using a generic `provider_data` JSON field if needed.

### 5. **Settings Configuration**

#### Feature Flags
```python
# Migration control
DAILY_MIGRATION_ENABLED: bool = True
DAILY_MIGRATION_ROOM_IDS: list[str] = []
DEFAULT_VIDEO_PLATFORM: str = "daily"

# Daily.co specific settings
DAILY_API_KEY: str | None = None
DAILY_WEBHOOK_SECRET: str | None = None
DAILY_SUBDOMAIN: str | None = None
AWS_DAILY_S3_BUCKET: str | None = None
AWS_DAILY_S3_REGION: str = "us-west-2"
AWS_DAILY_ROLE_ARN: str | None = None
```

#### Configuration Pattern
Each platform gets its own configuration namespace while sharing common patterns:

```python
def get_platform_config(platform: str) -> VideoPlatformConfig:
    if platform == "whereby":
        return VideoPlatformConfig(
            api_key=settings.WHEREBY_API_KEY or "",
            webhook_secret=settings.WHEREBY_WEBHOOK_SECRET or "",
            # ... whereby-specific config
        )
    elif platform == "daily":
        return VideoPlatformConfig(
            api_key=settings.DAILY_API_KEY or "",
            webhook_secret=settings.DAILY_WEBHOOK_SECRET or "",
            # ... daily-specific config
        )
```

### 6. **API Integration Updates**

#### Room Creation (views/rooms.py)
Updated to use the platform factory instead of direct Whereby calls:

```python
@router.post("/rooms/{room_name}/meeting")
async def rooms_create_meeting(room_name: str, user: UserInfo):
    # OLD: Direct Whereby integration
    # whereby_meeting = await create_meeting("", end_date=end_date, room=room)

    # NEW: Platform abstraction
    platform = get_platform_for_room(room.id)
    client = create_platform_client(platform)

    meeting_data = await client.create_meeting(
        room_name_prefix=room.name, end_date=end_date, room=room
    )

    await client.upload_logo(meeting_data.room_name, "./images/logo.png")
```

### 7. **Webhook Handling**

#### Separate Webhook Endpoints
Each platform gets its own webhook endpoint with platform-specific signature verification:

```python
# views/daily.py
@router.post("/daily_webhook")
async def daily_webhook(event: DailyWebhookEvent, request: Request):
    # Verify the Daily.co signature
    body = await request.body()
    signature = request.headers.get("X-Daily-Signature", "")

    if not verify_daily_webhook_signature(body, signature):
        raise HTTPException(status_code=401)

    # Handle platform-specific events
    if event.type == "participant.joined":
        await _handle_participant_joined(event)
```

#### Consistent Event Handling
Despite different event formats, the core business logic remains the same:

```python
async def _handle_participant_joined(event):
    room_name = event.data.get("room", {}).get("name")  # Daily.co format
    meeting = await meetings_controller.get_by_room_name(room_name)
    if meeting:
        current_count = getattr(meeting, "num_clients", 0)
        await meetings_controller.update_meeting(
            meeting.id, num_clients=current_count + 1
        )
```

### 8. **Worker Task Integration**

#### New Task for Daily.co Recording Processing
Added platform-specific recording processing while maintaining the same pipeline:

```python
@shared_task
@asynctask
async def process_recording_from_url(recording_url: str, meeting_id: str, recording_id: str):
    """Process a recording from a direct URL (Daily.co webhook)."""
    logger.info("Processing recording from URL for meeting: %s", meeting_id)
    # Uses the same processing pipeline as Whereby S3 recordings
```

**Key Decision**: Worker tasks remain in the main worker module but could be moved to platform-specific folders as suggested by the user.

### 9. **Testing Infrastructure**

#### Comprehensive Test Suite
- Unit tests for each platform client
- Integration tests for platform switching
- A mock platform for testing without external dependencies
- Webhook signature verification tests

```python
class TestPlatformIntegration:
    """Integration tests for platform switching."""

    async def test_platform_switching_preserves_interface(self):
        """Test that different platforms provide a consistent interface."""
        # Test that both the Mock and Daily platforms return MeetingData objects
        # with consistent fields
```
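To make the interface concrete, here is what a minimal mock client could look like (a sketch in the spirit of mock.py, not its actual contents):

```python
class MockClient(VideoPlatformClient):
    PLATFORM_NAME = "mock"

    def __init__(self, config: VideoPlatformConfig):
        self.config = config

    async def create_meeting(self, room_name_prefix: str, end_date: datetime, room: Room) -> MeetingData:
        # Deterministic fake meeting, useful for asserting on fields in tests
        room_name = f"{room_name_prefix}-mock"
        return MeetingData(
            meeting_id="mock-meeting-id",
            room_name=room_name,
            room_url=f"https://mock.invalid/{room_name}",
            host_room_url=f"https://mock.invalid/{room_name}?host=1",
            platform=self.PLATFORM_NAME,
        )

    async def get_room_sessions(self, room_name: str) -> Dict[str, Any]:
        return {"sessions": []}

    async def delete_room(self, room_name: str) -> bool:
        return True

    async def upload_logo(self, room_name: str, logo_path: str) -> bool:
        return True

    def verify_webhook_signature(self, body: bytes, signature: str, timestamp: Optional[str] = None) -> bool:
        return True  # never reject in tests
```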
## Implementation Patterns for Jitsi Integration

Based on the daily.co implementation, here's how Jitsi should be integrated:

### 1. **Jitsi Client Implementation**

```python
# video_platforms/jitsi.py
class JitsiClient(VideoPlatformClient):
    PLATFORM_NAME = "jitsi"

    async def create_meeting(self, room_name_prefix: str, end_date: datetime, room: Room) -> MeetingData:
        # Generate a unique room name
        jitsi_room = f"reflector-{room.name}-{int(time.time())}"

        # Generate JWT tokens
        user_jwt = self._generate_jwt(room=jitsi_room, moderator=False, exp=end_date)
        host_jwt = self._generate_jwt(room=jitsi_room, moderator=True, exp=end_date)

        return MeetingData(
            meeting_id=generate_uuid4(),
            room_name=jitsi_room,
            room_url=f"https://jitsi.domain/{jitsi_room}?jwt={user_jwt}",
            host_room_url=f"https://jitsi.domain/{jitsi_room}?jwt={host_jwt}",
            platform=self.PLATFORM_NAME,
            extra_data={"user_jwt": user_jwt, "host_jwt": host_jwt},
        )
```
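The sketch above leaves `_generate_jwt` undefined. A plausible implementation as a method on `JitsiClient`, using PyJWT and reusing the claim structure from the Jitsi configuration guide (the issuer/audience settings and the top-level `moderator` claim are assumptions to be matched against your Prosody setup):

```python
import jwt  # PyJWT


class JitsiClient(VideoPlatformClient):
    def _generate_jwt(self, room: str, moderator: bool, exp: datetime) -> str:
        """Sign a Jitsi room token; Prosody's token_verification checks iss/aud/room/exp."""
        payload = {
            "iss": settings.JITSI_JWT_ISSUER,
            "aud": settings.JITSI_JWT_AUDIENCE,
            "sub": settings.JITSI_DOMAIN,
            "room": room,
            "exp": int(exp.timestamp()),
            "moderator": moderator,  # assumption: moderator conveyed as a top-level claim
            "context": {"user": {"name": "Reflector participant"}},
        }
        return jwt.encode(payload, settings.JITSI_JWT_SECRET, algorithm="HS256")
```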
### 2. **Settings Integration**
|
||||
|
||||
```python
|
||||
# settings.py
|
||||
JITSI_DOMAIN: str = "meet.jit.si"
|
||||
JITSI_JWT_SECRET: str | None = None
|
||||
JITSI_WEBHOOK_SECRET: str | None = None
|
||||
JITSI_API_URL: str | None = None # If using Jitsi API
|
||||
```
|
||||
|
||||
### 3. **Factory Registration**
|
||||
|
||||
```python
|
||||
# registry.py
|
||||
def _register_builtin_platforms():
|
||||
from .jitsi import JitsiClient
|
||||
register_platform("jitsi", JitsiClient)
|
||||
|
||||
# factory.py
|
||||
def get_platform_config(platform: str) -> VideoPlatformConfig:
|
||||
elif platform == "jitsi":
|
||||
return VideoPlatformConfig(
|
||||
api_key="", # Jitsi may not need API key
|
||||
webhook_secret=settings.JITSI_WEBHOOK_SECRET or "",
|
||||
api_url=settings.JITSI_API_URL,
|
||||
)
|
||||
```
|
||||
|
||||
### 4. **Webhook Integration**
|
||||
|
||||
```python
|
||||
# views/jitsi.py
|
||||
@router.post("/jitsi/events")
|
||||
async def jitsi_events_webhook(event_data: dict):
|
||||
# Handle Prosody event-sync webhook format
|
||||
event_type = event_data.get("event")
|
||||
room_name = event_data.get("room", "").split("@")[0]
|
||||
|
||||
if event_type == "muc-occupant-joined":
|
||||
# Same participant handling logic as other platforms
|
||||
```

## Key Benefits of This Architecture

### 1. **Isolation and Organization**
- Platform-specific code contained in separate modules
- No platform logic leaking into the core application
- Easy to add or remove platforms without affecting others

### 2. **Consistent Interface**
- All platforms implement the same abstract methods
- Standardized `MeetingData` structure
- Uniform error handling and logging

### 3. **Gradual Migration Support**
- Feature flags for controlled rollouts
- Room-specific platform selection
- Fallback mechanisms for platform failures

### 4. **Configuration Management**
- Centralized settings per platform
- Consistent naming patterns
- Environment-based configuration

### 5. **Testing and Quality**
- Mock platform for testing
- Comprehensive test coverage
- Platform-specific test utilities

## Migration Strategy Applied

The daily.co implementation demonstrates a careful migration approach:

### 1. **Backward Compatibility**
- Default platform remains "whereby"
- Existing rooms continue using Whereby unless explicitly migrated
- Same API endpoints and response formats

### 2. **Feature Flag Control**
```python
# Gradual rollout control
DAILY_MIGRATION_ENABLED: bool = True
DAILY_MIGRATION_ROOM_IDS: list[str] = []  # Specific rooms to migrate
DEFAULT_VIDEO_PLATFORM: str = "daily"  # New rooms default
```
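
A plausible selection helper built on these flags, sketched here rather than taken from the PR (assumes `Room` exposes `id` and `platform`):

```python
# Sketch: room-level platform selection driven by the migration flags.
def select_platform(room: Room) -> str:
    if room.platform:  # room already pinned to a platform
        return room.platform
    if settings.DAILY_MIGRATION_ENABLED and room.id in settings.DAILY_MIGRATION_ROOM_IDS:
        return "daily"
    return settings.DEFAULT_VIDEO_PLATFORM
```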

### 3. **Data Integrity**
- Platform field tracks which service each room/meeting uses
- No data loss during migration
- Platform-specific data preserved in `extra_data`

### 4. **Monitoring and Rollback**
- Comprehensive logging of platform selection
- Easy rollback by changing feature flags
- Platform-specific error tracking

## Recommendations for Jitsi Integration

Based on this analysis and the user's requirements:

### 1. **Follow the Pattern**
- Create `video_platforms/jitsi/` directory with:
  - `client.py` - Main JitsiClient implementation
  - `tasks.py` - Jitsi-specific worker tasks
  - `__init__.py` - Module exports

### 2. **Settings Organization**
- Use `JITSI_*` prefix for all Jitsi settings
- Follow the same configuration pattern as Daily.co
- Support both environment variables and config files

### 3. **Generic Database Fields**
- Avoid platform-specific columns in the database
- Use a `provider_data` JSON field if platform-specific data is needed
- Keep the `platform` field as a simple string identifier
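
A minimal sketch of the generic columns with SQLAlchemy (illustrative; everything except the `platform` and `provider_data` names is an assumption):

```python
# Illustrative model fragment: generic platform columns, no provider-specific ones.
from sqlalchemy import JSON, Column, String

class Meeting(Base):  # Base assumed from the project's models
    __tablename__ = "meetings"

    id = Column(String, primary_key=True)
    platform = Column(String, nullable=False, default="whereby")
    provider_data = Column(JSON, nullable=True)  # platform-specific payload
```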

### 4. **Worker Task Migration**
According to the user's requirements, migrate platform-specific tasks into the platform folders (a skeleton for the new Jitsi tasks module follows the tree):
```
video_platforms/
├── whereby/
│   ├── client.py (moved from whereby.py)
│   └── tasks.py (moved from worker/whereby_tasks.py)
├── daily/
│   ├── client.py (moved from daily.py)
│   └── tasks.py (moved from worker/daily_tasks.py)
└── jitsi/
    ├── client.py (new JitsiClient)
    └── tasks.py (new Jitsi recording tasks)
```
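
A skeleton for that new Jitsi tasks module might look like the following (hypothetical Celery task; the Jibri handling is only stubbed):

```python
# video_platforms/jitsi/tasks.py — hypothetical skeleton, not PR code.
from celery import shared_task

@shared_task
def process_jitsi_recording(meeting_id: str, recording_url: str) -> None:
    """Fetch a finished Jibri recording and hand it to the existing
    transcript pipeline, mirroring the daily/whereby task layout."""
    # 1. Download the recording from the Jibri storage location.
    # 2. Enqueue the shared transcription/diarization pipeline.
    ...
```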

### 5. **Webhook Architecture**
- Create `views/jitsi.py` for Jitsi-specific webhooks
- Follow the same signature verification pattern
- Reuse the existing participant tracking logic

## Implementation Checklist for Jitsi

- [ ] Create `video_platforms/jitsi/` directory structure
- [ ] Implement `JitsiClient` following the abstract interface
- [ ] Add Jitsi settings to configuration
- [ ] Register the Jitsi platform in the factory/registry
- [ ] Create the Jitsi webhook endpoint
- [ ] Implement JWT token generation for room access
- [ ] Add Jitsi recording processing tasks
- [ ] Create a comprehensive test suite
- [ ] Update database migrations for the platform field
- [ ] Document Jitsi-specific configuration

## Conclusion

The video platforms refactoring in PR #529 provides an excellent foundation for adding Jitsi support. The architecture is well designed, with clear separation of concerns, consistent interfaces, and strong extensibility. The daily.co implementation demonstrates how to add a new platform while maintaining backward compatibility and providing gradual migration capabilities.

The pattern should be directly applicable to Jitsi integration, with the main differences being:
- JWT-based authentication instead of API keys
- Different webhook event formats
- Jibri recording pipeline integration
- Self-hosted deployment considerations

This architecture successfully achieves the user's goals of:
1. Settings-based configuration
2. Generic database fields (no provider-specific columns)
3. Platform isolation in separate directories
4. Worker task organization within platform folders
33
gpu/modal_deployments/.gitignore
vendored
@@ -1,33 +0,0 @@
# OS / Editor
.DS_Store
.vscode/
.idea/

# Python
__pycache__/
*.py[cod]
*$py.class

# Logs
*.log

# Env and secrets
.env
.env.*
*.env
*.secret

# Build / dist
build/
dist/
.eggs/
*.egg-info/

# Coverage / test
.pytest_cache/
.coverage*
htmlcov/

# Modal local state (if any)
modal_mounts/
.modal_cache/
@@ -1,150 +0,0 @@
#!/bin/bash
set -e

# --- Usage ---
usage() {
    echo "Usage: $0 [OPTIONS]"
    echo ""
    echo "Options:"
    echo "  --hf-token TOKEN    HuggingFace token"
    echo "  --help              Show this help message"
    echo ""
    echo "Examples:"
    echo "  $0                     # Interactive mode"
    echo "  $0 --hf-token hf_xxxxx # Non-interactive mode"
    echo ""
    exit 0
}

# --- Parse Arguments ---
HF_TOKEN=""
while [[ $# -gt 0 ]]; do
    case $1 in
        --hf-token)
            HF_TOKEN="$2"
            shift 2
            ;;
        --help)
            usage
            ;;
        *)
            echo "Unknown option: $1"
            usage
            ;;
    esac
done

echo "=========================================="
echo "Reflector GPU Functions Deployment"
echo "=========================================="
echo ""

# --- Check Dependencies ---
if ! command -v modal &> /dev/null; then
    echo "Error: Modal CLI not installed."
    echo "  Install with: pip install modal"
    exit 1
fi

if ! command -v openssl &> /dev/null; then
    echo "Error: openssl not found."
    echo "  Mac: brew install openssl"
    echo "  Ubuntu: sudo apt-get install openssl"
    exit 1
fi

# Check Modal authentication
if ! modal profile current &> /dev/null; then
    echo "Error: Not authenticated with Modal."
    echo "  Run: modal setup"
    exit 1
fi

# --- HuggingFace Token Setup ---
if [ -z "$HF_TOKEN" ]; then
    echo "HuggingFace token required for Pyannote diarization model."
    echo "1. Create account at https://huggingface.co"
    echo "2. Accept license at https://huggingface.co/pyannote/speaker-diarization-3.1"
    echo "3. Generate token at https://huggingface.co/settings/tokens"
    echo ""
    read -p "Enter your HuggingFace token: " HF_TOKEN
fi

if [ -z "$HF_TOKEN" ]; then
    echo "Error: HuggingFace token is required for diarization"
    exit 1
fi

# Basic token format validation
if [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
    echo "Warning: HuggingFace tokens usually start with 'hf_'"
    if [ -t 0 ]; then
        read -p "Continue anyway? (y/n): " confirm
        if [ "$confirm" != "y" ]; then
            exit 1
        fi
    else
        echo "Non-interactive mode: proceeding anyway"
    fi
fi

# --- Auto-generate reflector<->GPU API Key ---
echo ""
echo "Generating API key for GPU services..."
API_KEY=$(openssl rand -hex 32)

# --- Create Modal Secrets ---
echo "Creating Modal secrets..."

# Create or update hf_token secret (delete first if exists)
if modal secret list 2>/dev/null | grep -q "hf_token"; then
    echo "  -> Recreating secret: hf_token"
    modal secret delete hf_token --yes 2>/dev/null || true
fi
echo "  -> Creating secret: hf_token"
modal secret create hf_token HF_TOKEN="$HF_TOKEN"

# Create or update reflector-gpu secret (delete first if exists)
if modal secret list 2>/dev/null | grep -q "reflector-gpu"; then
    echo "  -> Recreating secret: reflector-gpu"
    modal secret delete reflector-gpu --yes 2>/dev/null || true
fi
echo "  -> Creating secret: reflector-gpu"
modal secret create reflector-gpu REFLECTOR_GPU_APIKEY="$API_KEY"

# --- Deploy Functions ---
echo ""
echo "Deploying transcriber (Whisper)..."
TRANSCRIBER_URL=$(modal deploy reflector_transcriber.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
if [ -z "$TRANSCRIBER_URL" ]; then
    echo "Error: Failed to deploy transcriber. Check Modal dashboard for details."
    exit 1
fi
echo "  -> $TRANSCRIBER_URL"

echo ""
echo "Deploying diarizer (Pyannote)..."
DIARIZER_URL=$(modal deploy reflector_diarizer.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
if [ -z "$DIARIZER_URL" ]; then
    echo "Error: Failed to deploy diarizer. Check Modal dashboard for details."
    exit 1
fi
echo "  -> $DIARIZER_URL"

# --- Output Configuration ---
echo ""
echo "=========================================="
echo "Deployment complete!"
echo "=========================================="
echo ""
echo "Copy these values to your server's server/.env file:"
echo ""
echo "# --- Modal GPU Configuration ---"
echo "TRANSCRIPT_BACKEND=modal"
echo "TRANSCRIPT_URL=$TRANSCRIBER_URL"
echo "TRANSCRIPT_MODAL_API_KEY=$API_KEY"
echo ""
echo "DIARIZATION_BACKEND=modal"
echo "DIARIZATION_URL=$DIARIZER_URL"
echo "DIARIZATION_MODAL_API_KEY=$API_KEY"
echo "# --- End Modal Configuration ---"
@@ -1,2 +0,0 @@
REFLECTOR_GPU_APIKEY=
HF_TOKEN=
38
gpu/self_hosted/.gitignore
vendored
@@ -1,38 +0,0 @@
cache/

# OS / Editor
.DS_Store
.vscode/
.idea/

# Python
__pycache__/
*.py[cod]
*$py.class

# Env and secrets
.env
*.env
*.secret
HF_TOKEN
REFLECTOR_GPU_APIKEY

# Virtual env / uv
.venv/
venv/
ENV/
uv/

# Build / dist
build/
dist/
.eggs/
*.egg-info/

# Coverage / test
.pytest_cache/
.coverage*
htmlcov/

# Logs
*.log
@@ -1,137 +0,0 @@
# Local Development GPU Setup

Run transcription and diarization locally for development/testing.

> **For production deployment**, see the [Self-Hosted GPU Setup Guide](../../docs/docs/installation/self-hosted-gpu-setup.md).

## Prerequisites

1. **Python 3.12+** and **uv** package manager
2. **FFmpeg** installed and on PATH
3. **HuggingFace account** with access to pyannote models

### Accept Pyannote Licenses (Required)

Before first run, accept licenses for these gated models (logged into HuggingFace):
- https://hf.co/pyannote/speaker-diarization-3.1
- https://hf.co/pyannote/segmentation-3.0

## Quick Start

### 1. Install dependencies

```bash
cd gpu/self_hosted
uv sync
```

### 2. Start the GPU service

```bash
cd gpu/self_hosted
HF_TOKEN=<your-huggingface-token> \
REFLECTOR_GPU_APIKEY=dev-key-12345 \
.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
```

Note: The `.env` file is NOT auto-loaded. Pass env vars explicitly or use:
```bash
export HF_TOKEN=<your-token>
export REFLECTOR_GPU_APIKEY=dev-key-12345
.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
```

### 3. Configure Reflector to use local GPU

Edit `server/.env`:

```bash
# Transcription - local GPU service
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=http://host.docker.internal:8000
TRANSCRIPT_MODAL_API_KEY=dev-key-12345

# Diarization - local GPU service
DIARIZATION_BACKEND=modal
DIARIZATION_URL=http://host.docker.internal:8000
DIARIZATION_MODAL_API_KEY=dev-key-12345
```

Note: Use `host.docker.internal` because the Reflector server runs in Docker.

### 4. Restart Reflector server

```bash
cd server
docker compose restart server worker
```

## Testing

### Test transcription

```bash
curl -s -X POST http://localhost:8000/v1/audio/transcriptions \
  -H "Authorization: Bearer dev-key-12345" \
  -F "file=@/path/to/audio.wav" \
  -F "language=en"
```

### Test diarization

```bash
curl -s -X POST "http://localhost:8000/diarize?audio_file_url=<audio-url>" \
  -H "Authorization: Bearer dev-key-12345"
```

## Platform Notes

### macOS (ARM)

Docker build fails - CUDA packages are x86_64 only. Use local Python instead:
```bash
uv sync
HF_TOKEN=xxx REFLECTOR_GPU_APIKEY=xxx .venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
```

### Linux with NVIDIA GPU

Docker works with CUDA acceleration:
```bash
docker compose up -d
```

### CPU-only

Works on any platform, just slower. PyTorch auto-detects and falls back to CPU.

## Switching Back to Modal.com

Edit `server/.env`:

```bash
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-parakeet-web.modal.run
TRANSCRIPT_MODAL_API_KEY=<modal-api-key>

DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
DIARIZATION_MODAL_API_KEY=<modal-api-key>
```

## Troubleshooting

### "Could not download pyannote pipeline"
- Accept model licenses at HuggingFace (see Prerequisites)
- Verify HF_TOKEN is set and valid

### Service won't start
- Check port 8000 is free: `lsof -i :8000`
- Kill orphan processes if needed

### Transcription returns empty text
- Ensure audio contains speech (not just tones/silence)
- Check audio format is supported (wav, mp3, etc.)

### Deprecation warnings from torchaudio/pyannote
- Safe to ignore - doesn't affect functionality
@@ -1,46 +0,0 @@
FROM python:3.12-slim

ENV PYTHONUNBUFFERED=1 \
    UV_LINK_MODE=copy \
    UV_NO_CACHE=1

WORKDIR /tmp
RUN apt-get update \
    && apt-get install -y \
        ffmpeg \
        curl \
        ca-certificates \
        gnupg \
        wget \
    && apt-get clean
# Add NVIDIA CUDA repo for Debian 12 (bookworm) and install cuDNN 9 for CUDA 12
ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /cuda-keyring.deb
RUN dpkg -i /cuda-keyring.deb \
    && rm /cuda-keyring.deb \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
        cuda-cudart-12-6 \
        libcublas-12-6 \
        libcudnn9-cuda-12 \
        libcudnn9-dev-cuda-12 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
ADD https://astral.sh/uv/install.sh /uv-installer.sh
RUN sh /uv-installer.sh && rm /uv-installer.sh
ENV PATH="/root/.local/bin/:$PATH"
ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH"

RUN mkdir -p /app
WORKDIR /app
COPY pyproject.toml uv.lock /app/

COPY ./app /app/app
COPY ./main.py /app/
COPY ./runserver.sh /app/

EXPOSE 8000

CMD ["sh", "/app/runserver.sh"]
@@ -1,77 +0,0 @@
# Self-hosted Model API

Run transcription, translation, and diarization services compatible with Reflector's GPU Model API. Works on CPU or GPU.

Environment variables

- REFLECTOR_GPU_APIKEY: Optional Bearer token. If unset, auth is disabled.
- HF_TOKEN: Optional. Required for diarization to download pyannote pipelines

Requirements

- FFmpeg must be installed and on PATH (used for URL-based and segmented transcription)
- Python 3.12+
- NVIDIA GPU optional. If available, it will be used automatically

Local run
Set env vars in self_hosted/.env file
uv sync

uv run uvicorn main:app --host 0.0.0.0 --port 8000

Authentication

- If REFLECTOR_GPU_APIKEY is set, include header: Authorization: Bearer <key>

Endpoints

- POST /v1/audio/transcriptions

  - multipart/form-data
  - fields: file (single file) OR files[] (multiple files), language, batch (true/false)
  - response: single { text, words, filename } or { results: [ ... ] }

- POST /v1/audio/transcriptions-from-url

  - application/json
  - body: { audio_file_url, language, timestamp_offset }
  - response: { text, words }

- POST /translate

  - text: query parameter
  - body (application/json): { source_language, target_language }
  - response: { text: { <src>: original, <tgt>: translated } }

- POST /diarize
  - query parameters: audio_file_url, timestamp (optional)
  - requires HF_TOKEN to be set (for pyannote)
  - response: { diarization: [ { start, end, speaker } ] }

OpenAPI docs

- Visit /docs when the server is running

Docker

- Not yet provided in this directory. A Dockerfile will be added later. For now, use Local run above

# Setup

[SETUP.md](SETUP.md)

# Conformance tests

## From this directory

TRANSCRIPT_URL=http://localhost:8000 \
TRANSCRIPT_API_KEY=dev-key \
uv run -m pytest -m model_api --no-cov ../../server/tests/test_model_api_transcript.py

TRANSLATION_URL=http://localhost:8000 \
TRANSLATION_API_KEY=dev-key \
uv run -m pytest -m model_api --no-cov ../../server/tests/test_model_api_translation.py

DIARIZATION_URL=http://localhost:8000 \
DIARIZATION_API_KEY=dev-key \
uv run -m pytest -m model_api --no-cov ../../server/tests/test_model_api_diarization.py
@@ -1,19 +0,0 @@
import os

from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer

oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")


def apikey_auth(apikey: str = Depends(oauth2_scheme)):
    required_key = os.environ.get("REFLECTOR_GPU_APIKEY")
    if not required_key:
        return
    if apikey == required_key:
        return
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API key",
        headers={"WWW-Authenticate": "Bearer"},
    )
@@ -1,12 +0,0 @@
from pathlib import Path

SUPPORTED_FILE_EXTENSIONS = ["mp3", "mp4", "mpeg", "mpga", "m4a", "wav", "webm"]
SAMPLE_RATE = 16000
VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}

# App-level paths
UPLOADS_PATH = Path("/tmp/whisper-uploads")
@@ -1,30 +0,0 @@
from contextlib import asynccontextmanager

from fastapi import FastAPI

from .routers.diarization import router as diarization_router
from .routers.transcription import router as transcription_router
from .routers.translation import router as translation_router
from .services.transcriber import WhisperService
from .services.diarizer import PyannoteDiarizationService
from .utils import ensure_dirs


@asynccontextmanager
async def lifespan(app: FastAPI):
    ensure_dirs()
    whisper_service = WhisperService()
    whisper_service.load()
    app.state.whisper = whisper_service
    diarization_service = PyannoteDiarizationService()
    diarization_service.load()
    app.state.diarizer = diarization_service
    yield


def create_app() -> FastAPI:
    app = FastAPI(lifespan=lifespan)
    app.include_router(transcription_router)
    app.include_router(translation_router)
    app.include_router(diarization_router)
    return app
@@ -1,30 +0,0 @@
from typing import List

from fastapi import APIRouter, Depends, Request
from pydantic import BaseModel

from ..auth import apikey_auth
from ..services.diarizer import PyannoteDiarizationService
from ..utils import download_audio_file

router = APIRouter(tags=["diarization"])


class DiarizationSegment(BaseModel):
    start: float
    end: float
    speaker: int


class DiarizationResponse(BaseModel):
    diarization: List[DiarizationSegment]


@router.post(
    "/diarize", dependencies=[Depends(apikey_auth)], response_model=DiarizationResponse
)
def diarize(request: Request, audio_file_url: str, timestamp: float = 0.0):
    with download_audio_file(audio_file_url) as (file_path, _ext):
        file_path = str(file_path)
        diarizer: PyannoteDiarizationService = request.app.state.diarizer
        return diarizer.diarize_file(file_path, timestamp=timestamp)
@@ -1,109 +0,0 @@
import uuid
from typing import Optional, Union

from fastapi import APIRouter, Body, Depends, Form, HTTPException, Request, UploadFile
from pydantic import BaseModel
from pathlib import Path
from ..auth import apikey_auth
from ..config import SUPPORTED_FILE_EXTENSIONS, UPLOADS_PATH
from ..services.transcriber import MODEL_NAME
from ..utils import cleanup_uploaded_files, download_audio_file

router = APIRouter(prefix="/v1/audio", tags=["transcription"])


class WordTiming(BaseModel):
    word: str
    start: float
    end: float


class TranscriptResult(BaseModel):
    text: str
    words: list[WordTiming]
    filename: Optional[str] = None


class TranscriptBatchResponse(BaseModel):
    results: list[TranscriptResult]


@router.post(
    "/transcriptions",
    dependencies=[Depends(apikey_auth)],
    response_model=Union[TranscriptResult, TranscriptBatchResponse],
)
def transcribe(
    request: Request,
    file: UploadFile = None,
    files: list[UploadFile] | None = None,
    model: str = Form(MODEL_NAME),
    language: str = Form("en"),
    batch: bool = Form(False),
):
    service = request.app.state.whisper
    if not file and not files:
        raise HTTPException(
            status_code=400, detail="Either 'file' or 'files' parameter is required"
        )
    if batch and not files:
        raise HTTPException(
            status_code=400, detail="Batch transcription requires 'files'"
        )

    upload_files = [file] if file else files

    uploaded_paths: list[Path] = []
    with cleanup_uploaded_files(uploaded_paths):
        for upload_file in upload_files:
            audio_suffix = upload_file.filename.split(".")[-1].lower()
            if audio_suffix not in SUPPORTED_FILE_EXTENSIONS:
                raise HTTPException(
                    status_code=400,
                    detail=(
                        f"Unsupported audio format. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
                    ),
                )
            unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
            file_path = UPLOADS_PATH / unique_filename
            with open(file_path, "wb") as f:
                content = upload_file.file.read()
                f.write(content)
            uploaded_paths.append(file_path)

        if batch and len(upload_files) > 1:
            results = []
            for path in uploaded_paths:
                result = service.transcribe_file(str(path), language=language)
                result["filename"] = path.name
                results.append(result)
            return {"results": results}

        results = []
        for path in uploaded_paths:
            result = service.transcribe_file(str(path), language=language)
            result["filename"] = path.name
            results.append(result)

        return {"results": results} if len(results) > 1 else results[0]


@router.post(
    "/transcriptions-from-url",
    dependencies=[Depends(apikey_auth)],
    response_model=TranscriptResult,
)
def transcribe_from_url(
    request: Request,
    audio_file_url: str = Body(..., description="URL of the audio file to transcribe"),
    model: str = Body(MODEL_NAME),
    language: str = Body("en"),
    timestamp_offset: float = Body(0.0),
):
    service = request.app.state.whisper
    with download_audio_file(audio_file_url) as (file_path, _ext):
        file_path = str(file_path)
        result = service.transcribe_vad_url_segment(
            file_path=file_path, timestamp_offset=timestamp_offset, language=language
        )
        return result
@@ -1,28 +0,0 @@
from typing import Dict

from fastapi import APIRouter, Body, Depends
from pydantic import BaseModel

from ..auth import apikey_auth
from ..services.translator import TextTranslatorService

router = APIRouter(tags=["translation"])

translator = TextTranslatorService()


class TranslationResponse(BaseModel):
    text: Dict[str, str]


@router.post(
    "/translate",
    dependencies=[Depends(apikey_auth)],
    response_model=TranslationResponse,
)
def translate(
    text: str,
    source_language: str = Body("en"),
    target_language: str = Body("fr"),
):
    return translator.translate(text, source_language, target_language)
@@ -1,42 +0,0 @@
import os
import threading

import torch
import torchaudio
from pyannote.audio import Pipeline


class PyannoteDiarizationService:
    def __init__(self):
        self._pipeline = None
        self._device = "cpu"
        self._lock = threading.Lock()

    def load(self):
        self._device = "cuda" if torch.cuda.is_available() else "cpu"
        self._pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            use_auth_token=os.environ.get("HF_TOKEN"),
        )
        self._pipeline.to(torch.device(self._device))

    def diarize_file(self, file_path: str, timestamp: float = 0.0) -> dict:
        if self._pipeline is None:
            self.load()
        waveform, sample_rate = torchaudio.load(file_path)
        with self._lock:
            diarization = self._pipeline(
                {"waveform": waveform, "sample_rate": sample_rate}
            )
        words = []
        for diarization_segment, _, speaker in diarization.itertracks(yield_label=True):
            words.append(
                {
                    "start": round(timestamp + diarization_segment.start, 3),
                    "end": round(timestamp + diarization_segment.end, 3),
                    "speaker": int(speaker[-2:])
                    if speaker and speaker[-2:].isdigit()
                    else 0,
                }
            )
        return {"diarization": words}
@@ -1,217 +0,0 @@
import os
import shutil
import subprocess
import threading
from typing import Generator

import faster_whisper
import librosa
import numpy as np
import torch
from fastapi import HTTPException
from silero_vad import VADIterator, load_silero_vad

from ..config import SAMPLE_RATE, VAD_CONFIG

# Whisper configuration (service-local defaults)
MODEL_NAME = "large-v2"
# None delegates compute type to runtime: float16 on CUDA, int8 on CPU
MODEL_COMPUTE_TYPE = None
MODEL_NUM_WORKERS = 1
CACHE_PATH = os.path.join(os.path.expanduser("~"), ".cache", "reflector-whisper")
from ..utils import NoStdStreams


class WhisperService:
    def __init__(self):
        self.model = None
        self.device = "cpu"
        self.lock = threading.Lock()

    def load(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        compute_type = MODEL_COMPUTE_TYPE or (
            "float16" if self.device == "cuda" else "int8"
        )
        self.model = faster_whisper.WhisperModel(
            MODEL_NAME,
            device=self.device,
            compute_type=compute_type,
            num_workers=MODEL_NUM_WORKERS,
            download_root=CACHE_PATH,
        )

    def pad_audio(self, audio_array, sample_rate: int = SAMPLE_RATE):
        audio_duration = len(audio_array) / sample_rate
        if audio_duration < VAD_CONFIG["silence_padding"]:
            silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
            silence = np.zeros(silence_samples, dtype=np.float32)
            return np.concatenate([audio_array, silence])
        return audio_array

    def enforce_word_timing_constraints(self, words: list[dict]) -> list[dict]:
        if len(words) <= 1:
            return words
        enforced: list[dict] = []
        for i, word in enumerate(words):
            current = dict(word)
            if i < len(words) - 1:
                next_start = words[i + 1]["start"]
                if current["end"] > next_start:
                    current["end"] = next_start
            enforced.append(current)
        return enforced

    def transcribe_file(self, file_path: str, language: str = "en") -> dict:
        input_for_model: str | "object" = file_path
        try:
            audio_array, _sample_rate = librosa.load(
                file_path, sr=SAMPLE_RATE, mono=True
            )
            if len(audio_array) / float(SAMPLE_RATE) < VAD_CONFIG["silence_padding"]:
                input_for_model = self.pad_audio(audio_array, SAMPLE_RATE)
        except Exception:
            pass

        with self.lock:
            with NoStdStreams():
                segments, _ = self.model.transcribe(
                    input_for_model,
                    language=language,
                    beam_size=5,
                    word_timestamps=True,
                    vad_filter=True,
                    vad_parameters={"min_silence_duration_ms": 500},
                )

        segments = list(segments)
        text = "".join(segment.text for segment in segments).strip()
        words = [
            {
                "word": word.word,
                "start": round(float(word.start), 2),
                "end": round(float(word.end), 2),
            }
            for segment in segments
            for word in segment.words
        ]
        words = self.enforce_word_timing_constraints(words)
        return {"text": text, "words": words}

    def transcribe_vad_url_segment(
        self, file_path: str, timestamp_offset: float = 0.0, language: str = "en"
    ) -> dict:
        def load_audio_via_ffmpeg(input_path: str, sample_rate: int) -> np.ndarray:
            ffmpeg_bin = shutil.which("ffmpeg") or "ffmpeg"
            cmd = [
                ffmpeg_bin,
                "-nostdin",
                "-threads",
                "1",
                "-i",
                input_path,
                "-f",
                "f32le",
                "-acodec",
                "pcm_f32le",
                "-ac",
                "1",
                "-ar",
                str(sample_rate),
                "pipe:1",
            ]
            try:
                proc = subprocess.run(
                    cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
                )
            except Exception as e:
                raise HTTPException(status_code=400, detail=f"ffmpeg failed: {e}")
            audio = np.frombuffer(proc.stdout, dtype=np.float32)
            return audio

        # IMPORTANT: This VAD segment logic is duplicated in multiple files for deployment isolation.
        # If you modify this function, you MUST update all copies:
        # - gpu/modal_deployments/reflector_transcriber.py
        # - gpu/modal_deployments/reflector_transcriber_parakeet.py
        # - gpu/self_hosted/app/services/transcriber.py (this file)
        def vad_segments(
            audio_array,
            sample_rate: int = SAMPLE_RATE,
            window_size: int = VAD_CONFIG["window_size"],
        ) -> Generator[tuple[float, float], None, None]:
            vad_model = load_silero_vad(onnx=False)
            iterator = VADIterator(vad_model, sampling_rate=sample_rate)
            start = None
            for i in range(0, len(audio_array), window_size):
                chunk = audio_array[i : i + window_size]
                if len(chunk) < window_size:
                    chunk = np.pad(
                        chunk, (0, window_size - len(chunk)), mode="constant"
                    )
                speech = iterator(chunk)
                if not speech:
                    continue
                if "start" in speech:
                    start = speech["start"]
                    continue
                if "end" in speech and start is not None:
                    end = speech["end"]
                    yield (start / float(SAMPLE_RATE), end / float(SAMPLE_RATE))
                    start = None
            # Handle case where audio ends while speech is still active
            if start is not None:
                audio_duration = len(audio_array) / float(sample_rate)
                yield (start / float(SAMPLE_RATE), audio_duration)
            iterator.reset_states()

        audio_array = load_audio_via_ffmpeg(file_path, SAMPLE_RATE)

        merged_batches: list[tuple[float, float]] = []
        batch_start = None
        batch_end = None
        max_duration = VAD_CONFIG["batch_max_duration"]
        for seg_start, seg_end in vad_segments(audio_array):
            if batch_start is None:
                batch_start, batch_end = seg_start, seg_end
                continue
            if seg_end - batch_start <= max_duration:
                batch_end = seg_end
            else:
                merged_batches.append((batch_start, batch_end))
                batch_start, batch_end = seg_start, seg_end
        if batch_start is not None and batch_end is not None:
            merged_batches.append((batch_start, batch_end))

        all_text = []
        all_words = []
        for start_time, end_time in merged_batches:
            s_idx = int(start_time * SAMPLE_RATE)
            e_idx = int(end_time * SAMPLE_RATE)
            segment = audio_array[s_idx:e_idx]
            segment = self.pad_audio(segment, SAMPLE_RATE)
            with self.lock:
                segments, _ = self.model.transcribe(
                    segment,
                    language=language,
                    beam_size=5,
                    word_timestamps=True,
                    vad_filter=True,
                    vad_parameters={"min_silence_duration_ms": 500},
                )
            segments = list(segments)
            text = "".join(seg.text for seg in segments).strip()
            words = [
                {
                    "word": w.word,
                    "start": round(float(w.start) + start_time + timestamp_offset, 2),
                    "end": round(float(w.end) + start_time + timestamp_offset, 2),
                }
                for seg in segments
                for w in seg.words
            ]
            if text:
                all_text.append(text)
                all_words.extend(words)

        all_words = self.enforce_word_timing_constraints(all_words)
        return {"text": " ".join(all_text), "words": all_words}
@@ -1,44 +0,0 @@
import threading

from transformers import MarianMTModel, MarianTokenizer, pipeline


class TextTranslatorService:
    """Simple text-to-text translator using HuggingFace MarianMT models.

    This mirrors the modal translator API shape but uses text translation only.
    """

    def __init__(self):
        self._pipeline = None
        self._lock = threading.Lock()

    def load(self, source_language: str = "en", target_language: str = "fr"):
        # Pick a default MarianMT model pair if available; fall back to Helsinki-NLP en->fr
        model_name = self._resolve_model_name(source_language, target_language)
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        self._pipeline = pipeline("translation", model=model, tokenizer=tokenizer)

    def _resolve_model_name(self, src: str, tgt: str) -> str:
        # Minimal mapping; extend as needed
        pair = (src.lower(), tgt.lower())
        mapping = {
            ("en", "fr"): "Helsinki-NLP/opus-mt-en-fr",
            ("fr", "en"): "Helsinki-NLP/opus-mt-fr-en",
            ("en", "es"): "Helsinki-NLP/opus-mt-en-es",
            ("es", "en"): "Helsinki-NLP/opus-mt-es-en",
            ("en", "de"): "Helsinki-NLP/opus-mt-en-de",
            ("de", "en"): "Helsinki-NLP/opus-mt-de-en",
        }
        return mapping.get(pair, "Helsinki-NLP/opus-mt-en-fr")

    def translate(self, text: str, source_language: str, target_language: str) -> dict:
        if self._pipeline is None:
            self.load(source_language, target_language)
        with self._lock:
            results = self._pipeline(
                text, src_lang=source_language, tgt_lang=target_language
            )
            translated = results[0]["translation_text"] if results else ""
        return {"text": {source_language: text, target_language: translated}}
@@ -1,115 +0,0 @@
import logging
import os
import sys
import uuid
from contextlib import contextmanager
from typing import Mapping
from urllib.parse import urlparse
from pathlib import Path

import requests
from fastapi import HTTPException

from .config import SUPPORTED_FILE_EXTENSIONS, UPLOADS_PATH

logger = logging.getLogger(__name__)


class NoStdStreams:
    def __init__(self):
        self.devnull = open(os.devnull, "w")

    def __enter__(self):
        self._stdout, self._stderr = sys.stdout, sys.stderr
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout, sys.stderr = self.devnull, self.devnull

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout, sys.stderr = self._stdout, self._stderr
        self.devnull.close()


def ensure_dirs():
    UPLOADS_PATH.mkdir(parents=True, exist_ok=True)


# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
# If you modify the audio format detection logic, you MUST update all copies:
# - gpu/self_hosted/app/utils.py (this file)
# - gpu/modal_deployments/reflector_transcriber.py (2 copies)
# - gpu/modal_deployments/reflector_transcriber_parakeet.py
# - gpu/modal_deployments/reflector_diarizer.py
def detect_audio_format(url: str, headers: Mapping[str, str]) -> str:
    url_path = urlparse(url).path
    for ext in SUPPORTED_FILE_EXTENSIONS:
        if url_path.lower().endswith(f".{ext}"):
            return ext

    content_type = headers.get("content-type", "").lower()
    if "audio/mpeg" in content_type or "audio/mp3" in content_type:
        return "mp3"
    if "audio/wav" in content_type:
        return "wav"
    if "audio/mp4" in content_type:
        return "mp4"
    if "audio/webm" in content_type or "video/webm" in content_type:
        return "webm"

    raise HTTPException(
        status_code=400,
        detail=(
            f"Unsupported audio format for URL. Supported extensions: {', '.join(SUPPORTED_FILE_EXTENSIONS)}"
        ),
    )


def download_audio_to_uploads(audio_file_url: str) -> tuple[Path, str]:
    response = requests.head(audio_file_url, allow_redirects=True)
    if response.status_code == 404:
        raise HTTPException(status_code=404, detail="Audio file not found")

    response = requests.get(audio_file_url, allow_redirects=True)
    response.raise_for_status()

    audio_suffix = detect_audio_format(audio_file_url, response.headers)
    unique_filename = f"{uuid.uuid4()}.{audio_suffix}"
    file_path: Path = UPLOADS_PATH / unique_filename

    with open(file_path, "wb") as f:
        f.write(response.content)

    return file_path, audio_suffix


@contextmanager
def download_audio_file(audio_file_url: str):
    """Download an audio file to UPLOADS_PATH and remove it after use.

    Yields (file_path: Path, audio_suffix: str).
    """
    file_path, audio_suffix = download_audio_to_uploads(audio_file_url)
    try:
        yield file_path, audio_suffix
    finally:
        try:
            file_path.unlink(missing_ok=True)
        except Exception as e:
            logger.error("Error deleting temporary file %s: %s", file_path, e)


@contextmanager
def cleanup_uploaded_files(file_paths: list[Path]):
    """Ensure provided file paths are removed after use.

    The provided list can be populated inside the context; all present entries
    at exit will be deleted.
    """
    try:
        yield file_paths
    finally:
        for path in list(file_paths):
            try:
                path.unlink(missing_ok=True)
            except Exception as e:
                logger.error("Error deleting temporary file %s: %s", path, e)
@@ -1,10 +0,0 @@
services:
  reflector_gpu:
    build:
      context: .
    ports:
      - "8000:8000"
    env_file:
      - .env
    volumes:
      - ./cache:/root/.cache
@@ -1,3 +0,0 @@
from app.factory import create_app

app = create_app()
@@ -1,19 +0,0 @@
[project]
name = "reflector-gpu"
version = "0.1.0"
description = "Self-hosted GPU service for speech transcription, diarization, and translation via FastAPI."
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "fastapi[standard]>=0.116.1",
    "uvicorn[standard]>=0.30.0",
    "torch>=2.3.0",
    "faster-whisper>=1.1.0",
    "librosa==0.10.1",
    "numpy<2",
    "silero-vad==5.1.0",
    "transformers>=4.35.0",
    "sentencepiece",
    "pyannote.audio==3.1.0",
    "torchaudio>=2.3.0",
]
@@ -1,17 +0,0 @@
#!/bin/sh
set -e

export PATH="/root/.local/bin:$PATH"
cd /app

# Install Python dependencies at runtime (first run or when FORCE_SYNC=1)
if [ ! -d "/app/.venv" ] || [ "$FORCE_SYNC" = "1" ]; then
    echo "[startup] Installing Python dependencies with uv..."
    uv sync --compile-bytecode --locked
else
    echo "[startup] Using existing virtual environment at /app/.venv"
fi

exec uv run uvicorn main:app --host 0.0.0.0 --port 8000
3013
gpu/self_hosted/uv.lock
generated
File diff suppressed because it is too large
@@ -1,258 +0,0 @@
#!/bin/bash
set -e

# Setup Authentik OAuth provider for Reflector
#
# IMPORTANT: Run this script from your Reflector repository directory (cd ~/reflector)
# The script creates files using relative paths: server/reflector/auth/jwt/keys/
#
# Usage: ./setup-authentik-oauth.sh <authentik-url> <admin-password> <frontend-url>
# Example: ./setup-authentik-oauth.sh https://authentik.example.com MyPassword123 https://app.example.com

AUTHENTIK_URL="${1:-}"
ADMIN_PASSWORD="${2:-}"
FRONTEND_URL="${3:-}"

if [ -z "$AUTHENTIK_URL" ] || [ -z "$ADMIN_PASSWORD" ] || [ -z "$FRONTEND_URL" ]; then
    echo "Usage: $0 <authentik-url> <admin-password> <frontend-url>"
    echo "Example: $0 https://authentik.example.com MyPassword123 https://app.example.com"
    exit 1
fi

# Remove trailing slash from URLs
AUTHENTIK_URL="${AUTHENTIK_URL%/}"
FRONTEND_URL="${FRONTEND_URL%/}"

echo "==========================================="
echo "Authentik OAuth Setup for Reflector"
echo "==========================================="
echo ""
echo "Authentik URL: $AUTHENTIK_URL"
echo "Frontend URL: $FRONTEND_URL"
echo ""

# Step 1: Create API token via Django shell
echo "Creating API token..."
cd ~/authentik || { echo "Error: ~/authentik directory not found"; exit 1; }

API_TOKEN=$(sudo docker compose exec -T server python -m manage shell 2>&1 << 'PYTHON' | grep "^TOKEN:" | cut -d: -f2
from authentik.core.models import User, Token, TokenIntents

user = User.objects.get(username='akadmin')
token, created = Token.objects.update_or_create(
    identifier='reflector-setup',
    defaults={
        'user': user,
        'intent': TokenIntents.INTENT_API,
        'description': 'Reflector setup token',
        'expiring': False
    }
)
print(f"TOKEN:{token.key}")
PYTHON
)

cd - > /dev/null

if [ -z "$API_TOKEN" ] || [ "$API_TOKEN" = "null" ]; then
    echo "Error: Failed to create API token"
    echo "Make sure Authentik is fully started and akadmin user exists"
    exit 1
fi
echo "  -> Got API token"

# Step 2: Get authorization flow UUID
echo "Getting authorization flow..."
FLOW_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/flows/instances/?slug=default-provider-authorization-implicit-consent" \
    -H "Authorization: Bearer $API_TOKEN")

FLOW_UUID=$(echo "$FLOW_RESPONSE" | jq -r '.results[0].pk')
if [ -z "$FLOW_UUID" ] || [ "$FLOW_UUID" = "null" ]; then
    echo "Error: Could not find authorization flow"
    echo "Response: $FLOW_RESPONSE"
    exit 1
fi
echo "  -> Flow UUID: $FLOW_UUID"

# Step 3: Get invalidation flow UUID
echo "Getting invalidation flow..."
INVALIDATION_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/flows/instances/?slug=default-provider-invalidation-flow" \
    -H "Authorization: Bearer $API_TOKEN")

INVALIDATION_UUID=$(echo "$INVALIDATION_RESPONSE" | jq -r '.results[0].pk')
if [ -z "$INVALIDATION_UUID" ] || [ "$INVALIDATION_UUID" = "null" ]; then
    echo "Warning: Could not find invalidation flow, using authorization flow"
    INVALIDATION_UUID="$FLOW_UUID"
fi
echo "  -> Invalidation UUID: $INVALIDATION_UUID"

# Step 4: Get scope mappings (email, openid, profile)
echo "Getting scope mappings..."
SCOPE_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/propertymappings/all/" \
    -H "Authorization: Bearer $API_TOKEN")

EMAIL_SCOPE=$(echo "$SCOPE_RESPONSE" | jq -r '.results[] | select(.name == "authentik default OAuth Mapping: OpenID '\''email'\''") | .pk')
OPENID_SCOPE=$(echo "$SCOPE_RESPONSE" | jq -r '.results[] | select(.name == "authentik default OAuth Mapping: OpenID '\''openid'\''") | .pk')
PROFILE_SCOPE=$(echo "$SCOPE_RESPONSE" | jq -r '.results[] | select(.name == "authentik default OAuth Mapping: OpenID '\''profile'\''") | .pk')
echo "  -> email: $EMAIL_SCOPE"
echo "  -> openid: $OPENID_SCOPE"
echo "  -> profile: $PROFILE_SCOPE"

# Step 5: Get signing key
echo "Getting signing key..."
CERT_RESPONSE=$(curl -s "$AUTHENTIK_URL/api/v3/crypto/certificatekeypairs/" \
    -H "Authorization: Bearer $API_TOKEN")
SIGNING_KEY=$(echo "$CERT_RESPONSE" | jq -r '.results[0].pk')
echo "  -> Signing key: $SIGNING_KEY"

# Step 6: Generate client credentials
CLIENT_ID="reflector"
CLIENT_SECRET=$(openssl rand -hex 32)

# Step 7: Create OAuth2 provider
echo "Creating OAuth2 provider..."
PROVIDER_RESPONSE=$(curl -s -X POST "$AUTHENTIK_URL/api/v3/providers/oauth2/" \
    -H "Authorization: Bearer $API_TOKEN" \
    -H "Content-Type: application/json" \
    -d "{
        \"name\": \"Reflector\",
        \"authorization_flow\": \"$FLOW_UUID\",
        \"invalidation_flow\": \"$INVALIDATION_UUID\",
        \"client_type\": \"confidential\",
        \"client_id\": \"$CLIENT_ID\",
        \"client_secret\": \"$CLIENT_SECRET\",
        \"redirect_uris\": [{
            \"matching_mode\": \"strict\",
            \"url\": \"$FRONTEND_URL/api/auth/callback/authentik\"
        }],
        \"property_mappings\": [\"$EMAIL_SCOPE\", \"$OPENID_SCOPE\", \"$PROFILE_SCOPE\"],
        \"signing_key\": \"$SIGNING_KEY\",
        \"access_token_validity\": \"hours=1\",
        \"refresh_token_validity\": \"days=30\"
    }")

PROVIDER_ID=$(echo "$PROVIDER_RESPONSE" | jq -r '.pk')
if [ -z "$PROVIDER_ID" ] || [ "$PROVIDER_ID" = "null" ]; then
    # Check if provider already exists
    if echo "$PROVIDER_RESPONSE" | grep -q "already exists"; then
        echo "  -> Provider already exists, updating..."
        EXISTING=$(curl -s "$AUTHENTIK_URL/api/v3/providers/oauth2/?name=Reflector" \
            -H "Authorization: Bearer $API_TOKEN")
        PROVIDER_ID=$(echo "$EXISTING" | jq -r '.results[0].pk')
        CLIENT_ID=$(echo "$EXISTING" | jq -r '.results[0].client_id')
        # Update secret and scopes
        curl -s -X PATCH "$AUTHENTIK_URL/api/v3/providers/oauth2/$PROVIDER_ID/" \
            -H "Authorization: Bearer $API_TOKEN" \
            -H "Content-Type: application/json" \
            -d "{
                \"client_secret\": \"$CLIENT_SECRET\",
                \"property_mappings\": [\"$EMAIL_SCOPE\", \"$OPENID_SCOPE\", \"$PROFILE_SCOPE\"],
                \"signing_key\": \"$SIGNING_KEY\"
            }" > /dev/null
    else
        echo "Error: Failed to create provider"
        echo "Response: $PROVIDER_RESPONSE"
        exit 1
    fi
fi
echo "  -> Provider ID: $PROVIDER_ID"

# Step 8: Create application
echo "Creating application..."
APP_RESPONSE=$(curl -s -X POST "$AUTHENTIK_URL/api/v3/core/applications/" \
    -H "Authorization: Bearer $API_TOKEN" \
    -H "Content-Type: application/json" \
    -d "{
        \"name\": \"Reflector\",
        \"slug\": \"reflector\",
        \"provider\": $PROVIDER_ID
    }")

if echo "$APP_RESPONSE" | grep -q "already exists"; then
    echo "  -> Application already exists"
else
    APP_SLUG=$(echo "$APP_RESPONSE" | jq -r '.slug')
    if [ -z "$APP_SLUG" ] || [ "$APP_SLUG" = "null" ]; then
        echo "Error: Failed to create application"
        echo "Response: $APP_RESPONSE"
        exit 1
    fi
    echo "  -> Application created: $APP_SLUG"
fi

# Step 9: Extract public key for JWT verification
echo "Extracting public key for JWT verification..."
mkdir -p server/reflector/auth/jwt/keys
curl -s "$AUTHENTIK_URL/application/o/reflector/jwks/" | \
    jq -r '.keys[0].x5c[0]' | \
    base64 -d | \
    openssl x509 -pubkey -noout > server/reflector/auth/jwt/keys/authentik_public.pem

if [ ! -s server/reflector/auth/jwt/keys/authentik_public.pem ]; then
    echo "Error: Failed to extract public key"
    exit 1
fi
echo "  -> Saved to server/reflector/auth/jwt/keys/authentik_public.pem"

# Step 10: Update environment files automatically
echo "Updating environment files..."

# Update server/.env
cat >> server/.env << EOF

# --- Authentik OAuth (added by setup script) ---
AUTH_BACKEND=jwt
AUTH_JWT_AUDIENCE=$CLIENT_ID
AUTH_JWT_PUBLIC_KEY=authentik_public.pem
# --- End JWT Configuration ---
EOF
echo "  -> Updated server/.env"

# Update www/.env
cat >> www/.env << EOF

# --- Authentik OAuth (added by setup script) ---
FEATURE_REQUIRE_LOGIN=true
AUTHENTIK_ISSUER=$AUTHENTIK_URL/application/o/reflector
AUTHENTIK_REFRESH_TOKEN_URL=$AUTHENTIK_URL/application/o/token/
AUTHENTIK_CLIENT_ID=$CLIENT_ID
AUTHENTIK_CLIENT_SECRET=$CLIENT_SECRET
# --- End Authentik Configuration ---
EOF
echo "  -> Updated www/.env"

# Step 11: Restart Reflector services
echo "Restarting Reflector services..."
docker compose -f docker-compose.prod.yml up -d server worker web

echo ""
echo "==========================================="
echo "Setup complete!"
echo "==========================================="
echo ""
echo "Authentik admin: $AUTHENTIK_URL"
echo "  Username: akadmin"
echo "  Password: (provided as argument)"
echo ""
echo "Frontend: $FRONTEND_URL"
echo "  Authentication is now required"
echo ""
echo "Note: Public key saved to server/reflector/auth/jwt/keys/authentik_public.pem"
echo "      and mounted via docker-compose volume."
echo ""
echo "==========================================="
echo "Configuration values (for reference):"
echo "==========================================="
echo ""
echo "# server/.env"
echo "AUTH_BACKEND=jwt"
echo "AUTH_JWT_AUDIENCE=$CLIENT_ID"
echo "AUTH_JWT_PUBLIC_KEY=authentik_public.pem"
echo ""
echo "# www/.env"
echo "FEATURE_REQUIRE_LOGIN=true"
echo "AUTHENTIK_ISSUER=$AUTHENTIK_URL/application/o/reflector"
echo "AUTHENTIK_REFRESH_TOKEN_URL=$AUTHENTIK_URL/application/o/token/"
echo "AUTHENTIK_CLIENT_ID=$CLIENT_ID"
echo "AUTHENTIK_CLIENT_SECRET=$CLIENT_SECRET"
echo ""
@@ -1,139 +0,0 @@
#
# This file serves as an example of possible configuration
# All the settings are described here: reflector/settings.py
#

## =======================================================
## Core Configuration (Required for Production)
## =======================================================

## Database (for docker-compose.prod.yml, use postgres hostname)
#DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector

## Redis (for docker-compose.prod.yml, use redis hostname)
#REDIS_HOST=redis
#REDIS_PORT=6379
#CELERY_BROKER_URL=redis://redis:6379/1
#CELERY_RESULT_BACKEND=redis://redis:6379/1

## Base URL - your API domain with https
#BASE_URL=https://api.example.com

## CORS - required when frontend and API are on different domains
#CORS_ORIGIN=https://app.example.com
#CORS_ALLOW_CREDENTIALS=true

## Secret key - generate with: openssl rand -hex 32
#SECRET_KEY=changeme-generate-a-secure-random-string

## =======================================================
## User authentication
## =======================================================

## Using jwt/authentik
AUTH_BACKEND=jwt
AUTH_JWT_AUDIENCE=

## =======================================================
## Transcription backend
##
## Check reflector/processors/audio_transcript_* for the
## full list of available transcription backends
## =======================================================

## Using local whisper
#TRANSCRIPT_BACKEND=whisper

## Using serverless modal.com (requires reflector-gpu-modal deployed)
#TRANSCRIPT_BACKEND=modal
#TRANSCRIPT_URL=https://xxxxx--reflector-transcriber-web.modal.run
#TRANSCRIPT_MODAL_API_KEY=xxxxx

TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-parakeet-web.modal.run
TRANSCRIPT_MODAL_API_KEY=

## =======================================================
## Translation backend
##
## Only available in modal atm
## =======================================================
TRANSLATION_BACKEND=modal
TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
#TRANSLATION_MODAL_API_KEY=xxxxx

## =======================================================
## LLM backend (Required)
##
## Responsible for generating titles, summaries, and topic detection
## Requires OpenAI API key
## =======================================================

## OpenAI API key - get from https://platform.openai.com/account/api-keys
LLM_API_KEY=sk-your-openai-api-key
LLM_MODEL=gpt-4o-mini

## Optional: Custom endpoint (defaults to OpenAI)
# LLM_URL=https://api.openai.com/v1

## Context size for summary generation (tokens)
LLM_CONTEXT_WINDOW=16000

## =======================================================
## Diarization
##
## Only available on modal
## To allow diarization, you need to expose the files to be downloaded by the pipeline
## =======================================================
DIARIZATION_ENABLED=false
DIARIZATION_BACKEND=modal
DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
#DIARIZATION_MODAL_API_KEY=xxxxx


## =======================================================
## Transcript Storage
##
## Where to store audio files and transcripts
## AWS S3 is required for production
## =======================================================
TRANSCRIPT_STORAGE_BACKEND=aws
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1


## =======================================================
## Sentry
## =======================================================

## Sentry DSN configuration
#SENTRY_DSN=

## =======================================================
## Video Platform Configuration
## =======================================================

## Whereby
#WHEREBY_API_KEY=your-whereby-api-key
#WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret
#WHEREBY_STORAGE_AWS_ACCESS_KEY_ID=your-aws-key
#WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret
#AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/...

## Daily.co
#DAILY_API_KEY=your-daily-api-key
#DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
#DAILY_SUBDOMAIN=your-subdomain
#DAILY_WEBHOOK_UUID= # Auto-populated by recreate_daily_webhook.py script
#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
#DAILYCO_STORAGE_AWS_REGION=us-west-2

## Whereby (optional separate bucket)
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby
#WHEREBY_STORAGE_AWS_REGION=us-east-1

## Platform Configuration
#DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms
@@ -1,29 +1,3 @@
## API Key Management

### Finding Your User ID

```bash
# Get your OAuth sub (user ID) - requires authentication
curl -H "Authorization: Bearer <your_jwt>" http://localhost:1250/v1/me
# Returns: {"sub": "your-oauth-sub-here", "email": "...", ...}
```

### Creating API Keys

```bash
curl -X POST http://localhost:1250/v1/user/api-keys \
  -H "Authorization: Bearer <your_jwt>" \
  -H "Content-Type: application/json" \
  -d '{"name": "My API Key"}'
```

### Using API Keys

```bash
# Use X-API-Key header instead of Authorization
curl -H "X-API-Key: <your_api_key>" http://localhost:1250/v1/transcripts
```

## AWS S3/SQS usage clarification

Whereby.com uploads recordings directly to our S3 bucket when meetings end.

212
server/contrib/jitsi/README.md
Normal file
@@ -0,0 +1,212 @@
# Event Logger for Docker-Jitsi-Meet

A Prosody module that logs Jitsi meeting events to JSONL files alongside recordings, enabling complete participant tracking and speaker statistics.

## Prerequisites

- Running docker-jitsi-meet installation
- Jibri configured for recording

## Installation

### Step 1: Copy the Module

Copy the Prosody module to your custom plugins directory:

```bash
# Create the directory if it doesn't exist
mkdir -p ~/.jitsi-meet-cfg/prosody/prosody-plugins-custom

# Copy the module
cp mod_event_logger.lua ~/.jitsi-meet-cfg/prosody/prosody-plugins-custom/
```

### Step 2: Update Your .env File

Add or modify these variables in your `.env` file:

```bash
# If XMPP_MUC_MODULES already exists, append event_logger
# Example: XMPP_MUC_MODULES=existing_module,event_logger
XMPP_MUC_MODULES=event_logger

# Optional: Configure the module (these are defaults)
JIBRI_RECORDINGS_PATH=/config/recordings
JIBRI_LOG_SPEAKER_STATS=true
JIBRI_SPEAKER_STATS_INTERVAL=10
```

**Important**: If you already have `XMPP_MUC_MODULES` defined, add `event_logger` to the comma-separated list:
```bash
# Existing modules + our module
XMPP_MUC_MODULES=mod_info,mod_alert,event_logger
```

### Step 3: Modify docker-compose.yml

Add a shared recordings volume so Prosody can write events alongside Jibri recordings:

```yaml
services:
  prosody:
    # ... existing configuration ...
    volumes:
      - ${CONFIG}/prosody/config:/config:Z
      - ${CONFIG}/prosody/prosody-plugins-custom:/prosody-plugins-custom:Z
      - ${CONFIG}/recordings:/config/recordings:Z  # Add this line
    environment:
      # Add if not using .env file
      - XMPP_MUC_MODULES=${XMPP_MUC_MODULES:-event_logger}
      - JIBRI_RECORDINGS_PATH=/config/recordings

  jibri:
    # ... existing configuration ...
    volumes:
      - ${CONFIG}/jibri:/config:Z
      - ${CONFIG}/recordings:/config/recordings:Z  # Add this line
    environment:
      # For Reflector webhook integration (optional)
      - REFLECTOR_WEBHOOK_URL=${REFLECTOR_WEBHOOK_URL:-}
      - JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
```

### Step 4: Add Finalize Script (Optional - For Reflector Integration)

If you want to notify Reflector when recordings complete:

```bash
# Copy the finalize script
cp finalize.sh ~/.jitsi-meet-cfg/jibri/finalize.sh
chmod +x ~/.jitsi-meet-cfg/jibri/finalize.sh

# Add to .env
REFLECTOR_WEBHOOK_URL=http://your-reflector-api:8000
```

### Step 5: Restart Services

```bash
docker-compose down
docker-compose up -d
```

## What Gets Created

After a recording, you'll find in `~/.jitsi-meet-cfg/recordings/{session-id}/`:
- `recording.mp4` - The video recording (created by Jibri)
- `metadata.json` - Basic metadata (created by Jibri)
- `events.jsonl` - Complete participant timeline (created by this module)

## Event Format

Each line in `events.jsonl` is a JSON object:

```json
{"type":"room_created","timestamp":1234567890,"room_name":"TestRoom","room_jid":"testroom@conference.meet.jitsi","meeting_url":"https://meet.jitsi/TestRoom"}
{"type":"recording_started","timestamp":1234567891,"room_name":"TestRoom","session_id":"20240115120000_TestRoom","jibri_jid":"jibri@recorder.meet.jitsi"}
{"type":"participant_joined","timestamp":1234567892,"room_name":"TestRoom","participant":{"jid":"user1@meet.jitsi/web","nick":"John Doe","id":"user1@meet.jitsi","is_moderator":false}}
{"type":"speaker_active","timestamp":1234567895,"room_name":"TestRoom","speaker_jid":"user1@meet.jitsi","speaker_nick":"John Doe","duration":10}
{"type":"participant_left","timestamp":1234567920,"room_name":"TestRoom","participant":{"jid":"user1@meet.jitsi/web","nick":"John Doe","duration_seconds":28}}
{"type":"recording_stopped","timestamp":1234567950,"room_name":"TestRoom","session_id":"20240115120000_TestRoom","meeting_url":"https://meet.jitsi/TestRoom"}
```
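
Because the file is line-delimited JSON, quick ad-hoc analysis works well with `jq`. A minimal sketch (field names taken from the examples above):

```bash
# Count events by type
jq -r '.type' events.jsonl | sort | uniq -c

# List who joined, and when
jq -r 'select(.type == "participant_joined")
       | "\(.timestamp) \(.participant.nick)"' events.jsonl
```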

## Configuration Options

All configuration can be done via environment variables:

| Environment Variable | Default | Description |
|---------------------|---------|-------------|
| `JIBRI_RECORDINGS_PATH` | `/config/recordings` | Path where recordings are stored |
| `JIBRI_LOG_SPEAKER_STATS` | `true` | Enable speaker statistics logging |
| `JIBRI_SPEAKER_STATS_INTERVAL` | `10` | Seconds between speaker stats updates |

## Verifying Installation

Check that the module is loaded:
```bash
docker-compose logs prosody | grep "Event Logger"
# Should see: "Event Logger loaded - writing to /config/recordings"
```

Check for events after a recording:
```bash
ls -la ~/.jitsi-meet-cfg/recordings/*/events.jsonl
cat ~/.jitsi-meet-cfg/recordings/*/events.jsonl | jq .
```

## Troubleshooting

### No events.jsonl file created

1. **Check module is enabled**:
   ```bash
   docker-compose exec prosody grep -r "event_logger" /config
   ```

2. **Verify volume permissions**:
   ```bash
   docker-compose exec prosody ls -la /config/recordings
   ```

3. **Check Prosody logs for errors**:
   ```bash
   docker-compose logs prosody | grep -i error
   ```

### Module not loading

1. **Verify file exists in container**:
   ```bash
   docker-compose exec prosody ls -la /prosody-plugins-custom/
   ```

2. **Check XMPP_MUC_MODULES format** (must be comma-separated, no spaces):
   - ✅ Correct: `XMPP_MUC_MODULES=mod1,mod2,event_logger`
   - ❌ Wrong: `XMPP_MUC_MODULES=mod1, mod2, event_logger`

## Common docker-compose.yml Patterns

### Minimal Addition (if you trust defaults)
```yaml
services:
  prosody:
    volumes:
      - ${CONFIG}/recordings:/config/recordings:Z  # Just add this
```

### Full Configuration
```yaml
services:
  prosody:
    volumes:
      - ${CONFIG}/prosody/config:/config:Z
      - ${CONFIG}/prosody/prosody-plugins-custom:/prosody-plugins-custom:Z
      - ${CONFIG}/recordings:/config/recordings:Z
    environment:
      - XMPP_MUC_MODULES=event_logger
      - JIBRI_RECORDINGS_PATH=/config/recordings
      - JIBRI_LOG_SPEAKER_STATS=true
      - JIBRI_SPEAKER_STATS_INTERVAL=10

  jibri:
    volumes:
      - ${CONFIG}/jibri:/config:Z
      - ${CONFIG}/recordings:/config/recordings:Z
    environment:
      - JIBRI_RECORDING_DIR=/config/recordings
      - JIBRI_FINALIZE_RECORDING_SCRIPT_PATH=/config/finalize.sh
```

## Integration with Reflector

The finalize.sh script will automatically notify Reflector when a recording completes if `REFLECTOR_WEBHOOK_URL` is set. Reflector will receive:

```json
{
  "session_id": "20240115120000_TestRoom",
  "path": "20240115120000_TestRoom",
  "meeting_url": "https://meet.jitsi/TestRoom"
}
```

Reflector then processes the recording along with the complete participant timeline from `events.jsonl`.
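
To test the integration without running a full recording, the same notification can be replayed by hand (a sketch; the endpoint path and payload are the ones used by finalize.sh):

```bash
curl -X POST "$REFLECTOR_WEBHOOK_URL/api/v1/jibri/recording-ready" \
  -H "Content-Type: application/json" \
  -d '{"session_id":"20240115120000_TestRoom","path":"20240115120000_TestRoom","meeting_url":"https://meet.jitsi/TestRoom"}'
```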
49
server/contrib/jitsi/finalize.sh
Executable file
@@ -0,0 +1,49 @@
#!/bin/bash
# Jibri finalize script to notify Reflector when recording is complete
# This script is called by Jibri with the recording directory as argument

RECORDING_PATH="$1"
SESSION_ID=$(basename "$RECORDING_PATH")
METADATA_FILE="$RECORDING_PATH/metadata.json"

# Extract meeting URL from Jibri's metadata
MEETING_URL=""
if [ -f "$METADATA_FILE" ]; then
    MEETING_URL=$(jq -r '.meeting_url' "$METADATA_FILE" 2>/dev/null || echo "")
fi

echo "[$(date)] Recording finalized: $RECORDING_PATH"
echo "[$(date)] Session ID: $SESSION_ID"
echo "[$(date)] Meeting URL: $MEETING_URL"

# Check if events.jsonl was created by our Prosody module
if [ -f "$RECORDING_PATH/events.jsonl" ]; then
    EVENT_COUNT=$(wc -l < "$RECORDING_PATH/events.jsonl")
    echo "[$(date)] Found events.jsonl with $EVENT_COUNT events"
else
    echo "[$(date)] Warning: No events.jsonl found"
fi

# Notify Reflector if webhook URL is configured
if [ -n "$REFLECTOR_WEBHOOK_URL" ]; then
    echo "[$(date)] Notifying Reflector at: $REFLECTOR_WEBHOOK_URL"

    RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "$REFLECTOR_WEBHOOK_URL/api/v1/jibri/recording-ready" \
        -H "Content-Type: application/json" \
        -d "{\"session_id\":\"$SESSION_ID\",\"path\":\"$SESSION_ID\",\"meeting_url\":\"$MEETING_URL\"}")

    HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
    BODY=$(echo "$RESPONSE" | sed '$d')

    if [ "$HTTP_CODE" = "200" ]; then
        echo "[$(date)] Reflector notified successfully"
        echo "[$(date)] Response: $BODY"
    else
        echo "[$(date)] Failed to notify Reflector. HTTP code: $HTTP_CODE"
        echo "[$(date)] Response: $BODY"
    fi
else
    echo "[$(date)] No REFLECTOR_WEBHOOK_URL configured, skipping notification"
fi

echo "[$(date)] Finalize script completed"
372
server/contrib/jitsi/mod_event_logger.lua
Normal file
@@ -0,0 +1,372 @@
local json = require "util.json"
local st = require "util.stanza"
local jid_bare = require "util.jid".bare

local recordings_path = os.getenv("JIBRI_RECORDINGS_PATH") or
    module:get_option_string("jibri_recordings_path", "/recordings")

-- room_jid -> { session_id, participants = {jid -> info} }
local active_recordings = {}
-- room_jid -> { participants = {jid -> info}, created_at }
local room_states = {}

local function get_timestamp()
    return os.time()
end

local function write_event(session_id, event)
    if not session_id then
        module:log("warn", "No session_id for event: %s", event.type)
        return
    end

    local session_dir = string.format("%s/%s", recordings_path, session_id)
    local event_file = string.format("%s/events.jsonl", session_dir)

    module:log("info", "Writing event %s to %s", event.type, event_file)

    -- Create directory
    local mkdir_cmd = string.format("mkdir -p '%s' 2>&1", session_dir)
    local mkdir_result = os.execute(mkdir_cmd)
    module:log("debug", "mkdir result: %s", tostring(mkdir_result))

    local file, err = io.open(event_file, "a")
    if file then
        local json_str = json.encode(event)
        file:write(json_str .. "\n")
        file:close()
        module:log("info", "Successfully wrote event %s", event.type)
    else
        module:log("error", "Failed to write event to %s: %s", event_file, err)
    end
end

local function extract_participant_info(occupant)
    local info = {
        jid = occupant.jid,
        bare_jid = occupant.bare_jid,
        nick = occupant.nick,
        display_name = nil,
        role = occupant.role
    }

    local presence = occupant:get_presence()
    if presence then
        local nick_element = presence:get_child("nick", "http://jabber.org/protocol/nick")
        if nick_element then
            info.display_name = nick_element:get_text()
        end

        local identity = presence:get_child("identity")
        if identity then
            local user = identity:get_child("user")
            if user then
                local name = user:get_child("name")
                if name then
                    info.display_name = name:get_text()
                end

                local id_element = user:get_child("id")
                if id_element then
                    info.id = id_element:get_text()
                end
            end
        end

        if not info.display_name and occupant.nick then
            local _, _, resource = occupant.nick:match("([^@]+)@([^/]+)/(.+)")
            if resource then
                info.display_name = resource
            end
        end
    end

    return info
end

local function get_room_participant_count(room)
    local count = 0
    for _ in room:each_occupant() do
        count = count + 1
    end
    return count
end

local function snapshot_room_participants(room)
    local participants = {}
    local total = 0
    local skipped = 0

    module:log("info", "Snapshotting room participants")

    for _, occupant in room:each_occupant() do
        total = total + 1
        -- Skip recorders (Jibri)
        if occupant.bare_jid and (occupant.bare_jid:match("^recorder@") or
                occupant.bare_jid:match("^jibri@")) then
            skipped = skipped + 1
        else
            local info = extract_participant_info(occupant)
            participants[occupant.jid] = info
            module:log("debug", "Added participant: %s", info.display_name or info.bare_jid)
        end
    end

    module:log("info", "Snapshot: %d total, %d participants", total, total - skipped)
    return participants
end

-- Import utility functions if available
local util = module:require "util";
local get_room_from_jid = util.get_room_from_jid;
local room_jid_match_rewrite = util.room_jid_match_rewrite;

-- Main IQ handler for Jibri stanzas
module:hook("pre-iq/full", function(event)
    local stanza = event.stanza
    if stanza.name ~= "iq" then
        return
    end

    local jibri = stanza:get_child('jibri', 'http://jitsi.org/protocol/jibri')
    if not jibri then
        return
    end

    module:log("info", "=== Jibri IQ intercepted ===")

    local action = jibri.attr.action
    local session_id = jibri.attr.session_id
    local room_jid = jibri.attr.room
    local recording_mode = jibri.attr.recording_mode
    local app_data = jibri.attr.app_data

    module:log("info", "Jibri %s - session: %s, room: %s, mode: %s",
        action or "?", session_id or "?", room_jid or "?", recording_mode or "?")

    if not room_jid or not session_id then
        module:log("warn", "Missing room_jid or session_id")
        return
    end

    -- Get the room using util function
    local room = get_room_from_jid(room_jid_match_rewrite(jid_bare(stanza.attr.to)))
    if not room then
        -- Try with the room_jid directly
        room = get_room_from_jid(room_jid)
    end

    if not room then
        module:log("error", "Room not found for jid: %s", room_jid)
        return
    end

    module:log("info", "Room found: %s", room:get_name() or room_jid)

    if action == "start" then
        module:log("info", "Recording START for session %s", session_id)

        -- Count and snapshot participants
        local participant_count = 0
        for _ in room:each_occupant() do
            participant_count = participant_count + 1
        end

        local participants = snapshot_room_participants(room)
        local participant_list = {}
        for jid, info in pairs(participants) do
            table.insert(participant_list, info)
        end

        active_recordings[room_jid] = {
            session_id = session_id,
            participants = participants,
            started_at = get_timestamp()
        }

        write_event(session_id, {
            type = "recording_started",
            timestamp = get_timestamp(),
            room_jid = room_jid,
            room_name = room:get_name(),
            session_id = session_id,
            recording_mode = recording_mode,
            app_data = app_data,
            participant_count = participant_count,
            participants_at_start = participant_list
        })

    elseif action == "stop" then
        module:log("info", "Recording STOP for session %s", session_id)

        local recording = active_recordings[room_jid]
        if recording and recording.session_id == session_id then
            write_event(session_id, {
                type = "recording_stopped",
                timestamp = get_timestamp(),
                room_jid = room_jid,
                room_name = room:get_name(),
                session_id = session_id,
                duration = get_timestamp() - recording.started_at,
                participant_count = get_room_participant_count(room)
            })

            active_recordings[room_jid] = nil
        else
            module:log("warn", "No active recording found for room %s", room_jid)
        end
    end
end);

-- Room and participant event hooks
local function setup_room_hooks(host_module)
    module:log("info", "Setting up room hooks on %s", host_module.host or "unknown")

    -- Room created
    host_module:hook("muc-room-created", function(event)
        local room = event.room
        local room_jid = room.jid

        room_states[room_jid] = {
            participants = {},
            created_at = get_timestamp()
        }

        module:log("info", "Room created: %s", room_jid)
    end)

    -- Room destroyed
    host_module:hook("muc-room-destroyed", function(event)
        local room = event.room
        local room_jid = room.jid

        room_states[room_jid] = nil
        active_recordings[room_jid] = nil

        module:log("info", "Room destroyed: %s", room_jid)
    end)

    -- Occupant joined
    host_module:hook("muc-occupant-joined", function(event)
        local room = event.room
        local occupant = event.occupant
        local room_jid = room.jid

        -- Skip recorders
        if occupant.bare_jid and (occupant.bare_jid:match("^recorder@") or
                occupant.bare_jid:match("^jibri@")) then
            return
        end

        local participant_info = extract_participant_info(occupant)

        -- Update room state
        if room_states[room_jid] then
            room_states[room_jid].participants[occupant.jid] = participant_info
        end

        -- Log to active recording if exists
        local recording = active_recordings[room_jid]
        if recording then
            recording.participants[occupant.jid] = participant_info

            write_event(recording.session_id, {
                type = "participant_joined",
                timestamp = get_timestamp(),
                room_jid = room_jid,
                room_name = room:get_name(),
                participant = participant_info,
                participant_count = get_room_participant_count(room)
            })
        end

        module:log("info", "Participant joined %s: %s (%d total)",
            room:get_name() or room_jid,
            participant_info.display_name or participant_info.bare_jid,
            get_room_participant_count(room))
    end)

    -- Occupant left
    host_module:hook("muc-occupant-left", function(event)
        local room = event.room
        local occupant = event.occupant
        local room_jid = room.jid

        -- Skip recorders
        if occupant.bare_jid and (occupant.bare_jid:match("^recorder@") or
                occupant.bare_jid:match("^jibri@")) then
            return
        end

        local participant_info = extract_participant_info(occupant)

        -- Update room state
        if room_states[room_jid] then
            room_states[room_jid].participants[occupant.jid] = nil
        end

        -- Log to active recording if exists
        local recording = active_recordings[room_jid]
        if recording then
            if recording.participants[occupant.jid] then
                recording.participants[occupant.jid] = nil
            end

            write_event(recording.session_id, {
                type = "participant_left",
                timestamp = get_timestamp(),
                room_jid = room_jid,
                room_name = room:get_name(),
                participant = participant_info,
                participant_count = get_room_participant_count(room)
            })
        end

        module:log("info", "Participant left %s: %s (%d remaining)",
            room:get_name() or room_jid,
            participant_info.display_name or participant_info.bare_jid,
            get_room_participant_count(room))
    end)
end

-- Module initialization
local current_host = module:get_host()
local host_type = module:get_host_type()

module:log("info", "Event Logger loading on %s (type: %s)", current_host, host_type or "unknown")
module:log("info", "Recording path: %s", recordings_path)

-- Setup room hooks based on host type
if host_type == "component" and current_host:match("^[^.]+%.") then
    setup_room_hooks(module)
else
    -- Try to find and hook to MUC component
    local process_host_module = util.process_host_module
    local muc_component_host = module:get_option_string("muc_component") or
        module:get_option_string("main_muc")

    if not muc_component_host then
        local possible_hosts = {
            "muc." .. current_host,
            "conference." .. current_host,
            "rooms." .. current_host
        }

        for _, host in ipairs(possible_hosts) do
            if prosody.hosts[host] then
                muc_component_host = host
                module:log("info", "Auto-detected MUC component: %s", muc_component_host)
                break
            end
        end
    end

    if muc_component_host then
        process_host_module(muc_component_host, function(host_module, host)
            module:log("info", "Hooking to MUC events on %s", host)
            setup_room_hooks(host_module)
        end)
    else
        module:log("error", "Could not find MUC component")
    end
end
@@ -1,421 +0,0 @@
# Daily.co pipeline

This document details every external call, storage operation, and database write that occurs when a new Daily.co recording is discovered.
It also covers common logic shared with other pipelines, so not everything here is Daily-specific.

**This doc was generated on 12.12.2025; things may have changed since.**

## Trigger

Two entry points, both converging to the same handler:

1. **Webhook**: Daily.co sends `POST /v1/daily/webhook` with `recording.ready-to-download`
2. **Polling**: `GET /recordings` (paginated, max 100/call) → filter new → convert to same payload format

Both produce `RecordingReadyPayload` and call `handleRecordingReady(payload)`.

```
┌─────────────────┐     ┌──────────────────────────┐
│  Daily Webhook  │────▶│  RecordingReadyPayload   │
│  (push)         │     │  {room_name, recording_id│
└─────────────────┘     │   tracks[], ...}         │
                        └────────────┬─────────────┘
┌─────────────────┐                  │
│ GET /recordings │                  ▼
│  (poll)         │────▶ convert ──▶ handleRecordingReady()
└─────────────────┘                  │
                                     ▼
                        ┌────────────────────────┐
                        │ process_multitrack_    │
                        │ recording pipeline     │
                        └────────────────────────┘
```

**Polling API**: `GET https://api.daily.co/v1/recordings`
- Pagination: `limit` (max 100), `starting_after`, `ending_before`
- Rate limit: ~2 req/sec
- Response: `{total_count, data: Recording[]}`
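
For reference, the paginated poll can be scripted with `curl` and `jq` (a minimal sketch, assuming `starting_after` takes the last recording id as with typical cursor APIs; `DAILY_API_KEY` is your API key):

```bash
#!/bin/bash
# Page through /recordings, 100 per call, staying under the ~2 req/sec limit
CURSOR=""
while :; do
  URL="https://api.daily.co/v1/recordings?limit=100${CURSOR:+&starting_after=$CURSOR}"
  PAGE=$(curl -s -H "Authorization: Bearer $DAILY_API_KEY" "$URL")
  COUNT=$(echo "$PAGE" | jq '.data | length')
  [ "$COUNT" -eq 0 ] && break
  echo "$PAGE" | jq -r '.data[].id'           # candidate recordings to filter
  CURSOR=$(echo "$PAGE" | jq -r '.data[-1].id')
  sleep 1                                      # rate-limit cushion
done
```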

```mermaid
flowchart TB
    subgraph Trigger["1. Recording Discovery - Daily.co Webhook"]
        DAILY_WEBHOOK["Daily.co sends POST /v1/daily/webhook<br/>type: recording.ready-to-download"]
        VERIFY["Verify X-Webhook-Signature (HMAC)"]
        PARSE["Parse DailyWebhookEvent<br/>Extract tracks[], room_name, recording_id"]
        FILTER["Filter audio tracks only<br/>track_keys = [t.s3Key for t in tracks if t.type == 'audio']"]
        DISPATCH["process_multitrack_recording.delay()"]

        DAILY_WEBHOOK --> VERIFY --> PARSE --> FILTER --> DISPATCH
    end

    subgraph Init["2. Recording Initialization"]
        FETCH_MEETING[DB READ: meetings_controller.get_by_room_name]
        FETCH_ROOM[DB READ: rooms_controller.get_by_name]
        DAILY_API_REC[Daily API: GET /recordings/recording_id]
        DAILY_API_PART[Daily API: GET /meetings/mtgSessionId/participants]
        CREATE_RECORDING[DB WRITE: recordings_controller.create]
        CREATE_TRANSCRIPT[DB WRITE: transcripts_controller.add]
        MAP_PARTICIPANTS[DB WRITE: transcript.participants upsert]
    end

    subgraph Pipeline["3. Processing Pipeline"]
        direction TB
        PAD[Track Padding & Mixdown]
        TRANSCRIBE[GPU: Transcription per track]
        TOPICS[LLM: Topic Detection]
        TITLE[LLM: Title Generation]
        SUMMARY[LLM: Summary Generation]
    end

    subgraph Storage["4. S3 Operations"]
        S3_PRESIGN[S3: generate_presigned_url for tracks]
        S3_UPLOAD_PADDED[S3 UPLOAD: padded tracks temp]
        S3_UPLOAD_MP3[S3 UPLOAD: audio.mp3]
        S3_DELETE_TEMP[S3 DELETE: cleanup temp files]
    end

    subgraph PostProcess["5. Post-Processing"]
        CONSENT[Consent check & cleanup]
        ZULIP[Zulip: send/update message]
        WEBHOOK_OUT[Webhook: POST to room.webhook_url]
    end

    Trigger --> Init --> Pipeline
    Pipeline --> Storage
    Pipeline --> PostProcess
```
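
The HMAC check in the discovery step can be reproduced outside the app for debugging (a sketch; the exact signed string — assumed here to be `timestamp.body` — and the output encoding should be checked against Daily.co's webhook docs and `verify_webhook_signature()`):

```bash
# $TS = X-Webhook-Timestamp header, $BODY = raw request body,
# $DAILY_WEBHOOK_SECRET = shared secret from the webhook config
EXPECTED=$(printf '%s.%s' "$TS" "$BODY" \
  | openssl dgst -sha256 -hmac "$DAILY_WEBHOOK_SECRET" -binary \
  | base64)
# Compare $EXPECTED against the X-Webhook-Signature header
```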

## Detailed Sequence: Daily.co Multitrack Recording

```mermaid
sequenceDiagram
    participant DailyCo as Daily.co
    participant API as FastAPI /v1/daily/webhook
    participant Worker as Celery Worker
    participant DB as PostgreSQL
    participant DailyAPI as Daily.co REST API
    participant S3 as AWS S3
    participant GPU as Modal.com GPU
    participant LLM as LLM Service
    participant WS as WebSocket
    participant Zulip as Zulip
    participant ExtWH as External Webhook

    Note over DailyCo,API: Phase 0: Webhook Receipt
    DailyCo->>API: POST /v1/daily/webhook
    Note right of DailyCo: X-Webhook-Signature, X-Webhook-Timestamp
    API->>API: verify_webhook_signature()
    API->>API: Extract audio track s3Keys from payload.tracks[]
    API->>Worker: process_multitrack_recording.delay()
    API-->>DailyCo: 200 OK

    Note over Worker,DailyAPI: Phase 1: Recording Initialization
    Worker->>DB: SELECT meeting WHERE room_name=?
    Worker->>DB: SELECT room WHERE name=?
    Worker->>DailyAPI: GET /recordings/{recording_id}
    DailyAPI-->>Worker: {mtgSessionId, ...}
    Worker->>DailyAPI: GET /meetings/{mtgSessionId}/participants
    DailyAPI-->>Worker: [{participant_id, user_name}, ...]
    Worker->>DB: INSERT INTO recording
    Worker->>DB: INSERT INTO transcript (status='idle')
    loop For each track_key (parse participant_id from filename)
        Worker->>DB: UPSERT transcript.participants[speaker=idx, name=X]
    end

    Note over Worker,S3: Phase 2: Track Padding
    Worker->>DB: UPDATE transcript SET status='processing'
    Worker->>WS: broadcast STATUS='processing'
    loop For each track in track_keys (N tracks)
        Worker->>S3: generate_presigned_url(track_key, DAILYCO_BUCKET)
        S3-->>Worker: presigned_url (2hr)
        Note over Worker: PyAV: read WebM, extract start_time
        Note over Worker: PyAV: adelay filter (pad silence)
        Worker->>S3: PUT file_pipeline/{id}/tracks/padded_{idx}.webm
        Worker->>S3: generate_presigned_url(padded_{idx}.webm)
    end

    Note over Worker,S3: Phase 3: Audio Mixdown
    Note over Worker: PyAV: amix filter → stereo MP3
    Worker->>DB: UPDATE transcript SET duration=X
    Worker->>WS: broadcast DURATION
    Worker->>S3: PUT {transcript_id}/audio.mp3
    Worker->>DB: UPDATE transcript SET audio_location='storage'

    Note over Worker: Phase 4: Waveform
    Note over Worker: Generate peaks from MP3
    Worker->>DB: UPDATE events+=WAVEFORM
    Worker->>WS: broadcast WAVEFORM

    Note over Worker,GPU: Phase 5: Transcription (N GPU calls)
    loop For each padded track URL (N tracks)
        Worker->>GPU: POST /v1/audio/transcriptions-from-url
        Note right of GPU: {audio_file_url, language, batch:true}
        GPU-->>Worker: {words: [{word, start, end}, ...]}
        Note over Worker: Assign speaker=track_idx to words
    end
    Note over Worker: Merge all words, sort by start time
    Worker->>DB: UPDATE events+=TRANSCRIPT
    Worker->>WS: broadcast TRANSCRIPT

    Note over Worker,S3: Cleanup temp files
    loop For each padded file
        Worker->>S3: DELETE padded_{idx}.webm
    end

    Note over Worker,LLM: Phase 6: Topic Detection (C LLM calls)
    Note over Worker: C = ceil(total_words / 300)
    loop For each 300-word chunk (C chunks)
        Worker->>LLM: TOPIC_PROMPT + words[i:i+300]
        Note right of LLM: "Extract main topic title + 2-sentence summary"
        LLM-->>Worker: TitleSummary{title, summary}
        Worker->>DB: UPSERT topics[]
        Worker->>DB: UPDATE events+=TOPIC
        Worker->>WS: broadcast TOPIC
    end

    Note over Worker,LLM: Phase 7a: Title Generation (1 LLM call)
    Note over Worker: Input: all TitleSummary[].title joined
    Worker->>LLM: TITLE_PROMPT
    Note right of LLM: "Generate concise title from topic titles"
    LLM-->>Worker: "Meeting Title"
    Worker->>DB: UPDATE transcript SET title=X
    Worker->>DB: UPDATE events+=FINAL_TITLE
    Worker->>WS: broadcast FINAL_TITLE

    Note over Worker,LLM: Phase 7b: Summary Generation (2+2M LLM calls)
    Note over Worker: Reconstruct full transcript from TitleSummary[].transcript
    opt If participants unknown
        Worker->>LLM: PARTICIPANTS_PROMPT
        LLM-->>Worker: ParticipantsResponse
    end
    Worker->>LLM: SUBJECTS_PROMPT (call #1)
    Note right of LLM: "Main high-level topics? Max 6"
    LLM-->>Worker: SubjectsResponse{subjects: ["A", "B", ...]}

    loop For each subject (M subjects, max 6)
        Worker->>LLM: DETAILED_SUBJECT_PROMPT (call #2..#1+M)
        Note right of LLM: "Info about 'A': decisions, actions, deadlines"
        LLM-->>Worker: detailed_response (discarded after next call)
        Worker->>LLM: PARAGRAPH_SUMMARY_PROMPT (call #2+M..#1+2M)
        Note right of LLM: "Summarize in 1 paragraph"
        LLM-->>Worker: paragraph → summaries[]
    end

    Worker->>LLM: RECAP_PROMPT (call #2+2M)
    Note right of LLM: "High-level quick recap, 1 paragraph"
    LLM-->>Worker: recap
    Note over Worker: long_summary = "# Quick recap\n{recap}\n# Summary\n**A**\n{para1}..."
    Note over Worker: short_summary = recap only
    Worker->>DB: UPDATE long_summary, short_summary
    Worker->>DB: UPDATE events+=FINAL_LONG_SUMMARY
    Worker->>WS: broadcast FINAL_LONG_SUMMARY
    Worker->>DB: UPDATE events+=FINAL_SHORT_SUMMARY
    Worker->>WS: broadcast FINAL_SHORT_SUMMARY

    Note over Worker,DB: Phase 8: Finalize
    Worker->>DB: UPDATE transcript SET status='ended'
    Worker->>DB: UPDATE events+=STATUS
    Worker->>WS: broadcast STATUS='ended'

    Note over Worker,ExtWH: Phase 9: Post-Processing Chain
    Worker->>DB: SELECT meeting_consent WHERE meeting_id=?
    alt Any consent denied
        Worker->>S3: DELETE tracks from DAILYCO_BUCKET
        Worker->>S3: DELETE audio.mp3 from TRANSCRIPT_BUCKET
        Worker->>DB: UPDATE transcript SET audio_deleted=true
    end

    opt Room has zulip_auto_post=true
        alt Existing zulip_message_id
            Worker->>Zulip: PATCH /api/v1/messages/{id}
        else New
            Worker->>Zulip: POST /api/v1/messages
            Zulip-->>Worker: {id}
            Worker->>DB: UPDATE transcript SET zulip_message_id=X
        end
    end

    opt Room has webhook_url
        Worker->>ExtWH: POST {webhook_url}
        Note right of ExtWH: X-Webhook-Signature: HMAC-SHA256
        Note right of ExtWH: Body: {transcript_id, room_id, ...}
    end
```
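
Phases 2-3 do the padding and mixdown with PyAV in-process; the same transformation can be reproduced with the equivalent ffmpeg filters for experimentation (a sketch — the delay values are hypothetical and in practice come from each track's WebM `start_time`):

```bash
# Pad one track's start with silence (e.g. participant joined 1.5s in)
ffmpeg -i track0.webm -af "adelay=1500:all=1" padded_0.webm

# Mix all padded tracks into one stereo MP3
ffmpeg -i padded_0.webm -i padded_1.webm \
  -filter_complex "amix=inputs=2:duration=longest" \
  -ac 2 audio.mp3
```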

## Title & Summary Generation Data Flow

```mermaid
flowchart TB
    subgraph Input["Input: TitleSummary[] from Topic Detection"]
        TS1["TitleSummary 1<br/>title: 'Q1 Budget'<br/>transcript: words[0:300]"]
        TS2["TitleSummary 2<br/>title: 'Product Launch'<br/>transcript: words[300:600]"]
        TS3["TitleSummary N..."]
    end

    subgraph TitleGen["Title Generation"]
        T1["Extract .title from each TitleSummary"]
        T2["Concatenate: '- Q1 Budget\n- Product Launch\n...'"]
        T3["LLM: TITLE_PROMPT\n'Generate concise title from topic titles'"]
        T4["Output: FinalTitle"]

        T1 --> T2 --> T3 --> T4
    end

    subgraph SummaryGen["Summary Generation"]
        direction TB

        subgraph Reconstruct["1. Reconstruct Full Transcript"]
            S1["For each TitleSummary.transcript.as_segments()"]
            S2["Map speaker ID → name"]
            S3["Build: 'Alice: hello\nBob: hi\n...'"]
            S1 --> S2 --> S3
        end

        subgraph Subjects["2. Extract Subjects - LLM call #1"]
            S4["LLM: SUBJECTS_PROMPT\n'Main high-level topics? Max 6'"]
            S5["subjects[] = ['Budget Review', ...]"]
            S4 --> S5
        end

        subgraph DetailedSum["3. Per-Subject Summary - LLM calls #2 to #(1+2M)"]
            S6["For each subject:"]
            S7["LLM: DETAILED_SUBJECT_PROMPT\n'Info about subject: decisions, actions...'"]
            S8["detailed_response - NOT STORED"]
            S9["LLM: PARAGRAPH_SUMMARY_PROMPT\n'Summarize in 1 paragraph'"]
            S10["paragraph → summaries[]"]

            S6 --> S7 --> S8 --> S9 --> S10
        end

        subgraph Recap["4. Generate Recap - LLM call #(2+2M)"]
            S11["Concatenate paragraph summaries"]
            S12["LLM: RECAP_PROMPT\n'High-level recap, 1 paragraph'"]
            S13["recap"]
            S11 --> S12 --> S13
        end

        subgraph Output["5. Output"]
            S14["long_summary = markdown:\n# Quick recap\n[recap]\n# Summary\n**Subject 1**\n[para1]..."]
            S15["short_summary = recap only"]
            S14 --> S15
        end

        Reconstruct --> Subjects --> DetailedSum --> Recap --> Output
    end

    Input --> TitleGen
    Input --> SummaryGen
```

### topics[] vs subjects[]

| | topics[] | subjects[] |
|-|----------|------------|
| **Source** | 300-word chunk splitting | LLM extraction from full text |
| **Count** | Variable (words / 300) | Max 6 |
| **Purpose** | Timeline segmentation | Summary structure |
| **Has timestamp?** | Yes | No |

## External API Calls Summary

### 1. Daily.co REST API (called during initialization)

| Endpoint | Method | When | Purpose |
|----------|--------|------|---------|
| `GET /recordings/{recording_id}` | GET | After webhook | Get mtgSessionId for participant lookup |
| `GET /meetings/{mtgSessionId}/participants` | GET | After above | Map participant_id → user_name |

### 2. GPU Service (Modal.com or Self-Hosted)

| Endpoint | Method | Count | Request |
|----------|--------|-------|---------|
| `{TRANSCRIPT_URL}/v1/audio/transcriptions-from-url` | POST | **N** (N = num tracks) | `{audio_file_url, language, batch: true}` |

**Note**: Diarization is NOT called for multitrack - speaker identification comes from separate tracks.

### 3. LLM Service (OpenAI-compatible via LlamaIndex)

| Phase | Operation | Input | LLM Calls | Output |
|-------|-----------|-------|-----------|--------|
| Topic Detection | TOPIC_PROMPT per 300-word chunk | words[i:i+300] | **C** = ceil(words/300) | TitleSummary{title, summary, timestamp} |
| Title Generation | TITLE_PROMPT | All topic titles joined | **1** | FinalTitle |
| Participant ID | PARTICIPANTS_PROMPT | Full transcript | **0-1** (skipped if known) | ParticipantsResponse |
| Subject Extraction | SUBJECTS_PROMPT | Full transcript | **1** | SubjectsResponse{subjects[]} |
| Subject Detail | DETAILED_SUBJECT_PROMPT | Full transcript + subject name | **M** (M = subjects, max 6) | detailed text (discarded) |
| Subject Paragraph | PARAGRAPH_SUMMARY_PROMPT | Detailed text | **M** | paragraph text → summaries[] |
| Recap | RECAP_PROMPT | All paragraph summaries | **1** | recap text |

**Total LLM calls**: C + 2M + 3 (+ 1 if participants unknown)
- Short meeting (1000 words, 3 subjects): ~4 + 6 + 3 = **13 calls**
- Long meeting (5000 words, 6 subjects): ~17 + 12 + 3 = **32 calls**

## S3 Operations Summary

### Source Bucket: `DAILYCO_STORAGE_AWS_BUCKET_NAME`
Daily.co uploads raw-tracks recordings here.

| Operation | Key Pattern | When |
|-----------|-------------|------|
| **READ** (presign) | `{domain}/{room_name}/{ts}/{participant_id}-cam-audio-{ts}.webm` | Track acquisition |
| **DELETE** | Same as above | Consent denied cleanup |

### Transcript Storage Bucket: `TRANSCRIPT_STORAGE_AWS_BUCKET_NAME`
Reflector's own storage.

| Operation | Key Pattern | When |
|-----------|-------------|------|
| **PUT** | `file_pipeline/{transcript_id}/tracks/padded_{idx}.webm` | After track padding |
| **READ** (presign) | Same | For GPU transcription |
| **DELETE** | Same | After transcription complete |
| **PUT** | `{transcript_id}/audio.mp3` | After mixdown |
| **DELETE** | Same | Consent denied cleanup |
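
The presign steps above map directly onto the AWS CLI, which is handy for reproducing a download URL when debugging (a sketch; `TRANSCRIPT_ID` is an illustrative shell variable, and 7200s matches the 2-hour expiry used by the pipeline):

```bash
aws s3 presign \
  "s3://$TRANSCRIPT_STORAGE_AWS_BUCKET_NAME/file_pipeline/$TRANSCRIPT_ID/tracks/padded_0.webm" \
  --expires-in 7200
```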

## Database Operations

### Tables Written

| Table | Operation | When |
|-------|-----------|------|
| `recording` | INSERT | Initialization |
| `transcript` | INSERT | Initialization |
| `transcript` | UPDATE (participants) | After Daily API participant fetch |
| `transcript` | UPDATE (status, events, duration, topics, title, summaries, etc.) | Throughout pipeline |

### Transcript Update Sequence

```
1.  INSERT: id, name, status='idle', source_kind='room', user_id, recording_id, room_id, meeting_id
2.  UPDATE: participants[] (speaker index → participant name mapping)
3.  UPDATE: status='processing', events+=[{event:'STATUS', data:{value:'processing'}}]
4.  UPDATE: duration=X, events+=[{event:'DURATION', data:{duration:X}}]
5.  UPDATE: audio_location='storage'
6.  UPDATE: events+=[{event:'WAVEFORM', data:{waveform:[...]}}]
7.  UPDATE: events+=[{event:'TRANSCRIPT', data:{text, translation}}]
8.  UPDATE: topics[]+=topic, events+=[{event:'TOPIC'}]  -- repeated per chunk
9.  UPDATE: title=X, events+=[{event:'FINAL_TITLE'}]
10. UPDATE: long_summary=X, events+=[{event:'FINAL_LONG_SUMMARY'}]
11. UPDATE: short_summary=X, events+=[{event:'FINAL_SHORT_SUMMARY'}]
12. UPDATE: status='ended', events+=[{event:'STATUS', data:{value:'ended'}}]
13. UPDATE: zulip_message_id=X  -- if Zulip enabled
14. UPDATE: audio_deleted=true  -- if consent denied
```

## WebSocket Events

All broadcast to room `ts:{transcript_id}`:

| Event | Payload | Trigger |
|-------|---------|---------|
| STATUS | `{value: "processing"\|"ended"\|"error"}` | Status transitions |
| DURATION | `{duration: float}` | After audio processing |
| WAVEFORM | `{waveform: float[]}` | After waveform generation |
| TRANSCRIPT | `{text: string, translation: string\|null}` | After transcription merge |
| TOPIC | `{id, title, summary, timestamp, duration, transcript, words}` | Per topic detected |
| FINAL_TITLE | `{title: string}` | After LLM title generation |
| FINAL_LONG_SUMMARY | `{long_summary: string}` | After LLM summary |
| FINAL_SHORT_SUMMARY | `{short_summary: string}` | After LLM recap |

User-room broadcasts to `user:{user_id}`:
- `TRANSCRIPT_STATUS`
- `TRANSCRIPT_FINAL_TITLE`
- `TRANSCRIPT_DURATION`