Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-20 20:29:06 +00:00)

Compare commits: feat/durab...mathieu/sq (32 commits)
| SHA1 |
|---|
| 8fcfac80fa |
| 9b3da4b2c8 |
| d86dc59bf2 |
| b7f8e8ef8d |
| 27f19ec6ba |
| 2aa99fe846 |
| df909363f5 |
| ad2accb574 |
| a07c621bcd |
| f51dae8da3 |
| b217c7ba41 |
| 0b2152ea75 |
| e0c71c5548 |
| a883df0d63 |
| 1c9e8b9cde |
| 27b3b9cdee |
| 8ad1270229 |
| 617a1c8b32 |
| 60cc2b16ae |
| 606c5f5059 |
| 5e036d17b6 |
| 04a9c2f2f7 |
| fb5bb39716 |
| 4f70a7f593 |
| 224e40225d |
| 24980de4e0 |
| 7f178b5f9e |
| 1520f88e9e |
| 9b90aaa57f |
| d21b65e4e8 |
| 45d1608950 |
| 06639d4d8f |

.github/workflows/deploy.yml (vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
name: Deploy to Amazon ECS

on: [workflow_dispatch]

env:
  # 950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector
  AWS_REGION: us-east-1
  ECR_REPOSITORY: reflector

jobs:
  build:
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: linux-amd64
            arch: amd64
          - platform: linux/arm64
            runner: linux-arm64
            arch: arm64

    runs-on: ${{ matrix.runner }}

    permissions:
      contents: read

    outputs:
      registry: ${{ steps.login-ecr.outputs.registry }}

    steps:
      - uses: actions/checkout@v4

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push ${{ matrix.arch }}
        uses: docker/build-push-action@v5
        with:
          context: server
          platforms: ${{ matrix.platform }}
          push: true
          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
          provenance: false

  create-manifest:
    runs-on: ubuntu-latest
    needs: [build]

    permissions:
      deployments: write
      contents: read

    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        uses: aws-actions/amazon-ecr-login@v2

      - name: Create and push multi-arch manifest
        run: |
          # Get the registry URL (since we can't easily access job outputs in matrix)
          ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com

          docker manifest create \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64

          docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest

          echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"

.github/workflows/dockerhub-backend.yml (vendored, deleted, 53 lines)
@@ -1,53 +0,0 @@
name: Build and Push Backend Docker Image (Docker Hub)

on:
  push:
    tags:
      - "v*"
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: monadicalsas/reflector-backend

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: monadicalsas
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: ./server
          file: ./server/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64

.github/workflows/dockerhub-frontend.yml (vendored, deleted, 70 lines)
@@ -1,70 +0,0 @@
name: Build and Push Frontend Docker Image

on:
  push:
    tags:
      - "v*"
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: monadicalsas/reflector-frontend

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    permissions:
      contents: read

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: monadicalsas
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=raw,value=latest,enable={{is_default_branch}}
          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: ./www
          file: ./www/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64

  deploy:
    needs: build-and-push
    runs-on: ubuntu-latest
    if: success()
    strategy:
      matrix:
        environment: [reflector-monadical, reflector-media]
    environment: ${{ matrix.environment }}
    steps:
      - name: Trigger Coolify deployment
        run: |
          curl -X POST "${{ secrets.COOLIFY_WEBHOOK_URL }}" \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer ${{ secrets.COOLIFY_WEBHOOK_TOKEN }}" \
            -f || (echo "Failed to trigger Coolify deployment for ${{ matrix.environment }}" && exit 1)

.gitignore (vendored, 1 line changed)
@@ -18,4 +18,3 @@ CLAUDE.local.md
www/.env.development
www/.env.production
.playwright-mcp
.secrets

@@ -1,24 +0,0 @@
# Example secrets file for GitHub Actions workflows
# Copy this to .secrets and fill in your values
# These secrets should be configured in GitHub repository settings:
# Settings > Secrets and variables > Actions

# DockerHub Configuration (required for frontend and backend deployment)
# Create a Docker Hub access token at https://hub.docker.com/settings/security
# Username: monadicalsas
DOCKERHUB_TOKEN=your-dockerhub-access-token

# GitHub Token (required for frontend and backend deployment)
# Used by docker/metadata-action for extracting image metadata
# Can use the default GITHUB_TOKEN or create a personal access token
GITHUB_TOKEN=your-github-token-or-use-default-GITHUB_TOKEN

# Coolify Deployment Webhook (required for frontend deployment)
# Used to trigger automatic deployment after image push
# Configure these secrets in GitHub Environments:
# Each environment should have:
# - COOLIFY_WEBHOOK_URL: The webhook URL for that specific deployment
# - COOLIFY_WEBHOOK_TOKEN: The webhook token (can be the same for both if using same token)

# Optional: GitHub Actions Cache Token (for local testing with act)
GHA_CACHE_TOKEN=your-github-token-or-empty

CHANGELOG.md (164 lines changed)
@@ -1,169 +1,5 @@
# Changelog

## [0.24.0](https://github.com/Monadical-SAS/reflector/compare/v0.23.2...v0.24.0) (2025-12-18)

### Features

* identify action items ([#790](https://github.com/Monadical-SAS/reflector/issues/790)) ([964cd78](https://github.com/Monadical-SAS/reflector/commit/964cd78bb699d83d012ae4b8c96565df25b90a5d))

### Bug Fixes

* automatically reprocess daily recordings ([#797](https://github.com/Monadical-SAS/reflector/issues/797)) ([5f458aa](https://github.com/Monadical-SAS/reflector/commit/5f458aa4a7ec3d00ca5ec49d62fcc8ad232b138e))
* daily video optimisation ([#789](https://github.com/Monadical-SAS/reflector/issues/789)) ([16284e1](https://github.com/Monadical-SAS/reflector/commit/16284e1ac3faede2b74f0d91b50c0b5612af2c35))
* main menu login ([#800](https://github.com/Monadical-SAS/reflector/issues/800)) ([0bc971b](https://github.com/Monadical-SAS/reflector/commit/0bc971ba966a52d719c8c240b47dc7b3bdea4391))
* retry on workflow timeout ([#798](https://github.com/Monadical-SAS/reflector/issues/798)) ([5f7dfad](https://github.com/Monadical-SAS/reflector/commit/5f7dfadabd3e8017406ad3720ba495a59963ee34))

## [0.23.2](https://github.com/Monadical-SAS/reflector/compare/v0.23.1...v0.23.2) (2025-12-11)

### Bug Fixes

* build on push tags ([#785](https://github.com/Monadical-SAS/reflector/issues/785)) ([d7f140b](https://github.com/Monadical-SAS/reflector/commit/d7f140b7d1f4660d5da7a0da1357f68869e0b5cd))

## [0.23.1](https://github.com/Monadical-SAS/reflector/compare/v0.23.0...v0.23.1) (2025-12-11)

### Bug Fixes

* populate room_name in transcript GET endpoint ([#783](https://github.com/Monadical-SAS/reflector/issues/783)) ([0eba147](https://github.com/Monadical-SAS/reflector/commit/0eba1470181c7b9e0a79964a1ef28c09bcbdd9d7))

## [0.23.0](https://github.com/Monadical-SAS/reflector/compare/v0.22.4...v0.23.0) (2025-12-10)

### Features

* dockerhub ci ([#772](https://github.com/Monadical-SAS/reflector/issues/772)) ([00549f1](https://github.com/Monadical-SAS/reflector/commit/00549f153ade922cf4cb6c5358a7d11a39c426d2))
* llm retries ([#739](https://github.com/Monadical-SAS/reflector/issues/739)) ([61f0e29](https://github.com/Monadical-SAS/reflector/commit/61f0e29d4c51eab54ee67af92141fbb171e8ccaa))

### Bug Fixes

* celery inspect bug sidestep in restart script ([#766](https://github.com/Monadical-SAS/reflector/issues/766)) ([ec17ed7](https://github.com/Monadical-SAS/reflector/commit/ec17ed7b587cf6ee143646baaee67a7c017044d4))
* deploy frontend to coolify ([#779](https://github.com/Monadical-SAS/reflector/issues/779)) ([91650ec](https://github.com/Monadical-SAS/reflector/commit/91650ec65f65713faa7ee0dcfb75af427b7c4ba0))
* hide rooms settings instead of disabling ([#763](https://github.com/Monadical-SAS/reflector/issues/763)) ([3ad78be](https://github.com/Monadical-SAS/reflector/commit/3ad78be7628c0d029296b301a0e87236c76b7598))
* return participant emails from transcript endpoint ([#769](https://github.com/Monadical-SAS/reflector/issues/769)) ([d3a5cd1](https://github.com/Monadical-SAS/reflector/commit/d3a5cd12d2d0d9c32af2d5bd9322e030ef69b85d))

## [0.22.4](https://github.com/Monadical-SAS/reflector/compare/v0.22.3...v0.22.4) (2025-12-02)

### Bug Fixes

* Multitrack mixdown optimisation 2 ([#764](https://github.com/Monadical-SAS/reflector/issues/764)) ([bd5df1c](https://github.com/Monadical-SAS/reflector/commit/bd5df1ce2ebf35d7f3413b295e56937a9a28ef7b))

## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02)

### Bug Fixes

* align daily room settings ([#759](https://github.com/Monadical-SAS/reflector/issues/759)) ([28f87c0](https://github.com/Monadical-SAS/reflector/commit/28f87c09dc459846873d0dde65b03e3d7b2b9399))

## [0.22.2](https://github.com/Monadical-SAS/reflector/compare/v0.22.1...v0.22.2) (2025-12-02)

### Bug Fixes

* daily auto refresh fix ([#755](https://github.com/Monadical-SAS/reflector/issues/755)) ([fe47c46](https://github.com/Monadical-SAS/reflector/commit/fe47c46489c5aa0cc538109f7559cc9accb35c01))
* Skip mixdown for multitrack ([#760](https://github.com/Monadical-SAS/reflector/issues/760)) ([b51b7aa](https://github.com/Monadical-SAS/reflector/commit/b51b7aa9176c1a53ba57ad99f5e976c804a1e80c))

## [0.22.1](https://github.com/Monadical-SAS/reflector/compare/v0.22.0...v0.22.1) (2025-11-27)

### Bug Fixes

* participants update from daily ([#749](https://github.com/Monadical-SAS/reflector/issues/749)) ([7f0b728](https://github.com/Monadical-SAS/reflector/commit/7f0b728991c1b9f9aae702c96297eae63b561ef5))

## [0.22.0](https://github.com/Monadical-SAS/reflector/compare/v0.21.0...v0.22.0) (2025-11-26)

### Features

* Multitrack segmentation ([#747](https://github.com/Monadical-SAS/reflector/issues/747)) ([d63040e](https://github.com/Monadical-SAS/reflector/commit/d63040e2fdc07e7b272e85a39eb2411cd6a14798))

## [0.21.0](https://github.com/Monadical-SAS/reflector/compare/v0.20.0...v0.21.0) (2025-11-26)

### Features

* add transcript format parameter to GET endpoint ([#709](https://github.com/Monadical-SAS/reflector/issues/709)) ([f6ca075](https://github.com/Monadical-SAS/reflector/commit/f6ca07505f34483b02270a2ef3bd809e9d2e1045))

## [0.20.0](https://github.com/Monadical-SAS/reflector/compare/v0.19.0...v0.20.0) (2025-11-25)

### Features

* link transcript participants ([#737](https://github.com/Monadical-SAS/reflector/issues/737)) ([9bec398](https://github.com/Monadical-SAS/reflector/commit/9bec39808fc6322612d8b87e922a6f7901fc01c1))
* transcript restart script ([#742](https://github.com/Monadical-SAS/reflector/issues/742)) ([86d5e26](https://github.com/Monadical-SAS/reflector/commit/86d5e26224bb55a0f1cc785aeda52065bb92ee6f))

## [0.19.0](https://github.com/Monadical-SAS/reflector/compare/v0.18.0...v0.19.0) (2025-11-25)

### Features

* dailyco api module ([#725](https://github.com/Monadical-SAS/reflector/issues/725)) ([4287f8b](https://github.com/Monadical-SAS/reflector/commit/4287f8b8aeee60e51db7539f4dcbda5f6e696bd8))
* dailyco poll ([#730](https://github.com/Monadical-SAS/reflector/issues/730)) ([8e438ca](https://github.com/Monadical-SAS/reflector/commit/8e438ca285152bd48fdc42767e706fb448d3525c))
* multitrack cli ([#735](https://github.com/Monadical-SAS/reflector/issues/735)) ([11731c9](https://github.com/Monadical-SAS/reflector/commit/11731c9d38439b04e93b1c3afbd7090bad11a11f))

### Bug Fixes

* default platform fix ([#736](https://github.com/Monadical-SAS/reflector/issues/736)) ([c442a62](https://github.com/Monadical-SAS/reflector/commit/c442a627873ca667656eeaefb63e54ab10b8d19e))
* parakeet vad not getting the end timestamp ([#728](https://github.com/Monadical-SAS/reflector/issues/728)) ([18ed713](https://github.com/Monadical-SAS/reflector/commit/18ed7133693653ef4ddac6c659a8c14b320d1657))
* start raw tracks recording ([#729](https://github.com/Monadical-SAS/reflector/issues/729)) ([3e47c2c](https://github.com/Monadical-SAS/reflector/commit/3e47c2c0573504858e0d2e1798b6ed31f16b4a5d))

## [0.18.0](https://github.com/Monadical-SAS/reflector/compare/v0.17.0...v0.18.0) (2025-11-14)

### Features

* daily QOL: participants dictionary ([#721](https://github.com/Monadical-SAS/reflector/issues/721)) ([b20cad7](https://github.com/Monadical-SAS/reflector/commit/b20cad76e69fb6a76405af299a005f1ddcf60eae))

### Bug Fixes

* add proccessing page to file upload and reprocessing ([#650](https://github.com/Monadical-SAS/reflector/issues/650)) ([28a7258](https://github.com/Monadical-SAS/reflector/commit/28a7258e45317b78e60e6397be2bc503647eaace))
* copy transcript ([#674](https://github.com/Monadical-SAS/reflector/issues/674)) ([a9a4f32](https://github.com/Monadical-SAS/reflector/commit/a9a4f32324f66c838e081eee42bb9502f38c1db1))

## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13)

### Features

* add API key management UI ([#716](https://github.com/Monadical-SAS/reflector/issues/716)) ([372202b](https://github.com/Monadical-SAS/reflector/commit/372202b0e1a86823900b0aa77be1bfbc2893d8a1))
* daily.co support as alternative to whereby ([#691](https://github.com/Monadical-SAS/reflector/issues/691)) ([1473fd8](https://github.com/Monadical-SAS/reflector/commit/1473fd82dc472c394cbaa2987212ad662a74bcac))

## [0.16.0](https://github.com/Monadical-SAS/reflector/compare/v0.15.0...v0.16.0) (2025-10-24)

### Features

* search date filter ([#710](https://github.com/Monadical-SAS/reflector/issues/710)) ([962c40e](https://github.com/Monadical-SAS/reflector/commit/962c40e2b6428ac42fd10aea926782d7a6f3f902))

## [0.15.0](https://github.com/Monadical-SAS/reflector/compare/v0.14.0...v0.15.0) (2025-10-20)

### Features

* api tokens ([#705](https://github.com/Monadical-SAS/reflector/issues/705)) ([9a258ab](https://github.com/Monadical-SAS/reflector/commit/9a258abc0209b0ac3799532a507ea6a9125d703a))

## [0.14.0](https://github.com/Monadical-SAS/reflector/compare/v0.13.1...v0.14.0) (2025-10-08)

### Features

* Add calendar event data to transcript webhook payload ([#689](https://github.com/Monadical-SAS/reflector/issues/689)) ([5f6910e](https://github.com/Monadical-SAS/reflector/commit/5f6910e5131b7f28f86c9ecdcc57fed8412ee3cd))
* container build for www / github ([#672](https://github.com/Monadical-SAS/reflector/issues/672)) ([969bd84](https://github.com/Monadical-SAS/reflector/commit/969bd84fcc14851d1a101412a0ba115f1b7cde82))
* docker-compose for production frontend ([#664](https://github.com/Monadical-SAS/reflector/issues/664)) ([5bf64b5](https://github.com/Monadical-SAS/reflector/commit/5bf64b5a41f64535e22849b4bb11734d4dbb4aae))

### Bug Fixes

* restore feature boolean logic ([#671](https://github.com/Monadical-SAS/reflector/issues/671)) ([3660884](https://github.com/Monadical-SAS/reflector/commit/36608849ec64e953e3be456172502762e3c33df9))
* security review ([#656](https://github.com/Monadical-SAS/reflector/issues/656)) ([5d98754](https://github.com/Monadical-SAS/reflector/commit/5d98754305c6c540dd194dda268544f6d88bfaf8))
* update transcript list on reprocess ([#676](https://github.com/Monadical-SAS/reflector/issues/676)) ([9a71af1](https://github.com/Monadical-SAS/reflector/commit/9a71af145ee9b833078c78d0c684590ab12e9f0e))
* upgrade nemo toolkit ([#678](https://github.com/Monadical-SAS/reflector/issues/678)) ([eef6dc3](https://github.com/Monadical-SAS/reflector/commit/eef6dc39037329b65804297786d852dddb0557f9))

## [0.13.1](https://github.com/Monadical-SAS/reflector/compare/v0.13.0...v0.13.1) (2025-09-22)

@@ -151,7 +151,7 @@ All endpoints prefixed `/v1/`:

**Frontend** (`www/.env`):
- `NEXTAUTH_URL`, `NEXTAUTH_SECRET` - Authentication configuration
- `REFLECTOR_API_URL` - Backend API endpoint
- `NEXT_PUBLIC_REFLECTOR_API_URL` - Backend API endpoint
- `REFLECTOR_DOMAIN_CONFIG` - Feature flags and domain settings

## Testing Strategy

README.md (31 lines changed)
@@ -168,19 +168,6 @@ You can manually process an audio file by calling the process tool:
uv run python -m reflector.tools.process path/to/audio.wav
```

## Reprocessing any transcription

```bash
uv run -m reflector.tools.process_transcript 81ec38d1-9dd7-43d2-b3f8-51f4d34a07cd --sync
```

## Build-time env variables

Next.js projects typically use NEXT_PUBLIC_-prefixed build-time variables. We don't use those because we need to serve a customizable prebuilt Docker container.

Instead, all variables are runtime. Variables needed by the frontend are served to the frontend app at initial render.

It also means there is no static prebuild and no static JS/HTML files to serve.

## Feature Flags

@@ -190,24 +177,24 @@ Reflector uses environment variable-based feature flags to control application f

| Feature Flag | Environment Variable |
|-------------|---------------------|
| `requireLogin` | `FEATURE_REQUIRE_LOGIN` |
| `privacy` | `FEATURE_PRIVACY` |
| `browse` | `FEATURE_BROWSE` |
| `sendToZulip` | `FEATURE_SEND_TO_ZULIP` |
| `rooms` | `FEATURE_ROOMS` |
| `requireLogin` | `NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN` |
| `privacy` | `NEXT_PUBLIC_FEATURE_PRIVACY` |
| `browse` | `NEXT_PUBLIC_FEATURE_BROWSE` |
| `sendToZulip` | `NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP` |
| `rooms` | `NEXT_PUBLIC_FEATURE_ROOMS` |

### Setting Feature Flags

Feature flags are controlled via environment variables using the pattern `FEATURE_{FEATURE_NAME}` where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.
Feature flags are controlled via environment variables using the pattern `NEXT_PUBLIC_FEATURE_{FEATURE_NAME}` where `{FEATURE_NAME}` is the SCREAMING_SNAKE_CASE version of the feature name.

**Examples:**
```bash
# Enable user authentication requirement
FEATURE_REQUIRE_LOGIN=true
NEXT_PUBLIC_FEATURE_REQUIRE_LOGIN=true

# Disable browse functionality
FEATURE_BROWSE=false
NEXT_PUBLIC_FEATURE_BROWSE=false

# Enable Zulip integration
FEATURE_SEND_TO_ZULIP=true
NEXT_PUBLIC_FEATURE_SEND_TO_ZULIP=true
```

compose.yml (new file, 67 lines)
@@ -0,0 +1,67 @@
services:
  server:
    build:
      context: server
    ports:
      - 1250:1250
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: server

  worker:
    build:
      context: server
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: worker

  beat:
    build:
      context: server
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: beat

  redis:
    image: redis:7.2
    ports:
      - 6379:6379
  web:
    image: node:18
    ports:
      - "3000:3000"
    command: sh -c "corepack enable && pnpm install && pnpm dev"
    restart: unless-stopped
    working_dir: /app
    volumes:
      - ./www:/app/
      - /app/node_modules
    env_file:
      - ./www/.env.local

  postgres:
    image: postgres:17
    ports:
      - 5432:5432
    environment:
      POSTGRES_USER: reflector
      POSTGRES_PASSWORD: reflector
      POSTGRES_DB: reflector
    volumes:
      - ./data/postgres:/var/lib/postgresql/data

networks:
  default:
    attachable: true

@@ -1,37 +0,0 @@
# Production Docker Compose configuration for Frontend
# Usage: docker compose -f docker-compose.prod.yml up -d

services:
  web:
    image: monadicalsas/reflector-frontend:latest
    pull_policy: always
    environment:
      - KV_URL=${KV_URL:-redis://redis:6379}
      - SITE_URL=${SITE_URL}
      - API_URL=${API_URL}
      - WEBSOCKET_URL=${WEBSOCKET_URL}
      - NEXTAUTH_URL=${NEXTAUTH_URL:-http://localhost:3000}
      - NEXTAUTH_SECRET=${NEXTAUTH_SECRET:-changeme-in-production}
      - AUTHENTIK_ISSUER=${AUTHENTIK_ISSUER}
      - AUTHENTIK_CLIENT_ID=${AUTHENTIK_CLIENT_ID}
      - AUTHENTIK_CLIENT_SECRET=${AUTHENTIK_CLIENT_SECRET}
      - AUTHENTIK_REFRESH_TOKEN_URL=${AUTHENTIK_REFRESH_TOKEN_URL}
      - SENTRY_DSN=${SENTRY_DSN}
      - SENTRY_IGNORE_API_RESOLUTION_ERROR=${SENTRY_IGNORE_API_RESOLUTION_ERROR:-1}
    depends_on:
      - redis
    restart: unless-stopped

  redis:
    image: redis:7.2-alpine
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
      timeout: 3s
      retries: 3
    volumes:
      - redis_data:/data

volumes:
  redis_data:

@@ -1,120 +0,0 @@
services:
  server:
    build:
      context: server
    ports:
      - 1250:1250
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: server

  worker:
    build:
      context: server
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: worker

  beat:
    build:
      context: server
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: beat

  hatchet-worker:
    build:
      context: server
    volumes:
      - ./server/:/app/
      - /app/.venv
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: hatchet-worker
    depends_on:
      hatchet:
        condition: service_healthy

  redis:
    image: redis:7.2
    ports:
      - 6379:6379
  web:
    image: node:22-alpine
    ports:
      - "3000:3000"
    command: sh -c "corepack enable && pnpm install && pnpm dev"
    restart: unless-stopped
    working_dir: /app
    volumes:
      - ./www:/app/
      - /app/node_modules
    env_file:
      - ./www/.env.local
    environment:
      - NODE_ENV=development

  postgres:
    image: postgres:17
    command: postgres -c 'max_connections=200'
    ports:
      - 5432:5432
    environment:
      POSTGRES_USER: reflector
      POSTGRES_PASSWORD: reflector
      POSTGRES_DB: reflector
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
      - ./server/docker/init-hatchet-db.sql:/docker-entrypoint-initdb.d/init-hatchet-db.sql:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d reflector -U reflector"]
      interval: 10s
      timeout: 10s
      retries: 5
      start_period: 10s

  hatchet:
    image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest
    ports:
      - "8889:8888"
      - "7078:7077"
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      DATABASE_URL: "postgresql://reflector:reflector@postgres:5432/hatchet?sslmode=disable"
      SERVER_AUTH_COOKIE_DOMAIN: localhost
      SERVER_AUTH_COOKIE_INSECURE: "t"
      SERVER_GRPC_BIND_ADDRESS: "0.0.0.0"
      SERVER_GRPC_INSECURE: "t"
      SERVER_GRPC_BROADCAST_ADDRESS: hatchet:7077
      SERVER_GRPC_PORT: "7077"
      SERVER_URL: http://localhost:8889
      SERVER_AUTH_SET_EMAIL_VERIFIED: "t"
      # SERVER_DEFAULT_ENGINE_VERSION: "V1" # default
      SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: hatchet:7077
    volumes:
      - ./data/hatchet-config:/config
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8888/api/live"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s

networks:
  default:
    attachable: true

@@ -1,241 +0,0 @@
# Transcript Formats

The Reflector API provides multiple output formats for transcript data through the `transcript_format` query parameter on the GET `/v1/transcripts/{id}` endpoint.

## Overview

When retrieving a transcript, you can specify the desired format using the `transcript_format` query parameter. The API supports four formats optimized for different use cases:

- **text** - Plain text with speaker names (default)
- **text-timestamped** - Timestamped text with speaker names
- **webvtt-named** - WebVTT subtitle format with participant names
- **json** - Structured JSON segments with full metadata

All formats include participant information when available, resolving speaker IDs to actual names.

## Query Parameter Usage

```
GET /v1/transcripts/{id}?transcript_format={format}
```

### Parameters

- `transcript_format` (optional): The desired output format (example request below)
  - Type: `"text" | "text-timestamped" | "webvtt-named" | "json"`
  - Default: `"text"`
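As a usage sketch, here is one way to call this endpoint from Python. This is illustrative only: the base URL and the `X-API-Key` header follow the API key notes elsewhere in this repository, and `requests` is an assumed client library, not something Reflector mandates.

```python
# Minimal sketch, not project code: fetch a transcript as timestamped text.
import requests

API = "http://localhost:1250"          # local dev server, per the docs in this repo
transcript_id = "transcript_123"       # placeholder id from the examples below

resp = requests.get(
    f"{API}/v1/transcripts/{transcript_id}",
    params={"transcript_format": "text-timestamped"},
    headers={"X-API-Key": "<your_api_key>"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["transcript"])
```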

## Format Descriptions

### Text Format (`text`)

**Use case:** Simple, human-readable transcript for display or export.

**Format:** Speaker names followed by their dialogue, one line per segment.

**Example:**
```
John Smith: Hello everyone
Jane Doe: Hi there
John Smith: How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "text",
  "transcript": "John Smith: Hello everyone\nJane Doe: Hi there\nJohn Smith: How are you today?",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### Text Timestamped Format (`text-timestamped`)

**Use case:** Transcript with timing information for navigation or reference.

**Format:** `[MM:SS]` timestamp prefix before each speaker and dialogue.

**Example:**
```
[00:00] John Smith: Hello everyone
[00:05] Jane Doe: Hi there
[00:12] John Smith: How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text-timestamped
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "text-timestamped",
  "transcript": "[00:00] John Smith: Hello everyone\n[00:05] Jane Doe: Hi there\n[00:12] John Smith: How are you today?",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### WebVTT Named Format (`webvtt-named`)

**Use case:** Subtitle files for video players, accessibility tools, or video editing.

**Format:** Standard WebVTT subtitle format with voice tags using participant names.

**Example:**
```
WEBVTT

00:00:00.000 --> 00:00:05.000
<v John Smith>Hello everyone

00:00:05.000 --> 00:00:12.000
<v Jane Doe>Hi there

00:00:12.000 --> 00:00:18.000
<v John Smith>How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=webvtt-named
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "webvtt-named",
  "transcript": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v John Smith>Hello everyone\n\n...",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### JSON Format (`json`)

**Use case:** Programmatic access with full timing and speaker metadata.

**Format:** Array of segment objects with speaker information, text content, and precise timing.

**Example:**
```json
[
  {
    "speaker": 0,
    "speaker_name": "John Smith",
    "text": "Hello everyone",
    "start": 0.0,
    "end": 5.0
  },
  {
    "speaker": 1,
    "speaker_name": "Jane Doe",
    "text": "Hi there",
    "start": 5.0,
    "end": 12.0
  },
  {
    "speaker": 0,
    "speaker_name": "John Smith",
    "text": "How are you today?",
    "start": 12.0,
    "end": 18.0
  }
]
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=json
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "json",
  "transcript": [
    {
      "speaker": 0,
      "speaker_name": "John Smith",
      "text": "Hello everyone",
      "start": 0.0,
      "end": 5.0
    },
    {
      "speaker": 1,
      "speaker_name": "Jane Doe",
      "text": "Hi there",
      "start": 5.0,
      "end": 12.0
    }
  ],
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

## Response Structure

All formats return the same base transcript metadata with an additional `transcript_format` field and format-specific `transcript` field:

### Common Fields

- `id`: Transcript identifier
- `user_id`: Owner user ID (if authenticated)
- `name`: Transcript name
- `status`: Processing status
- `locked`: Whether transcript is locked for editing
- `duration`: Total duration in seconds
- `title`: Auto-generated or custom title
- `short_summary`: Brief summary
- `long_summary`: Detailed summary
- `created_at`: Creation timestamp
- `share_mode`: Access control setting
- `source_language`: Original audio language
- `target_language`: Translation target language
- `reviewed`: Whether transcript has been reviewed
- `meeting_id`: Associated meeting ID (if applicable)
- `source_kind`: Source type (live, file, room)
- `room_id`: Associated room ID (if applicable)
- `audio_deleted`: Whether audio has been deleted
- `participants`: Array of participant objects with speaker mappings

### Format-Specific Fields

- `transcript_format`: The format identifier (discriminator field)
- `transcript`: The formatted transcript content (string for text/webvtt formats, array for json format)

## Speaker Name Resolution

All formats resolve speaker IDs to participant names when available (see the sketch after this list):

- If a participant exists for the speaker ID, their name is used
- If no participant exists, a default name like "Speaker 0" is generated
- Speaker IDs are integers (0, 1, 2, etc.) assigned during diarization
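A minimal sketch of the resolution rule described above (illustrative, not the server's implementation; the participant shape follows the `participants` arrays shown in the examples):

```python
# Sketch of the described rule: map a diarization speaker id to a display name.
def resolve_speaker_name(speaker: int, participants: list[dict]) -> str:
    for participant in participants:
        if participant.get("speaker") == speaker and participant.get("name"):
            return participant["name"]
    return f"Speaker {speaker}"  # default when no participant is mapped

participants = [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"},
]
assert resolve_speaker_name(0, participants) == "John Smith"
assert resolve_speaker_name(2, participants) == "Speaker 2"
```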

@@ -77,13 +77,13 @@ image = (
    .pip_install(
        "hf_transfer==0.1.9",
        "huggingface_hub[hf-xet]==0.31.2",
        "nemo_toolkit[asr]==2.5.0",
        "nemo_toolkit[asr]==2.3.0",
        "cuda-python==12.8.0",
        "fastapi==0.115.12",
        "numpy<2",
        "librosa==0.11.0",
        "librosa==0.10.1",
        "requests",
        "silero-vad==6.2.0",
        "silero-vad==5.1.0",
        "torch",
    )
    .entrypoint([])  # silence chatty logs by container on start

@@ -306,7 +306,6 @@ class TranscriberParakeetFile:
    ) -> Generator[TimeSegment, None, None]:
        """Generate speech segments using VAD with start/end sample indices"""
        vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
        audio_duration = len(audio_array) / float(SAMPLERATE)
        window_size = VAD_CONFIG["window_size"]
        start = None

@@ -333,10 +332,6 @@ class TranscriberParakeetFile:
                yield TimeSegment(start_time, end_time)
                start = None

        if start is not None:
            start_time = start / float(SAMPLERATE)
            yield TimeSegment(start_time, audio_duration)

        vad_iterator.reset_states()

    def batch_speech_segments(

@@ -1,29 +1,3 @@
## API Key Management

### Finding Your User ID

```bash
# Get your OAuth sub (user ID) - requires authentication
curl -H "Authorization: Bearer <your_jwt>" http://localhost:1250/v1/me
# Returns: {"sub": "your-oauth-sub-here", "email": "...", ...}
```

### Creating API Keys

```bash
curl -X POST http://localhost:1250/v1/user/api-keys \
  -H "Authorization: Bearer <your_jwt>" \
  -H "Content-Type: application/json" \
  -d '{"name": "My API Key"}'
```

### Using API Keys

```bash
# Use X-API-Key header instead of Authorization
curl -H "X-API-Key: <your_api_key>" http://localhost:1250/v1/transcripts
```

## AWS S3/SQS usage clarification

Whereby.com uploads recordings directly to our S3 bucket when meetings end.

@@ -53,36 +27,6 @@ response = sqs.receive_message(QueueUrl=queue_url, ...)
uv run /app/requeue_uploaded_file.py TRANSCRIPT_ID
```

## Hatchet Setup (Fresh DB)

After resetting the Hatchet database:

### Option A: Automatic (CLI)

```bash
# Get default tenant ID and create token in one command
TENANT_ID=$(docker compose exec -T postgres psql -U reflector -d hatchet -t -c \
  "SELECT id FROM \"Tenant\" WHERE slug = 'default';" | tr -d ' \n') && \
TOKEN=$(docker compose exec -T hatchet /hatchet-admin token create \
  --config /config --tenant-id "$TENANT_ID" 2>/dev/null | tr -d '\n') && \
echo "HATCHET_CLIENT_TOKEN=$TOKEN"
```

Copy the output to `server/.env`.

### Option B: Manual (UI)

1. Create API token at http://localhost:8889 → Settings → API Tokens
2. Update `server/.env`: `HATCHET_CLIENT_TOKEN=<new-token>`

### Then restart workers

```bash
docker compose restart server hatchet-worker
```

Workflows register automatically when hatchet-worker starts.

## Pipeline Management

### Continue stuck pipeline from final summaries (identify_participants) step:

server/asyncio_loop_analysis.md (new file, 118 lines)
@@ -0,0 +1,118 @@
# AsyncIO Event Loop Analysis for test_attendee_parsing_bug.py

## Problem Summary
The test passes but encounters an error during teardown where asyncpg tries to use a different/closed event loop, resulting in:
- `RuntimeError: Task got Future attached to a different loop`
- `RuntimeError: Event loop is closed`

## Root Cause Analysis

### 1. Multiple Event Loop Creation Points

The test environment creates event loops at different scopes:

1. **Session-scoped loop** (conftest.py:27-34):
   - Created once per test session
   - Used by session-scoped fixtures
   - Closed after all tests complete

2. **Function-scoped loop** (pytest-asyncio default):
   - Created for each async test function
   - This is the loop that runs the actual test
   - Closed immediately after test completes

3. **AsyncPG internal loop**:
   - AsyncPG connections store a reference to the loop they were created with
   - Used for connection lifecycle management

### 2. Event Loop Lifecycle Mismatch

The issue occurs because:

1. **Session fixture creates database connection** on session-scoped loop
2. **Test runs** on function-scoped loop (different from session loop)
3. **During teardown**, the session fixture tries to rollback/close using the original session loop
4. **AsyncPG connection** still references the function-scoped loop which is now closed
5. **Conflict**: SQLAlchemy tries to use session loop, but asyncpg Future is attached to the closed function loop

### 3. Configuration Issues

Current pytest configuration:
- `asyncio_mode = "auto"` in pyproject.toml
- `asyncio_default_fixture_loop_scope=session` (shown in test output)
- `asyncio_default_test_loop_scope=function` (shown in test output)

This mismatch between fixture loop scope (session) and test loop scope (function) causes the problem.

## Solutions

### Option 1: Align Loop Scopes (Recommended)
Change pytest-asyncio configuration to use consistent loop scopes:

```toml
# pyproject.toml
[tool.pytest.ini_options]
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "function"  # Change from session to function
```

### Option 2: Use Function-Scoped Database Fixture
Change the `session` fixture scope from session to function:

```python
@pytest_asyncio.fixture  # Remove scope="session"
async def session(setup_database):
    # ... existing code ...
```

### Option 3: Explicit Loop Management
Ensure all async operations use the same loop:

```python
@pytest_asyncio.fixture
async def session(setup_database, event_loop):
    # Force using the current event loop
    engine = create_async_engine(
        settings.DATABASE_URL,
        echo=False,
        poolclass=NullPool,
        connect_args={"loop": event_loop}  # Pass explicit loop
    )
    # ... rest of fixture ...
```

### Option 4: Upgrade pytest-asyncio
The current version (1.1.0) has known issues with loop management. Consider upgrading to the latest version which has better loop scope handling.

## Immediate Workaround

For the test to run cleanly without the teardown error, you can:

1. Add explicit cleanup in the test:
```python
@pytest.mark.asyncio
async def test_attendee_parsing_bug(session):
    # ... existing test code ...

    # Explicit cleanup before fixture teardown
    await session.commit()  # or await session.close()
```

2. Or suppress the teardown error (not recommended for production):
```python
@pytest.fixture
async def session(setup_database):
    # ... existing setup ...
    try:
        yield session
        await session.rollback()
    except RuntimeError as e:
        if "Event loop is closed" not in str(e):
            raise
    finally:
        await session.close()
```

## Recommendation

The cleanest solution is to align the loop scopes by setting both fixture and test loop scopes to "function" scope. This ensures each test gets its own clean event loop and avoids cross-contamination between tests.

@@ -1,2 +0,0 @@
-- Create hatchet database for Hatchet workflow engine
CREATE DATABASE hatchet;

@@ -1,236 +0,0 @@
# Reflector Architecture: Whereby + Daily.co Recording Storage

## System Overview

```mermaid
graph TB
    subgraph "Actors"
        APP[Our App<br/>Reflector]
        WHEREBY[Whereby Service<br/>External]
        DAILY[Daily.co Service<br/>External]
    end

    subgraph "AWS S3 Buckets"
        TRANSCRIPT_BUCKET[Transcript Bucket<br/>reflector-transcripts<br/>Output: Processed MP3s]
        WHEREBY_BUCKET[Whereby Bucket<br/>reflector-whereby-recordings<br/>Input: Raw MP4s]
        DAILY_BUCKET[Daily.co Bucket<br/>reflector-dailyco-recordings<br/>Input: Raw WebM tracks]
    end

    subgraph "AWS Infrastructure"
        SQS[SQS Queue<br/>Whereby notifications]
    end

    subgraph "Database"
        DB[(PostgreSQL<br/>Recordings, Transcripts, Meetings)]
    end

    APP -->|Write processed| TRANSCRIPT_BUCKET
    APP -->|Read/Delete| WHEREBY_BUCKET
    APP -->|Read/Delete| DAILY_BUCKET
    APP -->|Poll| SQS
    APP -->|Store metadata| DB

    WHEREBY -->|Write recordings| WHEREBY_BUCKET
    WHEREBY_BUCKET -->|S3 Event| SQS
    WHEREBY -->|Participant webhooks<br/>room.client.joined/left| APP

    DAILY -->|Write recordings| DAILY_BUCKET
    DAILY -->|Recording webhook<br/>recording.ready-to-download| APP
```

**Note on Webhook vs S3 Event for Recording Processing:**
- **Whereby**: Uses S3 Events → SQS for recording availability (S3 as source of truth, no race conditions)
- **Daily.co**: Uses webhooks for recording availability (more immediate, built-in reliability)
- **Both**: Use webhooks for participant tracking (real-time updates)

## Credentials & Permissions

```mermaid
graph LR
    subgraph "Master Credentials"
        MASTER[TRANSCRIPT_STORAGE_AWS_*<br/>Access Key ID + Secret]
    end

    subgraph "Whereby Upload Credentials"
        WHEREBY_CREDS[AWS_WHEREBY_ACCESS_KEY_*<br/>Access Key ID + Secret]
    end

    subgraph "Daily.co Upload Role"
        DAILY_ROLE[DAILY_STORAGE_AWS_ROLE_ARN<br/>IAM Role ARN]
    end

    subgraph "Our App Uses"
        MASTER -->|Read/Write/Delete| TRANSCRIPT_BUCKET[Transcript Bucket]
        MASTER -->|Read/Delete| WHEREBY_BUCKET[Whereby Bucket]
        MASTER -->|Read/Delete| DAILY_BUCKET[Daily.co Bucket]
        MASTER -->|Poll/Delete| SQS[SQS Queue]
    end

    subgraph "We Give To Services"
        WHEREBY_CREDS -->|Passed in API call| WHEREBY_SERVICE[Whereby Service]
        WHEREBY_SERVICE -->|Write Only| WHEREBY_BUCKET

        DAILY_ROLE -->|Passed in API call| DAILY_SERVICE[Daily.co Service]
        DAILY_SERVICE -->|Assume Role| DAILY_ROLE
        DAILY_SERVICE -->|Write Only| DAILY_BUCKET
    end
```

# Video Platform Recording Integration

This document explains how Reflector receives and identifies multitrack audio recordings from different video platforms.

## Platform Comparison

| Platform | Delivery Method | Track Identification |
|----------|----------------|---------------------|
| **Daily.co** | Webhook | Explicit track list in payload |
| **Whereby** | SQS (S3 notifications) | Single file per notification |

---

## Daily.co

**Note:** Primary discovery via polling (`poll_daily_recordings`), webhooks as backup.

Daily.co uses **webhooks** to notify Reflector when recordings are ready.

### How It Works

1. **Daily.co sends webhook** when recording is ready
   - Event type: `recording.ready-to-download`
   - Endpoint: `/v1/daily/webhook` (`reflector/views/daily.py:46-102`)

2. **Webhook payload explicitly includes track list**:
   ```json
   {
     "recording_id": "7443ee0a-dab1-40eb-b316-33d6c0d5ff88",
     "room_name": "daily-20251020193458",
     "tracks": [
       {
         "type": "audio",
         "s3Key": "monadical/daily-20251020193458/1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922",
         "size": 831843
       },
       {
         "type": "audio",
         "s3Key": "monadical/daily-20251020193458/1760988935484-a37c35e3-6f8e-4274-a482-e9d0f102a732-cam-audio-1760988943823",
         "size": 408438
       },
       {
         "type": "video",
         "s3Key": "monadical/daily-20251020193458/...-video.webm",
         "size": 30000000
       }
     ]
   }
   ```

3. **System extracts audio tracks** (`daily.py:211`):
   ```python
   track_keys = [t.s3Key for t in tracks if t.type == "audio"]
   ```

4. **Triggers multitrack processing** (`daily.py:213-218`):
   ```python
   process_multitrack_recording.delay(
       bucket_name=bucket_name,      # reflector-dailyco-local
       room_name=room_name,          # daily-20251020193458
       recording_id=recording_id,    # 7443ee0a-dab1-40eb-b316-33d6c0d5ff88
       track_keys=track_keys         # Only audio s3Keys
   )
   ```

### Key Advantage: No Ambiguity

Even though multiple meetings may share the same S3 bucket/folder (`monadical/`), **there's no ambiguity** because:
- Each webhook payload contains the exact `s3Key` list for that specific `recording_id`
- No need to scan folders or guess which files belong together
- Each track's s3Key includes the room timestamp subfolder (e.g., `daily-20251020193458/`)

The room name includes timestamp (`daily-20251020193458`) to keep recordings organized, but **the webhook's explicit track list is what prevents mixing files from different meetings**.

### Track Timeline Extraction

Daily.co provides timing information in two places:

**1. PyAV WebM Metadata (current approach)**:
```python
# Read from WebM container stream metadata
stream.start_time = 8.130s  # Meeting-relative timing
```

**2. Filename Timestamps (alternative approach, commit 3bae9076)**:
```
Filename format: {recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm
Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm

Parse timestamps:
- recording_start_ts: 1760988935484 (Unix ms)
- track_start_ts: 1760988935922 (Unix ms)
- offset: (1760988935922 - 1760988935484) / 1000 = 0.438s
```
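A small sketch of the filename-based calculation above (illustrative; the filename layout is taken from the format shown, everything else is an assumption):

```python
# Sketch: derive a track's start offset (seconds) from a Daily.co-style filename,
# assuming "{recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm".
def track_offset_seconds(filename: str) -> float:
    stem = filename.rsplit(".", 1)[0]
    recording_start_ms = int(stem.split("-", 1)[0])   # leading Unix-ms timestamp
    track_start_ms = int(stem.rsplit("-", 1)[-1])     # trailing Unix-ms timestamp
    return (track_start_ms - recording_start_ms) / 1000.0

name = "1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm"
print(track_offset_seconds(name))  # 0.438, matching the worked example above
```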
|
||||
**Time Difference (PyAV vs Filename)**:
|
||||
```
|
||||
Track 0:
|
||||
Filename offset: 438ms
|
||||
PyAV metadata: 229ms
|
||||
Difference: 209ms
|
||||
|
||||
Track 1:
|
||||
Filename offset: 8339ms
|
||||
PyAV metadata: 8130ms
|
||||
Difference: 209ms
|
||||
```
|
||||
|
||||
**Consistent 209ms delta** suggests network/encoding delay between file upload initiation (filename) and actual audio stream start (metadata).
|
||||
|
||||
**Current implementation uses PyAV metadata** because:
|
||||
- More accurate (represents when audio actually started)
|
||||
- Padding BEFORE transcription produces correct Whisper timestamps automatically
|
||||
- No manual offset adjustment needed during transcript merge
|
||||
|
||||
### Why Re-encoding During Padding
|
||||
|
||||
Padding coincidentally involves re-encoding, which is important for Daily.co + Whisper:
|
||||
|
||||
**Problem:** Daily.co skips frames in recordings when microphone is muted or paused
|
||||
- WebM containers have gaps where audio frames should be
|
||||
- Whisper doesn't understand these gaps and produces incorrect timestamps
|
||||
- Example: 5s of audio with 2s muted → file has frames only for 3s, Whisper thinks duration is 3s
|
||||
|
||||
**Solution:** Re-encoding via PyAV filter graph (`adelay` + `aresample`)
|
||||
- Restores missing frames as silence
|
||||
- Produces continuous audio stream without gaps
|
||||
- Whisper now sees correct duration and produces accurate timestamps
|
||||
|
||||
**Why combined with padding:**
|
||||
- Already re-encoding for padding (adding initial silence)
|
||||
- More performant to do both operations in single PyAV pipeline
|
||||
- Padded values needed for mixdown anyway (creating final MP3)
|
||||
|
||||
Implementation: `main_multitrack_pipeline.py:_apply_audio_padding_streaming()`
|
||||
|
||||
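For intuition, the same pad-and-fill effect can be expressed as an ffmpeg filter chain driven from Python. This is only an equivalent sketch, not the project's actual PyAV pipeline; the 438 ms delay is the worked example above, and file names are placeholders.

```python
# Sketch: prepend silence and re-encode a track into a continuous stream,
# roughly what the PyAV adelay + aresample graph described above achieves.
import subprocess

def pad_track(src: str, dst: str, delay_ms: int) -> None:
    subprocess.run(
        [
            "ffmpeg", "-y", "-i", src,
            # adelay inserts leading silence on all channels; aresample=async=1
            # fills timestamp gaps with silence so no frames are missing.
            "-af", f"adelay={delay_ms}:all=1,aresample=async=1",
            dst,
        ],
        check=True,
    )

pad_track("track-0.webm", "track-0-padded.wav", delay_ms=438)
```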
---

## Whereby (SQS-based)

Whereby uses **AWS SQS** (via S3 notifications) to notify Reflector when files are uploaded.

### How It Works

1. **Whereby uploads recording** to S3
2. **S3 sends notification** to SQS queue (one notification per file)
3. **Reflector polls SQS queue** (`worker/process.py:process_messages()`)
4. **System processes single file** (`worker/process.py:process_recording()`) (see the polling sketch below)
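A minimal polling sketch for steps 3-4 above (illustrative only; the queue URL, region, and handler are placeholders, not the code in `worker/process.py`):

```python
# Sketch: long-poll the SQS queue for S3 "object created" notifications,
# hand each record to a processing callback, then delete the message.
import json
import boto3

sqs = boto3.client("sqs", region_name="us-west-2")  # region is an assumption
queue_url = "https://sqs.us-west-2.amazonaws.com/123456789012/example-queue"  # placeholder

def poll_once(handle_record) -> None:
    response = sqs.receive_message(
        QueueUrl=queue_url, MaxNumberOfMessages=10, WaitTimeSeconds=20
    )
    for message in response.get("Messages", []):
        body = json.loads(message["Body"])
        for record in body.get("Records", []):  # standard S3 event notification shape
            handle_record(record["s3"]["bucket"]["name"], record["s3"]["object"]["key"])
        sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=message["ReceiptHandle"])
```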

### Key Difference from Daily.co

**Whereby (SQS):** System receives S3 notification "file X was created" - only knows about one file at a time, would need to scan folder to find related files

**Daily.co (Webhook):** Daily explicitly tells system which files belong together in the webhook payload

---

@@ -14,7 +14,7 @@ Webhooks are configured at the room level with two fields:

### `transcript.completed`

Triggered when a transcript has been fully processed, including transcription, diarization, summarization, topic detection and calendar event integration.
Triggered when a transcript has been fully processed, including transcription, diarization, summarization, and topic detection.
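A minimal consumer sketch for this event (illustrative only: the receiver path, the `event` field name, and the payload handling are assumptions, not part of Reflector's webhook contract, and signature verification is omitted):

```python
# Sketch: a tiny FastAPI app that accepts a transcript.completed webhook
# and logs the transcript title.
from fastapi import FastAPI, Request

app = FastAPI()

@app.post("/webhooks/reflector")  # hypothetical receiver URL configured on the room
async def reflector_webhook(request: Request) -> dict:
    payload = await request.json()
    if payload.get("event") == "transcript.completed":  # field name assumed
        transcript = payload.get("transcript", {})
        print("Transcript ready:", transcript.get("title"))
    return {"ok": True}
```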

### `test`

@@ -128,27 +128,6 @@ This event includes a convenient URL for accessing the transcript:
    "room": {
      "id": "room-789",
      "name": "Product Team Room"
    },
    "calendar_event": {
      "id": "calendar-event-123",
      "ics_uid": "event-123",
      "title": "Q3 Product Planning Meeting",
      "start_time": "2025-08-27T12:00:00Z",
      "end_time": "2025-08-27T12:30:00Z",
      "description": "Team discussed Q3 product roadmap, prioritizing mobile app features and API improvements.",
      "location": "Conference Room 1",
      "attendees": [
        {
          "id": "participant-1",
          "name": "John Doe",
          "speaker": "Speaker 1"
        },
        {
          "id": "participant-2",
          "name": "Jane Smith",
          "speaker": "Speaker 2"
        }
      ]
    }
  }
```

@@ -27,7 +27,7 @@ AUTH_JWT_AUDIENCE=
#TRANSCRIPT_MODAL_API_KEY=xxxxx

TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-parakeet-web.modal.run
TRANSCRIPT_URL=https://monadical-sas--reflector-transcriber-web.modal.run
TRANSCRIPT_MODAL_API_KEY=

## =======================================================
@@ -71,30 +71,3 @@ DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run

## Sentry DSN configuration
#SENTRY_DSN=

## =======================================================
## Video Platform Configuration
## =======================================================

## Whereby
#WHEREBY_API_KEY=your-whereby-api-key
#WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret
#WHEREBY_STORAGE_AWS_ACCESS_KEY_ID=your-aws-key
#WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret
#AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/...

## Daily.co
#DAILY_API_KEY=your-daily-api-key
#DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
#DAILY_SUBDOMAIN=your-subdomain
#DAILY_WEBHOOK_UUID= # Auto-populated by recreate_daily_webhook.py script
#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
#DAILYCO_STORAGE_AWS_REGION=us-west-2

## Whereby (optional separate bucket)
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby
#WHEREBY_STORAGE_AWS_REGION=us-east-1

## Platform Configuration
#DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms
583
server/migration.md
Normal file
@@ -0,0 +1,583 @@
# Celery to TaskIQ Migration Guide

## Executive Summary

This document outlines the migration path from Celery to TaskIQ for the Reflector project. TaskIQ is a modern, async-first distributed task queue that provides similar functionality to Celery while being designed specifically for async Python applications.

## Current Celery Usage Analysis

### Key Patterns in Use
1. **Task Decorators**: `@shared_task`, `@asynctask`, `@with_session` decorators
2. **Task Invocation**: `.delay()`, `.si()` for signatures
3. **Workflow Patterns**: `chain()`, `group()`, `chord()` for complex pipelines
4. **Scheduled Tasks**: Celery Beat with crontab and periodic schedules
5. **Session Management**: Custom `@with_session` and `@with_session_and_transcript` decorators
6. **Retry Logic**: Auto-retry with exponential backoff
7. **Redis Backend**: Using Redis for broker and result backend

### Critical Files to Migrate
- `reflector/worker/app.py` - Celery app configuration and beat schedule
- `reflector/worker/session_decorator.py` - Session management decorators
- `reflector/pipelines/main_file_pipeline.py` - File processing pipeline
- `reflector/pipelines/main_live_pipeline.py` - Live streaming pipeline (10 tasks)
- `reflector/worker/process.py` - Background processing tasks
- `reflector/worker/ics_sync.py` - Calendar sync tasks
- `reflector/worker/cleanup.py` - Cleanup tasks
- `reflector/worker/webhook.py` - Webhook notifications

## TaskIQ Architecture Mapping

### 1. Installation

```bash
# Remove Celery dependencies
uv remove celery flower

# Install TaskIQ with Redis support
uv add taskiq taskiq-redis taskiq-pipelines
```

### 2. Broker Configuration

#### Current (Celery)
```python
# reflector/worker/app.py
from celery import Celery

app = Celery(
    "reflector",
    broker=settings.CELERY_BROKER_URL,
    backend=settings.CELERY_RESULT_BACKEND,
    include=[...],
)
```

#### New (TaskIQ)
```python
# reflector/worker/broker.py
from taskiq_redis import RedisAsyncResultBackend, RedisStreamBroker
from taskiq import PipelineMiddleware, SimpleRetryMiddleware

result_backend = RedisAsyncResultBackend(
    redis_url=settings.REDIS_URL,
    result_ex_time=86400,  # 24 hours
)

broker = RedisStreamBroker(
    url=settings.REDIS_URL,
    max_connection_pool_size=10,
).with_result_backend(result_backend).with_middlewares(
    PipelineMiddleware(),  # For chain/group/chord support
    SimpleRetryMiddleware(default_retry_count=3),
)

# For testing environment
if os.environ.get("ENVIRONMENT") == "pytest":
    from taskiq import InMemoryBroker
    broker = InMemoryBroker(await_inplace=True)
```

### 3. Task Definition Migration

#### Current (Celery)
```python
@shared_task
@asynctask
@with_session
async def task_pipeline_file_process(session: AsyncSession, transcript_id: str):
    pipeline = PipelineMainFile(transcript_id=transcript_id)
    await pipeline.process()
```

#### New (TaskIQ)
```python
from taskiq import TaskiqDepends
from reflector.db import get_session
from reflector.worker.broker import broker
from reflector.worker.dependencies import get_db_session

@broker.task
async def task_pipeline_file_process(transcript_id: str):
    # Use get_session for proper test mocking
    async for session in get_session():
        pipeline = PipelineMainFile(transcript_id=transcript_id)
        await pipeline.process()
```

### 4. Session Management

#### Current Session Decorators (Keep Using These!)
```python
# reflector/worker/session_decorator.py
def with_session(func):
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        async with get_session_context() as session:
            return await func(session, *args, **kwargs)
    return wrapper
```

#### Session Management Strategy

**⚠️ CRITICAL**: The key insight is to maintain consistent session management patterns:

1. **For Worker Tasks**: Continue using `@with_session` decorator pattern
2. **For FastAPI endpoints**: Use `get_session` dependency injection
3. **Never use `get_session_factory()` directly** in application code

```python
# APPROACH 1: Simple migration keeping decorator pattern
from reflector.worker.session_decorator import with_session

@taskiq_broker.task
@with_session
async def task_pipeline_file_process(session, *, transcript_id: str):
    # Session is provided by decorator, just like Celery version
    transcript = await transcripts_controller.get_by_id(session, transcript_id)
    pipeline = PipelineMainFile(transcript_id=transcript_id)
    await pipeline.process()

# APPROACH 2: For test compatibility without decorator
from reflector.db import get_session

@taskiq_broker.task
async def task_pipeline_file_process(transcript_id: str):
    # Use get_session which is mocked in tests
    async for session in get_session():
        transcript = await transcripts_controller.get_by_id(session, transcript_id)
        pipeline = PipelineMainFile(transcript_id=transcript_id)
        await pipeline.process()

# APPROACH 3: Future - TaskIQ dependency injection (after full migration)
from taskiq import TaskiqDepends

async def get_session_context():
    """Context manager version of get_session for consistency"""
    async for session in get_session():
        yield session

@taskiq_broker.task
async def task_pipeline_file_process(
    transcript_id: str,
    session: AsyncSession = TaskiqDepends(get_session_context)
):
    transcript = await transcripts_controller.get_by_id(session, transcript_id)
    pipeline = PipelineMainFile(transcript_id=transcript_id)
    await pipeline.process()
```

**Key Points:**
- `@with_session` decorator works with TaskIQ tasks (remove `@asynctask`, keep `@with_session`)
- For testing: `get_session()` from `reflector.db` is properly mocked
- Never call `get_session_factory()` directly - always use the abstractions

### 5. Task Invocation

#### Current (Celery)
```python
# Simple async execution
task_pipeline_file_process.delay(transcript_id=transcript.id)

# With signature for chaining
task_cleanup_consent.si(transcript_id=transcript_id)
```

#### New (TaskIQ)
```python
# Simple async execution
await task_pipeline_file_process.kiq(transcript_id=transcript.id)

# With kicker for advanced configuration
await task_cleanup_consent.kicker().with_labels(
    priority="high"
).kiq(transcript_id=transcript_id)
```

### 6. Workflow Patterns (Chain, Group, Chord)

#### Current (Celery)
```python
from celery import chain, group, chord

# Chain example
post_chain = chain(
    task_cleanup_consent.si(transcript_id=transcript_id),
    task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
    task_send_webhook_if_needed.si(transcript_id=transcript_id),
)

# Chord example (parallel + callback)
chain = chord(
    group(chain_mp3_and_diarize, chain_title_preview),
    chain_final_summaries,
) | task_pipeline_post_to_zulip.si(transcript_id=transcript_id)
```

#### New (TaskIQ with Pipelines)
```python
from taskiq_pipelines import Pipeline
from taskiq import gather

# Chain example using Pipeline
post_pipeline = (
    Pipeline(broker, task_cleanup_consent)
    .call_next(task_pipeline_post_to_zulip, transcript_id=transcript_id)
    .call_next(task_send_webhook_if_needed, transcript_id=transcript_id)
)
await post_pipeline.kiq(transcript_id=transcript_id)

# Parallel execution with gather
results = await gather([
    chain_mp3_and_diarize.kiq(transcript_id),
    chain_title_preview.kiq(transcript_id),
])

# Then execute callback
await chain_final_summaries.kiq(transcript_id, results)
await task_pipeline_post_to_zulip.kiq(transcript_id)
```

### 7. Scheduled Tasks (Celery Beat → TaskIQ Scheduler)

#### Current (Celery Beat)
```python
# reflector/worker/app.py
app.conf.beat_schedule = {
    "process_messages": {
        "task": "reflector.worker.process.process_messages",
        "schedule": float(settings.SQS_POLLING_TIMEOUT_SECONDS),
    },
    "reprocess_failed_recordings": {
        "task": "reflector.worker.process.reprocess_failed_recordings",
        "schedule": crontab(hour=5, minute=0),
    },
}
```

#### New (TaskIQ Scheduler)
```python
# reflector/worker/scheduler.py
from taskiq import TaskiqScheduler
from taskiq_redis import ListRedisScheduleSource

schedule_source = ListRedisScheduleSource(settings.REDIS_URL)

# Define scheduled tasks with decorators
@broker.task(
    schedule=[
        {
            "cron": f"*/{int(settings.SQS_POLLING_TIMEOUT_SECONDS)} * * * * *"
        }
    ]
)
async def process_messages():
    # Task implementation
    pass

@broker.task(
    schedule=[{"cron": "0 5 * * *"}]  # Daily at 5 AM
)
async def reprocess_failed_recordings():
    # Task implementation
    pass

# Initialize scheduler
scheduler = TaskiqScheduler(broker, sources=[schedule_source])

# Run scheduler (separate process)
# taskiq scheduler reflector.worker.scheduler:scheduler
```

### 8. Retry Configuration

#### Current (Celery)
```python
@shared_task(
    bind=True,
    max_retries=30,
    default_retry_delay=60,
    retry_backoff=True,
    retry_backoff_max=3600,
)
async def task_send_webhook_if_needed(self, ...):
    try:
        # Task logic
    except Exception as exc:
        raise self.retry(exc=exc)
```

#### New (TaskIQ)
```python
from taskiq.middlewares import SimpleRetryMiddleware

# Global middleware configuration (1:1 with Celery defaults)
broker = broker.with_middlewares(
    SimpleRetryMiddleware(default_retry_count=3),
)

# For specific tasks with custom retry logic:
@broker.task(retry_on_error=True, max_retries=30)
async def task_send_webhook_if_needed(...):
    # Task logic - exceptions auto-retry
    pass
```

## Testing Migration

### Current Pytest Setup (Celery)
```python
# tests/conftest.py
@pytest.fixture(scope="session")
def celery_config():
    return {
        "broker_url": "memory://",
        "result_backend": "cache+memory://",
    }

@pytest.mark.usefixtures("celery_session_app")
@pytest.mark.usefixtures("celery_session_worker")
async def test_task():
    pass
```

### New Pytest Setup (TaskIQ)
```python
# tests/conftest.py
import pytest
from taskiq import InMemoryBroker
from reflector.worker.broker import broker

@pytest.fixture(scope="function", autouse=True)
async def setup_taskiq_broker():
    """Replace broker with InMemoryBroker for testing"""
    original_broker = broker
    test_broker = InMemoryBroker(await_inplace=True)

    # Copy task registrations
    for task_name, task in original_broker._tasks.items():
        test_broker.register_task(task.original_function, task_name=task_name)

    yield test_broker
    await test_broker.shutdown()

@pytest.fixture
async def taskiq_with_db_session(db_session):
    """Setup TaskIQ with database session"""
    from reflector.worker.broker import broker
    broker.add_dependency_context({
        AsyncSession: db_session
    })
    yield
    broker.custom_dependency_context = {}

# Test example
@pytest.mark.anyio
async def test_task(taskiq_with_db_session):
    result = await task_pipeline_file_process("transcript-id")
    assert result is not None
```

## Migration Steps

### Phase 1: Setup (Week 1)
1. **Install TaskIQ packages**
   ```bash
   uv add taskiq taskiq-redis taskiq-pipelines
   ```

2. **Create new broker configuration**
   - Create `reflector/worker/broker.py` with TaskIQ broker setup
   - Create `reflector/worker/dependencies.py` for dependency injection

3. **Update settings**
   - Keep existing Redis configuration
   - Add TaskIQ-specific settings if needed

### Phase 2: Parallel Running (Week 2-3)
1. **Migrate simple tasks first**
   - Start with `cleanup.py` (1 task)
   - Move to `webhook.py` (1 task)
   - Test thoroughly in isolation

2. **Setup dual-mode operation**
   - Keep Celery tasks running
   - Add TaskIQ versions alongside
   - Use feature flags to switch between them (see the sketch after this list)

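As one way to implement the feature-flag switch mentioned in step 2, here is a minimal sketch of a dispatch helper. The `USE_TASKIQ` environment variable is a hypothetical name chosen for this sketch; the dual imports follow the pattern described later under Key Implementation Points, and the real project would more likely read the flag from settings.

```python
import os

from reflector.pipelines.main_file_pipeline import (
    task_pipeline_file_process,         # Celery version
    task_pipeline_file_process_taskiq,  # TaskIQ version
)

# Hypothetical flag name; any settings mechanism would work equally well.
USE_TASKIQ = os.environ.get("USE_TASKIQ", "false").lower() == "true"


async def enqueue_file_process(transcript_id: str) -> None:
    """Send the same work item to whichever queue is currently enabled."""
    if USE_TASKIQ:
        await task_pipeline_file_process_taskiq.kiq(transcript_id=transcript_id)
    else:
        # Celery's .delay() is a regular synchronous call, so no await here.
        task_pipeline_file_process.delay(transcript_id=transcript_id)
```

Callers invoke `await enqueue_file_process(transcript.id)` and never touch the broker-specific API directly, which keeps the eventual cutover to a one-line change.
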
### Phase 3: Complex Tasks (Week 3-4)
1. **Migrate pipeline tasks**
   - Convert `main_file_pipeline.py`
   - Convert `main_live_pipeline.py` (most complex with 10 tasks)
   - Ensure chain/group/chord patterns work

2. **Migrate scheduled tasks**
   - Setup TaskIQ scheduler
   - Convert beat schedule to TaskIQ schedules
   - Test cron patterns

### Phase 4: Testing & Validation (Week 4-5)
1. **Update test suite**
   - Replace Celery fixtures with TaskIQ fixtures
   - Update all test files
   - Ensure coverage remains the same

2. **Performance testing**
   - Compare task execution times
   - Monitor Redis memory usage
   - Test under load

### Phase 5: Cutover (Week 5-6)
1. **Final migration**
   - Remove Celery dependencies
   - Update deployment scripts
   - Update documentation

2. **Monitoring**
   - Setup TaskIQ monitoring (if available)
   - Create health checks
   - Document operational procedures

## Key Differences to Note

### Advantages of TaskIQ
1. **Native async support** - No need for `@asynctask` wrapper
2. **Dependency injection** - Cleaner than decorators for session management
3. **Type hints** - Better IDE support and autocompletion
4. **Modern Python** - Designed for Python 3.7+
5. **Simpler testing** - InMemoryBroker makes testing easier

### Potential Challenges
1. **Less mature ecosystem** - Fewer third-party integrations
2. **Documentation** - Less comprehensive than Celery
3. **Monitoring tools** - No Flower equivalent (may need custom solution)
4. **Community support** - Smaller community than Celery

## Command Line Changes

### Current (Celery)
```bash
# Start worker
celery -A reflector.worker.app worker --loglevel=info

# Start beat scheduler
celery -A reflector.worker.app beat
```

### New (TaskIQ)
```bash
# Start worker
taskiq worker reflector.worker.broker:broker

# Start scheduler
taskiq scheduler reflector.worker.scheduler:scheduler

# With custom settings
taskiq worker reflector.worker.broker:broker --workers 4 --log-level INFO
```

## Rollback Plan

If issues arise during migration:

1. **Keep Celery code in version control** - Tag the last Celery version
2. **Maintain dual broker setup** - Can switch back via environment variable
3. **Database compatibility** - No schema changes required
4. **Redis compatibility** - Both use Redis, easy to switch back

## Success Criteria

1. ✅ All tasks migrated and functioning
2. ✅ Test coverage maintained at current levels
3. ✅ Performance equal or better than Celery
4. ✅ Scheduled tasks running reliably
5. ✅ Error handling and retries working correctly
6. ✅ WebSocket notifications still functioning
7. ✅ Pipeline processing maintaining same behavior

## Monitoring & Operations

### Health Checks
```python
# reflector/worker/healthcheck.py
@broker.task
async def healthcheck_ping():
    """TaskIQ health check task"""
    return {"status": "healthy", "timestamp": datetime.now()}
```

### Metrics Collection
- Task execution times (see the timing sketch below)
- Success/failure rates
- Queue depths
- Worker utilization

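A minimal, broker-agnostic way to start collecting the first two metrics is a timing wrapper around task functions; a fuller setup would more likely hook a TaskIQ middleware or export to Prometheus instead. The decorator below is only a sketch and assumes the project logger accepts structlog-style keyword arguments, as it does elsewhere in the codebase.

```python
import functools
import time

from reflector.logger import logger


def timed_task(task_fn):
    """Log execution time and success/failure for an async task function."""

    @functools.wraps(task_fn)
    async def wrapper(*args, **kwargs):
        start = time.monotonic()
        try:
            result = await task_fn(*args, **kwargs)
            logger.info(
                "task succeeded",
                task=task_fn.__name__,
                duration_s=round(time.monotonic() - start, 3),
            )
            return result
        except Exception:
            logger.exception(
                "task failed",
                task=task_fn.__name__,
                duration_s=round(time.monotonic() - start, 3),
            )
            raise

    return wrapper


# Usage sketch: place it below @broker.task so the broker registers the timed wrapper.
# @broker.task
# @timed_task
# async def healthcheck_ping(): ...
```
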
## Key Implementation Points - MUST READ

### Critical Changes Required

1. **Session Management in Tasks**
   - ✅ **VERIFIED**: Tasks MUST use `get_session()` from `reflector.db` for test compatibility
   - ❌ Do NOT use `get_session_factory()` directly in tasks - it bypasses test mocks
   - ✅ The test database session IS properly shared when using `get_session()`

2. **Task Invocation Changes**
   - Replace `.delay()` with `await .kiq()`
   - All task invocations become async/await
   - No need to commit sessions before task invocation (controllers handle this)

3. **Broker Configuration**
   - TaskIQ broker must be initialized in `worker/app.py`
   - Use `InMemoryBroker(await_inplace=True)` for testing
   - Use `RedisStreamBroker` for production

4. **Test Setup Requirements**
   - Set `os.environ["ENVIRONMENT"] = "pytest"` at top of test files
   - Add TaskIQ broker fixture to test functions
   - Keep Celery fixtures for now (dual-mode operation)

5. **Import Pattern Changes**
   ```python
   # Each file needs both imports during migration
   from reflector.pipelines.main_file_pipeline import (
       task_pipeline_file_process,  # Celery version
       task_pipeline_file_process_taskiq,  # TaskIQ version
   )
   ```

6. **Decorator Changes**
   - Remove `@asynctask` - TaskIQ is async-native
   - **Keep `@with_session`** - it works with TaskIQ tasks!
   - Remove `@shared_task` from TaskIQ version
   - Keep `@shared_task` on Celery version for backward compatibility

## Verified POC Results

✅ **Database transactions work correctly** across test and TaskIQ tasks
✅ **Tasks execute immediately** in tests with `InMemoryBroker(await_inplace=True)`
✅ **Session mocking works** when using `get_session()` properly
✅ **"OK" output confirmed** - TaskIQ task executes and accesses test data

## Conclusion

The migration from Celery to TaskIQ is feasible and offers several advantages for an async-first codebase like Reflector. The key challenges will be:

1. Migrating complex pipeline patterns (chain/chord)
2. Ensuring scheduled task reliability
3. **SOLVED**: Maintaining session management patterns - use `get_session()`
4. Updating the test suite

The phased approach allows for gradual migration with minimal risk. The ability to run both systems in parallel provides a safety net during the transition period.

## Appendix: Quick Reference

| Celery | TaskIQ |
|--------|--------|
| `@shared_task` | `@broker.task` |
| `.delay()` | `.kiq()` |
| `.apply_async()` | `.kicker().kiq()` |
| `chain()` | `Pipeline()` |
| `group()` | `gather()` |
| `chord()` | `gather() + callback` |
| `@task.retry()` | `retry_on_error=True` |
| Celery Beat | TaskIQ Scheduler |
| `celery worker` | `taskiq worker` |
| Flower | Custom monitoring needed |

@@ -3,7 +3,7 @@ from logging.config import fileConfig
|
||||
from alembic import context
|
||||
from sqlalchemy import engine_from_config, pool
|
||||
|
||||
from reflector.db import metadata
|
||||
from reflector.db.base import metadata
|
||||
from reflector.settings import settings
|
||||
|
||||
# this is the Alembic Config object, which provides
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
"""add_action_items
|
||||
|
||||
Revision ID: 05f8688d6895
|
||||
Revises: bbafedfa510c
|
||||
Create Date: 2025-12-12 11:57:50.209658
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "05f8688d6895"
|
||||
down_revision: Union[str, None] = "bbafedfa510c"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
op.add_column("transcript", sa.Column("action_items", sa.JSON(), nullable=True))
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
op.drop_column("transcript", "action_items")
|
||||
@@ -23,14 +23,16 @@ def upgrade() -> None:
|
||||
op.drop_column("transcript", "search_vector_en")
|
||||
|
||||
# Recreate the search vector column with long_summary included
|
||||
op.execute("""
|
||||
op.execute(
|
||||
"""
|
||||
ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
|
||||
GENERATED ALWAYS AS (
|
||||
setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
|
||||
setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') ||
|
||||
setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')
|
||||
) STORED
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
# Recreate the GIN index for the search vector
|
||||
op.create_index(
|
||||
@@ -47,13 +49,15 @@ def downgrade() -> None:
|
||||
op.drop_column("transcript", "search_vector_en")
|
||||
|
||||
# Recreate the original search vector column without long_summary
|
||||
op.execute("""
|
||||
op.execute(
|
||||
"""
|
||||
ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
|
||||
GENERATED ALWAYS AS (
|
||||
setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
|
||||
setweight(to_tsvector('english', coalesce(webvtt, '')), 'B')
|
||||
) STORED
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
# Recreate the GIN index for the search vector
|
||||
op.create_index(
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
"""add workflow_run_id to transcript
|
||||
|
||||
Revision ID: 0f943fede0e0
|
||||
Revises: 05f8688d6895
|
||||
Create Date: 2025-12-16 01:54:13.855106
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "0f943fede0e0"
|
||||
down_revision: Union[str, None] = "05f8688d6895"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
with op.batch_alter_table("transcript", schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column("workflow_run_id", sa.String(), nullable=True))
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
with op.batch_alter_table("transcript", schema=None) as batch_op:
|
||||
batch_op.drop_column("workflow_run_id")
|
||||
@@ -21,13 +21,15 @@ def upgrade() -> None:
|
||||
if conn.dialect.name != "postgresql":
|
||||
return
|
||||
|
||||
op.execute("""
|
||||
op.execute(
|
||||
"""
|
||||
ALTER TABLE transcript ADD COLUMN search_vector_en tsvector
|
||||
GENERATED ALWAYS AS (
|
||||
setweight(to_tsvector('english', coalesce(title, '')), 'A') ||
|
||||
setweight(to_tsvector('english', coalesce(webvtt, '')), 'B')
|
||||
) STORED
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
op.create_index(
|
||||
"idx_transcript_search_vector_en",
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
"""add_platform_support
|
||||
|
||||
Revision ID: 1e49625677e4
|
||||
Revises: 9e3f7b2a4c8e
|
||||
Create Date: 2025-10-08 13:17:29.943612
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "1e49625677e4"
|
||||
down_revision: Union[str, None] = "9e3f7b2a4c8e"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
"""Add platform field with default 'whereby' for backward compatibility."""
|
||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"platform",
|
||||
sa.String(),
|
||||
nullable=True,
|
||||
server_default=None,
|
||||
)
|
||||
)
|
||||
|
||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"platform",
|
||||
sa.String(),
|
||||
nullable=False,
|
||||
server_default="whereby",
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
"""Remove platform field."""
|
||||
with op.batch_alter_table("meeting", schema=None) as batch_op:
|
||||
batch_op.drop_column("platform")
|
||||
|
||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
||||
batch_op.drop_column("platform")
|
||||
@@ -19,12 +19,14 @@ depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
def upgrade() -> None:
|
||||
# Set room_id to NULL for meetings that reference non-existent rooms
|
||||
op.execute("""
|
||||
op.execute(
|
||||
"""
|
||||
UPDATE meeting
|
||||
SET room_id = NULL
|
||||
WHERE room_id IS NOT NULL
|
||||
AND room_id NOT IN (SELECT id FROM room WHERE id IS NOT NULL)
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
|
||||
@@ -1,79 +0,0 @@
|
||||
"""add daily participant session table with immutable left_at
|
||||
|
||||
Revision ID: 2b92a1b03caa
|
||||
Revises: f8294b31f022
|
||||
Create Date: 2025-11-13 20:29:30.486577
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "2b92a1b03caa"
|
||||
down_revision: Union[str, None] = "f8294b31f022"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# Create table
|
||||
op.create_table(
|
||||
"daily_participant_session",
|
||||
sa.Column("id", sa.String(), nullable=False),
|
||||
sa.Column("meeting_id", sa.String(), nullable=False),
|
||||
sa.Column("room_id", sa.String(), nullable=False),
|
||||
sa.Column("session_id", sa.String(), nullable=False),
|
||||
sa.Column("user_id", sa.String(), nullable=True),
|
||||
sa.Column("user_name", sa.String(), nullable=False),
|
||||
sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
|
||||
sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
|
||||
sa.ForeignKeyConstraint(["meeting_id"], ["meeting.id"], ondelete="CASCADE"),
|
||||
sa.ForeignKeyConstraint(["room_id"], ["room.id"], ondelete="CASCADE"),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
)
|
||||
with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
|
||||
batch_op.create_index(
|
||||
"idx_daily_session_meeting_left", ["meeting_id", "left_at"], unique=False
|
||||
)
|
||||
batch_op.create_index("idx_daily_session_room", ["room_id"], unique=False)
|
||||
|
||||
# Create trigger function to prevent left_at from being updated once set
|
||||
op.execute("""
|
||||
CREATE OR REPLACE FUNCTION prevent_left_at_update()
|
||||
RETURNS TRIGGER AS $$
|
||||
BEGIN
|
||||
IF OLD.left_at IS NOT NULL THEN
|
||||
RAISE EXCEPTION 'left_at is immutable once set';
|
||||
END IF;
|
||||
RETURN NEW;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
""")
|
||||
|
||||
# Create trigger
|
||||
op.execute("""
|
||||
CREATE TRIGGER prevent_left_at_update_trigger
|
||||
BEFORE UPDATE ON daily_participant_session
|
||||
FOR EACH ROW
|
||||
EXECUTE FUNCTION prevent_left_at_update();
|
||||
""")
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
# Drop trigger
|
||||
op.execute(
|
||||
"DROP TRIGGER IF EXISTS prevent_left_at_update_trigger ON daily_participant_session;"
|
||||
)
|
||||
|
||||
# Drop trigger function
|
||||
op.execute("DROP FUNCTION IF EXISTS prevent_left_at_update();")
|
||||
|
||||
# Drop indexes and table
|
||||
with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
|
||||
batch_op.drop_index("idx_daily_session_room")
|
||||
batch_op.drop_index("idx_daily_session_meeting_left")
|
||||
|
||||
op.drop_table("daily_participant_session")
|
||||
@@ -28,7 +28,7 @@ def upgrade() -> None:
|
||||
transcript = table("transcript", column("id", sa.String), column("topics", sa.JSON))
|
||||
|
||||
# Select all rows from the transcript table
|
||||
results = bind.execute(select([transcript.c.id, transcript.c.topics]))
|
||||
results = bind.execute(select(transcript.c.id, transcript.c.topics))
|
||||
|
||||
for row in results:
|
||||
transcript_id = row["id"]
|
||||
@@ -58,7 +58,7 @@ def downgrade() -> None:
|
||||
transcript = table("transcript", column("id", sa.String), column("topics", sa.JSON))
|
||||
|
||||
# Select all rows from the transcript table
|
||||
results = bind.execute(select([transcript.c.id, transcript.c.topics]))
|
||||
results = bind.execute(select(transcript.c.id, transcript.c.topics))
|
||||
|
||||
for row in results:
|
||||
transcript_id = row["id"]
|
||||
|
||||
@@ -36,9 +36,7 @@ def upgrade() -> None:
|
||||
|
||||
# select only the one with duration = 0
|
||||
results = bind.execute(
|
||||
select([transcript.c.id, transcript.c.duration]).where(
|
||||
transcript.c.duration == 0
|
||||
)
|
||||
select(transcript.c.id, transcript.c.duration).where(transcript.c.duration == 0)
|
||||
)
|
||||
|
||||
data_dir = Path(settings.DATA_DIR)
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
"""Make room platform non-nullable with dynamic default
|
||||
|
||||
Revision ID: 5d6b9df9b045
|
||||
Revises: 2b92a1b03caa
|
||||
Create Date: 2025-11-21 13:22:25.756584
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "5d6b9df9b045"
|
||||
down_revision: Union[str, None] = "2b92a1b03caa"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
op.execute("UPDATE room SET platform = 'whereby' WHERE platform IS NULL")
|
||||
|
||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
||||
batch_op.alter_column("platform", existing_type=sa.String(), nullable=False)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
||||
batch_op.alter_column("platform", existing_type=sa.String(), nullable=True)
|
||||
@@ -28,7 +28,7 @@ def upgrade() -> None:
|
||||
transcript = table("transcript", column("id", sa.String), column("topics", sa.JSON))
|
||||
|
||||
# Select all rows from the transcript table
|
||||
results = bind.execute(select([transcript.c.id, transcript.c.topics]))
|
||||
results = bind.execute(select(transcript.c.id, transcript.c.topics))
|
||||
|
||||
for row in results:
|
||||
transcript_id = row["id"]
|
||||
@@ -58,7 +58,7 @@ def downgrade() -> None:
|
||||
transcript = table("transcript", column("id", sa.String), column("topics", sa.JSON))
|
||||
|
||||
# Select all rows from the transcript table
|
||||
results = bind.execute(select([transcript.c.id, transcript.c.topics]))
|
||||
results = bind.execute(select(transcript.c.id, transcript.c.topics))
|
||||
|
||||
for row in results:
|
||||
transcript_id = row["id"]
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
"""add user api keys
|
||||
|
||||
Revision ID: 9e3f7b2a4c8e
|
||||
Revises: dc035ff72fd5
|
||||
Create Date: 2025-10-17 00:00:00.000000
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "9e3f7b2a4c8e"
|
||||
down_revision: Union[str, None] = "dc035ff72fd5"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
op.create_table(
|
||||
"user_api_key",
|
||||
sa.Column("id", sa.String(), nullable=False),
|
||||
sa.Column("user_id", sa.String(), nullable=False),
|
||||
sa.Column("key_hash", sa.String(), nullable=False),
|
||||
sa.Column("name", sa.String(), nullable=True),
|
||||
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
)
|
||||
|
||||
with op.batch_alter_table("user_api_key", schema=None) as batch_op:
|
||||
batch_op.create_index("idx_user_api_key_hash", ["key_hash"], unique=True)
|
||||
batch_op.create_index("idx_user_api_key_user_id", ["user_id"], unique=False)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
op.drop_table("user_api_key")
|
||||
@@ -1,38 +0,0 @@
|
||||
"""add user table
|
||||
|
||||
Revision ID: bbafedfa510c
|
||||
Revises: 5d6b9df9b045
|
||||
Create Date: 2025-11-19 21:06:30.543262
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "bbafedfa510c"
|
||||
down_revision: Union[str, None] = "5d6b9df9b045"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
op.create_table(
|
||||
"user",
|
||||
sa.Column("id", sa.String(), nullable=False),
|
||||
sa.Column("email", sa.String(), nullable=False),
|
||||
sa.Column("authentik_uid", sa.String(), nullable=False),
|
||||
sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
|
||||
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
|
||||
sa.PrimaryKeyConstraint("id"),
|
||||
)
|
||||
|
||||
with op.batch_alter_table("user", schema=None) as batch_op:
|
||||
batch_op.create_index("idx_user_authentik_uid", ["authentik_uid"], unique=True)
|
||||
batch_op.create_index("idx_user_email", ["email"], unique=False)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
op.drop_table("user")
|
||||
@@ -1,35 +0,0 @@
|
||||
"""add use_hatchet to room
|
||||
|
||||
Revision ID: bd3a729bb379
|
||||
Revises: 0f943fede0e0
|
||||
Create Date: 2025-12-16 16:34:03.594231
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "bd3a729bb379"
|
||||
down_revision: Union[str, None] = "0f943fede0e0"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
||||
batch_op.add_column(
|
||||
sa.Column(
|
||||
"use_hatchet",
|
||||
sa.Boolean(),
|
||||
server_default=sa.text("false"),
|
||||
nullable=False,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
with op.batch_alter_table("room", schema=None) as batch_op:
|
||||
batch_op.drop_column("use_hatchet")
|
||||
@@ -27,7 +27,8 @@ def upgrade() -> None:
|
||||
|
||||
# Populate room_id for existing ROOM-type transcripts
|
||||
# This joins through recording -> meeting -> room to get the room_id
|
||||
op.execute("""
|
||||
op.execute(
|
||||
"""
|
||||
UPDATE transcript AS t
|
||||
SET room_id = r.id
|
||||
FROM recording rec
|
||||
@@ -36,11 +37,13 @@ def upgrade() -> None:
|
||||
WHERE t.recording_id = rec.id
|
||||
AND t.source_kind = 'room'
|
||||
AND t.room_id IS NULL
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
# Fix missing meeting_id for ROOM-type transcripts
|
||||
# The meeting_id field exists but was never populated
|
||||
op.execute("""
|
||||
op.execute(
|
||||
"""
|
||||
UPDATE transcript AS t
|
||||
SET meeting_id = rec.meeting_id
|
||||
FROM recording rec
|
||||
@@ -48,7 +51,8 @@ def upgrade() -> None:
|
||||
AND t.source_kind = 'room'
|
||||
AND t.meeting_id IS NULL
|
||||
AND rec.meeting_id IS NOT NULL
|
||||
""")
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
"""add_track_keys
|
||||
|
||||
Revision ID: f8294b31f022
|
||||
Revises: 1e49625677e4
|
||||
Create Date: 2025-10-27 18:52:17.589167
|
||||
|
||||
"""
|
||||
|
||||
from typing import Sequence, Union
|
||||
|
||||
import sqlalchemy as sa
|
||||
from alembic import op
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = "f8294b31f022"
|
||||
down_revision: Union[str, None] = "1e49625677e4"
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
with op.batch_alter_table("recording", schema=None) as batch_op:
|
||||
batch_op.add_column(sa.Column("track_keys", sa.JSON(), nullable=True))
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
with op.batch_alter_table("recording", schema=None) as batch_op:
|
||||
batch_op.drop_column("track_keys")
|
||||
@@ -19,14 +19,13 @@ dependencies = [
|
||||
"sentry-sdk[fastapi]>=1.29.2",
|
||||
"httpx>=0.24.1",
|
||||
"fastapi-pagination>=0.12.6",
|
||||
"databases[aiosqlite, asyncpg]>=0.7.0",
|
||||
"sqlalchemy<1.5",
|
||||
"sqlalchemy>=2.0.0",
|
||||
"asyncpg>=0.29.0",
|
||||
"alembic>=1.11.3",
|
||||
"nltk>=3.8.1",
|
||||
"prometheus-fastapi-instrumentator>=6.1.0",
|
||||
"sentencepiece>=0.1.99",
|
||||
"protobuf>=4.24.3",
|
||||
"celery>=5.3.4",
|
||||
"redis>=5.0.1",
|
||||
"python-jose[cryptography]>=3.3.0",
|
||||
"python-multipart>=0.0.6",
|
||||
@@ -39,7 +38,8 @@ dependencies = [
|
||||
"pytest-env>=1.1.5",
|
||||
"webvtt-py>=0.5.0",
|
||||
"icalendar>=6.0.0",
|
||||
"hatchet-sdk>=0.47.0",
|
||||
"taskiq>=0.11.18",
|
||||
"taskiq-redis>=1.1.0",
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
@@ -47,6 +47,7 @@ dev = [
|
||||
"black>=24.1.1",
|
||||
"stamina>=23.1.0",
|
||||
"pyinstrument>=4.6.1",
|
||||
"pytest-async-sqlalchemy>=0.2.0",
|
||||
]
|
||||
tests = [
|
||||
"pytest-cov>=4.1.0",
|
||||
@@ -55,7 +56,6 @@ tests = [
|
||||
"pytest>=7.4.0",
|
||||
"httpx-ws>=0.4.1",
|
||||
"pytest-httpx>=0.23.1",
|
||||
"pytest-celery>=0.0.0",
|
||||
"pytest-recording>=0.13.4",
|
||||
"pytest-docker>=3.2.3",
|
||||
"asgi-lifespan>=2.1.0",
|
||||
@@ -112,13 +112,15 @@ source = ["reflector"]
|
||||
|
||||
[tool.pytest_env]
|
||||
ENVIRONMENT = "pytest"
|
||||
DATABASE_URL = "postgresql://test_user:test_password@localhost:15432/reflector_test"
|
||||
AUTH_BACKEND = "jwt"
|
||||
DATABASE_URL = "postgresql+asyncpg://test_user:test_password@localhost:15432/reflector_test"
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
addopts = "-ra -q --disable-pytest-warnings --cov --cov-report html -v"
|
||||
testpaths = ["tests"]
|
||||
asyncio_mode = "auto"
|
||||
asyncio_debug = true
|
||||
asyncio_default_fixture_loop_scope = "session"
|
||||
asyncio_default_test_loop_scope = "session"
|
||||
markers = [
|
||||
"model_api: tests for the unified model-serving HTTP API (backend- and hardware-agnostic)",
|
||||
]
|
||||
@@ -127,7 +129,6 @@ markers = [
|
||||
select = [
|
||||
"I", # isort - import sorting
|
||||
"F401", # unused imports
|
||||
"E402", # module level import not at top of file
|
||||
"PLC0415", # import-outside-top-level - detect inline imports
|
||||
]
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@ from reflector.events import subscribers_shutdown, subscribers_startup
|
||||
from reflector.logger import logger
|
||||
from reflector.metrics import metrics_init
|
||||
from reflector.settings import settings
|
||||
from reflector.views.daily import router as daily_router
|
||||
from reflector.views.meetings import router as meetings_router
|
||||
from reflector.views.rooms import router as rooms_router
|
||||
from reflector.views.rtc_offer import router as rtc_offer_router
|
||||
@@ -27,8 +26,6 @@ from reflector.views.transcripts_upload import router as transcripts_upload_rout
|
||||
from reflector.views.transcripts_webrtc import router as transcripts_webrtc_router
|
||||
from reflector.views.transcripts_websocket import router as transcripts_websocket_router
|
||||
from reflector.views.user import router as user_router
|
||||
from reflector.views.user_api_keys import router as user_api_keys_router
|
||||
from reflector.views.user_websocket import router as user_ws_router
|
||||
from reflector.views.whereby import router as whereby_router
|
||||
from reflector.views.zulip import router as zulip_router
|
||||
|
||||
@@ -68,12 +65,6 @@ app.add_middleware(
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
|
||||
@app.get("/health")
|
||||
async def health():
|
||||
return {"status": "healthy"}
|
||||
|
||||
|
||||
# metrics
|
||||
instrumentator = Instrumentator(
|
||||
excluded_handlers=["/docs", "/metrics"],
|
||||
@@ -93,15 +84,12 @@ app.include_router(transcripts_websocket_router, prefix="/v1")
|
||||
app.include_router(transcripts_webrtc_router, prefix="/v1")
|
||||
app.include_router(transcripts_process_router, prefix="/v1")
|
||||
app.include_router(user_router, prefix="/v1")
|
||||
app.include_router(user_api_keys_router, prefix="/v1")
|
||||
app.include_router(user_ws_router, prefix="/v1")
|
||||
app.include_router(zulip_router, prefix="/v1")
|
||||
app.include_router(whereby_router, prefix="/v1")
|
||||
app.include_router(daily_router, prefix="/v1/daily")
|
||||
add_pagination(app)
|
||||
|
||||
# prepare celery
|
||||
from reflector.worker import app as celery_app # noqa
|
||||
# prepare taskiq
|
||||
from reflector.worker import app as taskiq_app # noqa
|
||||
|
||||
|
||||
# simpler openapi id
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
import asyncio
|
||||
import functools
|
||||
from uuid import uuid4
|
||||
|
||||
from celery import current_task
|
||||
|
||||
from reflector.db import get_database
|
||||
from reflector.llm import llm_session_id
|
||||
|
||||
|
||||
def asynctask(f):
|
||||
@functools.wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
async def run_with_db():
|
||||
task_id = current_task.request.id if current_task else None
|
||||
llm_session_id.set(task_id or f"random-{uuid4().hex}")
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
try:
|
||||
return await f(*args, **kwargs)
|
||||
finally:
|
||||
await database.disconnect()
|
||||
|
||||
coro = run_with_db()
|
||||
try:
|
||||
loop = asyncio.get_running_loop()
|
||||
except RuntimeError:
|
||||
loop = None
|
||||
if loop and loop.is_running():
|
||||
return loop.run_until_complete(coro)
|
||||
return asyncio.run(coro)
|
||||
|
||||
return wrapper
|
||||
@@ -1,18 +1,14 @@
|
||||
from typing import Annotated, List, Optional
|
||||
from typing import Annotated, Optional
|
||||
|
||||
from fastapi import Depends, HTTPException
|
||||
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
|
||||
from fastapi.security import OAuth2PasswordBearer
|
||||
from jose import JWTError, jwt
|
||||
from pydantic import BaseModel
|
||||
|
||||
from reflector.db.user_api_keys import user_api_keys_controller
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.logger import logger
|
||||
from reflector.settings import settings
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
|
||||
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
|
||||
|
||||
jwt_public_key = open(f"reflector/auth/jwt/keys/{settings.AUTH_JWT_PUBLIC_KEY}").read()
|
||||
jwt_algorithm = settings.AUTH_JWT_ALGORITHM
|
||||
@@ -30,7 +26,7 @@ class JWTException(Exception):
|
||||
|
||||
class UserInfo(BaseModel):
|
||||
sub: str
|
||||
email: Optional[str] = None
|
||||
email: str
|
||||
|
||||
def __getitem__(self, key):
|
||||
return getattr(self, key)
|
||||
@@ -62,65 +58,34 @@ def authenticated(token: Annotated[str, Depends(oauth2_scheme)]):
|
||||
return None
|
||||
|
||||
|
||||
async def _authenticate_user(
|
||||
jwt_token: Optional[str],
|
||||
api_key: Optional[str],
|
||||
jwtauth: JWTAuth,
|
||||
) -> UserInfo | None:
|
||||
user_infos: List[UserInfo] = []
|
||||
if api_key:
|
||||
user_api_key = await user_api_keys_controller.verify_key(api_key)
|
||||
if user_api_key:
|
||||
user_infos.append(UserInfo(sub=user_api_key.user_id, email=None))
|
||||
|
||||
if jwt_token:
|
||||
try:
|
||||
payload = jwtauth.verify_token(jwt_token)
|
||||
authentik_uid = payload["sub"]
|
||||
email = payload["email"]
|
||||
|
||||
user = await user_controller.get_by_authentik_uid(authentik_uid)
|
||||
if not user:
|
||||
logger.info(
|
||||
f"Creating new user on first login: {authentik_uid} ({email})"
|
||||
)
|
||||
user = await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid=authentik_uid,
|
||||
email=email,
|
||||
)
|
||||
|
||||
user_infos.append(UserInfo(sub=user.id, email=email))
|
||||
except JWTError as e:
|
||||
logger.error(f"JWT error: {e}")
|
||||
raise HTTPException(status_code=401, detail="Invalid authentication")
|
||||
|
||||
if len(user_infos) == 0:
|
||||
return None
|
||||
|
||||
if len(set([x.sub for x in user_infos])) > 1:
|
||||
raise JWTException(
|
||||
status_code=401,
|
||||
detail="Invalid authentication: more than one user provided",
|
||||
)
|
||||
|
||||
return user_infos[0]
|
||||
|
||||
|
||||
async def current_user(
|
||||
jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
|
||||
api_key: Annotated[Optional[str], Depends(api_key_header)],
|
||||
def current_user(
|
||||
token: Annotated[Optional[str], Depends(oauth2_scheme)],
|
||||
jwtauth: JWTAuth = Depends(),
|
||||
):
|
||||
user = await _authenticate_user(jwt_token, api_key, jwtauth)
|
||||
if user is None:
|
||||
if token is None:
|
||||
raise HTTPException(status_code=401, detail="Not authenticated")
|
||||
return user
|
||||
try:
|
||||
payload = jwtauth.verify_token(token)
|
||||
sub = payload["sub"]
|
||||
email = payload["email"]
|
||||
return UserInfo(sub=sub, email=email)
|
||||
except JWTError as e:
|
||||
logger.error(f"JWT error: {e}")
|
||||
raise HTTPException(status_code=401, detail="Invalid authentication")
|
||||
|
||||
|
||||
async def current_user_optional(
|
||||
jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
|
||||
api_key: Annotated[Optional[str], Depends(api_key_header)],
|
||||
def current_user_optional(
|
||||
token: Annotated[Optional[str], Depends(oauth2_scheme)],
|
||||
jwtauth: JWTAuth = Depends(),
|
||||
):
|
||||
return await _authenticate_user(jwt_token, api_key, jwtauth)
|
||||
# we accept no token, but if one is provided, it must be a valid one.
|
||||
if token is None:
|
||||
return None
|
||||
try:
|
||||
payload = jwtauth.verify_token(token)
|
||||
sub = payload["sub"]
|
||||
email = payload["email"]
|
||||
return UserInfo(sub=sub, email=email)
|
||||
except JWTError as e:
|
||||
logger.error(f"JWT error: {e}")
|
||||
raise HTTPException(status_code=401, detail="Invalid authentication")
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
anything about Daily.co api interaction
|
||||
|
||||
- webhook event shapes
|
||||
- REST api client
|
||||
|
||||
No REST api client existing found in the wild; the official lib is about working with videocall as a bot
|
||||
@@ -1,110 +0,0 @@
|
||||
"""
|
||||
Daily.co API Module
|
||||
"""
|
||||
|
||||
# Client
|
||||
from .client import DailyApiClient, DailyApiError
|
||||
|
||||
# Request models
|
||||
from .requests import (
|
||||
CreateMeetingTokenRequest,
|
||||
CreateRoomRequest,
|
||||
CreateWebhookRequest,
|
||||
MeetingTokenProperties,
|
||||
RecordingsBucketConfig,
|
||||
RoomProperties,
|
||||
UpdateWebhookRequest,
|
||||
)
|
||||
|
||||
# Response models
|
||||
from .responses import (
|
||||
FinishedRecordingResponse,
|
||||
MeetingParticipant,
|
||||
MeetingParticipantsResponse,
|
||||
MeetingResponse,
|
||||
MeetingTokenResponse,
|
||||
RecordingResponse,
|
||||
RecordingS3Info,
|
||||
RoomPresenceParticipant,
|
||||
RoomPresenceResponse,
|
||||
RoomResponse,
|
||||
WebhookResponse,
|
||||
)
|
||||
|
||||
# Webhook utilities
|
||||
from .webhook_utils import (
|
||||
extract_room_name,
|
||||
parse_participant_joined,
|
||||
parse_participant_left,
|
||||
parse_recording_error,
|
||||
parse_recording_ready,
|
||||
parse_recording_started,
|
||||
parse_webhook_payload,
|
||||
verify_webhook_signature,
|
||||
)
|
||||
|
||||
# Webhook models
|
||||
from .webhooks import (
|
||||
DailyTrack,
|
||||
DailyWebhookEvent,
|
||||
DailyWebhookEventUnion,
|
||||
ParticipantJoinedEvent,
|
||||
ParticipantJoinedPayload,
|
||||
ParticipantLeftEvent,
|
||||
ParticipantLeftPayload,
|
||||
RecordingErrorEvent,
|
||||
RecordingErrorPayload,
|
||||
RecordingReadyEvent,
|
||||
RecordingReadyToDownloadPayload,
|
||||
RecordingStartedEvent,
|
||||
RecordingStartedPayload,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Client
|
||||
"DailyApiClient",
|
||||
"DailyApiError",
|
||||
# Requests
|
||||
"CreateRoomRequest",
|
||||
"RoomProperties",
|
||||
"RecordingsBucketConfig",
|
||||
"CreateMeetingTokenRequest",
|
||||
"MeetingTokenProperties",
|
||||
"CreateWebhookRequest",
|
||||
"UpdateWebhookRequest",
|
||||
# Responses
|
||||
"RoomResponse",
|
||||
"RoomPresenceResponse",
|
||||
"RoomPresenceParticipant",
|
||||
"MeetingParticipantsResponse",
|
||||
"MeetingParticipant",
|
||||
"MeetingResponse",
|
||||
"RecordingResponse",
|
||||
"FinishedRecordingResponse",
|
||||
"RecordingS3Info",
|
||||
"MeetingTokenResponse",
|
||||
"WebhookResponse",
|
||||
# Webhooks
|
||||
"DailyWebhookEvent",
|
||||
"DailyWebhookEventUnion",
|
||||
"DailyTrack",
|
||||
"ParticipantJoinedEvent",
|
||||
"ParticipantJoinedPayload",
|
||||
"ParticipantLeftEvent",
|
||||
"ParticipantLeftPayload",
|
||||
"RecordingStartedEvent",
|
||||
"RecordingStartedPayload",
|
||||
"RecordingReadyEvent",
|
||||
"RecordingReadyToDownloadPayload",
|
||||
"RecordingErrorEvent",
|
||||
"RecordingErrorPayload",
|
||||
# Webhook utilities
|
||||
"verify_webhook_signature",
|
||||
"extract_room_name",
|
||||
"parse_webhook_payload",
|
||||
"parse_participant_joined",
|
||||
"parse_participant_left",
|
||||
"parse_recording_started",
|
||||
"parse_recording_ready",
|
||||
"parse_recording_error",
|
||||
]
|
||||
@@ -1,573 +0,0 @@
|
||||
"""
|
||||
Daily.co API Client
|
||||
|
||||
Complete async client for Daily.co REST API with Pydantic models.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api
|
||||
"""
|
||||
|
||||
from http import HTTPStatus
|
||||
from typing import Any
|
||||
|
||||
import httpx
|
||||
import structlog
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
from .requests import (
|
||||
CreateMeetingTokenRequest,
|
||||
CreateRoomRequest,
|
||||
CreateWebhookRequest,
|
||||
UpdateWebhookRequest,
|
||||
)
|
||||
from .responses import (
|
||||
MeetingParticipantsResponse,
|
||||
MeetingResponse,
|
||||
MeetingTokenResponse,
|
||||
RecordingResponse,
|
||||
RoomPresenceResponse,
|
||||
RoomResponse,
|
||||
WebhookResponse,
|
||||
)
|
||||
|
||||
logger = structlog.get_logger(__name__)
|
||||
|
||||
|
||||
class DailyApiError(Exception):
|
||||
"""Daily.co API error with full request/response context."""
|
||||
|
||||
def __init__(self, operation: str, response: httpx.Response):
|
||||
self.operation = operation
|
||||
self.response = response
|
||||
self.status_code = response.status_code
|
||||
self.response_body = response.text
|
||||
self.url = str(response.url)
|
||||
self.request_body = (
|
||||
response.request.content.decode() if response.request.content else None
|
||||
)
|
||||
|
||||
super().__init__(
|
||||
f"Daily.co API error: {operation} failed with status {self.status_code}: {response.text}"
|
||||
)
|
||||
|
||||
|
||||
class DailyApiClient:
|
||||
"""
|
||||
Complete async client for Daily.co REST API.
|
||||
|
||||
Usage:
|
||||
# Direct usage
|
||||
client = DailyApiClient(api_key="your_api_key")
|
||||
room = await client.create_room(CreateRoomRequest(name="my-room"))
|
||||
await client.close() # Clean up when done
|
||||
|
||||
# Context manager (recommended)
|
||||
async with DailyApiClient(api_key="your_api_key") as client:
|
||||
room = await client.create_room(CreateRoomRequest(name="my-room"))
|
||||
"""
|
||||
|
||||
BASE_URL = "https://api.daily.co/v1"
|
||||
DEFAULT_TIMEOUT = 10.0
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_key: NonEmptyString,
|
||||
webhook_secret: NonEmptyString | None = None,
|
||||
timeout: float = DEFAULT_TIMEOUT,
|
||||
base_url: NonEmptyString | None = None,
|
||||
):
|
||||
"""
|
||||
Initialize Daily.co API client.
|
||||
|
||||
Args:
|
||||
api_key: Daily.co API key (Bearer token)
|
||||
webhook_secret: Base64-encoded HMAC secret for webhook verification.
|
||||
Must match the 'hmac' value provided when creating webhooks.
|
||||
Generate with: base64.b64encode(os.urandom(32)).decode()
|
||||
timeout: Default request timeout in seconds
|
||||
base_url: Override base URL (for testing)
|
||||
"""
|
||||
self.api_key = api_key
|
||||
self.webhook_secret = webhook_secret
|
||||
self.timeout = timeout
|
||||
self.base_url = base_url or self.BASE_URL
|
||||
|
||||
self.headers = {
|
||||
"Authorization": f"Bearer {api_key}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
self._client: httpx.AsyncClient | None = None
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
await self.close()
|
||||
|
||||
async def _get_client(self) -> httpx.AsyncClient:
|
||||
if self._client is None:
|
||||
self._client = httpx.AsyncClient(timeout=self.timeout)
|
||||
return self._client
|
||||
|
||||
async def close(self):
|
||||
if self._client is not None:
|
||||
await self._client.aclose()
|
||||
self._client = None
|
||||
|
||||
async def _handle_response(
|
||||
self, response: httpx.Response, operation: str
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Handle API response with error logging.
|
||||
|
||||
Args:
|
||||
response: HTTP response
|
||||
operation: Operation name for logging (e.g., "create_room")
|
||||
|
||||
Returns:
|
||||
Parsed JSON response
|
||||
|
||||
Raises:
|
||||
DailyApiError: If request failed with full context
|
||||
"""
|
||||
if response.status_code >= 400:
|
||||
logger.error(
|
||||
f"Daily.co API error: {operation}",
|
||||
status_code=response.status_code,
|
||||
response_body=response.text,
|
||||
request_body=response.request.content.decode()
|
||||
if response.request.content
|
||||
else None,
|
||||
url=str(response.url),
|
||||
)
|
||||
raise DailyApiError(operation, response)
|
||||
|
||||
return response.json()
|
||||
|
||||
# ============================================================================
|
||||
# ROOMS
|
||||
# ============================================================================
|
||||
|
||||
async def create_room(self, request: CreateRoomRequest) -> RoomResponse:
|
||||
"""
|
||||
Create a new Daily.co room.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
||||
|
||||
Args:
|
||||
request: Room creation request with name, privacy, and properties
|
||||
|
||||
Returns:
|
||||
Created room data including URL and ID
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.post(
|
||||
f"{self.base_url}/rooms",
|
||||
headers=self.headers,
|
||||
json=request.model_dump(exclude_none=True),
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "create_room")
|
||||
return RoomResponse(**data)
|
||||
|
||||
async def get_room(self, room_name: NonEmptyString) -> RoomResponse:
|
||||
"""
|
||||
Get room configuration.
|
||||
|
||||
Args:
|
||||
room_name: Daily.co room name
|
||||
|
||||
Returns:
|
||||
Room configuration data
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/rooms/{room_name}",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "get_room")
|
||||
return RoomResponse(**data)
|
||||
|
||||
async def get_room_presence(
|
||||
self, room_name: NonEmptyString
|
||||
) -> RoomPresenceResponse:
|
||||
"""
|
||||
Get current participants in a room (real-time presence).
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
|
||||
|
||||
Args:
|
||||
room_name: Daily.co room name
|
||||
|
||||
Returns:
|
||||
List of currently present participants with join time and duration
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/rooms/{room_name}/presence",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "get_room_presence")
|
||||
return RoomPresenceResponse(**data)
|
||||
|
||||
async def delete_room(self, room_name: NonEmptyString) -> None:
|
||||
"""
|
||||
Delete a room (idempotent - succeeds even if room doesn't exist).
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/delete-room
|
||||
|
||||
Args:
|
||||
room_name: Daily.co room name
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails (404 is treated as already deleted)
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.delete(
|
||||
f"{self.base_url}/rooms/{room_name}",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
# Idempotent delete - 404 means already deleted
|
||||
if response.status_code == HTTPStatus.NOT_FOUND:
|
||||
logger.debug("Room not found (already deleted)", room_name=room_name)
|
||||
return
|
||||
|
||||
await self._handle_response(response, "delete_room")
|
||||
|
||||
# ============================================================================
|
||||
# MEETINGS
|
||||
# ============================================================================
|
||||
|
||||
async def get_meeting(self, meeting_id: NonEmptyString) -> MeetingResponse:
|
||||
"""
|
||||
Get full meeting information including participants.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
|
||||
|
||||
Args:
|
||||
meeting_id: Daily.co meeting/session ID
|
||||
|
||||
Returns:
|
||||
Meeting metadata including room, duration, participants, and status
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/meetings/{meeting_id}",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "get_meeting")
|
||||
return MeetingResponse(**data)
|
||||
|
||||
async def get_meeting_participants(
|
||||
self,
|
||||
meeting_id: NonEmptyString,
|
||||
limit: int | None = None,
|
||||
joined_after: NonEmptyString | None = None,
|
||||
joined_before: NonEmptyString | None = None,
|
||||
) -> MeetingParticipantsResponse:
|
||||
"""
|
||||
Get historical participant data from a completed meeting (paginated).
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
|
||||
|
||||
Args:
|
||||
meeting_id: Daily.co meeting/session ID
|
||||
limit: Maximum number of participant records to return
|
||||
joined_after: Return participants who joined after this participant_id
|
||||
joined_before: Return participants who joined before this participant_id
|
||||
|
||||
Returns:
|
||||
List of participants with join times and duration
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails (404 when no more participants remain)
|
||||
|
||||
Note:
|
||||
For pagination, use joined_after with the last participant_id from previous response.
|
||||
Returns 404 when no more participants remain.
|
||||
"""
|
||||
params = {}
|
||||
if limit is not None:
|
||||
params["limit"] = limit
|
||||
if joined_after is not None:
|
||||
params["joined_after"] = joined_after
|
||||
if joined_before is not None:
|
||||
params["joined_before"] = joined_before
|
||||
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/meetings/{meeting_id}/participants",
|
||||
headers=self.headers,
|
||||
params=params,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "get_meeting_participants")
|
||||
return MeetingParticipantsResponse(**data)
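# Illustrative sketch (not part of the original client): paging through every
# participant of a meeting using joined_after as the cursor, as described in
# the Note above. Assumes DailyApiError exposes status_code and that a 404
# signals the end of the data.
#
#   async def fetch_all_participants(
#       client: DailyApiClient, meeting_id: str
#   ) -> list[MeetingParticipant]:
#       participants: list[MeetingParticipant] = []
#       cursor: str | None = None
#       while True:
#           try:
#               page = await client.get_meeting_participants(
#                   meeting_id, limit=100, joined_after=cursor
#               )
#           except DailyApiError as exc:
#               if exc.status_code == HTTPStatus.NOT_FOUND:
#                   break  # no more participants
#               raise
#           if not page.data:
#               break
#           participants.extend(page.data)
#           cursor = page.data[-1].participant_id
#       return participants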
|
||||
|
||||
# ============================================================================
|
||||
# RECORDINGS
|
||||
# ============================================================================
|
||||
|
||||
async def get_recording(self, recording_id: NonEmptyString) -> RecordingResponse:
|
||||
"""
|
||||
Get recording metadata and status.

Reference: https://docs.daily.co/reference/rest-api/recordings/get-recording-information
"""
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/recordings/{recording_id}",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "get_recording")
|
||||
return RecordingResponse(**data)
|
||||
|
||||
async def list_recordings(
|
||||
self,
|
||||
room_name: NonEmptyString | None = None,
|
||||
starting_after: str | None = None,
|
||||
ending_before: str | None = None,
|
||||
limit: int = 100,
|
||||
) -> list[RecordingResponse]:
|
||||
"""
|
||||
List recordings with optional filters.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
||||
|
||||
Args:
|
||||
room_name: Filter by room name
|
||||
starting_after: Pagination cursor - recording ID to start after
|
||||
ending_before: Pagination cursor - recording ID to end before
|
||||
limit: Max results per page (default 100, max 100)
|
||||
|
||||
Note: starting_after/ending_before are pagination cursors (recording IDs),
|
||||
NOT time filters. API returns recordings in reverse chronological order.
|
||||
"""
|
||||
client = await self._get_client()
|
||||
|
||||
params = {"limit": limit}
|
||||
if room_name:
|
||||
params["room_name"] = room_name
|
||||
if starting_after:
|
||||
params["starting_after"] = starting_after
|
||||
if ending_before:
|
||||
params["ending_before"] = ending_before
|
||||
|
||||
response = await client.get(
|
||||
f"{self.base_url}/recordings",
|
||||
headers=self.headers,
|
||||
params=params,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "list_recordings")
|
||||
|
||||
if not isinstance(data, dict) or "data" not in data:
|
||||
logger.error(
|
||||
"Daily.co API returned unexpected format for list_recordings",
|
||||
data_type=type(data).__name__,
|
||||
data_keys=list(data.keys()) if isinstance(data, dict) else None,
|
||||
data_sample=str(data)[:500],
|
||||
room_name=room_name,
|
||||
operation="list_recordings",
|
||||
)
|
||||
raise httpx.HTTPStatusError(
|
||||
message=f"Unexpected response format from list_recordings: {type(data).__name__}",
|
||||
request=response.request,
|
||||
response=response,
|
||||
)
|
||||
|
||||
return [RecordingResponse(**r) for r in data["data"]]
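# Illustrative sketch (not part of the original client): walking a room's
# recordings page by page, using the last recording id as the starting_after
# cursor described in the Note above.
#
#   async def iter_all_recordings(client: DailyApiClient, room: str):
#       cursor: str | None = None
#       while True:
#           page = await client.list_recordings(
#               room_name=room, starting_after=cursor, limit=100
#           )
#           if not page:
#               break
#           for recording in page:
#               yield recording
#           cursor = page[-1].id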
|
||||
|
||||
# ============================================================================
|
||||
# MEETING TOKENS
|
||||
# ============================================================================
|
||||
|
||||
async def create_meeting_token(
|
||||
self, request: CreateMeetingTokenRequest
|
||||
) -> MeetingTokenResponse:
|
||||
"""
|
||||
Create a meeting token for participant authentication.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
||||
|
||||
Args:
|
||||
request: Token properties including room name, user_id, permissions
|
||||
|
||||
Returns:
|
||||
JWT meeting token
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.post(
|
||||
f"{self.base_url}/meeting-tokens",
|
||||
headers=self.headers,
|
||||
json=request.model_dump(exclude_none=True),
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "create_meeting_token")
|
||||
return MeetingTokenResponse(**data)
|
||||
|
||||
# ============================================================================
|
||||
# WEBHOOKS
|
||||
# ============================================================================
|
||||
|
||||
async def list_webhooks(self) -> list[WebhookResponse]:
|
||||
"""
|
||||
List all configured webhooks for this account.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
|
||||
Returns:
|
||||
List of webhook configurations
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/webhooks",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "list_webhooks")
|
||||
|
||||
# Daily.co returns array directly (not paginated)
|
||||
if isinstance(data, list):
|
||||
return [WebhookResponse(**wh) for wh in data]
|
||||
|
||||
# Future-proof: handle potential pagination envelope
|
||||
if isinstance(data, dict) and "data" in data:
|
||||
return [WebhookResponse(**wh) for wh in data["data"]]
|
||||
|
||||
logger.warning("Unexpected webhook list response format", data=data)
|
||||
return []
|
||||
|
||||
async def create_webhook(self, request: CreateWebhookRequest) -> WebhookResponse:
|
||||
"""
|
||||
Create a new webhook subscription.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
|
||||
Args:
|
||||
request: Webhook configuration with URL, event types, and HMAC secret
|
||||
|
||||
Returns:
|
||||
Created webhook with UUID and state
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.post(
|
||||
f"{self.base_url}/webhooks",
|
||||
headers=self.headers,
|
||||
json=request.model_dump(exclude_none=True),
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "create_webhook")
|
||||
return WebhookResponse(**data)
|
||||
|
||||
async def update_webhook(
|
||||
self, webhook_uuid: NonEmptyString, request: UpdateWebhookRequest
|
||||
) -> WebhookResponse:
|
||||
"""
|
||||
Update webhook configuration.
|
||||
|
||||
Note: Daily.co may not support PATCH for all fields.
|
||||
Common pattern is delete + recreate.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
|
||||
Args:
|
||||
webhook_uuid: Webhook UUID to update
|
||||
request: Updated webhook configuration
|
||||
|
||||
Returns:
|
||||
Updated webhook configuration
|
||||
|
||||
Raises:
|
||||
DailyApiError: If API request fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.patch(
|
||||
f"{self.base_url}/webhooks/{webhook_uuid}",
|
||||
headers=self.headers,
|
||||
json=request.model_dump(exclude_none=True),
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "update_webhook")
|
||||
return WebhookResponse(**data)
|
||||
|
||||
async def delete_webhook(self, webhook_uuid: NonEmptyString) -> None:
|
||||
"""
|
||||
Delete a webhook.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
|
||||
Args:
|
||||
webhook_uuid: Webhook UUID to delete
|
||||
|
||||
Raises:
|
||||
DailyApiError: If webhook not found or deletion fails
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.delete(
|
||||
f"{self.base_url}/webhooks/{webhook_uuid}",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
await self._handle_response(response, "delete_webhook")
|
||||
|
||||
# ============================================================================
|
||||
# HELPER METHODS
|
||||
# ============================================================================
|
||||
|
||||
async def find_webhook_by_url(self, url: NonEmptyString) -> WebhookResponse | None:
|
||||
"""
|
||||
Find a webhook by its URL.
|
||||
|
||||
Args:
|
||||
url: Webhook endpoint URL to search for
|
||||
|
||||
Returns:
|
||||
Webhook if found, None otherwise
|
||||
"""
|
||||
webhooks = await self.list_webhooks()
|
||||
for webhook in webhooks:
|
||||
if webhook.url == url:
|
||||
return webhook
|
||||
return None
|
||||
|
||||
async def find_webhooks_by_pattern(
|
||||
self, pattern: NonEmptyString
|
||||
) -> list[WebhookResponse]:
|
||||
"""
|
||||
Find webhooks matching a URL pattern (e.g., 'ngrok').
|
||||
|
||||
Args:
|
||||
pattern: String to match in webhook URLs
|
||||
|
||||
Returns:
|
||||
List of matching webhooks
|
||||
"""
|
||||
webhooks = await self.list_webhooks()
|
||||
return [wh for wh in webhooks if pattern in wh.url]
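# Illustrative sketch (not part of the original client): cleaning up stale
# development webhooks (e.g. old ngrok tunnels) with the helpers above. The
# "ngrok" pattern is just an example value.
#
#   async def delete_stale_dev_webhooks(client: DailyApiClient) -> None:
#       for wh in await client.find_webhooks_by_pattern("ngrok"):
#           await client.delete_webhook(wh.uuid)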
|
||||
@@ -1,162 +0,0 @@
|
||||
"""
|
||||
Daily.co API Request Models
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api
|
||||
"""
|
||||
|
||||
from typing import List, Literal
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
|
||||
class RecordingsBucketConfig(BaseModel):
|
||||
"""
|
||||
S3 bucket configuration for raw-tracks recordings.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
||||
"""
|
||||
|
||||
bucket_name: NonEmptyString = Field(description="S3 bucket name")
|
||||
bucket_region: NonEmptyString = Field(description="AWS region (e.g., 'us-east-1')")
|
||||
assume_role_arn: NonEmptyString = Field(
|
||||
description="AWS IAM role ARN that Daily.co will assume to write recordings"
|
||||
)
|
||||
allow_api_access: bool = Field(
|
||||
default=True,
|
||||
description="Whether to allow API access to recording metadata",
|
||||
)
|
||||
|
||||
|
||||
class RoomProperties(BaseModel):
|
||||
"""
|
||||
Room configuration properties.
|
||||
"""
|
||||
|
||||
enable_recording: Literal["cloud", "local", "raw-tracks"] | None = Field(
|
||||
default=None,
|
||||
description="Recording mode: 'cloud' for mixed, 'local' for local recording, 'raw-tracks' for multitrack, None to disable",
|
||||
)
|
||||
enable_chat: bool = Field(default=True, description="Enable in-meeting chat")
|
||||
enable_screenshare: bool = Field(default=True, description="Enable screen sharing")
|
||||
enable_knocking: bool = Field(
|
||||
default=False,
|
||||
description="Enable knocking for private rooms (allows participants to request access)",
|
||||
)
|
||||
start_video_off: bool = Field(
|
||||
default=False, description="Start with video off for all participants"
|
||||
)
|
||||
start_audio_off: bool = Field(
|
||||
default=False, description="Start with audio muted for all participants"
|
||||
)
|
||||
exp: int | None = Field(
|
||||
None, description="Room expiration timestamp (Unix epoch seconds)"
|
||||
)
|
||||
recordings_bucket: RecordingsBucketConfig | None = Field(
|
||||
None, description="S3 bucket configuration for raw-tracks recordings"
|
||||
)
|
||||
|
||||
|
||||
class CreateRoomRequest(BaseModel):
|
||||
"""
|
||||
Request to create a new Daily.co room.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
||||
"""
|
||||
|
||||
name: NonEmptyString = Field(description="Room name (must be unique within domain)")
|
||||
privacy: Literal["public", "private"] = Field(
|
||||
default="public", description="Room privacy setting"
|
||||
)
|
||||
properties: RoomProperties = Field(
|
||||
default_factory=RoomProperties, description="Room configuration properties"
|
||||
)
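# Illustrative sketch (not part of the original models): composing a
# CreateRoomRequest for a private room that records raw-tracks to S3. The
# bucket name, region, and role ARN below are placeholders, not project values.
#
#   request = CreateRoomRequest(
#       name="weekly-sync",
#       privacy="private",
#       properties=RoomProperties(
#           enable_recording="raw-tracks",
#           enable_knocking=True,
#           recordings_bucket=RecordingsBucketConfig(
#               bucket_name="example-recordings-bucket",
#               bucket_region="us-east-1",
#               assume_role_arn="arn:aws:iam::123456789012:role/example-daily-role",
#           ),
#       ),
#   )
#   # request.model_dump(exclude_none=True) is what DailyApiClient.create_room sends.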
|
||||
|
||||
|
||||
class MeetingTokenProperties(BaseModel):
|
||||
"""
|
||||
Properties for meeting token creation.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
||||
"""
|
||||
|
||||
room_name: NonEmptyString = Field(description="Room name this token is valid for")
|
||||
user_id: NonEmptyString | None = Field(
|
||||
None, description="User identifier to associate with token"
|
||||
)
|
||||
is_owner: bool = Field(
|
||||
default=False, description="Grant owner privileges to token holder"
|
||||
)
|
||||
start_cloud_recording: bool = Field(
|
||||
default=False, description="Automatically start cloud recording on join"
|
||||
)
|
||||
enable_recording_ui: bool = Field(
|
||||
default=True, description="Show recording controls in UI"
|
||||
)
|
||||
eject_at_token_exp: bool = Field(
|
||||
default=False, description="Eject participant when token expires"
|
||||
)
|
||||
nbf: int | None = Field(
|
||||
None, description="Not-before timestamp (Unix epoch seconds)"
|
||||
)
|
||||
exp: int | None = Field(
|
||||
None, description="Expiration timestamp (Unix epoch seconds)"
|
||||
)
|
||||
|
||||
|
||||
class CreateMeetingTokenRequest(BaseModel):
|
||||
"""
|
||||
Request to create a meeting token for participant authentication.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
||||
"""
|
||||
|
||||
properties: MeetingTokenProperties = Field(description="Token properties")
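# Illustrative sketch (not part of the original models): a token request for a
# non-owner participant that expires after one hour (exp is a Unix timestamp).
# Room and user identifiers are placeholders.
#
#   import time
#
#   token_request = CreateMeetingTokenRequest(
#       properties=MeetingTokenProperties(
#           room_name="weekly-sync",
#           user_id="user-123",
#           exp=int(time.time()) + 3600,
#       )
#   )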
|
||||
|
||||
|
||||
class CreateWebhookRequest(BaseModel):
|
||||
"""
|
||||
Request to create a webhook subscription.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
url: NonEmptyString = Field(description="Webhook endpoint URL (must be HTTPS)")
|
||||
eventTypes: List[
|
||||
Literal[
|
||||
"participant.joined",
|
||||
"participant.left",
|
||||
"recording.started",
|
||||
"recording.ready-to-download",
|
||||
"recording.error",
|
||||
]
|
||||
] = Field(
|
||||
description="Array of event types to subscribe to (only events we handle)"
|
||||
)
|
||||
hmac: NonEmptyString = Field(
|
||||
description="Base64-encoded HMAC secret for webhook signature verification"
|
||||
)
|
||||
basicAuth: NonEmptyString | None = Field(
|
||||
None, description="Optional basic auth credentials for webhook endpoint"
|
||||
)
|
||||
|
||||
|
||||
class UpdateWebhookRequest(BaseModel):
|
||||
"""
|
||||
Request to update an existing webhook.
|
||||
|
||||
Note: Daily.co API may not support PATCH for webhooks.
|
||||
Common pattern is to delete and recreate.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
url: NonEmptyString | None = Field(None, description="New webhook endpoint URL")
|
||||
eventTypes: List[NonEmptyString] | None = Field(
|
||||
None, description="New array of event types"
|
||||
)
|
||||
hmac: NonEmptyString | None = Field(None, description="New HMAC secret")
|
||||
basicAuth: NonEmptyString | None = Field(
|
||||
None, description="New basic auth credentials"
|
||||
)
|
||||
@@ -1,217 +0,0 @@
|
||||
"""
|
||||
Daily.co API Response Models
|
||||
"""
|
||||
|
||||
from typing import Any, Dict, List, Literal
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from reflector.dailyco_api.webhooks import DailyTrack
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
# not documented in daily; we fill it according to observations
|
||||
RecordingStatus = Literal["in-progress", "finished"]
|
||||
|
||||
|
||||
class RoomResponse(BaseModel):
|
||||
"""
|
||||
Response from room creation or retrieval.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
|
||||
"""
|
||||
|
||||
id: NonEmptyString = Field(description="Unique room identifier (UUID)")
|
||||
name: NonEmptyString = Field(description="Room name used in URLs")
|
||||
api_created: bool = Field(description="Whether room was created via API")
|
||||
privacy: Literal["public", "private"] = Field(description="Room privacy setting")
|
||||
url: NonEmptyString = Field(description="Full room URL")
|
||||
created_at: NonEmptyString = Field(description="ISO 8601 creation timestamp")
|
||||
config: Dict[NonEmptyString, Any] = Field(
|
||||
default_factory=dict, description="Room configuration properties"
|
||||
)
|
||||
|
||||
|
||||
class RoomPresenceParticipant(BaseModel):
|
||||
"""
|
||||
Participant presence information in a room.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
|
||||
"""
|
||||
|
||||
room: NonEmptyString = Field(description="Room name")
|
||||
id: NonEmptyString = Field(description="Participant session ID")
|
||||
userId: NonEmptyString | None = Field(None, description="User ID if provided")
|
||||
userName: NonEmptyString | None = Field(None, description="User display name")
|
||||
joinTime: NonEmptyString = Field(description="ISO 8601 join timestamp")
|
||||
duration: int = Field(description="Duration in room (seconds)")
|
||||
|
||||
|
||||
class RoomPresenceResponse(BaseModel):
|
||||
"""
|
||||
Response from room presence endpoint.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
|
||||
"""
|
||||
|
||||
total_count: int = Field(
|
||||
description="Total number of participants currently in room"
|
||||
)
|
||||
data: List[RoomPresenceParticipant] = Field(
|
||||
default_factory=list, description="Array of participant presence data"
|
||||
)
|
||||
|
||||
|
||||
class MeetingParticipant(BaseModel):
|
||||
"""
|
||||
Historical participant data from a meeting.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
|
||||
"""
|
||||
|
||||
user_id: NonEmptyString | None = Field(None, description="User identifier")
|
||||
participant_id: NonEmptyString = Field(description="Participant session identifier")
|
||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
||||
join_time: int = Field(description="Join timestamp (Unix epoch seconds)")
|
||||
duration: int = Field(description="Duration in meeting (seconds)")
|
||||
|
||||
|
||||
class MeetingParticipantsResponse(BaseModel):
|
||||
"""
|
||||
Response from meeting participants endpoint.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
|
||||
"""
|
||||
|
||||
data: List[MeetingParticipant] = Field(
|
||||
default_factory=list, description="Array of participant data"
|
||||
)
|
||||
|
||||
|
||||
class MeetingResponse(BaseModel):
|
||||
"""
|
||||
Response from meeting information endpoint.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
|
||||
"""
|
||||
|
||||
id: NonEmptyString = Field(description="Meeting session identifier (UUID)")
|
||||
room: NonEmptyString = Field(description="Room name where meeting occurred")
|
||||
start_time: int = Field(
|
||||
description="Meeting start Unix timestamp (~15s granularity)"
|
||||
)
|
||||
duration: int = Field(description="Total meeting duration in seconds")
|
||||
ongoing: bool = Field(description="Whether meeting is currently active")
|
||||
max_participants: int = Field(description="Peak concurrent participant count")
|
||||
participants: List[MeetingParticipant] = Field(
|
||||
default_factory=list, description="Array of participant session data"
|
||||
)
|
||||
|
||||
|
||||
class RecordingS3Info(BaseModel):
|
||||
"""
|
||||
S3 bucket information for a recording.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
||||
"""
|
||||
|
||||
bucket_name: NonEmptyString
|
||||
bucket_region: NonEmptyString
|
||||
endpoint: NonEmptyString | None = None
|
||||
|
||||
|
||||
class RecordingResponse(BaseModel):
|
||||
"""
|
||||
Response from recording retrieval endpoint (network layer).
|
||||
|
||||
Duration may be None for recordings still being processed by Daily.
|
||||
Use FinishedRecordingResponse for recordings ready for processing.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
||||
"""
|
||||
|
||||
id: NonEmptyString = Field(description="Recording identifier")
|
||||
room_name: NonEmptyString = Field(description="Room where recording occurred")
|
||||
start_ts: int = Field(description="Recording start timestamp (Unix epoch seconds)")
|
||||
status: RecordingStatus = Field(
|
||||
description="Recording status ('in-progress' or 'finished')"
|
||||
)
|
||||
max_participants: int | None = Field(
|
||||
None, description="Maximum participants during recording (may be missing)"
|
||||
)
|
||||
duration: int | None = Field(
|
||||
None, description="Recording duration in seconds (None if still processing)"
|
||||
)
|
||||
share_token: NonEmptyString | None = Field(
|
||||
None, description="Token for sharing recording"
|
||||
)
|
||||
s3: RecordingS3Info | None = Field(None, description="S3 bucket information")
|
||||
tracks: list[DailyTrack] = Field(
|
||||
default_factory=list,
|
||||
description="Track list for raw-tracks recordings (always array, never null)",
|
||||
)
|
||||
# this is not a mistake but a deliberate Daily.co naming decision
|
||||
mtgSessionId: NonEmptyString | None = Field(
|
||||
None, description="Meeting session identifier (may be missing)"
|
||||
)
|
||||
|
||||
def to_finished(self) -> "FinishedRecordingResponse | None":
|
||||
"""Convert to FinishedRecordingResponse if duration is available and status is finished."""
|
||||
if self.duration is None or self.status != "finished":
|
||||
return None
|
||||
return FinishedRecordingResponse(**self.model_dump())
|
||||
|
||||
|
||||
class FinishedRecordingResponse(RecordingResponse):
|
||||
"""
|
||||
Recording with confirmed duration - ready for processing.
|
||||
|
||||
This model guarantees duration is present and status is finished.
|
||||
"""
|
||||
|
||||
status: Literal["finished"] = Field(
|
||||
description="Recording status (always 'finished')"
|
||||
)
|
||||
duration: int = Field(description="Recording duration in seconds")
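# Illustrative sketch (not part of the original models): narrowing a
# RecordingResponse to FinishedRecordingResponse before further processing, so
# downstream code can rely on duration being present. The client, logger, and
# process() names are hypothetical callers, not part of this module.
#
#   recording = await client.get_recording(recording_id)
#   finished = recording.to_finished()
#   if finished is None:
#       logger.info("Recording not finished yet", recording_id=recording.id)
#   else:
#       process(finished)  # placeholder for downstream processing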
|
||||
|
||||
|
||||
class MeetingTokenResponse(BaseModel):
|
||||
"""
|
||||
Response from meeting token creation.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
|
||||
"""
|
||||
|
||||
token: NonEmptyString = Field(
|
||||
description="JWT meeting token for participant authentication"
|
||||
)
|
||||
|
||||
|
||||
class WebhookResponse(BaseModel):
|
||||
"""
|
||||
Response from webhook creation or retrieval.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
uuid: NonEmptyString = Field(description="Unique webhook identifier")
|
||||
url: NonEmptyString = Field(description="Webhook endpoint URL")
|
||||
hmac: NonEmptyString | None = Field(
|
||||
None, description="Base64-encoded HMAC secret for signature verification"
|
||||
)
|
||||
basicAuth: NonEmptyString | None = Field(
|
||||
None, description="Basic auth credentials if configured"
|
||||
)
|
||||
eventTypes: List[NonEmptyString] = Field(
|
||||
default_factory=list,
|
||||
description="Array of event types (e.g., ['recording.started', 'participant.joined'])",
|
||||
)
|
||||
state: Literal["ACTIVE", "FAILED"] = Field(
|
||||
description="Webhook state - FAILED after 3+ consecutive failures"
|
||||
)
|
||||
failedCount: int = Field(default=0, description="Number of consecutive failures")
|
||||
lastMomentPushed: NonEmptyString | None = Field(
|
||||
None, description="ISO 8601 timestamp of last successful push"
|
||||
)
|
||||
domainId: NonEmptyString = Field(description="Daily.co domain/account identifier")
|
||||
createdAt: NonEmptyString = Field(description="ISO 8601 creation timestamp")
|
||||
updatedAt: NonEmptyString = Field(description="ISO 8601 last update timestamp")
|
||||
@@ -1,228 +0,0 @@
|
||||
"""
|
||||
Daily.co Webhook Utilities
|
||||
|
||||
Utilities for verifying and parsing Daily.co webhook events.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
import base64
|
||||
import hmac
|
||||
from hashlib import sha256
|
||||
|
||||
import structlog
|
||||
|
||||
from .webhooks import (
|
||||
DailyWebhookEvent,
|
||||
ParticipantJoinedPayload,
|
||||
ParticipantLeftPayload,
|
||||
RecordingErrorPayload,
|
||||
RecordingReadyToDownloadPayload,
|
||||
RecordingStartedPayload,
|
||||
)
|
||||
|
||||
logger = structlog.get_logger(__name__)
|
||||
|
||||
|
||||
def verify_webhook_signature(
|
||||
body: bytes,
|
||||
signature: str,
|
||||
timestamp: str,
|
||||
webhook_secret: str,
|
||||
) -> bool:
|
||||
"""
|
||||
Verify Daily.co webhook signature using HMAC-SHA256.
|
||||
|
||||
Daily.co signature verification:
|
||||
1. Base64-decode the webhook secret
|
||||
2. Create signed content: timestamp + '.' + body
|
||||
3. Compute HMAC-SHA256(secret, signed_content)
|
||||
4. Base64-encode the result
|
||||
5. Compare with provided signature using constant-time comparison
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
|
||||
Args:
|
||||
body: Raw request body bytes
|
||||
signature: X-Webhook-Signature header value
|
||||
timestamp: X-Webhook-Timestamp header value
|
||||
webhook_secret: Base64-encoded HMAC secret
|
||||
|
||||
Returns:
|
||||
True if signature is valid, False otherwise
|
||||
|
||||
Example:
|
||||
>>> body = b'{"version":"1.0.0","type":"participant.joined",...}'
|
||||
>>> signature = "abc123..."
|
||||
>>> timestamp = "1234567890"
|
||||
>>> secret = "your-base64-secret"
|
||||
>>> is_valid = verify_webhook_signature(body, signature, timestamp, secret)
|
||||
"""
|
||||
if not signature or not timestamp or not webhook_secret:
|
||||
logger.warning(
|
||||
"Missing required data for webhook verification",
|
||||
has_signature=bool(signature),
|
||||
has_timestamp=bool(timestamp),
|
||||
has_secret=bool(webhook_secret),
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
secret_bytes = base64.b64decode(webhook_secret)
|
||||
signed_content = timestamp.encode() + b"." + body
|
||||
expected = hmac.new(secret_bytes, signed_content, sha256).digest()
|
||||
expected_b64 = base64.b64encode(expected).decode()
|
||||
|
||||
# Constant-time comparison to prevent timing attacks
|
||||
return hmac.compare_digest(expected_b64, signature)
|
||||
|
||||
except (base64.binascii.Error, ValueError, TypeError, UnicodeDecodeError) as e:
|
||||
logger.error(
|
||||
"Webhook signature verification failed",
|
||||
error=str(e),
|
||||
error_type=type(e).__name__,
|
||||
)
|
||||
return False
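# Illustrative sketch (not part of the original module): producing a signature
# the same way Daily.co does, useful for exercising verify_webhook_signature in
# tests. It mirrors the five steps listed in the docstring above; the secret
# and body values are placeholders.
#
#   import base64, hmac, os, time
#   from hashlib import sha256
#
#   secret_b64 = base64.b64encode(os.urandom(32)).decode()
#
#   def sign_webhook(body: bytes, webhook_secret: str) -> tuple[str, str]:
#       timestamp = str(int(time.time()))
#       secret = base64.b64decode(webhook_secret)
#       digest = hmac.new(secret, timestamp.encode() + b"." + body, sha256).digest()
#       return base64.b64encode(digest).decode(), timestamp
#
#   body = b'{"type":"recording.started"}'
#   signature, timestamp = sign_webhook(body, secret_b64)
#   assert verify_webhook_signature(body, signature, timestamp, secret_b64)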
|
||||
|
||||
|
||||
def extract_room_name(event: DailyWebhookEvent) -> str | None:
|
||||
"""
|
||||
Extract room name from Daily.co webhook event payload.
|
||||
|
||||
Args:
|
||||
event: Parsed webhook event
|
||||
|
||||
Returns:
|
||||
Room name if present and is a string, None otherwise
|
||||
|
||||
Example:
|
||||
>>> event = DailyWebhookEvent(**webhook_payload)
|
||||
>>> room_name = extract_room_name(event)
|
||||
"""
|
||||
room = event.payload.get("room_name")
|
||||
# Ensure we return a string, not any falsy value that might be in payload
|
||||
return room if isinstance(room, str) else None
|
||||
|
||||
|
||||
def parse_participant_joined(event: DailyWebhookEvent) -> ParticipantJoinedPayload:
|
||||
"""
|
||||
Parse participant.joined webhook event payload.
|
||||
|
||||
Args:
|
||||
event: Webhook event with type "participant.joined"
|
||||
|
||||
Returns:
|
||||
Parsed participant joined payload
|
||||
|
||||
Raises:
|
||||
pydantic.ValidationError: If payload doesn't match expected schema
|
||||
"""
|
||||
return ParticipantJoinedPayload(**event.payload)
|
||||
|
||||
|
||||
def parse_participant_left(event: DailyWebhookEvent) -> ParticipantLeftPayload:
|
||||
"""
|
||||
Parse participant.left webhook event payload.
|
||||
|
||||
Args:
|
||||
event: Webhook event with type "participant.left"
|
||||
|
||||
Returns:
|
||||
Parsed participant left payload
|
||||
|
||||
Raises:
|
||||
pydantic.ValidationError: If payload doesn't match expected schema
|
||||
"""
|
||||
return ParticipantLeftPayload(**event.payload)
|
||||
|
||||
|
||||
def parse_recording_started(event: DailyWebhookEvent) -> RecordingStartedPayload:
|
||||
"""
|
||||
Parse recording.started webhook event payload.
|
||||
|
||||
Args:
|
||||
event: Webhook event with type "recording.started"
|
||||
|
||||
Returns:
|
||||
Parsed recording started payload
|
||||
|
||||
Raises:
|
||||
pydantic.ValidationError: If payload doesn't match expected schema
|
||||
"""
|
||||
return RecordingStartedPayload(**event.payload)
|
||||
|
||||
|
||||
def parse_recording_ready(
|
||||
event: DailyWebhookEvent,
|
||||
) -> RecordingReadyToDownloadPayload:
|
||||
"""
|
||||
Parse recording.ready-to-download webhook event payload.
|
||||
|
||||
This event is sent when raw-tracks recordings are complete and uploaded to S3.
|
||||
The payload includes a 'tracks' array with individual audio/video files.
|
||||
|
||||
Args:
|
||||
event: Webhook event with type "recording.ready-to-download"
|
||||
|
||||
Returns:
|
||||
Parsed recording ready payload with tracks array
|
||||
|
||||
Raises:
|
||||
pydantic.ValidationError: If payload doesn't match expected schema
|
||||
|
||||
Example:
|
||||
>>> event = DailyWebhookEvent(**webhook_payload)
|
||||
>>> if event.type == "recording.ready-to-download":
|
||||
... payload = parse_recording_ready(event)
|
||||
... audio_tracks = [t for t in payload.tracks if t.type == "audio"]
|
||||
"""
|
||||
return RecordingReadyToDownloadPayload(**event.payload)
|
||||
|
||||
|
||||
def parse_recording_error(event: DailyWebhookEvent) -> RecordingErrorPayload:
|
||||
"""
|
||||
Parse recording.error webhook event payload.
|
||||
|
||||
Args:
|
||||
event: Webhook event with type "recording.error"
|
||||
|
||||
Returns:
|
||||
Parsed recording error payload
|
||||
|
||||
Raises:
|
||||
pydantic.ValidationError: If payload doesn't match expected schema
|
||||
"""
|
||||
return RecordingErrorPayload(**event.payload)
|
||||
|
||||
|
||||
WEBHOOK_PARSERS = {
|
||||
"participant.joined": parse_participant_joined,
|
||||
"participant.left": parse_participant_left,
|
||||
"recording.started": parse_recording_started,
|
||||
"recording.ready-to-download": parse_recording_ready,
|
||||
"recording.error": parse_recording_error,
|
||||
}
|
||||
|
||||
|
||||
def parse_webhook_payload(event: DailyWebhookEvent):
|
||||
"""
|
||||
Parse webhook event payload based on event type.
|
||||
|
||||
Args:
|
||||
event: Webhook event
|
||||
|
||||
Returns:
|
||||
Typed payload model based on event type, or raw dict if unknown
|
||||
|
||||
Example:
|
||||
>>> event = DailyWebhookEvent(**webhook_payload)
|
||||
>>> payload = parse_webhook_payload(event)
|
||||
>>> if isinstance(payload, ParticipantJoinedPayload):
|
||||
... print(f"User {payload.user_name} joined")
|
||||
"""
|
||||
parser = WEBHOOK_PARSERS.get(event.type)
|
||||
if parser:
|
||||
return parser(event)
|
||||
else:
|
||||
logger.warning("Unknown webhook event type", event_type=event.type)
|
||||
return event.payload
|
||||
@@ -1,271 +0,0 @@
|
||||
"""
|
||||
Daily.co Webhook Event Models
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
from typing import Annotated, Any, Dict, Literal, Union
|
||||
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
|
||||
def normalize_timestamp_to_int(v):
|
||||
"""
|
||||
Normalize float timestamps to int by truncating decimal part.
|
||||
|
||||
Daily.co sometimes sends timestamps as floats (e.g., 1708972279.96).
|
||||
Pydantic expects int for fields typed as `int`.
|
||||
"""
|
||||
if v is None:
|
||||
return v
|
||||
if isinstance(v, float):
|
||||
return int(v)
|
||||
return v
|
||||
|
||||
|
||||
WebhookEventType = Literal[
|
||||
"participant.joined",
|
||||
"participant.left",
|
||||
"recording.started",
|
||||
"recording.ready-to-download",
|
||||
"recording.error",
|
||||
]
|
||||
|
||||
|
||||
class DailyTrack(BaseModel):
|
||||
"""
|
||||
Individual audio or video track from a multitrack recording.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
||||
"""
|
||||
|
||||
type: Literal["audio", "video"]
|
||||
s3Key: NonEmptyString = Field(description="S3 object key for the track file")
|
||||
size: int = Field(description="File size in bytes")
|
||||
|
||||
|
||||
class DailyWebhookEvent(BaseModel):
|
||||
"""
|
||||
Base structure for all Daily.co webhook events.
|
||||
All events share five common fields documented below.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
version: NonEmptyString = Field(
|
||||
description="Represents the version of the event. This uses semantic versioning to inform a consumer if the payload has introduced any breaking changes"
|
||||
)
|
||||
type: WebhookEventType = Field(
|
||||
description="Represents the type of the event described in the payload"
|
||||
)
|
||||
id: NonEmptyString = Field(
|
||||
description="An identifier representing this specific event"
|
||||
)
|
||||
payload: Dict[NonEmptyString, Any] = Field(
|
||||
description="An object representing the event, whose fields are described in the corresponding payload class"
|
||||
)
|
||||
event_ts: int = Field(
|
||||
description="Documenting when the webhook itself was sent. This timestamp is different than the time of the event the webhook describes. For example, a recording.started event will contain a start_ts timestamp of when the actual recording started, and a slightly later event_ts timestamp indicating when the webhook event was sent"
|
||||
)
|
||||
|
||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class ParticipantJoinedPayload(BaseModel):
|
||||
"""
|
||||
Payload for participant.joined webhook event.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-joined
|
||||
"""
|
||||
|
||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
||||
session_id: NonEmptyString = Field(description="Daily.co session identifier")
|
||||
user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
|
||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
||||
joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
|
||||
|
||||
_normalize_joined_at = field_validator("joined_at", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class ParticipantLeftPayload(BaseModel):
|
||||
"""
|
||||
Payload for participant.left webhook event.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-left
|
||||
"""
|
||||
|
||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
||||
session_id: NonEmptyString = Field(description="Daily.co session identifier")
|
||||
user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
|
||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
||||
joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
|
||||
duration: int | None = Field(
|
||||
None, description="Duration of participation in seconds"
|
||||
)
|
||||
|
||||
_normalize_joined_at = field_validator("joined_at", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingStartedPayload(BaseModel):
|
||||
"""
|
||||
Payload for recording.started webhook event.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-started
|
||||
"""
|
||||
|
||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
||||
recording_id: NonEmptyString = Field(description="Recording identifier")
|
||||
start_ts: int | None = Field(None, description="Recording start timestamp")
|
||||
|
||||
_normalize_start_ts = field_validator("start_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingReadyToDownloadPayload(BaseModel):
|
||||
"""
|
||||
Payload for recording.ready-to-download webhook event.
|
||||
This is sent when raw-tracks recordings are complete and uploaded to S3.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-ready-to-download
|
||||
"""
|
||||
|
||||
type: Literal["cloud", "raw-tracks"] = Field(
|
||||
description="The type of recording that was generated"
|
||||
)
|
||||
recording_id: NonEmptyString = Field(
|
||||
description="An ID identifying the recording that was generated"
|
||||
)
|
||||
room_name: NonEmptyString = Field(
|
||||
description="The name of the room where the recording was made"
|
||||
)
|
||||
start_ts: int = Field(
|
||||
description="The Unix epoch time in seconds representing when the recording started"
|
||||
)
|
||||
status: Literal["finished"] = Field(
|
||||
description="The status of the given recording (always 'finished' in ready-to-download webhook, see RecordingStatus in responses.py for full API statuses)"
|
||||
)
|
||||
max_participants: int = Field(
|
||||
description="The number of participants on the call that were recorded"
|
||||
)
|
||||
duration: int = Field(description="The duration in seconds of the call")
|
||||
s3_key: NonEmptyString = Field(
|
||||
description="The location of the recording in the provided S3 bucket"
|
||||
)
|
||||
share_token: NonEmptyString | None = Field(
|
||||
None, description="undocumented documented secret field"
|
||||
)
|
||||
tracks: list[DailyTrack] | None = Field(
|
||||
None,
|
||||
description="If the recording is a raw-tracks recording, a tracks field will be provided. If role permissions have been removed, the tracks field may be null",
|
||||
)
|
||||
|
||||
_normalize_start_ts = field_validator("start_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingErrorPayload(BaseModel):
|
||||
"""
|
||||
Payload for recording.error webhook event.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-error
|
||||
"""
|
||||
|
||||
action: Literal["clourd-recording-err", "cloud-recording-error"] = Field(
|
||||
description="A string describing the event that was emitted (both variants are documented)"
|
||||
)
|
||||
error_msg: NonEmptyString = Field(description="The error message returned")
|
||||
instance_id: NonEmptyString = Field(
|
||||
description="The recording instance ID that was passed into the start recording command"
|
||||
)
|
||||
room_name: NonEmptyString = Field(
|
||||
description="The name of the room where the recording was made"
|
||||
)
|
||||
timestamp: int = Field(
|
||||
description="The Unix epoch time in seconds representing when the error was emitted"
|
||||
)
|
||||
|
||||
_normalize_timestamp = field_validator("timestamp", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class ParticipantJoinedEvent(BaseModel):
|
||||
version: NonEmptyString
|
||||
type: Literal["participant.joined"]
|
||||
id: NonEmptyString
|
||||
payload: ParticipantJoinedPayload
|
||||
event_ts: int
|
||||
|
||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class ParticipantLeftEvent(BaseModel):
|
||||
version: NonEmptyString
|
||||
type: Literal["participant.left"]
|
||||
id: NonEmptyString
|
||||
payload: ParticipantLeftPayload
|
||||
event_ts: int
|
||||
|
||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingStartedEvent(BaseModel):
|
||||
version: NonEmptyString
|
||||
type: Literal["recording.started"]
|
||||
id: NonEmptyString
|
||||
payload: RecordingStartedPayload
|
||||
event_ts: int
|
||||
|
||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingReadyEvent(BaseModel):
|
||||
version: NonEmptyString
|
||||
type: Literal["recording.ready-to-download"]
|
||||
id: NonEmptyString
|
||||
payload: RecordingReadyToDownloadPayload
|
||||
event_ts: int
|
||||
|
||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingErrorEvent(BaseModel):
|
||||
version: NonEmptyString
|
||||
type: Literal["recording.error"]
|
||||
id: NonEmptyString
|
||||
payload: RecordingErrorPayload
|
||||
event_ts: int
|
||||
|
||||
_normalize_event_ts = field_validator("event_ts", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
DailyWebhookEventUnion = Annotated[
|
||||
Union[
|
||||
ParticipantJoinedEvent,
|
||||
ParticipantLeftEvent,
|
||||
RecordingStartedEvent,
|
||||
RecordingReadyEvent,
|
||||
RecordingErrorEvent,
|
||||
],
|
||||
Field(discriminator="type"),
|
||||
]
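# Illustrative sketch (not part of the original module): validating an incoming
# webhook body against the discriminated union so each event type parses into
# its concrete payload model. Assumes Pydantic v2's TypeAdapter.
#
#   from pydantic import TypeAdapter
#
#   _event_adapter = TypeAdapter(DailyWebhookEventUnion)
#
#   def parse_event(raw: dict) -> DailyWebhookEventUnion:
#       # "type" selects the concrete event class via the discriminator
#       return _event_adapter.validate_python(raw)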
|
||||
@@ -1,51 +1,82 @@
|
||||
import contextvars
|
||||
from typing import Optional
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import AsyncGenerator
|
||||
|
||||
import databases
|
||||
import sqlalchemy
|
||||
from sqlalchemy.ext.asyncio import (
|
||||
AsyncEngine,
|
||||
AsyncSession,
|
||||
async_sessionmaker,
|
||||
create_async_engine,
|
||||
)
|
||||
|
||||
from reflector.db.base import Base as Base
|
||||
from reflector.db.base import metadata as metadata
|
||||
from reflector.events import subscribers_shutdown, subscribers_startup
|
||||
from reflector.settings import settings
|
||||
|
||||
metadata = sqlalchemy.MetaData()
|
||||
|
||||
_database_context: contextvars.ContextVar[Optional[databases.Database]] = (
|
||||
contextvars.ContextVar("database", default=None)
|
||||
)
|
||||
_engine: AsyncEngine | None = None
|
||||
_session_factory: async_sessionmaker[AsyncSession] | None = None
|
||||
|
||||
|
||||
def get_database() -> databases.Database:
|
||||
"""Get database instance for current asyncio context"""
|
||||
db = _database_context.get()
|
||||
if db is None:
|
||||
db = databases.Database(settings.DATABASE_URL)
|
||||
_database_context.set(db)
|
||||
return db
|
||||
def get_engine() -> AsyncEngine:
|
||||
global _engine
|
||||
if _engine is None:
|
||||
_engine = create_async_engine(
|
||||
settings.DATABASE_URL,
|
||||
echo=False,
|
||||
pool_pre_ping=True,
|
||||
)
|
||||
return _engine
|
||||
|
||||
|
||||
def get_session_factory() -> async_sessionmaker[AsyncSession]:
|
||||
global _session_factory
|
||||
if _session_factory is None:
|
||||
_session_factory = async_sessionmaker(
|
||||
get_engine(),
|
||||
class_=AsyncSession,
|
||||
expire_on_commit=False,
|
||||
)
|
||||
return _session_factory
|
||||
|
||||
|
||||
async def _get_session() -> AsyncGenerator[AsyncSession, None]:
|
||||
# kept as a separate generator to make mocking easier in pytest
|
||||
async with get_session_factory()() as session:
|
||||
yield session
|
||||
|
||||
|
||||
async def get_session() -> AsyncGenerator[AsyncSession, None]:
|
||||
"""
|
||||
Get a database session, fastapi dependency injection style
|
||||
"""
|
||||
async for session in _get_session():
|
||||
yield session
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def get_session_context():
|
||||
"""
|
||||
Get a database session as an async context manager
|
||||
"""
|
||||
async for session in _get_session():
|
||||
yield session
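# Illustrative sketch (not part of the original module): how code outside of
# FastAPI dependency injection might use the context-manager form. The query is
# a placeholder.
#
#   async def count_rooms() -> int:
#       async with get_session_context() as session:
#           result = await session.execute(sqlalchemy.text("SELECT count(*) FROM room"))
#           return result.scalar_one()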
|
||||
|
||||
|
||||
# import models
|
||||
import reflector.db.calendar_events # noqa
|
||||
import reflector.db.daily_participant_sessions # noqa
|
||||
import reflector.db.meetings # noqa
|
||||
import reflector.db.recordings # noqa
|
||||
import reflector.db.rooms # noqa
|
||||
import reflector.db.transcripts # noqa
|
||||
import reflector.db.user_api_keys # noqa
|
||||
import reflector.db.users # noqa
|
||||
|
||||
kwargs = {}
|
||||
if "postgres" not in settings.DATABASE_URL:
|
||||
raise Exception("Only postgres database is supported in reflector")
|
||||
engine = sqlalchemy.create_engine(settings.DATABASE_URL, **kwargs)
|
||||
|
||||
|
||||
@subscribers_startup.append
|
||||
async def database_connect(_):
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
get_engine()
|
||||
|
||||
|
||||
@subscribers_shutdown.append
|
||||
async def database_disconnect(_):
|
||||
database = get_database()
|
||||
await database.disconnect()
|
||||
global _engine
|
||||
if _engine:
|
||||
await _engine.dispose()
|
||||
_engine = None
|
||||
|
||||
237
server/reflector/db/base.py
Normal file
@@ -0,0 +1,237 @@
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects.postgresql import JSONB, TSVECTOR
|
||||
from sqlalchemy.ext.asyncio import AsyncAttrs
|
||||
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
|
||||
|
||||
|
||||
class Base(AsyncAttrs, DeclarativeBase):
|
||||
pass
|
||||
|
||||
|
||||
class TranscriptModel(Base):
|
||||
__tablename__ = "transcript"
|
||||
|
||||
id: Mapped[str] = mapped_column(sa.String, primary_key=True)
|
||||
name: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
status: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
locked: Mapped[Optional[bool]] = mapped_column(sa.Boolean)
|
||||
duration: Mapped[Optional[float]] = mapped_column(sa.Float)
|
||||
created_at: Mapped[Optional[datetime]] = mapped_column(sa.DateTime(timezone=True))
|
||||
title: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
short_summary: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
long_summary: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
topics: Mapped[Optional[list]] = mapped_column(sa.JSON)
|
||||
events: Mapped[Optional[list]] = mapped_column(sa.JSON)
|
||||
participants: Mapped[Optional[list]] = mapped_column(sa.JSON)
|
||||
source_language: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
target_language: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
reviewed: Mapped[bool] = mapped_column(
|
||||
sa.Boolean, nullable=False, server_default=sa.text("false")
|
||||
)
|
||||
audio_location: Mapped[str] = mapped_column(
|
||||
sa.String, nullable=False, server_default="local"
|
||||
)
|
||||
user_id: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
share_mode: Mapped[str] = mapped_column(
|
||||
sa.String, nullable=False, server_default="private"
|
||||
)
|
||||
meeting_id: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
recording_id: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
zulip_message_id: Mapped[Optional[int]] = mapped_column(sa.Integer)
|
||||
source_kind: Mapped[str] = mapped_column(
|
||||
sa.String, nullable=False
|
||||
) # Enum will be handled separately
|
||||
audio_deleted: Mapped[Optional[bool]] = mapped_column(sa.Boolean)
|
||||
room_id: Mapped[Optional[str]] = mapped_column(sa.String)
|
||||
webvtt: Mapped[Optional[str]] = mapped_column(sa.Text)
|
||||
|
||||
__table_args__ = (
|
||||
sa.Index("idx_transcript_recording_id", "recording_id"),
|
||||
sa.Index("idx_transcript_user_id", "user_id"),
|
||||
sa.Index("idx_transcript_created_at", "created_at"),
|
||||
sa.Index("idx_transcript_user_id_recording_id", "user_id", "recording_id"),
|
||||
sa.Index("idx_transcript_room_id", "room_id"),
|
||||
sa.Index("idx_transcript_source_kind", "source_kind"),
|
||||
sa.Index("idx_transcript_room_id_created_at", "room_id", "created_at"),
|
||||
)
|
||||
|
||||
|
||||
TranscriptModel.search_vector_en = sa.Column(
|
||||
"search_vector_en",
|
||||
TSVECTOR,
|
||||
sa.Computed(
|
||||
"setweight(to_tsvector('english', coalesce(title, '')), 'A') || "
|
||||
"setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') || "
|
||||
"setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')",
|
||||
persisted=True,
|
||||
),
|
||||
)
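# Illustrative sketch (not part of the original module): querying the weighted
# English search vector defined above with a plain-text query. The functions
# used are standard PostgreSQL/SQLAlchemy; the helper name is hypothetical.
#
#   def search_transcripts(query: str):
#       ts_query = sa.func.plainto_tsquery("english", query)
#       return (
#           sa.select(TranscriptModel)
#           .where(TranscriptModel.search_vector_en.op("@@")(ts_query))
#           .order_by(sa.desc(sa.func.ts_rank(TranscriptModel.search_vector_en, ts_query)))
#       )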
|
||||
|
||||
|
||||
class RoomModel(Base):
|
||||
__tablename__ = "room"
|
||||
|
||||
id: Mapped[str] = mapped_column(sa.String, primary_key=True)
|
||||
name: Mapped[str] = mapped_column(sa.String, nullable=False, unique=True)
|
||||
user_id: Mapped[str] = mapped_column(sa.String, nullable=False)
|
||||
created_at: Mapped[datetime] = mapped_column(
|
||||
sa.DateTime(timezone=True), nullable=False
|
||||
)
|
||||
zulip_auto_post: Mapped[bool] = mapped_column(
|
||||
        sa.Boolean, nullable=False, server_default=sa.text("false")
    )
    zulip_stream: Mapped[Optional[str]] = mapped_column(sa.String)
    zulip_topic: Mapped[Optional[str]] = mapped_column(sa.String)
    is_locked: Mapped[bool] = mapped_column(
        sa.Boolean, nullable=False, server_default=sa.text("false")
    )
    room_mode: Mapped[str] = mapped_column(
        sa.String, nullable=False, server_default="normal"
    )
    recording_type: Mapped[str] = mapped_column(
        sa.String, nullable=False, server_default="cloud"
    )
    recording_trigger: Mapped[str] = mapped_column(
        sa.String, nullable=False, server_default="automatic-2nd-participant"
    )
    is_shared: Mapped[bool] = mapped_column(
        sa.Boolean, nullable=False, server_default=sa.text("false")
    )
    webhook_url: Mapped[Optional[str]] = mapped_column(sa.String)
    webhook_secret: Mapped[Optional[str]] = mapped_column(sa.String)
    ics_url: Mapped[Optional[str]] = mapped_column(sa.Text)
    ics_fetch_interval: Mapped[Optional[int]] = mapped_column(
        sa.Integer, server_default=sa.text("300")
    )
    ics_enabled: Mapped[bool] = mapped_column(
        sa.Boolean, nullable=False, server_default=sa.text("false")
    )
    ics_last_sync: Mapped[Optional[datetime]] = mapped_column(
        sa.DateTime(timezone=True)
    )
    ics_last_etag: Mapped[Optional[str]] = mapped_column(sa.Text)

    __table_args__ = (
        sa.Index("idx_room_is_shared", "is_shared"),
        sa.Index("idx_room_ics_enabled", "ics_enabled"),
    )


class MeetingModel(Base):
    __tablename__ = "meeting"

    id: Mapped[str] = mapped_column(sa.String, primary_key=True)
    room_name: Mapped[Optional[str]] = mapped_column(sa.String)
    room_url: Mapped[Optional[str]] = mapped_column(sa.String)
    host_room_url: Mapped[Optional[str]] = mapped_column(sa.String)
    start_date: Mapped[Optional[datetime]] = mapped_column(sa.DateTime(timezone=True))
    end_date: Mapped[Optional[datetime]] = mapped_column(sa.DateTime(timezone=True))
    room_id: Mapped[Optional[str]] = mapped_column(
        sa.String, sa.ForeignKey("room.id", ondelete="CASCADE")
    )
    is_locked: Mapped[bool] = mapped_column(
        sa.Boolean, nullable=False, server_default=sa.text("false")
    )
    room_mode: Mapped[str] = mapped_column(
        sa.String, nullable=False, server_default="normal"
    )
    recording_type: Mapped[str] = mapped_column(
        sa.String, nullable=False, server_default="cloud"
    )
    recording_trigger: Mapped[str] = mapped_column(
        sa.String, nullable=False, server_default="automatic-2nd-participant"
    )
    num_clients: Mapped[int] = mapped_column(
        sa.Integer, nullable=False, server_default=sa.text("0")
    )
    is_active: Mapped[bool] = mapped_column(
        sa.Boolean, nullable=False, server_default=sa.text("true")
    )
    calendar_event_id: Mapped[Optional[str]] = mapped_column(
        sa.String,
        sa.ForeignKey(
            "calendar_event.id",
            ondelete="SET NULL",
            name="fk_meeting_calendar_event_id",
        ),
    )
    calendar_metadata: Mapped[Optional[dict]] = mapped_column(JSONB)

    __table_args__ = (
        sa.Index("idx_meeting_room_id", "room_id"),
        sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
    )


class MeetingConsentModel(Base):
    __tablename__ = "meeting_consent"

    id: Mapped[str] = mapped_column(sa.String, primary_key=True)
    meeting_id: Mapped[str] = mapped_column(
        sa.String, sa.ForeignKey("meeting.id", ondelete="CASCADE"), nullable=False
    )
    user_id: Mapped[Optional[str]] = mapped_column(sa.String)
    consent_given: Mapped[bool] = mapped_column(sa.Boolean, nullable=False)
    consent_timestamp: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )


class RecordingModel(Base):
    __tablename__ = "recording"

    id: Mapped[str] = mapped_column(sa.String, primary_key=True)
    meeting_id: Mapped[str] = mapped_column(
        sa.String, sa.ForeignKey("meeting.id", ondelete="CASCADE"), nullable=False
    )
    url: Mapped[str] = mapped_column(sa.String, nullable=False)
    object_key: Mapped[str] = mapped_column(sa.String, nullable=False)
    duration: Mapped[Optional[float]] = mapped_column(sa.Float)
    created_at: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )

    __table_args__ = (sa.Index("idx_recording_meeting_id", "meeting_id"),)


class CalendarEventModel(Base):
    __tablename__ = "calendar_event"

    id: Mapped[str] = mapped_column(sa.String, primary_key=True)
    room_id: Mapped[str] = mapped_column(
        sa.String, sa.ForeignKey("room.id", ondelete="CASCADE"), nullable=False
    )
    ics_uid: Mapped[str] = mapped_column(sa.Text, nullable=False)
    title: Mapped[Optional[str]] = mapped_column(sa.Text)
    description: Mapped[Optional[str]] = mapped_column(sa.Text)
    start_time: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )
    end_time: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )
    attendees: Mapped[Optional[dict]] = mapped_column(JSONB)
    location: Mapped[Optional[str]] = mapped_column(sa.Text)
    ics_raw_data: Mapped[Optional[str]] = mapped_column(sa.Text)
    last_synced: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )
    is_deleted: Mapped[bool] = mapped_column(
        sa.Boolean, nullable=False, server_default=sa.text("false")
    )
    created_at: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )
    updated_at: Mapped[datetime] = mapped_column(
        sa.DateTime(timezone=True), nullable=False
    )

    __table_args__ = (
        sa.Index("idx_calendar_event_room_start", "room_id", "start_time"),
    )


metadata = Base.metadata
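The declarative models above all hang off the shared `Base`, so they can be created and queried through SQLAlchemy's async API. A minimal sketch, not part of this changeset, assuming an asyncpg URL and that `Base` and `RoomModel` are importable from `reflector.db.base`:

# Illustrative only: the connection URL is a placeholder, not project config.
import asyncio

from sqlalchemy import select
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

from reflector.db.base import Base, RoomModel  # assumed import path


async def main() -> None:
    engine = create_async_engine("postgresql+asyncpg://localhost/reflector_dev")
    async with engine.begin() as conn:
        # Create every table registered on Base.metadata (room, meeting, ...).
        await conn.run_sync(Base.metadata.create_all)

    session_factory = async_sessionmaker(engine, expire_on_commit=False)
    async with session_factory() as session:
        # Shared rooms, served by the idx_room_is_shared index declared above.
        result = await session.execute(select(RoomModel).where(RoomModel.is_shared))
        print([room.id for room in result.scalars().all()])


asyncio.run(main())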
@@ -2,45 +2,17 @@ from datetime import datetime, timedelta, timezone
from typing import Any

import sqlalchemy as sa
from pydantic import BaseModel, Field
from sqlalchemy.dialects.postgresql import JSONB
from pydantic import BaseModel, ConfigDict, Field
from sqlalchemy import delete, select, update
from sqlalchemy.ext.asyncio import AsyncSession

from reflector.db import get_database, metadata
from reflector.db.base import CalendarEventModel
from reflector.utils import generate_uuid4

calendar_events = sa.Table(
    "calendar_event",
    metadata,
    sa.Column("id", sa.String, primary_key=True),
    sa.Column(
        "room_id",
        sa.String,
        sa.ForeignKey("room.id", ondelete="CASCADE", name="fk_calendar_event_room_id"),
        nullable=False,
    ),
    sa.Column("ics_uid", sa.Text, nullable=False),
    sa.Column("title", sa.Text),
    sa.Column("description", sa.Text),
    sa.Column("start_time", sa.DateTime(timezone=True), nullable=False),
    sa.Column("end_time", sa.DateTime(timezone=True), nullable=False),
    sa.Column("attendees", JSONB),
    sa.Column("location", sa.Text),
    sa.Column("ics_raw_data", sa.Text),
    sa.Column("last_synced", sa.DateTime(timezone=True), nullable=False),
    sa.Column("is_deleted", sa.Boolean, nullable=False, server_default=sa.false()),
    sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
    sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
    sa.UniqueConstraint("room_id", "ics_uid", name="uq_room_calendar_event"),
    sa.Index("idx_calendar_event_room_start", "room_id", "start_time"),
    sa.Index(
        "idx_calendar_event_deleted",
        "is_deleted",
        postgresql_where=sa.text("NOT is_deleted"),
    ),
)


class CalendarEvent(BaseModel):
    model_config = ConfigDict(from_attributes=True)

    id: str = Field(default_factory=generate_uuid4)
    room_id: str
    ics_uid: str
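`model_config = ConfigDict(from_attributes=True)` is what allows the Pydantic schema to be hydrated straight from an ORM instance. A hedged sketch of the pattern the controllers below rely on (import paths are assumptions):

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from reflector.db.base import CalendarEventModel
from reflector.db.calendar_events import CalendarEvent  # assumed module path


async def events_for_room(session: AsyncSession, room_id: str) -> list[CalendarEvent]:
    result = await session.execute(
        select(CalendarEventModel).where(CalendarEventModel.room_id == room_id)
    )
    # from_attributes=True lets model_validate read ORM attributes directly,
    # so no row-to-dict conversion is needed.
    return [CalendarEvent.model_validate(row) for row in result.scalars().all()]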
@@ -58,129 +30,159 @@ class CalendarEvent(BaseModel):
|
||||
|
||||
|
||||
class CalendarEventController:
|
||||
async def get_by_room(
|
||||
async def get_upcoming_events(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
room_id: str,
|
||||
include_deleted: bool = False,
|
||||
start_after: datetime | None = None,
|
||||
end_before: datetime | None = None,
|
||||
current_time: datetime,
|
||||
buffer_minutes: int = 15,
|
||||
) -> list[CalendarEvent]:
|
||||
query = calendar_events.select().where(calendar_events.c.room_id == room_id)
|
||||
|
||||
if not include_deleted:
|
||||
query = query.where(calendar_events.c.is_deleted == False)
|
||||
|
||||
if start_after:
|
||||
query = query.where(calendar_events.c.start_time >= start_after)
|
||||
|
||||
if end_before:
|
||||
query = query.where(calendar_events.c.end_time <= end_before)
|
||||
|
||||
query = query.order_by(calendar_events.c.start_time.asc())
|
||||
|
||||
results = await get_database().fetch_all(query)
|
||||
return [CalendarEvent(**result) for result in results]
|
||||
|
||||
async def get_upcoming(
|
||||
self, room_id: str, minutes_ahead: int = 120
|
||||
) -> list[CalendarEvent]:
|
||||
"""Get upcoming events for a room within the specified minutes, including currently happening events."""
|
||||
now = datetime.now(timezone.utc)
|
||||
future_time = now + timedelta(minutes=minutes_ahead)
|
||||
buffer_time = current_time + timedelta(minutes=buffer_minutes)
|
||||
|
||||
query = (
|
||||
calendar_events.select()
|
||||
select(CalendarEventModel)
|
||||
.where(
|
||||
sa.and_(
|
||||
calendar_events.c.room_id == room_id,
|
||||
calendar_events.c.is_deleted == False,
|
||||
calendar_events.c.start_time <= future_time,
|
||||
calendar_events.c.end_time >= now,
|
||||
CalendarEventModel.room_id == room_id,
|
||||
CalendarEventModel.start_time <= buffer_time,
|
||||
CalendarEventModel.end_time > current_time,
|
||||
)
|
||||
)
|
||||
.order_by(calendar_events.c.start_time.asc())
|
||||
.order_by(CalendarEventModel.start_time)
|
||||
)
|
||||
|
||||
results = await get_database().fetch_all(query)
|
||||
return [CalendarEvent(**result) for result in results]
|
||||
result = await session.execute(query)
|
||||
return [CalendarEvent.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def get_by_id(self, event_id: str) -> CalendarEvent | None:
|
||||
query = calendar_events.select().where(calendar_events.c.id == event_id)
|
||||
result = await get_database().fetch_one(query)
|
||||
return CalendarEvent(**result) if result else None
|
||||
async def get_by_id(
|
||||
self, session: AsyncSession, event_id: str
|
||||
) -> CalendarEvent | None:
|
||||
query = select(CalendarEventModel).where(CalendarEventModel.id == event_id)
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return CalendarEvent.model_validate(row)
|
||||
|
||||
async def get_by_ics_uid(self, room_id: str, ics_uid: str) -> CalendarEvent | None:
|
||||
query = calendar_events.select().where(
|
||||
async def get_by_ics_uid(
|
||||
self, session: AsyncSession, room_id: str, ics_uid: str
|
||||
) -> CalendarEvent | None:
|
||||
query = select(CalendarEventModel).where(
|
||||
sa.and_(
|
||||
calendar_events.c.room_id == room_id,
|
||||
calendar_events.c.ics_uid == ics_uid,
|
||||
CalendarEventModel.room_id == room_id,
|
||||
CalendarEventModel.ics_uid == ics_uid,
|
||||
)
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
return CalendarEvent(**result) if result else None
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return CalendarEvent.model_validate(row)
|
||||
|
||||
async def upsert(self, event: CalendarEvent) -> CalendarEvent:
|
||||
existing = await self.get_by_ics_uid(event.room_id, event.ics_uid)
|
||||
async def upsert(
|
||||
self, session: AsyncSession, event: CalendarEvent
|
||||
) -> CalendarEvent:
|
||||
existing = await self.get_by_ics_uid(session, event.room_id, event.ics_uid)
|
||||
|
||||
if existing:
|
||||
event.id = existing.id
|
||||
event.created_at = existing.created_at
|
||||
event.updated_at = datetime.now(timezone.utc)
|
||||
|
||||
query = (
|
||||
calendar_events.update()
|
||||
.where(calendar_events.c.id == existing.id)
|
||||
.values(**event.model_dump())
|
||||
update(CalendarEventModel)
|
||||
.where(CalendarEventModel.id == existing.id)
|
||||
.values(**event.model_dump(exclude={"id"}))
|
||||
)
|
||||
await session.execute(query)
|
||||
await session.commit()
|
||||
return event
|
||||
else:
|
||||
query = calendar_events.insert().values(**event.model_dump())
|
||||
new_event = CalendarEventModel(**event.model_dump())
|
||||
session.add(new_event)
|
||||
await session.commit()
|
||||
return event
|
||||
|
||||
await get_database().execute(query)
|
||||
return event
|
||||
|
||||
async def soft_delete_missing(
|
||||
self, room_id: str, current_ics_uids: list[str]
|
||||
async def delete_old_events(
|
||||
self, session: AsyncSession, room_id: str, cutoff_date: datetime
|
||||
) -> int:
|
||||
"""Soft delete future events that are no longer in the calendar."""
|
||||
now = datetime.now(timezone.utc)
|
||||
|
||||
select_query = calendar_events.select().where(
|
||||
query = delete(CalendarEventModel).where(
|
||||
sa.and_(
|
||||
calendar_events.c.room_id == room_id,
|
||||
calendar_events.c.start_time > now,
|
||||
calendar_events.c.is_deleted == False,
|
||||
calendar_events.c.ics_uid.notin_(current_ics_uids)
|
||||
if current_ics_uids
|
||||
else True,
|
||||
CalendarEventModel.room_id == room_id,
|
||||
CalendarEventModel.end_time < cutoff_date,
|
||||
)
|
||||
)
|
||||
result = await session.execute(query)
|
||||
await session.commit()
|
||||
return result.rowcount
|
||||
|
||||
to_delete = await get_database().fetch_all(select_query)
|
||||
delete_count = len(to_delete)
|
||||
|
||||
if delete_count > 0:
|
||||
update_query = (
|
||||
calendar_events.update()
|
||||
.where(
|
||||
sa.and_(
|
||||
calendar_events.c.room_id == room_id,
|
||||
calendar_events.c.start_time > now,
|
||||
calendar_events.c.is_deleted == False,
|
||||
calendar_events.c.ics_uid.notin_(current_ics_uids)
|
||||
if current_ics_uids
|
||||
else True,
|
||||
)
|
||||
async def delete_events_not_in_list(
|
||||
self, session: AsyncSession, room_id: str, keep_ics_uids: list[str]
|
||||
) -> int:
|
||||
if not keep_ics_uids:
|
||||
query = delete(CalendarEventModel).where(
|
||||
CalendarEventModel.room_id == room_id
|
||||
)
|
||||
else:
|
||||
query = delete(CalendarEventModel).where(
|
||||
sa.and_(
|
||||
CalendarEventModel.room_id == room_id,
|
||||
CalendarEventModel.ics_uid.notin_(keep_ics_uids),
|
||||
)
|
||||
.values(is_deleted=True, updated_at=now)
|
||||
)
|
||||
|
||||
await get_database().execute(update_query)
|
||||
result = await session.execute(query)
|
||||
await session.commit()
|
||||
return result.rowcount
|
||||
|
||||
return delete_count
|
||||
async def get_by_room(
|
||||
self, session: AsyncSession, room_id: str, include_deleted: bool = True
|
||||
) -> list[CalendarEvent]:
|
||||
query = select(CalendarEventModel).where(CalendarEventModel.room_id == room_id)
|
||||
if not include_deleted:
|
||||
query = query.where(CalendarEventModel.is_deleted == False)
|
||||
result = await session.execute(query)
|
||||
return [CalendarEvent.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def delete_by_room(self, room_id: str) -> int:
|
||||
query = calendar_events.delete().where(calendar_events.c.room_id == room_id)
|
||||
result = await get_database().execute(query)
|
||||
async def get_upcoming(
|
||||
self, session: AsyncSession, room_id: str, minutes_ahead: int = 120
|
||||
) -> list[CalendarEvent]:
|
||||
now = datetime.now(timezone.utc)
|
||||
buffer_time = now + timedelta(minutes=minutes_ahead)
|
||||
|
||||
query = (
|
||||
select(CalendarEventModel)
|
||||
.where(
|
||||
sa.and_(
|
||||
CalendarEventModel.room_id == room_id,
|
||||
CalendarEventModel.start_time <= buffer_time,
|
||||
CalendarEventModel.end_time > now,
|
||||
CalendarEventModel.is_deleted == False,
|
||||
)
|
||||
)
|
||||
.order_by(CalendarEventModel.start_time)
|
||||
)
|
||||
|
||||
result = await session.execute(query)
|
||||
return [CalendarEvent.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def soft_delete_missing(
|
||||
self, session: AsyncSession, room_id: str, current_ics_uids: list[str]
|
||||
) -> int:
|
||||
query = (
|
||||
update(CalendarEventModel)
|
||||
.where(
|
||||
sa.and_(
|
||||
CalendarEventModel.room_id == room_id,
|
||||
(
|
||||
CalendarEventModel.ics_uid.notin_(current_ics_uids)
|
||||
if current_ics_uids
|
||||
else True
|
||||
),
|
||||
CalendarEventModel.end_time > datetime.now(timezone.utc),
|
||||
)
|
||||
)
|
||||
.values(is_deleted=True)
|
||||
)
|
||||
result = await session.execute(query)
|
||||
await session.commit()
|
||||
return result.rowcount
|
||||
|
||||
|
||||
|
||||
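A hedged sketch of how an ICS sync pass might combine `upsert` and `soft_delete_missing` from the controller above; the singleton name, import path, and the parsed-event list are assumptions, not code in this diff:

from sqlalchemy.ext.asyncio import AsyncSession

from reflector.db.calendar_events import (  # assumed module path
    CalendarEvent,
    CalendarEventController,
)

calendar_events_controller = CalendarEventController()  # assumed singleton name


async def sync_room_events(
    session: AsyncSession, room_id: str, parsed_events: list[CalendarEvent]
) -> int:
    # Upsert every event still present in the ICS feed.
    for event in parsed_events:
        await calendar_events_controller.upsert(session, event)

    # Soft-delete future events that disappeared from the feed.
    return await calendar_events_controller.soft_delete_missing(
        session, room_id, [event.ics_uid for event in parsed_events]
    )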
@@ -1,229 +0,0 @@
"""Daily.co participant session tracking.

Stores webhook data for participant.joined and participant.left events to provide
historical session information (Daily.co API only returns current participants).
"""

from datetime import datetime

import sqlalchemy as sa
from pydantic import BaseModel
from sqlalchemy.dialects.postgresql import insert

from reflector.db import get_database, metadata
from reflector.utils.string import NonEmptyString

daily_participant_sessions = sa.Table(
    "daily_participant_session",
    metadata,
    sa.Column("id", sa.String, primary_key=True),
    sa.Column(
        "meeting_id",
        sa.String,
        sa.ForeignKey("meeting.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column(
        "room_id",
        sa.String,
        sa.ForeignKey("room.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column("session_id", sa.String, nullable=False),
    sa.Column("user_id", sa.String, nullable=True),
    sa.Column("user_name", sa.String, nullable=False),
    sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
    sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
    sa.Index("idx_daily_session_meeting_left", "meeting_id", "left_at"),
    sa.Index("idx_daily_session_room", "room_id"),
)


class DailyParticipantSession(BaseModel):
    """Daily.co participant session record.

    Tracks when a participant joined and left a meeting. Populated from webhooks:
    - participant.joined: Creates record with left_at=None
    - participant.left: Updates record with left_at

    ID format: {meeting_id}:{user_id}:{joined_at_ms}
    - Ensures idempotency (duplicate webhooks don't create duplicates)
    - Allows same user to rejoin (different joined_at = different session)

    Duration is calculated as: left_at - joined_at (not stored)
    """

    id: NonEmptyString
    meeting_id: NonEmptyString
    room_id: NonEmptyString
    session_id: NonEmptyString  # Daily.co's session_id (identifies room session)
    user_id: NonEmptyString | None = None
    user_name: str
    joined_at: datetime
    left_at: datetime | None = None


class DailyParticipantSessionController:
|
||||
"""Controller for Daily.co participant session persistence."""
|
||||
|
||||
async def get_by_id(self, id: str) -> DailyParticipantSession | None:
|
||||
"""Get a session by its ID."""
|
||||
query = daily_participant_sessions.select().where(
|
||||
daily_participant_sessions.c.id == id
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
return DailyParticipantSession(**result) if result else None
|
||||
|
||||
async def get_open_session(
|
||||
self, meeting_id: NonEmptyString, session_id: NonEmptyString
|
||||
) -> DailyParticipantSession | None:
|
||||
"""Get the open (not left) session for a user in a meeting."""
|
||||
query = daily_participant_sessions.select().where(
|
||||
sa.and_(
|
||||
daily_participant_sessions.c.meeting_id == meeting_id,
|
||||
daily_participant_sessions.c.session_id == session_id,
|
||||
daily_participant_sessions.c.left_at.is_(None),
|
||||
)
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
|
||||
if len(results) > 1:
|
||||
raise ValueError(
|
||||
f"Multiple open sessions for daily session {session_id} in meeting {meeting_id}: "
|
||||
f"found {len(results)} sessions"
|
||||
)
|
||||
|
||||
return DailyParticipantSession(**results[0]) if results else None
|
||||
|
||||
async def upsert_joined(self, session: DailyParticipantSession) -> None:
|
||||
"""Insert or update when participant.joined webhook arrives.
|
||||
|
||||
Idempotent: Duplicate webhooks with same ID are safely ignored.
|
||||
Out-of-order: If left webhook arrived first, preserves left_at.
|
||||
"""
|
||||
query = insert(daily_participant_sessions).values(**session.model_dump())
|
||||
query = query.on_conflict_do_update(
|
||||
index_elements=["id"],
|
||||
set_={"user_name": session.user_name},
|
||||
)
|
||||
await get_database().execute(query)
|
||||
|
||||
async def upsert_left(self, session: DailyParticipantSession) -> None:
|
||||
"""Update session when participant.left webhook arrives.
|
||||
|
||||
Finds the open session for this user in this meeting and updates left_at.
|
||||
Works around Daily.co webhook timestamp inconsistency (joined_at differs by ~4ms between webhooks).
|
||||
|
||||
Handles three cases:
|
||||
1. Normal flow: open session exists → updates left_at
|
||||
2. Out-of-order: left arrives first → creates new record with left data
|
||||
3. Duplicate: left arrives again → idempotent (DB trigger prevents left_at modification)
|
||||
"""
|
||||
if session.left_at is None:
|
||||
raise ValueError("left_at is required for upsert_left")
|
||||
|
||||
if session.left_at <= session.joined_at:
|
||||
raise ValueError(
|
||||
f"left_at ({session.left_at}) must be after joined_at ({session.joined_at})"
|
||||
)
|
||||
|
||||
# Find existing open session (works around timestamp mismatch in webhooks)
|
||||
existing = await self.get_open_session(session.meeting_id, session.session_id)
|
||||
|
||||
if existing:
|
||||
# Update existing open session
|
||||
query = (
|
||||
daily_participant_sessions.update()
|
||||
.where(daily_participant_sessions.c.id == existing.id)
|
||||
.values(left_at=session.left_at)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
else:
|
||||
# Out-of-order or first webhook: insert new record
|
||||
query = insert(daily_participant_sessions).values(**session.model_dump())
|
||||
query = query.on_conflict_do_nothing(index_elements=["id"])
|
||||
await get_database().execute(query)
|
||||
|
||||
async def get_by_meeting(self, meeting_id: str) -> list[DailyParticipantSession]:
|
||||
"""Get all participant sessions for a meeting (active and ended)."""
|
||||
query = daily_participant_sessions.select().where(
|
||||
daily_participant_sessions.c.meeting_id == meeting_id
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
return [DailyParticipantSession(**result) for result in results]
|
||||
|
||||
async def get_active_by_meeting(
|
||||
self, meeting_id: str
|
||||
) -> list[DailyParticipantSession]:
|
||||
"""Get only active (not left) participant sessions for a meeting."""
|
||||
query = daily_participant_sessions.select().where(
|
||||
sa.and_(
|
||||
daily_participant_sessions.c.meeting_id == meeting_id,
|
||||
daily_participant_sessions.c.left_at.is_(None),
|
||||
)
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
return [DailyParticipantSession(**result) for result in results]
|
||||
|
||||
async def get_all_sessions_for_meeting(
|
||||
self, meeting_id: NonEmptyString
|
||||
) -> dict[NonEmptyString, DailyParticipantSession]:
|
||||
query = daily_participant_sessions.select().where(
|
||||
daily_participant_sessions.c.meeting_id == meeting_id
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
# TODO DailySessionId custom type
|
||||
return {row["session_id"]: DailyParticipantSession(**row) for row in results}
|
||||
|
||||
async def batch_upsert_sessions(
|
||||
self, sessions: list[DailyParticipantSession]
|
||||
) -> None:
|
||||
"""Upsert multiple sessions in single query.
|
||||
|
||||
Uses ON CONFLICT for idempotency. Updates user_name on conflict, since a participant may change their display name during a meeting.
|
||||
|
||||
"""
|
||||
if not sessions:
|
||||
return
|
||||
|
||||
values = [session.model_dump() for session in sessions]
|
||||
query = insert(daily_participant_sessions).values(values)
|
||||
query = query.on_conflict_do_update(
|
||||
index_elements=["id"],
|
||||
set_={
|
||||
# Preserve existing left_at to prevent race conditions
|
||||
"left_at": sa.func.coalesce(
|
||||
daily_participant_sessions.c.left_at,
|
||||
query.excluded.left_at,
|
||||
),
|
||||
"user_name": query.excluded.user_name,
|
||||
},
|
||||
)
|
||||
await get_database().execute(query)
|
||||
|
||||
async def batch_close_sessions(
|
||||
self, session_ids: list[NonEmptyString], left_at: datetime
|
||||
) -> None:
|
||||
"""Mark multiple sessions as left in single query.
|
||||
|
||||
Only updates sessions where left_at is NULL (protects already-closed sessions).
|
||||
|
||||
A left_at mismatch for an already-existing session is ignored; it is assumed to be inconsequential if it ever happens.
|
||||
"""
|
||||
if not session_ids:
|
||||
return
|
||||
|
||||
query = (
|
||||
daily_participant_sessions.update()
|
||||
.where(
|
||||
sa.and_(
|
||||
daily_participant_sessions.c.id.in_(session_ids),
|
||||
daily_participant_sessions.c.left_at.is_(None),
|
||||
)
|
||||
)
|
||||
.values(left_at=left_at)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
|
||||
|
||||
daily_participant_sessions_controller = DailyParticipantSessionController()
|
||||
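For context on the module deleted above, a rough sketch of how its webhook handlers fit the documented `{meeting_id}:{user_id}:{joined_at_ms}` ID format; the import path and the payload field names are assumptions based on the docstrings, not Daily.co's actual schema:

from datetime import datetime, timezone

from reflector.db.daily_sessions import (  # assumed path of the deleted module
    DailyParticipantSession,
    daily_participant_sessions_controller,
)


def build_session_id(meeting_id: str, user_id: str, joined_at: datetime) -> str:
    # ID format documented above: {meeting_id}:{user_id}:{joined_at_ms}.
    return f"{meeting_id}:{user_id}:{int(joined_at.timestamp() * 1000)}"


async def on_participant_joined(payload: dict) -> None:
    # Assumes payload["joined_at"] is a unix timestamp in seconds.
    joined_at = datetime.fromtimestamp(payload["joined_at"], tz=timezone.utc)
    session = DailyParticipantSession(
        id=build_session_id(payload["meeting_id"], payload["user_id"], joined_at),
        meeting_id=payload["meeting_id"],
        room_id=payload["room_id"],
        session_id=payload["session_id"],
        user_id=payload["user_id"],
        user_name=payload["user_name"],
        joined_at=joined_at,
    )
    # Duplicate webhooks with the same id are ignored via ON CONFLICT.
    await daily_participant_sessions_controller.upsert_joined(session)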
@@ -2,88 +2,18 @@ from datetime import datetime
|
||||
from typing import Any, Literal
|
||||
|
||||
import sqlalchemy as sa
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.dialects.postgresql import JSONB
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
from sqlalchemy import select, update
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from reflector.db import get_database, metadata
|
||||
from reflector.db.base import MeetingConsentModel, MeetingModel
|
||||
from reflector.db.rooms import Room
|
||||
from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
|
||||
from reflector.utils import generate_uuid4
|
||||
from reflector.utils.string import assert_equal
|
||||
|
||||
meetings = sa.Table(
|
||||
"meeting",
|
||||
metadata,
|
||||
sa.Column("id", sa.String, primary_key=True),
|
||||
sa.Column("room_name", sa.String),
|
||||
sa.Column("room_url", sa.String),
|
||||
sa.Column("host_room_url", sa.String),
|
||||
sa.Column("start_date", sa.DateTime(timezone=True)),
|
||||
sa.Column("end_date", sa.DateTime(timezone=True)),
|
||||
sa.Column(
|
||||
"room_id",
|
||||
sa.String,
|
||||
sa.ForeignKey("room.id", ondelete="CASCADE"),
|
||||
nullable=True,
|
||||
),
|
||||
sa.Column("is_locked", sa.Boolean, nullable=False, server_default=sa.false()),
|
||||
sa.Column("room_mode", sa.String, nullable=False, server_default="normal"),
|
||||
sa.Column("recording_type", sa.String, nullable=False, server_default="cloud"),
|
||||
sa.Column(
|
||||
"recording_trigger",
|
||||
sa.String,
|
||||
nullable=False,
|
||||
server_default="automatic-2nd-participant",
|
||||
),
|
||||
sa.Column(
|
||||
"num_clients",
|
||||
sa.Integer,
|
||||
nullable=False,
|
||||
server_default=sa.text("0"),
|
||||
),
|
||||
sa.Column(
|
||||
"is_active",
|
||||
sa.Boolean,
|
||||
nullable=False,
|
||||
server_default=sa.true(),
|
||||
),
|
||||
sa.Column(
|
||||
"calendar_event_id",
|
||||
sa.String,
|
||||
sa.ForeignKey(
|
||||
"calendar_event.id",
|
||||
ondelete="SET NULL",
|
||||
name="fk_meeting_calendar_event_id",
|
||||
),
|
||||
),
|
||||
sa.Column("calendar_metadata", JSONB),
|
||||
sa.Column(
|
||||
"platform",
|
||||
sa.String,
|
||||
nullable=False,
|
||||
server_default=assert_equal(WHEREBY_PLATFORM, "whereby"),
|
||||
),
|
||||
sa.Index("idx_meeting_room_id", "room_id"),
|
||||
sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
|
||||
)
|
||||
|
||||
meeting_consent = sa.Table(
|
||||
"meeting_consent",
|
||||
metadata,
|
||||
sa.Column("id", sa.String, primary_key=True),
|
||||
sa.Column(
|
||||
"meeting_id",
|
||||
sa.String,
|
||||
sa.ForeignKey("meeting.id", ondelete="CASCADE"),
|
||||
nullable=False,
|
||||
),
|
||||
sa.Column("user_id", sa.String),
|
||||
sa.Column("consent_given", sa.Boolean, nullable=False),
|
||||
sa.Column("consent_timestamp", sa.DateTime(timezone=True), nullable=False),
|
||||
)
|
||||
|
||||
|
||||
class MeetingConsent(BaseModel):
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
id: str = Field(default_factory=generate_uuid4)
|
||||
meeting_id: str
|
||||
user_id: str | None = None
|
||||
@@ -92,6 +22,8 @@ class MeetingConsent(BaseModel):
|
||||
|
||||
|
||||
class Meeting(BaseModel):
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
id: str
|
||||
room_name: str
|
||||
room_url: str
|
||||
@@ -102,19 +34,19 @@ class Meeting(BaseModel):
|
||||
is_locked: bool = False
|
||||
room_mode: Literal["normal", "group"] = "normal"
|
||||
recording_type: Literal["none", "local", "cloud"] = "cloud"
|
||||
recording_trigger: Literal[ # whereby-specific
|
||||
recording_trigger: Literal[
|
||||
"none", "prompt", "automatic", "automatic-2nd-participant"
|
||||
] = "automatic-2nd-participant"
|
||||
num_clients: int = 0
|
||||
is_active: bool = True
|
||||
calendar_event_id: str | None = None
|
||||
calendar_metadata: dict[str, Any] | None = None
|
||||
platform: Platform = WHEREBY_PLATFORM
|
||||
|
||||
|
||||
class MeetingController:
|
||||
async def create(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
id: str,
|
||||
room_name: str,
|
||||
room_url: str,
|
||||
@@ -139,22 +71,20 @@ class MeetingController:
|
||||
recording_trigger=room.recording_trigger,
|
||||
calendar_event_id=calendar_event_id,
|
||||
calendar_metadata=calendar_metadata,
|
||||
platform=room.platform,
|
||||
)
|
||||
query = meetings.insert().values(**meeting.model_dump())
|
||||
await get_database().execute(query)
|
||||
new_meeting = MeetingModel(**meeting.model_dump())
|
||||
session.add(new_meeting)
|
||||
await session.commit()
|
||||
return meeting
|
||||
|
||||
async def get_all_active(self, platform: str | None = None) -> list[Meeting]:
|
||||
conditions = [meetings.c.is_active]
|
||||
if platform is not None:
|
||||
conditions.append(meetings.c.platform == platform)
|
||||
query = meetings.select().where(sa.and_(*conditions))
|
||||
results = await get_database().fetch_all(query)
|
||||
return [Meeting(**result) for result in results]
|
||||
async def get_all_active(self, session: AsyncSession) -> list[Meeting]:
|
||||
query = select(MeetingModel).where(MeetingModel.is_active)
|
||||
result = await session.execute(query)
|
||||
return [Meeting.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def get_by_room_name(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
room_name: str,
|
||||
) -> Meeting | None:
|
||||
"""
|
||||
@@ -162,182 +92,178 @@ class MeetingController:
|
||||
For backward compatibility, returns the most recent meeting.
|
||||
"""
|
||||
query = (
|
||||
meetings.select()
|
||||
.where(meetings.c.room_name == room_name)
|
||||
.order_by(meetings.c.end_date.desc())
|
||||
select(MeetingModel)
|
||||
.where(MeetingModel.room_name == room_name)
|
||||
.order_by(MeetingModel.end_date.desc())
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
if not result:
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return Meeting(**result)
|
||||
return Meeting.model_validate(row)
|
||||
|
||||
async def get_active(self, room: Room, current_time: datetime) -> Meeting | None:
|
||||
async def get_active(
|
||||
self, session: AsyncSession, room: Room, current_time: datetime
|
||||
) -> Meeting | None:
|
||||
"""
|
||||
Get latest active meeting for a room.
|
||||
For backward compatibility, returns the most recent active meeting.
|
||||
"""
|
||||
end_date = getattr(meetings.c, "end_date")
|
||||
query = (
|
||||
meetings.select()
|
||||
select(MeetingModel)
|
||||
.where(
|
||||
sa.and_(
|
||||
meetings.c.room_id == room.id,
|
||||
meetings.c.end_date > current_time,
|
||||
meetings.c.is_active,
|
||||
MeetingModel.room_id == room.id,
|
||||
MeetingModel.end_date > current_time,
|
||||
MeetingModel.is_active,
|
||||
)
|
||||
)
|
||||
.order_by(end_date.desc())
|
||||
.order_by(MeetingModel.end_date.desc())
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
if not result:
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return Meeting(**result)
|
||||
return Meeting.model_validate(row)
|
||||
|
||||
async def get_all_active_for_room(
|
||||
self, room: Room, current_time: datetime
|
||||
self, session: AsyncSession, room: Room, current_time: datetime
|
||||
) -> list[Meeting]:
|
||||
end_date = getattr(meetings.c, "end_date")
|
||||
query = (
|
||||
meetings.select()
|
||||
select(MeetingModel)
|
||||
.where(
|
||||
sa.and_(
|
||||
meetings.c.room_id == room.id,
|
||||
meetings.c.end_date > current_time,
|
||||
meetings.c.is_active,
|
||||
MeetingModel.room_id == room.id,
|
||||
MeetingModel.end_date > current_time,
|
||||
MeetingModel.is_active,
|
||||
)
|
||||
)
|
||||
.order_by(end_date.desc())
|
||||
.order_by(MeetingModel.end_date.desc())
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
return [Meeting(**result) for result in results]
|
||||
result = await session.execute(query)
|
||||
return [Meeting.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def get_active_by_calendar_event(
|
||||
self, room: Room, calendar_event_id: str, current_time: datetime
|
||||
self,
|
||||
session: AsyncSession,
|
||||
room: Room,
|
||||
calendar_event_id: str,
|
||||
current_time: datetime,
|
||||
) -> Meeting | None:
|
||||
"""
|
||||
Get active meeting for a specific calendar event.
|
||||
"""
|
||||
query = meetings.select().where(
|
||||
query = select(MeetingModel).where(
|
||||
sa.and_(
|
||||
meetings.c.room_id == room.id,
|
||||
meetings.c.calendar_event_id == calendar_event_id,
|
||||
meetings.c.end_date > current_time,
|
||||
meetings.c.is_active,
|
||||
MeetingModel.room_id == room.id,
|
||||
MeetingModel.calendar_event_id == calendar_event_id,
|
||||
MeetingModel.end_date > current_time,
|
||||
MeetingModel.is_active,
|
||||
)
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
if not result:
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return Meeting(**result)
|
||||
return Meeting.model_validate(row)
|
||||
|
||||
async def get_by_id(
|
||||
self, meeting_id: str, room: Room | None = None
|
||||
self, session: AsyncSession, meeting_id: str, **kwargs
|
||||
) -> Meeting | None:
|
||||
query = meetings.select().where(meetings.c.id == meeting_id)
|
||||
|
||||
if room:
|
||||
query = query.where(meetings.c.room_id == room.id)
|
||||
|
||||
result = await get_database().fetch_one(query)
|
||||
if not result:
|
||||
query = select(MeetingModel).where(MeetingModel.id == meeting_id)
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return Meeting(**result)
|
||||
return Meeting.model_validate(row)
|
||||
|
||||
async def get_by_calendar_event(
|
||||
self, calendar_event_id: str, room: Room
|
||||
self, session: AsyncSession, calendar_event_id: str
|
||||
) -> Meeting | None:
|
||||
query = meetings.select().where(
|
||||
meetings.c.calendar_event_id == calendar_event_id
|
||||
query = select(MeetingModel).where(
|
||||
MeetingModel.calendar_event_id == calendar_event_id
|
||||
)
|
||||
if room:
|
||||
query = query.where(meetings.c.room_id == room.id)
|
||||
result = await get_database().fetch_one(query)
|
||||
if not result:
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return Meeting(**result)
|
||||
return Meeting.model_validate(row)
|
||||
|
||||
async def update_meeting(self, meeting_id: str, **kwargs):
|
||||
query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
|
||||
await get_database().execute(query)
|
||||
|
||||
async def increment_num_clients(self, meeting_id: str) -> None:
|
||||
"""Atomically increment participant count."""
|
||||
async def update_meeting(self, session: AsyncSession, meeting_id: str, **kwargs):
|
||||
query = (
|
||||
meetings.update()
|
||||
.where(meetings.c.id == meeting_id)
|
||||
.values(num_clients=meetings.c.num_clients + 1)
|
||||
update(MeetingModel).where(MeetingModel.id == meeting_id).values(**kwargs)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
|
||||
async def decrement_num_clients(self, meeting_id: str) -> None:
|
||||
"""Atomically decrement participant count (min 0)."""
|
||||
query = (
|
||||
meetings.update()
|
||||
.where(meetings.c.id == meeting_id)
|
||||
.values(
|
||||
num_clients=sa.case(
|
||||
(meetings.c.num_clients > 0, meetings.c.num_clients - 1), else_=0
|
||||
)
|
||||
)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
await session.execute(query)
|
||||
await session.commit()
|
||||
|
||||
|
||||
class MeetingConsentController:
|
||||
async def get_by_meeting_id(self, meeting_id: str) -> list[MeetingConsent]:
|
||||
query = meeting_consent.select().where(
|
||||
meeting_consent.c.meeting_id == meeting_id
|
||||
async def get_by_meeting_id(
|
||||
self, session: AsyncSession, meeting_id: str
|
||||
) -> list[MeetingConsent]:
|
||||
query = select(MeetingConsentModel).where(
|
||||
MeetingConsentModel.meeting_id == meeting_id
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
return [MeetingConsent(**result) for result in results]
|
||||
result = await session.execute(query)
|
||||
return [MeetingConsent.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def get_by_meeting_and_user(
|
||||
self, meeting_id: str, user_id: str
|
||||
self, session: AsyncSession, meeting_id: str, user_id: str
|
||||
) -> MeetingConsent | None:
|
||||
"""Get existing consent for a specific user and meeting"""
|
||||
query = meeting_consent.select().where(
|
||||
meeting_consent.c.meeting_id == meeting_id,
|
||||
meeting_consent.c.user_id == user_id,
|
||||
query = select(MeetingConsentModel).where(
|
||||
sa.and_(
|
||||
MeetingConsentModel.meeting_id == meeting_id,
|
||||
MeetingConsentModel.user_id == user_id,
|
||||
)
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
if result is None:
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if row is None:
|
||||
return None
|
||||
return MeetingConsent(**result)
|
||||
return MeetingConsent.model_validate(row)
|
||||
|
||||
async def upsert(self, consent: MeetingConsent) -> MeetingConsent:
|
||||
async def upsert(
|
||||
self, session: AsyncSession, consent: MeetingConsent
|
||||
) -> MeetingConsent:
|
||||
if consent.user_id:
|
||||
# For authenticated users, check if consent already exists
|
||||
# not transactional, but acceptable here: consents are never deleted anyway
|
||||
existing = await self.get_by_meeting_and_user(
|
||||
consent.meeting_id, consent.user_id
|
||||
session, consent.meeting_id, consent.user_id
|
||||
)
|
||||
if existing:
|
||||
query = (
|
||||
meeting_consent.update()
|
||||
.where(meeting_consent.c.id == existing.id)
|
||||
update(MeetingConsentModel)
|
||||
.where(MeetingConsentModel.id == existing.id)
|
||||
.values(
|
||||
consent_given=consent.consent_given,
|
||||
consent_timestamp=consent.consent_timestamp,
|
||||
)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
await session.execute(query)
|
||||
await session.commit()
|
||||
|
||||
existing.consent_given = consent.consent_given
|
||||
existing.consent_timestamp = consent.consent_timestamp
|
||||
return existing
|
||||
existing.consent_given = consent.consent_given
|
||||
existing.consent_timestamp = consent.consent_timestamp
|
||||
return existing
|
||||
|
||||
query = meeting_consent.insert().values(**consent.model_dump())
|
||||
await get_database().execute(query)
|
||||
new_consent = MeetingConsentModel(**consent.model_dump())
|
||||
session.add(new_consent)
|
||||
await session.commit()
|
||||
return consent
|
||||
|
||||
async def has_any_denial(self, meeting_id: str) -> bool:
|
||||
async def has_any_denial(self, session: AsyncSession, meeting_id: str) -> bool:
|
||||
"""Check if any participant denied consent for this meeting"""
|
||||
query = meeting_consent.select().where(
|
||||
meeting_consent.c.meeting_id == meeting_id,
|
||||
meeting_consent.c.consent_given.is_(False),
|
||||
query = select(MeetingConsentModel).where(
|
||||
sa.and_(
|
||||
MeetingConsentModel.meeting_id == meeting_id,
|
||||
MeetingConsentModel.consent_given.is_(False),
|
||||
)
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
return result is not None
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
return row is not None
|
||||
|
||||
|
||||
meetings_controller = MeetingController()
|
||||
|
||||
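An illustrative use of the session-based `MeetingController`; the housekeeping task itself is an assumption and not part of this diff. Note that `update_meeting` commits on the session it is handed:

from datetime import datetime, timezone

from sqlalchemy.ext.asyncio import AsyncSession

from reflector.db.meetings import meetings_controller  # assumed module path


async def deactivate_finished_meetings(session: AsyncSession) -> int:
    now = datetime.now(timezone.utc)
    closed = 0
    for meeting in await meetings_controller.get_all_active(session):
        if meeting.end_date and meeting.end_date <= now:
            await meetings_controller.update_meeting(
                session, meeting.id, is_active=False
            )
            closed += 1
    return closed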
@@ -1,114 +1,79 @@
|
||||
from datetime import datetime
|
||||
from typing import Literal
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import sqlalchemy as sa
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy import or_
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
from sqlalchemy import delete, select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from reflector.db import get_database, metadata
|
||||
from reflector.db.base import RecordingModel
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
recordings = sa.Table(
|
||||
"recording",
|
||||
metadata,
|
||||
sa.Column("id", sa.String, primary_key=True),
|
||||
sa.Column("bucket_name", sa.String, nullable=False),
|
||||
sa.Column("object_key", sa.String, nullable=False),
|
||||
sa.Column("recorded_at", sa.DateTime(timezone=True), nullable=False),
|
||||
sa.Column(
|
||||
"status",
|
||||
sa.String,
|
||||
nullable=False,
|
||||
server_default="pending",
|
||||
),
|
||||
sa.Column("meeting_id", sa.String),
|
||||
sa.Column("track_keys", sa.JSON, nullable=True),
|
||||
sa.Index("idx_recording_meeting_id", "meeting_id"),
|
||||
)
|
||||
|
||||
|
||||
class Recording(BaseModel):
|
||||
id: str = Field(default_factory=generate_uuid4)
|
||||
bucket_name: str
|
||||
# for single-track
|
||||
object_key: str
|
||||
recorded_at: datetime
|
||||
status: Literal["pending", "processing", "completed", "failed"] = "pending"
|
||||
meeting_id: str | None = None
|
||||
# for multitrack reprocessing
|
||||
# track_keys can be empty list [] if recording finished but no audio was captured (silence/muted)
|
||||
# None means not a multitrack recording, [] means multitrack with no tracks
|
||||
track_keys: list[str] | None = None
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
@property
|
||||
def is_multitrack(self) -> bool:
|
||||
"""True if recording has separate audio tracks (1+ tracks counts as multitrack)."""
|
||||
return self.track_keys is not None and len(self.track_keys) > 0
|
||||
id: str = Field(default_factory=generate_uuid4)
|
||||
meeting_id: str
|
||||
url: str
|
||||
object_key: str
|
||||
duration: float | None = None
|
||||
created_at: datetime
|
||||
|
||||
|
||||
class RecordingController:
|
||||
async def create(self, recording: Recording):
|
||||
query = recordings.insert().values(**recording.model_dump())
|
||||
await get_database().execute(query)
|
||||
async def create(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
meeting_id: str,
|
||||
url: str,
|
||||
object_key: str,
|
||||
duration: float | None = None,
|
||||
created_at: datetime | None = None,
|
||||
):
|
||||
if created_at is None:
|
||||
created_at = datetime.now(timezone.utc)
|
||||
|
||||
recording = Recording(
|
||||
meeting_id=meeting_id,
|
||||
url=url,
|
||||
object_key=object_key,
|
||||
duration=duration,
|
||||
created_at=created_at,
|
||||
)
|
||||
new_recording = RecordingModel(**recording.model_dump())
|
||||
session.add(new_recording)
|
||||
await session.commit()
|
||||
return recording
|
||||
|
||||
async def get_by_id(self, id: str) -> Recording | None:
|
||||
query = recordings.select().where(recordings.c.id == id)
|
||||
result = await get_database().fetch_one(query)
|
||||
return Recording(**result) if result else None
|
||||
|
||||
async def get_by_object_key(
|
||||
self, bucket_name: str, object_key: str
|
||||
async def get_by_id(
|
||||
self, session: AsyncSession, recording_id: str
|
||||
) -> Recording | None:
|
||||
query = recordings.select().where(
|
||||
recordings.c.bucket_name == bucket_name,
|
||||
recordings.c.object_key == object_key,
|
||||
)
|
||||
result = await get_database().fetch_one(query)
|
||||
return Recording(**result) if result else None
|
||||
"""
|
||||
Get a recording by id
|
||||
"""
|
||||
query = select(RecordingModel).where(RecordingModel.id == recording_id)
|
||||
result = await session.execute(query)
|
||||
row = result.scalar_one_or_none()
|
||||
if not row:
|
||||
return None
|
||||
return Recording.model_validate(row)
|
||||
|
||||
async def remove_by_id(self, id: str) -> None:
|
||||
query = recordings.delete().where(recordings.c.id == id)
|
||||
await get_database().execute(query)
|
||||
|
||||
# no check for existence
|
||||
async def get_by_ids(self, recording_ids: list[str]) -> list[Recording]:
|
||||
if not recording_ids:
|
||||
return []
|
||||
|
||||
query = recordings.select().where(recordings.c.id.in_(recording_ids))
|
||||
results = await get_database().fetch_all(query)
|
||||
return [Recording(**row) for row in results]
|
||||
|
||||
async def get_multitrack_needing_reprocessing(
|
||||
self, bucket_name: str
|
||||
async def get_by_meeting_id(
|
||||
self, session: AsyncSession, meeting_id: str
|
||||
) -> list[Recording]:
|
||||
"""
|
||||
Get multitrack recordings that need reprocessing:
|
||||
- Have track_keys (multitrack)
|
||||
- Either have no transcript OR transcript has error status
|
||||
|
||||
This is more efficient than fetching all recordings and filtering in Python.
|
||||
Get all recordings for a meeting
|
||||
"""
|
||||
from reflector.db.transcripts import (
|
||||
transcripts, # noqa: PLC0415 cyclic import
|
||||
)
|
||||
query = select(RecordingModel).where(RecordingModel.meeting_id == meeting_id)
|
||||
result = await session.execute(query)
|
||||
return [Recording.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
query = (
|
||||
recordings.select()
|
||||
.outerjoin(transcripts, recordings.c.id == transcripts.c.recording_id)
|
||||
.where(
|
||||
recordings.c.bucket_name == bucket_name,
|
||||
recordings.c.track_keys.isnot(None),
|
||||
or_(
|
||||
transcripts.c.id.is_(None),
|
||||
transcripts.c.status == "error",
|
||||
),
|
||||
)
|
||||
)
|
||||
results = await get_database().fetch_all(query)
|
||||
recordings_list = [Recording(**row) for row in results]
|
||||
return [r for r in recordings_list if r.is_multitrack]
|
||||
async def remove_by_id(self, session: AsyncSession, recording_id: str) -> None:
|
||||
"""
|
||||
Remove a recording by id
|
||||
"""
|
||||
query = delete(RecordingModel).where(RecordingModel.id == recording_id)
|
||||
await session.execute(query)
|
||||
await session.commit()
|
||||
|
||||
|
||||
recordings_controller = RecordingController()
|
||||
|
||||
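A short sketch of registering a finished recording through the new controller; the import path and bucket URL are placeholders:

from datetime import datetime, timezone

from sqlalchemy.ext.asyncio import AsyncSession

from reflector.db.recordings import Recording, recordings_controller  # assumed path


async def register_recording(
    session: AsyncSession, meeting_id: str, object_key: str
) -> Recording:
    return await recordings_controller.create(
        session,
        meeting_id=meeting_id,
        url=f"https://example-bucket.s3.amazonaws.com/{object_key}",  # placeholder
        object_key=object_key,
        duration=None,  # can be filled in later once the media is probed
        created_at=datetime.now(timezone.utc),
    )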
@@ -3,72 +3,19 @@ from datetime import datetime, timezone
|
||||
from sqlite3 import IntegrityError
|
||||
from typing import Literal
|
||||
|
||||
import sqlalchemy
|
||||
from fastapi import HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.sql import false, or_
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
from sqlalchemy import delete, select, update
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy.sql import or_
|
||||
|
||||
from reflector.db import get_database, metadata
|
||||
from reflector.schemas.platform import Platform
|
||||
from reflector.settings import settings
|
||||
from reflector.db.base import RoomModel
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
rooms = sqlalchemy.Table(
|
||||
"room",
|
||||
metadata,
|
||||
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
|
||||
sqlalchemy.Column("name", sqlalchemy.String, nullable=False, unique=True),
|
||||
sqlalchemy.Column("user_id", sqlalchemy.String, nullable=False),
|
||||
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
|
||||
sqlalchemy.Column(
|
||||
"zulip_auto_post", sqlalchemy.Boolean, nullable=False, server_default=false()
|
||||
),
|
||||
sqlalchemy.Column("zulip_stream", sqlalchemy.String),
|
||||
sqlalchemy.Column("zulip_topic", sqlalchemy.String),
|
||||
sqlalchemy.Column(
|
||||
"is_locked", sqlalchemy.Boolean, nullable=False, server_default=false()
|
||||
),
|
||||
sqlalchemy.Column(
|
||||
"room_mode", sqlalchemy.String, nullable=False, server_default="normal"
|
||||
),
|
||||
sqlalchemy.Column(
|
||||
"recording_type", sqlalchemy.String, nullable=False, server_default="cloud"
|
||||
),
|
||||
sqlalchemy.Column(
|
||||
"recording_trigger",
|
||||
sqlalchemy.String,
|
||||
nullable=False,
|
||||
server_default="automatic-2nd-participant",
|
||||
),
|
||||
sqlalchemy.Column(
|
||||
"is_shared", sqlalchemy.Boolean, nullable=False, server_default=false()
|
||||
),
|
||||
sqlalchemy.Column("webhook_url", sqlalchemy.String, nullable=True),
|
||||
sqlalchemy.Column("webhook_secret", sqlalchemy.String, nullable=True),
|
||||
sqlalchemy.Column("ics_url", sqlalchemy.Text),
|
||||
sqlalchemy.Column("ics_fetch_interval", sqlalchemy.Integer, server_default="300"),
|
||||
sqlalchemy.Column(
|
||||
"ics_enabled", sqlalchemy.Boolean, nullable=False, server_default=false()
|
||||
),
|
||||
sqlalchemy.Column("ics_last_sync", sqlalchemy.DateTime(timezone=True)),
|
||||
sqlalchemy.Column("ics_last_etag", sqlalchemy.Text),
|
||||
sqlalchemy.Column(
|
||||
"platform",
|
||||
sqlalchemy.String,
|
||||
nullable=False,
|
||||
),
|
||||
sqlalchemy.Column(
|
||||
"use_hatchet",
|
||||
sqlalchemy.Boolean,
|
||||
nullable=False,
|
||||
server_default=false(),
|
||||
),
|
||||
sqlalchemy.Index("idx_room_is_shared", "is_shared"),
|
||||
sqlalchemy.Index("idx_room_ics_enabled", "ics_enabled"),
|
||||
)
|
||||
|
||||
|
||||
class Room(BaseModel):
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
id: str = Field(default_factory=generate_uuid4)
|
||||
name: str
|
||||
user_id: str
|
||||
@@ -79,7 +26,7 @@ class Room(BaseModel):
|
||||
is_locked: bool = False
|
||||
room_mode: Literal["normal", "group"] = "normal"
|
||||
recording_type: Literal["none", "local", "cloud"] = "cloud"
|
||||
recording_trigger: Literal[ # whereby-specific
|
||||
recording_trigger: Literal[
|
||||
"none", "prompt", "automatic", "automatic-2nd-participant"
|
||||
] = "automatic-2nd-participant"
|
||||
is_shared: bool = False
|
||||
@@ -90,13 +37,12 @@ class Room(BaseModel):
|
||||
ics_enabled: bool = False
|
||||
ics_last_sync: datetime | None = None
|
||||
ics_last_etag: str | None = None
|
||||
platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
|
||||
use_hatchet: bool = False
|
||||
|
||||
|
||||
class RoomController:
|
||||
async def get_all(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
user_id: str | None = None,
|
||||
order_by: str | None = None,
|
||||
return_query: bool = False,
|
||||
@@ -110,14 +56,14 @@ class RoomController:
|
||||
Parameters:
|
||||
- `order_by`: field to order by, e.g. "-created_at"
|
||||
"""
|
||||
query = rooms.select()
|
||||
query = select(RoomModel)
|
||||
if user_id is not None:
|
||||
query = query.where(or_(rooms.c.user_id == user_id, rooms.c.is_shared))
|
||||
query = query.where(or_(RoomModel.user_id == user_id, RoomModel.is_shared))
|
||||
else:
|
||||
query = query.where(rooms.c.is_shared)
|
||||
query = query.where(RoomModel.is_shared)
|
||||
|
||||
if order_by is not None:
|
||||
field = getattr(rooms.c, order_by[1:])
|
||||
field = getattr(RoomModel, order_by[1:])
|
||||
if order_by.startswith("-"):
|
||||
field = field.desc()
|
||||
query = query.order_by(field)
|
||||
@@ -125,11 +71,12 @@ class RoomController:
|
||||
if return_query:
|
||||
return query
|
||||
|
||||
results = await get_database().fetch_all(query)
|
||||
return results
|
||||
result = await session.execute(query)
|
||||
return [Room.model_validate(row) for row in result.scalars().all()]
|
||||
|
||||
async def add(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
name: str,
|
||||
user_id: str,
|
||||
zulip_auto_post: bool,
|
||||
@@ -145,7 +92,6 @@ class RoomController:
|
||||
ics_url: str | None = None,
|
||||
ics_fetch_interval: int = 300,
|
||||
ics_enabled: bool = False,
|
||||
platform: Platform = settings.DEFAULT_VIDEO_PLATFORM,
|
||||
):
|
||||
"""
|
||||
Add a new room
|
||||
@@ -153,43 +99,44 @@ class RoomController:
|
||||
if webhook_url and not webhook_secret:
|
||||
webhook_secret = secrets.token_urlsafe(32)
|
||||
|
||||
room_data = {
|
||||
"name": name,
|
||||
"user_id": user_id,
|
||||
"zulip_auto_post": zulip_auto_post,
|
||||
"zulip_stream": zulip_stream,
|
||||
"zulip_topic": zulip_topic,
|
||||
"is_locked": is_locked,
|
||||
"room_mode": room_mode,
|
||||
"recording_type": recording_type,
|
||||
"recording_trigger": recording_trigger,
|
||||
"is_shared": is_shared,
|
||||
"webhook_url": webhook_url,
|
||||
"webhook_secret": webhook_secret,
|
||||
"ics_url": ics_url,
|
||||
"ics_fetch_interval": ics_fetch_interval,
|
||||
"ics_enabled": ics_enabled,
|
||||
"platform": platform,
|
||||
}
|
||||
|
||||
room = Room(**room_data)
|
||||
query = rooms.insert().values(**room.model_dump())
|
||||
room = Room(
|
||||
name=name,
|
||||
user_id=user_id,
|
||||
zulip_auto_post=zulip_auto_post,
|
||||
zulip_stream=zulip_stream,
|
||||
zulip_topic=zulip_topic,
|
||||
is_locked=is_locked,
|
||||
room_mode=room_mode,
|
||||
recording_type=recording_type,
|
||||
recording_trigger=recording_trigger,
|
||||
is_shared=is_shared,
|
||||
webhook_url=webhook_url,
|
||||
webhook_secret=webhook_secret,
|
||||
ics_url=ics_url,
|
||||
ics_fetch_interval=ics_fetch_interval,
|
||||
ics_enabled=ics_enabled,
|
||||
)
|
||||
new_room = RoomModel(**room.model_dump())
|
||||
session.add(new_room)
|
||||
try:
|
||||
await get_database().execute(query)
|
||||
await session.flush()
|
||||
except IntegrityError:
|
||||
raise HTTPException(status_code=400, detail="Room name is not unique")
|
||||
return room
|
||||
|
||||
async def update(self, room: Room, values: dict, mutate=True):
|
||||
async def update(
|
||||
self, session: AsyncSession, room: Room, values: dict, mutate=True
|
||||
):
|
||||
"""
|
||||
Update a room fields with key/values in values
|
||||
"""
|
||||
if values.get("webhook_url") and not values.get("webhook_secret"):
|
||||
values["webhook_secret"] = secrets.token_urlsafe(32)
|
||||
|
||||
query = rooms.update().where(rooms.c.id == room.id).values(**values)
|
||||
query = update(RoomModel).where(RoomModel.id == room.id).values(**values)
|
||||
try:
|
||||
await get_database().execute(query)
|
||||
await session.execute(query)
|
||||
await session.flush()
|
||||
except IntegrityError:
|
||||
raise HTTPException(status_code=400, detail="Room name is not unique")
|
||||
|
||||
@@ -197,67 +144,79 @@ class RoomController:
|
||||
for key, value in values.items():
|
||||
setattr(room, key, value)
|
||||
|
||||
async def get_by_id(self, room_id: str, **kwargs) -> Room | None:
|
||||
async def get_by_id(
|
||||
self, session: AsyncSession, room_id: str, **kwargs
|
||||
) -> Room | None:
"""
Get a room by id
"""
query = rooms.select().where(rooms.c.id == room_id)
query = select(RoomModel).where(RoomModel.id == room_id)
if "user_id" in kwargs:
query = query.where(rooms.c.user_id == kwargs["user_id"])
result = await get_database().fetch_one(query)
if not result:
query = query.where(RoomModel.user_id == kwargs["user_id"])
result = await session.execute(query)
row = result.scalars().first()
if not row:
return None
return Room(**result)
return Room.model_validate(row)

async def get_by_name(self, room_name: str, **kwargs) -> Room | None:
async def get_by_name(
self, session: AsyncSession, room_name: str, **kwargs
) -> Room | None:
"""
Get a room by name
"""
query = rooms.select().where(rooms.c.name == room_name)
query = select(RoomModel).where(RoomModel.name == room_name)
if "user_id" in kwargs:
query = query.where(rooms.c.user_id == kwargs["user_id"])
result = await get_database().fetch_one(query)
if not result:
query = query.where(RoomModel.user_id == kwargs["user_id"])
result = await session.execute(query)
row = result.scalars().first()
if not row:
return None
return Room(**result)
return Room.model_validate(row)

async def get_by_id_for_http(self, meeting_id: str, user_id: str | None) -> Room:
async def get_by_id_for_http(
self, session: AsyncSession, meeting_id: str, user_id: str | None
) -> Room:
"""
Get a room by ID for HTTP request.

If not found, it will raise a 404 error.
"""
query = rooms.select().where(rooms.c.id == meeting_id)
result = await get_database().fetch_one(query)
if not result:
query = select(RoomModel).where(RoomModel.id == meeting_id)
result = await session.execute(query)
row = result.scalars().first()
if not row:
raise HTTPException(status_code=404, detail="Room not found")

room = Room(**result)
room = Room.model_validate(row)

return room

async def get_ics_enabled(self) -> list[Room]:
query = rooms.select().where(
rooms.c.ics_enabled == True, rooms.c.ics_url != None
async def get_ics_enabled(self, session: AsyncSession) -> list[Room]:
query = select(RoomModel).where(
RoomModel.ics_enabled == True, RoomModel.ics_url != None
)
results = await get_database().fetch_all(query)
return [Room(**result) for result in results]
result = await session.execute(query)
results = result.scalars().all()
return [Room(**row.__dict__) for row in results]

async def remove_by_id(
self,
session: AsyncSession,
room_id: str,
user_id: str | None = None,
) -> None:
"""
Remove a room by id
"""
room = await self.get_by_id(room_id, user_id=user_id)
room = await self.get_by_id(session, room_id, user_id=user_id)
if not room:
return
if user_id is not None and room.user_id != user_id:
return
query = rooms.delete().where(rooms.c.id == room_id)
await get_database().execute(query)
query = delete(RoomModel).where(RoomModel.id == room_id)
await session.execute(query)
await session.flush()


rooms_controller = RoomController()

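# --- Illustrative usage sketch (editor's addition, not part of this changeset) ---
# A minimal example of calling the session-based controller API above, assuming a
# SQLAlchemy 2.0 async engine; the URL and session-factory name are placeholders.
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

from reflector.db.rooms import rooms_controller  # import path assumed from this diff

engine = create_async_engine("postgresql+asyncpg://localhost/reflector")  # placeholder URL
SessionLocal = async_sessionmaker(engine, expire_on_commit=False)


async def show_room(room_id: str, user_id: str) -> None:
    async with SessionLocal() as session:
        room = await rooms_controller.get_by_id(session, room_id, user_id=user_id)
        if room is not None:
            print(room.name)
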
@@ -8,7 +8,6 @@ from typing import Annotated, Any, Dict, Iterator

import sqlalchemy
import webvtt
from databases.interfaces import Record as DbRecord
from fastapi import HTTPException
from pydantic import (
BaseModel,
@@ -20,11 +19,10 @@ from pydantic import (
constr,
field_serializer,
)
from sqlalchemy.ext.asyncio import AsyncSession

from reflector.db import get_database
from reflector.db.rooms import rooms
from reflector.db.transcripts import SourceKind, TranscriptStatus, transcripts
from reflector.db.utils import is_postgresql
from reflector.db.base import RoomModel, TranscriptModel
from reflector.db.transcripts import SourceKind, TranscriptStatus
from reflector.logger import logger
from reflector.utils.string import NonEmptyString, try_parse_non_empty_string

@@ -135,8 +133,6 @@ class SearchParameters(BaseModel):
user_id: str | None = None
room_id: str | None = None
source_kind: SourceKind | None = None
from_datetime: datetime | None = None
to_datetime: datetime | None = None


class SearchResultDB(BaseModel):
@@ -333,36 +329,30 @@ class SearchController:

@classmethod
async def search_transcripts(
cls, params: SearchParameters
cls, session: AsyncSession, params: SearchParameters
) -> tuple[list[SearchResult], int]:
"""
Full-text search for transcripts using PostgreSQL tsvector.
Returns (results, total_count).
"""

if not is_postgresql():
logger.warning(
"Full-text search requires PostgreSQL. Returning empty results."
)
return [], 0

base_columns = [
transcripts.c.id,
transcripts.c.title,
transcripts.c.created_at,
transcripts.c.duration,
transcripts.c.status,
transcripts.c.user_id,
transcripts.c.room_id,
transcripts.c.source_kind,
transcripts.c.webvtt,
transcripts.c.long_summary,
TranscriptModel.id,
TranscriptModel.title,
TranscriptModel.created_at,
TranscriptModel.duration,
TranscriptModel.status,
TranscriptModel.user_id,
TranscriptModel.room_id,
TranscriptModel.source_kind,
TranscriptModel.webvtt,
TranscriptModel.long_summary,
sqlalchemy.case(
(
transcripts.c.room_id.isnot(None) & rooms.c.id.is_(None),
TranscriptModel.room_id.isnot(None) & RoomModel.id.is_(None),
"Deleted Room",
),
else_=rooms.c.name,
else_=RoomModel.name,
).label("room_name"),
]
search_query = None
@@ -371,7 +361,7 @@ class SearchController:
"english", params.query_text
)
rank_column = sqlalchemy.func.ts_rank(
transcripts.c.search_vector_en,
TranscriptModel.search_vector_en,
search_query,
32, # normalization flag: rank/(rank+1) for 0-1 range
).label("rank")
@@ -379,55 +369,51 @@ class SearchController:
rank_column = sqlalchemy.cast(1.0, sqlalchemy.Float).label("rank")

columns = base_columns + [rank_column]
base_query = sqlalchemy.select(columns).select_from(
transcripts.join(rooms, transcripts.c.room_id == rooms.c.id, isouter=True)
base_query = (
sqlalchemy.select(*columns)
.select_from(TranscriptModel)
.outerjoin(RoomModel, TranscriptModel.room_id == RoomModel.id)
)

if params.query_text is not None:
# because already initialized based on params.query_text presence above
assert search_query is not None
base_query = base_query.where(
transcripts.c.search_vector_en.op("@@")(search_query)
TranscriptModel.search_vector_en.op("@@")(search_query)
)

if params.user_id:
base_query = base_query.where(
sqlalchemy.or_(
transcripts.c.user_id == params.user_id, rooms.c.is_shared
TranscriptModel.user_id == params.user_id, RoomModel.is_shared
)
)
else:
base_query = base_query.where(rooms.c.is_shared)
base_query = base_query.where(RoomModel.is_shared)
if params.room_id:
base_query = base_query.where(transcripts.c.room_id == params.room_id)
base_query = base_query.where(TranscriptModel.room_id == params.room_id)
if params.source_kind:
base_query = base_query.where(
transcripts.c.source_kind == params.source_kind
)
if params.from_datetime:
base_query = base_query.where(
transcripts.c.created_at >= params.from_datetime
)
if params.to_datetime:
base_query = base_query.where(
transcripts.c.created_at <= params.to_datetime
TranscriptModel.source_kind == params.source_kind
)

if params.query_text is not None:
order_by = sqlalchemy.desc(sqlalchemy.text("rank"))
else:
order_by = sqlalchemy.desc(transcripts.c.created_at)
order_by = sqlalchemy.desc(TranscriptModel.created_at)

query = base_query.order_by(order_by).limit(params.limit).offset(params.offset)

rs = await get_database().fetch_all(query)
result = await session.execute(query)
rs = result.mappings().all()

count_query = sqlalchemy.select([sqlalchemy.func.count()]).select_from(
count_query = sqlalchemy.select(sqlalchemy.func.count()).select_from(
base_query.alias("search_results")
)
total = await get_database().fetch_val(count_query)
count_result = await session.execute(count_query)
total = count_result.scalar()

def _process_result(r: DbRecord) -> SearchResult:
def _process_result(r: dict) -> SearchResult:
r_dict: Dict[str, Any] = dict(r)

webvtt_raw: str | None = r_dict.pop("webvtt", None)

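# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# The rank expression above passes normalization flag 32 to ts_rank, which rescales
# the raw rank to rank/(rank+1) so scores stay in the 0-1 range. A standalone example
# of the same construct; plainto_tsquery is used here only for illustration, the
# actual tsquery constructor is elided from the hunk above.
import sqlalchemy
from reflector.db.base import TranscriptModel  # import path as used in this diff

tsq = sqlalchemy.func.plainto_tsquery("english", "quarterly planning")
normalized_rank = sqlalchemy.func.ts_rank(
    TranscriptModel.search_vector_en,
    tsq,
    32,  # rank/(rank+1), keeps scores comparable across documents
).label("rank")
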
@@ -2,26 +2,22 @@ import enum
import json
import os
import shutil
from contextlib import asynccontextmanager
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Literal

import sqlalchemy
from fastapi import HTTPException
from pydantic import BaseModel, ConfigDict, Field, field_serializer
from sqlalchemy import Enum
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy.sql import false, or_
from sqlalchemy import delete, insert, select, update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import or_

from reflector.db import get_database, metadata
from reflector.db.base import RoomModel, TranscriptModel
from reflector.db.recordings import recordings_controller
from reflector.db.rooms import rooms
from reflector.db.utils import is_postgresql
from reflector.logger import logger
from reflector.processors.types import Word as ProcessorWord
from reflector.settings import settings
from reflector.storage import get_transcripts_storage
from reflector.storage import get_recordings_storage, get_transcripts_storage
from reflector.utils import generate_uuid4
from reflector.utils.webvtt import topics_to_webvtt

@@ -32,94 +28,6 @@ class SourceKind(enum.StrEnum):
FILE = enum.auto()


transcripts = sqlalchemy.Table(
"transcript",
metadata,
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
sqlalchemy.Column("name", sqlalchemy.String),
sqlalchemy.Column("status", sqlalchemy.String),
sqlalchemy.Column("locked", sqlalchemy.Boolean),
sqlalchemy.Column("duration", sqlalchemy.Float),
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True)),
sqlalchemy.Column("title", sqlalchemy.String),
sqlalchemy.Column("short_summary", sqlalchemy.String),
sqlalchemy.Column("long_summary", sqlalchemy.String),
sqlalchemy.Column("action_items", sqlalchemy.JSON),
sqlalchemy.Column("topics", sqlalchemy.JSON),
sqlalchemy.Column("events", sqlalchemy.JSON),
sqlalchemy.Column("participants", sqlalchemy.JSON),
sqlalchemy.Column("source_language", sqlalchemy.String),
sqlalchemy.Column("target_language", sqlalchemy.String),
sqlalchemy.Column(
"reviewed", sqlalchemy.Boolean, nullable=False, server_default=false()
),
sqlalchemy.Column(
"audio_location",
sqlalchemy.String,
nullable=False,
server_default="local",
),
# with user attached, optional
sqlalchemy.Column("user_id", sqlalchemy.String),
sqlalchemy.Column(
"share_mode",
sqlalchemy.String,
nullable=False,
server_default="private",
),
sqlalchemy.Column(
"meeting_id",
sqlalchemy.String,
),
sqlalchemy.Column("recording_id", sqlalchemy.String),
sqlalchemy.Column("zulip_message_id", sqlalchemy.Integer),
sqlalchemy.Column(
"source_kind",
Enum(SourceKind, values_callable=lambda obj: [e.value for e in obj]),
nullable=False,
),
# indicative field: whether associated audio is deleted
# the main "audio deleted" is the presence of the audio itself / consents not-given
# same field could've been in recording/meeting, and it's maybe even ok to dupe it at need
sqlalchemy.Column("audio_deleted", sqlalchemy.Boolean),
sqlalchemy.Column("room_id", sqlalchemy.String),
sqlalchemy.Column("webvtt", sqlalchemy.Text),
# Hatchet workflow run ID for resumption of failed workflows
sqlalchemy.Column("workflow_run_id", sqlalchemy.String),
sqlalchemy.Index("idx_transcript_recording_id", "recording_id"),
sqlalchemy.Index("idx_transcript_user_id", "user_id"),
sqlalchemy.Index("idx_transcript_created_at", "created_at"),
sqlalchemy.Index("idx_transcript_user_id_recording_id", "user_id", "recording_id"),
sqlalchemy.Index("idx_transcript_room_id", "room_id"),
sqlalchemy.Index("idx_transcript_source_kind", "source_kind"),
sqlalchemy.Index("idx_transcript_room_id_created_at", "room_id", "created_at"),
)

# Add PostgreSQL-specific full-text search column
# This matches the migration in migrations/versions/116b2f287eab_add_full_text_search.py
if is_postgresql():
transcripts.append_column(
sqlalchemy.Column(
"search_vector_en",
TSVECTOR,
sqlalchemy.Computed(
"setweight(to_tsvector('english', coalesce(title, '')), 'A') || "
"setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') || "
"setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')",
persisted=True,
),
)
)
# Add GIN index for the search vector
transcripts.append_constraint(
sqlalchemy.Index(
"idx_transcript_search_vector_en",
"search_vector_en",
postgresql_using="gin",
)
)


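# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# The Core `transcripts` table above is removed in favor of the ORM models in
# reflector.db.base, which are not shown in this hunk. The same generated tsvector
# column plus GIN index could be declared on such a model roughly as follows; the
# class, base, and table names here are assumptions, not the project's real ones.
import sqlalchemy
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class TranscriptModelSketch(Base):
    __tablename__ = "transcript_sketch"  # hypothetical name, avoids clashing with the real model

    id: Mapped[str] = mapped_column(sqlalchemy.String, primary_key=True)
    title: Mapped[str | None] = mapped_column(sqlalchemy.String)
    long_summary: Mapped[str | None] = mapped_column(sqlalchemy.String)
    webvtt: Mapped[str | None] = mapped_column(sqlalchemy.Text)
    search_vector_en = mapped_column(
        TSVECTOR,
        sqlalchemy.Computed(
            "setweight(to_tsvector('english', coalesce(title, '')), 'A') || "
            "setweight(to_tsvector('english', coalesce(long_summary, '')), 'B') || "
            "setweight(to_tsvector('english', coalesce(webvtt, '')), 'C')",
            persisted=True,
        ),
    )

    __table_args__ = (
        sqlalchemy.Index(
            "idx_transcript_sketch_search_vector_en",
            "search_vector_en",
            postgresql_using="gin",
        ),
    )
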
def generate_transcript_name() -> str:
now = datetime.now(timezone.utc)
return f"Transcript {now.strftime('%Y-%m-%d %H:%M:%S')}"
@@ -167,10 +75,6 @@ class TranscriptFinalLongSummary(BaseModel):
long_summary: str


class TranscriptActionItems(BaseModel):
action_items: dict


class TranscriptFinalTitle(BaseModel):
title: str

@@ -193,12 +97,13 @@ class TranscriptParticipant(BaseModel):
id: str = Field(default_factory=generate_uuid4)
speaker: int | None
name: str
user_id: str | None = None


class Transcript(BaseModel):
"""Full transcript model with all fields."""

model_config = ConfigDict(from_attributes=True)

id: str = Field(default_factory=generate_uuid4)
user_id: str | None = None
name: str = Field(default_factory=generate_transcript_name)
@@ -211,7 +116,6 @@ class Transcript(BaseModel):
locked: bool = False
short_summary: str | None = None
long_summary: str | None = None
action_items: dict | None = None
topics: list[TranscriptTopic] = []
events: list[TranscriptEvent] = []
participants: list[TranscriptParticipant] | None = []
@@ -225,7 +129,6 @@ class Transcript(BaseModel):
zulip_message_id: int | None = None
audio_deleted: bool | None = None
webvtt: str | None = None
workflow_run_id: str | None = None # Hatchet workflow run ID for resumption

@field_serializer("created_at", when_used="json")
def serialize_datetime(self, dt: datetime) -> str:
@@ -369,6 +272,7 @@ class Transcript(BaseModel):
class TranscriptController:
async def get_all(
self,
session: AsyncSession,
user_id: str | None = None,
order_by: str | None = None,
filter_empty: bool | None = False,
@@ -377,12 +281,7 @@ class TranscriptController:
room_id: str | None = None,
search_term: str | None = None,
return_query: bool = False,
exclude_columns: list[str] = [
"topics",
"events",
"participants",
"action_items",
],
exclude_columns: list[str] = ["topics", "events", "participants"],
) -> list[Transcript]:
"""
Get all transcripts
@@ -398,102 +297,114 @@ class TranscriptController:
- `search_term`: filter transcripts by search term
"""

query = transcripts.select().join(
rooms, transcripts.c.room_id == rooms.c.id, isouter=True
query = select(TranscriptModel).join(
RoomModel, TranscriptModel.room_id == RoomModel.id, isouter=True
)

if user_id:
query = query.where(
or_(transcripts.c.user_id == user_id, rooms.c.is_shared)
or_(TranscriptModel.user_id == user_id, RoomModel.is_shared)
)
else:
query = query.where(rooms.c.is_shared)
query = query.where(RoomModel.is_shared)

if source_kind:
query = query.where(transcripts.c.source_kind == source_kind)
query = query.where(TranscriptModel.source_kind == source_kind)

if room_id:
query = query.where(transcripts.c.room_id == room_id)
query = query.where(TranscriptModel.room_id == room_id)

if search_term:
query = query.where(transcripts.c.title.ilike(f"%{search_term}%"))
query = query.where(TranscriptModel.title.ilike(f"%{search_term}%"))

# Exclude heavy JSON columns from list queries
# Get all ORM column attributes except excluded ones
transcript_columns = [
col for col in transcripts.c if col.name not in exclude_columns
getattr(TranscriptModel, col.name)
for col in TranscriptModel.__table__.c
if col.name not in exclude_columns
]

query = query.with_only_columns(
transcript_columns
+ [
rooms.c.name.label("room_name"),
]
*transcript_columns,
RoomModel.name.label("room_name"),
)

if order_by is not None:
field = getattr(transcripts.c, order_by[1:])
field = getattr(TranscriptModel, order_by[1:])
if order_by.startswith("-"):
field = field.desc()
query = query.order_by(field)

if filter_empty:
query = query.filter(transcripts.c.status != "idle")
query = query.filter(TranscriptModel.status != "idle")

if filter_recording:
query = query.filter(transcripts.c.status != "recording")
query = query.filter(TranscriptModel.status != "recording")

# print(query.compile(compile_kwargs={"literal_binds": True}))

if return_query:
return query

results = await get_database().fetch_all(query)
return results
result = await session.execute(query)
return [dict(row) for row in result.mappings().all()]

async def get_by_id(self, transcript_id: str, **kwargs) -> Transcript | None:
async def get_by_id(
self, session: AsyncSession, transcript_id: str, **kwargs
) -> Transcript | None:
"""
Get a transcript by id
"""
query = transcripts.select().where(transcripts.c.id == transcript_id)
query = select(TranscriptModel).where(TranscriptModel.id == transcript_id)
if "user_id" in kwargs:
query = query.where(transcripts.c.user_id == kwargs["user_id"])
result = await get_database().fetch_one(query)
if not result:
query = query.where(TranscriptModel.user_id == kwargs["user_id"])
result = await session.execute(query)
row = result.scalar_one_or_none()
if not row:
return None
return Transcript(**result)
return Transcript.model_validate(row)

async def get_by_recording_id(
self, recording_id: str, **kwargs
self, session: AsyncSession, recording_id: str, **kwargs
) -> Transcript | None:
"""
Get a transcript by recording_id
"""
query = transcripts.select().where(transcripts.c.recording_id == recording_id)
query = select(TranscriptModel).where(
TranscriptModel.recording_id == recording_id
)
if "user_id" in kwargs:
query = query.where(transcripts.c.user_id == kwargs["user_id"])
result = await get_database().fetch_one(query)
if not result:
query = query.where(TranscriptModel.user_id == kwargs["user_id"])
result = await session.execute(query)
row = result.scalar_one_or_none()
if not row:
return None
return Transcript(**result)
return Transcript.model_validate(row)

async def get_by_room_id(self, room_id: str, **kwargs) -> list[Transcript]:
async def get_by_room_id(
self, session: AsyncSession, room_id: str, **kwargs
) -> list[Transcript]:
"""
Get transcripts by room_id (direct access without joins)
"""
query = transcripts.select().where(transcripts.c.room_id == room_id)
query = select(TranscriptModel).where(TranscriptModel.room_id == room_id)
if "user_id" in kwargs:
query = query.where(transcripts.c.user_id == kwargs["user_id"])
query = query.where(TranscriptModel.user_id == kwargs["user_id"])
if "order_by" in kwargs:
order_by = kwargs["order_by"]
field = getattr(transcripts.c, order_by[1:])
field = getattr(TranscriptModel, order_by[1:])
if order_by.startswith("-"):
field = field.desc()
query = query.order_by(field)
results = await get_database().fetch_all(query)
return [Transcript(**result) for result in results]
results = await session.execute(query)
return [
Transcript.model_validate(dict(row)) for row in results.mappings().all()
]

async def get_by_id_for_http(
self,
session: AsyncSession,
transcript_id: str,
user_id: str | None,
) -> Transcript:
@@ -506,13 +417,14 @@ class TranscriptController:
This method checks the share mode of the transcript and the user_id
to determine if the user can access the transcript.
"""
query = transcripts.select().where(transcripts.c.id == transcript_id)
result = await get_database().fetch_one(query)
if not result:
query = select(TranscriptModel).where(TranscriptModel.id == transcript_id)
result = await session.execute(query)
row = result.scalar_one_or_none()
if not row:
raise HTTPException(status_code=404, detail="Transcript not found")

# if the transcript is anonymous, share mode is not checked
transcript = Transcript(**result)
transcript = Transcript.model_validate(row)
if transcript.user_id is None:
return transcript

@@ -535,6 +447,7 @@ class TranscriptController:

async def add(
self,
session: AsyncSession,
name: str,
source_kind: SourceKind,
source_language: str = "en",
@@ -559,14 +472,20 @@ class TranscriptController:
meeting_id=meeting_id,
room_id=room_id,
)
query = transcripts.insert().values(**transcript.model_dump())
await get_database().execute(query)
query = insert(TranscriptModel).values(**transcript.model_dump())
await session.execute(query)
await session.commit()
return transcript

# TODO investigate why mutate= is used. it's used in one place currently, maybe because of ORM field updates.
# using mutate=True is discouraged
async def update(
self, transcript: Transcript, values: dict, mutate=False
self,
session: AsyncSession,
transcript: Transcript,
values: dict,
commit=True,
mutate=False,
) -> Transcript:
"""
Update a transcript fields with key/values in values.
@@ -575,11 +494,13 @@ class TranscriptController:
values = TranscriptController._handle_topics_update(values)

query = (
transcripts.update()
.where(transcripts.c.id == transcript.id)
update(TranscriptModel)
.where(TranscriptModel.id == transcript.id)
.values(**values)
)
await get_database().execute(query)
await session.execute(query)
if commit:
await session.commit()
if mutate:
for key, value in values.items():
setattr(transcript, key, value)
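# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# The commit= flag added to update() lets several writes share one session and a
# single commit, which is how set_status below batches its event append and status
# change. The session is assumed to come from the application's session factory.
from reflector.db.transcripts import transcripts_controller


async def mark_reviewed_and_locked(session, transcript) -> None:
    await transcripts_controller.update(session, transcript, {"reviewed": True}, commit=False)
    await transcripts_controller.update(session, transcript, {"locked": True}, commit=False)
    await session.commit()  # one commit covering both updates
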
@@ -608,13 +529,14 @@ class TranscriptController:

async def remove_by_id(
self,
session: AsyncSession,
transcript_id: str,
user_id: str | None = None,
) -> None:
"""
Remove a transcript by id
"""
transcript = await self.get_by_id(transcript_id)
transcript = await self.get_by_id(session, transcript_id)
if not transcript:
return
if user_id is not None and transcript.user_id != user_id:
@@ -634,72 +556,60 @@ class TranscriptController:
if transcript.recording_id:
try:
recording = await recordings_controller.get_by_id(
transcript.recording_id
session, transcript.recording_id
)
if recording:
try:
await get_transcripts_storage().delete_file(
recording.object_key, bucket=recording.bucket_name
)
await get_recordings_storage().delete_file(recording.object_key)
except Exception as e:
logger.warning(
"Failed to delete recording object from S3",
exc_info=e,
recording_id=transcript.recording_id,
)
await recordings_controller.remove_by_id(transcript.recording_id)
await recordings_controller.remove_by_id(
session, transcript.recording_id
)
except Exception as e:
logger.warning(
"Failed to delete recording row",
exc_info=e,
recording_id=transcript.recording_id,
)
query = transcripts.delete().where(transcripts.c.id == transcript_id)
await get_database().execute(query)
query = delete(TranscriptModel).where(TranscriptModel.id == transcript_id)
await session.execute(query)
await session.commit()

async def remove_by_recording_id(self, recording_id: str):
async def remove_by_recording_id(self, session: AsyncSession, recording_id: str):
"""
Remove a transcript by recording_id
"""
query = transcripts.delete().where(transcripts.c.recording_id == recording_id)
await get_database().execute(query)

@staticmethod
def user_can_mutate(transcript: Transcript, user_id: str | None) -> bool:
"""
Returns True if the given user is allowed to modify the transcript.

Policy:
- Anonymous transcripts (user_id is None) cannot be modified via API
- Only the owner (matching user_id) can modify their transcript
"""
if transcript.user_id is None:
return False
return user_id and transcript.user_id == user_id

@asynccontextmanager
async def transaction(self):
"""
A context manager for database transaction
"""
async with get_database().transaction(isolation="serializable"):
yield
query = delete(TranscriptModel).where(
TranscriptModel.recording_id == recording_id
)
await session.execute(query)
await session.commit()

async def append_event(
self,
session: AsyncSession,
transcript: Transcript,
event: str,
data: Any,
commit=True,
) -> TranscriptEvent:
"""
Append an event to a transcript
"""
resp = transcript.add_event(event=event, data=data)
await self.update(transcript, {"events": transcript.events_dump()})
await self.update(
session, transcript, {"events": transcript.events_dump()}, commit=commit
)
return resp

async def upsert_topic(
self,
session: AsyncSession,
transcript: Transcript,
topic: TranscriptTopic,
) -> TranscriptEvent:
@@ -707,9 +617,9 @@ class TranscriptController:
Upsert topics to a transcript
"""
transcript.upsert_topic(topic)
await self.update(transcript, {"topics": transcript.topics_dump()})
await self.update(session, transcript, {"topics": transcript.topics_dump()})

async def move_mp3_to_storage(self, transcript: Transcript):
async def move_mp3_to_storage(self, session: AsyncSession, transcript: Transcript):
"""
Move mp3 file to storage
"""
@@ -733,25 +643,28 @@ class TranscriptController:

# indicate on the transcript that the audio is now on storage
# mutates transcript argument
await self.update(transcript, {"audio_location": "storage"}, mutate=True)
await self.update(
session, transcript, {"audio_location": "storage"}, mutate=True
)

# unlink the local file
transcript.audio_mp3_filename.unlink(missing_ok=True)

async def download_mp3_from_storage(self, transcript: Transcript):
async def download_mp3_from_storage(
self, session: AsyncSession, transcript: Transcript
):
"""
Download audio from storage
"""
storage = get_transcripts_storage()
try:
with open(transcript.audio_mp3_filename, "wb") as f:
await storage.stream_to_fileobj(transcript.storage_audio_path, f)
except Exception:
transcript.audio_mp3_filename.unlink(missing_ok=True)
raise
transcript.audio_mp3_filename.write_bytes(
await get_transcripts_storage().get_file(
transcript.storage_audio_path,
)
)

async def upsert_participant(
self,
session: AsyncSession,
transcript: Transcript,
participant: TranscriptParticipant,
) -> TranscriptParticipant:
@@ -759,11 +672,14 @@ class TranscriptController:
Add/update a participant to a transcript
"""
result = transcript.upsert_participant(participant)
await self.update(transcript, {"participants": transcript.participants_dump()})
await self.update(
session, transcript, {"participants": transcript.participants_dump()}
)
return result

async def delete_participant(
self,
session: AsyncSession,
transcript: Transcript,
participant_id: str,
):
@@ -771,28 +687,37 @@ class TranscriptController:
Delete a participant from a transcript
"""
transcript.delete_participant(participant_id)
await self.update(transcript, {"participants": transcript.participants_dump()})
await self.update(
session, transcript, {"participants": transcript.participants_dump()}
)

async def set_status(
self, transcript_id: str, status: TranscriptStatus
self, session: AsyncSession, transcript_id: str, status: TranscriptStatus
) -> TranscriptEvent | None:
"""
Update the status of a transcript

Will add an event STATUS + update the status field of transcript
"""
async with self.transaction():
transcript = await self.get_by_id(transcript_id)
if not transcript:
raise Exception(f"Transcript {transcript_id} not found")
if transcript.status == status:
return
resp = await self.append_event(
transcript=transcript,
event="STATUS",
data=StrValue(value=status),
)
await self.update(transcript, {"status": status})
transcript = await self.get_by_id(session, transcript_id)
if not transcript:
raise Exception(f"Transcript {transcript_id} not found")
if transcript.status == status:
return
resp = await self.append_event(
session,
transcript=transcript,
event="STATUS",
data=StrValue(value=status),
commit=False,
)
await self.update(
session,
transcript,
{"status": status},
commit=False,
)
await session.commit()
return resp



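# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# With the serializable-transaction context manager removed, callers of set_status
# only need to supply an AsyncSession; the engine URL and factory name below are
# placeholders.
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

from reflector.db.transcripts import transcripts_controller

engine = create_async_engine("postgresql+asyncpg://localhost/reflector")  # placeholder URL
SessionLocal = async_sessionmaker(engine, expire_on_commit=False)


async def mark_transcript_errored(transcript_id: str) -> None:
    async with SessionLocal() as session:
        await transcripts_controller.set_status(session, transcript_id, "error")
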
@@ -1,91 +0,0 @@
import hmac
import secrets
from datetime import datetime, timezone
from hashlib import sha256

import sqlalchemy
from pydantic import BaseModel, Field

from reflector.db import get_database, metadata
from reflector.settings import settings
from reflector.utils import generate_uuid4
from reflector.utils.string import NonEmptyString

user_api_keys = sqlalchemy.Table(
"user_api_key",
metadata,
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
sqlalchemy.Column("user_id", sqlalchemy.String, nullable=False),
sqlalchemy.Column("key_hash", sqlalchemy.String, nullable=False),
sqlalchemy.Column("name", sqlalchemy.String, nullable=True),
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
sqlalchemy.Index("idx_user_api_key_hash", "key_hash", unique=True),
sqlalchemy.Index("idx_user_api_key_user_id", "user_id"),
)


class UserApiKey(BaseModel):
id: NonEmptyString = Field(default_factory=generate_uuid4)
user_id: NonEmptyString
key_hash: NonEmptyString
name: NonEmptyString | None = None
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class UserApiKeyController:
@staticmethod
def generate_key() -> NonEmptyString:
return secrets.token_urlsafe(48)

@staticmethod
def hash_key(key: NonEmptyString) -> str:
return hmac.new(
settings.SECRET_KEY.encode(), key.encode(), digestmod=sha256
).hexdigest()

@classmethod
async def create_key(
cls,
user_id: NonEmptyString,
name: NonEmptyString | None = None,
) -> tuple[UserApiKey, NonEmptyString]:
plaintext = cls.generate_key()
api_key = UserApiKey(
user_id=user_id,
key_hash=cls.hash_key(plaintext),
name=name,
)
query = user_api_keys.insert().values(**api_key.model_dump())
await get_database().execute(query)
return api_key, plaintext

@classmethod
async def verify_key(cls, plaintext_key: NonEmptyString) -> UserApiKey | None:
key_hash = cls.hash_key(plaintext_key)
query = user_api_keys.select().where(
user_api_keys.c.key_hash == key_hash,
)
result = await get_database().fetch_one(query)
return UserApiKey(**result) if result else None

@staticmethod
async def list_by_user_id(user_id: NonEmptyString) -> list[UserApiKey]:
query = (
user_api_keys.select()
.where(user_api_keys.c.user_id == user_id)
.order_by(user_api_keys.c.created_at.desc())
)
results = await get_database().fetch_all(query)
return [UserApiKey(**r) for r in results]

@staticmethod
async def delete_key(key_id: NonEmptyString, user_id: NonEmptyString) -> bool:
query = user_api_keys.delete().where(
(user_api_keys.c.id == key_id) & (user_api_keys.c.user_id == user_id)
)
result = await get_database().execute(query)
# asyncpg returns None for DELETE, consider it success if no exception
return result is None or result > 0


user_api_keys_controller = UserApiKeyController()
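# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# The controller shown above stores only an HMAC-SHA256 digest of each key (keyed
# with SECRET_KEY); the plaintext is returned exactly once at creation time. A
# typical issue-then-verify flow; the module path is an assumption.
from reflector.db.user_api_keys import user_api_keys_controller  # assumed module path


async def issue_and_check(user_id: str) -> bool:
    api_key, plaintext = await user_api_keys_controller.create_key(user_id, name="ci-token")
    # Show `plaintext` to the user once; only `api_key.key_hash` is persisted.
    found = await user_api_keys_controller.verify_key(plaintext)
    return found is not None and found.user_id == user_id
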
@@ -1,98 +0,0 @@
"""User table for storing Authentik user information."""

from datetime import datetime, timezone

import sqlalchemy
from pydantic import BaseModel, Field

from reflector.db import get_database, metadata
from reflector.utils import generate_uuid4
from reflector.utils.string import NonEmptyString

users = sqlalchemy.Table(
"user",
metadata,
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
sqlalchemy.Column("email", sqlalchemy.String, nullable=False),
sqlalchemy.Column("authentik_uid", sqlalchemy.String, nullable=False),
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
sqlalchemy.Column("updated_at", sqlalchemy.DateTime(timezone=True), nullable=False),
sqlalchemy.Index("idx_user_authentik_uid", "authentik_uid", unique=True),
sqlalchemy.Index("idx_user_email", "email", unique=False),
)


class User(BaseModel):
id: NonEmptyString = Field(default_factory=generate_uuid4)
email: NonEmptyString
authentik_uid: NonEmptyString
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class UserController:
@staticmethod
async def get_by_id(user_id: NonEmptyString) -> User | None:
query = users.select().where(users.c.id == user_id)
result = await get_database().fetch_one(query)
return User(**result) if result else None

@staticmethod
async def get_by_authentik_uid(authentik_uid: NonEmptyString) -> User | None:
query = users.select().where(users.c.authentik_uid == authentik_uid)
result = await get_database().fetch_one(query)
return User(**result) if result else None

@staticmethod
async def get_by_email(email: NonEmptyString) -> User | None:
query = users.select().where(users.c.email == email)
result = await get_database().fetch_one(query)
return User(**result) if result else None

@staticmethod
async def create_or_update(
id: NonEmptyString, authentik_uid: NonEmptyString, email: NonEmptyString
) -> User:
existing = await UserController.get_by_authentik_uid(authentik_uid)
now = datetime.now(timezone.utc)

if existing:
query = (
users.update()
.where(users.c.authentik_uid == authentik_uid)
.values(email=email, updated_at=now)
)
await get_database().execute(query)
return User(
id=existing.id,
authentik_uid=authentik_uid,
email=email,
created_at=existing.created_at,
updated_at=now,
)
else:
user = User(
id=id,
authentik_uid=authentik_uid,
email=email,
created_at=now,
updated_at=now,
)
query = users.insert().values(**user.model_dump())
await get_database().execute(query)
return user

@staticmethod
async def list_all() -> list[User]:
query = users.select().order_by(users.c.created_at.desc())
results = await get_database().fetch_all(query)
return [User(**r) for r in results]

@staticmethod
async def get_by_ids(user_ids: list[NonEmptyString]) -> dict[str, User]:
query = users.select().where(users.c.id.in_(user_ids))
results = await get_database().fetch_all(query)
return {user.id: User(**user) for user in results}


user_controller = UserController()
@@ -1,9 +0,0 @@
"""Database utility functions."""

from reflector.db import get_database


def is_postgresql() -> bool:
return get_database().url.scheme and get_database().url.scheme.startswith(
"postgresql"
)
@@ -1,5 +0,0 @@
"""Hatchet workflow orchestration for Reflector."""

from reflector.hatchet.client import HatchetClientManager

__all__ = ["HatchetClientManager"]
@@ -1,98 +0,0 @@
"""WebSocket broadcasting helpers for Hatchet workflows.

DUPLICATION NOTE: To be kept when Celery is deprecated. Currently dupes Celery logic.

Provides WebSocket broadcasting for Hatchet that matches Celery's @broadcast_to_sockets
decorator behavior. Events are broadcast to transcript rooms and user rooms.
"""

from typing import Any

import structlog

from reflector.db.transcripts import Transcript, TranscriptEvent, transcripts_controller
from reflector.utils.string import NonEmptyString
from reflector.ws_manager import get_ws_manager

# Events that should also be sent to user room (matches Celery behavior)
USER_ROOM_EVENTS = {"STATUS", "FINAL_TITLE", "DURATION"}


async def broadcast_event(
transcript_id: NonEmptyString,
event: TranscriptEvent,
logger: structlog.BoundLogger,
) -> None:
"""Broadcast a TranscriptEvent to WebSocket subscribers.

Fire-and-forget: errors are logged but don't interrupt workflow execution.
"""
logger.info(
"Broadcasting event",
transcript_id=transcript_id,
event_type=event.event,
)
try:
ws_manager = get_ws_manager()

await ws_manager.send_json(
room_id=f"ts:{transcript_id}",
message=event.model_dump(mode="json"),
)
logger.info(
"Event sent to transcript room",
transcript_id=transcript_id,
event_type=event.event,
)

if event.event in USER_ROOM_EVENTS:
transcript = await transcripts_controller.get_by_id(transcript_id)
if transcript and transcript.user_id:
await ws_manager.send_json(
room_id=f"user:{transcript.user_id}",
message={
"event": f"TRANSCRIPT_{event.event}",
"data": {"id": transcript_id, **event.data},
},
)
except Exception as e:
logger.warning(
"Failed to broadcast event",
error=str(e),
transcript_id=transcript_id,
event_type=event.event,
)


async def set_status_and_broadcast(
transcript_id: NonEmptyString,
status: str,
logger: structlog.BoundLogger,
) -> None:
"""Set transcript status and broadcast to WebSocket.

Wrapper around transcripts_controller.set_status that adds WebSocket broadcasting.
"""
event = await transcripts_controller.set_status(transcript_id, status)
if event:
await broadcast_event(transcript_id, event, logger=logger)


async def append_event_and_broadcast(
transcript_id: NonEmptyString,
transcript: Transcript,
event_name: str,
data: Any,
logger: structlog.BoundLogger,
) -> TranscriptEvent:
"""Append event to transcript and broadcast to WebSocket.

Wrapper around transcripts_controller.append_event that adds WebSocket broadcasting.
"""
event = await transcripts_controller.append_event(
transcript=transcript,
event=event_name,
data=data,
)
await broadcast_event(transcript_id, event, logger=logger)
return event
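# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# For events in USER_ROOM_EVENTS, broadcast_event above fans out a second message to
# the owner's user room with a TRANSCRIPT_-prefixed event name. The payload shapes
# below assume a STATUS event whose data is {"value": "ended"}; the exact fields of
# the serialized TranscriptEvent may differ.
transcript_id = "abc123"  # placeholder
event_name = "STATUS"
event_data = {"value": "ended"}

transcript_room_message = {"event": event_name, "data": event_data}
user_room_message = {
    "event": f"TRANSCRIPT_{event_name}",
    "data": {"id": transcript_id, **event_data},
}
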
@@ -1,111 +0,0 @@
"""Hatchet Python client wrapper.

Uses singleton pattern because:
1. Hatchet client maintains persistent gRPC connections for workflow registration
2. Creating multiple clients would cause registration conflicts and resource leaks
3. The SDK is designed for a single client instance per process
4. Tests use `HatchetClientManager.reset()` to isolate state between tests
"""

import logging
import threading

from hatchet_sdk import ClientConfig, Hatchet
from hatchet_sdk.clients.rest.models import V1TaskStatus

from reflector.logger import logger
from reflector.settings import settings


class HatchetClientManager:
"""Singleton manager for Hatchet client connections.

See module docstring for rationale. For test isolation, use `reset()`.
"""

_instance: Hatchet | None = None
_lock = threading.Lock()

@classmethod
def get_client(cls) -> Hatchet:
"""Get or create the Hatchet client (thread-safe singleton)."""
if cls._instance is None:
with cls._lock:
if cls._instance is None:
if not settings.HATCHET_CLIENT_TOKEN:
raise ValueError("HATCHET_CLIENT_TOKEN must be set")

# Pass root logger to Hatchet so workflow logs appear in dashboard
root_logger = logging.getLogger()
cls._instance = Hatchet(
debug=settings.HATCHET_DEBUG,
config=ClientConfig(logger=root_logger),
)
return cls._instance

@classmethod
async def start_workflow(
cls,
workflow_name: str,
input_data: dict,
additional_metadata: dict | None = None,
) -> str:
"""Start a workflow and return the workflow run ID.

Args:
workflow_name: Name of the workflow to trigger.
input_data: Input data for the workflow run.
additional_metadata: Optional metadata for filtering in dashboard
(e.g., transcript_id, recording_id).
"""
client = cls.get_client()
result = await client.runs.aio_create(
workflow_name,
input_data,
additional_metadata=additional_metadata,
)
return result.run.metadata.id

@classmethod
async def get_workflow_run_status(cls, workflow_run_id: str) -> V1TaskStatus:
client = cls.get_client()
return await client.runs.aio_get_status(workflow_run_id)

@classmethod
async def cancel_workflow(cls, workflow_run_id: str) -> None:
client = cls.get_client()
await client.runs.aio_cancel(workflow_run_id)
logger.info("[Hatchet] Cancelled workflow", workflow_run_id=workflow_run_id)

@classmethod
async def replay_workflow(cls, workflow_run_id: str) -> None:
client = cls.get_client()
await client.runs.aio_replay(workflow_run_id)
logger.info("[Hatchet] Replaying workflow", workflow_run_id=workflow_run_id)

@classmethod
async def can_replay(cls, workflow_run_id: str) -> bool:
"""Check if workflow can be replayed (is FAILED)."""
try:
status = await cls.get_workflow_run_status(workflow_run_id)
return status == V1TaskStatus.FAILED or status == V1TaskStatus.CANCELLED
except Exception as e:
logger.warning(
"[Hatchet] Failed to check replay status",
workflow_run_id=workflow_run_id,
error=str(e),
)
return False

@classmethod
async def get_workflow_status(cls, workflow_run_id: str) -> dict:
"""Get the full workflow run details as dict."""
client = cls.get_client()
run = await client.runs.aio_get(workflow_run_id)
return run.to_dict()

@classmethod
def reset(cls) -> None:
"""Reset the client instance (for testing)."""
with cls._lock:
cls._instance = None
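# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# Resuming a stored run with the manager above: replay only when Hatchet reports the
# run as FAILED or CANCELLED, which is what can_replay checks.
from reflector.hatchet.client import HatchetClientManager


async def maybe_resume(workflow_run_id: str) -> bool:
    if await HatchetClientManager.can_replay(workflow_run_id):
        await HatchetClientManager.replay_workflow(workflow_run_id)
        return True
    return False
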
@@ -1,63 +0,0 @@
"""
Run Hatchet workers for the diarization pipeline.
Runs as a separate process, just like Celery workers.

Usage:
uv run -m reflector.hatchet.run_workers

# Or via docker:
docker compose exec server uv run -m reflector.hatchet.run_workers
"""

import signal
import sys

from reflector.logger import logger
from reflector.settings import settings


def main() -> None:
"""Start Hatchet worker polling."""
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting workers")
sys.exit(1)

if not settings.HATCHET_CLIENT_TOKEN:
logger.error("HATCHET_CLIENT_TOKEN is not set")
sys.exit(1)

logger.info(
"Starting Hatchet workers",
debug=settings.HATCHET_DEBUG,
)

# Import here (not top-level) - workflow modules call HatchetClientManager.get_client()
# at module level because Hatchet SDK decorators (@workflow.task) bind at import time.
# Can't use lazy init: decorators need the client object when function is defined.
from reflector.hatchet.client import HatchetClientManager # noqa: PLC0415
from reflector.hatchet.workflows import ( # noqa: PLC0415
diarization_pipeline,
track_workflow,
)

hatchet = HatchetClientManager.get_client()

worker = hatchet.worker(
"reflector-diarization-worker",
workflows=[diarization_pipeline, track_workflow],
)

def shutdown_handler(signum: int, frame) -> None:
logger.info("Received shutdown signal, stopping workers...")
# Worker cleanup happens automatically on exit
sys.exit(0)

signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)

logger.info("Starting Hatchet worker polling...")
worker.start()


if __name__ == "__main__":
main()
@@ -1,14 +0,0 @@
"""Hatchet workflow definitions."""

from reflector.hatchet.workflows.diarization_pipeline import (
PipelineInput,
diarization_pipeline,
)
from reflector.hatchet.workflows.track_processing import TrackInput, track_workflow

__all__ = [
"diarization_pipeline",
"track_workflow",
"PipelineInput",
"TrackInput",
]
@@ -1,938 +0,0 @@
"""
Hatchet main workflow: DiarizationPipeline

Multitrack diarization pipeline for Daily.co recordings.
Orchestrates the full processing flow from recording metadata to final transcript.

Note: This file uses deferred imports (inside functions/tasks) intentionally.
Hatchet workers run in forked processes; fresh imports per task ensure DB connections
are not shared across forks, avoiding connection pooling issues.
"""

import asyncio
import functools
import tempfile
from contextlib import asynccontextmanager
from datetime import timedelta
from pathlib import Path
from typing import Callable

import httpx
from hatchet_sdk import Context
from pydantic import BaseModel

from reflector.dailyco_api.client import DailyApiClient
from reflector.hatchet.broadcast import (
append_event_and_broadcast,
set_status_and_broadcast,
)
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.models import (
ConsentResult,
FinalizeResult,
MixdownResult,
PaddedTrackInfo,
ParticipantsResult,
ProcessTracksResult,
RecordingResult,
SummaryResult,
TitleResult,
TopicsResult,
WaveformResult,
WebhookResult,
ZulipResult,
)
from reflector.hatchet.workflows.track_processing import TrackInput, track_workflow
from reflector.logger import logger
from reflector.pipelines import topic_processing
from reflector.processors import AudioFileWriterProcessor
from reflector.processors.types import (
TitleSummary,
TitleSummaryWithId,
Word,
)
from reflector.processors.types import (
Transcript as TranscriptType,
)
from reflector.settings import settings
from reflector.storage.storage_aws import AwsStorage
from reflector.utils.audio_constants import (
PRESIGNED_URL_EXPIRATION_SECONDS,
WAVEFORM_SEGMENTS,
)
from reflector.utils.audio_mixdown import (
detect_sample_rate_from_tracks,
mixdown_tracks_pyav,
)
from reflector.utils.audio_waveform import get_audio_waveform
from reflector.utils.daily import (
filter_cam_audio_tracks,
parse_daily_recording_filename,
)
from reflector.utils.string import NonEmptyString, assert_non_none_and_non_empty
from reflector.zulip import post_transcript_notification


class PipelineInput(BaseModel):
"""Input to trigger the diarization pipeline."""

recording_id: NonEmptyString
tracks: list[dict] # List of {"s3_key": str}
bucket_name: NonEmptyString
transcript_id: NonEmptyString
room_id: NonEmptyString | None = None


hatchet = HatchetClientManager.get_client()

diarization_pipeline = hatchet.workflow(
name="DiarizationPipeline", input_validator=PipelineInput
)


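# --- Illustrative sketch (editor's addition, not part of this changeset) ---
# Two ways the workflow declared above can be triggered; all field values are
# placeholders, and the names used are those defined or imported in this file.
async def trigger_examples() -> None:
    pipeline_input = PipelineInput(
        recording_id="rec-123",  # placeholder values throughout
        tracks=[{"s3_key": "daily/room-abc/cam-audio-0.webm"}],
        bucket_name="daily-recordings",
        transcript_id="tr-456",
    )
    # Fire-and-forget via the manager; returns a run id that can be stored for resumption.
    run_id = await HatchetClientManager.start_workflow(
        "DiarizationPipeline",
        pipeline_input.model_dump(),
        additional_metadata={"transcript_id": pipeline_input.transcript_id},
    )
    logger.info("started pipeline", workflow_run_id=run_id)
    # Or run in-process and await the aggregated task outputs, as the child track
    # workflows are awaited further below via aio_run.
    result = await diarization_pipeline.aio_run(pipeline_input)
    logger.info("pipeline finished", keys=list(result))
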
@asynccontextmanager
|
||||
async def fresh_db_connection():
|
||||
"""Context manager for database connections in Hatchet workers.
|
||||
TECH DEBT: Made to make connection fork-aware without changing db code too much.
|
||||
The real fix would be making the db module fork-aware instead of bypassing it.
|
||||
Current pattern is acceptable given Hatchet's process model.
|
||||
"""
|
||||
import databases # noqa: PLC0415
|
||||
|
||||
from reflector.db import _database_context # noqa: PLC0415
|
||||
|
||||
_database_context.set(None)
|
||||
db = databases.Database(settings.DATABASE_URL)
|
||||
_database_context.set(db)
|
||||
await db.connect()
|
||||
try:
|
||||
yield db
|
||||
finally:
|
||||
await db.disconnect()
|
||||
_database_context.set(None)
|
||||
|
||||
|
||||
async def set_workflow_error_status(transcript_id: NonEmptyString) -> bool:
|
||||
"""Set transcript status to 'error' on workflow failure.
|
||||
|
||||
Returns:
|
||||
True if status was set successfully, False if failed.
|
||||
Failure is logged as CRITICAL since it means transcript may be stuck.
|
||||
"""
|
||||
try:
|
||||
async with fresh_db_connection():
|
||||
await set_status_and_broadcast(transcript_id, "error", logger=logger)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.critical(
|
||||
"[Hatchet] CRITICAL: Failed to set error status - transcript may be stuck in 'processing'",
|
||||
transcript_id=transcript_id,
|
||||
error=str(e),
|
||||
exc_info=True,
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
def _spawn_storage():
|
||||
"""Create fresh storage instance."""
|
||||
return AwsStorage(
|
||||
aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
)
|
||||
|
||||
|
||||
def with_error_handling(step_name: str, set_error_status: bool = True) -> Callable:
|
||||
"""Decorator that handles task failures uniformly.
|
||||
|
||||
Args:
|
||||
step_name: Name of the step for logging and progress tracking.
|
||||
set_error_status: Whether to set transcript status to 'error' on failure.
|
||||
"""
|
||||
|
||||
def decorator(func: Callable) -> Callable:
|
||||
@functools.wraps(func)
|
||||
async def wrapper(input: PipelineInput, ctx: Context):
|
||||
try:
|
||||
return await func(input, ctx)
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[Hatchet] {step_name} failed",
|
||||
transcript_id=input.transcript_id,
|
||||
error=str(e),
|
||||
exc_info=True,
|
||||
)
|
||||
if set_error_status:
|
||||
await set_workflow_error_status(input.transcript_id)
|
||||
raise
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
@diarization_pipeline.task(execution_timeout=timedelta(seconds=60), retries=3)
|
||||
@with_error_handling("get_recording")
|
||||
async def get_recording(input: PipelineInput, ctx: Context) -> RecordingResult:
|
||||
"""Fetch recording metadata from Daily.co API."""
|
||||
ctx.log(f"get_recording: recording_id={input.recording_id}")
|
||||
|
||||
# Set transcript status to "processing" at workflow start (broadcasts to WebSocket)
|
||||
async with fresh_db_connection():
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if transcript:
|
||||
await set_status_and_broadcast(
|
||||
input.transcript_id, "processing", logger=logger
|
||||
)
|
||||
ctx.log(f"Set transcript status to processing: {input.transcript_id}")
|
||||
|
||||
if not settings.DAILY_API_KEY:
|
||||
raise ValueError("DAILY_API_KEY not configured")
|
||||
|
||||
async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client:
|
||||
recording = await client.get_recording(input.recording_id)
|
||||
|
||||
ctx.log(
|
||||
f"get_recording complete: room={recording.room_name}, duration={recording.duration}s"
|
||||
)
|
||||
|
||||
return RecordingResult(
|
||||
id=recording.id,
|
||||
mtg_session_id=recording.mtgSessionId,
|
||||
duration=recording.duration,
|
||||
)
|
||||
|
||||
|
||||
@diarization_pipeline.task(
|
||||
parents=[get_recording], execution_timeout=timedelta(seconds=60), retries=3
|
||||
)
|
||||
@with_error_handling("get_participants")
|
||||
async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsResult:
|
||||
"""Fetch participant list from Daily.co API and update transcript in database."""
|
||||
ctx.log(f"get_participants: transcript_id={input.transcript_id}")
|
||||
|
||||
recording = ctx.task_output(get_recording)
|
||||
mtg_session_id = recording.mtg_session_id
|
||||
|
||||
async with fresh_db_connection():
|
||||
from reflector.db.transcripts import ( # noqa: PLC0415
|
||||
TranscriptParticipant,
|
||||
transcripts_controller,
|
||||
)
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if transcript:
|
||||
# Note: title NOT cleared - preserves existing titles
|
||||
await transcripts_controller.update(
|
||||
transcript,
|
||||
{
|
||||
"events": [],
|
||||
"topics": [],
|
||||
"participants": [],
|
||||
},
|
||||
)
|
||||
|
||||
mtg_session_id = assert_non_none_and_non_empty(
|
||||
mtg_session_id, "mtg_session_id is required"
|
||||
)
|
||||
daily_api_key = assert_non_none_and_non_empty(
|
||||
settings.DAILY_API_KEY, "DAILY_API_KEY is required"
|
||||
)
|
||||
|
||||
async with DailyApiClient(api_key=daily_api_key) as client:
|
||||
participants = await client.get_meeting_participants(mtg_session_id)
|
||||
|
||||
id_to_name = {}
|
||||
id_to_user_id = {}
|
||||
for p in participants.data:
|
||||
if p.user_name:
|
||||
id_to_name[p.participant_id] = p.user_name
|
||||
if p.user_id:
|
||||
id_to_user_id[p.participant_id] = p.user_id
|
||||
|
||||
track_keys = [t["s3_key"] for t in input.tracks]
|
||||
cam_audio_keys = filter_cam_audio_tracks(track_keys)
|
||||
|
||||
        participants_list = []
        for idx, key in enumerate(cam_audio_keys):
            try:
                parsed = parse_daily_recording_filename(key)
                participant_id = parsed.participant_id
            except ValueError as e:
                logger.error(
                    "Failed to parse Daily recording filename",
                    error=str(e),
                    key=key,
                )
                continue

            default_name = f"Speaker {idx}"
            name = id_to_name.get(participant_id, default_name)
            user_id = id_to_user_id.get(participant_id)

            participant = TranscriptParticipant(
                id=participant_id, speaker=idx, name=name, user_id=user_id
            )
            await transcripts_controller.upsert_participant(transcript, participant)
            participants_list.append(
                {
                    "participant_id": participant_id,
                    "user_name": name,
                    "speaker": idx,
                }
            )

    ctx.log(f"get_participants complete: {len(participants_list)} participants")

    return ParticipantsResult(
        participants=participants_list,
        num_tracks=len(input.tracks),
        source_language=transcript.source_language if transcript else "en",
        target_language=transcript.target_language if transcript else "en",
    )


@diarization_pipeline.task(
    parents=[get_participants], execution_timeout=timedelta(seconds=600), retries=3
)
@with_error_handling("process_tracks")
async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
    """Spawn child workflows for each track (dynamic fan-out)."""
    ctx.log(f"process_tracks: spawning {len(input.tracks)} track workflows")

    participants_result = ctx.task_output(get_participants)
    source_language = participants_result.source_language

    child_coroutines = [
        track_workflow.aio_run(
            TrackInput(
                track_index=i,
                s3_key=track["s3_key"],
                bucket_name=input.bucket_name,
                transcript_id=input.transcript_id,
                language=source_language,
            )
        )
        for i, track in enumerate(input.tracks)
    ]

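    # Each gathered result is a dict keyed by the child task name
    # ("pad_track", "transcribe_track"), as consumed below.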
    results = await asyncio.gather(*child_coroutines)

    target_language = participants_result.target_language

    track_words = []
    padded_tracks = []
    created_padded_files = set()

    for result in results:
        transcribe_result = result.get("transcribe_track", {})
        track_words.append(transcribe_result.get("words", []))

        pad_result = result.get("pad_track", {})
        padded_key = pad_result.get("padded_key")
        bucket_name = pad_result.get("bucket_name")

        # Store S3 key info (not presigned URL) - consumer tasks presign on demand
        if padded_key:
            padded_tracks.append(
                PaddedTrackInfo(key=padded_key, bucket_name=bucket_name)
            )

        track_index = pad_result.get("track_index")
        if pad_result.get("size", 0) > 0 and track_index is not None:
            storage_path = f"file_pipeline_hatchet/{input.transcript_id}/tracks/padded_{track_index}.webm"
            created_padded_files.add(storage_path)

    all_words = [word for words in track_words for word in words]
    all_words.sort(key=lambda w: w.get("start", 0))

    ctx.log(
        f"process_tracks complete: {len(all_words)} words from {len(input.tracks)} tracks"
    )

    return ProcessTracksResult(
        all_words=all_words,
        padded_tracks=padded_tracks,
        word_count=len(all_words),
        num_tracks=len(input.tracks),
        target_language=target_language,
        created_padded_files=list(created_padded_files),
    )


@diarization_pipeline.task(
    parents=[process_tracks], execution_timeout=timedelta(seconds=300), retries=3
)
@with_error_handling("mixdown_tracks")
async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
    """Mix all padded tracks into single audio file using PyAV (same as Celery)."""
    ctx.log("mixdown_tracks: mixing padded tracks into single audio file")

    track_result = ctx.task_output(process_tracks)
    padded_tracks = track_result.padded_tracks

    # TODO think of NonEmpty type to avoid those checks, e.g. sized.NonEmpty from https://github.com/antonagestam/phantom-types/
    if not padded_tracks:
        raise ValueError("No padded tracks to mixdown")

    storage = _spawn_storage()

    # Presign URLs on demand (avoids stale URLs on workflow replay)
    padded_urls = []
    for track_info in padded_tracks:
        if track_info.key:
            url = await storage.get_file_url(
                track_info.key,
                operation="get_object",
                expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
                bucket=track_info.bucket_name,
            )
            padded_urls.append(url)

    valid_urls = [url for url in padded_urls if url]
    if not valid_urls:
        raise ValueError("No valid padded tracks to mixdown")

    target_sample_rate = detect_sample_rate_from_tracks(valid_urls, logger=logger)
    if not target_sample_rate:
        logger.error("Mixdown failed - no decodable audio frames found")
        raise ValueError("No decodable audio frames in any track")

    output_path = tempfile.mktemp(suffix=".mp3")
    duration_ms_callback_capture_container = [0.0]

    async def capture_duration(d):
        duration_ms_callback_capture_container[0] = d

    writer = AudioFileWriterProcessor(path=output_path, on_duration=capture_duration)

    await mixdown_tracks_pyav(
        valid_urls,
        writer,
        target_sample_rate,
        offsets_seconds=None,
        logger=logger,
    )
    await writer.flush()

    file_size = Path(output_path).stat().st_size
    storage_path = f"{input.transcript_id}/audio.mp3"

    with open(output_path, "rb") as mixed_file:
        await storage.put_file(storage_path, mixed_file)

    Path(output_path).unlink(missing_ok=True)

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript:
            await transcripts_controller.update(
                transcript, {"audio_location": "storage"}
            )

    ctx.log(f"mixdown_tracks complete: uploaded {file_size} bytes to {storage_path}")

    return MixdownResult(
        audio_key=storage_path,
        duration=duration_ms_callback_capture_container[0],
        tracks_mixed=len(valid_urls),
    )


@diarization_pipeline.task(
    parents=[mixdown_tracks], execution_timeout=timedelta(seconds=120), retries=3
)
@with_error_handling("generate_waveform")
async def generate_waveform(input: PipelineInput, ctx: Context) -> WaveformResult:
    """Generate audio waveform visualization using AudioWaveformProcessor (matches Celery)."""
    ctx.log(f"generate_waveform: transcript_id={input.transcript_id}")

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptWaveform,
        transcripts_controller,
    )

    mixdown_result = ctx.task_output(mixdown_tracks)
    audio_key = mixdown_result.audio_key

    storage = _spawn_storage()
    audio_url = await storage.get_file_url(
        audio_key,
        operation="get_object",
        expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
    )

    # Download MP3 to temp file (AudioWaveformProcessor needs local file)
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
        temp_path = temp_file.name

    try:
        async with httpx.AsyncClient() as client:
            response = await client.get(audio_url, timeout=120)
            response.raise_for_status()
            with open(temp_path, "wb") as f:
                f.write(response.content)

        waveform = get_audio_waveform(
            path=Path(temp_path), segments_count=WAVEFORM_SEGMENTS
        )

        async with fresh_db_connection():
            transcript = await transcripts_controller.get_by_id(input.transcript_id)
            if transcript:
                waveform_data = TranscriptWaveform(waveform=waveform)
                await append_event_and_broadcast(
                    input.transcript_id,
                    transcript,
                    "WAVEFORM",
                    waveform_data,
                    logger=logger,
                )

    finally:
        Path(temp_path).unlink(missing_ok=True)

    ctx.log("generate_waveform complete")

    return WaveformResult(waveform_generated=True)


@diarization_pipeline.task(
    parents=[mixdown_tracks], execution_timeout=timedelta(seconds=300), retries=3
)
@with_error_handling("detect_topics")
async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
    """Detect topics using LLM and save to database (matches Celery on_topic callback)."""
    ctx.log("detect_topics: analyzing transcript for topics")

    track_result = ctx.task_output(process_tracks)
    words = track_result.all_words
    target_language = track_result.target_language

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptTopic,
        transcripts_controller,
    )

    word_objects = [Word(**w) for w in words]
    transcript_type = TranscriptType(words=word_objects)

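    # EmptyPipeline is a no-op pipeline reference for processors that expect one
    # (get_pref returns the default, emit does nothing).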
    empty_pipeline = topic_processing.EmptyPipeline(logger=logger)

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)

        async def on_topic_callback(data):
            topic = TranscriptTopic(
                title=data.title,
                summary=data.summary,
                timestamp=data.timestamp,
                transcript=data.transcript.text,
                words=data.transcript.words,
            )
            if isinstance(
                data, TitleSummaryWithId
            ):  # Celery parity: main_live_pipeline.py
                topic.id = data.id
            await transcripts_controller.upsert_topic(transcript, topic)
            await append_event_and_broadcast(
                input.transcript_id, transcript, "TOPIC", topic, logger=logger
            )

        topics = await topic_processing.detect_topics(
            transcript_type,
            target_language,
            on_topic_callback=on_topic_callback,
            empty_pipeline=empty_pipeline,
        )

    topics_list = [t.model_dump() for t in topics]

    ctx.log(f"detect_topics complete: found {len(topics_list)} topics")

    return TopicsResult(topics=topics_list)


@diarization_pipeline.task(
    parents=[detect_topics], execution_timeout=timedelta(seconds=120), retries=3
)
@with_error_handling("generate_title")
async def generate_title(input: PipelineInput, ctx: Context) -> TitleResult:
    """Generate meeting title using LLM and save to database (matches Celery on_title callback)."""
    ctx.log("generate_title: generating title from topics")

    topics_result = ctx.task_output(detect_topics)
    topics = topics_result.topics

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptFinalTitle,
        transcripts_controller,
    )

    topic_objects = [TitleSummary(**t) for t in topics]

    empty_pipeline = topic_processing.EmptyPipeline(logger=logger)
    title_result = None

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)

        async def on_title_callback(data):
            nonlocal title_result
            title_result = data.title
            final_title = TranscriptFinalTitle(title=data.title)
            if not transcript.title:
                await transcripts_controller.update(
                    transcript,
                    {"title": final_title.title},
                )
                await append_event_and_broadcast(
                    input.transcript_id,
                    transcript,
                    "FINAL_TITLE",
                    final_title,
                    logger=logger,
                )

        await topic_processing.generate_title(
            topic_objects,
            on_title_callback=on_title_callback,
            empty_pipeline=empty_pipeline,
            logger=logger,
        )

    ctx.log(f"generate_title complete: '{title_result}'")

    return TitleResult(title=title_result)


@diarization_pipeline.task(
    parents=[detect_topics], execution_timeout=timedelta(seconds=300), retries=3
)
@with_error_handling("generate_summary")
async def generate_summary(input: PipelineInput, ctx: Context) -> SummaryResult:
    """Generate meeting summary using LLM and save to database (matches Celery callbacks)."""
    ctx.log("generate_summary: generating long and short summaries")

    topics_result = ctx.task_output(detect_topics)
    topics = topics_result.topics

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptFinalLongSummary,
        TranscriptFinalShortSummary,
        transcripts_controller,
    )

    topic_objects = [TitleSummary(**t) for t in topics]

    empty_pipeline = topic_processing.EmptyPipeline(logger=logger)
    summary_result = None
    short_summary_result = None

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)

        async def on_long_summary_callback(data):
            nonlocal summary_result
            summary_result = data.long_summary
            final_long_summary = TranscriptFinalLongSummary(
                long_summary=data.long_summary
            )
            await transcripts_controller.update(
                transcript,
                {"long_summary": final_long_summary.long_summary},
            )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "FINAL_LONG_SUMMARY",
                final_long_summary,
                logger=logger,
            )

        async def on_short_summary_callback(data):
            nonlocal short_summary_result
            short_summary_result = data.short_summary
            final_short_summary = TranscriptFinalShortSummary(
                short_summary=data.short_summary
            )
            await transcripts_controller.update(
                transcript,
                {"short_summary": final_short_summary.short_summary},
            )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "FINAL_SHORT_SUMMARY",
                final_short_summary,
                logger=logger,
            )

        await topic_processing.generate_summaries(
            topic_objects,
            transcript,  # DB transcript for context
            on_long_summary_callback=on_long_summary_callback,
            on_short_summary_callback=on_short_summary_callback,
            empty_pipeline=empty_pipeline,
            logger=logger,
        )

    ctx.log("generate_summary complete")

    return SummaryResult(summary=summary_result, short_summary=short_summary_result)


@diarization_pipeline.task(
    parents=[generate_waveform, generate_title, generate_summary],
    execution_timeout=timedelta(seconds=60),
    retries=3,
)
@with_error_handling("finalize")
async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
    """Finalize transcript: save words, emit TRANSCRIPT event, set status to 'ended'.

    Matches Celery's on_transcript + set_status behavior.
    Note: Title and summaries are already saved by their respective task callbacks.
    """
    ctx.log("finalize: saving transcript and setting status to 'ended'")

    mixdown_result = ctx.task_output(mixdown_tracks)
    track_result = ctx.task_output(process_tracks)

    duration = mixdown_result.duration
    all_words = track_result.all_words

    # Cleanup temporary padded S3 files (deferred until finalize for semantic parity with Celery)
    created_padded_files = track_result.created_padded_files
    if created_padded_files:
        ctx.log(f"Cleaning up {len(created_padded_files)} temporary S3 files")
        storage = _spawn_storage()
        cleanup_results = await asyncio.gather(
            *[storage.delete_file(path) for path in created_padded_files],
            return_exceptions=True,
        )
        for storage_path, result in zip(created_padded_files, cleanup_results):
            if isinstance(result, Exception):
                logger.warning(
                    "[Hatchet] Failed to cleanup temporary padded track",
                    storage_path=storage_path,
                    error=str(result),
                )

    async with fresh_db_connection():
        from reflector.db.transcripts import (  # noqa: PLC0415
            TranscriptDuration,
            TranscriptText,
            transcripts_controller,
        )

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript is None:
            raise ValueError(f"Transcript {input.transcript_id} not found in database")

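        # all_words were already merged across tracks and sorted by start time in process_tracks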
        word_objects = [Word(**w) for w in all_words]
        merged_transcript = TranscriptType(words=word_objects, translation=None)

        await append_event_and_broadcast(
            input.transcript_id,
            transcript,
            "TRANSCRIPT",
            TranscriptText(
                text=merged_transcript.text,
                translation=merged_transcript.translation,
            ),
            logger=logger,
        )

        # Save duration and clear workflow_run_id (workflow completed successfully)
        # Note: title/long_summary/short_summary already saved by their callbacks
        await transcripts_controller.update(
            transcript,
            {
                "duration": duration,
                "workflow_run_id": None,  # Clear on success - no need to resume
            },
        )

        duration_data = TranscriptDuration(duration=duration)
        await append_event_and_broadcast(
            input.transcript_id, transcript, "DURATION", duration_data, logger=logger
        )

        await set_status_and_broadcast(input.transcript_id, "ended", logger=logger)

    ctx.log(
        f"finalize complete: transcript {input.transcript_id} status set to 'ended'"
    )

    return FinalizeResult(status="COMPLETED")


@diarization_pipeline.task(
    parents=[finalize], execution_timeout=timedelta(seconds=60), retries=3
)
@with_error_handling("cleanup_consent", set_error_status=False)
async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
    """Check consent and delete audio files if any participant denied."""
    ctx.log(f"cleanup_consent: transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.db.meetings import (  # noqa: PLC0415
            meeting_consent_controller,
            meetings_controller,
        )
        from reflector.db.recordings import recordings_controller  # noqa: PLC0415
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
        from reflector.storage import get_transcripts_storage  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            ctx.log("cleanup_consent: transcript not found")
            return ConsentResult()

        consent_denied = False
        if transcript.meeting_id:
            meeting = await meetings_controller.get_by_id(transcript.meeting_id)
            if meeting:
                consent_denied = await meeting_consent_controller.has_any_denial(
                    meeting.id
                )

        if not consent_denied:
            ctx.log("cleanup_consent: consent approved, keeping all files")
            return ConsentResult()

        ctx.log("cleanup_consent: consent denied, deleting audio files")

        input_track_keys = set(t["s3_key"] for t in input.tracks)

        # Detect if recording.track_keys was manually modified after workflow started
        if transcript.recording_id:
            recording = await recordings_controller.get_by_id(transcript.recording_id)
            if recording and recording.track_keys:
                db_track_keys = set(filter_cam_audio_tracks(recording.track_keys))

                if input_track_keys != db_track_keys:
                    added = db_track_keys - input_track_keys
                    removed = input_track_keys - db_track_keys
                    logger.warning(
                        "[Hatchet] Track keys mismatch: DB changed since workflow start",
                        transcript_id=input.transcript_id,
                        recording_id=transcript.recording_id,
                        input_count=len(input_track_keys),
                        db_count=len(db_track_keys),
                        added_in_db=list(added) if added else None,
                        removed_from_db=list(removed) if removed else None,
                    )
                    ctx.log(
                        f"WARNING: track_keys mismatch - "
                        f"input has {len(input_track_keys)}, DB has {len(db_track_keys)}. "
                        f"Using input tracks for deletion."
                    )

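        # Two deletion phases: raw per-participant recordings from the source bucket,
        # then the mixed audio in transcript storage.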
        deletion_errors = []

        if input_track_keys and input.bucket_name:
            master_storage = get_transcripts_storage()
            for key in input_track_keys:
                try:
                    await master_storage.delete_file(key, bucket=input.bucket_name)
                    ctx.log(f"Deleted recording file: {input.bucket_name}/{key}")
                except Exception as e:
                    error_msg = f"Failed to delete {key}: {e}"
                    logger.error(error_msg, exc_info=True)
                    deletion_errors.append(error_msg)

        if transcript.audio_location == "storage":
            storage = get_transcripts_storage()
            try:
                await storage.delete_file(transcript.storage_audio_path)
                ctx.log(f"Deleted processed audio: {transcript.storage_audio_path}")
            except Exception as e:
                error_msg = f"Failed to delete processed audio: {e}"
                logger.error(error_msg, exc_info=True)
                deletion_errors.append(error_msg)

        if deletion_errors:
            logger.warning(
                "[Hatchet] cleanup_consent completed with errors",
                transcript_id=input.transcript_id,
                error_count=len(deletion_errors),
                errors=deletion_errors,
            )
            ctx.log(f"cleanup_consent completed with {len(deletion_errors)} errors")
        else:
            await transcripts_controller.update(transcript, {"audio_deleted": True})
            ctx.log("cleanup_consent: all audio deleted successfully")

        return ConsentResult()


@diarization_pipeline.task(
    parents=[cleanup_consent], execution_timeout=timedelta(seconds=60), retries=5
)
@with_error_handling("post_zulip", set_error_status=False)
async def post_zulip(input: PipelineInput, ctx: Context) -> ZulipResult:
    """Post notification to Zulip."""
    ctx.log(f"post_zulip: transcript_id={input.transcript_id}")

    if not settings.ZULIP_REALM:
        ctx.log("post_zulip skipped (Zulip not configured)")
        return ZulipResult(zulip_message_id=None, skipped=True)

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript:
            message_id = await post_transcript_notification(transcript)
            ctx.log(f"post_zulip complete: zulip_message_id={message_id}")
        else:
            message_id = None

    return ZulipResult(zulip_message_id=message_id)


@diarization_pipeline.task(
    parents=[post_zulip], execution_timeout=timedelta(seconds=120), retries=30
)
@with_error_handling("send_webhook", set_error_status=False)
async def send_webhook(input: PipelineInput, ctx: Context) -> WebhookResult:
    """Send completion webhook to external service."""
    ctx.log(f"send_webhook: transcript_id={input.transcript_id}")

    if not input.room_id:
        ctx.log("send_webhook skipped (no room_id)")
        return WebhookResult(webhook_sent=False, skipped=True)

    async with fresh_db_connection():
        from reflector.db.rooms import rooms_controller  # noqa: PLC0415
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        room = await rooms_controller.get_by_id(input.room_id)
        transcript = await transcripts_controller.get_by_id(input.transcript_id)

        if room and room.webhook_url and transcript:
            webhook_payload = {
                "event": "transcript.completed",
                "transcript_id": input.transcript_id,
                "title": transcript.title,
                "duration": transcript.duration,
            }

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    room.webhook_url, json=webhook_payload, timeout=30
                )
                response.raise_for_status()

            ctx.log(f"send_webhook complete: status_code={response.status_code}")

            return WebhookResult(webhook_sent=True, response_code=response.status_code)

    return WebhookResult(webhook_sent=False, skipped=True)

@@ -1,123 +0,0 @@
|
||||
"""
|
||||
Pydantic models for Hatchet workflow task return types.
|
||||
|
||||
Provides static typing for all task outputs, enabling type checking
|
||||
and better IDE support.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
|
||||
class PadTrackResult(BaseModel):
|
||||
"""Result from pad_track task."""
|
||||
|
||||
padded_key: NonEmptyString # S3 key (not presigned URL) - presign on demand to avoid stale URLs on replay
|
||||
bucket_name: (
|
||||
NonEmptyString | None
|
||||
) # None means use default transcript storage bucket
|
||||
size: int
|
||||
track_index: int
|
||||
|
||||
|
||||
class TranscribeTrackResult(BaseModel):
|
||||
"""Result from transcribe_track task."""
|
||||
|
||||
words: list[dict[str, Any]]
|
||||
track_index: int
|
||||
|
||||
|
||||
class RecordingResult(BaseModel):
|
||||
"""Result from get_recording task."""
|
||||
|
||||
id: NonEmptyString | None
|
||||
mtg_session_id: NonEmptyString | None
|
||||
duration: float
|
||||
|
||||
|
||||
class ParticipantsResult(BaseModel):
|
||||
"""Result from get_participants task."""
|
||||
|
||||
participants: list[dict[str, Any]]
|
||||
num_tracks: int
|
||||
source_language: NonEmptyString
|
||||
target_language: NonEmptyString
|
||||
|
||||
|
||||
class PaddedTrackInfo(BaseModel):
|
||||
"""Info for a padded track - S3 key + bucket for on-demand presigning."""
|
||||
|
||||
key: NonEmptyString
|
||||
bucket_name: NonEmptyString | None # None = use default storage bucket
|
||||
|
||||
|
||||
class ProcessTracksResult(BaseModel):
|
||||
"""Result from process_tracks task."""
|
||||
|
||||
all_words: list[dict[str, Any]]
|
||||
padded_tracks: list[PaddedTrackInfo] # S3 keys, not presigned URLs
|
||||
word_count: int
|
||||
num_tracks: int
|
||||
target_language: NonEmptyString
|
||||
created_padded_files: list[NonEmptyString]
|
||||
|
||||
|
||||
class MixdownResult(BaseModel):
|
||||
"""Result from mixdown_tracks task."""
|
||||
|
||||
audio_key: NonEmptyString
|
||||
duration: float
|
||||
tracks_mixed: int
|
||||
|
||||
|
||||
class WaveformResult(BaseModel):
|
||||
"""Result from generate_waveform task."""
|
||||
|
||||
waveform_generated: bool
|
||||
|
||||
|
||||
class TopicsResult(BaseModel):
|
||||
"""Result from detect_topics task."""
|
||||
|
||||
topics: list[dict[str, Any]]
|
||||
|
||||
|
||||
class TitleResult(BaseModel):
|
||||
"""Result from generate_title task."""
|
||||
|
||||
title: str | None
|
||||
|
||||
|
||||
class SummaryResult(BaseModel):
|
||||
"""Result from generate_summary task."""
|
||||
|
||||
summary: str | None
|
||||
short_summary: str | None
|
||||
|
||||
|
||||
class FinalizeResult(BaseModel):
|
||||
"""Result from finalize task."""
|
||||
|
||||
status: NonEmptyString
|
||||
|
||||
|
||||
class ConsentResult(BaseModel):
|
||||
"""Result from cleanup_consent task."""
|
||||
|
||||
|
||||
class ZulipResult(BaseModel):
|
||||
"""Result from post_zulip task."""
|
||||
|
||||
zulip_message_id: int | None = None
|
||||
skipped: bool = False
|
||||
|
||||
|
||||
class WebhookResult(BaseModel):
|
||||
"""Result from send_webhook task."""
|
||||
|
||||
webhook_sent: bool
|
||||
skipped: bool = False
|
||||
response_code: int | None = None
|
||||
@@ -1,222 +0,0 @@
|
||||
"""
|
||||
Hatchet child workflow: TrackProcessing
|
||||
|
||||
Handles individual audio track processing: padding and transcription.
|
||||
Spawned dynamically by the main diarization pipeline for each track.
|
||||
|
||||
Architecture note: This is a separate workflow (not inline tasks in DiarizationPipeline)
|
||||
because Hatchet workflow DAGs are defined statically, but the number of tracks varies
|
||||
at runtime. Child workflow spawning via `aio_run()` + `asyncio.gather()` is the
|
||||
standard pattern for dynamic fan-out. See `process_tracks` in diarization_pipeline.py.
|
||||
|
||||
Note: This file uses deferred imports (inside tasks) intentionally.
|
||||
Hatchet workers run in forked processes; fresh imports per task ensure
|
||||
storage/DB connections are not shared across forks.
|
||||
"""
|
||||
|
||||
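# Sketch of the fan-out call site, for orientation only (names taken from
# process_tracks in diarization_pipeline.py):
#
#     child_coroutines = [
#         track_workflow.aio_run(TrackInput(track_index=i, s3_key=track["s3_key"], ...))
#         for i, track in enumerate(input.tracks)
#     ]
#     results = await asyncio.gather(*child_coroutines)  # one result dict per child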
import tempfile
|
||||
from datetime import timedelta
|
||||
from pathlib import Path
|
||||
|
||||
import av
|
||||
from hatchet_sdk import Context
|
||||
from pydantic import BaseModel
|
||||
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
from reflector.hatchet.workflows.models import PadTrackResult, TranscribeTrackResult
|
||||
from reflector.logger import logger
|
||||
from reflector.utils.audio_constants import PRESIGNED_URL_EXPIRATION_SECONDS
|
||||
from reflector.utils.audio_padding import (
|
||||
apply_audio_padding_to_file,
|
||||
extract_stream_start_time_from_container,
|
||||
)
|
||||
|
||||
|
||||
class TrackInput(BaseModel):
|
||||
"""Input for individual track processing."""
|
||||
|
||||
track_index: int
|
||||
s3_key: str
|
||||
bucket_name: str
|
||||
transcript_id: str
|
||||
language: str = "en"
|
||||
|
||||
|
||||
hatchet = HatchetClientManager.get_client()
|
||||
|
||||
track_workflow = hatchet.workflow(name="TrackProcessing", input_validator=TrackInput)
|
||||
|
||||
|
||||
@track_workflow.task(execution_timeout=timedelta(seconds=300), retries=3)
|
||||
async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
|
||||
"""Pad single audio track with silence for alignment.
|
||||
|
||||
Extracts stream.start_time from WebM container metadata and applies
|
||||
silence padding using PyAV filter graph (adelay).
|
||||
"""
|
||||
ctx.log(f"pad_track: track {input.track_index}, s3_key={input.s3_key}")
|
||||
logger.info(
|
||||
"[Hatchet] pad_track",
|
||||
track_index=input.track_index,
|
||||
s3_key=input.s3_key,
|
||||
transcript_id=input.transcript_id,
|
||||
)
|
||||
|
||||
try:
|
||||
# Create fresh storage instance to avoid aioboto3 fork issues
|
||||
from reflector.settings import settings # noqa: PLC0415
|
||||
from reflector.storage.storage_aws import AwsStorage # noqa: PLC0415
|
||||
|
||||
storage = AwsStorage(
|
||||
aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
)
|
||||
|
||||
source_url = await storage.get_file_url(
|
||||
input.s3_key,
|
||||
operation="get_object",
|
||||
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
|
||||
bucket=input.bucket_name,
|
||||
)
|
||||
|
||||
with av.open(source_url) as in_container:
|
||||
start_time_seconds = extract_stream_start_time_from_container(
|
||||
in_container, input.track_index, logger=logger
|
||||
)
|
||||
|
||||
# If no padding needed, return original S3 key
|
||||
if start_time_seconds <= 0:
|
||||
logger.info(
|
||||
f"Track {input.track_index} requires no padding",
|
||||
track_index=input.track_index,
|
||||
)
|
||||
return PadTrackResult(
|
||||
padded_key=input.s3_key,
|
||||
bucket_name=input.bucket_name,
|
||||
size=0,
|
||||
track_index=input.track_index,
|
||||
)
|
||||
|
||||
with tempfile.NamedTemporaryFile(suffix=".webm", delete=False) as temp_file:
|
||||
temp_path = temp_file.name
|
||||
|
||||
try:
|
||||
apply_audio_padding_to_file(
|
||||
in_container,
|
||||
temp_path,
|
||||
start_time_seconds,
|
||||
input.track_index,
|
||||
logger=logger,
|
||||
)
|
||||
|
||||
file_size = Path(temp_path).stat().st_size
|
||||
storage_path = f"file_pipeline_hatchet/{input.transcript_id}/tracks/padded_{input.track_index}.webm"
|
||||
|
||||
logger.info(
|
||||
f"About to upload padded track",
|
||||
key=storage_path,
|
||||
size=file_size,
|
||||
)
|
||||
|
||||
with open(temp_path, "rb") as padded_file:
|
||||
await storage.put_file(storage_path, padded_file)
|
||||
|
||||
logger.info(
|
||||
f"Uploaded padded track to S3",
|
||||
key=storage_path,
|
||||
size=file_size,
|
||||
)
|
||||
finally:
|
||||
Path(temp_path).unlink(missing_ok=True)
|
||||
|
||||
ctx.log(f"pad_track complete: track {input.track_index} -> {storage_path}")
|
||||
logger.info(
|
||||
"[Hatchet] pad_track complete",
|
||||
track_index=input.track_index,
|
||||
padded_key=storage_path,
|
||||
)
|
||||
|
||||
# Return S3 key (not presigned URL) - consumer tasks presign on demand
|
||||
# This avoids stale URLs when workflow is replayed
|
||||
return PadTrackResult(
|
||||
padded_key=storage_path,
|
||||
bucket_name=None, # None = use default transcript storage bucket
|
||||
size=file_size,
|
||||
track_index=input.track_index,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("[Hatchet] pad_track failed", error=str(e), exc_info=True)
|
||||
raise
|
||||
|
||||
|
||||
@track_workflow.task(
|
||||
parents=[pad_track], execution_timeout=timedelta(seconds=600), retries=3
|
||||
)
|
||||
async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackResult:
|
||||
"""Transcribe audio track using GPU (Modal.com) or local Whisper."""
|
||||
ctx.log(f"transcribe_track: track {input.track_index}, language={input.language}")
|
||||
logger.info(
|
||||
"[Hatchet] transcribe_track",
|
||||
track_index=input.track_index,
|
||||
language=input.language,
|
||||
)
|
||||
|
||||
try:
|
||||
pad_result = ctx.task_output(pad_track)
|
||||
padded_key = pad_result.padded_key
|
||||
bucket_name = pad_result.bucket_name
|
||||
|
||||
if not padded_key:
|
||||
raise ValueError("Missing padded_key from pad_track")
|
||||
|
||||
# Presign URL on demand (avoids stale URLs on workflow replay)
|
||||
from reflector.settings import settings # noqa: PLC0415
|
||||
from reflector.storage.storage_aws import AwsStorage # noqa: PLC0415
|
||||
|
||||
storage = AwsStorage(
|
||||
aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
)
|
||||
|
||||
audio_url = await storage.get_file_url(
|
||||
padded_key,
|
||||
operation="get_object",
|
||||
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
|
||||
bucket=bucket_name,
|
||||
)
|
||||
|
||||
from reflector.pipelines.transcription_helpers import ( # noqa: PLC0415
|
||||
transcribe_file_with_processor,
|
||||
)
|
||||
|
||||
transcript = await transcribe_file_with_processor(audio_url, input.language)
|
||||
|
||||
# Tag all words with speaker index
|
||||
words = []
|
||||
for word in transcript.words:
|
||||
word_dict = word.model_dump()
|
||||
word_dict["speaker"] = input.track_index
|
||||
words.append(word_dict)
|
||||
|
||||
ctx.log(
|
||||
f"transcribe_track complete: track {input.track_index}, {len(words)} words"
|
||||
)
|
||||
logger.info(
|
||||
"[Hatchet] transcribe_track complete",
|
||||
track_index=input.track_index,
|
||||
word_count=len(words),
|
||||
)
|
||||
|
||||
return TranscribeTrackResult(
|
||||
words=words,
|
||||
track_index=input.track_index,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("[Hatchet] transcribe_track failed", error=str(e), exc_info=True)
|
||||
raise
|
||||
@@ -1,32 +1,13 @@
|
||||
import logging
|
||||
from contextvars import ContextVar
|
||||
from typing import Generic, Type, TypeVar
|
||||
from uuid import uuid4
|
||||
from typing import Type, TypeVar
|
||||
|
||||
from llama_index.core import Settings
|
||||
from llama_index.core.output_parsers import PydanticOutputParser
|
||||
from llama_index.core.program import LLMTextCompletionProgram
|
||||
from llama_index.core.response_synthesizers import TreeSummarize
|
||||
from llama_index.core.workflow import (
|
||||
Context,
|
||||
Event,
|
||||
StartEvent,
|
||||
StopEvent,
|
||||
Workflow,
|
||||
step,
|
||||
)
|
||||
from llama_index.llms.openai_like import OpenAILike
|
||||
from pydantic import BaseModel, ValidationError
|
||||
from workflows.errors import WorkflowTimeoutError
|
||||
|
||||
from reflector.utils.retry import retry
|
||||
from pydantic import BaseModel
|
||||
|
||||
T = TypeVar("T", bound=BaseModel)
|
||||
OutputT = TypeVar("OutputT", bound=BaseModel)
|
||||
|
||||
# Session ID for LiteLLM request grouping - set per processing run
|
||||
llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
|
||||
Based on the following analysis, provide the information in the requested JSON format:
|
||||
@@ -38,158 +19,6 @@ Analysis:
|
||||
"""
|
||||
|
||||
|
||||
class LLMParseError(Exception):
|
||||
"""Raised when LLM output cannot be parsed after retries."""
|
||||
|
||||
def __init__(self, output_cls: Type[BaseModel], error_msg: str, attempts: int):
|
||||
self.output_cls = output_cls
|
||||
self.error_msg = error_msg
|
||||
self.attempts = attempts
|
||||
super().__init__(
|
||||
f"Failed to parse {output_cls.__name__} after {attempts} attempts: {error_msg}"
|
||||
)
|
||||
|
||||
|
||||
class ExtractionDone(Event):
|
||||
"""Event emitted when LLM JSON formatting completes."""
|
||||
|
||||
output: str
|
||||
|
||||
|
||||
class ValidationErrorEvent(Event):
|
||||
"""Event emitted when validation fails."""
|
||||
|
||||
error: str
|
||||
wrong_output: str
|
||||
|
||||
|
||||
class StructuredOutputWorkflow(Workflow, Generic[OutputT]):
|
||||
"""Workflow for structured output extraction with validation retry.
|
||||
|
||||
This workflow handles parse/validation retries only. Network error retries
|
||||
are handled internally by Settings.llm (OpenAILike max_retries=3).
|
||||
The caller should NOT wrap this workflow in additional retry logic.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
output_cls: Type[OutputT],
|
||||
max_retries: int = 3,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(**kwargs)
|
||||
self.output_cls: Type[OutputT] = output_cls
|
||||
self.max_retries = max_retries
|
||||
self.output_parser = PydanticOutputParser(output_cls)
|
||||
|
||||
@step
|
||||
async def extract(
|
||||
self, ctx: Context, ev: StartEvent | ValidationErrorEvent
|
||||
) -> StopEvent | ExtractionDone:
|
||||
"""Extract structured data from text using two-step LLM process.
|
||||
|
||||
Step 1 (first call only): TreeSummarize generates text analysis
|
||||
Step 2 (every call): Settings.llm.acomplete formats analysis as JSON
|
||||
"""
|
||||
current_retries = await ctx.store.get("retries", default=0)
|
||||
await ctx.store.set("retries", current_retries + 1)
|
||||
|
||||
if current_retries >= self.max_retries:
|
||||
last_error = await ctx.store.get("last_error", default=None)
|
||||
logger.error(
|
||||
f"Max retries ({self.max_retries}) reached for {self.output_cls.__name__}"
|
||||
)
|
||||
return StopEvent(result={"error": last_error, "attempts": current_retries})
|
||||
|
||||
if isinstance(ev, StartEvent):
|
||||
# First call: run TreeSummarize to get analysis, store in context
|
||||
prompt = ev.get("prompt")
|
||||
texts = ev.get("texts")
|
||||
tone_name = ev.get("tone_name")
|
||||
if not prompt or not isinstance(texts, list):
|
||||
raise ValueError(
|
||||
"StartEvent must contain 'prompt' (str) and 'texts' (list)"
|
||||
)
|
||||
|
||||
summarizer = TreeSummarize(verbose=False)
|
||||
analysis = await summarizer.aget_response(
|
||||
prompt, texts, tone_name=tone_name
|
||||
)
|
||||
await ctx.store.set("analysis", str(analysis))
|
||||
reflection = ""
|
||||
else:
|
||||
# Retry: reuse analysis from context
|
||||
analysis = await ctx.store.get("analysis")
|
||||
if not analysis:
|
||||
raise RuntimeError("Internal error: analysis not found in context")
|
||||
|
||||
wrong_output = ev.wrong_output
|
||||
if len(wrong_output) > 2000:
|
||||
wrong_output = wrong_output[:2000] + "... [truncated]"
|
||||
reflection = (
|
||||
f"\n\nYour previous response could not be parsed:\n{wrong_output}\n\n"
|
||||
f"Error:\n{ev.error}\n\n"
|
||||
"Please try again. Return ONLY valid JSON matching the schema above, "
|
||||
"with no markdown formatting or extra text."
|
||||
)
|
||||
|
||||
# Step 2: Format analysis as JSON using LLM completion
|
||||
format_instructions = self.output_parser.format(
|
||||
"Please structure the above information in the following JSON format:"
|
||||
)
|
||||
|
||||
json_prompt = STRUCTURED_RESPONSE_PROMPT_TEMPLATE.format(
|
||||
analysis=analysis,
|
||||
format_instructions=format_instructions + reflection,
|
||||
)
|
||||
|
||||
# Network retries handled by OpenAILike (max_retries=3)
|
||||
response = await Settings.llm.acomplete(json_prompt)
|
||||
return ExtractionDone(output=response.text)
|
||||
|
||||
@step
|
||||
async def validate(
|
||||
self, ctx: Context, ev: ExtractionDone
|
||||
) -> StopEvent | ValidationErrorEvent:
|
||||
"""Validate extracted output against Pydantic schema."""
|
||||
raw_output = ev.output
|
||||
retries = await ctx.store.get("retries", default=0)
|
||||
|
||||
try:
|
||||
parsed = self.output_parser.parse(raw_output)
|
||||
if retries > 1:
|
||||
logger.info(
|
||||
f"LLM parse succeeded on attempt {retries}/{self.max_retries} "
|
||||
f"for {self.output_cls.__name__}"
|
||||
)
|
||||
return StopEvent(result={"success": parsed})
|
||||
|
||||
except (ValidationError, ValueError) as e:
|
||||
error_msg = self._format_error(e, raw_output)
|
||||
await ctx.store.set("last_error", error_msg)
|
||||
|
||||
logger.error(
|
||||
f"LLM parse error (attempt {retries}/{self.max_retries}): "
|
||||
f"{type(e).__name__}: {e}\nRaw response: {raw_output[:500]}"
|
||||
)
|
||||
|
||||
return ValidationErrorEvent(
|
||||
error=error_msg,
|
||||
wrong_output=raw_output,
|
||||
)
|
||||
|
||||
def _format_error(self, error: Exception, raw_output: str) -> str:
|
||||
"""Format error for LLM feedback."""
|
||||
if isinstance(error, ValidationError):
|
||||
error_messages = []
|
||||
for err in error.errors():
|
||||
field = ".".join(str(loc) for loc in err["loc"])
|
||||
error_messages.append(f"- {err['msg']} in field '{field}'")
|
||||
return "Schema validation errors:\n" + "\n".join(error_messages)
|
||||
else:
|
||||
return f"Parse error: {str(error)}"
|
||||
|
||||
|
||||
class LLM:
|
||||
def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048):
|
||||
self.settings_obj = settings
|
||||
@@ -200,12 +29,11 @@ class LLM:
|
||||
self.temperature = temperature
|
||||
self.max_tokens = max_tokens
|
||||
|
||||
# Configure llamaindex Settings
|
||||
self._configure_llamaindex()
|
||||
|
||||
def _configure_llamaindex(self):
|
||||
"""Configure llamaindex Settings with OpenAILike LLM"""
|
||||
session_id = llm_session_id.get() or f"fallback-{uuid4().hex}"
|
||||
|
||||
Settings.llm = OpenAILike(
|
||||
model=self.model_name,
|
||||
api_base=self.url,
|
||||
@@ -215,7 +43,6 @@ class LLM:
|
||||
is_function_calling_model=False,
|
||||
temperature=self.temperature,
|
||||
max_tokens=self.max_tokens,
|
||||
additional_kwargs={"extra_body": {"litellm_session_id": session_id}},
|
||||
)
|
||||
|
||||
async def get_response(
|
||||
@@ -232,38 +59,25 @@ class LLM:
|
||||
texts: list[str],
|
||||
output_cls: Type[T],
|
||||
tone_name: str | None = None,
|
||||
timeout: int | None = None,
|
||||
) -> T:
|
||||
"""Get structured output from LLM with validation retry via Workflow."""
|
||||
if timeout is None:
|
||||
timeout = self.settings_obj.LLM_STRUCTURED_RESPONSE_TIMEOUT
|
||||
"""Get structured output from LLM for non-function-calling models"""
|
||||
summarizer = TreeSummarize(verbose=True)
|
||||
response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
|
||||
|
||||
async def run_workflow():
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=output_cls,
|
||||
max_retries=self.settings_obj.LLM_PARSE_MAX_RETRIES + 1,
|
||||
timeout=timeout,
|
||||
)
|
||||
output_parser = PydanticOutputParser(output_cls)
|
||||
|
||||
result = await workflow.run(
|
||||
prompt=prompt,
|
||||
texts=texts,
|
||||
tone_name=tone_name,
|
||||
)
|
||||
|
||||
if "error" in result:
|
||||
error_msg = result["error"] or "Max retries exceeded"
|
||||
raise LLMParseError(
|
||||
output_cls=output_cls,
|
||||
error_msg=error_msg,
|
||||
attempts=result.get("attempts", 0),
|
||||
)
|
||||
|
||||
return result["success"]
|
||||
|
||||
return await retry(run_workflow)(
|
||||
retry_attempts=3,
|
||||
retry_backoff_interval=1.0,
|
||||
retry_backoff_max=30.0,
|
||||
retry_ignore_exc_types=(WorkflowTimeoutError,),
|
||||
program = LLMTextCompletionProgram.from_defaults(
|
||||
output_parser=output_parser,
|
||||
prompt_template_str=STRUCTURED_RESPONSE_PROMPT_TEMPLATE,
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
format_instructions = output_parser.format(
|
||||
"Please structure the above information in the following JSON format:"
|
||||
)
|
||||
|
||||
output = await program.acall(
|
||||
analysis=str(response), format_instructions=format_instructions
|
||||
)
|
||||
|
||||
return output
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
"""Pipeline modules for audio processing."""
|
||||
@@ -12,9 +12,8 @@ from pathlib import Path
|
||||
|
||||
import av
|
||||
import structlog
|
||||
from celery import chain, shared_task
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from reflector.asynctask import asynctask
|
||||
from reflector.db.rooms import rooms_controller
|
||||
from reflector.db.transcripts import (
|
||||
SourceKind,
|
||||
@@ -23,18 +22,23 @@ from reflector.db.transcripts import (
|
||||
transcripts_controller,
|
||||
)
|
||||
from reflector.logger import logger
|
||||
from reflector.pipelines import topic_processing
|
||||
from reflector.pipelines.main_live_pipeline import (
|
||||
PipelineMainBase,
|
||||
broadcast_to_sockets,
|
||||
task_cleanup_consent,
|
||||
task_pipeline_post_to_zulip,
|
||||
task_cleanup_consent_taskiq,
|
||||
task_pipeline_post_to_zulip_taskiq,
|
||||
)
|
||||
from reflector.processors import (
|
||||
AudioFileWriterProcessor,
|
||||
TranscriptFinalSummaryProcessor,
|
||||
TranscriptFinalTitleProcessor,
|
||||
TranscriptTopicDetectorProcessor,
|
||||
)
|
||||
from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
|
||||
from reflector.processors import AudioFileWriterProcessor
|
||||
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
|
||||
from reflector.processors.file_diarization import FileDiarizationInput
|
||||
from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor
|
||||
from reflector.processors.file_transcript import FileTranscriptInput
|
||||
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
|
||||
from reflector.processors.transcript_diarization_assembler import (
|
||||
TranscriptDiarizationAssemblerInput,
|
||||
TranscriptDiarizationAssemblerProcessor,
|
||||
@@ -48,7 +52,22 @@ from reflector.processors.types import (
|
||||
)
|
||||
from reflector.settings import settings
|
||||
from reflector.storage import get_transcripts_storage
|
||||
from reflector.worker.webhook import send_transcript_webhook
|
||||
from reflector.worker.app import taskiq_broker
|
||||
from reflector.worker.session_decorator import catch_exception, with_session
|
||||
from reflector.worker.webhook import send_transcript_webhook_taskiq
|
||||
|
||||
|
||||
class EmptyPipeline:
|
||||
"""Empty pipeline for processors that need a pipeline reference"""
|
||||
|
||||
def __init__(self, logger: structlog.BoundLogger):
|
||||
self.logger = logger
|
||||
|
||||
def get_pref(self, k, d=None):
|
||||
return d
|
||||
|
||||
async def emit(self, event):
|
||||
pass
|
||||
|
||||
|
||||
class PipelineMainFile(PipelineMainBase):
|
||||
@@ -63,7 +82,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
def __init__(self, transcript_id: str):
|
||||
super().__init__(transcript_id=transcript_id)
|
||||
self.logger = logger.bind(transcript_id=self.transcript_id)
|
||||
self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger)
|
||||
self.empty_pipeline = EmptyPipeline(logger=self.logger)
|
||||
|
||||
def _handle_gather_exceptions(self, results: list, operation: str) -> None:
|
||||
"""Handle exceptions from asyncio.gather with return_exceptions=True"""
|
||||
@@ -77,29 +96,39 @@ class PipelineMainFile(PipelineMainBase):
|
||||
)
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def set_status(self, transcript_id: str, status: TranscriptStatus):
|
||||
async with self.lock_transaction():
|
||||
return await transcripts_controller.set_status(transcript_id, status)
|
||||
async def set_status(
|
||||
self,
|
||||
session: AsyncSession,
|
||||
transcript_id: str,
|
||||
status: TranscriptStatus,
|
||||
):
|
||||
return await transcripts_controller.set_status(session, transcript_id, status)
|
||||
|
||||
async def process(self, file_path: Path):
|
||||
async def process(self, session: AsyncSession, file_path: Path):
|
||||
"""Main entry point for file processing"""
|
||||
self.logger.info(f"Starting file pipeline for {file_path}")
|
||||
|
||||
transcript = await self.get_transcript()
|
||||
transcript = await transcripts_controller.get_by_id(session, self.transcript_id)
|
||||
|
||||
# Clear transcript as we're going to regenerate everything
|
||||
async with self.transaction():
|
||||
await transcripts_controller.update(
|
||||
transcript,
|
||||
{
|
||||
"events": [],
|
||||
"topics": [],
|
||||
},
|
||||
)
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"events": [],
|
||||
"topics": [],
|
||||
},
|
||||
)
|
||||
|
||||
# Extract audio and write to transcript location
|
||||
audio_path = await self.extract_and_write_audio(file_path, transcript)
|
||||
|
||||
# Upload for processing
|
||||
audio_url = await self.upload_audio(audio_path, transcript)
|
||||
|
||||
# Run parallel processing
|
||||
await self.run_parallel_processing(
|
||||
session,
|
||||
audio_path,
|
||||
audio_url,
|
||||
transcript.source_language,
|
||||
@@ -108,7 +137,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
|
||||
self.logger.info("File pipeline complete")
|
||||
|
||||
await self.set_status(transcript.id, "ended")
|
||||
await transcripts_controller.set_status(session, transcript.id, "ended")
|
||||
|
||||
async def extract_and_write_audio(
|
||||
self, file_path: Path, transcript: Transcript
|
||||
@@ -170,6 +199,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
|
||||
async def run_parallel_processing(
|
||||
self,
|
||||
session,
|
||||
audio_path: Path,
|
||||
audio_url: str,
|
||||
source_language: str,
|
||||
@@ -183,7 +213,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
# Phase 1: Parallel processing of independent tasks
|
||||
transcription_task = self.transcribe_file(audio_url, source_language)
|
||||
diarization_task = self.diarize_file(audio_url)
|
||||
waveform_task = self.generate_waveform(audio_path)
|
||||
waveform_task = self.generate_waveform(session, audio_path)
|
||||
|
||||
results = await asyncio.gather(
|
||||
transcription_task, diarization_task, waveform_task, return_exceptions=True
|
||||
@@ -192,6 +222,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
transcript_result = results[0]
|
||||
diarization_result = results[1]
|
||||
|
||||
# Handle errors - raise any exception that occurred
|
||||
self._handle_gather_exceptions(results, "parallel processing")
|
||||
for result in results:
|
||||
if isinstance(result, Exception):
|
||||
@@ -206,6 +237,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
transcript=transcript_result, diarization=diarization_result or []
|
||||
)
|
||||
|
||||
# Store result for retrieval
|
||||
diarized_transcript: Transcript | None = None
|
||||
|
||||
async def capture_result(transcript):
|
||||
@@ -229,7 +261,7 @@ class PipelineMainFile(PipelineMainBase):
|
||||
)
|
||||
results = await asyncio.gather(
|
||||
self.generate_title(topics),
|
||||
self.generate_summaries(topics),
|
||||
self.generate_summaries(session, topics),
|
||||
return_exceptions=True,
|
||||
)
|
||||
|
||||
@@ -237,7 +269,24 @@ class PipelineMainFile(PipelineMainBase):
|
||||
|
||||
async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
|
||||
"""Transcribe complete file"""
|
||||
return await transcribe_file_with_processor(audio_url, language)
|
||||
processor = FileTranscriptAutoProcessor()
|
||||
input_data = FileTranscriptInput(audio_url=audio_url, language=language)
|
||||
|
||||
# Store result for retrieval
|
||||
result: TranscriptType | None = None
|
||||
|
||||
async def capture_result(transcript):
|
||||
nonlocal result
|
||||
result = transcript
|
||||
|
||||
processor.on(capture_result)
|
||||
await processor.push(input_data)
|
||||
await processor.flush()
|
||||
|
||||
if not result:
|
||||
raise ValueError("No transcript captured")
|
||||
|
||||
return result
|
||||
|
||||
async def diarize_file(self, audio_url: str) -> list[DiarizationSegment] | None:
|
||||
"""Get diarization for file"""
|
||||
@@ -264,9 +313,9 @@ class PipelineMainFile(PipelineMainBase):
|
||||
self.logger.error(f"Diarization failed: {e}")
|
||||
return None
|
||||
|
||||
async def generate_waveform(self, audio_path: Path):
|
||||
async def generate_waveform(self, session: AsyncSession, audio_path: Path):
|
||||
"""Generate and save waveform"""
|
||||
transcript = await self.get_transcript()
|
||||
transcript = await transcripts_controller.get_by_id(session, self.transcript_id)
|
||||
|
||||
processor = AudioWaveformProcessor(
|
||||
audio_path=audio_path,
|
||||
@@ -280,44 +329,74 @@ class PipelineMainFile(PipelineMainBase):
|
||||
async def detect_topics(
|
||||
self, transcript: TranscriptType, target_language: str
|
||||
) -> list[TitleSummary]:
|
||||
return await topic_processing.detect_topics(
|
||||
transcript,
|
||||
target_language,
|
||||
on_topic_callback=self.on_topic,
|
||||
empty_pipeline=self.empty_pipeline,
|
||||
)
|
||||
"""Detect topics from complete transcript"""
|
||||
chunk_size = 300
|
||||
topics: list[TitleSummary] = []
|
||||
|
||||
async def on_topic(topic: TitleSummary):
|
||||
topics.append(topic)
|
||||
return await self.on_topic(topic)
|
||||
|
||||
topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
|
||||
topic_detector.set_pipeline(self.empty_pipeline)
|
||||
|
||||
for i in range(0, len(transcript.words), chunk_size):
|
||||
chunk_words = transcript.words[i : i + chunk_size]
|
||||
if not chunk_words:
|
||||
continue
|
||||
|
||||
chunk_transcript = TranscriptType(
|
||||
words=chunk_words, translation=transcript.translation
|
||||
)
|
||||
|
||||
await topic_detector.push(chunk_transcript)
|
||||
|
||||
await topic_detector.flush()
|
||||
return topics
|
||||
|
||||
async def generate_title(self, topics: list[TitleSummary]):
|
||||
return await topic_processing.generate_title(
|
||||
topics,
|
||||
on_title_callback=self.on_title,
|
||||
empty_pipeline=self.empty_pipeline,
|
||||
logger=self.logger,
|
||||
"""Generate title from topics"""
|
||||
if not topics:
|
||||
self.logger.warning("No topics for title generation")
|
||||
return
|
||||
|
||||
processor = TranscriptFinalTitleProcessor(callback=self.on_title)
|
||||
processor.set_pipeline(self.empty_pipeline)
|
||||
|
||||
for topic in topics:
|
||||
await processor.push(topic)
|
||||
|
||||
await processor.flush()
|
||||
|
||||
async def generate_summaries(self, session, topics: list[TitleSummary]):
|
||||
"""Generate long and short summaries from topics"""
|
||||
if not topics:
|
||||
self.logger.warning("No topics for summary generation")
|
||||
return
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(session, self.transcript_id)
|
||||
processor = TranscriptFinalSummaryProcessor(
|
||||
transcript=transcript,
|
||||
callback=self.on_long_summary,
|
||||
on_short_summary=self.on_short_summary,
|
||||
)
|
||||
processor.set_pipeline(self.empty_pipeline)
|
||||
|
||||
async def generate_summaries(self, topics: list[TitleSummary]):
|
||||
transcript = await self.get_transcript()
|
||||
return await topic_processing.generate_summaries(
|
||||
topics,
|
||||
transcript,
|
||||
on_long_summary_callback=self.on_long_summary,
|
||||
on_short_summary_callback=self.on_short_summary,
|
||||
on_action_items_callback=self.on_action_items,
|
||||
empty_pipeline=self.empty_pipeline,
|
||||
logger=self.logger,
|
||||
)
|
||||
for topic in topics:
|
||||
await processor.push(topic)
|
||||
|
||||
await processor.flush()
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_send_webhook_if_needed(*, transcript_id: str):
|
||||
"""Send webhook if this is a room recording with webhook configured"""
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session
|
||||
async def task_send_webhook_if_needed(session, *, transcript_id: str):
|
||||
transcript = await transcripts_controller.get_by_id(session, transcript_id)
|
||||
if not transcript:
|
||||
return
|
||||
|
||||
if transcript.source_kind == SourceKind.ROOM and transcript.room_id:
|
||||
room = await rooms_controller.get_by_id(transcript.room_id)
|
||||
room = await rooms_controller.get_by_id(session, transcript.room_id)
|
||||
if room and room.webhook_url:
|
||||
logger.info(
|
||||
"Dispatching webhook",
|
||||
@@ -325,22 +404,22 @@ async def task_send_webhook_if_needed(*, transcript_id: str):
|
||||
room_id=room.id,
|
||||
webhook_url=room.webhook_url,
|
||||
)
|
||||
send_transcript_webhook.delay(
|
||||
await send_transcript_webhook_taskiq.kiq(
|
||||
transcript_id, room.id, event_id=uuid.uuid4().hex
|
||||
)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_file_process(*, transcript_id: str):
|
||||
"""Celery task for file pipeline processing"""
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
@taskiq_broker.task
|
||||
@catch_exception
|
||||
@with_session
|
||||
async def task_pipeline_file_process(session: AsyncSession, *, transcript_id: str):
|
||||
transcript = await transcripts_controller.get_by_id(session, transcript_id)
|
||||
if not transcript:
|
||||
raise Exception(f"Transcript {transcript_id} not found")
|
||||
|
||||
pipeline = PipelineMainFile(transcript_id=transcript_id)
|
||||
try:
|
||||
await pipeline.set_status(transcript_id, "processing")
|
||||
await pipeline.set_status(session, transcript_id, "processing")
|
||||
|
||||
audio_file = next(transcript.data_path.glob("upload.*"), None)
|
||||
if not audio_file:
|
||||
@@ -349,21 +428,18 @@ async def task_pipeline_file_process(*, transcript_id: str):
|
||||
if not audio_file:
|
||||
raise Exception("No audio file found to process")
|
||||
|
||||
await pipeline.process(audio_file)
|
||||
await pipeline.process(session, audio_file)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"File pipeline failed for transcript {transcript_id}: {type(e).__name__}: {str(e)}",
|
||||
exc_info=True,
|
||||
transcript_id=transcript_id,
|
||||
)
|
||||
await pipeline.set_status(transcript_id, "error")
|
||||
except Exception:
|
||||
logger.error("Error while processing the file", exc_info=True)
|
||||
try:
|
||||
await pipeline.set_status(session, transcript_id, "error")
|
||||
except Exception:
|
||||
logger.error(
|
||||
"Error setting status in task_pipeline_file_process during exception, ignoring it"
|
||||
)
|
||||
raise
|
||||
|
||||
# Run post-processing chain: consent cleanup -> zulip -> webhook
|
||||
post_chain = chain(
|
||||
task_cleanup_consent.si(transcript_id=transcript_id),
|
||||
task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
|
||||
task_send_webhook_if_needed.si(transcript_id=transcript_id),
|
||||
)
|
||||
post_chain.delay()
|
||||
await task_cleanup_consent_taskiq.kiq(transcript_id=transcript_id)
|
||||
await task_pipeline_post_to_zulip_taskiq.kiq(transcript_id=transcript_id)
|
||||
await task_send_webhook_if_needed.kiq(transcript_id=transcript_id)
|
||||
|
||||
@@ -12,22 +12,21 @@ It is directly linked to our data model.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import functools
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import Generic
|
||||
|
||||
import av
|
||||
from celery import chord, current_task, group, shared_task
|
||||
import boto3
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from structlog import BoundLogger as Logger
|
||||
|
||||
from reflector.asynctask import asynctask
|
||||
from reflector.db import get_session_context
|
||||
from reflector.db.meetings import meeting_consent_controller, meetings_controller
|
||||
from reflector.db.recordings import recordings_controller
|
||||
from reflector.db.rooms import rooms_controller
|
||||
from reflector.db.transcripts import (
|
||||
Transcript,
|
||||
TranscriptActionItems,
|
||||
TranscriptDuration,
|
||||
TranscriptFinalLongSummary,
|
||||
TranscriptFinalShortSummary,
|
||||
@@ -62,6 +61,8 @@ from reflector.processors.types import (
|
||||
from reflector.processors.types import Transcript as TranscriptProcessorType
|
||||
from reflector.settings import settings
|
||||
from reflector.storage import get_transcripts_storage
|
||||
from reflector.worker.app import taskiq_broker
|
||||
from reflector.worker.session_decorator import with_session_and_transcript
|
||||
from reflector.ws_manager import WebsocketManager, get_ws_manager
|
||||
from reflector.zulip import (
|
||||
get_zulip_message,
|
||||
@@ -85,53 +86,6 @@ def broadcast_to_sockets(func):
|
||||
message=resp.model_dump(mode="json"),
|
||||
)
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(self.transcript_id)
|
||||
if transcript and transcript.user_id:
|
||||
# Emit only relevant events to the user room to avoid noisy updates.
|
||||
# Allowed: STATUS, FINAL_TITLE, DURATION. All are prefixed with TRANSCRIPT_
|
||||
allowed_user_events = {"STATUS", "FINAL_TITLE", "DURATION"}
|
||||
if resp.event in allowed_user_events:
|
||||
await self.ws_manager.send_json(
|
||||
room_id=f"user:{transcript.user_id}",
|
||||
message={
|
||||
"event": f"TRANSCRIPT_{resp.event}",
|
||||
"data": {"id": self.transcript_id, **resp.data},
|
||||
},
|
||||
)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def get_transcript(func):
|
||||
"""
|
||||
Decorator to fetch the transcript from the database from the first argument
|
||||
"""
|
||||
|
||||
@functools.wraps(func)
|
||||
async def wrapper(**kwargs):
|
||||
transcript_id = kwargs.pop("transcript_id")
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id=transcript_id)
|
||||
if not transcript:
|
||||
raise Exception("Transcript {transcript_id} not found")
|
||||
|
||||
# Enhanced logger with Celery task context
|
||||
tlogger = logger.bind(transcript_id=transcript.id)
|
||||
if current_task:
|
||||
tlogger = tlogger.bind(
|
||||
task_id=current_task.request.id,
|
||||
task_name=current_task.name,
|
||||
worker_hostname=current_task.request.hostname,
|
||||
task_retries=current_task.request.retries,
|
||||
transcript_id=transcript_id,
|
||||
)
|
||||
|
||||
try:
|
||||
result = await func(transcript=transcript, logger=tlogger, **kwargs)
|
||||
return result
|
||||
except Exception as exc:
|
||||
tlogger.error("Pipeline error", function_name=func.__name__, exc_info=exc)
|
||||
raise
|
||||
|
||||
return wrapper
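# Illustrative sketch: one plausible shape for the with_session_and_transcript
# decorator imported from reflector.worker.session_decorator, which takes over the
# role of the removed get_transcript wrapper above in the taskiq-based tasks. The
# real implementation is not shown in this diff; the body below is an assumption.
import functools

from reflector.db import get_session_context
from reflector.db.transcripts import transcripts_controller
from reflector.logger import logger


def with_session_and_transcript_sketch(func):
    @functools.wraps(func)
    async def wrapper(*, transcript_id: str, **kwargs):
        async with get_session_context() as session:
            transcript = await transcripts_controller.get_by_id(session, transcript_id)
            if not transcript:
                raise Exception(f"Transcript {transcript_id} not found")
            tlogger = logger.bind(transcript_id=transcript_id)
            # the real decorator may also forward transcript_id to tasks that accept it
            return await func(session, transcript=transcript, logger=tlogger, **kwargs)

    return wrapper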
|
||||
|
||||
|
||||
@@ -153,11 +107,9 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
self._ws_manager = get_ws_manager()
|
||||
return self._ws_manager
|
||||
|
||||
async def get_transcript(self) -> Transcript:
|
||||
async def get_transcript(self, session: AsyncSession) -> Transcript:
|
||||
# fetch the transcript
|
||||
result = await transcripts_controller.get_by_id(
|
||||
transcript_id=self.transcript_id
|
||||
)
|
||||
result = await transcripts_controller.get_by_id(session, self.transcript_id)
|
||||
if not result:
|
||||
raise Exception("Transcript not found")
|
||||
return result
|
||||
@@ -187,10 +139,10 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
yield
|
||||
|
||||
@asynccontextmanager
|
||||
async def transaction(self):
|
||||
async def locked_session(self):
|
||||
async with self.lock_transaction():
|
||||
async with transcripts_controller.transaction():
|
||||
yield
|
||||
async with get_session_context() as session:
|
||||
yield session
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def on_status(self, status):
|
||||
@@ -221,13 +173,17 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
|
||||
# when the status of the pipeline changes, update the transcript
|
||||
async with self._lock:
|
||||
return await transcripts_controller.set_status(self.transcript_id, status)
|
||||
async with get_session_context() as session:
|
||||
return await transcripts_controller.set_status(
|
||||
session, self.transcript_id, status
|
||||
)
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def on_transcript(self, data):
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
async with self.locked_session() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
return await transcripts_controller.append_event(
|
||||
session,
|
||||
transcript=transcript,
|
||||
event="TRANSCRIPT",
|
||||
data=TranscriptText(text=data.text, translation=data.translation),
|
||||
@@ -244,10 +200,11 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
)
|
||||
if isinstance(data, TitleSummaryWithIdProcessorType):
|
||||
topic.id = data.id
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
await transcripts_controller.upsert_topic(transcript, topic)
|
||||
async with self.locked_session() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
await transcripts_controller.upsert_topic(session, transcript, topic)
|
||||
return await transcripts_controller.append_event(
|
||||
session,
|
||||
transcript=transcript,
|
||||
event="TOPIC",
|
||||
data=topic,
|
||||
@@ -256,16 +213,18 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
@broadcast_to_sockets
|
||||
async def on_title(self, data):
|
||||
final_title = TranscriptFinalTitle(title=data.title)
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
async with self.locked_session() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
if not transcript.title:
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"title": final_title.title,
|
||||
},
|
||||
)
|
||||
return await transcripts_controller.append_event(
|
||||
session,
|
||||
transcript=transcript,
|
||||
event="FINAL_TITLE",
|
||||
data=final_title,
|
||||
@@ -274,15 +233,17 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
@broadcast_to_sockets
|
||||
async def on_long_summary(self, data):
|
||||
final_long_summary = TranscriptFinalLongSummary(long_summary=data.long_summary)
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
async with self.locked_session() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"long_summary": final_long_summary.long_summary,
|
||||
},
|
||||
)
|
||||
return await transcripts_controller.append_event(
|
||||
session,
|
||||
transcript=transcript,
|
||||
event="FINAL_LONG_SUMMARY",
|
||||
data=final_long_summary,
|
||||
@@ -293,62 +254,48 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
final_short_summary = TranscriptFinalShortSummary(
|
||||
short_summary=data.short_summary
|
||||
)
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
async with self.locked_session() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"short_summary": final_short_summary.short_summary,
|
||||
},
|
||||
)
|
||||
return await transcripts_controller.append_event(
|
||||
session,
|
||||
transcript=transcript,
|
||||
event="FINAL_SHORT_SUMMARY",
|
||||
data=final_short_summary,
|
||||
)
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def on_action_items(self, data):
|
||||
action_items = TranscriptActionItems(action_items=data.action_items)
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
await transcripts_controller.update(
|
||||
transcript,
|
||||
{
|
||||
"action_items": action_items.action_items,
|
||||
},
|
||||
)
|
||||
return await transcripts_controller.append_event(
|
||||
transcript=transcript,
|
||||
event="ACTION_ITEMS",
|
||||
data=action_items,
|
||||
)
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def on_duration(self, data):
|
||||
async with self.transaction():
|
||||
async with self.locked_session() as session:
|
||||
duration = TranscriptDuration(duration=data)
|
||||
|
||||
transcript = await self.get_transcript()
|
||||
transcript = await self.get_transcript(session)
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"duration": duration.duration,
|
||||
},
|
||||
)
|
||||
return await transcripts_controller.append_event(
|
||||
transcript=transcript, event="DURATION", data=duration
|
||||
session, transcript=transcript, event="DURATION", data=duration
|
||||
)
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def on_waveform(self, data):
|
||||
async with self.transaction():
|
||||
async with self.locked_session() as session:
|
||||
waveform = TranscriptWaveform(waveform=data)
|
||||
|
||||
transcript = await self.get_transcript()
|
||||
transcript = await self.get_transcript(session)
|
||||
|
||||
return await transcripts_controller.append_event(
|
||||
transcript=transcript, event="WAVEFORM", data=waveform
|
||||
session, transcript=transcript, event="WAVEFORM", data=waveform
|
||||
)
|
||||
|
||||
|
||||
@@ -361,7 +308,8 @@ class PipelineMainLive(PipelineMainBase):
|
||||
async def create(self) -> Pipeline:
|
||||
# create a context for the whole rtc transaction
|
||||
# add a customised logger to the context
|
||||
transcript = await self.get_transcript()
|
||||
async with get_session_context() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
|
||||
processors = [
|
||||
AudioFileWriterProcessor(
|
||||
@@ -409,7 +357,8 @@ class PipelineMainDiarization(PipelineMainBase[AudioDiarizationInput]):
|
||||
# now let's start the pipeline by pushing information to the
|
||||
# first processor diarization processor
|
||||
# XXX translation is lost when converting our data model to the processor model
|
||||
transcript = await self.get_transcript()
|
||||
async with get_session_context() as session:
|
||||
transcript = await self.get_transcript(session)
|
||||
|
||||
# diarization works only if the file is uploaded to an external storage
|
||||
if transcript.audio_location == "local":
|
||||
@@ -442,7 +391,8 @@ class PipelineMainFromTopics(PipelineMainBase[TitleSummaryWithIdProcessorType]):
|
||||
|
||||
async def create(self) -> Pipeline:
|
||||
# get transcript
|
||||
self._transcript = transcript = await self.get_transcript()
|
||||
async with get_session_context() as session:
|
||||
self._transcript = transcript = await self.get_transcript(session)
|
||||
|
||||
# create pipeline
|
||||
processors = self.get_processors()
|
||||
@@ -483,7 +433,6 @@ class PipelineMainFinalSummaries(PipelineMainFromTopics):
|
||||
transcript=self._transcript,
|
||||
callback=self.on_long_summary,
|
||||
on_short_summary=self.on_short_summary,
|
||||
on_action_items=self.on_action_items,
|
||||
),
|
||||
]
|
||||
|
||||
@@ -503,8 +452,7 @@ class PipelineMainWaveform(PipelineMainFromTopics):
|
||||
]
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_remove_upload(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_remove_upload(session, transcript: Transcript, logger: Logger):
|
||||
# for future changes: note that a consent process also happens beforehand, and users may not consent to keeping their files. Currently we delete regardless, so there is no need to check consent here.
|
||||
logger.info("Starting remove upload")
|
||||
uploads = transcript.data_path.glob("upload.*")
|
||||
@@ -513,16 +461,14 @@ async def pipeline_remove_upload(transcript: Transcript, logger: Logger):
|
||||
logger.info("Remove upload done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_waveform(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_waveform(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting waveform")
|
||||
runner = PipelineMainWaveform(transcript_id=transcript.id)
|
||||
await runner.run()
|
||||
logger.info("Waveform done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_convert_to_mp3(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_convert_to_mp3(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting convert to mp3")
|
||||
|
||||
# If the audio wav is not available, just skip
|
||||
@@ -548,8 +494,7 @@ async def pipeline_convert_to_mp3(transcript: Transcript, logger: Logger):
|
||||
logger.info("Convert to mp3 done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_upload_mp3(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_upload_mp3(session, transcript: Transcript, logger: Logger):
|
||||
if not settings.TRANSCRIPT_STORAGE_BACKEND:
|
||||
logger.info("No storage backend configured, skipping mp3 upload")
|
||||
return
|
||||
@@ -567,54 +512,53 @@ async def pipeline_upload_mp3(transcript: Transcript, logger: Logger):
|
||||
return
|
||||
|
||||
# Upload to external storage and delete the file
|
||||
await transcripts_controller.move_mp3_to_storage(transcript)
|
||||
await transcripts_controller.move_mp3_to_storage(session, transcript)
|
||||
|
||||
logger.info("Upload mp3 done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_diarization(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_diarization(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting diarization")
|
||||
runner = PipelineMainDiarization(transcript_id=transcript.id)
|
||||
await runner.run()
|
||||
logger.info("Diarization done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_title(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_title(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting title")
|
||||
runner = PipelineMainTitle(transcript_id=transcript.id)
|
||||
await runner.run()
|
||||
logger.info("Title done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_summaries(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_summaries(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting summaries")
|
||||
runner = PipelineMainFinalSummaries(transcript_id=transcript.id)
|
||||
await runner.run()
|
||||
logger.info("Summaries done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def cleanup_consent(transcript: Transcript, logger: Logger):
|
||||
async def cleanup_consent(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting consent cleanup")
|
||||
|
||||
consent_denied = False
|
||||
recording = None
|
||||
meeting = None
|
||||
try:
|
||||
if transcript.recording_id:
|
||||
recording = await recordings_controller.get_by_id(transcript.recording_id)
|
||||
recording = await recordings_controller.get_by_id(
|
||||
session, transcript.recording_id
|
||||
)
|
||||
if recording and recording.meeting_id:
|
||||
meeting = await meetings_controller.get_by_id(recording.meeting_id)
|
||||
meeting = await meetings_controller.get_by_id(
|
||||
session, recording.meeting_id
|
||||
)
|
||||
if meeting:
|
||||
consent_denied = await meeting_consent_controller.has_any_denial(
|
||||
meeting.id
|
||||
session, meeting.id
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to fetch consent: {e}", exc_info=e)
|
||||
raise
|
||||
logger.error(f"Failed to get fetch consent: {e}", exc_info=e)
|
||||
consent_denied = True
|
||||
|
||||
if not consent_denied:
|
||||
logger.info("Consent approved, keeping all files")
|
||||
@@ -622,24 +566,25 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
|
||||
|
||||
logger.info("Consent denied, cleaning up all related audio files")
|
||||
|
||||
deletion_errors = []
|
||||
if recording and recording.bucket_name:
|
||||
keys_to_delete = []
|
||||
if recording.track_keys:
|
||||
keys_to_delete = recording.track_keys
|
||||
elif recording.object_key:
|
||||
keys_to_delete = [recording.object_key]
|
||||
|
||||
master_storage = get_transcripts_storage()
|
||||
for key in keys_to_delete:
|
||||
try:
|
||||
await master_storage.delete_file(key, bucket=recording.bucket_name)
|
||||
logger.info(f"Deleted recording file: {recording.bucket_name}/{key}")
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to delete {key}: {e}"
|
||||
logger.error(error_msg, exc_info=e)
|
||||
deletion_errors.append(error_msg)
|
||||
if recording and recording.bucket_name and recording.object_key:
|
||||
s3_whereby = boto3.client(
|
||||
"s3",
|
||||
aws_access_key_id=settings.AWS_WHEREBY_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
|
||||
)
|
||||
try:
|
||||
s3_whereby.delete_object(
|
||||
Bucket=recording.bucket_name, Key=recording.object_key
|
||||
)
|
||||
logger.info(
|
||||
f"Deleted original Whereby recording: {recording.bucket_name}/{recording.object_key}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete Whereby recording: {e}", exc_info=e)
|
||||
|
||||
# non-transactional: the transcript can be marked audio_deleted even though some files were not actually deleted
|
||||
await transcripts_controller.update(session, transcript, {"audio_deleted": True})
|
||||
# 2. Delete processed audio from transcript storage S3 bucket
|
||||
if transcript.audio_location == "storage":
|
||||
storage = get_transcripts_storage()
|
||||
try:
|
||||
@@ -648,39 +593,28 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
|
||||
f"Deleted processed audio from storage: {transcript.storage_audio_path}"
|
||||
)
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to delete processed audio: {e}"
|
||||
logger.error(error_msg, exc_info=e)
|
||||
deletion_errors.append(error_msg)
|
||||
logger.error(f"Failed to delete processed audio: {e}", exc_info=e)
|
||||
|
||||
# 3. Delete local audio files
|
||||
try:
|
||||
if hasattr(transcript, "audio_mp3_filename") and transcript.audio_mp3_filename:
|
||||
transcript.audio_mp3_filename.unlink(missing_ok=True)
|
||||
if hasattr(transcript, "audio_wav_filename") and transcript.audio_wav_filename:
|
||||
transcript.audio_wav_filename.unlink(missing_ok=True)
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to delete local audio files: {e}"
|
||||
logger.error(error_msg, exc_info=e)
|
||||
deletion_errors.append(error_msg)
|
||||
logger.error(f"Failed to delete local audio files: {e}", exc_info=e)
|
||||
|
||||
if deletion_errors:
|
||||
logger.warning(
|
||||
f"Consent cleanup completed with {len(deletion_errors)} errors",
|
||||
errors=deletion_errors,
|
||||
)
|
||||
else:
|
||||
await transcripts_controller.update(transcript, {"audio_deleted": True})
|
||||
logger.info("Consent cleanup done - all audio deleted")
|
||||
logger.info("Consent cleanup done")
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_post_to_zulip(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_post_to_zulip(session, transcript: Transcript, logger: Logger):
|
||||
logger.info("Starting post to zulip")
|
||||
|
||||
if not transcript.recording_id:
|
||||
logger.info("Transcript has no recording")
|
||||
return
|
||||
|
||||
recording = await recordings_controller.get_by_id(transcript.recording_id)
|
||||
recording = await recordings_controller.get_by_id(session, transcript.recording_id)
|
||||
if not recording:
|
||||
logger.info("Recording not found")
|
||||
return
|
||||
@@ -689,12 +623,12 @@ async def pipeline_post_to_zulip(transcript: Transcript, logger: Logger):
|
||||
logger.info("Recording has no meeting")
|
||||
return
|
||||
|
||||
meeting = await meetings_controller.get_by_id(recording.meeting_id)
|
||||
meeting = await meetings_controller.get_by_id(session, recording.meeting_id)
|
||||
if not meeting:
|
||||
logger.info("No meeting found for this recording")
|
||||
return
|
||||
|
||||
room = await rooms_controller.get_by_id(meeting.room_id)
|
||||
room = await rooms_controller.get_by_id(session, meeting.room_id)
|
||||
if not room:
|
||||
logger.error(f"Missing room for a meeting {meeting.id}")
|
||||
return
|
||||
@@ -720,7 +654,7 @@ async def pipeline_post_to_zulip(transcript: Transcript, logger: Logger):
|
||||
room.zulip_stream, room.zulip_topic, message
|
||||
)
|
||||
await transcripts_controller.update(
|
||||
transcript, {"zulip_message_id": response["id"]}
|
||||
session, transcript, {"zulip_message_id": response["id"]}
|
||||
)
|
||||
|
||||
logger.info("Posted to zulip")
|
||||
@@ -731,92 +665,120 @@ async def pipeline_post_to_zulip(transcript: Transcript, logger: Logger):
|
||||
# ===================================================================
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_remove_upload(*, transcript_id: str):
|
||||
await pipeline_remove_upload(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_remove_upload(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_remove_upload(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_waveform(*, transcript_id: str):
|
||||
await pipeline_waveform(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_waveform(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_waveform(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_convert_to_mp3(*, transcript_id: str):
|
||||
await pipeline_convert_to_mp3(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_convert_to_mp3(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_convert_to_mp3(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_upload_mp3(*, transcript_id: str):
|
||||
await pipeline_upload_mp3(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_upload_mp3(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_upload_mp3(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_diarization(*, transcript_id: str):
|
||||
await pipeline_diarization(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_diarization(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_diarization(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_title(*, transcript_id: str):
|
||||
await pipeline_title(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_title(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_title(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_final_summaries(*, transcript_id: str):
|
||||
await pipeline_summaries(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_final_summaries(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
await pipeline_summaries(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_cleanup_consent(*, transcript_id: str):
|
||||
await cleanup_consent(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_cleanup_consent(session, *, transcript: Transcript, logger: Logger):
|
||||
await cleanup_consent(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_post_to_zulip(*, transcript_id: str):
|
||||
await pipeline_post_to_zulip(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_post_to_zulip(
|
||||
session, *, transcript: Transcript, logger: Logger
|
||||
):
|
||||
await pipeline_post_to_zulip(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
def pipeline_post(*, transcript_id: str):
|
||||
"""
|
||||
Run the post pipeline
|
||||
"""
|
||||
chain_mp3_and_diarize = (
|
||||
task_pipeline_waveform.si(transcript_id=transcript_id)
|
||||
| task_pipeline_convert_to_mp3.si(transcript_id=transcript_id)
|
||||
| task_pipeline_upload_mp3.si(transcript_id=transcript_id)
|
||||
| task_pipeline_remove_upload.si(transcript_id=transcript_id)
|
||||
| task_pipeline_diarization.si(transcript_id=transcript_id)
|
||||
| task_cleanup_consent.si(transcript_id=transcript_id)
|
||||
)
|
||||
chain_title_preview = task_pipeline_title.si(transcript_id=transcript_id)
|
||||
chain_final_summaries = task_pipeline_final_summaries.si(
|
||||
transcript_id=transcript_id
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_cleanup_consent_taskiq(
|
||||
session, *, transcript: Transcript, logger: Logger
|
||||
):
|
||||
await cleanup_consent(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_post_to_zulip_taskiq(
|
||||
session, *, transcript: Transcript, logger: Logger
|
||||
):
|
||||
await pipeline_post_to_zulip(session, transcript=transcript, logger=logger)
|
||||
|
||||
|
||||
async def pipeline_post(*, transcript_id: str):
|
||||
await task_pipeline_post_sequential.kiq(transcript_id=transcript_id)
|
||||
|
||||
|
||||
@taskiq_broker.task
|
||||
async def task_pipeline_post_sequential(*, transcript_id: str):
|
||||
await task_pipeline_waveform.kiq(transcript_id=transcript_id)
|
||||
await task_pipeline_convert_to_mp3.kiq(transcript_id=transcript_id)
|
||||
await task_pipeline_upload_mp3.kiq(transcript_id=transcript_id)
|
||||
await task_pipeline_remove_upload.kiq(transcript_id=transcript_id)
|
||||
await task_pipeline_diarization.kiq(transcript_id=transcript_id)
|
||||
await task_cleanup_consent.kiq(transcript_id=transcript_id)
|
||||
|
||||
await asyncio.gather(
|
||||
task_pipeline_title.kiq(transcript_id=transcript_id),
|
||||
task_pipeline_final_summaries.kiq(transcript_id=transcript_id),
|
||||
)
|
||||
|
||||
chain = chord(
|
||||
group(chain_mp3_and_diarize, chain_title_preview),
|
||||
chain_final_summaries,
|
||||
) | task_pipeline_post_to_zulip.si(transcript_id=transcript_id)
|
||||
|
||||
return chain.delay()
|
||||
await task_pipeline_post_to_zulip.kiq(transcript_id=transcript_id)
|
||||
|
||||
|
||||
@get_transcript
|
||||
async def pipeline_process(transcript: Transcript, logger: Logger):
|
||||
async def pipeline_process(session, transcript: Transcript, logger: Logger):
|
||||
try:
|
||||
if transcript.audio_location == "storage":
|
||||
await transcripts_controller.download_mp3_from_storage(transcript)
|
||||
transcript.audio_waveform_filename.unlink(missing_ok=True)
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"topics": [],
|
||||
@@ -854,6 +816,7 @@ async def pipeline_process(transcript: Transcript, logger: Logger):
|
||||
except Exception as exc:
|
||||
logger.error("Pipeline error", exc_info=exc)
|
||||
await transcripts_controller.update(
|
||||
session,
|
||||
transcript,
|
||||
{
|
||||
"status": "error",
|
||||
@@ -864,7 +827,9 @@ async def pipeline_process(transcript: Transcript, logger: Logger):
|
||||
logger.info("Pipeline ended")
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_process(*, transcript_id: str):
|
||||
return await pipeline_process(transcript_id=transcript_id)
|
||||
@taskiq_broker.task
|
||||
@with_session_and_transcript
|
||||
async def task_pipeline_process(
|
||||
session, *, transcript: Transcript, logger: Logger, transcript_id: str
|
||||
):
|
||||
return await pipeline_process(session, transcript=transcript, logger=logger)
|
||||
|
||||
@@ -1,512 +0,0 @@
|
||||
import asyncio
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import av
|
||||
from celery import chain, shared_task
|
||||
|
||||
from reflector.asynctask import asynctask
|
||||
from reflector.dailyco_api import MeetingParticipantsResponse
|
||||
from reflector.db.transcripts import (
|
||||
Transcript,
|
||||
TranscriptParticipant,
|
||||
TranscriptStatus,
|
||||
TranscriptWaveform,
|
||||
transcripts_controller,
|
||||
)
|
||||
from reflector.logger import logger
|
||||
from reflector.pipelines import topic_processing
|
||||
from reflector.pipelines.main_file_pipeline import task_send_webhook_if_needed
|
||||
from reflector.pipelines.main_live_pipeline import (
|
||||
PipelineMainBase,
|
||||
broadcast_to_sockets,
|
||||
task_cleanup_consent,
|
||||
task_pipeline_post_to_zulip,
|
||||
)
|
||||
from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
|
||||
from reflector.processors import AudioFileWriterProcessor
|
||||
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
|
||||
from reflector.processors.types import TitleSummary
|
||||
from reflector.processors.types import Transcript as TranscriptType
|
||||
from reflector.storage import Storage, get_transcripts_storage
|
||||
from reflector.utils.audio_constants import PRESIGNED_URL_EXPIRATION_SECONDS
|
||||
from reflector.utils.audio_mixdown import (
|
||||
detect_sample_rate_from_tracks,
|
||||
mixdown_tracks_pyav,
|
||||
)
|
||||
from reflector.utils.audio_padding import (
|
||||
apply_audio_padding_to_file,
|
||||
extract_stream_start_time_from_container,
|
||||
)
|
||||
from reflector.utils.daily import (
|
||||
filter_cam_audio_tracks,
|
||||
parse_daily_recording_filename,
|
||||
)
|
||||
from reflector.utils.string import NonEmptyString
|
||||
from reflector.video_platforms.factory import create_platform_client
|
||||
|
||||
|
||||
class PipelineMainMultitrack(PipelineMainBase):
|
||||
def __init__(self, transcript_id: str):
|
||||
super().__init__(transcript_id=transcript_id)
|
||||
self.logger = logger.bind(transcript_id=self.transcript_id)
|
||||
self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger)
|
||||
|
||||
async def pad_track_for_transcription(
|
||||
self,
|
||||
track_url: NonEmptyString,
|
||||
track_idx: int,
|
||||
storage: Storage,
|
||||
) -> NonEmptyString:
|
||||
"""
|
||||
Pad a single track with silence based on stream metadata start_time.
|
||||
Downloads from S3 presigned URL, processes via PyAV using tempfile, uploads to S3.
|
||||
Returns presigned URL of padded track (or original URL if no padding needed).
|
||||
|
||||
Memory usage:
|
||||
- Pattern: fixed_overhead(2-5MB) for PyAV codec/filters
|
||||
- PyAV streams input efficiently (no full download, verified)
|
||||
- Output written to tempfile (disk-based, not memory)
|
||||
- Upload streams from file handle (boto3 chunks, typically 5-10MB)
|
||||
|
||||
Daily.co raw-tracks timing - Two approaches:
|
||||
|
||||
CURRENT APPROACH (PyAV metadata):
|
||||
The WebM stream.start_time field encodes MEETING-RELATIVE timing:
|
||||
- t=0: When Daily.co recording started (first participant joined)
|
||||
- start_time=8.13s: This participant's track began 8.13s after recording started
|
||||
- Purpose: Enables track alignment without external manifest files
|
||||
|
||||
This is NOT:
|
||||
- Stream-internal offset (first packet timestamp relative to stream start)
|
||||
- Absolute/wall-clock time
|
||||
- Recording duration
|
||||
|
||||
ALTERNATIVE APPROACH (filename parsing):
|
||||
Daily.co filenames contain Unix timestamps (milliseconds):
|
||||
Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}.webm
|
||||
Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm
|
||||
|
||||
Can calculate offset: (track_start_ts - recording_start_ts) / 1000
|
||||
- Track 0: (1760988935922 - 1760988935484) / 1000 = 0.438s
|
||||
- Track 1: (1760988943823 - 1760988935484) / 1000 = 8.339s
|
||||
|
||||
TIME DIFFERENCE: PyAV metadata vs filename timestamps differ by ~209ms:
|
||||
- Track 0: filename=438ms, metadata=229ms (diff: 209ms)
|
||||
- Track 1: filename=8339ms, metadata=8130ms (diff: 209ms)
|
||||
|
||||
Consistent delta suggests network/encoding delay. PyAV metadata is ground truth
|
||||
(represents when audio stream actually started vs when file upload initiated).
|
||||
|
||||
Example with 2 participants:
|
||||
Track A: start_time=0.2s → Joined 200ms after recording began
|
||||
Track B: start_time=8.1s → Joined 8.1 seconds later
|
||||
|
||||
After padding:
|
||||
Track A: [0.2s silence] + [speech...]
|
||||
Track B: [8.1s silence] + [speech...]
|
||||
|
||||
Whisper transcription timestamps are now synchronized:
|
||||
Track A word at 5.0s → happened at meeting t=5.0s
|
||||
Track B word at 10.0s → happened at meeting t=10.0s
|
||||
|
||||
Merging just sorts by timestamp - no offset calculation needed.
|
||||
|
||||
Padding happens to involve re-encoding, which matters when combining Daily.co with Whisper:
Daily.co returns recordings with skipped frames (e.g. while the microphone is muted), and the
transcription step does not account for those gaps, causing timestamp issues in the transcript.
Re-encoding restores those frames. We do padding and re-encoding together simply because it is
convenient and more performant: we need the padded audio for the mixed mp3 anyway.
|
||||
"""
|
||||
|
||||
transcript = await self.get_transcript()
|
||||
|
||||
try:
|
||||
# PyAV streams input from S3 URL efficiently (2-5MB fixed overhead for codec/filters)
|
||||
with av.open(track_url) as in_container:
|
||||
start_time_seconds = extract_stream_start_time_from_container(
|
||||
in_container, track_idx, logger=self.logger
|
||||
)
|
||||
|
||||
if start_time_seconds <= 0:
|
||||
self.logger.info(
|
||||
f"Track {track_idx} requires no padding (start_time={start_time_seconds}s)",
|
||||
track_idx=track_idx,
|
||||
)
|
||||
return track_url
|
||||
|
||||
# Use tempfile instead of BytesIO for better memory efficiency
|
||||
# Reduces peak memory usage during encoding/upload
|
||||
with tempfile.NamedTemporaryFile(
|
||||
suffix=".webm", delete=False
|
||||
) as temp_file:
|
||||
temp_path = temp_file.name
|
||||
|
||||
try:
|
||||
apply_audio_padding_to_file(
|
||||
in_container,
|
||||
temp_path,
|
||||
start_time_seconds,
|
||||
track_idx,
|
||||
logger=self.logger,
|
||||
)
|
||||
|
||||
storage_path = (
|
||||
f"file_pipeline/{transcript.id}/tracks/padded_{track_idx}.webm"
|
||||
)
|
||||
|
||||
# Upload using file handle for streaming
|
||||
with open(temp_path, "rb") as padded_file:
|
||||
await storage.put_file(storage_path, padded_file)
|
||||
finally:
|
||||
Path(temp_path).unlink(missing_ok=True)
|
||||
|
||||
padded_url = await storage.get_file_url(
|
||||
storage_path,
|
||||
operation="get_object",
|
||||
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
|
||||
)
|
||||
|
||||
self.logger.info(
|
||||
f"Successfully padded track {track_idx}",
|
||||
track_idx=track_idx,
|
||||
start_time_seconds=start_time_seconds,
|
||||
padded_url=padded_url,
|
||||
)
|
||||
|
||||
return padded_url
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
f"Failed to process track {track_idx}",
|
||||
track_idx=track_idx,
|
||||
url=track_url,
|
||||
error=str(e),
|
||||
exc_info=True,
|
||||
)
|
||||
raise Exception(
|
||||
f"Track {track_idx} padding failed - transcript would have incorrect timestamps"
|
||||
) from e
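# Illustrative sketch, not part of the original change: the alternative filename-based
# offset calculation described in the docstring above, assuming Daily.co keys of the form
# {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}.webm with millisecond
# Unix timestamps. The helper name is hypothetical and only shows the arithmetic; the
# pipeline itself relies on the PyAV stream metadata instead.
def _offset_seconds_from_daily_key(key: str) -> float:
    stem = key.rsplit("/", 1)[-1].removesuffix(".webm")
    parts = stem.split("-")
    recording_start_ms = int(parts[0])  # first timestamp: when the recording started
    track_start_ms = int(parts[-1])  # last timestamp: when this track started
    return (track_start_ms - recording_start_ms) / 1000.0

# _offset_seconds_from_daily_key(
#     "1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm"
# ) -> 0.438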
|
||||
|
||||
async def mixdown_tracks(
|
||||
self,
|
||||
track_urls: list[str],
|
||||
writer: AudioFileWriterProcessor,
|
||||
offsets_seconds: list[float] | None = None,
|
||||
) -> None:
|
||||
"""Multi-track mixdown using PyAV filter graph (amix), reading from S3 presigned URLs."""
|
||||
target_sample_rate = detect_sample_rate_from_tracks(
|
||||
track_urls, logger=self.logger
|
||||
)
|
||||
if not target_sample_rate:
|
||||
self.logger.error("Mixdown failed - no decodable audio frames found")
|
||||
raise Exception("Mixdown failed: No decodable audio frames in any track")
|
||||
|
||||
await mixdown_tracks_pyav(
|
||||
track_urls,
|
||||
writer,
|
||||
target_sample_rate,
|
||||
offsets_seconds=offsets_seconds,
|
||||
logger=self.logger,
|
||||
)
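# Illustrative sketch, not part of the original change: the general shape of a PyAV
# "amix" filter graph like the one the docstring above refers to. This is NOT the
# project's mixdown_tracks_pyav implementation (which also handles resampling, padding
# offsets and mp3 encoding); it is a minimal, simplified example of the amix technique.
import av
import av.filter


def mix_audio_frames(urls: list[str]) -> list[av.AudioFrame]:
    containers = [av.open(u) for u in urls]
    graph = av.filter.Graph()
    sources = [graph.add_abuffer(template=c.streams.audio[0]) for c in containers]
    mixer = graph.add("amix", f"inputs={len(sources)}:duration=longest")
    sink = graph.add("abuffersink")
    for i, src in enumerate(sources):
        src.link_to(mixer, 0, i)  # source output 0 -> mixer input i
    mixer.link_to(sink)
    graph.configure()

    mixed: list[av.AudioFrame] = []
    for container, src in zip(containers, sources):
        for frame in container.decode(audio=0):
            src.push(frame)
            while True:
                try:
                    mixed.append(sink.pull())
                except (av.error.BlockingIOError, av.error.EOFError):
                    break
    # a real implementation would also flush the graph at EOF to drain remaining frames
    for container in containers:
        container.close()
    return mixed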
|
||||
|
||||
@broadcast_to_sockets
|
||||
async def set_status(self, transcript_id: str, status: TranscriptStatus):
|
||||
async with self.lock_transaction():
|
||||
return await transcripts_controller.set_status(transcript_id, status)
|
||||
|
||||
async def on_waveform(self, data):
|
||||
async with self.transaction():
|
||||
waveform = TranscriptWaveform(waveform=data)
|
||||
transcript = await self.get_transcript()
|
||||
return await transcripts_controller.append_event(
|
||||
transcript=transcript, event="WAVEFORM", data=waveform
|
||||
)
|
||||
|
||||
async def update_participants_from_daily(
|
||||
self, transcript: Transcript, track_keys: list[str]
|
||||
) -> None:
|
||||
"""Update transcript participants with user_id and names from Daily.co API."""
|
||||
if not transcript.recording_id:
|
||||
return
|
||||
|
||||
try:
|
||||
async with create_platform_client("daily") as daily_client:
|
||||
id_to_name = {}
|
||||
id_to_user_id = {}
|
||||
|
||||
try:
|
||||
rec_details = await daily_client.get_recording(
|
||||
transcript.recording_id
|
||||
)
|
||||
mtg_session_id = rec_details.mtgSessionId
|
||||
if mtg_session_id:
|
||||
try:
|
||||
payload: MeetingParticipantsResponse = (
|
||||
await daily_client.get_meeting_participants(
|
||||
mtg_session_id
|
||||
)
|
||||
)
|
||||
for p in payload.data:
|
||||
pid = p.participant_id
|
||||
name = p.user_name
|
||||
user_id = p.user_id
|
||||
if name:
|
||||
id_to_name[pid] = name
|
||||
if user_id:
|
||||
id_to_user_id[pid] = user_id
|
||||
except Exception as e:
|
||||
self.logger.warning(
|
||||
"Failed to fetch Daily meeting participants",
|
||||
error=str(e),
|
||||
mtg_session_id=mtg_session_id,
|
||||
exc_info=True,
|
||||
)
|
||||
else:
|
||||
self.logger.warning(
|
||||
"No mtgSessionId found for recording; participant names may be generic",
|
||||
recording_id=transcript.recording_id,
|
||||
)
|
||||
except Exception as e:
|
||||
self.logger.warning(
|
||||
"Failed to fetch Daily recording details",
|
||||
error=str(e),
|
||||
recording_id=transcript.recording_id,
|
||||
exc_info=True,
|
||||
)
|
||||
return
|
||||
|
||||
cam_audio_keys = filter_cam_audio_tracks(track_keys)
|
||||
|
||||
for idx, key in enumerate(cam_audio_keys):
|
||||
try:
|
||||
parsed = parse_daily_recording_filename(key)
|
||||
participant_id = parsed.participant_id
|
||||
except ValueError as e:
|
||||
self.logger.error(
|
||||
"Failed to parse Daily recording filename",
|
||||
error=str(e),
|
||||
key=key,
|
||||
exc_info=True,
|
||||
)
|
||||
continue
|
||||
|
||||
default_name = f"Speaker {idx}"
|
||||
name = id_to_name.get(participant_id, default_name)
|
||||
user_id = id_to_user_id.get(participant_id)
|
||||
|
||||
participant = TranscriptParticipant(
|
||||
id=participant_id, speaker=idx, name=name, user_id=user_id
|
||||
)
|
||||
await transcripts_controller.upsert_participant(
|
||||
transcript, participant
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.warning(
|
||||
"Failed to map participant names", error=str(e), exc_info=True
|
||||
)
|
||||
|
||||
async def process(self, bucket_name: str, track_keys: list[str]):
|
||||
transcript = await self.get_transcript()
|
||||
async with self.transaction():
|
||||
await transcripts_controller.update(
|
||||
transcript,
|
||||
{
|
||||
"events": [],
|
||||
"topics": [],
|
||||
"participants": [],
|
||||
},
|
||||
)
|
||||
|
||||
await self.update_participants_from_daily(transcript, track_keys)
|
||||
|
||||
source_storage = get_transcripts_storage()
|
||||
transcript_storage = source_storage
|
||||
|
||||
track_urls: list[str] = []
|
||||
for key in track_keys:
|
||||
url = await source_storage.get_file_url(
|
||||
key,
|
||||
operation="get_object",
|
||||
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
|
||||
bucket=bucket_name,
|
||||
)
|
||||
track_urls.append(url)
|
||||
self.logger.info(
|
||||
f"Generated presigned URL for track from {bucket_name}",
|
||||
key=key,
|
||||
)
|
||||
|
||||
created_padded_files = set()
|
||||
padded_track_urls: list[str] = []
|
||||
for idx, url in enumerate(track_urls):
|
||||
padded_url = await self.pad_track_for_transcription(
|
||||
url, idx, transcript_storage
|
||||
)
|
||||
padded_track_urls.append(padded_url)
|
||||
if padded_url != url:
|
||||
storage_path = f"file_pipeline/{transcript.id}/tracks/padded_{idx}.webm"
|
||||
created_padded_files.add(storage_path)
|
||||
self.logger.info(f"Track {idx} processed, padded URL: {padded_url}")
|
||||
|
||||
transcript.data_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
mp3_writer = AudioFileWriterProcessor(
|
||||
path=str(transcript.audio_mp3_filename),
|
||||
on_duration=self.on_duration,
|
||||
)
|
||||
await self.mixdown_tracks(padded_track_urls, mp3_writer, offsets_seconds=None)
|
||||
await mp3_writer.flush()
|
||||
|
||||
if not transcript.audio_mp3_filename.exists():
|
||||
raise Exception(
|
||||
"Mixdown failed - no MP3 file generated. Cannot proceed without playable audio."
|
||||
)
|
||||
|
||||
storage_path = f"{transcript.id}/audio.mp3"
|
||||
# Use file handle streaming to avoid loading entire MP3 into memory
|
||||
mp3_size = transcript.audio_mp3_filename.stat().st_size
|
||||
with open(transcript.audio_mp3_filename, "rb") as mp3_file:
|
||||
await transcript_storage.put_file(storage_path, mp3_file)
|
||||
mp3_url = await transcript_storage.get_file_url(storage_path)
|
||||
|
||||
await transcripts_controller.update(transcript, {"audio_location": "storage"})
|
||||
|
||||
self.logger.info(
|
||||
f"Uploaded mixed audio to storage",
|
||||
storage_path=storage_path,
|
||||
size=mp3_size,
|
||||
url=mp3_url,
|
||||
)
|
||||
|
||||
self.logger.info("Generating waveform from mixed audio")
|
||||
waveform_processor = AudioWaveformProcessor(
|
||||
audio_path=transcript.audio_mp3_filename,
|
||||
waveform_path=transcript.audio_waveform_filename,
|
||||
on_waveform=self.on_waveform,
|
||||
)
|
||||
waveform_processor.set_pipeline(self.empty_pipeline)
|
||||
await waveform_processor.flush()
|
||||
self.logger.info("Waveform generated successfully")
|
||||
|
||||
speaker_transcripts: list[TranscriptType] = []
|
||||
for idx, padded_url in enumerate(padded_track_urls):
|
||||
if not padded_url:
|
||||
continue
|
||||
|
||||
t = await self.transcribe_file(padded_url, transcript.source_language)
|
||||
|
||||
if not t.words:
|
||||
self.logger.debug(f"no words in track {idx}")
|
||||
# not skipping, it may be silence or indistinguishable mumbling
|
||||
|
||||
for w in t.words:
|
||||
w.speaker = idx
|
||||
|
||||
speaker_transcripts.append(t)
|
||||
self.logger.info(
|
||||
f"Track {idx} transcribed successfully with {len(t.words)} words",
|
||||
track_idx=idx,
|
||||
)
|
||||
|
||||
valid_track_count = len([url for url in padded_track_urls if url])
|
||||
if valid_track_count > 0 and len(speaker_transcripts) != valid_track_count:
|
||||
raise Exception(
|
||||
f"Only {len(speaker_transcripts)}/{valid_track_count} tracks transcribed successfully. "
|
||||
f"All tracks must succeed to avoid incomplete transcripts."
|
||||
)
|
||||
|
||||
if not speaker_transcripts:
|
||||
raise Exception("No valid track transcriptions")
|
||||
|
||||
self.logger.info(f"Cleaning up {len(created_padded_files)} temporary S3 files")
|
||||
cleanup_tasks = []
|
||||
for storage_path in created_padded_files:
|
||||
cleanup_tasks.append(transcript_storage.delete_file(storage_path))
|
||||
|
||||
if cleanup_tasks:
|
||||
cleanup_results = await asyncio.gather(
|
||||
*cleanup_tasks, return_exceptions=True
|
||||
)
|
||||
for storage_path, result in zip(created_padded_files, cleanup_results):
|
||||
if isinstance(result, Exception):
|
||||
self.logger.warning(
|
||||
"Failed to cleanup temporary padded track",
|
||||
storage_path=storage_path,
|
||||
error=str(result),
|
||||
)
|
||||
|
||||
merged_words = []
|
||||
for t in speaker_transcripts:
|
||||
merged_words.extend(t.words)
|
||||
merged_words.sort(
|
||||
key=lambda w: w.start if hasattr(w, "start") and w.start is not None else 0
|
||||
)
|
||||
|
||||
merged_transcript = TranscriptType(words=merged_words, translation=None)
|
||||
|
||||
await self.on_transcript(merged_transcript)
|
||||
|
||||
topics = await self.detect_topics(merged_transcript, transcript.target_language)
|
||||
await asyncio.gather(
|
||||
self.generate_title(topics),
|
||||
self.generate_summaries(topics),
|
||||
return_exceptions=False,
|
||||
)
|
||||
|
||||
await self.set_status(transcript.id, "ended")
|
||||
|
||||
async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
|
||||
return await transcribe_file_with_processor(audio_url, language)
|
||||
|
||||
async def detect_topics(
|
||||
self, transcript: TranscriptType, target_language: str
|
||||
) -> list[TitleSummary]:
|
||||
return await topic_processing.detect_topics(
|
||||
transcript,
|
||||
target_language,
|
||||
on_topic_callback=self.on_topic,
|
||||
empty_pipeline=self.empty_pipeline,
|
||||
)
|
||||
|
||||
async def generate_title(self, topics: list[TitleSummary]):
|
||||
return await topic_processing.generate_title(
|
||||
topics,
|
||||
on_title_callback=self.on_title,
|
||||
empty_pipeline=self.empty_pipeline,
|
||||
logger=self.logger,
|
||||
)
|
||||
|
||||
async def generate_summaries(self, topics: list[TitleSummary]):
|
||||
transcript = await self.get_transcript()
|
||||
return await topic_processing.generate_summaries(
|
||||
topics,
|
||||
transcript,
|
||||
on_long_summary_callback=self.on_long_summary,
|
||||
on_short_summary_callback=self.on_short_summary,
|
||||
on_action_items_callback=self.on_action_items,
|
||||
empty_pipeline=self.empty_pipeline,
|
||||
logger=self.logger,
|
||||
)
|
||||
|
||||
|
||||
@shared_task
|
||||
@asynctask
|
||||
async def task_pipeline_multitrack_process(
|
||||
*, transcript_id: str, bucket_name: str, track_keys: list[str]
|
||||
):
|
||||
pipeline = PipelineMainMultitrack(transcript_id=transcript_id)
|
||||
try:
|
||||
await pipeline.set_status(transcript_id, "processing")
|
||||
await pipeline.process(bucket_name, track_keys)
|
||||
except Exception:
|
||||
await pipeline.set_status(transcript_id, "error")
|
||||
raise
|
||||
|
||||
post_chain = chain(
|
||||
task_cleanup_consent.si(transcript_id=transcript_id),
|
||||
task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
|
||||
task_send_webhook_if_needed.si(transcript_id=transcript_id),
|
||||
)
|
||||
post_chain.delay()
|
||||
@@ -1,113 +0,0 @@
|
||||
"""
|
||||
Topic processing utilities
|
||||
==========================
|
||||
|
||||
Shared topic detection, title generation, and summarization logic
|
||||
used across file and multitrack pipelines.
|
||||
"""
|
||||
|
||||
from typing import Callable
|
||||
|
||||
import structlog
|
||||
|
||||
from reflector.db.transcripts import Transcript
|
||||
from reflector.processors import (
|
||||
TranscriptFinalSummaryProcessor,
|
||||
TranscriptFinalTitleProcessor,
|
||||
TranscriptTopicDetectorProcessor,
|
||||
)
|
||||
from reflector.processors.types import TitleSummary
|
||||
from reflector.processors.types import Transcript as TranscriptType
|
||||
|
||||
|
||||
class EmptyPipeline:
|
||||
def __init__(self, logger: structlog.BoundLogger):
|
||||
self.logger = logger
|
||||
|
||||
def get_pref(self, k, d=None):
|
||||
return d
|
||||
|
||||
async def emit(self, event):
|
||||
pass
|
||||
|
||||
|
||||
async def detect_topics(
|
||||
transcript: TranscriptType,
|
||||
target_language: str,
|
||||
*,
|
||||
on_topic_callback: Callable,
|
||||
empty_pipeline: EmptyPipeline,
|
||||
) -> list[TitleSummary]:
|
||||
chunk_size = 300
|
||||
topics: list[TitleSummary] = []
|
||||
|
||||
async def on_topic(topic: TitleSummary):
|
||||
topics.append(topic)
|
||||
return await on_topic_callback(topic)
|
||||
|
||||
topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
|
||||
topic_detector.set_pipeline(empty_pipeline)
|
||||
|
||||
for i in range(0, len(transcript.words), chunk_size):
|
||||
chunk_words = transcript.words[i : i + chunk_size]
|
||||
if not chunk_words:
|
||||
continue
|
||||
|
||||
chunk_transcript = TranscriptType(
|
||||
words=chunk_words, translation=transcript.translation
|
||||
)
|
||||
|
||||
await topic_detector.push(chunk_transcript)
|
||||
|
||||
await topic_detector.flush()
|
||||
return topics
|
||||
|
||||
|
||||
async def generate_title(
|
||||
topics: list[TitleSummary],
|
||||
*,
|
||||
on_title_callback: Callable,
|
||||
empty_pipeline: EmptyPipeline,
|
||||
logger: structlog.BoundLogger,
|
||||
):
|
||||
if not topics:
|
||||
logger.warning("No topics for title generation")
|
||||
return
|
||||
|
||||
processor = TranscriptFinalTitleProcessor(callback=on_title_callback)
|
||||
processor.set_pipeline(empty_pipeline)
|
||||
|
||||
for topic in topics:
|
||||
await processor.push(topic)
|
||||
|
||||
await processor.flush()
|
||||
|
||||
|
||||
async def generate_summaries(
|
||||
topics: list[TitleSummary],
|
||||
transcript: Transcript,
|
||||
*,
|
||||
on_long_summary_callback: Callable,
|
||||
on_short_summary_callback: Callable,
|
||||
on_action_items_callback: Callable,
|
||||
empty_pipeline: EmptyPipeline,
|
||||
logger: structlog.BoundLogger,
|
||||
):
|
||||
if not topics:
|
||||
logger.warning("No topics for summary generation")
|
||||
return
|
||||
|
||||
processor_kwargs = {
|
||||
"transcript": transcript,
|
||||
"callback": on_long_summary_callback,
|
||||
"on_short_summary": on_short_summary_callback,
|
||||
"on_action_items": on_action_items_callback,
|
||||
}
|
||||
|
||||
processor = TranscriptFinalSummaryProcessor(**processor_kwargs)
|
||||
processor.set_pipeline(empty_pipeline)
|
||||
|
||||
for topic in topics:
|
||||
await processor.push(topic)
|
||||
|
||||
await processor.flush()
|
||||
@@ -1,34 +0,0 @@
|
||||
from reflector.processors.file_transcript import FileTranscriptInput
|
||||
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
|
||||
from reflector.processors.types import Transcript as TranscriptType
|
||||
|
||||
|
||||
async def transcribe_file_with_processor(
|
||||
audio_url: str,
|
||||
language: str,
|
||||
processor_name: str | None = None,
|
||||
) -> TranscriptType:
|
||||
processor = (
|
||||
FileTranscriptAutoProcessor(name=processor_name)
|
||||
if processor_name
|
||||
else FileTranscriptAutoProcessor()
|
||||
)
|
||||
input_data = FileTranscriptInput(audio_url=audio_url, language=language)
|
||||
|
||||
result: TranscriptType | None = None
|
||||
|
||||
async def capture_result(transcript):
|
||||
nonlocal result
|
||||
result = transcript
|
||||
|
||||
processor.on(capture_result)
|
||||
await processor.push(input_data)
|
||||
await processor.flush()
|
||||
|
||||
if not result:
|
||||
processor_label = processor_name or "default"
|
||||
raise ValueError(
|
||||
f"No transcript captured from {processor_label} processor for audio: {audio_url}"
|
||||
)
|
||||
|
||||
return result
|
||||
@@ -56,16 +56,6 @@ class FileTranscriptModalProcessor(FileTranscriptProcessor):
|
||||
},
|
||||
follow_redirects=True,
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
error_body = response.text
|
||||
self.logger.error(
|
||||
"Modal API error",
|
||||
audio_url=data.audio_url,
|
||||
status_code=response.status_code,
|
||||
error_body=error_body,
|
||||
)
|
||||
|
||||
response.raise_for_status()
|
||||
result = response.json()
|
||||
|
||||
|
||||
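The block above logs the Modal response body before calling raise_for_status(), which is the part that makes 4xx/5xx failures debuggable. A minimal sketch of that pattern with httpx; the URL and payload are placeholders, not the Modal API contract:

import httpx
import structlog

logger = structlog.get_logger()


async def post_json(url: str, payload: dict) -> dict:
    async with httpx.AsyncClient(follow_redirects=True) as client:
        response = await client.post(url, json=payload)
        if response.status_code != 200:
            # log the body before raising so the failure is easy to diagnose
            logger.error(
                "API error",
                status_code=response.status_code,
                error_body=response.text,
            )
        response.raise_for_status()
        return response.json()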
@@ -96,36 +96,6 @@ RECAP_PROMPT = dedent(
|
||||
"""
|
||||
).strip()
|
||||
|
||||
ACTION_ITEMS_PROMPT = dedent(
|
||||
"""
|
||||
Identify action items from this meeting transcript. Your goal is to identify what was decided and what needs to happen next.
|
||||
|
||||
Look for:
|
||||
|
||||
1. **Decisions Made**: Any decisions, choices, or conclusions reached during the meeting. For each decision:
|
||||
- What was decided? (be specific)
|
||||
- Who made the decision or was involved? (use actual participant names)
|
||||
- Why was this decision made? (key factors, reasoning, or rationale)
|
||||
|
||||
2. **Next Steps / Action Items**: Any tasks, follow-ups, or actions that were mentioned or assigned. For each action item:
|
||||
- What specific task needs to be done? (be concrete and actionable)
|
||||
- Who is responsible? (use actual participant names if mentioned, or "team" if unclear)
|
||||
- When is it due? (any deadlines, timeframes, or "by next meeting" type commitments)
|
||||
- What context is needed? (any additional details that help understand the task)
|
||||
|
||||
Guidelines:
|
||||
- Be thorough and identify all action items, even if they seem minor
|
||||
- Include items that were agreed upon, assigned, or committed to
|
||||
- Include decisions even if they seem obvious or implicit
|
||||
- If someone says "I'll do X" or "We should do Y", that's an action item
|
||||
- If someone says "Let's go with option A", that's a decision
|
||||
- Use the exact participant names from the transcript
|
||||
- If no participant name is mentioned, you can leave assigned_to/decided_by as null
|
||||
|
||||
Only return empty lists if the transcript contains NO decisions and NO action items whatsoever.
|
||||
"""
|
||||
).strip()
|
||||
|
||||
STRUCTURED_RESPONSE_PROMPT_TEMPLATE = dedent(
|
||||
"""
|
||||
Based on the following analysis, provide the information in the requested JSON format:
|
||||
@@ -185,53 +155,6 @@ class SubjectsResponse(BaseModel):
|
||||
)
|
||||
|
||||
|
||||
class ActionItem(BaseModel):
|
||||
"""A single action item from the meeting"""
|
||||
|
||||
task: str = Field(description="The task or action item to be completed")
|
||||
assigned_to: str | None = Field(
|
||||
default=None, description="Person or team assigned to this task (name)"
|
||||
)
|
||||
assigned_to_participant_id: str | None = Field(
|
||||
default=None, description="Participant ID if assigned_to matches a participant"
|
||||
)
|
||||
deadline: str | None = Field(
|
||||
default=None, description="Deadline or timeframe mentioned for this task"
|
||||
)
|
||||
context: str | None = Field(
|
||||
default=None, description="Additional context or notes about this task"
|
||||
)
|
||||
|
||||
|
||||
class Decision(BaseModel):
|
||||
"""A decision made during the meeting"""
|
||||
|
||||
decision: str = Field(description="What was decided")
|
||||
rationale: str | None = Field(
|
||||
default=None,
|
||||
description="Reasoning or key factors that influenced this decision",
|
||||
)
|
||||
decided_by: str | None = Field(
|
||||
default=None, description="Person or group who made the decision (name)"
|
||||
)
|
||||
decided_by_participant_id: str | None = Field(
|
||||
default=None, description="Participant ID if decided_by matches a participant"
|
||||
)
|
||||
|
||||
|
||||
class ActionItemsResponse(BaseModel):
|
||||
"""Pydantic model for identified action items"""
|
||||
|
||||
decisions: list[Decision] = Field(
|
||||
default_factory=list,
|
||||
description="List of decisions made during the meeting",
|
||||
)
|
||||
next_steps: list[ActionItem] = Field(
|
||||
default_factory=list,
|
||||
description="List of action items and next steps to be taken",
|
||||
)
|
||||
|
||||
|
||||
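A hedged example of what these models accept once the LLM returns structured JSON; the payload below is invented for illustration:

payload = {
    "decisions": [
        {
            "decision": "Go with option A",
            "decided_by": "John",
            "rationale": "Lowest migration cost",
        }
    ],
    "next_steps": [
        {
            "task": "Draft the migration plan",
            "assigned_to": "John",
            "deadline": "by next meeting",
        }
    ],
}
items = ActionItemsResponse.model_validate(payload)
assert items.decisions[0].decided_by_participant_id is None  # filled later by the name->id mapping
assert items.next_steps[0].task == "Draft the migration plan"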
class SummaryBuilder:
|
||||
def __init__(self, llm: LLM, filename: str | None = None, logger=None) -> None:
|
||||
self.transcript: str | None = None
|
||||
@@ -242,9 +165,6 @@ class SummaryBuilder:
|
||||
self.llm: LLM = llm
|
||||
self.model_name: str = llm.model_name
|
||||
self.logger = logger or structlog.get_logger()
|
||||
self.participant_instructions: str | None = None
|
||||
self.action_items: ActionItemsResponse | None = None
|
||||
self.participant_name_to_id: dict[str, str] = {}
|
||||
if filename:
|
||||
self.read_transcript_from_file(filename)
|
||||
|
||||
@@ -268,81 +188,17 @@ class SummaryBuilder:
|
||||
self.llm = llm
|
||||
|
||||
async def _get_structured_response(
|
||||
self,
|
||||
prompt: str,
|
||||
output_cls: Type[T],
|
||||
tone_name: str | None = None,
|
||||
timeout: int | None = None,
|
||||
self, prompt: str, output_cls: Type[T], tone_name: str | None = None
|
||||
) -> T:
|
||||
"""Generic function to get structured output from LLM for non-function-calling models."""
|
||||
enhanced_prompt = self._enhance_prompt_with_participants(prompt)
|
||||
return await self.llm.get_structured_response(
|
||||
enhanced_prompt,
|
||||
[self.transcript],
|
||||
output_cls,
|
||||
tone_name=tone_name,
|
||||
timeout=timeout,
|
||||
prompt, [self.transcript], output_cls, tone_name=tone_name
|
||||
)
|
||||
|
||||
async def _get_response(
|
||||
self, prompt: str, texts: list[str], tone_name: str | None = None
|
||||
) -> str:
|
||||
"""Get text response with automatic participant instructions injection."""
|
||||
enhanced_prompt = self._enhance_prompt_with_participants(prompt)
|
||||
return await self.llm.get_response(enhanced_prompt, texts, tone_name=tone_name)
|
||||
|
||||
def _enhance_prompt_with_participants(self, prompt: str) -> str:
|
||||
"""Add participant instructions to any prompt if participants are known."""
|
||||
if self.participant_instructions:
|
||||
self.logger.debug("Adding participant instructions to prompt")
|
||||
return f"{prompt}\n\n{self.participant_instructions}"
|
||||
return prompt
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Participants
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
def set_known_participants(
|
||||
self,
|
||||
participants: list[str],
|
||||
participant_name_to_id: dict[str, str] | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Set known participants directly without LLM identification.
|
||||
This is used when participants are already identified and stored.
|
||||
They are appended at the end of the transcript, providing more context for the assistant.
|
||||
|
||||
Args:
|
||||
participants: List of participant names
|
||||
participant_name_to_id: Optional mapping of participant names to their IDs
|
||||
"""
|
||||
if not participants:
|
||||
self.logger.warning("No participants provided")
|
||||
return
|
||||
|
||||
self.logger.info(
|
||||
"Using known participants",
|
||||
participants=participants,
|
||||
)
|
||||
|
||||
if participant_name_to_id:
|
||||
self.participant_name_to_id = participant_name_to_id
|
||||
|
||||
participants_md = self.format_list_md(participants)
|
||||
self.transcript += f"\n\n# Participants\n\n{participants_md}"
|
||||
|
||||
participants_list = ", ".join(participants)
|
||||
self.participant_instructions = dedent(
|
||||
f"""
|
||||
# IMPORTANT: Participant Names
|
||||
The following participants are identified in this conversation: {participants_list}
|
||||
|
||||
You MUST use these specific participant names when referring to people in your response.
|
||||
Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc.
|
||||
Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested...").
|
||||
"""
|
||||
).strip()
|
||||
|
||||
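A hedged usage sketch for set_known_participants(); the LLM instance and the names are illustrative assumptions:

builder = SummaryBuilder(llm=my_llm)  # my_llm: any configured LLM instance
builder.set_transcript("SPEAKER_00: shall we start?\nSPEAKER_01: yes, let's go")
builder.set_known_participants(
    ["Alice", "Bob"],
    participant_name_to_id={"Alice": "participant-1", "Bob": "participant-2"},
)
# The transcript now ends with a "# Participants" section, and every prompt built
# through _enhance_prompt_with_participants() carries the naming instructions.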
async def identify_participants(self) -> None:
|
||||
"""
|
||||
From a transcript, try to identify the participants using TreeSummarize with structured output.
|
||||
@@ -376,19 +232,6 @@ class SummaryBuilder:
|
||||
if unique_participants:
|
||||
participants_md = self.format_list_md(unique_participants)
|
||||
self.transcript += f"\n\n# Participants\n\n{participants_md}"
|
||||
|
||||
# Set instructions that will be automatically added to all prompts
|
||||
participants_list = ", ".join(unique_participants)
|
||||
self.participant_instructions = dedent(
|
||||
f"""
|
||||
# IMPORTANT: Participant Names
|
||||
The following participants are identified in this conversation: {participants_list}
|
||||
|
||||
You MUST use these specific participant names when referring to people in your response.
|
||||
Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc.
|
||||
Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested...").
|
||||
"""
|
||||
).strip()
|
||||
else:
|
||||
self.logger.warning("No participants identified in the transcript")
|
||||
|
||||
@@ -475,13 +318,13 @@ class SummaryBuilder:
|
||||
for subject in self.subjects:
|
||||
detailed_prompt = DETAILED_SUBJECT_PROMPT_TEMPLATE.format(subject=subject)
|
||||
|
||||
detailed_response = await self._get_response(
|
||||
detailed_response = await self.llm.get_response(
|
||||
detailed_prompt, [self.transcript], tone_name="Topic assistant"
|
||||
)
|
||||
|
||||
paragraph_prompt = PARAGRAPH_SUMMARY_PROMPT
|
||||
|
||||
paragraph_response = await self._get_response(
|
||||
paragraph_response = await self.llm.get_response(
|
||||
paragraph_prompt, [str(detailed_response)], tone_name="Topic summarizer"
|
||||
)
|
||||
|
||||
@@ -502,99 +345,13 @@ class SummaryBuilder:
|
||||
|
||||
recap_prompt = RECAP_PROMPT
|
||||
|
||||
recap_response = await self._get_response(
|
||||
recap_response = await self.llm.get_response(
|
||||
recap_prompt, [summaries_text], tone_name="Recap summarizer"
|
||||
)
|
||||
|
||||
self.recap = str(recap_response)
|
||||
self.logger.info(f"Quick recap: {self.recap}")
|
||||
|
||||
def _map_participant_names_to_ids(
|
||||
self, response: ActionItemsResponse
|
||||
) -> ActionItemsResponse:
|
||||
"""Map participant names in action items to participant IDs."""
|
||||
if not self.participant_name_to_id:
|
||||
return response
|
||||
|
||||
decisions = []
|
||||
for decision in response.decisions:
|
||||
new_decision = decision.model_copy()
|
||||
if (
|
||||
decision.decided_by
|
||||
and decision.decided_by in self.participant_name_to_id
|
||||
):
|
||||
new_decision.decided_by_participant_id = self.participant_name_to_id[
|
||||
decision.decided_by
|
||||
]
|
||||
decisions.append(new_decision)
|
||||
|
||||
next_steps = []
|
||||
for item in response.next_steps:
|
||||
new_item = item.model_copy()
|
||||
if item.assigned_to and item.assigned_to in self.participant_name_to_id:
|
||||
new_item.assigned_to_participant_id = self.participant_name_to_id[
|
||||
item.assigned_to
|
||||
]
|
||||
next_steps.append(new_item)
|
||||
|
||||
return ActionItemsResponse(decisions=decisions, next_steps=next_steps)
|
||||
|
||||
async def identify_action_items(self) -> ActionItemsResponse | None:
|
||||
"""Identify action items (decisions and next steps) from the transcript."""
|
||||
self.logger.info("--- identify action items using TreeSummarize")
|
||||
|
||||
if not self.transcript:
|
||||
self.logger.warning(
|
||||
"No transcript available for action items identification"
|
||||
)
|
||||
self.action_items = None
|
||||
return None
|
||||
|
||||
action_items_prompt = ACTION_ITEMS_PROMPT
|
||||
|
||||
try:
|
||||
response = await self._get_structured_response(
|
||||
action_items_prompt,
|
||||
ActionItemsResponse,
|
||||
tone_name="Action item identifier",
|
||||
timeout=settings.LLM_STRUCTURED_RESPONSE_TIMEOUT,
|
||||
)
|
||||
|
||||
response = self._map_participant_names_to_ids(response)
|
||||
|
||||
self.action_items = response
|
||||
self.logger.info(
|
||||
f"Identified {len(response.decisions)} decisions and {len(response.next_steps)} action items",
|
||||
decisions_count=len(response.decisions),
|
||||
next_steps_count=len(response.next_steps),
|
||||
)
|
||||
|
||||
if response.decisions:
|
||||
self.logger.debug(
|
||||
"Decisions identified",
|
||||
decisions=[d.decision for d in response.decisions],
|
||||
)
|
||||
if response.next_steps:
|
||||
self.logger.debug(
|
||||
"Action items identified",
|
||||
tasks=[item.task for item in response.next_steps],
|
||||
)
|
||||
if not response.decisions and not response.next_steps:
|
||||
self.logger.warning(
|
||||
"No action items identified from transcript",
|
||||
transcript_length=len(self.transcript),
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
f"Error identifying action items: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
self.action_items = None
|
||||
return None
|
||||
|
||||
async def generate_summary(self, only_subjects: bool = False) -> None:
|
||||
"""
|
||||
Generate summary by extracting subjects, creating summaries for each, and generating a recap.
|
||||
@@ -606,7 +363,6 @@ class SummaryBuilder:
|
||||
|
||||
await self.generate_subject_summaries()
|
||||
await self.generate_recap()
|
||||
await self.identify_action_items()
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Markdown
|
||||
@@ -709,6 +465,8 @@ if __name__ == "__main__":
|
||||
if args.summary:
|
||||
await sm.generate_summary()
|
||||
|
||||
# Note: action items generation has been removed
|
||||
|
||||
print("")
|
||||
print("-" * 80)
|
||||
print("")
|
||||
|
||||
@@ -1,12 +1,7 @@
|
||||
from reflector.llm import LLM
|
||||
from reflector.processors.base import Processor
|
||||
from reflector.processors.summary.summary_builder import SummaryBuilder
|
||||
from reflector.processors.types import (
|
||||
ActionItems,
|
||||
FinalLongSummary,
|
||||
FinalShortSummary,
|
||||
TitleSummary,
|
||||
)
|
||||
from reflector.processors.types import FinalLongSummary, FinalShortSummary, TitleSummary
|
||||
from reflector.settings import settings
|
||||
|
||||
|
||||
@@ -31,30 +26,7 @@ class TranscriptFinalSummaryProcessor(Processor):
|
||||
async def get_summary_builder(self, text) -> SummaryBuilder:
|
||||
builder = SummaryBuilder(self.llm, logger=self.logger)
|
||||
builder.set_transcript(text)
|
||||
|
||||
if self.transcript and self.transcript.participants:
|
||||
participant_names = [p.name for p in self.transcript.participants if p.name]
|
||||
if participant_names:
|
||||
self.logger.info(
|
||||
f"Using {len(participant_names)} known participants from transcript"
|
||||
)
|
||||
participant_name_to_id = {
|
||||
p.name: p.id
|
||||
for p in self.transcript.participants
|
||||
if p.name and p.id
|
||||
}
|
||||
builder.set_known_participants(
|
||||
participant_names, participant_name_to_id=participant_name_to_id
|
||||
)
|
||||
else:
|
||||
self.logger.info(
|
||||
"Participants field exists but is empty, identifying participants"
|
||||
)
|
||||
await builder.identify_participants()
|
||||
else:
|
||||
self.logger.info("No participants stored, identifying participants")
|
||||
await builder.identify_participants()
|
||||
|
||||
await builder.identify_participants()
|
||||
await builder.generate_summary()
|
||||
return builder
|
||||
|
||||
@@ -73,31 +45,22 @@ class TranscriptFinalSummaryProcessor(Processor):
|
||||
self.logger.warning("No summary to output")
|
||||
return
|
||||
|
||||
# build the speakermap from the transcript
|
||||
speakermap = {}
|
||||
if self.transcript:
|
||||
speakermap = {
|
||||
p.speaker: p.name
|
||||
for p in (self.transcript.participants or [])
|
||||
if p.speaker is not None and p.name
|
||||
participant["speaker"]: participant["name"]
|
||||
for participant in self.transcript.participants
|
||||
}
|
||||
self.logger.info(
|
||||
f"Built speaker map with {len(speakermap)} participants",
|
||||
speakermap=speakermap,
|
||||
)
|
||||
|
||||
# build the transcript as a single string
|
||||
# XXX: unsure if the participant names are replaced directly in the speaker field?
|
||||
text_transcript = []
|
||||
unique_speakers = set()
|
||||
for topic in self.chunks:
|
||||
for segment in topic.transcript.as_segments():
|
||||
name = speakermap.get(segment.speaker, f"Speaker {segment.speaker}")
|
||||
unique_speakers.add((segment.speaker, name))
|
||||
text_transcript.append(f"{name}: {segment.text}")
|
||||
|
||||
self.logger.info(
|
||||
f"Built transcript with {len(unique_speakers)} unique speakers",
|
||||
speakers=list(unique_speakers),
|
||||
)
|
||||
|
||||
text_transcript = "\n".join(text_transcript)
|
||||
|
||||
last_chunk = self.chunks[-1]
|
||||
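A standalone sketch of the speaker-map and transcript-flattening step above; the participant and segment shapes are simplified stand-ins for the real models:

participants = [
    {"speaker": 0, "name": "Alice"},
    {"speaker": 1, "name": "Bob"},
]
speakermap = {p["speaker"]: p["name"] for p in participants}

segments = [(0, "Shall we start?"), (1, "Yes, let's go."), (2, "Sorry, I'm late.")]
lines = [
    f"{speakermap.get(speaker, f'Speaker {speaker}')}: {text}"
    for speaker, text in segments
]
text_transcript = "\n".join(lines)
# -> "Alice: Shall we start?\nBob: Yes, let's go.\nSpeaker 2: Sorry, I'm late."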
@@ -118,9 +81,4 @@ class TranscriptFinalSummaryProcessor(Processor):
|
||||
)
|
||||
await self.emit(final_short_summary, name="short_summary")
|
||||
|
||||
if self.builder and self.builder.action_items:
|
||||
action_items = self.builder.action_items.model_dump()
|
||||
action_items = ActionItems(action_items=action_items)
|
||||
await self.emit(action_items, name="action_items")
|
||||
|
||||
await self.emit(final_long_summary)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from textwrap import dedent
|
||||
|
||||
from pydantic import AliasChoices, BaseModel, Field
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from reflector.llm import LLM
|
||||
from reflector.processors.base import Processor
|
||||
@@ -34,14 +34,8 @@ TOPIC_PROMPT = dedent(
|
||||
class TopicResponse(BaseModel):
|
||||
"""Structured response for topic detection"""
|
||||
|
||||
title: str = Field(
|
||||
description="A descriptive title for the topic being discussed",
|
||||
validation_alias=AliasChoices("title", "Title"),
|
||||
)
|
||||
summary: str = Field(
|
||||
description="A concise 1-2 sentence summary of the discussion",
|
||||
validation_alias=AliasChoices("summary", "Summary"),
|
||||
)
|
||||
title: str = Field(description="A descriptive title for the topic being discussed")
|
||||
summary: str = Field(description="A concise 1-2 sentence summary of the discussion")
|
||||
|
||||
|
||||
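The difference between the two TopicResponse variants above is tolerance for key casing coming back from the LLM. A minimal pydantic sketch (the model name here is illustrative):

from pydantic import AliasChoices, BaseModel, Field


class LenientTopicResponse(BaseModel):
    title: str = Field(validation_alias=AliasChoices("title", "Title"))
    summary: str = Field(validation_alias=AliasChoices("summary", "Summary"))


LenientTopicResponse.model_validate({"Title": "Budget", "Summary": "Q3 numbers reviewed."})
LenientTopicResponse.model_validate({"title": "Budget", "summary": "Q3 numbers reviewed."})
# Both validate; without the aliases, the capitalised keys are rejected.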
class TranscriptTopicDetectorProcessor(Processor):
|
||||
@@ -78,11 +72,7 @@ class TranscriptTopicDetectorProcessor(Processor):
|
||||
"""
|
||||
prompt = TOPIC_PROMPT.format(text=text)
|
||||
response = await self.llm.get_structured_response(
|
||||
prompt,
|
||||
[text],
|
||||
TopicResponse,
|
||||
tone_name="Topic analyzer",
|
||||
timeout=settings.LLM_STRUCTURED_RESPONSE_TIMEOUT,
|
||||
prompt, [text], TopicResponse, tone_name="Topic analyzer"
|
||||
)
|
||||
return response
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import io
|
||||
import re
|
||||
import tempfile
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
from typing import Annotated, TypedDict
|
||||
|
||||
@@ -17,17 +16,6 @@ class DiarizationSegment(TypedDict):
|
||||
|
||||
|
||||
PUNC_RE = re.compile(r"[.;:?!…]")
|
||||
SENTENCE_END_RE = re.compile(r"[.?!…]$")
|
||||
|
||||
# Max segment length for words_to_segments() - breaks on any punctuation (. ; : ? ! …)
|
||||
# when segment exceeds this limit. Used for non-multitrack recordings.
|
||||
MAX_SEGMENT_CHARS = 120
|
||||
|
||||
# Max segment length for words_to_segments_by_sentence() - only breaks on sentence-ending
|
||||
# punctuation (. ? ! …) when segment exceeds this limit. Higher threshold allows complete
|
||||
# sentences in multitrack recordings where speakers overlap.
|
||||
# similar number to server/reflector/processors/transcript_liner.py
|
||||
MAX_SENTENCE_SEGMENT_CHARS = 1000
|
||||
|
||||
|
||||
class AudioFile(BaseModel):
|
||||
@@ -88,6 +76,7 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:
|
||||
# but separate if the speaker changes, or if the punctuation is a . , ; : ? !
|
||||
segments = []
|
||||
current_segment = None
|
||||
MAX_SEGMENT_LENGTH = 120
|
||||
|
||||
for word in words:
|
||||
if current_segment is None:
|
||||
@@ -117,7 +106,7 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:
|
||||
current_segment.end = word.end
|
||||
|
||||
have_punc = PUNC_RE.search(word.text)
|
||||
if have_punc and (len(current_segment.text) > MAX_SEGMENT_CHARS):
|
||||
if have_punc and (len(current_segment.text) > MAX_SEGMENT_LENGTH):
|
||||
segments.append(current_segment)
|
||||
current_segment = None
|
||||
|
||||
@@ -127,70 +116,6 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:
|
||||
return segments
|
||||
|
||||
|
||||
def words_to_segments_by_sentence(words: list[Word]) -> list[TranscriptSegment]:
|
||||
"""Group words by speaker, then split into sentences.
|
||||
|
||||
For multitrack recordings where words from different speakers are interleaved
|
||||
by timestamp, this function first groups all words by speaker, then creates
|
||||
segments based on sentence boundaries within each speaker's words.
|
||||
|
||||
This produces cleaner output than words_to_segments() which breaks on every
|
||||
speaker change, resulting in many tiny segments when speakers overlap.
|
||||
"""
|
||||
if not words:
|
||||
return []
|
||||
|
||||
# Group words by speaker, preserving order within each speaker
|
||||
by_speaker: dict[int, list[Word]] = defaultdict(list)
|
||||
for w in words:
|
||||
by_speaker[w.speaker].append(w)
|
||||
|
||||
segments: list[TranscriptSegment] = []
|
||||
|
||||
for speaker, speaker_words in by_speaker.items():
|
||||
current_text = ""
|
||||
current_start: float | None = None
|
||||
current_end: float = 0.0
|
||||
|
||||
for word in speaker_words:
|
||||
if current_start is None:
|
||||
current_start = word.start
|
||||
|
||||
current_text += word.text
|
||||
current_end = word.end
|
||||
|
||||
# Check for sentence end or max length
|
||||
is_sentence_end = SENTENCE_END_RE.search(word.text.strip())
|
||||
is_too_long = len(current_text) >= MAX_SENTENCE_SEGMENT_CHARS
|
||||
|
||||
if is_sentence_end or is_too_long:
|
||||
segments.append(
|
||||
TranscriptSegment(
|
||||
text=current_text,
|
||||
start=current_start,
|
||||
end=current_end,
|
||||
speaker=speaker,
|
||||
)
|
||||
)
|
||||
current_text = ""
|
||||
current_start = None
|
||||
|
||||
# Flush remaining words for this speaker
|
||||
if current_text and current_start is not None:
|
||||
segments.append(
|
||||
TranscriptSegment(
|
||||
text=current_text,
|
||||
start=current_start,
|
||||
end=current_end,
|
||||
speaker=speaker,
|
||||
)
|
||||
)
|
||||
|
||||
# Sort segments by start time
|
||||
segments.sort(key=lambda s: s.start)
|
||||
return segments
|
||||
|
||||
|
||||
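A quick illustration of the sentence-boundary test that drives the splits above; Word here is a simplified stand-in carrying only the fields the function reads:

import re
from dataclasses import dataclass

SENTENCE_END_RE = re.compile(r"[.?!…]$")


@dataclass
class Word:
    text: str
    start: float
    end: float
    speaker: int


words = [
    Word(" Hello", 0.0, 0.4, 0),
    Word(" there.", 0.4, 0.8, 0),
    Word(" Next", 0.9, 1.2, 0),
]
ends_sentence = [bool(SENTENCE_END_RE.search(w.text.strip())) for w in words]
# -> [False, True, False]: a segment closes after "there." and a new one starts at "Next"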
class Transcript(BaseModel):
|
||||
translation: str | None = None
|
||||
words: list[Word] = []
|
||||
@@ -229,9 +154,7 @@ class Transcript(BaseModel):
|
||||
word.start += offset
|
||||
word.end += offset
|
||||
|
||||
def as_segments(self, is_multitrack: bool = False) -> list[TranscriptSegment]:
|
||||
if is_multitrack:
|
||||
return words_to_segments_by_sentence(self.words)
|
||||
def as_segments(self) -> list[TranscriptSegment]:
|
||||
return words_to_segments(self.words)
|
||||
|
||||
|
||||
@@ -264,10 +187,6 @@ class FinalShortSummary(BaseModel):
|
||||
duration: float
|
||||
|
||||
|
||||
class ActionItems(BaseModel):
|
||||
action_items: dict # JSON-serializable dict from ActionItemsResponse
|
||||
|
||||
|
||||
class FinalTitle(BaseModel):
|
||||
title: str
|
||||
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
from typing import Literal
|
||||
|
||||
Platform = Literal["whereby", "daily"]
|
||||
WHEREBY_PLATFORM: Platform = "whereby"
|
||||
DAILY_PLATFORM: Platform = "daily"
|
||||
@@ -1,17 +0,0 @@
|
||||
"""Schema definitions for transcript format types and segments."""
|
||||
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
TranscriptFormat = Literal["text", "text-timestamped", "webvtt-named", "json"]
|
||||
|
||||
|
||||
class TranscriptSegment(BaseModel):
|
||||
"""A single transcript segment with speaker and timing information."""
|
||||
|
||||
speaker: int
|
||||
speaker_name: str
|
||||
text: str
|
||||
start: float
|
||||
end: float
|
||||
@@ -55,6 +55,7 @@ import httpx
|
||||
import pytz
|
||||
import structlog
|
||||
from icalendar import Calendar, Event
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from reflector.db.calendar_events import CalendarEvent, calendar_events_controller
|
||||
from reflector.db.rooms import Room, rooms_controller
|
||||
@@ -247,15 +248,21 @@ class ICSFetchService:
|
||||
)
|
||||
att_data: AttendeeData = {
|
||||
"email": clean_email,
|
||||
"name": att.params.get("CN")
|
||||
if hasattr(att, "params") and email == email_parts[0]
|
||||
else None,
|
||||
"status": att.params.get("PARTSTAT")
|
||||
if hasattr(att, "params") and email == email_parts[0]
|
||||
else None,
|
||||
"role": att.params.get("ROLE")
|
||||
if hasattr(att, "params") and email == email_parts[0]
|
||||
else None,
|
||||
"name": (
|
||||
att.params.get("CN")
|
||||
if hasattr(att, "params") and email == email_parts[0]
|
||||
else None
|
||||
),
|
||||
"status": (
|
||||
att.params.get("PARTSTAT")
|
||||
if hasattr(att, "params") and email == email_parts[0]
|
||||
else None
|
||||
),
|
||||
"role": (
|
||||
att.params.get("ROLE")
|
||||
if hasattr(att, "params") and email == email_parts[0]
|
||||
else None
|
||||
),
|
||||
}
|
||||
final_attendees.append(att_data)
|
||||
else:
|
||||
@@ -263,9 +270,9 @@ class ICSFetchService:
|
||||
att_data: AttendeeData = {
|
||||
"email": email_str,
|
||||
"name": att.params.get("CN") if hasattr(att, "params") else None,
|
||||
"status": att.params.get("PARTSTAT")
|
||||
if hasattr(att, "params")
|
||||
else None,
|
||||
"status": (
|
||||
att.params.get("PARTSTAT") if hasattr(att, "params") else None
|
||||
),
|
||||
"role": att.params.get("ROLE") if hasattr(att, "params") else None,
|
||||
}
|
||||
final_attendees.append(att_data)
|
||||
@@ -280,9 +287,9 @@ class ICSFetchService:
|
||||
)
|
||||
org_data: AttendeeData = {
|
||||
"email": org_email,
|
||||
"name": organizer.params.get("CN")
|
||||
if hasattr(organizer, "params")
|
||||
else None,
|
||||
"name": (
|
||||
organizer.params.get("CN") if hasattr(organizer, "params") else None
|
||||
),
|
||||
"role": "ORGANIZER",
|
||||
}
|
||||
final_attendees.append(org_data)
|
||||
@@ -294,7 +301,7 @@ class ICSSyncService:
|
||||
def __init__(self):
|
||||
self.fetch_service = ICSFetchService()
|
||||
|
||||
async def sync_room_calendar(self, room: Room) -> SyncResult:
|
||||
async def sync_room_calendar(self, session: AsyncSession, room: Room) -> SyncResult:
|
||||
async with RedisAsyncLock(
|
||||
f"ics_sync_room:{room.id}", skip_if_locked=True
|
||||
) as lock:
|
||||
@@ -305,9 +312,11 @@ class ICSSyncService:
|
||||
"reason": "Sync already in progress",
|
||||
}
|
||||
|
||||
return await self._sync_room_calendar(room)
|
||||
return await self._sync_room_calendar(session, room)
|
||||
|
||||
async def _sync_room_calendar(self, room: Room) -> SyncResult:
|
||||
async def _sync_room_calendar(
|
||||
self, session: AsyncSession, room: Room
|
||||
) -> SyncResult:
|
||||
if not room.ics_enabled or not room.ics_url:
|
||||
return {"status": SyncStatus.SKIPPED, "reason": "ICS not configured"}
|
||||
|
||||
@@ -340,10 +349,11 @@ class ICSSyncService:
|
||||
events, total_events = self.fetch_service.extract_room_events(
|
||||
calendar, room.name, room_url
|
||||
)
|
||||
sync_result = await self._sync_events_to_database(room.id, events)
|
||||
sync_result = await self._sync_events_to_database(session, room.id, events)
|
||||
|
||||
# Update room sync metadata
|
||||
await rooms_controller.update(
|
||||
session,
|
||||
room,
|
||||
{
|
||||
"ics_last_sync": datetime.now(timezone.utc),
|
||||
@@ -372,7 +382,7 @@ class ICSSyncService:
|
||||
return time_since_sync.total_seconds() >= room.ics_fetch_interval
|
||||
|
||||
async def _sync_events_to_database(
|
||||
self, room_id: str, events: list[EventData]
|
||||
self, session: AsyncSession, room_id: str, events: list[EventData]
|
||||
) -> SyncStats:
|
||||
created = 0
|
||||
updated = 0
|
||||
@@ -382,7 +392,7 @@ class ICSSyncService:
|
||||
for event_data in events:
|
||||
calendar_event = CalendarEvent(room_id=room_id, **event_data)
|
||||
existing = await calendar_events_controller.get_by_ics_uid(
|
||||
room_id, event_data["ics_uid"]
|
||||
session, room_id, event_data["ics_uid"]
|
||||
)
|
||||
|
||||
if existing:
|
||||
@@ -390,12 +400,12 @@ class ICSSyncService:
|
||||
else:
|
||||
created += 1
|
||||
|
||||
await calendar_events_controller.upsert(calendar_event)
|
||||
await calendar_events_controller.upsert(session, calendar_event)
|
||||
current_ics_uids.append(event_data["ics_uid"])
|
||||
|
||||
# Soft delete events that are no longer in calendar
|
||||
deleted = await calendar_events_controller.soft_delete_missing(
|
||||
room_id, current_ics_uids
|
||||
session, room_id, current_ics_uids
|
||||
)
|
||||
|
||||
return {
|
||||
|
||||
@@ -1,297 +0,0 @@
|
||||
"""
|
||||
Transcript processing service - shared logic for HTTP endpoints and Celery tasks.
|
||||
|
||||
This module provides result-based error handling that works in both contexts:
|
||||
- HTTP endpoint: converts errors to HTTPException
|
||||
- Celery task: converts errors to Exception
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Literal, Union, assert_never
|
||||
|
||||
import celery
|
||||
from celery.result import AsyncResult
|
||||
from hatchet_sdk.clients.rest.exceptions import ApiException
|
||||
from hatchet_sdk.clients.rest.models import V1TaskStatus
|
||||
|
||||
from reflector.db.recordings import recordings_controller
|
||||
from reflector.db.rooms import rooms_controller
|
||||
from reflector.db.transcripts import Transcript, transcripts_controller
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
from reflector.logger import logger
|
||||
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
|
||||
from reflector.pipelines.main_multitrack_pipeline import (
|
||||
task_pipeline_multitrack_process,
|
||||
)
|
||||
from reflector.settings import settings
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProcessError:
|
||||
detail: NonEmptyString
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileProcessingConfig:
|
||||
transcript_id: NonEmptyString
|
||||
mode: Literal["file"] = "file"
|
||||
|
||||
|
||||
@dataclass
|
||||
class MultitrackProcessingConfig:
|
||||
transcript_id: NonEmptyString
|
||||
bucket_name: NonEmptyString
|
||||
track_keys: list[str]
|
||||
recording_id: NonEmptyString | None = None
|
||||
room_id: NonEmptyString | None = None
|
||||
mode: Literal["multitrack"] = "multitrack"
|
||||
|
||||
|
||||
ProcessingConfig = Union[FileProcessingConfig, MultitrackProcessingConfig]
|
||||
PrepareResult = Union[ProcessingConfig, ProcessError]
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationOk:
|
||||
# transcript currently doesn't always have a recording_id
|
||||
recording_id: NonEmptyString | None
|
||||
transcript_id: NonEmptyString
|
||||
room_id: NonEmptyString | None = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationLocked:
|
||||
detail: NonEmptyString
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationNotReady:
|
||||
detail: NonEmptyString
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationAlreadyScheduled:
|
||||
detail: NonEmptyString
|
||||
|
||||
|
||||
ValidationError = Union[
|
||||
ValidationNotReady, ValidationLocked, ValidationAlreadyScheduled
|
||||
]
|
||||
ValidationResult = Union[ValidationOk, ValidationError]
|
||||
|
||||
|
||||
@dataclass
|
||||
class DispatchOk:
|
||||
status: Literal["ok"] = "ok"
|
||||
|
||||
|
||||
@dataclass
|
||||
class DispatchAlreadyRunning:
|
||||
status: Literal["already_running"] = "already_running"
|
||||
|
||||
|
||||
DispatchResult = Union[
|
||||
DispatchOk, DispatchAlreadyRunning, ProcessError, ValidationError
|
||||
]
|
||||
|
||||
|
||||
async def validate_transcript_for_processing(
|
||||
transcript: Transcript,
|
||||
) -> ValidationResult:
|
||||
if transcript.locked:
|
||||
return ValidationLocked(detail="Recording is locked")
|
||||
|
||||
if transcript.status == "idle":
|
||||
return ValidationNotReady(detail="Recording is not ready for processing")
|
||||
|
||||
# Check Celery tasks
|
||||
if task_is_scheduled_or_active(
|
||||
"reflector.pipelines.main_file_pipeline.task_pipeline_file_process",
|
||||
transcript_id=transcript.id,
|
||||
) or task_is_scheduled_or_active(
|
||||
"reflector.pipelines.main_multitrack_pipeline.task_pipeline_multitrack_process",
|
||||
transcript_id=transcript.id,
|
||||
):
|
||||
return ValidationAlreadyScheduled(detail="already running")
|
||||
|
||||
# Check Hatchet workflows (if enabled)
|
||||
if settings.HATCHET_ENABLED and transcript.workflow_run_id:
|
||||
try:
|
||||
status = await HatchetClientManager.get_workflow_run_status(
|
||||
transcript.workflow_run_id
|
||||
)
|
||||
# If workflow is running or queued, don't allow new processing
|
||||
if status in (V1TaskStatus.RUNNING, V1TaskStatus.QUEUED):
|
||||
return ValidationAlreadyScheduled(
|
||||
detail="Hatchet workflow already running"
|
||||
)
|
||||
except ApiException:
|
||||
# Workflow might be gone (404) or API issue - allow processing
|
||||
pass
|
||||
|
||||
return ValidationOk(
|
||||
recording_id=transcript.recording_id,
|
||||
transcript_id=transcript.id,
|
||||
room_id=transcript.room_id,
|
||||
)
|
||||
|
||||
|
||||
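A hedged sketch of how an HTTP endpoint might consume these result types, per the module docstring ("converts errors to HTTPException"); the FastAPI import and the status codes are assumptions, not taken from this module:

from fastapi import HTTPException


async def require_processable(transcript: Transcript) -> ValidationOk:
    result = await validate_transcript_for_processing(transcript)
    match result:
        case ValidationOk():
            return result
        case ValidationLocked(detail=detail):
            raise HTTPException(status_code=423, detail=detail)  # assumed status code
        case ValidationNotReady(detail=detail) | ValidationAlreadyScheduled(detail=detail):
            raise HTTPException(status_code=409, detail=detail)  # assumed status code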
async def prepare_transcript_processing(validation: ValidationOk) -> PrepareResult:
|
||||
"""
|
||||
Determine processing mode from transcript/recording data.
|
||||
"""
|
||||
bucket_name: str | None = None
|
||||
track_keys: list[str] | None = None
|
||||
recording_id: str | None = validation.recording_id
|
||||
|
||||
if validation.recording_id:
|
||||
recording = await recordings_controller.get_by_id(validation.recording_id)
|
||||
if recording:
|
||||
bucket_name = recording.bucket_name
|
||||
track_keys = recording.track_keys
|
||||
|
||||
if track_keys is not None and len(track_keys) == 0:
|
||||
return ProcessError(
|
||||
detail="No track keys found, must be either > 0 or None",
|
||||
)
|
||||
if track_keys is not None and not bucket_name:
|
||||
return ProcessError(
|
||||
detail="Bucket name must be specified",
|
||||
)
|
||||
|
||||
if track_keys:
|
||||
return MultitrackProcessingConfig(
|
||||
bucket_name=bucket_name, # type: ignore (validated above)
|
||||
track_keys=track_keys,
|
||||
transcript_id=validation.transcript_id,
|
||||
recording_id=recording_id,
|
||||
room_id=validation.room_id,
|
||||
)
|
||||
|
||||
return FileProcessingConfig(
|
||||
transcript_id=validation.transcript_id,
|
||||
)
|
||||
|
||||
|
||||
async def dispatch_transcript_processing(
|
||||
config: ProcessingConfig, force: bool = False
|
||||
) -> AsyncResult | None:
|
||||
"""Dispatch transcript processing to appropriate backend (Hatchet or Celery).
|
||||
|
||||
Returns AsyncResult for Celery tasks, None for Hatchet workflows.
|
||||
"""
|
||||
if isinstance(config, MultitrackProcessingConfig):
|
||||
# Check if room has use_hatchet=True (overrides env vars)
|
||||
room_forces_hatchet = False
|
||||
if config.room_id:
|
||||
room = await rooms_controller.get_by_id(config.room_id)
|
||||
room_forces_hatchet = room.use_hatchet if room else False
|
||||
|
||||
# Start durable workflow if enabled (Hatchet)
|
||||
# or if room has use_hatchet=True
|
||||
use_hatchet = settings.HATCHET_ENABLED or room_forces_hatchet
|
||||
|
||||
if room_forces_hatchet:
|
||||
logger.info(
|
||||
"Room forces Hatchet workflow",
|
||||
room_id=config.room_id,
|
||||
transcript_id=config.transcript_id,
|
||||
)
|
||||
|
||||
if use_hatchet:
|
||||
# First check if we can replay (outside transaction since it's read-only)
|
||||
transcript = await transcripts_controller.get_by_id(config.transcript_id)
|
||||
if transcript and transcript.workflow_run_id and not force:
|
||||
can_replay = await HatchetClientManager.can_replay(
|
||||
transcript.workflow_run_id
|
||||
)
|
||||
if can_replay:
|
||||
await HatchetClientManager.replay_workflow(
|
||||
transcript.workflow_run_id
|
||||
)
|
||||
logger.info(
|
||||
"Replaying Hatchet workflow",
|
||||
workflow_id=transcript.workflow_run_id,
|
||||
)
|
||||
return None
|
||||
|
||||
# Force: cancel old workflow if exists
|
||||
if force and transcript and transcript.workflow_run_id:
|
||||
await HatchetClientManager.cancel_workflow(transcript.workflow_run_id)
|
||||
logger.info(
|
||||
"Cancelled old workflow (--force)",
|
||||
workflow_id=transcript.workflow_run_id,
|
||||
)
|
||||
await transcripts_controller.update(
|
||||
transcript, {"workflow_run_id": None}
|
||||
)
|
||||
|
||||
# Re-fetch and check for concurrent dispatch (optimistic approach).
|
||||
# No database lock - worst case is duplicate dispatch, but Hatchet
|
||||
# workflows are idempotent so this is acceptable.
|
||||
transcript = await transcripts_controller.get_by_id(config.transcript_id)
|
||||
if transcript and transcript.workflow_run_id:
|
||||
# Another process started a workflow between validation and now
|
||||
try:
|
||||
status = await HatchetClientManager.get_workflow_run_status(
|
||||
transcript.workflow_run_id
|
||||
)
|
||||
if status in (V1TaskStatus.RUNNING, V1TaskStatus.QUEUED):
|
||||
logger.info(
|
||||
"Concurrent workflow detected, skipping dispatch",
|
||||
workflow_id=transcript.workflow_run_id,
|
||||
)
|
||||
return None
|
||||
except ApiException:
|
||||
# Workflow might be gone (404) or API issue - proceed with new workflow
|
||||
pass
|
||||
|
||||
workflow_id = await HatchetClientManager.start_workflow(
|
||||
workflow_name="DiarizationPipeline",
|
||||
input_data={
|
||||
"recording_id": config.recording_id,
|
||||
"tracks": [{"s3_key": k} for k in config.track_keys],
|
||||
"bucket_name": config.bucket_name,
|
||||
"transcript_id": config.transcript_id,
|
||||
"room_id": config.room_id,
|
||||
},
|
||||
additional_metadata={
|
||||
"transcript_id": config.transcript_id,
|
||||
"recording_id": config.recording_id,
|
||||
"daily_recording_id": config.recording_id,
|
||||
},
|
||||
)
|
||||
|
||||
if transcript:
|
||||
await transcripts_controller.update(
|
||||
transcript, {"workflow_run_id": workflow_id}
|
||||
)
|
||||
|
||||
logger.info("Hatchet workflow dispatched", workflow_id=workflow_id)
|
||||
return None
|
||||
|
||||
# Celery pipeline (durable workflows disabled)
|
||||
return task_pipeline_multitrack_process.delay(
|
||||
transcript_id=config.transcript_id,
|
||||
bucket_name=config.bucket_name,
|
||||
track_keys=config.track_keys,
|
||||
)
|
||||
elif isinstance(config, FileProcessingConfig):
|
||||
return task_pipeline_file_process.delay(transcript_id=config.transcript_id)
|
||||
else:
|
||||
assert_never(config)
|
||||
|
||||
|
||||
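assert_never() at the end of the dispatcher gives a static exhaustiveness check over the ProcessingConfig union: add a third config dataclass and type checkers flag this branch. A tiny hedged illustration (describe_config is an invented helper):

from typing import assert_never


def describe_config(config: ProcessingConfig) -> str:
    if isinstance(config, MultitrackProcessingConfig):
        return f"multitrack ({len(config.track_keys)} tracks)"
    elif isinstance(config, FileProcessingConfig):
        return f"file ({config.transcript_id})"
    else:
        assert_never(config)  # unreachable while the union has exactly these two members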
def task_is_scheduled_or_active(task_name: str, **kwargs):
|
||||
inspect = celery.current_app.control.inspect()
|
||||
|
||||
scheduled = inspect.scheduled() or {}
|
||||
active = inspect.active() or {}
|
||||
all = scheduled | active
|
||||
for worker, tasks in all.items():
|
||||
for task in tasks:
|
||||
if task["name"] == task_name and task["kwargs"] == kwargs:
|
||||
return True
|
||||
|
||||
return False
|
||||
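The helper above merges the two inspect payloads with the Python 3.9+ dict union operator (and binds the result to a name that shadows the builtin all). One caveat worth knowing: for a worker key present in both dicts, the active entry replaces the scheduled one. A small illustration with made-up inspect payloads:

scheduled = {"worker-1": [{"name": "pipeline.task_a", "kwargs": {"transcript_id": "t1"}}]}
active = {"worker-1": [{"name": "pipeline.task_b", "kwargs": {"transcript_id": "t2"}}]}

merged = scheduled | active
# merged == {"worker-1": [{"name": "pipeline.task_b", ...}]} - the scheduled entry
# for worker-1 is shadowed because dict union keeps the right-hand value.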
@@ -1,7 +1,6 @@
|
||||
from pydantic.types import PositiveInt
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
|
||||
@@ -48,17 +47,14 @@ class Settings(BaseSettings):
|
||||
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
|
||||
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
|
||||
|
||||
# Platform-specific recording storage (follows {PREFIX}_STORAGE_AWS_{CREDENTIAL} pattern)
|
||||
# Whereby storage configuration
|
||||
WHEREBY_STORAGE_AWS_BUCKET_NAME: str | None = None
|
||||
WHEREBY_STORAGE_AWS_REGION: str | None = None
|
||||
WHEREBY_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
|
||||
WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
|
||||
# Recording storage
|
||||
RECORDING_STORAGE_BACKEND: str | None = None
|
||||
|
||||
# Daily.co storage configuration
|
||||
DAILYCO_STORAGE_AWS_BUCKET_NAME: str | None = None
|
||||
DAILYCO_STORAGE_AWS_REGION: str | None = None
|
||||
DAILYCO_STORAGE_AWS_ROLE_ARN: str | None = None
|
||||
# Recording storage configuration for AWS
|
||||
RECORDING_STORAGE_AWS_BUCKET_NAME: str = "recording-bucket"
|
||||
RECORDING_STORAGE_AWS_REGION: str = "us-east-1"
|
||||
RECORDING_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
|
||||
RECORDING_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
|
||||
|
||||
# Translate into the target language
|
||||
TRANSLATION_BACKEND: str = "passthrough"
|
||||
@@ -74,13 +70,6 @@ class Settings(BaseSettings):
|
||||
LLM_API_KEY: str | None = None
|
||||
LLM_CONTEXT_WINDOW: int = 16000
|
||||
|
||||
LLM_PARSE_MAX_RETRIES: int = (
|
||||
3 # Max retries for JSON/validation errors (total attempts = retries + 1)
|
||||
)
|
||||
LLM_STRUCTURED_RESPONSE_TIMEOUT: int = (
|
||||
300 # Timeout in seconds for structured responses (5 minutes)
|
||||
)
|
||||
|
||||
# Diarization
|
||||
DIARIZATION_ENABLED: bool = True
|
||||
DIARIZATION_BACKEND: str = "modal"
|
||||
@@ -135,37 +124,15 @@ class Settings(BaseSettings):
|
||||
WHEREBY_API_URL: str = "https://api.whereby.dev/v1"
|
||||
WHEREBY_API_KEY: NonEmptyString | None = None
|
||||
WHEREBY_WEBHOOK_SECRET: str | None = None
|
||||
AWS_WHEREBY_ACCESS_KEY_ID: str | None = None
|
||||
AWS_WHEREBY_ACCESS_KEY_SECRET: str | None = None
|
||||
AWS_PROCESS_RECORDING_QUEUE_URL: str | None = None
|
||||
SQS_POLLING_TIMEOUT_SECONDS: int = 60
|
||||
|
||||
# Daily.co integration
|
||||
DAILY_API_KEY: str | None = None
|
||||
DAILY_WEBHOOK_SECRET: str | None = None
|
||||
DAILY_SUBDOMAIN: str | None = None
|
||||
DAILY_WEBHOOK_UUID: str | None = (
|
||||
None # Webhook UUID for this environment. Not used by production code
|
||||
)
|
||||
# Platform Configuration
|
||||
DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM
|
||||
|
||||
# Zulip integration
|
||||
ZULIP_REALM: str | None = None
|
||||
ZULIP_API_KEY: str | None = None
|
||||
ZULIP_BOT_EMAIL: str | None = None
|
||||
|
||||
# Durable workflow orchestration
|
||||
# Provider: "hatchet" (or "none" to disable)
|
||||
DURABLE_WORKFLOW_PROVIDER: str = "none"
|
||||
|
||||
# Hatchet workflow orchestration
|
||||
HATCHET_CLIENT_TOKEN: str | None = None
|
||||
HATCHET_CLIENT_TLS_STRATEGY: str = "none" # none, tls, mtls
|
||||
HATCHET_DEBUG: bool = False
|
||||
|
||||
@property
|
||||
def HATCHET_ENABLED(self) -> bool:
|
||||
"""True if Hatchet is the active provider."""
|
||||
return self.DURABLE_WORKFLOW_PROVIDER == "hatchet"
|
||||
|
||||
|
||||
settings = Settings()
|
||||
|
||||
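How the HATCHET_ENABLED property behaves for the two provider values, shown by constructing Settings directly; this assumes the remaining fields have defaults or come from the environment, which is how pydantic-settings normally fills them:

assert Settings(DURABLE_WORKFLOW_PROVIDER="hatchet").HATCHET_ENABLED is True
assert Settings(DURABLE_WORKFLOW_PROVIDER="none").HATCHET_ENABLED is False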
@@ -3,13 +3,6 @@ from reflector.settings import settings
|
||||
|
||||
|
||||
def get_transcripts_storage() -> Storage:
|
||||
"""
|
||||
Get storage for processed transcript files (master credentials).
|
||||
|
||||
Also use this for ALL our file operations with bucket override:
|
||||
master = get_transcripts_storage()
|
||||
master.delete_file(key, bucket=recording.bucket_name)
|
||||
"""
|
||||
assert settings.TRANSCRIPT_STORAGE_BACKEND
|
||||
return Storage.get_instance(
|
||||
name=settings.TRANSCRIPT_STORAGE_BACKEND,
|
||||
@@ -17,53 +10,8 @@ def get_transcripts_storage() -> Storage:
|
||||
)
|
||||
|
||||
|
||||
def get_whereby_storage() -> Storage:
|
||||
"""
|
||||
Get storage config for Whereby (for passing to Whereby API).
|
||||
|
||||
Usage:
|
||||
whereby_storage = get_whereby_storage()
|
||||
key_id, secret = whereby_storage.key_credentials
|
||||
whereby_api.create_meeting(
|
||||
bucket=whereby_storage.bucket_name,
|
||||
access_key_id=key_id,
|
||||
secret=secret,
|
||||
)
|
||||
|
||||
Do NOT use for our file operations - use get_transcripts_storage() instead.
|
||||
"""
|
||||
if not settings.WHEREBY_STORAGE_AWS_BUCKET_NAME:
|
||||
raise ValueError(
|
||||
"WHEREBY_STORAGE_AWS_BUCKET_NAME required for Whereby with AWS storage"
|
||||
)
|
||||
|
||||
def get_recordings_storage() -> Storage:
|
||||
return Storage.get_instance(
|
||||
name="aws",
|
||||
settings_prefix="WHEREBY_STORAGE_",
|
||||
)
|
||||
|
||||
|
||||
def get_dailyco_storage() -> Storage:
|
||||
"""
|
||||
Get storage config for Daily.co (for passing to Daily API).
|
||||
|
||||
Usage:
|
||||
daily_storage = get_dailyco_storage()
|
||||
daily_api.create_meeting(
|
||||
bucket=daily_storage.bucket_name,
|
||||
region=daily_storage.region,
|
||||
role_arn=daily_storage.role_credential,
|
||||
)
|
||||
|
||||
Do NOT use for our file operations - use get_transcripts_storage() instead.
|
||||
"""
|
||||
# Fail fast if platform-specific config missing
|
||||
if not settings.DAILYCO_STORAGE_AWS_BUCKET_NAME:
|
||||
raise ValueError(
|
||||
"DAILYCO_STORAGE_AWS_BUCKET_NAME required for Daily.co with AWS storage"
|
||||
)
|
||||
|
||||
return Storage.get_instance(
|
||||
name="aws",
|
||||
settings_prefix="DAILYCO_STORAGE_",
|
||||
name=settings.RECORDING_STORAGE_BACKEND,
|
||||
settings_prefix="RECORDING_STORAGE_",
|
||||
)
|
||||
|
||||
@@ -1,23 +1,10 @@
|
||||
import importlib
|
||||
from typing import BinaryIO, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from reflector.settings import settings
|
||||
|
||||
|
||||
class StorageError(Exception):
|
||||
"""Base exception for storage operations."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class StoragePermissionError(StorageError):
|
||||
"""Exception raised when storage operation fails due to permission issues."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class FileResult(BaseModel):
|
||||
filename: str
|
||||
url: str
|
||||
@@ -49,113 +36,26 @@ class Storage:
|
||||
|
||||
return cls._registry[name](**config)
|
||||
|
||||
# Credential properties for API passthrough
|
||||
@property
|
||||
def bucket_name(self) -> str:
|
||||
"""Default bucket name for this storage instance."""
|
||||
async def put_file(self, filename: str, data: bytes) -> FileResult:
|
||||
return await self._put_file(filename, data)
|
||||
|
||||
async def _put_file(self, filename: str, data: bytes) -> FileResult:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def region(self) -> str:
|
||||
"""AWS region for this storage instance."""
|
||||
async def delete_file(self, filename: str):
|
||||
return await self._delete_file(filename)
|
||||
|
||||
async def _delete_file(self, filename: str):
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def access_key_id(self) -> str | None:
|
||||
"""AWS access key ID (None for role-based auth). Prefer key_credentials property."""
|
||||
return None
|
||||
async def get_file_url(self, filename: str) -> str:
|
||||
return await self._get_file_url(filename)
|
||||
|
||||
@property
|
||||
def secret_access_key(self) -> str | None:
|
||||
"""AWS secret access key (None for role-based auth). Prefer key_credentials property."""
|
||||
return None
|
||||
|
||||
@property
|
||||
def role_arn(self) -> str | None:
|
||||
"""AWS IAM role ARN for role-based auth (None for key-based auth). Prefer role_credential property."""
|
||||
return None
|
||||
|
||||
@property
|
||||
def key_credentials(self) -> tuple[str, str]:
|
||||
"""
|
||||
Get (access_key_id, secret_access_key) for key-based auth.
|
||||
Raises ValueError if storage uses IAM role instead.
|
||||
"""
|
||||
async def _get_file_url(self, filename: str) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def role_credential(self) -> str:
|
||||
"""
|
||||
Get IAM role ARN for role-based auth.
|
||||
Raises ValueError if storage uses access keys instead.
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
async def put_file(
|
||||
self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None
|
||||
) -> FileResult:
|
||||
"""Upload data. bucket: override instance default if provided."""
|
||||
return await self._put_file(filename, data, bucket=bucket)
|
||||
|
||||
async def _put_file(
|
||||
self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None
|
||||
) -> FileResult:
|
||||
raise NotImplementedError
|
||||
|
||||
async def delete_file(self, filename: str, *, bucket: str | None = None):
|
||||
"""Delete file. bucket: override instance default if provided."""
|
||||
return await self._delete_file(filename, bucket=bucket)
|
||||
|
||||
async def _delete_file(self, filename: str, *, bucket: str | None = None):
|
||||
raise NotImplementedError
|
||||
|
||||
async def get_file_url(
|
||||
self,
|
||||
filename: str,
|
||||
operation: str = "get_object",
|
||||
expires_in: int = 3600,
|
||||
*,
|
||||
bucket: str | None = None,
|
||||
) -> str:
|
||||
"""Generate presigned URL. bucket: override instance default if provided."""
|
||||
return await self._get_file_url(filename, operation, expires_in, bucket=bucket)
|
||||
|
||||
async def _get_file_url(
|
||||
self,
|
||||
filename: str,
|
||||
operation: str = "get_object",
|
||||
expires_in: int = 3600,
|
||||
*,
|
||||
bucket: str | None = None,
|
||||
) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
async def get_file(self, filename: str, *, bucket: str | None = None):
|
||||
"""Download file. bucket: override instance default if provided."""
|
||||
return await self._get_file(filename, bucket=bucket)
|
||||
|
||||
async def _get_file(self, filename: str, *, bucket: str | None = None):
|
||||
raise NotImplementedError
|
||||
|
||||
async def list_objects(
|
||||
self, prefix: str = "", *, bucket: str | None = None
|
||||
) -> list[str]:
|
||||
"""List object keys. bucket: override instance default if provided."""
|
||||
return await self._list_objects(prefix, bucket=bucket)
|
||||
|
||||
async def _list_objects(
|
||||
self, prefix: str = "", *, bucket: str | None = None
|
||||
) -> list[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
async def stream_to_fileobj(
|
||||
self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
|
||||
):
|
||||
"""Stream file directly to file object without loading into memory.
|
||||
bucket: override instance default if provided."""
|
||||
return await self._stream_to_fileobj(filename, fileobj, bucket=bucket)
|
||||
|
||||
async def _stream_to_fileobj(
|
||||
self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
|
||||
):
|
||||
async def get_file(self, filename: str):
|
||||
return await self._get_file(filename)
|
||||
|
||||
async def _get_file(self, filename: str):
|
||||
raise NotImplementedError
|
||||
|
||||
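A hedged sketch of the bucket-override call pattern these signatures enable, matching the get_transcripts_storage() docstring earlier; the key and bucket names are invented:

master = get_transcripts_storage()

await master.put_file("audio/abc123.mp3", b"...", bucket="daily-recordings-prod")
url = await master.get_file_url("audio/abc123.mp3", bucket="daily-recordings-prod")
await master.delete_file("audio/abc123.mp3", bucket="daily-recordings-prod")

# Omitting bucket= falls back to the instance's default bucket.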
@@ -1,236 +1,79 @@
|
||||
from functools import wraps
|
||||
from typing import BinaryIO, Union
|
||||
|
||||
import aioboto3
|
||||
from botocore.config import Config
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
from reflector.logger import logger
|
||||
from reflector.storage.base import FileResult, Storage, StoragePermissionError
|
||||
|
||||
|
||||
def handle_s3_client_errors(operation_name: str):
|
||||
"""Decorator to handle S3 ClientError with bucket-aware messaging.
|
||||
|
||||
Args:
|
||||
operation_name: Human-readable operation name for error messages (e.g., "upload", "delete")
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
async def wrapper(self, *args, **kwargs):
|
||||
bucket = kwargs.get("bucket")
|
||||
try:
|
||||
return await func(self, *args, **kwargs)
|
||||
except ClientError as e:
|
||||
error_code = e.response.get("Error", {}).get("Code")
|
||||
if error_code in ("AccessDenied", "NoSuchBucket"):
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
bucket_context = (
|
||||
f"overridden bucket '{actual_bucket}'"
|
||||
if bucket
|
||||
else f"default bucket '{actual_bucket}'"
|
||||
)
|
||||
raise StoragePermissionError(
|
||||
f"S3 {operation_name} failed for {bucket_context}: {error_code}. "
|
||||
f"Check TRANSCRIPT_STORAGE_AWS_* credentials have permission."
|
||||
) from e
|
||||
raise
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
from reflector.storage.base import FileResult, Storage
|
||||
|
||||
|
||||
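A minimal sketch of what the decorator above does when S3 denies access; FakeStorage and the bucket names are illustrative only:

from botocore.exceptions import ClientError


class FakeStorage:
    _bucket_name = "default-bucket"

    @handle_s3_client_errors("delete")
    async def _delete_file(self, filename: str, *, bucket: str | None = None):
        raise ClientError({"Error": {"Code": "AccessDenied"}}, "DeleteObject")


# Awaiting FakeStorage()._delete_file("x.mp3", bucket="other-bucket") re-raises as
# StoragePermissionError naming "overridden bucket 'other-bucket'".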
class AwsStorage(Storage):
|
||||
"""AWS S3 storage with bucket override for multi-platform recording architecture.
|
||||
Master credentials access all buckets via optional bucket parameter in operations."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
aws_access_key_id: str,
|
||||
aws_secret_access_key: str,
|
||||
aws_bucket_name: str,
|
||||
aws_region: str,
|
||||
aws_access_key_id: str | None = None,
|
||||
aws_secret_access_key: str | None = None,
|
||||
aws_role_arn: str | None = None,
|
||||
):
|
||||
if not aws_access_key_id:
|
||||
raise ValueError("Storage `aws_storage` require `aws_access_key_id`")
|
||||
if not aws_secret_access_key:
|
||||
raise ValueError("Storage `aws_storage` require `aws_secret_access_key`")
|
||||
if not aws_bucket_name:
|
||||
raise ValueError("Storage `aws_storage` require `aws_bucket_name`")
|
||||
if not aws_region:
|
||||
raise ValueError("Storage `aws_storage` require `aws_region`")
|
||||
if not aws_access_key_id and not aws_role_arn:
|
||||
raise ValueError(
|
||||
"Storage `aws_storage` require either `aws_access_key_id` or `aws_role_arn`"
|
||||
)
|
||||
if aws_role_arn and (aws_access_key_id or aws_secret_access_key):
|
||||
raise ValueError(
|
||||
"Storage `aws_storage` cannot use both `aws_role_arn` and access keys"
|
||||
)
|
||||
|
||||
super().__init__()
|
||||
self._bucket_name = aws_bucket_name
|
||||
self._region = aws_region
|
||||
self._access_key_id = aws_access_key_id
|
||||
self._secret_access_key = aws_secret_access_key
|
||||
self._role_arn = aws_role_arn
|
||||
|
||||
self.aws_bucket_name = aws_bucket_name
|
||||
self.aws_folder = ""
|
||||
if "/" in aws_bucket_name:
|
||||
self._bucket_name, self.aws_folder = aws_bucket_name.split("/", 1)
|
||||
self.boto_config = Config(retries={"max_attempts": 3, "mode": "adaptive"})
|
||||
self.aws_bucket_name, self.aws_folder = aws_bucket_name.split("/", 1)
|
||||
self.session = aioboto3.Session(
|
||||
aws_access_key_id=aws_access_key_id,
|
||||
aws_secret_access_key=aws_secret_access_key,
|
||||
region_name=aws_region,
|
||||
)
|
||||
self.base_url = f"https://{self._bucket_name}.s3.amazonaws.com/"
|
||||
self.base_url = f"https://{aws_bucket_name}.s3.amazonaws.com/"
|
||||
|
||||
# Implement credential properties
|
||||
@property
|
||||
def bucket_name(self) -> str:
|
||||
return self._bucket_name
|
||||
|
||||
@property
|
||||
def region(self) -> str:
|
||||
return self._region
|
||||
|
||||
@property
|
||||
def access_key_id(self) -> str | None:
|
||||
return self._access_key_id
|
||||
|
||||
@property
|
||||
def secret_access_key(self) -> str | None:
|
||||
return self._secret_access_key
|
||||
|
||||
@property
|
||||
def role_arn(self) -> str | None:
|
||||
return self._role_arn
|
||||
|
||||
@property
|
||||
def key_credentials(self) -> tuple[str, str]:
|
||||
"""Get (access_key_id, secret_access_key) for key-based auth."""
|
||||
if self._role_arn:
|
||||
raise ValueError(
|
||||
"Storage uses IAM role authentication. "
|
||||
"Use role_credential property instead of key_credentials."
|
||||
async def _put_file(self, filename: str, data: bytes) -> FileResult:
|
||||
bucket = self.aws_bucket_name
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Uploading {filename} to S3 {bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client("s3") as client:
|
||||
await client.put_object(
|
||||
Bucket=bucket,
|
||||
Key=s3filename,
|
||||
Body=data,
|
||||
)
|
||||
if not self._access_key_id or not self._secret_access_key:
|
||||
raise ValueError("Storage access key credentials not configured")
|
||||
return (self._access_key_id, self._secret_access_key)
|
||||
|
||||
@property
|
||||
def role_credential(self) -> str:
|
||||
"""Get IAM role ARN for role-based auth."""
|
||||
if self._access_key_id or self._secret_access_key:
|
||||
raise ValueError(
|
||||
"Storage uses access key authentication. "
|
||||
"Use key_credentials property instead of role_credential."
|
||||
)
|
||||
if not self._role_arn:
|
||||
raise ValueError("Storage IAM role ARN not configured")
|
||||
return self._role_arn
|
||||
|
||||
@handle_s3_client_errors("upload")
|
||||
async def _put_file(
|
||||
self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None
|
||||
) -> FileResult:
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
async def _get_file_url(self, filename: str) -> FileResult:
|
||||
bucket = self.aws_bucket_name
|
||||
folder = self.aws_folder
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
logger.info(f"Uploading {filename} to S3 {actual_bucket}/{folder}")
|
||||
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
if isinstance(data, bytes):
|
||||
await client.put_object(Bucket=actual_bucket, Key=s3filename, Body=data)
|
||||
else:
|
||||
# boto3 reads file-like object in chunks
|
||||
# avoids creating extra memory copy vs bytes.getvalue() approach
|
||||
await client.upload_fileobj(data, Bucket=actual_bucket, Key=s3filename)
|
||||
|
||||
url = await self._get_file_url(filename, bucket=bucket)
|
||||
return FileResult(filename=filename, url=url)
|
||||
|
||||
@handle_s3_client_errors("presign")
|
||||
async def _get_file_url(
|
||||
self,
|
||||
filename: str,
|
||||
operation: str = "get_object",
|
||||
expires_in: int = 3600,
|
||||
*,
|
||||
bucket: str | None = None,
|
||||
) -> str:
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
folder = self.aws_folder
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
async with self.session.client("s3") as client:
|
||||
presigned_url = await client.generate_presigned_url(
|
||||
operation,
|
||||
Params={"Bucket": actual_bucket, "Key": s3filename},
|
||||
ExpiresIn=expires_in,
|
||||
"get_object",
|
||||
Params={"Bucket": bucket, "Key": s3filename},
|
||||
ExpiresIn=3600,
|
||||
)
|
||||
|
||||
return presigned_url
|
||||
|
||||
@handle_s3_client_errors("delete")
|
||||
async def _delete_file(self, filename: str, *, bucket: str | None = None):
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
async def _delete_file(self, filename: str):
|
||||
bucket = self.aws_bucket_name
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Deleting {filename} from S3 {actual_bucket}/{folder}")
|
||||
logger.info(f"Deleting {filename} from S3 {bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
await client.delete_object(Bucket=actual_bucket, Key=s3filename)
|
||||
async with self.session.client("s3") as client:
|
||||
await client.delete_object(Bucket=bucket, Key=s3filename)
|
||||
|
||||
@handle_s3_client_errors("download")
|
||||
async def _get_file(self, filename: str, *, bucket: str | None = None):
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
async def _get_file(self, filename: str):
|
||||
bucket = self.aws_bucket_name
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Downloading {filename} from S3 {actual_bucket}/{folder}")
|
||||
logger.info(f"Downloading {filename} from S3 {bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
response = await client.get_object(Bucket=actual_bucket, Key=s3filename)
|
||||
async with self.session.client("s3") as client:
|
||||
response = await client.get_object(Bucket=bucket, Key=s3filename)
|
||||
return await response["Body"].read()
|
||||
|
||||
@handle_s3_client_errors("list_objects")
|
||||
async def _list_objects(
|
||||
self, prefix: str = "", *, bucket: str | None = None
|
||||
) -> list[str]:
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
folder = self.aws_folder
|
||||
# Combine folder and prefix
|
||||
s3prefix = f"{folder}/{prefix}" if folder else prefix
|
||||
logger.info(f"Listing objects from S3 {actual_bucket} with prefix '{s3prefix}'")
|
||||
|
||||
keys = []
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
paginator = client.get_paginator("list_objects_v2")
|
||||
async for page in paginator.paginate(Bucket=actual_bucket, Prefix=s3prefix):
|
||||
if "Contents" in page:
|
||||
for obj in page["Contents"]:
|
||||
# Strip folder prefix from keys if present
|
||||
key = obj["Key"]
|
||||
if folder:
|
||||
if key.startswith(f"{folder}/"):
|
||||
key = key[len(folder) + 1 :]
|
||||
elif key == folder:
|
||||
# Skip folder marker itself
|
||||
continue
|
||||
keys.append(key)
|
||||
|
||||
return keys
|
||||
|
||||
@handle_s3_client_errors("stream")
|
||||
async def _stream_to_fileobj(
|
||||
self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
|
||||
):
|
||||
"""Stream file from S3 directly to file object without loading into memory."""
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Streaming {filename} from S3 {actual_bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
await client.download_fileobj(
|
||||
Bucket=actual_bucket, Key=s3filename, Fileobj=fileobj
|
||||
)
|
||||
|
||||
|
||||
Storage.register("aws", AwsStorage)
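A minimal usage sketch of the backend registered above. The constructor keyword names and the direct calls to the private helpers are assumptions taken from this diff; production code presumably goes through the Storage base class and application settings instead.

import asyncio

async def demo_aws_storage():
    storage = AwsStorage(
        aws_bucket_name="recordings-bucket/meetings",  # hypothetical "bucket/folder" value
        aws_region="us-east-1",
        aws_access_key_id="AKIAEXAMPLE",               # or None when aws_role_arn is used
        aws_secret_access_key="secret",
        aws_role_arn=None,
    )
    # Upload raw bytes, then mint a short-lived presigned URL for the same key.
    result = await storage._put_file("hello.txt", b"hello world")
    url = await storage._get_file_url("hello.txt", expires_in=600)
    print(result.filename, url)

asyncio.run(demo_aws_storage())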
|
||||
|
||||
@@ -1,347 +0,0 @@
|
||||
import asyncio
|
||||
import sys
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, List, Optional, Protocol
|
||||
|
||||
import structlog
|
||||
from celery.result import AsyncResult
|
||||
|
||||
from reflector.db import get_database
|
||||
from reflector.db.transcripts import SourceKind, Transcript, transcripts_controller
|
||||
from reflector.pipelines.main_multitrack_pipeline import (
|
||||
task_pipeline_multitrack_process,
|
||||
)
|
||||
from reflector.storage import get_transcripts_storage
|
||||
from reflector.tools.process import (
|
||||
extract_result_from_entry,
|
||||
parse_s3_url,
|
||||
validate_s3_objects,
|
||||
)
|
||||
|
||||
logger = structlog.get_logger(__name__)
|
||||
|
||||
DEFAULT_PROCESSING_TIMEOUT_SECONDS = 3600
|
||||
|
||||
MAX_ERROR_MESSAGE_LENGTH = 500
|
||||
|
||||
TASK_POLL_INTERVAL_SECONDS = 2
|
||||
|
||||
|
||||
class StatusCallback(Protocol):
|
||||
def __call__(self, state: str, elapsed_seconds: int) -> None: ...
|
||||
|
||||
|
||||
@dataclass
|
||||
class MultitrackTaskResult:
|
||||
success: bool
|
||||
transcript_id: str
|
||||
error: Optional[str] = None
|
||||
|
||||
|
||||
async def create_multitrack_transcript(
|
||||
bucket_name: str,
|
||||
track_keys: List[str],
|
||||
source_language: str,
|
||||
target_language: str,
|
||||
user_id: Optional[str] = None,
|
||||
) -> Transcript:
|
||||
num_tracks = len(track_keys)
|
||||
track_word = "track" if num_tracks == 1 else "tracks"
|
||||
transcript_name = f"Multitrack ({num_tracks} {track_word})"
|
||||
|
||||
transcript = await transcripts_controller.add(
|
||||
transcript_name,
|
||||
source_kind=SourceKind.FILE,
|
||||
source_language=source_language,
|
||||
target_language=target_language,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Created multitrack transcript",
|
||||
transcript_id=transcript.id,
|
||||
name=transcript_name,
|
||||
bucket=bucket_name,
|
||||
num_tracks=len(track_keys),
|
||||
)
|
||||
|
||||
return transcript
|
||||
|
||||
|
||||
def submit_multitrack_task(
|
||||
transcript_id: str, bucket_name: str, track_keys: List[str]
|
||||
) -> AsyncResult:
|
||||
result = task_pipeline_multitrack_process.delay(
|
||||
transcript_id=transcript_id,
|
||||
bucket_name=bucket_name,
|
||||
track_keys=track_keys,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Multitrack task submitted",
|
||||
transcript_id=transcript_id,
|
||||
task_id=result.id,
|
||||
bucket=bucket_name,
|
||||
num_tracks=len(track_keys),
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
async def wait_for_task(
|
||||
result: AsyncResult,
|
||||
transcript_id: str,
|
||||
timeout_seconds: int = DEFAULT_PROCESSING_TIMEOUT_SECONDS,
|
||||
poll_interval: int = TASK_POLL_INTERVAL_SECONDS,
|
||||
status_callback: Optional[StatusCallback] = None,
|
||||
) -> MultitrackTaskResult:
|
||||
start_time = time.time()
|
||||
last_status = None
|
||||
|
||||
while not result.ready():
|
||||
elapsed = time.time() - start_time
|
||||
if elapsed > timeout_seconds:
|
||||
error_msg = (
|
||||
f"Task {result.id} did not complete within {timeout_seconds}s "
|
||||
f"for transcript {transcript_id}"
|
||||
)
|
||||
logger.error(
|
||||
"Task timeout",
|
||||
task_id=result.id,
|
||||
transcript_id=transcript_id,
|
||||
elapsed_seconds=elapsed,
|
||||
)
|
||||
raise TimeoutError(error_msg)
|
||||
|
||||
if result.state != last_status:
|
||||
if status_callback:
|
||||
status_callback(result.state, int(elapsed))
|
||||
last_status = result.state
|
||||
|
||||
await asyncio.sleep(poll_interval)
|
||||
|
||||
if result.failed():
|
||||
error_info = result.info
|
||||
traceback_info = getattr(result, "traceback", None)
|
||||
|
||||
logger.error(
|
||||
"Multitrack task failed",
|
||||
transcript_id=transcript_id,
|
||||
task_id=result.id,
|
||||
error=str(error_info),
|
||||
has_traceback=bool(traceback_info),
|
||||
)
|
||||
|
||||
error_detail = str(error_info)
|
||||
if traceback_info:
|
||||
error_detail += f"\nTraceback:\n{traceback_info}"
|
||||
|
||||
return MultitrackTaskResult(
|
||||
success=False, transcript_id=transcript_id, error=error_detail
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Multitrack task completed",
|
||||
transcript_id=transcript_id,
|
||||
task_id=result.id,
|
||||
state=result.state,
|
||||
)
|
||||
|
||||
return MultitrackTaskResult(success=True, transcript_id=transcript_id)
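A hedged sketch of driving the helpers above directly from async code (the CLI entry point further down does the same with extra error handling). The bucket and track keys are hypothetical, and a connected database plus a running Celery worker are assumed.

track_keys = ["meetings/track-0.webm", "meetings/track-1.webm"]  # hypothetical keys
transcript = await create_multitrack_transcript(
    bucket_name="recordings-bucket",
    track_keys=track_keys,
    source_language="en",
    target_language="en",
)
result = submit_multitrack_task(transcript.id, "recordings-bucket", track_keys)
outcome = await wait_for_task(
    result,
    transcript_id=transcript.id,
    timeout_seconds=600,
    status_callback=lambda state, elapsed: print(state, elapsed),
)
if not outcome.success:
    print(outcome.error)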
|
||||
|
||||
|
||||
async def update_transcript_status(
|
||||
transcript_id: str,
|
||||
status: str,
|
||||
error: Optional[str] = None,
|
||||
max_error_length: int = MAX_ERROR_MESSAGE_LENGTH,
|
||||
) -> None:
|
||||
database = get_database()
|
||||
connected = False
|
||||
|
||||
try:
|
||||
await database.connect()
|
||||
connected = True
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
if transcript:
|
||||
update_data: Dict[str, Any] = {"status": status}
|
||||
|
||||
if error:
|
||||
if len(error) > max_error_length:
|
||||
error = error[: max_error_length - 3] + "..."
|
||||
update_data["error"] = error
|
||||
|
||||
await transcripts_controller.update(transcript, update_data)
|
||||
|
||||
logger.info(
|
||||
"Updated transcript status",
|
||||
transcript_id=transcript_id,
|
||||
status=status,
|
||||
has_error=bool(error),
|
||||
)
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Failed to update transcript status",
|
||||
transcript_id=transcript_id,
|
||||
error=str(e),
|
||||
)
|
||||
finally:
|
||||
if connected:
|
||||
try:
|
||||
await database.disconnect()
|
||||
except Exception as e:
|
||||
logger.warning(f"Database disconnect failed: {e}")
|
||||
|
||||
|
||||
async def process_multitrack(
|
||||
bucket_name: str,
|
||||
track_keys: List[str],
|
||||
source_language: str,
|
||||
target_language: str,
|
||||
user_id: Optional[str] = None,
|
||||
timeout_seconds: int = DEFAULT_PROCESSING_TIMEOUT_SECONDS,
|
||||
status_callback: Optional[StatusCallback] = None,
|
||||
) -> MultitrackTaskResult:
|
||||
"""High-level orchestration for multitrack processing."""
|
||||
database = get_database()
|
||||
transcript = None
|
||||
connected = False
|
||||
|
||||
try:
|
||||
await database.connect()
|
||||
connected = True
|
||||
|
||||
transcript = await create_multitrack_transcript(
|
||||
bucket_name=bucket_name,
|
||||
track_keys=track_keys,
|
||||
source_language=source_language,
|
||||
target_language=target_language,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
result = submit_multitrack_task(
|
||||
transcript_id=transcript.id, bucket_name=bucket_name, track_keys=track_keys
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
if transcript:
|
||||
try:
|
||||
await update_transcript_status(
|
||||
transcript_id=transcript.id, status="failed", error=str(e)
|
||||
)
|
||||
except Exception as update_error:
|
||||
logger.error(
|
||||
"Failed to update transcript status after error",
|
||||
original_error=str(e),
|
||||
update_error=str(update_error),
|
||||
transcript_id=transcript.id,
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
if connected:
|
||||
try:
|
||||
await database.disconnect()
|
||||
except Exception as e:
|
||||
logger.warning(f"Database disconnect failed: {e}")
|
||||
|
||||
# Poll outside database connection
|
||||
task_result = await wait_for_task(
|
||||
result=result,
|
||||
transcript_id=transcript.id,
|
||||
timeout_seconds=timeout_seconds,
|
||||
poll_interval=2,
|
||||
status_callback=status_callback,
|
||||
)
|
||||
|
||||
if not task_result.success:
|
||||
await update_transcript_status(
|
||||
transcript_id=transcript.id, status="failed", error=task_result.error
|
||||
)
|
||||
|
||||
return task_result
|
||||
|
||||
|
||||
def print_progress(message: str) -> None:
|
||||
"""Print progress message to stderr for CLI visibility."""
|
||||
print(f"{message}", file=sys.stderr)
|
||||
|
||||
|
||||
def create_status_callback() -> StatusCallback:
|
||||
"""Create callback for task status updates during polling."""
|
||||
|
||||
def callback(state: str, elapsed_seconds: int) -> None:
|
||||
print_progress(
|
||||
f"Multitrack pipeline status: {state} (elapsed: {elapsed_seconds}s)"
|
||||
)
|
||||
|
||||
return callback
|
||||
|
||||
|
||||
async def process_multitrack_cli(
|
||||
s3_urls: List[str],
|
||||
source_language: str,
|
||||
target_language: str,
|
||||
output_path: Optional[str] = None,
|
||||
) -> None:
|
||||
if not s3_urls:
|
||||
raise ValueError("At least one track required for multitrack processing")
|
||||
|
||||
bucket_keys = []
|
||||
for url in s3_urls:
|
||||
try:
|
||||
bucket, key = parse_s3_url(url)
|
||||
bucket_keys.append((bucket, key))
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid S3 URL '{url}': {e}") from e
|
||||
|
||||
buckets = set(bucket for bucket, _ in bucket_keys)
|
||||
if len(buckets) > 1:
|
||||
raise ValueError(
|
||||
f"All tracks must be in the same S3 bucket. "
|
||||
f"Found {len(buckets)} different buckets: {sorted(buckets)}. "
|
||||
f"Please upload all files to a single bucket."
|
||||
)
|
||||
|
||||
primary_bucket = bucket_keys[0][0]
|
||||
track_keys = [key for _, key in bucket_keys]
|
||||
|
||||
print_progress(
|
||||
f"Starting multitrack CLI processing: "
|
||||
f"bucket={primary_bucket}, num_tracks={len(track_keys)}, "
|
||||
f"source_language={source_language}, target_language={target_language}"
|
||||
)
|
||||
|
||||
storage = get_transcripts_storage()
|
||||
await validate_s3_objects(storage, bucket_keys)
|
||||
print_progress(f"S3 validation complete: {len(bucket_keys)} objects verified")
|
||||
|
||||
result = await process_multitrack(
|
||||
bucket_name=primary_bucket,
|
||||
track_keys=track_keys,
|
||||
source_language=source_language,
|
||||
target_language=target_language,
|
||||
user_id=None,
|
||||
timeout_seconds=3600,
|
||||
status_callback=create_status_callback(),
|
||||
)
|
||||
|
||||
if not result.success:
|
||||
error_msg = (
|
||||
f"Multitrack pipeline failed for transcript {result.transcript_id}\n"
|
||||
)
|
||||
if result.error:
|
||||
error_msg += f"Error: {result.error}\n"
|
||||
raise RuntimeError(error_msg)
|
||||
|
||||
print_progress(
|
||||
f"Multitrack processing complete for transcript {result.transcript_id}"
|
||||
)
|
||||
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
try:
|
||||
await extract_result_from_entry(result.transcript_id, output_path)
|
||||
finally:
|
||||
await database.disconnect()
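For reference, a hypothetical invocation of the CLI helper above; the URLs are placeholders, and the S3 objects must exist and be readable with the configured credentials.

import asyncio

asyncio.run(
    process_multitrack_cli(
        s3_urls=[
            "s3://recordings-bucket/meetings/track-0.webm",
            "s3://recordings-bucket/meetings/track-1.webm",
        ],
        source_language="en",
        target_language="en",
        output_path="output.jsonl",
    )
)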
|
||||
@@ -9,12 +9,11 @@ async def export_db(filename: str) -> None:
|
||||
filename = pathlib.Path(filename).resolve()
|
||||
settings.DATABASE_URL = f"sqlite:///{filename}"
|
||||
|
||||
from reflector.db import get_database, transcripts
|
||||
from reflector.db import get_session_context
|
||||
from reflector.db.transcripts import transcripts_controller
|
||||
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
transcripts = await database.fetch_all(transcripts.select())
|
||||
await database.disconnect()
|
||||
async with get_session_context() as session:
|
||||
transcripts = await transcripts_controller.get_all(session)
|
||||
|
||||
def export_transcript(transcript, output_dir):
|
||||
for topic in transcript.topics:
|
||||
|
||||
@@ -8,12 +8,11 @@ async def export_db(filename: str) -> None:
|
||||
filename = pathlib.Path(filename).resolve()
|
||||
settings.DATABASE_URL = f"sqlite:///{filename}"
|
||||
|
||||
from reflector.db import get_database, transcripts
|
||||
from reflector.db import get_session_context
|
||||
from reflector.db.transcripts import transcripts_controller
|
||||
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
transcripts = await database.fetch_all(transcripts.select())
|
||||
await database.disconnect()
|
||||
async with get_session_context() as session:
|
||||
transcripts = await transcripts_controller.get_all(session)
|
||||
|
||||
def export_transcript(transcript):
|
||||
tid = transcript.id
|
||||
|
||||
@@ -7,13 +7,12 @@ import asyncio
|
||||
import json
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Literal, Tuple
|
||||
from urllib.parse import unquote, urlparse
|
||||
from typing import Any, Dict, List, Literal
|
||||
|
||||
from botocore.exceptions import BotoCoreError, ClientError, NoCredentialsError
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from reflector.db import get_session_context
|
||||
from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller
|
||||
from reflector.logger import logger
|
||||
from reflector.pipelines.main_file_pipeline import (
|
||||
@@ -23,119 +22,10 @@ from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipelin
|
||||
from reflector.pipelines.main_live_pipeline import (
|
||||
pipeline_process as live_pipeline_process,
|
||||
)
|
||||
from reflector.storage import Storage
|
||||
|
||||
|
||||
def validate_s3_bucket_name(bucket: str) -> None:
|
||||
if not bucket:
|
||||
raise ValueError("Bucket name cannot be empty")
|
||||
if len(bucket) > 255: # Absolute max for any region
|
||||
raise ValueError(f"Bucket name too long: {len(bucket)} characters (max 255)")
|
||||
|
||||
|
||||
def validate_s3_key(key: str) -> None:
|
||||
if not key:
|
||||
raise ValueError("S3 key cannot be empty")
|
||||
if len(key) > 1024:
|
||||
raise ValueError(f"S3 key too long: {len(key)} characters (max 1024)")
|
||||
|
||||
|
||||
def parse_s3_url(url: str) -> Tuple[str, str]:
|
||||
parsed = urlparse(url)
|
||||
|
||||
if parsed.scheme == "s3":
|
||||
bucket = parsed.netloc
|
||||
key = parsed.path.lstrip("/")
|
||||
if parsed.fragment:
|
||||
logger.debug(
|
||||
"URL fragment ignored (not part of S3 key)",
|
||||
url=url,
|
||||
fragment=parsed.fragment,
|
||||
)
|
||||
if not bucket or not key:
|
||||
raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
|
||||
bucket = unquote(bucket)
|
||||
key = unquote(key)
|
||||
validate_s3_bucket_name(bucket)
|
||||
validate_s3_key(key)
|
||||
return bucket, key
|
||||
|
||||
elif parsed.scheme in ("http", "https"):
|
||||
if ".s3." in parsed.netloc or parsed.netloc.endswith(".s3.amazonaws.com"):
|
||||
bucket = parsed.netloc.split(".")[0]
|
||||
key = parsed.path.lstrip("/")
|
||||
if parsed.fragment:
|
||||
logger.debug("URL fragment ignored", url=url, fragment=parsed.fragment)
|
||||
if not bucket or not key:
|
||||
raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
|
||||
bucket = unquote(bucket)
|
||||
key = unquote(key)
|
||||
validate_s3_bucket_name(bucket)
|
||||
validate_s3_key(key)
|
||||
return bucket, key
|
||||
|
||||
elif parsed.netloc.startswith("s3.") and "amazonaws.com" in parsed.netloc:
|
||||
path_parts = parsed.path.lstrip("/").split("/", 1)
|
||||
if len(path_parts) != 2:
|
||||
raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
|
||||
bucket, key = path_parts
|
||||
if parsed.fragment:
|
||||
logger.debug("URL fragment ignored", url=url, fragment=parsed.fragment)
|
||||
bucket = unquote(bucket)
|
||||
key = unquote(key)
|
||||
validate_s3_bucket_name(bucket)
|
||||
validate_s3_key(key)
|
||||
return bucket, key
|
||||
|
||||
else:
|
||||
raise ValueError(f"Invalid S3 URL format: {url} (not recognized as S3 URL)")
|
||||
|
||||
else:
|
||||
raise ValueError(f"Invalid S3 URL scheme: {url} (must be s3:// or https://)")
|
||||
|
||||
|
||||
async def validate_s3_objects(
|
||||
storage: Storage, bucket_keys: List[Tuple[str, str]]
|
||||
) -> None:
|
||||
async with storage.session.client("s3") as client:
|
||||
|
||||
async def check_object(bucket: str, key: str) -> None:
|
||||
try:
|
||||
await client.head_object(Bucket=bucket, Key=key)
|
||||
except ClientError as e:
|
||||
error_code = e.response["Error"]["Code"]
|
||||
if error_code in ("404", "NoSuchKey"):
|
||||
raise ValueError(f"S3 object not found: s3://{bucket}/{key}") from e
|
||||
elif error_code in ("403", "Forbidden", "AccessDenied"):
|
||||
raise ValueError(
|
||||
f"Access denied for S3 object: s3://{bucket}/{key}. "
|
||||
f"Check AWS credentials and permissions"
|
||||
) from e
|
||||
else:
|
||||
raise ValueError(
|
||||
f"S3 error {error_code} for s3://{bucket}/{key}: "
|
||||
f"{e.response['Error'].get('Message', 'Unknown error')}"
|
||||
) from e
|
||||
except NoCredentialsError as e:
|
||||
raise ValueError(
|
||||
"AWS credentials not configured. Set AWS_ACCESS_KEY_ID and "
|
||||
"AWS_SECRET_ACCESS_KEY environment variables"
|
||||
) from e
|
||||
except BotoCoreError as e:
|
||||
raise ValueError(
|
||||
f"AWS service error for s3://{bucket}/{key}: {str(e)}"
|
||||
) from e
|
||||
except Exception as e:
|
||||
raise ValueError(
|
||||
f"Unexpected error validating s3://{bucket}/{key}: {str(e)}"
|
||||
) from e
|
||||
|
||||
await asyncio.gather(
|
||||
*(check_object(bucket, key) for bucket, key in bucket_keys)
|
||||
)
|
||||
|
||||
|
||||
def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:
|
||||
"""Convert TranscriptTopic objects to JSON-serializable dicts"""
|
||||
serialized = []
|
||||
for topic in topics:
|
||||
topic_dict = topic.model_dump()
|
||||
@@ -144,6 +34,7 @@ def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:
|
||||
|
||||
|
||||
def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
|
||||
"""Print debug info about speakers found in topics"""
|
||||
all_speakers = set()
|
||||
for topic_dict in serialized_topics:
|
||||
for word in topic_dict.get("words", []):
|
||||
@@ -158,7 +49,10 @@ def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
|
||||
TranscriptId = str
|
||||
|
||||
|
||||
# common interface for every flow: it needs an Entry in db with specific ceremony (file path + status + actual file in file system)
|
||||
# ideally we want to get rid of it at some point
|
||||
async def prepare_entry(
|
||||
session: AsyncSession,
|
||||
source_path: str,
|
||||
source_language: str,
|
||||
target_language: str,
|
||||
@@ -166,6 +60,7 @@ async def prepare_entry(
|
||||
file_path = Path(source_path)
|
||||
|
||||
transcript = await transcripts_controller.add(
|
||||
session,
|
||||
file_path.name,
|
||||
        # note: real file uploads currently use SourceKind.LIVE, which is arguably an error
|
||||
source_kind=SourceKind.FILE,
|
||||
@@ -174,7 +69,9 @@ async def prepare_entry(
|
||||
user_id=None,
|
||||
)
|
||||
|
||||
logger.info(f"Created transcript {transcript.id} for {file_path.name}")
|
||||
logger.info(
|
||||
f"Created empty transcript {transcript.id} for file {file_path.name} because technically we need an empty transcript before we start transcript"
|
||||
)
|
||||
|
||||
# pipelines expect files as upload.*
|
||||
|
||||
@@ -185,15 +82,20 @@ async def prepare_entry(
|
||||
logger.info(f"Copied {source_path} to {upload_path}")
|
||||
|
||||
# pipelines expect entity status "uploaded"
|
||||
await transcripts_controller.update(transcript, {"status": "uploaded"})
|
||||
await transcripts_controller.update(session, transcript, {"status": "uploaded"})
|
||||
|
||||
return transcript.id
|
||||
|
||||
|
||||
# same reason as prepare_entry
|
||||
async def extract_result_from_entry(
|
||||
transcript_id: TranscriptId, output_path: str
|
||||
session: AsyncSession,
|
||||
transcript_id: TranscriptId,
|
||||
output_path: str,
|
||||
) -> None:
|
||||
post_final_transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
post_final_transcript = await transcripts_controller.get_by_id(
|
||||
session, transcript_id
|
||||
)
|
||||
|
||||
# assert post_final_transcript.status == "ended"
|
||||
# File pipeline doesn't set status to "ended", only live pipeline does https://github.com/Monadical-SAS/reflector/issues/582
|
||||
@@ -221,6 +123,7 @@ async def extract_result_from_entry(
|
||||
|
||||
|
||||
async def process_live_pipeline(
|
||||
session: AsyncSession,
|
||||
transcript_id: TranscriptId,
|
||||
):
|
||||
"""Process transcript_id with transcription and diarization"""
|
||||
@@ -229,18 +132,14 @@ async def process_live_pipeline(
|
||||
await live_pipeline_process(transcript_id=transcript_id)
|
||||
print(f"Processing complete for transcript {transcript_id}", file=sys.stderr)
|
||||
|
||||
pre_final_transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
pre_final_transcript = await transcripts_controller.get_by_id(
|
||||
session, transcript_id
|
||||
)
|
||||
|
||||
# assert documented behaviour: after process, the pipeline isn't ended. this is the reason of calling pipeline_post
|
||||
assert pre_final_transcript.status != "ended"
|
||||
|
||||
# at this point, diarization is running but we have no access to it. run diarization in parallel - one will hopefully win after polling
|
||||
result = live_pipeline_post(transcript_id=transcript_id)
|
||||
|
||||
# result.ready() blocks even without await; it mutates result also
|
||||
while not result.ready():
|
||||
print(f"Status: {result.state}")
|
||||
time.sleep(2)
|
||||
await live_pipeline_post(transcript_id=transcript_id)
|
||||
|
||||
|
||||
async def process_file_pipeline(
|
||||
@@ -248,13 +147,7 @@ async def process_file_pipeline(
|
||||
):
|
||||
"""Process audio/video file using the optimized file pipeline"""
|
||||
|
||||
# task_pipeline_file_process is a Celery task, need to use .delay() for async execution
|
||||
result = task_pipeline_file_process.delay(transcript_id=transcript_id)
|
||||
|
||||
# Wait for the Celery task to complete
|
||||
while not result.ready():
|
||||
print(f"File pipeline status: {result.state}", file=sys.stderr)
|
||||
time.sleep(2)
|
||||
await task_pipeline_file_process.kiq(transcript_id=transcript_id)
|
||||
|
||||
logger.info("File pipeline processing complete")
|
||||
|
||||
@@ -266,21 +159,16 @@ async def process(
|
||||
pipeline: Literal["live", "file"],
|
||||
output_path: str = None,
|
||||
):
|
||||
from reflector.db import get_database
|
||||
|
||||
database = get_database()
|
||||
# db connect is a part of ceremony
|
||||
await database.connect()
|
||||
|
||||
try:
|
||||
async with get_session_context() as session:
|
||||
transcript_id = await prepare_entry(
|
||||
session,
|
||||
source_path,
|
||||
source_language,
|
||||
target_language,
|
||||
)
|
||||
|
||||
pipeline_handlers = {
|
||||
"live": process_live_pipeline,
|
||||
"live": lambda tid: process_live_pipeline(session, tid),
|
||||
"file": process_file_pipeline,
|
||||
}
|
||||
|
||||
@@ -290,29 +178,20 @@ async def process(
|
||||
|
||||
await handler(transcript_id)
|
||||
|
||||
await extract_result_from_entry(transcript_id, output_path)
|
||||
finally:
|
||||
await database.disconnect()
|
||||
await extract_result_from_entry(session, transcript_id, output_path)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Process audio files with speaker diarization"
|
||||
)
|
||||
parser.add_argument(
|
||||
"source",
|
||||
help="Source file (mp3, wav, mp4...) or comma-separated S3 URLs with --multitrack",
|
||||
)
|
||||
parser.add_argument("source", help="Source file (mp3, wav, mp4...)")
|
||||
parser.add_argument(
|
||||
"--pipeline",
|
||||
required=True,
|
||||
choices=["live", "file"],
|
||||
help="Pipeline type to use for processing (live: streaming/incremental, file: batch/parallel)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--multitrack",
|
||||
action="store_true",
|
||||
help="Process multiple audio tracks from comma-separated S3 URLs",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--source-language", default="en", help="Source language code (default: en)"
|
||||
)
|
||||
@@ -322,40 +201,12 @@ if __name__ == "__main__":
|
||||
parser.add_argument("--output", "-o", help="Output file (output.jsonl)")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.multitrack:
|
||||
if not args.source:
|
||||
parser.error("Source URLs required for multitrack processing")
|
||||
|
||||
s3_urls = [url.strip() for url in args.source.split(",") if url.strip()]
|
||||
|
||||
if not s3_urls:
|
||||
parser.error("At least one S3 URL required for multitrack processing")
|
||||
|
||||
from reflector.tools.cli_multitrack import process_multitrack_cli
|
||||
|
||||
asyncio.run(
|
||||
process_multitrack_cli(
|
||||
s3_urls,
|
||||
args.source_language,
|
||||
args.target_language,
|
||||
args.output,
|
||||
)
|
||||
)
|
||||
else:
|
||||
if not args.pipeline:
|
||||
parser.error("--pipeline is required for single-track processing")
|
||||
|
||||
if "," in args.source:
|
||||
parser.error(
|
||||
"Multiple files detected. Use --multitrack flag for multitrack processing"
|
||||
)
|
||||
|
||||
asyncio.run(
|
||||
process(
|
||||
args.source,
|
||||
args.source_language,
|
||||
args.target_language,
|
||||
args.pipeline,
|
||||
args.output,
|
||||
)
|
||||
asyncio.run(
|
||||
process(
|
||||
args.source,
|
||||
args.source_language,
|
||||
args.target_language,
|
||||
args.pipeline,
|
||||
args.output,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1,171 +0,0 @@
|
||||
"""
|
||||
Process transcript by ID - auto-detects multitrack vs file pipeline.
|
||||
|
||||
Usage:
|
||||
uv run -m reflector.tools.process_transcript <transcript_id>
|
||||
|
||||
# Or via docker:
|
||||
docker compose exec server uv run -m reflector.tools.process_transcript <transcript_id>
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import sys
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
from celery.result import AsyncResult
|
||||
from hatchet_sdk.clients.rest.models import V1TaskStatus
|
||||
|
||||
from reflector.db import get_database
|
||||
from reflector.db.transcripts import Transcript, transcripts_controller
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
from reflector.services.transcript_process import (
|
||||
FileProcessingConfig,
|
||||
MultitrackProcessingConfig,
|
||||
PrepareResult,
|
||||
ProcessError,
|
||||
ValidationError,
|
||||
ValidationResult,
|
||||
dispatch_transcript_processing,
|
||||
prepare_transcript_processing,
|
||||
validate_transcript_for_processing,
|
||||
)
|
||||
|
||||
|
||||
async def process_transcript_inner(
|
||||
transcript: Transcript,
|
||||
on_validation: Callable[[ValidationResult], None],
|
||||
on_preprocess: Callable[[PrepareResult], None],
|
||||
force: bool = False,
|
||||
) -> AsyncResult | None:
|
||||
validation = await validate_transcript_for_processing(transcript)
|
||||
on_validation(validation)
|
||||
config = await prepare_transcript_processing(validation)
|
||||
on_preprocess(config)
|
||||
return await dispatch_transcript_processing(config, force=force)
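A hedged sketch of calling the helper above from somewhere other than this CLI, with callbacks that raise instead of exiting the process; the callback name is hypothetical.

def fail_fast(outcome):
    # Hypothetical callback: turn validation/preparation errors into exceptions.
    if isinstance(outcome, (ValidationError, ProcessError)):
        raise RuntimeError(outcome.detail)

result = await process_transcript_inner(
    transcript,
    on_validation=fail_fast,
    on_preprocess=fail_fast,
    force=False,
)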
|
||||
|
||||
|
||||
async def process_transcript(
|
||||
transcript_id: str, sync: bool = False, force: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Process a transcript by ID, auto-detecting multitrack vs file pipeline.
|
||||
|
||||
Args:
|
||||
transcript_id: The transcript UUID
|
||||
sync: If True, wait for task completion. If False, dispatch and exit.
|
||||
force: If True, cancel old workflow and start new (latest code). If False, replay failed workflow.
|
||||
"""
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
|
||||
try:
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
if not transcript:
|
||||
print(f"Error: Transcript {transcript_id} not found", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
print(f"Found transcript: {transcript.title or transcript_id}", file=sys.stderr)
|
||||
print(f" Status: {transcript.status}", file=sys.stderr)
|
||||
print(f" Recording ID: {transcript.recording_id or 'None'}", file=sys.stderr)
|
||||
|
||||
def on_validation(validation: ValidationResult) -> None:
|
||||
if isinstance(validation, ValidationError):
|
||||
print(f"Error: {validation.detail}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
def on_preprocess(config: PrepareResult) -> None:
|
||||
if isinstance(config, ProcessError):
|
||||
print(f"Error: {config.detail}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
elif isinstance(config, MultitrackProcessingConfig):
|
||||
print(f"Dispatching multitrack pipeline", file=sys.stderr)
|
||||
print(f" Bucket: {config.bucket_name}", file=sys.stderr)
|
||||
print(f" Tracks: {len(config.track_keys)}", file=sys.stderr)
|
||||
elif isinstance(config, FileProcessingConfig):
|
||||
print(f"Dispatching file pipeline", file=sys.stderr)
|
||||
|
||||
result = await process_transcript_inner(
|
||||
transcript,
|
||||
on_validation=on_validation,
|
||||
on_preprocess=on_preprocess,
|
||||
force=force,
|
||||
)
|
||||
|
||||
if result is None:
|
||||
# Hatchet workflow dispatched
|
||||
if sync:
|
||||
# Re-fetch transcript to get workflow_run_id
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
if not transcript or not transcript.workflow_run_id:
|
||||
print("Error: workflow_run_id not found", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
print("Waiting for Hatchet workflow...", file=sys.stderr)
|
||||
while True:
|
||||
status = await HatchetClientManager.get_workflow_run_status(
|
||||
transcript.workflow_run_id
|
||||
)
|
||||
print(f" Status: {status.value}", file=sys.stderr)
|
||||
|
||||
if status == V1TaskStatus.COMPLETED:
|
||||
print("Workflow completed successfully", file=sys.stderr)
|
||||
break
|
||||
elif status in (V1TaskStatus.FAILED, V1TaskStatus.CANCELLED):
|
||||
print(f"Workflow failed: {status}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
await asyncio.sleep(5)
|
||||
else:
|
||||
print(
|
||||
"Task dispatched (use --sync to wait for completion)",
|
||||
file=sys.stderr,
|
||||
)
|
||||
elif sync:
|
||||
print("Waiting for task completion...", file=sys.stderr)
|
||||
while not result.ready():
|
||||
print(f" Status: {result.state}", file=sys.stderr)
|
||||
time.sleep(5)
|
||||
|
||||
if result.successful():
|
||||
print("Task completed successfully", file=sys.stderr)
|
||||
else:
|
||||
print(f"Task failed: {result.result}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
else:
|
||||
print(
|
||||
"Task dispatched (use --sync to wait for completion)", file=sys.stderr
|
||||
)
|
||||
|
||||
finally:
|
||||
await database.disconnect()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Process transcript by ID - auto-detects multitrack vs file pipeline"
|
||||
)
|
||||
parser.add_argument(
|
||||
"transcript_id",
|
||||
help="Transcript UUID to process",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sync",
|
||||
action="store_true",
|
||||
help="Wait for task completion instead of just dispatching",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--force",
|
||||
action="store_true",
|
||||
help="Cancel old workflow and start new (uses latest code instead of replaying)",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
asyncio.run(
|
||||
process_transcript(args.transcript_id, sync=args.sync, force=args.force)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,14 +1,10 @@
|
||||
import argparse
|
||||
import asyncio
|
||||
|
||||
from reflector.app import celery_app # noqa
|
||||
from reflector.pipelines.main_live_pipeline import task_pipeline_main_post
|
||||
from reflector.pipelines.main_live_pipeline import pipeline_post
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("transcript_id", type=str)
|
||||
parser.add_argument("--delay", action="store_true")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.delay:
|
||||
task_pipeline_main_post.delay(args.transcript_id)
|
||||
else:
|
||||
task_pipeline_main_post(args.transcript_id)
|
||||
asyncio.run(pipeline_post(transcript_id=args.transcript_id))
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
"""
|
||||
Shared audio processing constants.
|
||||
|
||||
Used by both Hatchet workflows and Celery pipelines for consistent audio encoding.
|
||||
"""
|
||||
|
||||
# Opus codec settings
|
||||
OPUS_STANDARD_SAMPLE_RATE = 48000
|
||||
OPUS_DEFAULT_BIT_RATE = 128000 # 128kbps for good speech quality
|
||||
|
||||
# S3 presigned URL expiration
|
||||
PRESIGNED_URL_EXPIRATION_SECONDS = 7200 # 2 hours
|
||||
|
||||
# Waveform visualization
|
||||
WAVEFORM_SEGMENTS = 255
|
||||
@@ -1,227 +0,0 @@
|
||||
"""
|
||||
Audio track mixdown utilities.
|
||||
|
||||
Shared PyAV-based functions for mixing multiple audio tracks into a single output.
|
||||
Used by both Hatchet workflows and Celery pipelines.
|
||||
"""
|
||||
|
||||
from fractions import Fraction
|
||||
|
||||
import av
|
||||
from av.audio.resampler import AudioResampler
|
||||
|
||||
|
||||
def detect_sample_rate_from_tracks(track_urls: list[str], logger=None) -> int | None:
|
||||
"""Detect sample rate from first decodable audio frame.
|
||||
|
||||
Args:
|
||||
track_urls: List of URLs to audio files (S3 presigned or local)
|
||||
logger: Optional logger instance
|
||||
|
||||
Returns:
|
||||
Sample rate in Hz, or None if no decodable frames found
|
||||
"""
|
||||
for url in track_urls:
|
||||
if not url:
|
||||
continue
|
||||
container = None
|
||||
try:
|
||||
container = av.open(url)
|
||||
for frame in container.decode(audio=0):
|
||||
return frame.sample_rate
|
||||
except Exception:
|
||||
continue
|
||||
finally:
|
||||
if container is not None:
|
||||
container.close()
|
||||
return None
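A small usage sketch, assuming the caller falls back to the shared Opus default when nothing can be decoded; the file names are hypothetical.

from reflector.utils.audio_constants import OPUS_STANDARD_SAMPLE_RATE

track_urls = ["track-0.webm", "track-1.webm"]  # local files or presigned URLs
rate = detect_sample_rate_from_tracks(track_urls) or OPUS_STANDARD_SAMPLE_RATE  # 48 kHz fallback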
|
||||
|
||||
|
||||
async def mixdown_tracks_pyav(
|
||||
track_urls: list[str],
|
||||
writer,
|
||||
target_sample_rate: int,
|
||||
offsets_seconds: list[float] | None = None,
|
||||
logger=None,
|
||||
) -> None:
|
||||
"""Multi-track mixdown using PyAV filter graph (amix).
|
||||
|
||||
Builds a filter graph: N abuffer -> optional adelay -> amix -> aformat -> sink
|
||||
Reads from S3 presigned URLs or local files, pushes mixed frames to writer.
|
||||
|
||||
Args:
|
||||
track_urls: List of URLs to audio tracks (S3 presigned or local)
|
||||
writer: AudioFileWriterProcessor instance with async push() method
|
||||
target_sample_rate: Sample rate for output (Hz)
|
||||
offsets_seconds: Optional per-track delays in seconds for alignment.
|
||||
If provided, must have same length as track_urls. Delays are relative
|
||||
to the minimum offset (earliest track has delay=0).
|
||||
logger: Optional logger instance
|
||||
|
||||
Raises:
|
||||
ValueError: If offsets_seconds length doesn't match track_urls,
|
||||
no valid tracks provided, or no containers can be opened
|
||||
"""
|
||||
if offsets_seconds is not None and len(offsets_seconds) != len(track_urls):
|
||||
raise ValueError(
|
||||
f"offsets_seconds length ({len(offsets_seconds)}) must match track_urls ({len(track_urls)})"
|
||||
)
|
||||
|
||||
valid_track_urls = [url for url in track_urls if url]
|
||||
if not valid_track_urls:
|
||||
if logger:
|
||||
logger.error("Mixdown failed - no valid track URLs provided")
|
||||
raise ValueError("Mixdown failed: No valid track URLs")
|
||||
|
||||
# Calculate per-input delays if offsets provided
|
||||
input_offsets_seconds = None
|
||||
if offsets_seconds is not None:
|
||||
input_offsets_seconds = [
|
||||
offsets_seconds[i] for i, url in enumerate(track_urls) if url
|
||||
]
|
||||
|
||||
# Build PyAV filter graph:
|
||||
# N abuffer (s32/stereo)
|
||||
# -> optional adelay per input (for alignment)
|
||||
# -> amix (s32)
|
||||
# -> aformat(s16)
|
||||
# -> sink
|
||||
graph = av.filter.Graph()
|
||||
inputs = []
|
||||
|
||||
for idx, url in enumerate(valid_track_urls):
|
||||
args = (
|
||||
f"time_base=1/{target_sample_rate}:"
|
||||
f"sample_rate={target_sample_rate}:"
|
||||
f"sample_fmt=s32:"
|
||||
f"channel_layout=stereo"
|
||||
)
|
||||
in_ctx = graph.add("abuffer", args=args, name=f"in{idx}")
|
||||
inputs.append(in_ctx)
|
||||
|
||||
if not inputs:
|
||||
if logger:
|
||||
logger.error("Mixdown failed - no valid inputs for graph")
|
||||
raise ValueError("Mixdown failed: No valid inputs for filter graph")
|
||||
|
||||
mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix")
|
||||
|
||||
fmt = graph.add(
|
||||
"aformat",
|
||||
args=f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}",
|
||||
name="fmt",
|
||||
)
|
||||
|
||||
sink = graph.add("abuffersink", name="out")
|
||||
|
||||
# Optional per-input delay before mixing
|
||||
delays_ms: list[int] = []
|
||||
if input_offsets_seconds is not None:
|
||||
base = min(input_offsets_seconds) if input_offsets_seconds else 0.0
|
||||
delays_ms = [
|
||||
max(0, int(round((o - base) * 1000))) for o in input_offsets_seconds
|
||||
]
|
||||
else:
|
||||
delays_ms = [0 for _ in inputs]
|
||||
|
||||
for idx, in_ctx in enumerate(inputs):
|
||||
delay_ms = delays_ms[idx] if idx < len(delays_ms) else 0
|
||||
if delay_ms > 0:
|
||||
# adelay requires one value per channel; use same for stereo
|
||||
adelay = graph.add(
|
||||
"adelay",
|
||||
args=f"delays={delay_ms}|{delay_ms}:all=1",
|
||||
name=f"delay{idx}",
|
||||
)
|
||||
in_ctx.link_to(adelay)
|
||||
adelay.link_to(mixer, 0, idx)
|
||||
else:
|
||||
in_ctx.link_to(mixer, 0, idx)
|
||||
|
||||
mixer.link_to(fmt)
|
||||
fmt.link_to(sink)
|
||||
graph.configure()
|
||||
|
||||
containers = []
|
||||
try:
|
||||
# Open all containers with cleanup guaranteed
|
||||
for i, url in enumerate(valid_track_urls):
|
||||
try:
|
||||
c = av.open(
|
||||
url,
|
||||
options={
|
||||
# S3 streaming options
|
||||
"reconnect": "1",
|
||||
"reconnect_streamed": "1",
|
||||
"reconnect_delay_max": "5",
|
||||
},
|
||||
)
|
||||
containers.append(c)
|
||||
except Exception as e:
|
||||
if logger:
|
||||
logger.warning(
|
||||
"Mixdown: failed to open container from URL",
|
||||
input=i,
|
||||
url=url,
|
||||
error=str(e),
|
||||
)
|
||||
|
||||
if not containers:
|
||||
if logger:
|
||||
logger.error("Mixdown failed - no valid containers opened")
|
||||
raise ValueError("Mixdown failed: Could not open any track containers")
|
||||
|
||||
decoders = [c.decode(audio=0) for c in containers]
|
||||
active = [True] * len(decoders)
|
||||
resamplers = [
|
||||
AudioResampler(format="s32", layout="stereo", rate=target_sample_rate)
|
||||
for _ in decoders
|
||||
]
|
||||
|
||||
while any(active):
|
||||
for i, (dec, is_active) in enumerate(zip(decoders, active)):
|
||||
if not is_active:
|
||||
continue
|
||||
try:
|
||||
frame = next(dec)
|
||||
except StopIteration:
|
||||
active[i] = False
|
||||
# Signal end of stream to filter graph
|
||||
inputs[i].push(None)
|
||||
continue
|
||||
|
||||
if frame.sample_rate != target_sample_rate:
|
||||
continue
|
||||
out_frames = resamplers[i].resample(frame) or []
|
||||
for rf in out_frames:
|
||||
rf.sample_rate = target_sample_rate
|
||||
rf.time_base = Fraction(1, target_sample_rate)
|
||||
inputs[i].push(rf)
|
||||
|
||||
while True:
|
||||
try:
|
||||
mixed = sink.pull()
|
||||
except Exception:
|
||||
break
|
||||
mixed.sample_rate = target_sample_rate
|
||||
mixed.time_base = Fraction(1, target_sample_rate)
|
||||
await writer.push(mixed)
|
||||
|
||||
# Flush remaining frames from filter graph
|
||||
while True:
|
||||
try:
|
||||
mixed = sink.pull()
|
||||
except Exception:
|
||||
break
|
||||
mixed.sample_rate = target_sample_rate
|
||||
mixed.time_base = Fraction(1, target_sample_rate)
|
||||
await writer.push(mixed)
|
||||
|
||||
finally:
|
||||
# Cleanup all containers, even if processing failed
|
||||
for c in containers:
|
||||
if c is not None:
|
||||
try:
|
||||
c.close()
|
||||
except Exception:
|
||||
pass # Best effort cleanup
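A hedged sketch of calling the mixdown above with a stand-in writer; the real pipeline presumably passes an AudioFileWriterProcessor, so the class and file names here are hypothetical.

import asyncio

class CollectingWriter:
    # Hypothetical stand-in for the pipeline's writer: it only counts mixed samples.
    def __init__(self):
        self.samples = 0

    async def push(self, frame):
        self.samples += frame.samples

async def demo_mixdown():
    writer = CollectingWriter()
    await mixdown_tracks_pyav(
        track_urls=["track-0-padded.webm", "track-1-padded.webm"],  # local files or presigned URLs
        writer=writer,
        target_sample_rate=48000,
        offsets_seconds=[0.0, 1.25],  # second speaker joined 1.25 s later
    )
    print(f"mixed {writer.samples} samples")

asyncio.run(demo_mixdown())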
|
||||
@@ -1,186 +0,0 @@
|
||||
"""
|
||||
Audio track padding utilities.
|
||||
|
||||
Shared PyAV-based functions for extracting stream metadata and applying
|
||||
silence padding to audio tracks. Used by both Hatchet workflows and Celery pipelines.
|
||||
"""
|
||||
|
||||
import math
|
||||
from fractions import Fraction
|
||||
|
||||
import av
|
||||
from av.audio.resampler import AudioResampler
|
||||
|
||||
from reflector.utils.audio_constants import (
|
||||
OPUS_DEFAULT_BIT_RATE,
|
||||
OPUS_STANDARD_SAMPLE_RATE,
|
||||
)
|
||||
|
||||
|
||||
def extract_stream_start_time_from_container(
|
||||
container,
|
||||
track_idx: int,
|
||||
logger=None,
|
||||
) -> float:
|
||||
"""Extract meeting-relative start time from WebM stream metadata.
|
||||
|
||||
Uses PyAV to read stream.start_time from WebM container.
|
||||
More accurate than filename timestamps by ~209ms due to network/encoding delays.
|
||||
|
||||
Args:
|
||||
container: PyAV container opened from audio file/URL
|
||||
track_idx: Track index for logging context
|
||||
logger: Optional logger instance (structlog or stdlib compatible)
|
||||
|
||||
Returns:
|
||||
Start time in seconds (0.0 if extraction fails)
|
||||
"""
|
||||
start_time_seconds = 0.0
|
||||
try:
|
||||
audio_streams = [s for s in container.streams if s.type == "audio"]
|
||||
stream = audio_streams[0] if audio_streams else container.streams[0]
|
||||
|
||||
# 1) Try stream-level start_time (most reliable for Daily.co tracks)
|
||||
if stream.start_time is not None and stream.time_base is not None:
|
||||
start_time_seconds = float(stream.start_time * stream.time_base)
|
||||
|
||||
# 2) Fallback to container-level start_time (in av.time_base units)
|
||||
if (start_time_seconds <= 0) and (container.start_time is not None):
|
||||
start_time_seconds = float(container.start_time * av.time_base)
|
||||
|
||||
# 3) Fallback to first packet DTS in stream.time_base
|
||||
if start_time_seconds <= 0:
|
||||
for packet in container.demux(stream):
|
||||
if packet.dts is not None:
|
||||
start_time_seconds = float(packet.dts * stream.time_base)
|
||||
break
|
||||
except Exception as e:
|
||||
if logger:
|
||||
logger.warning(
|
||||
"PyAV metadata read failed; assuming 0 start_time",
|
||||
track_idx=track_idx,
|
||||
error=str(e),
|
||||
)
|
||||
start_time_seconds = 0.0
|
||||
|
||||
if logger:
|
||||
logger.info(
|
||||
f"Track {track_idx} stream metadata: start_time={start_time_seconds:.3f}s",
|
||||
track_idx=track_idx,
|
||||
)
|
||||
return start_time_seconds
|
||||
|
||||
|
||||
def apply_audio_padding_to_file(
|
||||
in_container,
|
||||
output_path: str,
|
||||
start_time_seconds: float,
|
||||
track_idx: int,
|
||||
logger=None,
|
||||
) -> None:
|
||||
"""Apply silence padding to audio track using PyAV filter graph.
|
||||
|
||||
Uses adelay filter to prepend silence, aligning track to meeting start time.
|
||||
Output is WebM/Opus format.
|
||||
|
||||
Args:
|
||||
in_container: PyAV container opened from source audio
|
||||
output_path: Path for output WebM file
|
||||
start_time_seconds: Amount of silence to prepend (in seconds)
|
||||
track_idx: Track index for logging context
|
||||
logger: Optional logger instance (structlog or stdlib compatible)
|
||||
|
||||
Raises:
|
||||
Exception: If no audio stream found or PyAV processing fails
|
||||
"""
|
||||
delay_ms = math.floor(start_time_seconds * 1000)
|
||||
|
||||
if logger:
|
||||
logger.info(
|
||||
f"Padding track {track_idx} with {delay_ms}ms delay using PyAV",
|
||||
track_idx=track_idx,
|
||||
delay_ms=delay_ms,
|
||||
)
|
||||
|
||||
try:
|
||||
with av.open(output_path, "w", format="webm") as out_container:
|
||||
in_stream = next(
|
||||
(s for s in in_container.streams if s.type == "audio"), None
|
||||
)
|
||||
if in_stream is None:
|
||||
raise Exception("No audio stream in input")
|
||||
|
||||
out_stream = out_container.add_stream(
|
||||
"libopus", rate=OPUS_STANDARD_SAMPLE_RATE
|
||||
)
|
||||
out_stream.bit_rate = OPUS_DEFAULT_BIT_RATE
|
||||
graph = av.filter.Graph()
|
||||
|
||||
abuf_args = (
|
||||
f"time_base=1/{OPUS_STANDARD_SAMPLE_RATE}:"
|
||||
f"sample_rate={OPUS_STANDARD_SAMPLE_RATE}:"
|
||||
f"sample_fmt=s16:"
|
||||
f"channel_layout=stereo"
|
||||
)
|
||||
src = graph.add("abuffer", args=abuf_args, name="src")
|
||||
aresample_f = graph.add("aresample", args="async=1", name="ares")
|
||||
# adelay requires one delay value per channel separated by '|'
|
||||
delays_arg = f"{delay_ms}|{delay_ms}"
|
||||
adelay_f = graph.add(
|
||||
"adelay", args=f"delays={delays_arg}:all=1", name="delay"
|
||||
)
|
||||
sink = graph.add("abuffersink", name="sink")
|
||||
|
||||
src.link_to(aresample_f)
|
||||
aresample_f.link_to(adelay_f)
|
||||
adelay_f.link_to(sink)
|
||||
graph.configure()
|
||||
|
||||
resampler = AudioResampler(
|
||||
format="s16", layout="stereo", rate=OPUS_STANDARD_SAMPLE_RATE
|
||||
)
|
||||
|
||||
# Decode -> resample -> push through graph -> encode Opus
|
||||
for frame in in_container.decode(in_stream):
|
||||
out_frames = resampler.resample(frame) or []
|
||||
for rframe in out_frames:
|
||||
rframe.sample_rate = OPUS_STANDARD_SAMPLE_RATE
|
||||
rframe.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
|
||||
src.push(rframe)
|
||||
|
||||
while True:
|
||||
try:
|
||||
f_out = sink.pull()
|
||||
except Exception:
|
||||
break
|
||||
f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
|
||||
f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
|
||||
for packet in out_stream.encode(f_out):
|
||||
out_container.mux(packet)
|
||||
|
||||
# Flush remaining frames from filter graph
|
||||
src.push(None)
|
||||
while True:
|
||||
try:
|
||||
f_out = sink.pull()
|
||||
except Exception:
|
||||
break
|
||||
f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
|
||||
f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
|
||||
for packet in out_stream.encode(f_out):
|
||||
out_container.mux(packet)
|
||||
|
||||
# Flush encoder
|
||||
for packet in out_stream.encode(None):
|
||||
out_container.mux(packet)
|
||||
|
||||
except Exception as e:
|
||||
if logger:
|
||||
logger.error(
|
||||
"PyAV padding failed for track",
|
||||
track_idx=track_idx,
|
||||
delay_ms=delay_ms,
|
||||
error=str(e),
|
||||
exc_info=True,
|
||||
)
|
||||
raise
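A hedged sketch of using the two helpers in this file together: probe the stream offset first, then re-open the source and write a padded copy. Filenames are hypothetical.

import av

with av.open("track-0.webm") as probe:
    offset = extract_stream_start_time_from_container(probe, track_idx=0)

with av.open("track-0.webm") as src:
    apply_audio_padding_to_file(
        src,
        output_path="track-0-padded.webm",
        start_time_seconds=offset,
        track_idx=0,
    )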
|
||||
@@ -1,4 +0,0 @@
|
||||
def assert_not_none[T](value: T | None, message: str = "Value is None") -> T:
|
||||
if value is None:
|
||||
raise ValueError(message)
|
||||
return value
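Typical use is narrowing an Optional lookup result before passing it on; the names below are hypothetical.

transcript = assert_not_none(
    maybe_transcript, f"Transcript {transcript_id} not found"
)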
|
||||
@@ -1,92 +0,0 @@
|
||||
import os
|
||||
import re
|
||||
from typing import NamedTuple
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
DailyRoomName = NonEmptyString
|
||||
|
||||
|
||||
class DailyRecordingFilename(NamedTuple):
|
||||
"""Parsed components from Daily.co recording filename.
|
||||
|
||||
Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}
|
||||
Example: 1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582
|
||||
|
||||
Note: S3 object keys have no extension, but browsers add .webm when downloading
|
||||
    from the S3 UI because of the MIME type headers, which explains the extension if you download a file manually.
|
||||
"""
|
||||
|
||||
recording_start_ts: int
|
||||
participant_id: str
|
||||
track_start_ts: int
|
||||
|
||||
|
||||
def parse_daily_recording_filename(filename: str) -> DailyRecordingFilename:
|
||||
"""Parse Daily.co recording filename to extract timestamps and participant ID.
|
||||
|
||||
Args:
|
||||
filename: Full path or basename of Daily.co recording file
|
||||
Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}
|
||||
|
||||
Returns:
|
||||
DailyRecordingFilename with parsed components
|
||||
|
||||
Raises:
|
||||
ValueError: If filename doesn't match expected format
|
||||
|
||||
Examples:
|
||||
>>> parse_daily_recording_filename("1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582")
|
||||
DailyRecordingFilename(recording_start_ts=1763152299562, participant_id='12f0b87c-97d4-4dd3-a65c-cee1f854a79c', track_start_ts=1763152314582)
|
||||
"""
|
||||
base = os.path.basename(filename)
|
||||
pattern = r"(\d{13,})-([0-9a-fA-F-]{36})-cam-audio-(\d{13,})"
|
||||
match = re.search(pattern, base)
|
||||
|
||||
if not match:
|
||||
raise ValueError(
|
||||
f"Invalid Daily.co recording filename: {filename}. "
|
||||
f"Expected format: {{recording_start_ts}}-{{participant_id}}-cam-audio-{{track_start_ts}}"
|
||||
)
|
||||
|
||||
recording_start_ts = int(match.group(1))
|
||||
participant_id = match.group(2)
|
||||
track_start_ts = int(match.group(3))
|
||||
|
||||
return DailyRecordingFilename(
|
||||
recording_start_ts=recording_start_ts,
|
||||
participant_id=participant_id,
|
||||
track_start_ts=track_start_ts,
|
||||
)
|
||||
|
||||
|
||||
def recording_lock_key(recording_id: NonEmptyString) -> NonEmptyString:
|
||||
return f"recording:{recording_id}"
|
||||
|
||||
|
||||
def filter_cam_audio_tracks(track_keys: list[str]) -> list[str]:
|
||||
"""Filter track keys to cam-audio tracks only (skip screen-audio, etc.)."""
|
||||
return [k for k in track_keys if "cam-audio" in k]
|
||||
|
||||
|
||||
def extract_base_room_name(daily_room_name: DailyRoomName) -> NonEmptyString:
|
||||
"""
|
||||
Extract base room name from Daily.co timestamped room name.
|
||||
|
||||
Daily.co creates rooms with timestamp suffix: {base_name}-YYYYMMDDHHMMSS
|
||||
This function removes the timestamp to get the original room name.
|
||||
|
||||
Examples:
|
||||
"daily-20251020193458" → "daily"
|
||||
"daily-2-20251020193458" → "daily-2"
|
||||
"my-room-name-20251020193458" → "my-room-name"
|
||||
|
||||
Args:
|
||||
daily_room_name: Full Daily.co room name with optional timestamp
|
||||
|
||||
Returns:
|
||||
Base room name without timestamp suffix
|
||||
"""
|
||||
base_name = daily_room_name.rsplit("-", 1)[0]
|
||||
assert base_name, f"Extracted base name is empty from: {daily_room_name}"
|
||||
return base_name
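Putting the helpers in this module together on the example key from the docstring above:

keys = [
    "1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582",
    "1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-screen-audio-1763152314582",
]
audio_keys = filter_cam_audio_tracks(keys)             # keeps only the cam-audio key
parsed = parse_daily_recording_filename(audio_keys[0])
print(parsed.participant_id, parsed.track_start_ts - parsed.recording_start_ts)
print(extract_base_room_name("daily-2-20251020193458"))  # -> "daily-2"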
|
||||
@@ -1,9 +0,0 @@
|
||||
from datetime import datetime, timezone
|
||||
|
||||
|
||||
def parse_datetime_with_timezone(iso_string: str) -> datetime:
|
||||
"""Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive)."""
|
||||
dt = datetime.fromisoformat(iso_string)
|
||||
if dt.tzinfo is None:
|
||||
dt = dt.replace(tzinfo=timezone.utc)
|
||||
return dt
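Behaviour in both cases, naive and timezone-aware input:

parse_datetime_with_timezone("2025-10-20T19:34:58")        # naive -> assumes UTC
parse_datetime_with_timezone("2025-10-20T19:34:58+02:00")  # offset is preserved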
|