Mirror of https://github.com/Monadical-SAS/reflector.git, synced 2026-04-03 04:36:45 +00:00
Compare commits
50 Commits
| SHA1 |
|---|
| 61d6fbd344 |
| 7b3b5b9858 |
| a22789d548 |
| e3cc646cf5 |
| 778ff6268c |
| d164e486cc |
| 12bf0c2d77 |
| bfaf4f403b |
| 0258754a4c |
| ea89fa5261 |
| 1f98790e7b |
| 7b8d190c52 |
| f19113a3cf |
| e2ba502697 |
| 74b9b97453 |
| 9e37d60b3f |
| 55222ecc47 |
| 41e7b3e84f |
| e5712a4168 |
| a76f114378 |
| cb1beae90d |
| 1e396ca0ca |
| 9a2f973a2e |
| a9200d35bf |
| 5646319e96 |
| d0472ebf5f |
| 628a6d735c |
| 37a1f01850 |
| 72dca7cacc |
| 4ae56b730a |
| cf6e867cf1 |
| 183601a121 |
| b53c8da398 |
| 22a50bb94d |
| 504ca74184 |
| a455b8090a |
| 6b0292d5f0 |
| 304315daaf |
| 7845f679c3 |
| c155f66982 |
| a682846645 |
| 4235ab4293 |
| f5ec2d28cf |
| ac46c60a7c |
| 1d1a520be9 |
| 9e64d52461 |
| 0931095f49 |
| 4d915e2a9f |
| 045eae8ff2 |
| f6cc03286b |
139 .github/workflows/integration_tests.yml (vendored, new file)
@@ -0,0 +1,139 @@
name: Integration Tests

on:
  workflow_dispatch:
    inputs:
      llm_model:
        description: "LLM model name (overrides LLM_MODEL secret)"
        required: false
        default: ""
        type: string

jobs:
  integration:
    runs-on: ubuntu-latest
    timeout-minutes: 60

    steps:
      - uses: actions/checkout@v4

      - name: Start infrastructure services
        working-directory: server/tests
        env:
          LLM_URL: ${{ secrets.LLM_URL }}
          LLM_MODEL: ${{ inputs.llm_model || secrets.LLM_MODEL }}
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          docker compose -f docker-compose.integration.yml up -d --build postgres redis garage hatchet mock-daily

      - name: Set up Garage bucket and keys
        working-directory: server/tests
        run: |
          GARAGE="docker compose -f docker-compose.integration.yml exec -T garage /garage"
          GARAGE_KEY_ID="GK0123456789abcdef01234567" # gitleaks:allow
          GARAGE_KEY_SECRET="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" # gitleaks:allow

          echo "Waiting for Garage to be healthy..."
          for i in $(seq 1 60); do
            if $GARAGE stats &>/dev/null; then break; fi
            sleep 2
          done

          echo "Setting up Garage..."
          NODE_ID=$($GARAGE node id -q 2>&1 | tr -d '[:space:]')
          LAYOUT_STATUS=$($GARAGE layout show 2>&1 || true)
          if echo "$LAYOUT_STATUS" | grep -q "No nodes"; then
            $GARAGE layout assign "$NODE_ID" -c 1G -z dc1
            $GARAGE layout apply --version 1
          fi

          $GARAGE bucket info reflector-media &>/dev/null || $GARAGE bucket create reflector-media
          if ! $GARAGE key info reflector-test &>/dev/null; then
            $GARAGE key import --yes "$GARAGE_KEY_ID" "$GARAGE_KEY_SECRET"
            $GARAGE key rename "$GARAGE_KEY_ID" reflector-test
          fi
          $GARAGE bucket allow reflector-media --read --write --key reflector-test

      - name: Wait for Hatchet and generate API token
        working-directory: server/tests
        run: |
          echo "Waiting for Hatchet to be healthy..."
          for i in $(seq 1 90); do
            if docker compose -f docker-compose.integration.yml exec -T hatchet curl -sf http://localhost:8888/api/live &>/dev/null; then
              echo "Hatchet is ready."
              break
            fi
            sleep 2
          done

          echo "Generating Hatchet API token..."
          HATCHET_OUTPUT=$(docker compose -f docker-compose.integration.yml exec -T hatchet \
            /hatchet-admin token create --config /config --name integration-test 2>&1)
          HATCHET_TOKEN=$(echo "$HATCHET_OUTPUT" | grep -o 'eyJ[A-Za-z0-9_.\-]*')
          if [ -z "$HATCHET_TOKEN" ]; then
            echo "ERROR: Failed to extract Hatchet JWT token"
            exit 1
          fi
          echo "HATCHET_CLIENT_TOKEN=${HATCHET_TOKEN}" >> $GITHUB_ENV

      - name: Start backend services
        working-directory: server/tests
        env:
          LLM_URL: ${{ secrets.LLM_URL }}
          LLM_MODEL: ${{ inputs.llm_model || secrets.LLM_MODEL }}
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          # Export garage and hatchet credentials for backend services
          export GARAGE_KEY_ID="${{ env.GARAGE_KEY_ID }}"
          export GARAGE_KEY_SECRET="${{ env.GARAGE_KEY_SECRET }}"
          export HATCHET_CLIENT_TOKEN="${{ env.HATCHET_CLIENT_TOKEN }}"

          docker compose -f docker-compose.integration.yml up -d \
            server worker hatchet-worker-cpu hatchet-worker-llm test-runner

      - name: Wait for server health check
        working-directory: server/tests
        run: |
          echo "Waiting for server to be healthy..."
          for i in $(seq 1 60); do
            if docker compose -f docker-compose.integration.yml exec -T test-runner \
              curl -sf http://server:1250/health &>/dev/null; then
              echo "Server is ready."
              break
            fi
            sleep 3
          done

      - name: Run DB migrations
        working-directory: server/tests
        run: |
          docker compose -f docker-compose.integration.yml exec -T server \
            uv run alembic upgrade head

      - name: Run integration tests
        working-directory: server/tests
        run: |
          docker compose -f docker-compose.integration.yml exec -T test-runner \
            uv run pytest tests/integration/ -v -x

      - name: Collect logs on failure
        if: failure()
        working-directory: server/tests
        run: |
          docker compose -f docker-compose.integration.yml logs --tail=500 > integration-logs.txt 2>&1

      - name: Upload logs artifact
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: integration-logs
          path: server/tests/integration-logs.txt
          retention-days: 7

      - name: Teardown
        if: always()
        working-directory: server/tests
        run: |
          docker compose -f docker-compose.integration.yml down -v --remove-orphans
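The workflow above is the CI entry point for the suite. CLAUDE.md (diffed later in this compare) documents a local entry point as well; a minimal sketch, with placeholder credential values:

```bash
# Local run of the integration suite, per the CLAUDE.md notes in this compare.
# Values below are placeholders; supply real credentials for your environment.
export LLM_URL="https://llm.example.com/v1"
export LLM_API_KEY="changeme"
export HF_TOKEN="changeme"
./scripts/run-integration-tests.sh
```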
36 .github/workflows/selfhost-script.yml (vendored, new file)
@@ -0,0 +1,36 @@
# Validates the self-hosted setup script: runs with --cpu and --garage,
# brings up services, runs health checks, then tears down.
name: Selfhost script (CPU + Garage)

on:
  workflow_dispatch: {}
  push:
    branches:
      - main
  pull_request: {}

jobs:
  selfhost-cpu-garage:
    runs-on: ubuntu-latest
    timeout-minutes: 25
    concurrency:
      group: selfhost-${{ github.ref }}
      cancel-in-progress: true

    steps:
      - uses: actions/checkout@v4

      - name: Run setup-selfhosted.sh (CPU + Garage)
        run: |
          ./scripts/setup-selfhosted.sh --cpu --garage

      - name: Quick health checks
        run: |
          curl -sf http://localhost:1250/health && echo " Server OK"
          curl -sf http://localhost:3000 > /dev/null && echo " Frontend OK"
          curl -sf http://localhost:3903/metrics > /dev/null && echo " Garage admin OK"

      - name: Teardown
        if: always()
        run: |
          docker compose -f docker-compose.selfhosted.yml --profile cpu --profile garage down -v --remove-orphans 2>/dev/null || true
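The same validation can be reproduced by hand on a workstation; a sketch using only commands that appear in the workflow above:

```bash
./scripts/setup-selfhosted.sh --cpu --garage
curl -sf http://localhost:1250/health && echo " Server OK"
curl -sf http://localhost:3000 > /dev/null && echo " Frontend OK"
docker compose -f docker-compose.selfhosted.yml --profile cpu --profile garage down -v --remove-orphans
```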
8 .gitignore (vendored)
@@ -3,6 +3,7 @@ server/.env
server/.env.production
.env
Caddyfile
.env.hatchet
server/exportdanswer
.vercel
.env*.local
@@ -20,8 +21,13 @@ CLAUDE.local.md
www/.env.development
www/.env.production
.playwright-mcp
docs/pnpm-lock.yaml
.secrets
opencode.json

certs/
docker-compose.ca.yml
docker-compose.gpu-ca.yml
Caddyfile.gpu-host
.env.gpu-host
vibedocs/
server/tests/integration/logs/
.pre-commit-config.yaml
@@ -1,5 +1,6 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
exclude: '(^uv\.lock$|pnpm-lock\.yaml$)'
repos:
  - repo: local
    hooks:
86 CHANGELOG.md
@@ -1,5 +1,91 @@
# Changelog

## [0.42.0](https://github.com/GreyhavenHQ/reflector/compare/v0.41.0...v0.42.0) (2026-03-30)


### Features

* custom ca for caddy ([#931](https://github.com/GreyhavenHQ/reflector/issues/931)) ([12bf0c2](https://github.com/GreyhavenHQ/reflector/commit/12bf0c2d77f9915b79b1eb1decd77ed2dadbb31d))
* mixdown modal services + processor pattern ([#936](https://github.com/GreyhavenHQ/reflector/issues/936)) ([d164e48](https://github.com/GreyhavenHQ/reflector/commit/d164e486cc33ff8babf6cff6c163893cfc56fd76))


### Bug Fixes

* grpc tls for local hatchet ([#937](https://github.com/GreyhavenHQ/reflector/issues/937)) ([a22789d](https://github.com/GreyhavenHQ/reflector/commit/a22789d5486bf8b83e33ab2fb5eb3ee9799c6d47))
* remove share public from integration tests ([#938](https://github.com/GreyhavenHQ/reflector/issues/938)) ([7b3b5b9](https://github.com/GreyhavenHQ/reflector/commit/7b3b5b98586449afd0b6996ba9fd7aec8308bbc6))

## [0.41.0](https://github.com/GreyhavenHQ/reflector/compare/v0.40.0...v0.41.0) (2026-03-25)


### Features

* add auto-generated captions, speaker-colored progress bar with sync controls, and speaker tooltip to cloud video player ([#926](https://github.com/GreyhavenHQ/reflector/issues/926)) ([f19113a](https://github.com/GreyhavenHQ/reflector/commit/f19113a3cfa27797a70b9496bfcf1baff9d89f0d))
* send email in share transcript and add email sending in room ([#924](https://github.com/GreyhavenHQ/reflector/issues/924)) ([e2ba502](https://github.com/GreyhavenHQ/reflector/commit/e2ba502697ce331c4d87fb019648fcbe4e7cca73))
* zulip dag monitor for failed runs ([#928](https://github.com/GreyhavenHQ/reflector/issues/928)) ([1f98790](https://github.com/GreyhavenHQ/reflector/commit/1f98790e7bc58013690ec81aefa051da5e36e93e))

## [0.40.0](https://github.com/GreyhavenHQ/reflector/compare/v0.39.0...v0.40.0) (2026-03-20)


### Features

* allow participants to ask for email transcript ([#923](https://github.com/GreyhavenHQ/reflector/issues/923)) ([55222ec](https://github.com/GreyhavenHQ/reflector/commit/55222ecc4736f99ad461f03a006c8d97b5876142))
* download files, show cloud video, solf deletion with no reprocessing ([#920](https://github.com/GreyhavenHQ/reflector/issues/920)) ([a76f114](https://github.com/GreyhavenHQ/reflector/commit/a76f1143783d3cf137a8847a851b72302e04445b))

## [0.39.0](https://github.com/GreyhavenHQ/reflector/compare/v0.38.2...v0.39.0) (2026-03-18)


### Features

* migrate file and live post-processing pipelines from Celery to Hatchet workflow engine ([#911](https://github.com/GreyhavenHQ/reflector/issues/911)) ([37a1f01](https://github.com/GreyhavenHQ/reflector/commit/37a1f0185057dd43b68df2b12bb08d3b18e28d34))


### Bug Fixes

* integration tests runner in CI ([#919](https://github.com/GreyhavenHQ/reflector/issues/919)) ([1e396ca](https://github.com/GreyhavenHQ/reflector/commit/1e396ca0ca91bc9d2645ddfc63a1576469491faa))
* latest vulns ([#915](https://github.com/GreyhavenHQ/reflector/issues/915)) ([a9200d3](https://github.com/GreyhavenHQ/reflector/commit/a9200d35bf856f65f24a4f34931ebe0d75ad0382))

## [0.38.2](https://github.com/GreyhavenHQ/reflector/compare/v0.38.1...v0.38.2) (2026-03-12)


### Bug Fixes

* add auth guards to prevent anonymous access to write endpoints in non-public mode ([#907](https://github.com/GreyhavenHQ/reflector/issues/907)) ([cf6e867](https://github.com/GreyhavenHQ/reflector/commit/cf6e867cf12c42411e5a7412f6ec44eee8351665))
* add tests that check some of the issues are already fixed ([#905](https://github.com/GreyhavenHQ/reflector/issues/905)) ([b53c8da](https://github.com/GreyhavenHQ/reflector/commit/b53c8da3981c394bdab08504b45d25f62c35495a))

## [0.38.1](https://github.com/GreyhavenHQ/reflector/compare/v0.38.0...v0.38.1) (2026-03-06)


### Bug Fixes

* pin hatchet sdk version ([#903](https://github.com/GreyhavenHQ/reflector/issues/903)) ([504ca74](https://github.com/GreyhavenHQ/reflector/commit/504ca74184211eda9020d0b38ba7bd2b55d09991))

## [0.38.0](https://github.com/GreyhavenHQ/reflector/compare/v0.37.0...v0.38.0) (2026-03-06)


### Features

* 3-mode selfhosted refactoring (--gpu, --cpu, --hosted) + audio token auth fallback ([#896](https://github.com/GreyhavenHQ/reflector/issues/896)) ([a682846](https://github.com/GreyhavenHQ/reflector/commit/a6828466456407c808302e9eb8dc4b4f0614dd6f))


### Bug Fixes

* improve hatchet workflow reliability ([#900](https://github.com/GreyhavenHQ/reflector/issues/900)) ([c155f66](https://github.com/GreyhavenHQ/reflector/commit/c155f669825e8e2a6e929821a1ef0bd94237dc11))

## [0.37.0](https://github.com/GreyhavenHQ/reflector/compare/v0.36.0...v0.37.0) (2026-03-03)


### Features

* enable daily co in selfhosted + only schedule tasks when necessary ([#883](https://github.com/GreyhavenHQ/reflector/issues/883)) ([045eae8](https://github.com/GreyhavenHQ/reflector/commit/045eae8ff2014a7b83061045e3c8cb25cce9d60a))


### Bug Fixes

* aws storage construction ([#895](https://github.com/GreyhavenHQ/reflector/issues/895)) ([f5ec2d2](https://github.com/GreyhavenHQ/reflector/commit/f5ec2d28cfa2de9b2b4aeec81966737b740689c2))
* remaining dependabot security issues ([#890](https://github.com/GreyhavenHQ/reflector/issues/890)) ([0931095](https://github.com/GreyhavenHQ/reflector/commit/0931095f49e61216e651025ce92be460e6a9df9e))
* test selfhosted script ([#892](https://github.com/GreyhavenHQ/reflector/issues/892)) ([4d915e2](https://github.com/GreyhavenHQ/reflector/commit/4d915e2a9fe9f05f31cbd0018d9c2580daf7854f))
* upgrade to nextjs 16 ([#888](https://github.com/GreyhavenHQ/reflector/issues/888)) ([f6cc032](https://github.com/GreyhavenHQ/reflector/commit/f6cc03286baf3e3a115afd3b22ae993ad7a4b7e3))

## [0.35.1](https://github.com/GreyhavenHQ/reflector/compare/v0.35.0...v0.35.1) (2026-02-25)
35 CLAUDE.md
@@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
Reflector is an AI-powered audio transcription and meeting analysis platform with real-time processing capabilities. The system consists of:

- **Frontend**: Next.js 14 React application (`www/`) with Chakra UI, real-time WebSocket integration
- **Frontend**: Next.js 16 React application (`www/`) with Chakra UI, real-time WebSocket integration
- **Backend**: Python FastAPI server (`server/`) with async database operations and background processing
- **Processing**: GPU-accelerated ML pipeline for transcription, diarization, summarization via Modal.com
- **Infrastructure**: Redis, PostgreSQL/SQLite, Celery workers, WebRTC streaming
@@ -41,14 +41,14 @@ uv run celery -A reflector.worker.app beat

**Testing:**
```bash
# Run all tests with coverage
uv run pytest
# Run all tests with coverage (requires Redis on localhost)
REDIS_HOST=localhost REDIS_PORT=6379 uv run pytest

# Run specific test file
uv run pytest tests/test_transcripts.py
REDIS_HOST=localhost REDIS_PORT=6379 uv run pytest tests/test_transcripts.py

# Run tests with verbose output
uv run pytest -v
REDIS_HOST=localhost REDIS_PORT=6379 uv run pytest -v
```

**Process Audio Files:**
@@ -160,6 +160,21 @@ All endpoints prefixed `/v1/`:
- **Frontend**: No current test suite - opportunities for Jest/React Testing Library
- **Coverage**: Backend maintains test coverage reports in `htmlcov/`

### Integration Tests (DO NOT run unless explicitly asked)

There are end-to-end integration tests in `server/tests/integration/` that spin up the full stack (PostgreSQL, Redis, Hatchet, Garage, mock-daily, server, workers) via Docker Compose and exercise real processing pipelines. These tests are:

- `test_file_pipeline.py` — File upload → FilePipeline
- `test_live_pipeline.py` — WebRTC stream → LivePostPipeline
- `test_multitrack_pipeline.py` — Multitrack → DailyMultitrackPipeline

**Important:**
- These tests are **excluded** from normal `uv run pytest` runs via `--ignore=tests/integration` in pyproject.toml.
- Do **NOT** run them as part of verification, code review, or general testing unless the user explicitly asks.
- They require Docker, external LLM credentials, and a HuggingFace token — they cannot run in a regular test environment.
- To run locally: `./scripts/run-integration-tests.sh` (requires env vars: `LLM_URL`, `LLM_API_KEY`, `HF_TOKEN`).
- In CI: triggered manually via the "Integration Tests" GitHub Actions workflow (`workflow_dispatch`).

## GPU Processing

Modal.com integration for scalable ML processing:
@@ -177,3 +192,13 @@ Modal.com integration for scalable ML processing:
## Pipeline/worker related info

If you need to do any worker/pipeline related work, search for "Pipeline" classes and their "create" or "build" methods to find the main processor sequence. Look for task orchestration patterns (like "chord", "group", or "chain") to identify the post-processing flow with parallel execution chains. This will give you an abstract view of how the processing pipeline is organized.

## Documentation

- New documentation files go in `docsv2/`, not in `docs/docs/`.
- Existing `docs/` directory contains legacy Docusaurus docs.

## Code Style

- Always put imports at the top of the file. Let ruff/pre-commit handle sorting and formatting of imports.
- Exception: In Hatchet pipeline task functions, DB controller imports (e.g., `transcripts_controller`, `meetings_controller`) stay as deferred/inline imports inside `fresh_db_connection()` blocks — this is intentional to avoid sharing DB connections across forked processes. Non-DB imports (utilities, services) should still go at the top of the file.
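The test commands above now assume a Redis on localhost. A sketch of one way to provide it, assuming Docker is available (the `redis:7.2-alpine` image matches the compose files in this compare):

```bash
# Throwaway Redis for the unit-test run, then clean up
docker run -d --rm --name reflector-test-redis -p 6379:6379 redis:7.2-alpine
REDIS_HOST=localhost REDIS_PORT=6379 uv run pytest
docker stop reflector-test-redis
```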
106 docker-compose.gpu-host.yml (new file)
@@ -0,0 +1,106 @@
# Standalone GPU host for Reflector — transcription, diarization, translation.
#
# Usage: ./scripts/setup-gpu-host.sh [--domain DOMAIN] [--custom-ca PATH] [--api-key KEY] [--cpu]
#    or: docker compose -f docker-compose.gpu-host.yml --profile gpu [--profile caddy] up -d
#
# Processing mode (pick ONE — mutually exclusive, both bind port 8000):
#   --profile gpu    NVIDIA GPU container (requires nvidia-container-toolkit)
#   --profile cpu    CPU-only container (no GPU required, slower)
#
# Optional:
#   --profile caddy  Caddy reverse proxy with HTTPS
#
# This file is checked into the repo. The setup script generates:
#   - .env.gpu-host (HF_TOKEN, API key, port config)
#   - Caddyfile.gpu-host (Caddy config, only with --domain)
#   - docker-compose.gpu-ca.yml (CA cert mounts, only with --custom-ca)

services:
  # ===========================================================
  # GPU service — NVIDIA GPU accelerated
  # Activated with: --profile gpu
  # ===========================================================

  gpu:
    build:
      context: ./gpu/self_hosted
      dockerfile: Dockerfile
    profiles: [gpu]
    restart: unless-stopped
    ports:
      - "${GPU_HOST_PORT:-8000}:8000"
    environment:
      HF_TOKEN: ${HF_TOKEN:-}
      REFLECTOR_GPU_APIKEY: ${REFLECTOR_GPU_APIKEY:-}
    volumes:
      - gpu_cache:/root/.cache
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
      interval: 15s
      timeout: 5s
      retries: 10
      start_period: 120s
    networks:
      default:
        aliases:
          - transcription

  # ===========================================================
  # CPU service — no GPU required, uses Dockerfile.cpu
  # Activated with: --profile cpu
  # Mutually exclusive with gpu (both bind port 8000)
  # ===========================================================

  cpu:
    build:
      context: ./gpu/self_hosted
      dockerfile: Dockerfile.cpu
    profiles: [cpu]
    restart: unless-stopped
    ports:
      - "${GPU_HOST_PORT:-8000}:8000"
    environment:
      HF_TOKEN: ${HF_TOKEN:-}
      REFLECTOR_GPU_APIKEY: ${REFLECTOR_GPU_APIKEY:-}
    volumes:
      - gpu_cache:/root/.cache
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
      interval: 15s
      timeout: 5s
      retries: 10
      start_period: 120s
    networks:
      default:
        aliases:
          - transcription

  # ===========================================================
  # Caddy — reverse proxy with HTTPS (optional)
  # Activated with: --profile caddy
  # Proxies to "transcription" network alias (works for both gpu and cpu)
  # ===========================================================

  caddy:
    image: caddy:2-alpine
    profiles: [caddy]
    restart: unless-stopped
    ports:
      - "80:80"
      - "${CADDY_HTTPS_PORT:-443}:443"
    volumes:
      - ./Caddyfile.gpu-host:/etc/caddy/Caddyfile:ro
      - caddy_data:/data
      - caddy_config:/config

volumes:
  gpu_cache:
  caddy_data:
  caddy_config:
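For a quick local check of this file, the CPU profile can be brought up and probed on the same endpoint its healthcheck polls; a sketch:

```bash
docker compose -f docker-compose.gpu-host.yml --profile cpu up -d
# /docs is the FastAPI docs page the compose healthcheck itself uses
curl -sf http://localhost:8000/docs > /dev/null && echo "GPU host OK"
docker compose -f docker-compose.gpu-host.yml --profile cpu down
```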
docker-compose.selfhosted.yml
@@ -1,16 +1,20 @@
# Self-hosted production Docker Compose — single file for everything.
#
# Usage: ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
#    or: docker compose -f docker-compose.selfhosted.yml --profile gpu [--profile ollama-gpu] [--profile garage] [--profile caddy] up -d
# Usage: ./scripts/setup-selfhosted.sh <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--garage] [--caddy]
#    or: docker compose -f docker-compose.selfhosted.yml [--profile gpu] [--profile ollama-gpu] [--profile garage] [--profile caddy] up -d
#
# Specialized models (pick ONE — required):
#   --profile gpu    NVIDIA GPU for transcription/diarization/translation
#   --profile cpu    CPU-only for transcription/diarization/translation
# ML processing modes (pick ONE — required):
#   --gpu     NVIDIA GPU container for transcription/diarization/translation (profile: gpu)
#   --cpu     In-process CPU processing on server/worker (no ML container needed)
#   --hosted  Remote GPU service URL (no ML container needed)
#
# Local LLM (optional — for summarization/topics):
#   --profile ollama-gpu  Local Ollama with NVIDIA GPU
#   --profile ollama-cpu  Local Ollama on CPU only
#
# Daily.co multitrack processing (auto-detected from server/.env):
#   --profile dailyco  Hatchet workflow engine + CPU/LLM workers
#
# Other optional services:
#   --profile garage  Local S3-compatible storage (Garage)
#   --profile caddy   Reverse proxy with auto-SSL
@@ -32,7 +36,7 @@ services:
    restart: unless-stopped
    ports:
      - "127.0.0.1:1250:1250"
      - "50000-50100:50000-50100/udp"
      - "40000-40100:40000-40100/udp"
    env_file:
      - ./server/.env
    environment:
@@ -42,18 +46,14 @@ services:
      REDIS_HOST: redis
      CELERY_BROKER_URL: redis://redis:6379/1
      CELERY_RESULT_BACKEND: redis://redis:6379/1
      HATCHET_CLIENT_SERVER_URL: ""
      HATCHET_CLIENT_HOST_PORT: ""
      # Specialized models via gpu/cpu container (aliased as "transcription")
      TRANSCRIPT_BACKEND: modal
      TRANSCRIPT_URL: http://transcription:8000
      TRANSCRIPT_MODAL_API_KEY: selfhosted
      DIARIZATION_BACKEND: modal
      DIARIZATION_URL: http://transcription:8000
      TRANSLATION_BACKEND: modal
      TRANSLATE_URL: http://transcription:8000
      # ML backend config comes from env_file (server/.env), set per-mode by setup script
      # HF_TOKEN needed for in-process pyannote diarization (--cpu mode)
      HF_TOKEN: ${HF_TOKEN:-}
      # WebRTC: fixed UDP port range for ICE candidates (mapped above)
      WEBRTC_PORT_RANGE: "50000-50100"
      WEBRTC_PORT_RANGE: "40000-40100"
      # Hatchet workflow engine (always-on for processing pipelines)
      HATCHET_CLIENT_SERVER_URL: ${HATCHET_CLIENT_SERVER_URL:-http://hatchet:8888}
      HATCHET_CLIENT_HOST_PORT: ${HATCHET_CLIENT_HOST_PORT:-hatchet:7077}
    depends_on:
      postgres:
        condition: service_healthy
@@ -76,15 +76,11 @@ services:
      REDIS_HOST: redis
      CELERY_BROKER_URL: redis://redis:6379/1
      CELERY_RESULT_BACKEND: redis://redis:6379/1
      HATCHET_CLIENT_SERVER_URL: ""
      HATCHET_CLIENT_HOST_PORT: ""
      TRANSCRIPT_BACKEND: modal
      TRANSCRIPT_URL: http://transcription:8000
      TRANSCRIPT_MODAL_API_KEY: selfhosted
      DIARIZATION_BACKEND: modal
      DIARIZATION_URL: http://transcription:8000
      TRANSLATION_BACKEND: modal
      TRANSLATE_URL: http://transcription:8000
      # ML backend config comes from env_file (server/.env), set per-mode by setup script
      HF_TOKEN: ${HF_TOKEN:-}
      # Hatchet workflow engine (always-on for processing pipelines)
      HATCHET_CLIENT_SERVER_URL: ${HATCHET_CLIENT_SERVER_URL:-http://hatchet:8888}
      HATCHET_CLIENT_HOST_PORT: ${HATCHET_CLIENT_HOST_PORT:-hatchet:7077}
    depends_on:
      postgres:
        condition: service_healthy
@@ -136,6 +132,8 @@ services:
  redis:
    image: redis:7.2-alpine
    restart: unless-stopped
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 30s
@@ -147,12 +145,14 @@ services:
  postgres:
    image: postgres:17-alpine
    restart: unless-stopped
    command: ["postgres", "-c", "max_connections=200"]
    environment:
      POSTGRES_USER: reflector
      POSTGRES_PASSWORD: reflector
      POSTGRES_DB: reflector
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./server/docker/init-hatchet-db.sql:/docker-entrypoint-initdb.d/init-hatchet-db.sql:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U reflector"]
      interval: 30s
@@ -161,7 +161,10 @@ services:

  # ===========================================================
  # Specialized model containers (transcription, diarization, translation)
  # Both gpu and cpu get alias "transcription" so server config never changes.
  # Only the gpu profile is activated by the setup script (--gpu mode).
  # The cpu service definition is kept for manual/standalone use but is
  # NOT activated by --cpu mode (which uses in-process local backends).
  # Both services get alias "transcription" so server config never changes.
  # ===========================================================

  gpu:
@@ -305,6 +308,104 @@ services:
      - web
      - server

  # ===========================================================
  # Mailpit — local SMTP sink for testing email transcript notifications
  # Start with: --profile mailpit
  # Web UI at http://localhost:8025
  # ===========================================================

  mailpit:
    image: axllent/mailpit:latest
    profiles: [mailpit]
    restart: unless-stopped
    ports:
      - "127.0.0.1:8025:8025" # Web UI
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:8025/api/v1/messages"]
      interval: 10s
      timeout: 3s
      retries: 5

  # ===========================================================
  # Hatchet workflow engine + workers
  # Required for all processing pipelines (file, live, Daily.co multitrack).
  # Always-on — every selfhosted deployment needs Hatchet.
  # ===========================================================

  hatchet:
    image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest
    restart: on-failure
    depends_on:
      postgres:
        condition: service_healthy
    ports:
      - "127.0.0.1:8888:8888"
      - "127.0.0.1:7078:7077"
    env_file:
      - ./.env.hatchet
    environment:
      DATABASE_URL: "postgresql://reflector:reflector@postgres:5432/hatchet?sslmode=disable&connect_timeout=30"
      SERVER_AUTH_COOKIE_INSECURE: "t"
      SERVER_GRPC_BIND_ADDRESS: "0.0.0.0"
      SERVER_GRPC_INSECURE: "t"
      SERVER_GRPC_BROADCAST_ADDRESS: hatchet:7077
      SERVER_GRPC_PORT: "7077"
      SERVER_AUTH_SET_EMAIL_VERIFIED: "t"
      SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: hatchet:7077
    volumes:
      - hatchet_config:/config
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8888/api/live"]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 30s

  hatchet-worker-cpu:
    build:
      context: ./server
      dockerfile: Dockerfile
    image: monadicalsas/reflector-backend:latest
    profiles: [dailyco]
    restart: unless-stopped
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: hatchet-worker-cpu
      DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
      REDIS_HOST: redis
      CELERY_BROKER_URL: redis://redis:6379/1
      CELERY_RESULT_BACKEND: redis://redis:6379/1
      HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
      HATCHET_CLIENT_HOST_PORT: hatchet:7077
    depends_on:
      hatchet:
        condition: service_healthy
    volumes:
      - server_data:/app/data

  hatchet-worker-llm:
    build:
      context: ./server
      dockerfile: Dockerfile
    image: monadicalsas/reflector-backend:latest
    restart: unless-stopped
    env_file:
      - ./server/.env
    environment:
      ENTRYPOINT: hatchet-worker-llm
      DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
      REDIS_HOST: redis
      CELERY_BROKER_URL: redis://redis:6379/1
      CELERY_RESULT_BACKEND: redis://redis:6379/1
      HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
      HATCHET_CLIENT_HOST_PORT: hatchet:7077
    depends_on:
      hatchet:
        condition: service_healthy
    volumes:
      - server_data:/app/data

volumes:
  postgres_data:
  redis_data:
@@ -315,6 +416,7 @@ volumes:
  ollama_data:
  caddy_data:
  caddy_config:
  hatchet_config:

networks:
  default:
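The new mailpit profile can be smoke-tested against the same endpoint its healthcheck hits; a sketch:

```bash
docker compose -f docker-compose.selfhosted.yml --profile mailpit up -d
# List captured messages via Mailpit's API (empty until an email is sent)
curl -sf http://localhost:8025/api/v1/messages
```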
@@ -93,6 +93,7 @@ services:
    environment:
      NODE_ENV: development
      SERVER_API_URL: http://host.docker.internal:1250
      KV_URL: redis://redis:6379
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
7 docs/.dockerignore (new file)
@@ -0,0 +1,7 @@
node_modules
build
.git
.gitignore
*.log
.DS_Store
.env*
docs/Dockerfile
@@ -1,14 +1,17 @@
FROM node:18-alpine AS builder
FROM node:20-alpine AS builder
WORKDIR /app

# Install curl for fetching OpenAPI spec
RUN apk add --no-cache curl

# Copy package files
COPY package*.json ./
# Enable pnpm
RUN corepack enable && corepack prepare pnpm@latest --activate

# Copy package files and lockfile
COPY package.json pnpm-lock.yaml* ./

# Install dependencies
RUN npm ci
RUN pnpm install --frozen-lockfile

# Copy source
COPY . .
@@ -21,7 +24,7 @@ RUN mkdir -p ./static && curl -sf "${OPENAPI_URL}" -o ./static/openapi.json || e
RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts

# Build static site (skip prebuild hook by calling docusaurus directly)
RUN npx docusaurus build
RUN pnpm exec docusaurus build

# Production image
FROM nginx:alpine
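A sketch of building and serving this docs image locally; the image name and host port 8080 are arbitrary examples (nginx listens on 80 inside the container):

```bash
docker build -t reflector-docs docs/
docker run --rm -p 8080:80 reflector-docs
# then browse http://localhost:8080
```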
@@ -5,13 +5,13 @@ This website is built using [Docusaurus](https://docusaurus.io/), a modern stati
### Installation

```
$ yarn
$ pnpm install
```

### Local Development

```
$ yarn start
$ pnpm start
```

This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
@@ -19,7 +19,7 @@ This command starts a local development server and opens up a browser window. Mo
### Build

```
$ yarn build
$ pnpm build
```

This command generates static content into the `build` directory and can be served using any static contents hosting service.
@@ -29,13 +29,13 @@ This command generates static content into the `build` directory and can be serv
Using SSH:

```
$ USE_SSH=true yarn deploy
$ USE_SSH=true pnpm deploy
```

Not using SSH:

```
$ GIT_USER=<Your GitHub username> yarn deploy
$ GIT_USER=<Your GitHub username> pnpm deploy
```

If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
@@ -254,15 +254,15 @@ Reflector can run completely offline:
Control where each step happens:

```yaml
# All local processing
TRANSCRIPT_BACKEND=local
DIARIZATION_BACKEND=local
TRANSLATION_BACKEND=local
# All in-process processing
TRANSCRIPT_BACKEND=whisper
DIARIZATION_BACKEND=pyannote
TRANSLATION_BACKEND=marian

# Hybrid approach
TRANSCRIPT_BACKEND=modal    # Fast GPU processing
DIARIZATION_BACKEND=local   # Sensitive speaker data
TRANSLATION_BACKEND=modal   # Non-sensitive translation
TRANSCRIPT_BACKEND=modal     # Fast GPU processing
DIARIZATION_BACKEND=pyannote # Sensitive speaker data
TRANSLATION_BACKEND=modal    # Non-sensitive translation
```

### Storage Options
@@ -11,7 +11,7 @@ Reflector is built as a modern, scalable, microservices-based application design
### Frontend Application

The user interface is built with **Next.js 15** using the App Router pattern, providing:
The user interface is built with **Next.js 16** using the App Router pattern, providing:

- Server-side rendering for optimal performance
- Real-time WebSocket connections for live transcription
@@ -36,14 +36,15 @@ This creates `docs/static/openapi.json` (should be ~70KB) which will be copied d
The Dockerfile is already in `docs/Dockerfile`:

```dockerfile
FROM node:18-alpine AS builder
FROM node:20-alpine AS builder
WORKDIR /app

# Copy package files
COPY package*.json ./
# Enable pnpm and copy package files + lockfile
RUN corepack enable && corepack prepare pnpm@latest --activate
COPY package.json pnpm-lock.yaml* ./

# Inshall dependencies
RUN npm ci
# Install dependencies
RUN pnpm install --frozen-lockfile

# Copy source (includes static/openapi.json if pre-fetched)
COPY . .
@@ -52,7 +53,7 @@ COPY . .
RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts

# Build static site
RUN npx docusaurus build
RUN pnpm exec docusaurus build

FROM nginx:alpine
COPY --from=builder /app/build /usr/share/nginx/html
```
@@ -46,7 +46,7 @@ Reflector consists of three main components:
Ready to deploy Reflector? Head over to our [Installation Guide](./installation/overview) to set up your own instance.

For a quick overview of how Reflector processes audio, check out our [Pipeline Documentation](./pipelines/overview).
For a quick overview of how Reflector processes audio, check out our [Pipeline Documentation](./concepts/pipeline).

## Open Source
@@ -124,11 +124,11 @@ const config: Config = {
items: [
  {
    label: 'Architecture',
    to: '/docs/reference/architecture/overview',
    to: '/docs/concepts/overview',
  },
  {
    label: 'Pipelines',
    to: '/docs/pipelines/overview',
    to: '/docs/concepts/pipeline',
  },
  {
    label: 'Roadmap',
23526 docs/package-lock.json (generated)
File diff suppressed because it is too large
docs/package.json
@@ -14,26 +14,26 @@
    "write-heading-ids": "docusaurus write-heading-ids",
    "typecheck": "tsc",
    "fetch-openapi": "./scripts/fetch-openapi.sh",
    "gen-api-docs": "npm run fetch-openapi && docusaurus gen-api-docs reflector",
    "prebuild": "npm run fetch-openapi"
    "gen-api-docs": "pnpm run fetch-openapi && docusaurus gen-api-docs reflector",
    "prebuild": "pnpm run fetch-openapi"
  },
  "dependencies": {
    "@docusaurus/core": "3.6.3",
    "@docusaurus/preset-classic": "3.6.3",
    "@mdx-js/react": "^3.0.0",
    "clsx": "^2.0.0",
    "docusaurus-plugin-openapi-docs": "^4.5.1",
    "docusaurus-theme-openapi-docs": "^4.5.1",
    "@docusaurus/theme-mermaid": "3.6.3",
    "prism-react-renderer": "^2.3.0",
    "react": "^18.0.0",
    "react-dom": "^18.0.0"
    "@docusaurus/core": "3.9.2",
    "@docusaurus/preset-classic": "3.9.2",
    "@docusaurus/theme-mermaid": "3.9.2",
    "@mdx-js/react": "^3.1.1",
    "clsx": "^2.1.1",
    "docusaurus-plugin-openapi-docs": "^4.7.1",
    "docusaurus-theme-openapi-docs": "^4.7.1",
    "prism-react-renderer": "^2.4.1",
    "react": "^19.2.4",
    "react-dom": "^19.2.4"
  },
  "devDependencies": {
    "@docusaurus/module-type-aliases": "3.6.3",
    "@docusaurus/tsconfig": "3.6.3",
    "@docusaurus/types": "3.6.3",
    "typescript": "~5.6.2"
    "@docusaurus/module-type-aliases": "3.9.2",
    "@docusaurus/tsconfig": "3.9.2",
    "@docusaurus/types": "3.9.2",
    "typescript": "~5.9.3"
  },
  "browserslist": {
    "production": [
@@ -49,5 +49,16 @@
  },
  "engines": {
    "node": ">=18.0"
  },
  "pnpm": {
    "overrides": {
      "minimatch@<3.1.4": "3.1.5",
      "minimatch@>=5.0.0 <5.1.8": "5.1.8",
      "minimatch@>=9.0.0 <9.0.7": "9.0.7",
      "lodash@<4.17.23": "4.17.23",
      "js-yaml@<4.1.1": "4.1.1",
      "gray-matter": "github:jonschlinkert/gray-matter#234163e",
      "serialize-javascript": "7.0.4"
    }
  }
}
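To confirm the new `pnpm.overrides` take effect after the switch from npm, something like the following should show the pinned versions (assuming pnpm is enabled via corepack, as in the Dockerfile above):

```bash
cd docs
pnpm install
pnpm why serialize-javascript   # expect the 7.0.4 pin from the override
pnpm why js-yaml                # expect >=4.1.1 everywhere
```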
13976 docs/pnpm-lock.yaml (generated, new file)
File diff suppressed because it is too large

4151 docs/static/openapi.json (vendored)
File diff suppressed because it is too large
338 docsv2/custom-ca-setup.md (new file)
@@ -0,0 +1,338 @@
# Custom CA Certificate Setup

Use a private Certificate Authority (CA) with Reflector self-hosted deployments. This covers two scenarios:

1. **Custom local domain** — Serve Reflector over HTTPS on an internal domain (e.g., `reflector.local`) using certs signed by your own CA
2. **Backend CA trust** — Let Reflector's backend services (server, workers, GPU) make HTTPS calls to GPU, LLM, or other internal services behind your private CA

Both can be used independently or together.

## Quick Start

### Generate test certificates

```bash
./scripts/generate-certs.sh reflector.local
```

This creates `certs/` with:
- `ca.key` + `ca.crt` — Root CA (10-year validity)
- `server-key.pem` + `server.pem` — Server certificate (1-year, SAN: domain + localhost + 127.0.0.1)

### Deploy with custom CA + domain

```bash
# Add domain to /etc/hosts on the server (use 127.0.0.1 for local, or server LAN IP for network access)
echo "127.0.0.1 reflector.local" | sudo tee -a /etc/hosts

# Run setup — pass the certs directory
./scripts/setup-selfhosted.sh --gpu --caddy --domain reflector.local --custom-ca certs/

# Trust the CA on your machine (see "Trust the CA" section below)
```

### Deploy with CA trust only (GPU/LLM behind private CA)

```bash
# Only need the CA cert file — no Caddy TLS certs needed
./scripts/setup-selfhosted.sh --hosted --custom-ca /path/to/corporate-ca.crt
```

## How `--custom-ca` Works

The flag accepts a **directory** or a **single file**:

### Directory mode

```bash
--custom-ca certs/
```

Looks for these files by convention:
- `ca.crt` (required) — CA certificate to trust
- `server.pem` + `server-key.pem` (optional) — TLS certificate/key for Caddy

If `server.pem` + `server-key.pem` are found AND `--domain` is provided:
- Caddy serves HTTPS using those certs
- Backend containers trust the CA for outbound calls

If only `ca.crt` is found:
- Backend containers trust the CA for outbound calls
- Caddy is unaffected (uses Let's Encrypt, self-signed, or no Caddy)

### Single file mode

```bash
--custom-ca /path/to/corporate-ca.crt
```

Only injects CA trust into backend containers. No Caddy TLS changes.

## Scenarios

### Scenario 1: Custom local domain

Your Reflector instance runs on an internal network. You want `https://reflector.local` with proper TLS (no browser warnings).

```bash
# 1. Generate certs
./scripts/generate-certs.sh reflector.local

# 2. Add to /etc/hosts on the server
echo "127.0.0.1 reflector.local" | sudo tee -a /etc/hosts

# 3. Deploy
./scripts/setup-selfhosted.sh --gpu --garage --caddy --domain reflector.local --custom-ca certs/

# 4. Trust the CA on your machine (see "Trust the CA" section below)
```

If other machines on the network need to access it, add the server's LAN IP to `/etc/hosts` on those machines instead:
```bash
echo "192.168.1.100 reflector.local" | sudo tee -a /etc/hosts
```

And include that IP as an extra SAN when generating certs:
```bash
./scripts/generate-certs.sh reflector.local "IP:192.168.1.100"
```

### Scenario 2: GPU/LLM behind corporate CA

Your GPU or LLM server (e.g., `https://gpu.internal.corp`) uses certificates signed by your corporate CA. Reflector's backend needs to trust that CA for outbound HTTPS calls.

```bash
# Get the CA certificate from your IT team (PEM format)
# Then deploy — Caddy can still use Let's Encrypt or self-signed
./scripts/setup-selfhosted.sh --hosted --garage --caddy --custom-ca /path/to/corporate-ca.crt
```

This works because:
- **TLS cert/key** = "this is my identity" — for Caddy to serve HTTPS to browsers
- **CA cert** = "I trust this authority" — for backend containers to verify outbound connections

Your Reflector frontend can use Let's Encrypt (public domain) or self-signed certs, while the backend trusts a completely different CA for GPU/LLM calls.

### Scenario 3: Both combined (same CA)

Custom domain + GPU/LLM all behind the same CA:

```bash
./scripts/generate-certs.sh reflector.local "DNS:gpu.local"
./scripts/setup-selfhosted.sh --gpu --garage --caddy --domain reflector.local --custom-ca certs/
```

### Scenario 4: Multiple CAs (local domain + remote GPU on different CA)

Your Reflector uses one CA for `reflector.local`, but the GPU host uses a different CA:

```bash
# Your local domain setup
./scripts/generate-certs.sh reflector.local

# Deploy with your CA + trust the GPU host's CA too
./scripts/setup-selfhosted.sh --hosted --garage --caddy \
  --domain reflector.local \
  --custom-ca certs/ \
  --extra-ca /path/to/gpu-machine-ca.crt
```

`--extra-ca` appends additional CA certs to the trust bundle. Backend containers trust ALL CAs — your local domain AND the GPU host's certs both work.

You can repeat `--extra-ca` for multiple remote services:
```bash
--extra-ca /path/to/gpu-ca.crt --extra-ca /path/to/llm-ca.crt
```

For setting up a dedicated GPU host, see [Standalone GPU Host Setup](gpu-host-setup.md).

## Trust the CA on Client Machines

After deploying, clients need to trust the CA to avoid browser warnings.

### macOS

```bash
sudo security add-trusted-cert -d -r trustRoot \
  -k /Library/Keychains/System.keychain certs/ca.crt
```

### Linux (Ubuntu/Debian)

```bash
sudo cp certs/ca.crt /usr/local/share/ca-certificates/reflector-ca.crt
sudo update-ca-certificates
```

### Linux (RHEL/Fedora)

```bash
sudo cp certs/ca.crt /etc/pki/ca-trust/source/anchors/reflector-ca.crt
sudo update-ca-trust
```

### Windows (PowerShell as admin)

```powershell
Import-Certificate -FilePath .\certs\ca.crt -CertStoreLocation Cert:\LocalMachine\Root
```

### Firefox (all platforms)

Firefox uses its own certificate store:
1. Settings > Privacy & Security > View Certificates
2. Authorities tab > Import
3. Select `ca.crt` and check "Trust this CA to identify websites"

## How It Works Internally

### Docker entrypoint CA injection

Each backend container (server, worker, beat, hatchet workers, GPU) has an entrypoint script (`docker-entrypoint.sh`) that:

1. Checks if a CA cert is mounted at `/usr/local/share/ca-certificates/custom-ca.crt`
2. If present, runs `update-ca-certificates` to create a **combined bundle** (system CAs + custom CA)
3. Sets environment variables so all Python/gRPC libraries use the combined bundle:

| Env var | Covers |
|---------|--------|
| `SSL_CERT_FILE` | httpx, OpenAI SDK, llama-index, Python ssl module |
| `REQUESTS_CA_BUNDLE` | requests library (transitive dependencies) |
| `CURL_CA_BUNDLE` | curl CLI (container healthchecks) |

Note: `GRPC_DEFAULT_SSL_ROOTS_FILE_PATH` is intentionally NOT set. Setting it causes grpcio to attempt TLS on internal Hatchet gRPC connections that run without TLS, resulting in handshake failures. The internal Hatchet connection uses `HATCHET_CLIENT_TLS_STRATEGY=none` (plaintext).

When no CA cert is mounted, the entrypoint is a no-op — containers behave exactly as before.

### Why this replaces manual certifi patching

Previously, the workaround for trusting a private CA in Python was to patch certifi's bundle directly:

```bash
# OLD approach — fragile, do NOT use
cat custom-ca.crt >> $(python -c "import certifi; print(certifi.where())")
```

This breaks whenever certifi is updated (any `pip install`/`uv sync` overwrites the bundle and the CA is lost).

Our entrypoint approach is permanent because:

1. `SSL_CERT_FILE` is checked by Python's `ssl.create_default_context()` **before** falling back to `certifi.where()`. When set, certifi's bundle is never read.
2. `REQUESTS_CA_BUNDLE` similarly overrides certifi for the `requests` library.
3. The CA is injected at container startup (runtime), not baked into the Python environment. It survives image rebuilds, dependency updates, and `uv sync`.

```
Python SSL lookup chain:
  ssl.create_default_context()
    → SSL_CERT_FILE env var? → YES → use combined bundle (system + custom CA) ✓
    → (certifi.where() is never reached)
```

This covers all outbound HTTPS calls: httpx (transcription, diarization, translation, webhooks), OpenAI SDK (transcription), llama-index (LLM/summarization), and requests (transitive dependencies).

### Compose override

The setup script generates `docker-compose.ca.yml` which mounts the CA cert into every backend container as a read-only bind mount. This file is:
- Only generated when `--custom-ca` is passed
- Deleted on re-runs without `--custom-ca` (prevents stale overrides)
- Added to `.gitignore`

### Node.js (frontend)

The web container uses `NODE_EXTRA_CA_CERTS` which **adds** to Node's trust store (unlike Python's `SSL_CERT_FILE` which replaces it). This is set via the compose override.

## Generate Your Own CA (Manual)

If you prefer not to use `generate-certs.sh`:

```bash
# 1. Create CA
openssl genrsa -out ca.key 4096
openssl req -x509 -new -nodes -key ca.key -sha256 -days 3650 \
  -out ca.crt -subj "/CN=My CA/O=My Organization"

# 2. Create server key
openssl genrsa -out server-key.pem 2048

# 3. Create CSR with SANs
openssl req -new -key server-key.pem -out server.csr \
  -subj "/CN=reflector.local" \
  -addext "subjectAltName=DNS:reflector.local,DNS:localhost,IP:127.0.0.1"

# 4. Sign with CA
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \
  -CAcreateserial -out server.pem -days 365 -sha256 \
  -copy_extensions copyall

# 5. Clean up
rm server.csr ca.srl
```

## Using Existing Corporate Certificates

If your organization already has a CA:

1. Get the CA certificate in PEM format from your IT team
2. If you have a PKCS#12 (.p12/.pfx) bundle, extract the CA cert:
   ```bash
   openssl pkcs12 -in bundle.p12 -cacerts -nokeys -out ca.crt
   ```
3. If you have multiple intermediate CAs, concatenate them into one PEM file:
   ```bash
   cat intermediate-ca.crt root-ca.crt > ca.crt
   ```

## Troubleshooting

### Browser: "Your connection is not private"

The CA is not trusted on the client machine. See "Trust the CA" section above.

Check certificate expiry:
```bash
openssl x509 -noout -dates -in certs/server.pem
```

### Backend: `SSL: CERTIFICATE_VERIFY_FAILED`

CA cert not mounted or not loaded. Check inside the container:
```bash
docker compose exec server env | grep SSL_CERT_FILE
docker compose exec server python -c "
import ssl, os
print('SSL_CERT_FILE:', os.environ.get('SSL_CERT_FILE', 'not set'))
ctx = ssl.create_default_context()
print('CA certs loaded:', ctx.cert_store_stats())
"
```

### Caddy: "certificate is not valid for any names"

Domain in Caddyfile doesn't match the certificate's SAN/CN. Check:
```bash
openssl x509 -noout -text -in certs/server.pem | grep -A1 "Subject Alternative Name"
```

### Certificate chain issues

If you have intermediate CAs, concatenate them into `server.pem`:
```bash
cat server-cert.pem intermediate-ca.pem > certs/server.pem
```

Verify the chain:
```bash
openssl verify -CAfile certs/ca.crt certs/server.pem
```

### Certificate renewal

Custom CA certs are NOT auto-renewed (unlike Let's Encrypt). Replace cert files and restart:
```bash
# Replace certs
cp new-server.pem certs/server.pem
cp new-server-key.pem certs/server-key.pem

# Restart Caddy to pick up new certs
docker compose restart caddy
```
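One check that complements the troubleshooting section above: inspect the certificate chain Caddy actually presents, assuming the `reflector.local` example domain from this guide:

```bash
# Show subject, issuer, and validity of the cert served on 443
openssl s_client -connect reflector.local:443 -servername reflector.local </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer -dates
```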
294 docsv2/gpu-host-setup.md (new file)
@@ -0,0 +1,294 @@
|
||||
# Standalone GPU Host Setup
|
||||
|
||||
Deploy Reflector's GPU transcription/diarization/translation service on a dedicated machine, separate from the main Reflector instance. Useful when:
|
||||
|
||||
- Your GPU machine is on a different network than the Reflector server
|
||||
- You want to share one GPU service across multiple Reflector instances
|
||||
- The GPU machine has special hardware/drivers that can't run the full stack
|
||||
- You need to scale GPU processing independently
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────┐ HTTPS ┌────────────────────┐
|
||||
│ Reflector Server │ ────────────────────── │ GPU Host │
|
||||
│ (server, worker, │ TRANSCRIPT_URL │ (transcription, │
|
||||
│ web, postgres, │ DIARIZATION_URL │ diarization, │
|
||||
│ redis, hatchet) │ TRANSLATE_URL │ translation) │
|
||||
│ │ │ │
|
||||
│ setup-selfhosted.sh │ │ setup-gpu-host.sh │
|
||||
│ --hosted │ │ │
|
||||
└─────────────────────┘ └────────────────────┘
|
||||
```
|
||||
|
||||
The GPU service is a standalone FastAPI app that exposes transcription, diarization, translation, and audio padding endpoints. It has **no dependencies** on PostgreSQL, Redis, Hatchet, or any other Reflector service.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### On the GPU machine
|
||||
|
||||
```bash
|
||||
git clone <reflector-repo>
|
||||
cd reflector
|
||||
|
||||
# Set HuggingFace token (required for diarization models)
|
||||
export HF_TOKEN=your-huggingface-token
|
||||
|
||||
# Deploy with HTTPS (Let's Encrypt)
|
||||
./scripts/setup-gpu-host.sh --domain gpu.example.com --api-key my-secret-key
|
||||
|
||||
# Or deploy with custom CA
|
||||
./scripts/generate-certs.sh gpu.local
|
||||
./scripts/setup-gpu-host.sh --domain gpu.local --custom-ca certs/ --api-key my-secret-key
|
||||
```
|
||||
|
||||
### On the Reflector machine
|
||||
|
||||
```bash
|
||||
# If the GPU host uses a custom CA, trust it
|
||||
./scripts/setup-selfhosted.sh --hosted --garage --caddy \
|
||||
--extra-ca /path/to/gpu-machine-ca.crt
|
||||
|
||||
# Or if you already have --custom-ca for your local domain
|
||||
./scripts/setup-selfhosted.sh --hosted --garage --caddy \
|
||||
--domain reflector.local --custom-ca certs/ \
|
||||
--extra-ca /path/to/gpu-machine-ca.crt
|
||||
```
|
||||
|
||||
Then configure `server/.env` to point to the GPU host:
|
||||
|
||||
```bash
|
||||
TRANSCRIPT_BACKEND=modal
|
||||
TRANSCRIPT_URL=https://gpu.example.com
|
||||
TRANSCRIPT_MODAL_API_KEY=my-secret-key
|
||||
|
||||
DIARIZATION_BACKEND=modal
|
||||
DIARIZATION_URL=https://gpu.example.com
|
||||
DIARIZATION_MODAL_API_KEY=my-secret-key
|
||||
|
||||
TRANSLATION_BACKEND=modal
|
||||
TRANSLATE_URL=https://gpu.example.com
|
||||
TRANSLATION_MODAL_API_KEY=my-secret-key
|
||||
```
|
||||
|
||||
## Script Options

```
./scripts/setup-gpu-host.sh [OPTIONS]

Options:
  --domain DOMAIN    Domain name for HTTPS (Let's Encrypt or custom cert)
  --custom-ca PATH   Custom CA (directory or single PEM file)
  --extra-ca FILE    Additional CA cert to trust (repeatable)
  --api-key KEY      API key to protect the service (strongly recommended)
  --cpu              CPU-only mode (no NVIDIA GPU required)
  --port PORT        Host port (default: 443 with Caddy, 8000 without)
```

## Deployment Scenarios

### Public internet with Let's Encrypt

GPU machine has a public IP and domain:

```bash
./scripts/setup-gpu-host.sh --domain gpu.example.com --api-key my-secret-key
```

Requirements:
- DNS A record: `gpu.example.com` → GPU machine's public IP
- Ports 80 and 443 open
- Caddy auto-provisions Let's Encrypt certificate

### Internal network with custom CA

GPU machine on a private network:

```bash
# Generate certs on the GPU machine
./scripts/generate-certs.sh gpu.internal "IP:192.168.1.200"

# Deploy
./scripts/setup-gpu-host.sh --domain gpu.internal --custom-ca certs/ --api-key my-secret-key
```

On each machine that connects (including the Reflector server), add DNS:

```bash
echo "192.168.1.200 gpu.internal" | sudo tee -a /etc/hosts
```

### IP-only (no domain)

No domain needed — just use the machine's IP:

```bash
./scripts/setup-gpu-host.sh --api-key my-secret-key
```
Caddy is not used; the GPU service runs directly on port 8000 over plain HTTP. Without a domain there is no HTTPS, so the Reflector machine connects via `http://<GPU_IP>:8000`.

### CPU-only (no NVIDIA GPU)

Works on any machine — transcription will be slower:

```bash
./scripts/setup-gpu-host.sh --cpu --domain gpu.example.com --api-key my-secret-key
```

## DNS Resolution

The Reflector server must be able to reach the GPU host by name or IP.

| Setup | DNS Method | TRANSCRIPT_URL example |
|-------|------------|----------------------|
| Public domain | DNS A record | `https://gpu.example.com` |
| Internal domain | `/etc/hosts` on both machines | `https://gpu.internal` |
| IP only | No DNS needed | `http://192.168.1.200:8000` |

For internal domains, add the GPU machine's IP to `/etc/hosts` on the Reflector machine:

```bash
echo "192.168.1.200 gpu.internal" | sudo tee -a /etc/hosts
```

If the Reflector server runs in Docker, the containers resolve DNS from the host (Docker's default DNS behavior). So adding to the host's `/etc/hosts` is sufficient.
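To sanity-check the `/etc/hosts` entry before wiring up `server/.env`, a minimal Python sketch (the name and IP are the hypothetical values from above):

```python
import socket

HOST = "gpu.internal"          # hypothetical internal name
EXPECTED_IP = "192.168.1.200"  # hypothetical GPU host IP

# getaddrinfo goes through the same resolver path the OS uses,
# so this confirms the /etc/hosts entry is actually picked up
resolved = {info[4][0] for info in socket.getaddrinfo(HOST, 443)}
print(resolved)
assert EXPECTED_IP in resolved, f"{HOST} does not resolve to {EXPECTED_IP}"
```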

## Multi-CA Setup

When your Reflector instance has its own CA (for `reflector.local`) and the GPU host has a different CA:

**On the GPU machine:**

```bash
./scripts/generate-certs.sh gpu.local
./scripts/setup-gpu-host.sh --domain gpu.local --custom-ca certs/ --api-key my-key
```

**On the Reflector machine:**

```bash
# Your local CA for reflector.local + the GPU host's CA
./scripts/setup-selfhosted.sh --hosted --garage --caddy \
  --domain reflector.local \
  --custom-ca certs/ \
  --extra-ca /path/to/gpu-machine-ca.crt
```

The `--extra-ca` flag appends the GPU host's CA to the trust bundle. Backend containers trust both CAs — your local domain works AND outbound calls to the GPU host succeed.

You can repeat `--extra-ca` for multiple remote services:

```bash
--extra-ca /path/to/gpu-ca.crt --extra-ca /path/to/llm-ca.crt
```
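Under the hood this is just PEM concatenation: a client that trusts the combined bundle validates certs from either CA. A minimal Python sketch of the same idea, assuming the two CA files from the example above exist locally (paths are placeholders):

```python
import ssl
import tempfile

# Concatenate both CA certs into one bundle, the same way --extra-ca
# appends to the containers' trust store (file paths are hypothetical)
bundle = tempfile.NamedTemporaryFile(suffix=".pem", delete=False)
for ca in ("/path/to/gpu-ca.crt", "/path/to/llm-ca.crt"):
    with open(ca, "rb") as f:
        bundle.write(f.read() + b"\n")
bundle.close()

# Any TLS client built on this context now accepts certs from either CA
ctx = ssl.create_default_context(cafile=bundle.name)
print(ctx.cert_store_stats())  # shows how many CA certs were loaded
```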

## API Key Authentication

The GPU service uses Bearer token authentication via `REFLECTOR_GPU_APIKEY`:

```bash
# Test from the Reflector machine
curl -s https://gpu.example.com/docs   # No auth needed for docs
curl -s -X POST https://gpu.example.com/v1/audio/transcriptions \
  -H "Authorization: Bearer <my-secret-key>" \
  -F "file=@audio.wav"  # gitleaks:allow
```

If `REFLECTOR_GPU_APIKEY` is not set, the service accepts all requests (open access). Always use `--api-key` for internet-facing deployments.

The same key goes in Reflector's `server/.env` as `TRANSCRIPT_MODAL_API_KEY`, `DIARIZATION_MODAL_API_KEY`, and `TRANSLATION_MODAL_API_KEY`.
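The same check from Python, as a minimal sketch (`httpx` assumed installed; URL and key are the hypothetical values used throughout this guide):

```python
import httpx

BASE = "https://gpu.example.com"  # hypothetical
API_KEY = "my-secret-key"         # hypothetical; matches --api-key

# Without the key the protected endpoints return 401
headers = {"Authorization": f"Bearer {API_KEY}"}
with open("audio.wav", "rb") as f:
    r = httpx.post(
        f"{BASE}/v1/audio/transcriptions",
        headers=headers,
        files={"file": f},
        timeout=600,  # transcription can take a while
    )
r.raise_for_status()
print(r.json())
```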

## Files

| File | Checked in? | Purpose |
|------|-------------|---------|
| `docker-compose.gpu-host.yml` | Yes | Static compose file with profiles (`gpu`, `cpu`, `caddy`) |
| `.env.gpu-host` | No (generated) | Environment variables (HF_TOKEN, API key, ports) |
| `Caddyfile.gpu-host` | No (generated) | Caddy config (only when using HTTPS) |
| `docker-compose.gpu-ca.yml` | No (generated) | CA cert mounts override (only with --custom-ca) |
| `certs/` | No (generated) | Staged certificates (when using --custom-ca) |

The compose file is checked into the repo — you can read it to understand exactly what runs. The script only generates env vars, Caddyfile, and CA overrides. Profiles control which service starts:

```bash
# What the script does under the hood:
docker compose -f docker-compose.gpu-host.yml --profile gpu --profile caddy \
  --env-file .env.gpu-host up -d

# CPU mode:
docker compose -f docker-compose.gpu-host.yml --profile cpu --profile caddy \
  --env-file .env.gpu-host up -d
```

Both the `gpu` and `cpu` services get the network alias `transcription`, so Caddy's config works with either.

## Management

```bash
# View logs
docker compose -f docker-compose.gpu-host.yml --profile gpu logs -f gpu

# Restart
docker compose -f docker-compose.gpu-host.yml --profile gpu restart gpu

# Stop
docker compose -f docker-compose.gpu-host.yml --profile gpu --profile caddy down

# Re-run setup
./scripts/setup-gpu-host.sh [same flags]

# Rebuild after code changes
docker compose -f docker-compose.gpu-host.yml --profile gpu build gpu
docker compose -f docker-compose.gpu-host.yml --profile gpu up -d gpu
```

If you deployed with `--custom-ca`, include the CA override in manual commands:

```bash
docker compose -f docker-compose.gpu-host.yml -f docker-compose.gpu-ca.yml \
  --profile gpu logs -f gpu
```

## Troubleshooting

### GPU service won't start

Check logs:

```bash
docker compose -f docker-compose.gpu-host.yml --profile gpu logs gpu
```

Common causes:
- NVIDIA driver not installed or `nvidia-container-toolkit` missing
- `HF_TOKEN` not set (diarization model download fails)
- Port already in use

### Reflector can't connect to GPU host

From the Reflector machine:

```bash
# Test HTTPS connectivity
curl -v https://gpu.example.com/docs

# If using custom CA, test with explicit CA
curl --cacert /path/to/gpu-ca.crt https://gpu.internal/docs
```

From inside the Reflector container:

```bash
docker compose exec server python -c "
import httpx
r = httpx.get('https://gpu.internal/docs')
print(r.status_code)
"
```

### SSL: CERTIFICATE_VERIFY_FAILED

The Reflector backend doesn't trust the GPU host's CA. Fix:

```bash
# Re-run Reflector setup with the GPU host's CA
./scripts/setup-selfhosted.sh --hosted --extra-ca /path/to/gpu-ca.crt
```

### Diarization returns errors

- Accept pyannote model licenses on HuggingFace:
  - https://huggingface.co/pyannote/speaker-diarization-3.1
  - https://huggingface.co/pyannote/segmentation-3.0
- Verify `HF_TOKEN` is set in `.env.gpu-host`

@@ -24,6 +24,8 @@ This document explains the internals of the self-hosted deployment: how the setu
The self-hosted deployment runs the entire Reflector platform on a single server using Docker Compose. A single bash script (`scripts/setup-selfhosted.sh`) handles all configuration and orchestration. The key design principles are:

- **One command to deploy** — flags select which features to enable
- **Config memory** — CLI args are saved to `data/.selfhosted-last-args`; re-run with no flags to replay
- **Per-service overrides** — individual ML backends (transcript, diarization, translation, padding, mixdown) can be overridden independently from the base mode
- **Idempotent** — safe to re-run without losing existing configuration
- **Profile-based composition** — Docker Compose profiles activate optional services
- **No external dependencies required** — with `--garage` and `--ollama-*`, everything runs locally

@@ -61,8 +63,9 @@ Creates or updates the backend environment file from `server/.env.selfhosted.exa
- **Infrastructure** — PostgreSQL URL, Redis host, Celery broker (all pointing to Docker-internal hostnames)
- **Public URLs** — `BASE_URL` and `CORS_ORIGIN` computed from the domain (if `--domain`), IP (if detected on Linux), or `localhost`
- **WebRTC** — `WEBRTC_HOST` set to the server's LAN IP so browsers can reach UDP ICE candidates
- **Specialized models** — always points to `http://transcription:8000` (the Docker network alias shared by GPU and CPU containers)
- **HuggingFace token** — prompts interactively for pyannote model access; writes to root `.env` so Docker Compose can inject it into GPU/CPU containers
- **ML backends (per-service)** — Each ML service (transcript, diarization, translation, padding, mixdown) is configured independently using "effective backends" (`EFF_TRANSCRIPT`, `EFF_DIARIZATION`, `EFF_TRANSLATION`, `EFF_PADDING`, `EFF_MIXDOWN`). These are resolved from the base mode default + any `--transcript`/`--diarization`/`--translation`/`--padding`/`--mixdown` overrides (see the sketch after this list). For `modal` backends, the URL is `http://transcription:8000` (GPU mode), user-provided (hosted mode), or read from existing env (CPU mode with override). For CPU backends, no URL is needed (in-process). If a service is overridden to `modal` in CPU mode without a URL configured, the script warns the user to set `TRANSCRIPT_URL` in `server/.env`
- **CPU timeouts** — `TRANSCRIPT_FILE_TIMEOUT` and `DIARIZATION_FILE_TIMEOUT` are increased to 3600s only for services actually using CPU backends (whisper/pyannote), not blanket for the whole mode
- **HuggingFace token** — prompted when diarization uses `pyannote` (in-process) or when GPU mode is active (GPU container needs it). Writes to root `.env` so Docker Compose can inject it into GPU/CPU containers
- **LLM** — if `--ollama-*` is used, configures `LLM_URL` pointing to the Ollama container. Otherwise, warns that the user needs to configure an external LLM
- **Public mode** — sets `PUBLIC_MODE=true` so the app is accessible without authentication by default
- **Password auth** — if `--password` is passed, sets `AUTH_BACKEND=password`, `PUBLIC_MODE=false`, `ADMIN_EMAIL=admin@localhost`, and `ADMIN_PASSWORD_HASH` (the hash generated in Step 1). The admin user is provisioned in the database on container startup via `runserver.sh`
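How that resolution behaves, restated as a minimal Python sketch (the real implementation is shell variables in `setup-selfhosted.sh`; defaults are taken from the per-service override table in the setup guide):

```python
# Hypothetical re-statement of the EFF_* resolution logic
BASE_DEFAULTS = {
    "gpu": {"transcript": "modal", "diarization": "modal",
            "translation": "modal", "padding": "modal", "mixdown": "modal"},
    "hosted": {"transcript": "modal", "diarization": "modal",
               "translation": "modal", "padding": "modal", "mixdown": "modal"},
    "cpu": {"transcript": "whisper", "diarization": "pyannote",
            "translation": "marian", "padding": "pyav", "mixdown": "pyav"},
}

def effective_backends(mode: str, overrides: dict[str, str]) -> dict[str, str]:
    """Per-service override wins; otherwise fall back to the base-mode default."""
    return {svc: overrides.get(svc, default)
            for svc, default in BASE_DEFAULTS[mode].items()}

# --cpu --padding modal => everything in-process except padding
print(effective_backends("cpu", {"padding": "modal"}))
```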

@@ -228,11 +231,19 @@ Both the `gpu` and `cpu` services define a Docker network alias of `transcriptio
Environment variables flow through multiple layers. Understanding this prevents confusion when debugging:

```
Flags (--gpu, --garage, etc.)
CLI args (--gpu, --garage, --padding modal, --mixdown modal, etc.)
  │
  ├── setup-selfhosted.sh interprets flags
  ├── Config memory: saved to data/.selfhosted-last-args
  │     (replayed on next run if no args provided)
  │
  ├── setup-selfhosted.sh resolves effective backends:
  │     EFF_TRANSCRIPT  = override or base mode default
  │     EFF_DIARIZATION = override or base mode default
  │     EFF_TRANSLATION = override or base mode default
  │     EFF_PADDING     = override or base mode default
  │     EFF_MIXDOWN     = override or base mode default
  │     │
  │     ├── Writes server/.env (backend config)
  │     ├── Writes server/.env (backend config, per-service backends)
  │     ├── Writes www/.env (frontend config)
  │     ├── Writes .env (HF_TOKEN for compose interpolation)
  │     └── Writes Caddyfile (proxy routes)

@@ -53,9 +53,12 @@ cd reflector
# Same but without a domain (self-signed cert, access via IP):
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy

# CPU-only (same, but slower):
# CPU-only (in-process ML, no GPU container):
./scripts/setup-selfhosted.sh --cpu --ollama-cpu --garage --caddy

# Remote GPU service (your own hosted GPU, no local ML container):
./scripts/setup-selfhosted.sh --hosted --garage --caddy

# With password authentication (single admin user):
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --password mysecretpass

@@ -65,14 +68,15 @@ cd reflector

That's it. The script generates env files, secrets, starts all containers, waits for health checks, and prints the URL.

## Specialized Models (Required)
## ML Processing Modes (Required)

Pick `--gpu` or `--cpu`. This determines how **transcription, diarization, and translation** run:
Pick `--gpu`, `--cpu`, or `--hosted`. This determines how **transcription, diarization, translation, audio padding, and audio mixdown** run:

| Flag | What it does | Requires |
|------|-------------|----------|
| `--gpu` | NVIDIA GPU acceleration for ML models | NVIDIA GPU + drivers + `nvidia-container-toolkit` |
| `--cpu` | CPU-only (slower but works without GPU) | 8+ cores, 32GB+ RAM recommended |
| `--gpu` | NVIDIA GPU container for ML models | NVIDIA GPU + drivers + `nvidia-container-toolkit` |
| `--cpu` | In-process CPU processing on server/worker (no ML container) | 8+ cores, 16GB+ RAM (32GB recommended for large files) |
| `--hosted` | Remote GPU service URL (no local ML container) | A running GPU service instance (e.g. `gpu/self_hosted/`) |

## Local LLM (Optional)

@@ -130,9 +134,11 @@ Browse all available models at https://ollama.com/library.

- **`--gpu --ollama-gpu`**: Best for servers with NVIDIA GPU. Fully self-contained, no external API keys needed.
- **`--cpu --ollama-cpu`**: No GPU available but want everything self-contained. Slower but works.
- **`--hosted --ollama-cpu`**: Remote GPU for ML, local CPU for LLM. Great when you have a separate GPU server.
- **`--gpu --ollama-cpu`**: GPU for transcription, CPU for LLM. Saves GPU VRAM for ML models.
- **`--gpu`**: Have NVIDIA GPU but prefer a cloud LLM (faster/better summaries with GPT-4, Claude, etc.).
- **`--cpu`**: No GPU, prefer cloud LLM. Slowest transcription but best summary quality.
- **`--hosted`**: Remote GPU, cloud LLM. No local ML at all.

## Other Optional Flags

@@ -152,6 +158,56 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr

**Without a domain:** `--caddy` alone uses a self-signed certificate. Browsers will show a security warning that must be accepted.

## Per-Service Backend Overrides

Override individual ML services without changing the base mode. Useful when you want most services on one backend but need specific services on another.

| Flag | Valid backends | Default (`--gpu`/`--hosted`) | Default (`--cpu`) |
|------|---------------|------------------------------|-------------------|
| `--transcript BACKEND` | `whisper`, `modal` | `modal` | `whisper` |
| `--diarization BACKEND` | `pyannote`, `modal` | `modal` | `pyannote` |
| `--translation BACKEND` | `marian`, `modal`, `passthrough` | `modal` | `marian` |
| `--padding BACKEND` | `pyav`, `modal` | `modal` | `pyav` |
| `--mixdown BACKEND` | `pyav`, `modal` | `modal` | `pyav` |

**Examples:**

```bash
# CPU base, but use a remote modal service for padding only
./scripts/setup-selfhosted.sh --cpu --padding modal --garage --caddy

# GPU base, but skip translation entirely (passthrough)
./scripts/setup-selfhosted.sh --gpu --translation passthrough --garage --caddy

# CPU base with remote modal diarization and translation
./scripts/setup-selfhosted.sh --cpu --diarization modal --translation modal --garage
```

When overriding a service to `modal` in `--cpu` mode, the script will warn you to configure the service URL (`TRANSCRIPT_URL` etc.) in `server/.env` to point to your GPU service, then re-run.

When overriding a service to a CPU backend (e.g., `--transcript whisper`) in `--gpu` mode, that service runs in-process on the server/worker containers while the GPU container still serves the remaining `modal` services.

## Config Memory (No-Flag Re-run)

After a successful run, the script saves your CLI arguments to `data/.selfhosted-last-args`. On subsequent runs with no arguments, the saved configuration is automatically replayed:

```bash
# First run — saves the config
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy

# Later re-runs — same config, no flags needed
./scripts/setup-selfhosted.sh
# => "No flags provided — replaying saved configuration:"
# => "  --gpu --ollama-gpu --garage --caddy"
```

To change the configuration, pass new flags — they override and replace the saved config:

```bash
# Switch to CPU mode with overrides — this becomes the new saved config
./scripts/setup-selfhosted.sh --cpu --padding modal --garage --caddy
```
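The replay mechanism is simple enough to restate; a minimal Python sketch of the same behavior (the real implementation is bash inside `setup-selfhosted.sh`, and the file path is the one named above):

```python
import shlex
import sys
from pathlib import Path

SAVED = Path("data/.selfhosted-last-args")

def resolve_args(argv: list[str]) -> list[str]:
    """No flags: replay the saved config. Any flags: use them and resave."""
    if not argv and SAVED.exists():
        replayed = shlex.split(SAVED.read_text())
        print("No flags provided — replaying saved configuration:", " ".join(replayed))
        return replayed
    SAVED.parent.mkdir(parents=True, exist_ok=True)
    SAVED.write_text(" ".join(shlex.quote(a) for a in argv))
    return argv

args = resolve_args(sys.argv[1:])
```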

## What the Script Does

1. **Prerequisites check** — Docker, NVIDIA GPU (if needed), compose file exists
@@ -160,8 +216,9 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr
4. **Generate `www/.env`** — Auto-detects server IP, sets URLs
5. **Storage setup** — Either initializes Garage (bucket, keys, permissions) or prompts for external S3 credentials
6. **Caddyfile** — Generates domain-specific (Let's Encrypt) or IP-specific (self-signed) configuration
7. **Build & start** — Always builds GPU/CPU model image from source. With `--build`, also builds backend and frontend from source; otherwise pulls prebuilt images from the registry
8. **Health checks** — Waits for each service, pulls Ollama model if needed, warns about missing LLM config
7. **Build & start** — For `--gpu`, builds the GPU model image from source. For `--cpu` and `--hosted`, no ML container is built. With `--build`, also builds backend and frontend from source; otherwise pulls prebuilt images from the registry
8. **Auto-detects video platforms** — If `DAILY_API_KEY` is found in `server/.env`, generates `.env.hatchet` (dashboard URL/cookie config), starts Hatchet workflow engine, and generates an API token. If any video platform is configured, enables the Rooms feature
9. **Health checks** — Waits for each service, pulls Ollama model if needed, warns about missing LLM config

> For a deeper dive into each step, see [How the Self-Hosted Setup Works](selfhosted-architecture.md).

@@ -180,12 +237,30 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr
| `ADMIN_PASSWORD_HASH` | PBKDF2 hash for password auth | *(unset)* |
| `WEBRTC_HOST` | IP advertised in WebRTC ICE candidates | Auto-detected (server IP) |
| `TRANSCRIPT_URL` | Specialized model endpoint | `http://transcription:8000` |
| `PADDING_BACKEND` | Audio padding backend (`pyav` or `modal`) | `modal` (selfhosted), `pyav` (default) |
| `PADDING_URL` | Audio padding endpoint (when `PADDING_BACKEND=modal`) | `http://transcription:8000` |
| `MIXDOWN_BACKEND` | Audio mixdown backend (`pyav` or `modal`) | `modal` (selfhosted), `pyav` (default) |
| `MIXDOWN_URL` | Audio mixdown endpoint (when `MIXDOWN_BACKEND=modal`) | `http://transcription:8000` |
| `LLM_URL` | OpenAI-compatible LLM endpoint | Auto-set for Ollama modes |
| `LLM_API_KEY` | LLM API key | `not-needed` for Ollama |
| `LLM_MODEL` | LLM model name | `qwen2.5:14b` for Ollama (override with `--llm-model`) |
| `CELERY_BEAT_POLL_INTERVAL` | Override all worker polling intervals (seconds). `0` = use individual defaults | `300` (selfhosted), `0` (other) |
| `TRANSCRIPT_STORAGE_BACKEND` | Storage backend | `aws` |
| `TRANSCRIPT_STORAGE_AWS_*` | S3 credentials | Auto-set for Garage |
| `DAILY_API_KEY` | Daily.co API key (enables live rooms) | *(unset)* |
| `DAILY_SUBDOMAIN` | Daily.co subdomain | *(unset)* |
| `DAILYCO_STORAGE_AWS_ACCESS_KEY_ID` | AWS access key for reading Daily's recording bucket | *(unset)* |
| `DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY` | AWS secret key for reading Daily's recording bucket | *(unset)* |
| `ZULIP_REALM` | Zulip server hostname (e.g. `zulip.example.com`) | *(unset)* |
| `ZULIP_API_KEY` | Zulip bot API key | *(unset)* |
| `ZULIP_BOT_EMAIL` | Zulip bot email address | *(unset)* |
| `ZULIP_DAG_STREAM` | Zulip stream for pipeline failure alerts | *(unset)* |
| `ZULIP_DAG_TOPIC` | Zulip topic for pipeline failure alerts | *(unset)* |
| `HATCHET_CLIENT_TOKEN` | Hatchet API token (auto-generated) | *(unset)* |
| `HATCHET_CLIENT_SERVER_URL` | Hatchet server URL | Auto-set when Daily.co configured |
| `HATCHET_CLIENT_HOST_PORT` | Hatchet gRPC address | Auto-set when Daily.co configured |
| `TRANSCRIPT_FILE_TIMEOUT` | HTTP timeout (seconds) for file transcription requests | `600` (`3600` in CPU mode) |
| `DIARIZATION_FILE_TIMEOUT` | HTTP timeout (seconds) for file diarization requests | `600` (`3600` in CPU mode) |

### Frontend Environment (`www/.env`)

@@ -197,6 +272,7 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr
| `NEXTAUTH_SECRET` | Auth secret | Auto-generated |
| `FEATURE_REQUIRE_LOGIN` | Require authentication | `false` |
| `AUTH_PROVIDER` | Auth provider (`authentik` or `credentials`) | *(unset)* |
| `FEATURE_ROOMS` | Enable meeting rooms UI | Auto-set when video platform configured |

## Storage Options

@@ -353,6 +429,87 @@ By default, authentication is disabled (`AUTH_BACKEND=none`, `FEATURE_REQUIRE_LO
```
5. Restart: `docker compose -f docker-compose.selfhosted.yml down && ./scripts/setup-selfhosted.sh <same-flags>`

## Enabling Daily.co Live Rooms

Daily.co enables real-time meeting rooms with automatic recording and per-participant
audio tracks for improved diarization. When configured, the setup script automatically
starts the Hatchet workflow engine for multitrack recording processing.

### Prerequisites

- **Daily.co account** — Sign up at https://www.daily.co/
- **API key** — From Daily.co Dashboard → Developers → API Keys
- **Subdomain** — The `yourname` part of `yourname.daily.co`
- **AWS S3 bucket** — For Daily.co to store recordings. See [Daily.co recording storage docs](https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket)
- **IAM role ARN** — An AWS IAM role that Daily.co assumes to write recordings to your bucket

### Setup

1. Configure Daily.co env vars in `server/.env` **before** running the setup script:

   ```env
   DAILY_API_KEY=your-daily-api-key
   DAILY_SUBDOMAIN=your-subdomain
   DEFAULT_VIDEO_PLATFORM=daily
   DAILYCO_STORAGE_AWS_BUCKET_NAME=your-recordings-bucket
   DAILYCO_STORAGE_AWS_REGION=us-east-1
   DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::123456789:role/DailyCoAccess
   # Worker credentials for reading/deleting recordings from Daily's S3 bucket.
   # Required when transcript storage is separate from Daily's bucket
   # (e.g., selfhosted with Garage or a different S3 account).
   DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
   DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
   ```

   > **Important:** The `DAILYCO_STORAGE_AWS_ACCESS_KEY_ID` and `SECRET_ACCESS_KEY` are AWS IAM
   > credentials that allow the Hatchet workers to **read and delete** recording files from Daily's
   > S3 bucket. These are separate from the `ROLE_ARN` (which Daily's API uses to *write* recordings).
   > Without these keys, multitrack processing will fail with 404 errors when transcript storage
   > (e.g., Garage) uses different credentials than the Daily recording bucket. See the sketch
   > after these steps for what the workers do with them.

2. Run the setup script as normal:

   ```bash
   ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
   ```

   The script detects `DAILY_API_KEY` and automatically:
   - Starts the Hatchet workflow engine (`hatchet` container)
   - Starts Hatchet CPU and LLM workers (`hatchet-worker-cpu`, `hatchet-worker-llm`)
   - Generates a `HATCHET_CLIENT_TOKEN` and saves it to `server/.env`
   - Sets `HATCHET_CLIENT_SERVER_URL` and `HATCHET_CLIENT_HOST_PORT`
   - Enables `FEATURE_ROOMS=true` in `www/.env`
   - Registers Daily.co beat tasks (recording polling, presence reconciliation)

3. (Optional) For faster recording discovery, configure a Daily.co webhook:
   - In the Daily.co dashboard, add a webhook pointing to `https://your-domain/v1/daily/webhook`
   - Set `DAILY_WEBHOOK_SECRET` in `server/.env` (the signing secret from Daily.co)
   - Without webhooks, the system polls the Daily.co API every 15 seconds
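What the worker credentials are used for, as a minimal hedged sketch (bucket name, object key, and credential values are placeholders; the actual download/delete happens inside the Hatchet workers):

```python
import boto3

# Separate client for Daily's recording bucket — these are NOT the
# transcript-storage (e.g., Garage) credentials (values are placeholders)
daily_s3 = boto3.client(
    "s3",
    aws_access_key_id="your-aws-access-key",
    aws_secret_access_key="your-aws-secret-key",
    region_name="us-east-1",
)

bucket = "your-recordings-bucket"
key = "recordings/session-abc/track-0.webm"  # hypothetical object key

# Read a per-participant track, then delete it once processed
daily_s3.download_file(bucket, key, "/tmp/track-0.webm")
daily_s3.delete_object(Bucket=bucket, Key=key)
```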

### What Gets Started

| Service | Purpose |
|---------|---------|
| `hatchet` | Workflow orchestration engine (manages multitrack processing pipelines) |
| `hatchet-worker-cpu` | CPU-heavy audio tasks (track mixdown, waveform generation) |
| `hatchet-worker-llm` | Transcription, LLM inference (summaries, topics, titles), orchestration |

### Hatchet Dashboard

The Hatchet workflow engine includes a web dashboard for monitoring workflow runs and debugging. The setup script auto-generates `.env.hatchet` at the project root with the dashboard URL and cookie domain configuration. This file is git-ignored.

- **With Caddy**: Accessible at `https://your-domain:8888` (TLS via Caddy)
- **Without Caddy**: Accessible at `http://your-ip:8888` (direct port mapping)

### Conditional Beat Tasks

Beat tasks are registered based on which services are configured (a minimal sketch of the gating follows this list):

- **Whereby tasks** (only if `WHEREBY_API_KEY` or `AWS_PROCESS_RECORDING_QUEUE_URL`): `process_messages`, `reprocess_failed_recordings`
- **Daily.co tasks** (only if `DAILY_API_KEY`): `poll_daily_recordings`, `trigger_daily_reconciliation`, `reprocess_failed_daily_recordings`
- **Platform tasks** (if any video platform configured): `process_meetings`, `sync_all_ics_calendars`, `create_upcoming_meetings`
- **Always registered**: `cleanup_old_public_data` (if `PUBLIC_MODE`), `healthcheck_ping` (if `HEALTHCHECK_URL`)
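The gating is plain env-presence checks; a minimal Python sketch of the pattern (task names are the real ones above, the `register` function is a hypothetical stand-in for adding a task to the beat schedule):

```python
import os

def register(task_name: str) -> None:
    """Hypothetical stand-in for adding a beat task to the schedule."""
    print("registered:", task_name)

env = os.environ
if env.get("WHEREBY_API_KEY") or env.get("AWS_PROCESS_RECORDING_QUEUE_URL"):
    register("process_messages")
    register("reprocess_failed_recordings")
if env.get("DAILY_API_KEY"):
    register("poll_daily_recordings")
    register("trigger_daily_reconciliation")
    register("reprocess_failed_daily_recordings")
if env.get("PUBLIC_MODE"):
    register("cleanup_old_public_data")
if env.get("HEALTHCHECK_URL"):
    register("healthcheck_ping")
```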

## Enabling Real Domain with Let's Encrypt

By default, Caddy uses self-signed certificates. For a real domain:
@@ -446,6 +603,15 @@ docker compose -f docker-compose.selfhosted.yml logs server --tail 50
For self-signed certs, your browser will warn. Click Advanced > Proceed.
For Let's Encrypt, ensure ports 80/443 are open and DNS is pointed correctly.

### File processing timeout on CPU
CPU transcription and diarization are significantly slower than GPU. A 20-minute audio file can take 20-40 minutes to process on CPU. The setup script automatically sets `TRANSCRIPT_FILE_TIMEOUT=3600` and `DIARIZATION_FILE_TIMEOUT=3600` (1 hour) for `--cpu` mode. If you still hit timeouts with very long files, increase these values in `server/.env`:
```bash
# Increase to 2 hours for files over 1 hour
TRANSCRIPT_FILE_TIMEOUT=7200
DIARIZATION_FILE_TIMEOUT=7200
```
Then restart the worker: `docker compose -f docker-compose.selfhosted.yml restart worker`

### Summaries/topics not generating
Check LLM configuration:
```bash
@@ -462,9 +628,9 @@ docker compose -f docker-compose.selfhosted.yml exec gpu curl http://localhost:8
## Updating

```bash
# Option A: Pull latest prebuilt images and restart
# Option A: Pull latest prebuilt images and restart (replays saved config automatically)
docker compose -f docker-compose.selfhosted.yml down
./scripts/setup-selfhosted.sh <same-flags-as-before>
./scripts/setup-selfhosted.sh

# Option B: Build from source (after git pull) and restart
git pull
@@ -475,6 +641,8 @@ docker compose -f docker-compose.selfhosted.yml down
docker compose -f docker-compose.selfhosted.yml build gpu  # or cpu
```

> **Note on config memory:** Running with no flags replays the saved config from your last run. Running with *any* flags replaces the saved config entirely — the script always saves the complete set of flags you provide. See [Config Memory](#config-memory-no-flag-re-run).

The setup script is idempotent — it won't overwrite existing secrets or env vars that are already set.

## Architecture Overview

@@ -501,22 +669,29 @@ The setup script is idempotent — it won't overwrite existing secrets or env va
      │               │               │
      v               v               v
┌─────────────┐  ┌─────────┐     ┌─────────┐
│transcription│  │postgres │     │ redis   │
│ (gpu/cpu)   │  │  :5432  │     │  :6379  │
│   :8000     │  └─────────┘     └─────────┘
└─────────────┘
│  ML models  │  │postgres │     │ redis   │
│  (varies)   │  │  :5432  │     │  :6379  │
└─────────────┘  └─────────┘     └─────────┘
      │
┌─────┴─────┐    ┌─────────┐
│  ollama   │    │ garage  │
│ (optional)│    │(optional│
│  :11435   │    │   S3)   │
└───────────┘    └─────────┘

┌───────────────────────────────────┐
│   Hatchet (optional — Daily.co)   │
│  ┌─────────┐  ┌───────────────┐   │
│  │ hatchet │  │ hatchet-worker│   │
│  │  :8888  │──│  -cpu / -llm  │   │
│  └─────────┘  └───────────────┘   │
└───────────────────────────────────┘

ML models box varies by mode:
  --gpu:    Local GPU container (transcription:8000)
  --cpu:    In-process on server/worker (no container)
  --hosted: Remote GPU service (user URL)
```

All services communicate over Docker's internal network. Only Caddy (if enabled) exposes ports to the internet.
All services communicate over Docker's internal network. Only Caddy (if enabled) exposes ports to the internet. Hatchet services are only started when `DAILY_API_KEY` is configured.

## Future Plans for the Self-Hosted Script

The following features are supported by Reflector but are **not yet integrated into the self-hosted setup script** and require manual configuration:

- **Daily.co live rooms with multitrack processing**: Daily.co enables real-time meeting rooms with automatic recording and per-participant audio tracks for improved diarization. Requires a Daily.co account, API key, and an AWS S3 bucket for recording storage. Currently not automated in the script because the worker orchestration (hatchet) is not yet supported in the selfhosted compose setup.

@@ -132,13 +132,22 @@ fi
echo "  -> $DIARIZER_URL"

echo ""
echo "Deploying padding (CPU audio processing via Modal SDK)..."
modal deploy reflector_padding.py
if [ $? -ne 0 ]; then
echo "Deploying padding (CPU audio processing)..."
PADDING_URL=$(modal deploy reflector_padding.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
if [ -z "$PADDING_URL" ]; then
    echo "Error: Failed to deploy padding. Check Modal dashboard for details."
    exit 1
fi
echo "  -> reflector-padding.pad_track (Modal SDK function)"
echo "  -> $PADDING_URL"

echo ""
echo "Deploying mixdown (CPU multi-track audio mixing)..."
MIXDOWN_URL=$(modal deploy reflector_mixdown.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
if [ -z "$MIXDOWN_URL" ]; then
    echo "Error: Failed to deploy mixdown. Check Modal dashboard for details."
    exit 1
fi
echo "  -> $MIXDOWN_URL"

# --- Output Configuration ---
echo ""
@@ -157,5 +166,11 @@ echo "DIARIZATION_BACKEND=modal"
echo "DIARIZATION_URL=$DIARIZER_URL"
echo "DIARIZATION_MODAL_API_KEY=$API_KEY"
echo ""
echo "# Padding uses Modal SDK (requires MODAL_TOKEN_ID/SECRET in worker containers)"
echo "PADDING_BACKEND=modal"
echo "PADDING_URL=$PADDING_URL"
echo "PADDING_MODAL_API_KEY=$API_KEY"
echo ""
echo "MIXDOWN_BACKEND=modal"
echo "MIXDOWN_URL=$MIXDOWN_URL"
echo "MIXDOWN_MODAL_API_KEY=$API_KEY"
echo "# --- End Modal Configuration ---"

385
gpu/modal_deployments/reflector_mixdown.py
Normal file
@@ -0,0 +1,385 @@
"""
|
||||
Reflector GPU backend - audio mixdown
|
||||
=====================================
|
||||
|
||||
CPU-intensive multi-track audio mixdown service.
|
||||
Mixes N audio tracks into a single MP3 using PyAV amix filter graph.
|
||||
|
||||
IMPORTANT: This mixdown logic is duplicated from server/reflector/utils/audio_mixdown.py
|
||||
for Modal deployment isolation (Modal can't import from server/reflector/). If you modify
|
||||
the PyAV filter graph or mixdown algorithm, you MUST update both:
|
||||
- gpu/modal_deployments/reflector_mixdown.py (this file)
|
||||
- server/reflector/utils/audio_mixdown.py
|
||||
|
||||
Constants duplicated from server/reflector/utils/audio_constants.py for same reason.
|
||||
"""
|
||||
|
||||
import os
|
||||
import tempfile
|
||||
from fractions import Fraction
|
||||
import asyncio
|
||||
|
||||
import modal
|
||||
|
||||
S3_TIMEOUT = 120 # Higher than padding (60s) — multiple track downloads
|
||||
MIXDOWN_TIMEOUT = 1200 + (S3_TIMEOUT * 2) # 1440s total
|
||||
SCALEDOWN_WINDOW = 60
|
||||
DISCONNECT_CHECK_INTERVAL = 2
|
||||
|
||||
app = modal.App("reflector-mixdown")
|
||||
|
||||
# CPU-based image (mixdown is CPU-bound, no GPU needed)
|
||||
image = (
|
||||
modal.Image.debian_slim(python_version="3.12")
|
||||
.apt_install("ffmpeg") # Required by PyAV
|
||||
.pip_install(
|
||||
"av==13.1.0", # PyAV for audio processing
|
||||
"requests==2.32.3", # HTTP for presigned URL downloads/uploads
|
||||
"fastapi==0.115.12", # API framework
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@app.function(
|
||||
cpu=4.0, # Higher than padding (2.0) for multi-track mixing
|
||||
timeout=MIXDOWN_TIMEOUT,
|
||||
scaledown_window=SCALEDOWN_WINDOW,
|
||||
image=image,
|
||||
secrets=[modal.Secret.from_name("reflector-gpu")],
|
||||
)
|
||||
@modal.asgi_app()
|
||||
def web():
|
||||
from fastapi import Depends, FastAPI, HTTPException, Request, status
|
||||
from fastapi.security import OAuth2PasswordBearer
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MixdownRequest(BaseModel):
|
||||
track_urls: list[str]
|
||||
output_url: str
|
||||
target_sample_rate: int | None = None
|
||||
offsets_seconds: list[float] | None = None
|
||||
|
||||
class MixdownResponse(BaseModel):
|
||||
size: int
|
||||
duration_ms: float = 0.0
|
||||
cancelled: bool = False
|
||||
|
||||
web_app = FastAPI()
|
||||
|
||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
|
||||
|
||||
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
|
||||
if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
|
||||
return
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid API key",
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
|
||||
@web_app.post("/mixdown", dependencies=[Depends(apikey_auth)])
|
||||
async def mixdown_endpoint(request: Request, req: MixdownRequest) -> MixdownResponse:
|
||||
"""Modal web endpoint for mixing audio tracks with disconnect detection."""
|
||||
import logging
|
||||
import threading
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
valid_urls = [u for u in req.track_urls if u]
|
||||
if not valid_urls:
|
||||
raise HTTPException(status_code=400, detail="No valid track URLs provided")
|
||||
if req.offsets_seconds is not None:
|
||||
if len(req.offsets_seconds) != len(req.track_urls):
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"offsets_seconds length ({len(req.offsets_seconds)}) "
|
||||
f"must match track_urls ({len(req.track_urls)})",
|
||||
)
|
||||
if any(o > 18000 for o in req.offsets_seconds):
|
||||
raise HTTPException(status_code=400, detail="offsets_seconds exceeds maximum 18000s (5 hours)")
|
||||
if not req.output_url:
|
||||
raise HTTPException(status_code=400, detail="output_url cannot be empty")
|
||||
|
||||
logger.info(f"Mixdown request: {len(valid_urls)} tracks")
|
||||
|
||||
# Thread-safe cancellation flag
|
||||
cancelled = threading.Event()
|
||||
|
||||
async def check_disconnect():
|
||||
"""Background task to check for client disconnect."""
|
||||
while not cancelled.is_set():
|
||||
await asyncio.sleep(DISCONNECT_CHECK_INTERVAL)
|
||||
if await request.is_disconnected():
|
||||
logger.warning("Client disconnected, setting cancellation flag")
|
||||
cancelled.set()
|
||||
break
|
||||
|
||||
disconnect_task = asyncio.create_task(check_disconnect())
|
||||
|
||||
try:
|
||||
result = await asyncio.get_event_loop().run_in_executor(
|
||||
None, _mixdown_tracks_blocking, req, cancelled, logger
|
||||
)
|
||||
return MixdownResponse(**result)
|
||||
finally:
|
||||
cancelled.set()
|
||||
disconnect_task.cancel()
|
||||
try:
|
||||
await disconnect_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
def _mixdown_tracks_blocking(req, cancelled, logger) -> dict:
|
||||
"""Blocking CPU-bound mixdown work with periodic cancellation checks.
|
||||
|
||||
Downloads all tracks, builds PyAV amix filter graph, encodes to MP3,
|
||||
and uploads the result to the presigned output URL.
|
||||
"""
|
||||
import av
|
||||
import requests
|
||||
from av.audio.resampler import AudioResampler
|
||||
import time
|
||||
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
track_paths = []
|
||||
output_path = None
|
||||
last_check = time.time()
|
||||
|
||||
try:
|
||||
# --- Download all tracks ---
|
||||
valid_urls = [u for u in req.track_urls if u]
|
||||
for i, url in enumerate(valid_urls):
|
||||
if cancelled.is_set():
|
||||
logger.info("Cancelled during download phase")
|
||||
return {"size": 0, "duration_ms": 0.0, "cancelled": True}
|
||||
|
||||
logger.info(f"Downloading track {i}")
|
||||
response = requests.get(url, stream=True, timeout=S3_TIMEOUT)
|
||||
response.raise_for_status()
|
||||
|
||||
track_path = os.path.join(temp_dir, f"track_{i}.webm")
|
||||
total_bytes = 0
|
||||
chunk_count = 0
|
||||
with open(track_path, "wb") as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
total_bytes += len(chunk)
|
||||
chunk_count += 1
|
||||
if chunk_count % 12 == 0:
|
||||
now = time.time()
|
||||
if now - last_check >= DISCONNECT_CHECK_INTERVAL:
|
||||
if cancelled.is_set():
|
||||
logger.info(f"Cancelled during track {i} download")
|
||||
return {"size": 0, "duration_ms": 0.0, "cancelled": True}
|
||||
last_check = now
|
||||
|
||||
track_paths.append(track_path)
|
||||
logger.info(f"Track {i} downloaded: {total_bytes} bytes")
|
||||
|
||||
if not track_paths:
|
||||
raise ValueError("No tracks downloaded")
|
||||
|
||||
# --- Detect sample rate ---
|
||||
target_sample_rate = req.target_sample_rate
|
||||
if target_sample_rate is None:
|
||||
for path in track_paths:
|
||||
try:
|
||||
container = av.open(path)
|
||||
for frame in container.decode(audio=0):
|
||||
target_sample_rate = frame.sample_rate
|
||||
container.close()
|
||||
break
|
||||
else:
|
||||
container.close()
|
||||
continue
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
if target_sample_rate is None:
|
||||
raise ValueError("Could not detect sample rate from any track")
|
||||
|
||||
logger.info(f"Target sample rate: {target_sample_rate}")
|
||||
|
||||
# --- Calculate per-input delays ---
|
||||
input_offsets_seconds = None
|
||||
if req.offsets_seconds is not None:
|
||||
input_offsets_seconds = [
|
||||
req.offsets_seconds[i] for i, url in enumerate(req.track_urls) if url
|
||||
]
|
||||
|
||||
delays_ms = []
|
||||
if input_offsets_seconds is not None:
|
||||
base = min(input_offsets_seconds) if input_offsets_seconds else 0.0
|
||||
delays_ms = [max(0, int(round((o - base) * 1000))) for o in input_offsets_seconds]
|
||||
else:
|
||||
delays_ms = [0 for _ in track_paths]
|
||||
|
||||
# --- Build filter graph ---
|
||||
# N abuffer -> optional adelay -> amix -> aformat -> abuffersink
|
||||
graph = av.filter.Graph()
|
||||
inputs = []
|
||||
|
||||
for idx in range(len(track_paths)):
|
||||
args = (
|
||||
f"time_base=1/{target_sample_rate}:"
|
||||
f"sample_rate={target_sample_rate}:"
|
||||
f"sample_fmt=s32:"
|
||||
f"channel_layout=stereo"
|
||||
)
|
||||
in_ctx = graph.add("abuffer", args=args, name=f"in{idx}")
|
||||
inputs.append(in_ctx)
|
||||
|
||||
mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix")
|
||||
fmt = graph.add(
|
||||
"aformat",
|
||||
args=f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}",
|
||||
name="fmt",
|
||||
)
|
||||
sink = graph.add("abuffersink", name="out")
|
||||
|
||||
for idx, in_ctx in enumerate(inputs):
|
||||
delay_ms = delays_ms[idx] if idx < len(delays_ms) else 0
|
||||
if delay_ms > 0:
|
||||
adelay = graph.add(
|
||||
"adelay",
|
||||
args=f"delays={delay_ms}|{delay_ms}:all=1",
|
||||
name=f"delay{idx}",
|
||||
)
|
||||
in_ctx.link_to(adelay)
|
||||
adelay.link_to(mixer, 0, idx)
|
||||
else:
|
||||
in_ctx.link_to(mixer, 0, idx)
|
||||
|
||||
mixer.link_to(fmt)
|
||||
fmt.link_to(sink)
|
||||
graph.configure()
|
||||
|
||||
# --- Open all containers and decode ---
|
||||
containers = []
|
||||
output_path = os.path.join(temp_dir, "mixed.mp3")
|
||||
|
||||
try:
|
||||
for path in track_paths:
|
||||
containers.append(av.open(path))
|
||||
|
||||
decoders = [c.decode(audio=0) for c in containers]
|
||||
active = [True] * len(decoders)
|
||||
resamplers = [
|
||||
AudioResampler(format="s32", layout="stereo", rate=target_sample_rate)
|
||||
for _ in decoders
|
||||
]
|
||||
|
||||
# Open output MP3
|
||||
out_container = av.open(output_path, "w", format="mp3")
|
||||
out_stream = out_container.add_stream("libmp3lame", rate=target_sample_rate)
|
||||
total_duration = 0
|
||||
|
||||
while any(active):
|
||||
# Check cancellation periodically
|
||||
now = time.time()
|
||||
if now - last_check >= DISCONNECT_CHECK_INTERVAL:
|
||||
if cancelled.is_set():
|
||||
logger.info("Cancelled during mixing")
|
||||
out_container.close()
|
||||
return {"size": 0, "duration_ms": 0.0, "cancelled": True}
|
||||
last_check = now
|
||||
|
||||
for i, (dec, is_active) in enumerate(zip(decoders, active)):
|
||||
if not is_active:
|
||||
continue
|
||||
try:
|
||||
frame = next(dec)
|
||||
except StopIteration:
|
||||
active[i] = False
|
||||
inputs[i].push(None)
|
||||
continue
|
||||
|
||||
if frame.sample_rate != target_sample_rate:
|
||||
continue
|
||||
|
||||
out_frames = resamplers[i].resample(frame) or []
|
||||
for rf in out_frames:
|
||||
rf.sample_rate = target_sample_rate
|
||||
rf.time_base = Fraction(1, target_sample_rate)
|
||||
inputs[i].push(rf)
|
||||
|
||||
while True:
|
||||
try:
|
||||
mixed = sink.pull()
|
||||
except Exception:
|
||||
break
|
||||
mixed.sample_rate = target_sample_rate
|
||||
mixed.time_base = Fraction(1, target_sample_rate)
|
||||
for packet in out_stream.encode(mixed):
|
||||
out_container.mux(packet)
|
||||
total_duration += packet.duration
|
||||
|
||||
# Flush filter graph
|
||||
while True:
|
||||
try:
|
||||
mixed = sink.pull()
|
||||
except Exception:
|
||||
break
|
||||
mixed.sample_rate = target_sample_rate
|
||||
mixed.time_base = Fraction(1, target_sample_rate)
|
||||
for packet in out_stream.encode(mixed):
|
||||
out_container.mux(packet)
|
||||
total_duration += packet.duration
|
||||
|
||||
# Flush encoder
|
||||
for packet in out_stream.encode(None):
|
||||
out_container.mux(packet)
|
||||
total_duration += packet.duration
|
||||
|
||||
# Calculate duration in ms
|
||||
last_tb = out_stream.time_base
|
||||
duration_ms = 0.0
|
||||
if last_tb and total_duration > 0:
|
||||
duration_ms = round(float(total_duration * last_tb * 1000), 2)
|
||||
|
||||
out_container.close()
|
||||
|
||||
finally:
|
||||
for c in containers:
|
||||
try:
|
||||
c.close()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
file_size = os.path.getsize(output_path)
|
||||
logger.info(f"Mixdown complete: {file_size} bytes, {duration_ms}ms")
|
||||
|
||||
if cancelled.is_set():
|
||||
logger.info("Cancelled after mixing, before upload")
|
||||
return {"size": 0, "duration_ms": 0.0, "cancelled": True}
|
||||
|
||||
# --- Upload result ---
|
||||
logger.info("Uploading mixed audio to S3")
|
||||
with open(output_path, "rb") as f:
|
||||
upload_response = requests.put(req.output_url, data=f, timeout=S3_TIMEOUT)
|
||||
upload_response.raise_for_status()
|
||||
logger.info(f"Upload complete: {file_size} bytes")
|
||||
|
||||
return {"size": file_size, "duration_ms": duration_ms}
|
||||
|
||||
finally:
|
||||
# Cleanup all temp files
|
||||
for path in track_paths:
|
||||
if os.path.exists(path):
|
||||
try:
|
||||
os.unlink(path)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to cleanup track file: {e}")
|
||||
if output_path and os.path.exists(output_path):
|
||||
try:
|
||||
os.unlink(output_path)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to cleanup output file: {e}")
|
||||
try:
|
||||
os.rmdir(temp_dir)
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to cleanup temp directory: {e}")
|
||||
|
||||
return web_app
|
||||
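For reference, the request/response contract above can be exercised directly; a minimal client sketch (the endpoint URL, API key, and presigned URLs are placeholders):

```python
import httpx

MIXDOWN_URL = "https://<your-modal-app>.modal.run/mixdown"  # placeholder
API_KEY = "my-secret-key"  # placeholder

payload = {
    # Presigned GET URLs for each participant track (placeholders)
    "track_urls": ["https://s3.example/track-0?sig=abc", "https://s3.example/track-1?sig=def"],
    # Presigned PUT URL where the mixed MP3 is written
    "output_url": "https://s3.example/mixed.mp3?sig=ghi",
    # Align track 1 to start 2.5s after track 0
    "offsets_seconds": [0.0, 2.5],
}

r = httpx.post(
    MIXDOWN_URL,
    json=payload,
    headers={"Authorization": f"Bearer {API_KEY}"},
    timeout=1440,  # matches MIXDOWN_TIMEOUT server-side
)
r.raise_for_status()
print(r.json())  # {"size": ..., "duration_ms": ..., "cancelled": false}
```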

@@ -52,10 +52,12 @@ OPUS_DEFAULT_BIT_RATE = 128000
    timeout=PADDING_TIMEOUT,
    scaledown_window=SCALEDOWN_WINDOW,
    image=image,
    secrets=[modal.Secret.from_name("reflector-gpu")],
)
@modal.asgi_app()
def web():
    from fastapi import FastAPI, Request, HTTPException
    from fastapi import Depends, FastAPI, HTTPException, Request, status
    from fastapi.security import OAuth2PasswordBearer
    from pydantic import BaseModel

    class PaddingRequest(BaseModel):
@@ -70,7 +72,18 @@ def web():

    web_app = FastAPI()

    @web_app.post("/pad")
    oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")

    def apikey_auth(apikey: str = Depends(oauth2_scheme)):
        if apikey == os.environ["REFLECTOR_GPU_APIKEY"]:
            return
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
            headers={"WWW-Authenticate": "Bearer"},
        )

    @web_app.post("/pad", dependencies=[Depends(apikey_auth)])
    async def pad_track_endpoint(request: Request, req: PaddingRequest) -> PaddingResponse:
        """Modal web endpoint for padding audio tracks with disconnect detection."""

@@ -42,6 +42,7 @@ COPY pyproject.toml uv.lock /app/
COPY ./app /app/app
COPY ./main.py /app/
COPY ./runserver.sh /app/
COPY ./docker-entrypoint.sh /app/

# prevent uv failing with too many open files on big cpus
ENV UV_CONCURRENT_INSTALLS=16
@@ -52,6 +53,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \

EXPOSE 8000

CMD ["sh", "/app/runserver.sh"]
RUN chmod +x /app/docker-entrypoint.sh

CMD ["sh", "/app/docker-entrypoint.sh"]

@@ -26,6 +26,7 @@ COPY pyproject.toml uv.lock /app/
COPY ./app /app/app
COPY ./main.py /app/
COPY ./runserver.sh /app/
COPY ./docker-entrypoint.sh /app/

# prevent uv failing with too many open files on big cpus
ENV UV_CONCURRENT_INSTALLS=16
@@ -36,4 +37,6 @@ RUN --mount=type=cache,target=/root/.cache/uv \

EXPOSE 8000

CMD ["sh", "/app/runserver.sh"]
RUN chmod +x /app/docker-entrypoint.sh

CMD ["sh", "/app/docker-entrypoint.sh"]

@@ -3,6 +3,8 @@ from contextlib import asynccontextmanager
from fastapi import FastAPI

from .routers.diarization import router as diarization_router
from .routers.mixdown import router as mixdown_router
from .routers.padding import router as padding_router
from .routers.transcription import router as transcription_router
from .routers.translation import router as translation_router
from .services.transcriber import WhisperService
@@ -27,4 +29,6 @@ def create_app() -> FastAPI:
    app.include_router(transcription_router)
    app.include_router(translation_router)
    app.include_router(diarization_router)
    app.include_router(padding_router)
    app.include_router(mixdown_router)
    return app

288
gpu/self_hosted/app/routers/mixdown.py
Normal file
@@ -0,0 +1,288 @@
"""
|
||||
Audio mixdown endpoint for selfhosted GPU service.
|
||||
|
||||
CPU-intensive multi-track audio mixing service for combining N audio tracks
|
||||
into a single MP3 using PyAV amix filter graph.
|
||||
|
||||
IMPORTANT: This mixdown logic is duplicated from server/reflector/utils/audio_mixdown.py
|
||||
for deployment isolation (self_hosted can't import from server/reflector/). If you modify
|
||||
the PyAV filter graph or mixdown algorithm, you MUST update both:
|
||||
- gpu/self_hosted/app/routers/mixdown.py (this file)
|
||||
- server/reflector/utils/audio_mixdown.py
|
||||
|
||||
Constants duplicated from server/reflector/utils/audio_constants.py for same reason.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
from fractions import Fraction
|
||||
|
||||
import av
|
||||
import requests
|
||||
from av.audio.resampler import AudioResampler
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
from ..auth import apikey_auth
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(tags=["mixdown"])
|
||||
|
||||
S3_TIMEOUT = 120
|
||||
|
||||
|
||||
class MixdownRequest(BaseModel):
|
||||
track_urls: list[str]
|
||||
output_url: str
|
||||
target_sample_rate: int | None = None
|
||||
offsets_seconds: list[float] | None = None
|
||||
|
||||
|
||||
class MixdownResponse(BaseModel):
|
||||
size: int
|
||||
duration_ms: float = 0.0
|
||||
cancelled: bool = False
|
||||
|
||||
|
||||
@router.post("/mixdown", dependencies=[Depends(apikey_auth)], response_model=MixdownResponse)
|
||||
def mixdown_tracks(req: MixdownRequest):
|
||||
"""Mix multiple audio tracks into single MP3 using PyAV amix filter graph."""
|
||||
valid_urls = [u for u in req.track_urls if u]
|
||||
if not valid_urls:
|
||||
raise HTTPException(status_code=400, detail="No valid track URLs provided")
|
||||
if req.offsets_seconds is not None:
|
||||
if len(req.offsets_seconds) != len(req.track_urls):
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"offsets_seconds length ({len(req.offsets_seconds)}) "
|
||||
f"must match track_urls ({len(req.track_urls)})",
|
||||
)
|
||||
if any(o > 18000 for o in req.offsets_seconds):
|
||||
raise HTTPException(
|
||||
status_code=400, detail="offsets_seconds exceeds maximum 18000s (5 hours)"
|
||||
)
|
||||
if not req.output_url:
|
||||
raise HTTPException(status_code=400, detail="output_url cannot be empty")
|
||||
|
||||
logger.info("Mixdown request: %d tracks", len(valid_urls))
|
||||
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
track_paths = []
|
||||
output_path = None
|
||||
|
||||
try:
|
||||
# --- Download all tracks ---
|
||||
for i, url in enumerate(valid_urls):
|
||||
logger.info("Downloading track %d", i)
|
||||
response = requests.get(url, stream=True, timeout=S3_TIMEOUT)
|
||||
response.raise_for_status()
|
||||
|
||||
track_path = os.path.join(temp_dir, f"track_{i}.webm")
|
||||
total_bytes = 0
|
||||
with open(track_path, "wb") as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
total_bytes += len(chunk)
|
||||
|
||||
track_paths.append(track_path)
|
||||
logger.info("Track %d downloaded: %d bytes", i, total_bytes)
|
||||
|
||||
if not track_paths:
|
||||
raise HTTPException(status_code=400, detail="No tracks could be downloaded")
|
||||
|
||||
# --- Detect sample rate ---
|
||||
target_sample_rate = req.target_sample_rate
|
||||
if target_sample_rate is None:
|
||||
for path in track_paths:
|
||||
try:
|
||||
container = av.open(path)
|
||||
for frame in container.decode(audio=0):
|
||||
target_sample_rate = frame.sample_rate
|
||||
container.close()
|
||||
break
|
||||
else:
|
||||
container.close()
|
||||
continue
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
if target_sample_rate is None:
|
||||
raise HTTPException(
|
||||
status_code=400, detail="Could not detect sample rate from any track"
|
||||
)
|
||||
|
||||
logger.info("Target sample rate: %d", target_sample_rate)
|
||||
|
||||
# --- Calculate per-input delays ---
|
||||
input_offsets_seconds = None
|
||||
if req.offsets_seconds is not None:
|
||||
input_offsets_seconds = [
|
||||
req.offsets_seconds[i] for i, url in enumerate(req.track_urls) if url
|
||||
]
|
||||
|
||||
delays_ms = []
|
||||
if input_offsets_seconds is not None:
|
||||
base = min(input_offsets_seconds) if input_offsets_seconds else 0.0
|
||||
delays_ms = [max(0, int(round((o - base) * 1000))) for o in input_offsets_seconds]
|
||||
else:
|
||||
delays_ms = [0 for _ in track_paths]
|
||||
|
||||
# --- Build filter graph ---
|
||||
# N abuffer -> optional adelay -> amix -> aformat -> abuffersink
|
||||
graph = av.filter.Graph()
|
||||
inputs = []
|
||||
|
||||
for idx in range(len(track_paths)):
|
||||
args = (
|
||||
f"time_base=1/{target_sample_rate}:"
|
||||
f"sample_rate={target_sample_rate}:"
|
||||
f"sample_fmt=s32:"
|
||||
f"channel_layout=stereo"
|
||||
)
|
||||
in_ctx = graph.add("abuffer", args=args, name=f"in{idx}")
|
||||
inputs.append(in_ctx)
|
||||
|
||||
mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix")
|
||||
fmt = graph.add(
|
||||
"aformat",
|
||||
args=f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}",
|
||||
name="fmt",
|
||||
)
|
||||
sink = graph.add("abuffersink", name="out")
|
||||
|
||||
for idx, in_ctx in enumerate(inputs):
|
||||
delay_ms = delays_ms[idx] if idx < len(delays_ms) else 0
|
||||
if delay_ms > 0:
|
||||
adelay = graph.add(
|
||||
"adelay",
|
||||
args=f"delays={delay_ms}|{delay_ms}:all=1",
|
||||
name=f"delay{idx}",
|
||||
)
|
||||
in_ctx.link_to(adelay)
|
||||
adelay.link_to(mixer, 0, idx)
|
||||
else:
|
||||
in_ctx.link_to(mixer, 0, idx)
|
||||
|
||||
mixer.link_to(fmt)
|
||||
fmt.link_to(sink)
|
||||
graph.configure()
|
||||
|
||||
        # --- Open all containers and decode ---
        containers = []
        output_path = os.path.join(temp_dir, "mixed.mp3")

        try:
            for path in track_paths:
                containers.append(av.open(path))

            decoders = [c.decode(audio=0) for c in containers]
            active = [True] * len(decoders)
            resamplers = [
                AudioResampler(format="s32", layout="stereo", rate=target_sample_rate)
                for _ in decoders
            ]

            # Open output MP3
            out_container = av.open(output_path, "w", format="mp3")
            out_stream = out_container.add_stream("libmp3lame", rate=target_sample_rate)
            total_duration = 0

            while any(active):
                for i, (dec, is_active) in enumerate(zip(decoders, active)):
                    if not is_active:
                        continue
                    try:
                        frame = next(dec)
                    except StopIteration:
                        active[i] = False
                        inputs[i].push(None)
                        continue

                    if frame.sample_rate != target_sample_rate:
                        continue

                    out_frames = resamplers[i].resample(frame) or []
                    for rf in out_frames:
                        rf.sample_rate = target_sample_rate
                        rf.time_base = Fraction(1, target_sample_rate)
                        inputs[i].push(rf)

                while True:
                    try:
                        mixed = sink.pull()
                    except Exception:
                        break
                    mixed.sample_rate = target_sample_rate
                    mixed.time_base = Fraction(1, target_sample_rate)
                    for packet in out_stream.encode(mixed):
                        out_container.mux(packet)
                        total_duration += packet.duration

            # Flush filter graph
            while True:
                try:
                    mixed = sink.pull()
                except Exception:
                    break
                mixed.sample_rate = target_sample_rate
                mixed.time_base = Fraction(1, target_sample_rate)
                for packet in out_stream.encode(mixed):
                    out_container.mux(packet)
                    total_duration += packet.duration

            # Flush encoder
            for packet in out_stream.encode(None):
                out_container.mux(packet)
                total_duration += packet.duration

            # Calculate duration in ms
            last_tb = out_stream.time_base
            duration_ms = 0.0
            if last_tb and total_duration > 0:
                duration_ms = round(float(total_duration * last_tb * 1000), 2)

            out_container.close()
        finally:
            for c in containers:
                try:
                    c.close()
                except Exception:
                    pass

        file_size = os.path.getsize(output_path)
        logger.info("Mixdown complete: %d bytes, %.2fms", file_size, duration_ms)

        # --- Upload result ---
        logger.info("Uploading mixed audio to S3")
        with open(output_path, "rb") as f:
            upload_response = requests.put(req.output_url, data=f, timeout=S3_TIMEOUT)
        upload_response.raise_for_status()
        logger.info("Upload complete: %d bytes", file_size)

        return MixdownResponse(size=file_size, duration_ms=duration_ms)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Mixdown failed: %s", e, exc_info=True)
        raise HTTPException(status_code=500, detail=f"Mixdown failed: {e}") from e
    finally:
        for path in track_paths:
            if os.path.exists(path):
                try:
                    os.unlink(path)
                except Exception as e:
                    logger.warning("Failed to cleanup track file: %s", e)
        if output_path and os.path.exists(output_path):
            try:
                os.unlink(output_path)
            except Exception as e:
                logger.warning("Failed to cleanup output file: %s", e)
        try:
            os.rmdir(temp_dir)
        except Exception as e:
            logger.warning("Failed to cleanup temp directory: %s", e)
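# Example call (a minimal sketch; the route path, host and auth header name
# are assumptions not shown in this diff):
#
#   import requests
#   resp = requests.post(
#       "http://transcription:8000/mixdown",
#       headers={"X-API-Key": "selfhosted"},
#       json={
#           "track_urls": ["https://s3/presigned-get-0", "https://s3/presigned-get-1"],
#           "offsets_seconds": [0.0, 1.5],
#           "output_url": "https://s3/presigned-put-mixed",
#       },
#   )
#   resp.json()  # {"size": ..., "duration_ms": ...}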
199
gpu/self_hosted/app/routers/padding.py
Normal file
@@ -0,0 +1,199 @@
"""
Audio padding endpoint for selfhosted GPU service.

CPU-intensive audio padding service for adding silence to audio tracks.
Uses PyAV filter graph (adelay) for precise track synchronization.

IMPORTANT: This padding logic is duplicated from server/reflector/utils/audio_padding.py
for deployment isolation (self_hosted can't import from server/reflector/). If you modify
the PyAV filter graph or padding algorithm, you MUST update both:
- gpu/self_hosted/app/routers/padding.py (this file)
- server/reflector/utils/audio_padding.py

Constants duplicated from server/reflector/utils/audio_constants.py for the same reason.
"""

import logging
import math
import os
import tempfile
from fractions import Fraction

import av
import requests
from av.audio.resampler import AudioResampler
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel

from ..auth import apikey_auth

logger = logging.getLogger(__name__)

router = APIRouter(tags=["padding"])

# ref B0F71CE8-FC59-4AA5-8414-DAFB836DB711
OPUS_STANDARD_SAMPLE_RATE = 48000
OPUS_DEFAULT_BIT_RATE = 128000

S3_TIMEOUT = 60
class PaddingRequest(BaseModel):
    track_url: str
    output_url: str
    start_time_seconds: float
    track_index: int


class PaddingResponse(BaseModel):
    size: int
    cancelled: bool = False
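# Example call (a minimal sketch; the host and auth header name are
# assumptions not shown in this diff):
#
#   import requests
#   resp = requests.post(
#       "http://transcription:8000/pad",
#       headers={"X-API-Key": "selfhosted"},
#       json={
#           "track_url": "https://s3/presigned-get-track",
#           "output_url": "https://s3/presigned-put-padded",
#           "start_time_seconds": 1.5,
#           "track_index": 0,
#       },
#   )
#   resp.json()  # {"size": 123456, "cancelled": false}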
@router.post("/pad", dependencies=[Depends(apikey_auth)], response_model=PaddingResponse)
def pad_track(req: PaddingRequest):
    """Pad audio track with silence using PyAV adelay filter graph."""
    if not req.track_url:
        raise HTTPException(status_code=400, detail="track_url cannot be empty")
    if not req.output_url:
        raise HTTPException(status_code=400, detail="output_url cannot be empty")
    if req.start_time_seconds <= 0:
        raise HTTPException(
            status_code=400,
            detail=f"start_time_seconds must be positive, got {req.start_time_seconds}",
        )
    if req.start_time_seconds > 18000:
        raise HTTPException(
            status_code=400,
            detail="start_time_seconds exceeds maximum 18000s (5 hours)",
        )

    logger.info(
        "Padding request: track %d, delay=%.3fs", req.track_index, req.start_time_seconds
    )
    temp_dir = tempfile.mkdtemp()
    input_path = None
    output_path = None

    try:
        # Download source audio
        logger.info("Downloading track for padding")
        response = requests.get(req.track_url, stream=True, timeout=S3_TIMEOUT)
        response.raise_for_status()

        input_path = os.path.join(temp_dir, "track.webm")
        total_bytes = 0
        with open(input_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    total_bytes += len(chunk)
        logger.info("Track downloaded: %d bytes", total_bytes)

        # Apply padding using PyAV
        output_path = os.path.join(temp_dir, "padded.webm")
        delay_ms = math.floor(req.start_time_seconds * 1000)
        logger.info("Padding track %d with %dms delay using PyAV", req.track_index, delay_ms)
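        # e.g. start_time_seconds=2.0035 -> math.floor(2003.5) -> delay_ms=2003,
        # so the delay is truncated (never rounded up) to whole milliseconds.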
        in_container = av.open(input_path)
        in_stream = next((s for s in in_container.streams if s.type == "audio"), None)
        if in_stream is None:
            in_container.close()
            raise HTTPException(status_code=400, detail="No audio stream in input")

        with av.open(output_path, "w", format="webm") as out_container:
            out_stream = out_container.add_stream("libopus", rate=OPUS_STANDARD_SAMPLE_RATE)
            out_stream.bit_rate = OPUS_DEFAULT_BIT_RATE
            graph = av.filter.Graph()

            abuf_args = (
                f"time_base=1/{OPUS_STANDARD_SAMPLE_RATE}:"
                f"sample_rate={OPUS_STANDARD_SAMPLE_RATE}:"
                f"sample_fmt=s16:"
                f"channel_layout=stereo"
            )
            src = graph.add("abuffer", args=abuf_args, name="src")
            aresample_f = graph.add("aresample", args="async=1", name="ares")
            delays_arg = f"{delay_ms}|{delay_ms}"
            adelay_f = graph.add(
                "adelay", args=f"delays={delays_arg}:all=1", name="delay"
            )
            sink = graph.add("abuffersink", name="sink")

            src.link_to(aresample_f)
            aresample_f.link_to(adelay_f)
            adelay_f.link_to(sink)
            graph.configure()

            resampler = AudioResampler(
                format="s16", layout="stereo", rate=OPUS_STANDARD_SAMPLE_RATE
            )
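            # Decode -> resample to s16/stereo/48kHz -> push through the graph.
            # aresample async=1 lets the filter insert or drop samples to keep
            # timestamps continuous if the decoded input has small gaps.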
            for frame in in_container.decode(in_stream):
                out_frames = resampler.resample(frame) or []
                for rframe in out_frames:
                    rframe.sample_rate = OPUS_STANDARD_SAMPLE_RATE
                    rframe.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
                    src.push(rframe)

                while True:
                    try:
                        f_out = sink.pull()
                    except Exception:
                        break
                    f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
                    f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
                    for packet in out_stream.encode(f_out):
                        out_container.mux(packet)

            # Flush filter graph
            src.push(None)
            while True:
                try:
                    f_out = sink.pull()
                except Exception:
                    break
                f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
                f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
                for packet in out_stream.encode(f_out):
                    out_container.mux(packet)

            # Flush encoder
            for packet in out_stream.encode(None):
                out_container.mux(packet)

        in_container.close()

        file_size = os.path.getsize(output_path)
        logger.info("Padding complete: %d bytes", file_size)
        # Upload padded track
        logger.info("Uploading padded track to S3")
        with open(output_path, "rb") as f:
            upload_response = requests.put(req.output_url, data=f, timeout=S3_TIMEOUT)
        upload_response.raise_for_status()
        logger.info("Upload complete: %d bytes", file_size)

        return PaddingResponse(size=file_size)

    except HTTPException:
        raise
    except Exception as e:
        logger.error("Padding failed for track %d: %s", req.track_index, e, exc_info=True)
        raise HTTPException(status_code=500, detail=f"Padding failed: {e}") from e
    finally:
        if input_path and os.path.exists(input_path):
            try:
                os.unlink(input_path)
            except Exception as e:
                logger.warning("Failed to cleanup input file: %s", e)
        if output_path and os.path.exists(output_path):
            try:
                os.unlink(output_path)
            except Exception as e:
                logger.warning("Failed to cleanup output file: %s", e)
        try:
            os.rmdir(temp_dir)
        except Exception as e:
            logger.warning("Failed to cleanup temp directory: %s", e)
23
gpu/self_hosted/docker-entrypoint.sh
Normal file
@@ -0,0 +1,23 @@
#!/bin/sh
set -e

# Custom CA certificate injection
# If a CA cert is mounted at this path (via docker-compose.ca.yml),
# add it to the system trust store and configure all Python SSL libraries.
CUSTOM_CA_PATH="/usr/local/share/ca-certificates/custom-ca.crt"

if [ -s "$CUSTOM_CA_PATH" ]; then
    echo "[entrypoint] Custom CA certificate detected, updating trust store..."
    update-ca-certificates 2>/dev/null

    # update-ca-certificates creates a combined bundle (system + custom CAs)
    COMBINED_BUNDLE="/etc/ssl/certs/ca-certificates.crt"
    export SSL_CERT_FILE="$COMBINED_BUNDLE"
    export REQUESTS_CA_BUNDLE="$COMBINED_BUNDLE"
    export CURL_CA_BUNDLE="$COMBINED_BUNDLE"
    # Note: GRPC_DEFAULT_SSL_ROOTS_FILE_PATH is intentionally NOT set here.
    # Setting it causes grpcio to attempt TLS on connections that may be plaintext.
    echo "[entrypoint] CA trust store updated (SSL_CERT_FILE=$COMBINED_BUNDLE)"
fi

exec sh /app/runserver.sh
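# For reference, a minimal sketch of the compose override that mounts the
# cert at CUSTOM_CA_PATH (the service name is an assumption):
#
#   services:
#     gpu:
#       volumes:
#         - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro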
@@ -11,9 +11,11 @@ dependencies = [
    "faster-whisper>=1.1.0",
    "librosa==0.10.1",
    "numpy<2",
    "silero-vad==5.1.0",
    "silero-vad==5.1.2",
    "transformers>=4.35.0",
    "sentencepiece",
    "pyannote.audio==3.1.0",
    "pyannote.audio==3.4.0",
    "pytorch-lightning<2.6",
    "torchaudio>=2.3.0",
    "av>=13.1.0",
]
29
gpu/self_hosted/uv.lock
generated
@@ -726,7 +726,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" },
    { url = "https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" },
    { url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" },
    { url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" },
    { url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" },
    { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" },
    { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" },
@@ -737,7 +736,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" },
    { url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" },
    { url = "https://files.pythonhosted.org/packages/f7/0b/bc13f787394920b23073ca3b6c4a7a21396301ed75a655bcb47196b50e6e/greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc", size = 655191, upload-time = "2025-08-07T13:45:29.752Z" },
    { url = "https://files.pythonhosted.org/packages/f2/d6/6adde57d1345a8d0f14d31e4ab9c23cfe8e2cd39c3baf7674b4b0338d266/greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a", size = 649516, upload-time = "2025-08-07T13:53:16.314Z" },
    { url = "https://files.pythonhosted.org/packages/7f/3b/3a3328a788d4a473889a2d403199932be55b1b0060f4ddd96ee7cdfcad10/greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504", size = 652169, upload-time = "2025-08-07T13:18:32.861Z" },
    { url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" },
    { url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" },
@@ -748,7 +746,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" },
    { url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" },
    { url = "https://files.pythonhosted.org/packages/c0/aa/687d6b12ffb505a4447567d1f3abea23bd20e73a5bed63871178e0831b7a/greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5", size = 699218, upload-time = "2025-08-07T13:45:30.969Z" },
    { url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" },
    { url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" },
    { url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" },
    { url = "https://files.pythonhosted.org/packages/23/6e/74407aed965a4ab6ddd93a7ded3180b730d281c77b765788419484cdfeef/greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269", size = 1612508, upload-time = "2025-11-04T12:42:23.427Z" },
@@ -1745,7 +1742,7 @@ wheels = [

[[package]]
name = "pyannote-audio"
version = "3.1.0"
version = "3.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "asteroid-filterbanks" },
@@ -1768,9 +1765,9 @@ dependencies = [
    { name = "torchaudio" },
    { name = "torchmetrics" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ad/55/7253267c35e2aa9188b1d86cba121eb5bdd91ed12d3194488625a008cae7/pyannote.audio-3.1.0.tar.gz", hash = "sha256:da04705443d3b74607e034d3ca88f8b572c7e9672dd9a4199cab65a0dbc33fad", size = 14812058, upload-time = "2023-11-16T12:26:38.939Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ec/1e/efe9619c38f1281ddf21640654d8ea9e3f67c459b76f78657b26d8557bbe/pyannote_audio-3.4.0.tar.gz", hash = "sha256:d523d883cb8d37cb6daf99f3ba83f9138bb193646ad71e6eae7deb89d8ddd642", size = 804850, upload-time = "2025-09-09T07:04:51.17Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/a1/37/158859ce4c45b5ba2dca40b53b0c10d36f935b7f6d4e737298397167c8b1/pyannote.audio-3.1.0-py2.py3-none-any.whl", hash = "sha256:66ab485728c6e141760e80555cb7a083e7be824cd528cc79b9e6f7d6421a91ae", size = 208592, upload-time = "2023-11-16T12:26:36.726Z" },
    { url = "https://files.pythonhosted.org/packages/79/13/620c6f711b723653092fd063bfee82a6af5ea3a4d3c42efc53ce623a7f4d/pyannote_audio-3.4.0-py2.py3-none-any.whl", hash = "sha256:36e38f058059f46da3478dda581cda53d9d85a21173a3e70bbdbc3ba93b5e1b7", size = 897789, upload-time = "2025-09-09T07:04:49.464Z" },
]

[[package]]
@@ -2072,11 +2069,13 @@ name = "reflector-gpu"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
    { name = "av" },
    { name = "fastapi", extra = ["standard"] },
    { name = "faster-whisper" },
    { name = "librosa" },
    { name = "numpy" },
    { name = "pyannote-audio" },
    { name = "pytorch-lightning" },
    { name = "sentencepiece" },
    { name = "silero-vad" },
    { name = "torch" },
@@ -2087,13 +2086,15 @@ dependencies = [

[package.metadata]
requires-dist = [
    { name = "av", specifier = ">=13.1.0" },
    { name = "fastapi", extras = ["standard"], specifier = ">=0.116.1" },
    { name = "faster-whisper", specifier = ">=1.1.0" },
    { name = "librosa", specifier = "==0.10.1" },
    { name = "numpy", specifier = "<2" },
    { name = "pyannote-audio", specifier = "==3.1.0" },
    { name = "pyannote-audio", specifier = "==3.4.0" },
    { name = "pytorch-lightning", specifier = "<2.6" },
    { name = "sentencepiece" },
    { name = "silero-vad", specifier = "==5.1.0" },
    { name = "silero-vad", specifier = "==5.1.2" },
    { name = "torch", specifier = ">=2.3.0" },
    { name = "torchaudio", specifier = ">=2.3.0" },
    { name = "transformers", specifier = ">=4.35.0" },
@@ -2152,7 +2153,7 @@ wheels = [

[[package]]
name = "requests"
version = "2.32.5"
version = "2.33.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "certifi" },
@@ -2160,9 +2161,9 @@ dependencies = [
    { name = "idna" },
    { name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
sdist = { url = "https://files.pythonhosted.org/packages/34/64/8860370b167a9721e8956ae116825caff829224fbca0ca6e7bf8ddef8430/requests-2.33.0.tar.gz", hash = "sha256:c7ebc5e8b0f21837386ad0e1c8fe8b829fa5f544d8df3b2253bff14ef29d7652", size = 134232, upload-time = "2026-03-25T15:10:41.586Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
    { url = "https://files.pythonhosted.org/packages/56/5d/c814546c2333ceea4ba42262d8c4d55763003e767fa169adc693bd524478/requests-2.33.0-py3-none-any.whl", hash = "sha256:3324635456fa185245e24865e810cecec7b4caf933d7eb133dcde67d48cee69b", size = 65017, upload-time = "2026-03-25T15:10:40.382Z" },
]

[[package]]
@@ -2473,16 +2474,16 @@ wheels = [

[[package]]
name = "silero-vad"
version = "5.1"
version = "5.1.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "onnxruntime" },
    { name = "torch" },
    { name = "torchaudio" },
]
sdist = { url = "https://files.pythonhosted.org/packages/7c/5d/b912e45d21b8b61859a552554893222d2cdebfd0f9afa7e8ba69c7a3441a/silero_vad-5.1.tar.gz", hash = "sha256:c644275ba5df06cee596cc050ba0bd1e0f5237d1abfa44d58dd4618f6e77434d", size = 3996829, upload-time = "2024-07-09T13:19:24.181Z" }
sdist = { url = "https://files.pythonhosted.org/packages/b1/b4/d0311b2e6220a11f8f4699f4a278cb088131573286cdfe804c87c7eb5123/silero_vad-5.1.2.tar.gz", hash = "sha256:c442971160026d2d7aa0ad83f0c7ee86c89797a65289fe625c8ea59fc6fb828d", size = 5098526, upload-time = "2024-10-09T09:50:47.019Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/0e/be/0fdbc72030b93d6f55107490d5d2185ddf0dbabdc921f589649d3e92ccd5/silero_vad-5.1-py3-none-any.whl", hash = "sha256:ecb50b484f538f7a962ce5cd3c07120d9db7b9d5a0c5861ccafe459856f22c8f", size = 3939986, upload-time = "2024-07-09T13:19:21.383Z" },
    { url = "https://files.pythonhosted.org/packages/98/f7/5ae11d13fbb733cd3bfd7ff1c3a3902e6f55437df4b72307c1f168146268/silero_vad-5.1.2-py3-none-any.whl", hash = "sha256:93b41953d7774b165407fda6b533c119c5803864e367d5034dc626c82cfdf661", size = 5026737, upload-time = "2024-10-09T09:50:44.355Z" },
]

[[package]]
130
scripts/generate-certs.sh
Executable file
@@ -0,0 +1,130 @@
#!/usr/bin/env bash
#
# Generate a local CA and server certificate for Reflector self-hosted deployments.
#
# Usage:
#   ./scripts/generate-certs.sh DOMAIN [EXTRA_SANS...]
#
# Examples:
#   ./scripts/generate-certs.sh reflector.local
#   ./scripts/generate-certs.sh reflector.local "DNS:gpu.local,IP:192.168.1.100"
#
# Generates in certs/:
#   ca.key          — CA private key (keep secret)
#   ca.crt          — CA certificate (distribute to clients)
#   server-key.pem  — Server private key
#   server.pem      — Server certificate (signed by CA)
#
# Then use with setup-selfhosted.sh:
#   ./scripts/setup-selfhosted.sh --gpu --caddy --domain DOMAIN --custom-ca certs/
#
set -euo pipefail
DOMAIN="${1:?Usage: $0 DOMAIN [EXTRA_SANS...]}"
EXTRA_SANS="${2:-}"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CERTS_DIR="$(cd "$SCRIPT_DIR/.." && pwd)/certs"

# Colors
GREEN='\033[0;32m'
CYAN='\033[0;36m'
NC='\033[0m'
info() { echo -e "${CYAN}==>${NC} $*"; }
ok()   { echo -e "${GREEN} ✓${NC} $*"; }

# Check for openssl
if ! command -v openssl &>/dev/null; then
    echo "Error: openssl is required but not found. Install it first." >&2
    exit 1
fi

mkdir -p "$CERTS_DIR"

# Build SAN list
SAN_LIST="DNS:$DOMAIN,DNS:localhost,IP:127.0.0.1"
if [[ -n "$EXTRA_SANS" ]]; then
    SAN_LIST="$SAN_LIST,$EXTRA_SANS"
fi

info "Generating CA and server certificate for: $DOMAIN"
echo "    SANs: $SAN_LIST"
echo ""
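# e.g. ./scripts/generate-certs.sh reflector.local "DNS:gpu.local,IP:192.168.1.100"
# yields SAN_LIST="DNS:reflector.local,DNS:localhost,IP:127.0.0.1,DNS:gpu.local,IP:192.168.1.100"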
# --- Step 1: Generate CA ---
if [[ -f "$CERTS_DIR/ca.key" ]] && [[ -f "$CERTS_DIR/ca.crt" ]]; then
    ok "CA already exists at certs/ca.key + certs/ca.crt — reusing"
else
    info "Generating CA key and certificate..."
    openssl genrsa -out "$CERTS_DIR/ca.key" 4096 2>/dev/null
    openssl req -x509 -new -nodes \
        -key "$CERTS_DIR/ca.key" \
        -sha256 -days 3650 \
        -out "$CERTS_DIR/ca.crt" \
        -subj "/CN=Reflector Local CA/O=Reflector Self-Hosted"
    ok "CA certificate generated (valid for 10 years)"
fi

# --- Step 2: Generate server key ---
info "Generating server key..."
openssl genrsa -out "$CERTS_DIR/server-key.pem" 2048 2>/dev/null
ok "Server key generated"

# --- Step 3: Create CSR with SANs ---
info "Creating certificate signing request..."
openssl req -new \
    -key "$CERTS_DIR/server-key.pem" \
    -out "$CERTS_DIR/server.csr" \
    -subj "/CN=$DOMAIN" \
    -addext "subjectAltName=$SAN_LIST"
ok "CSR created"

# --- Step 4: Sign with CA ---
info "Signing server certificate with CA..."
# -copy_extensions carries the SANs from the CSR into the final cert
# (requires OpenSSL 3.0+)
openssl x509 -req \
    -in "$CERTS_DIR/server.csr" \
    -CA "$CERTS_DIR/ca.crt" \
    -CAkey "$CERTS_DIR/ca.key" \
    -CAcreateserial \
    -out "$CERTS_DIR/server.pem" \
    -days 365 -sha256 \
    -copy_extensions copyall \
    2>/dev/null
ok "Server certificate signed (valid for 1 year)"

# --- Cleanup ---
rm -f "$CERTS_DIR/server.csr" "$CERTS_DIR/ca.srl"

# --- Set permissions ---
chmod 644 "$CERTS_DIR/ca.crt" "$CERTS_DIR/server.pem"
chmod 600 "$CERTS_DIR/ca.key" "$CERTS_DIR/server-key.pem"

echo ""
echo "=========================================="
echo -e "  ${GREEN}Certificates generated in certs/${NC}"
echo "=========================================="
echo ""
echo "  certs/ca.key          CA private key (keep secret)"
echo "  certs/ca.crt          CA certificate (distribute to clients)"
echo "  certs/server-key.pem  Server private key"
echo "  certs/server.pem      Server certificate for $DOMAIN"
echo ""
echo "  SANs: $SAN_LIST"
echo ""
echo "Use with setup-selfhosted.sh:"
echo "  ./scripts/setup-selfhosted.sh --gpu --caddy --domain $DOMAIN --custom-ca certs/"
echo ""
echo "Trust the CA on your machine:"
case "$(uname -s)" in
    Darwin)
        echo "  sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain certs/ca.crt"
        ;;
    Linux)
        echo "  sudo cp certs/ca.crt /usr/local/share/ca-certificates/reflector-ca.crt"
        echo "  sudo update-ca-certificates"
        ;;
    *)
        echo "  See docsv2/custom-ca-setup.md for your platform"
        ;;
esac
echo ""
167
scripts/run-integration-tests.sh
Executable file
@@ -0,0 +1,167 @@
#!/usr/bin/env bash
#
# Run integration tests locally.
#
# Spins up the full stack via Docker Compose, runs the three integration tests,
# and tears everything down afterward.
#
# Required environment variables:
#   LLM_URL      — OpenAI-compatible LLM endpoint (e.g. https://api.openai.com/v1)
#   LLM_API_KEY  — API key for the LLM endpoint
#   HF_TOKEN     — HuggingFace token for pyannote gated models
#
# Optional:
#   LLM_MODEL    — Model name (default: qwen2.5:14b)
#
# Flags:
#   --build      — Rebuild backend Docker images (server, workers, test-runner)
#
# Usage:
#   export LLM_URL="https://api.openai.com/v1"
#   export LLM_API_KEY="sk-..."
#   export HF_TOKEN="hf_..."
#   ./scripts/run-integration-tests.sh
#   ./scripts/run-integration-tests.sh --build   # rebuild backend images
#
set -euo pipefail
BUILD_FLAG=""
for arg in "$@"; do
    case "$arg" in
        --build) BUILD_FLAG="--build" ;;
    esac
done

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
COMPOSE_DIR="$REPO_ROOT/server/tests"
COMPOSE_FILE="$COMPOSE_DIR/docker-compose.integration.yml"
COMPOSE="docker compose -f $COMPOSE_FILE"

# ── Validate required env vars ──────────────────────────────────────────────
for var in LLM_URL LLM_API_KEY HF_TOKEN; do
    if [[ -z "${!var:-}" ]]; then
        echo "ERROR: $var is not set. See script header for required env vars."
        exit 1
    fi
done

export LLM_MODEL="${LLM_MODEL:-qwen2.5:14b}"

# ── Helpers ─────────────────────────────────────────────────────────────────
info() { echo -e "\n\033[1;34m▸ $*\033[0m"; }
ok()   { echo -e "\033[1;32m  ✓ $*\033[0m"; }
fail() { echo -e "\033[1;31m  ✗ $*\033[0m"; }

wait_for() {
    local desc="$1" cmd="$2" max="${3:-60}"
    info "Waiting for $desc (up to ${max}s)..."
    for i in $(seq 1 "$max"); do
        if eval "$cmd" &>/dev/null; then
            ok "$desc is ready"
            return 0
        fi
        sleep 2
    done
    fail "$desc did not become ready within ${max}s"
    return 1
}

cleanup() {
    info "Tearing down..."
    $COMPOSE down -v --remove-orphans 2>/dev/null || true
}

# Always tear down on exit
trap cleanup EXIT
# ── Step 1: Build and start infrastructure ──────────────────────────────────
info "Building and starting infrastructure services..."
$COMPOSE up -d --build postgres redis garage hatchet mock-daily mailpit

# ── Step 2: Set up Garage (S3 bucket + keys) ───────────────────────────────
wait_for "Garage" "$COMPOSE exec -T garage /garage stats" 60

info "Setting up Garage bucket and keys..."
GARAGE="$COMPOSE exec -T garage /garage"

# Hardcoded test credentials — ephemeral containers, destroyed after tests
export GARAGE_KEY_ID="GK0123456789abcdef01234567" # gitleaks:allow
export GARAGE_KEY_SECRET="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" # gitleaks:allow

# Layout
NODE_ID=$($GARAGE node id -q 2>&1 | tr -d '[:space:]')
LAYOUT_STATUS=$($GARAGE layout show 2>&1 || true)
if echo "$LAYOUT_STATUS" | grep -q "No nodes"; then
    $GARAGE layout assign "$NODE_ID" -c 1G -z dc1
    $GARAGE layout apply --version 1
fi

# Bucket
$GARAGE bucket info reflector-media >/dev/null 2>&1 || $GARAGE bucket create reflector-media

# Import key with known credentials
if ! $GARAGE key info reflector-test >/dev/null 2>&1; then
    $GARAGE key import --yes "$GARAGE_KEY_ID" "$GARAGE_KEY_SECRET"
    $GARAGE key rename "$GARAGE_KEY_ID" reflector-test
fi

# Permissions
$GARAGE bucket allow reflector-media --read --write --key reflector-test

ok "Garage ready with hardcoded test credentials"

# ── Step 3: Generate Hatchet API token ──────────────────────────────────────
wait_for "Hatchet" "$COMPOSE exec -T hatchet curl -sf http://localhost:8888/api/live" 90

info "Generating Hatchet API token..."
HATCHET_TOKEN_OUTPUT=$($COMPOSE exec -T hatchet /hatchet-admin token create --config /config --name local-test 2>&1)
# The token is a JWT; every JWT starts with "eyJ" (base64 of '{"'), so grep for it
export HATCHET_CLIENT_TOKEN=$(echo "$HATCHET_TOKEN_OUTPUT" | grep -o 'eyJ[A-Za-z0-9_.\-]*')

if [[ -z "$HATCHET_CLIENT_TOKEN" ]]; then
    fail "Failed to extract Hatchet token (JWT not found in output)"
    echo "  Output was: $HATCHET_TOKEN_OUTPUT"
    exit 1
fi
ok "Hatchet token generated"
# ── Step 4: Start backend services ──────────────────────────────────────────
info "Starting backend services..."
$COMPOSE up -d $BUILD_FLAG server worker hatchet-worker-cpu hatchet-worker-llm test-runner

# ── Step 5: Wait for server + run migrations ────────────────────────────────
wait_for "Server" "$COMPOSE exec -T test-runner curl -sf http://server:1250/health" 60

info "Running database migrations..."
$COMPOSE exec -T server uv run alembic upgrade head
ok "Migrations applied"

# ── Step 6: Run integration tests ───────────────────────────────────────────
info "Running integration tests..."
echo ""

LOGS_DIR="$COMPOSE_DIR/integration/logs"
mkdir -p "$LOGS_DIR"
RUN_TIMESTAMP=$(date +%Y%m%d-%H%M%S)
TEST_LOG="$LOGS_DIR/$RUN_TIMESTAMP.txt"

if $COMPOSE exec -T test-runner uv run pytest tests/integration/ -v -x 2>&1 | tee "$TEST_LOG.pytest"; then
    echo ""
    ok "All integration tests passed!"
    EXIT_CODE=0
else
    echo ""
    fail "Integration tests failed!"
    EXIT_CODE=1
fi

# Always collect service logs + test output into a single file
info "Collecting logs..."
$COMPOSE logs --tail=500 > "$TEST_LOG" 2>&1
echo -e "\n\n=== PYTEST OUTPUT ===\n" >> "$TEST_LOG"
cat "$TEST_LOG.pytest" >> "$TEST_LOG" 2>/dev/null
rm -f "$TEST_LOG.pytest"
echo "  Logs saved to: server/tests/integration/logs/$RUN_TIMESTAMP.txt"

# cleanup runs via trap
exit $EXIT_CODE
496
scripts/setup-gpu-host.sh
Executable file
@@ -0,0 +1,496 @@
#!/usr/bin/env bash
#
# Standalone GPU service setup for Reflector.
# Deploys ONLY the GPU transcription/diarization/translation service on a dedicated machine.
# The main Reflector instance connects to this machine over HTTPS.
#
# Usage:
#   ./scripts/setup-gpu-host.sh [--domain DOMAIN] [--custom-ca PATH] [--extra-ca FILE] [--api-key KEY] [--cpu] [--build]
#
# Options:
#   --domain DOMAIN   Domain name for this GPU host (e.g., gpu.example.com)
#                     With --custom-ca: uses custom TLS cert. Without: uses Let's Encrypt.
#   --custom-ca PATH  Custom CA certificate (dir with ca.crt + server.pem + server-key.pem, or single PEM file)
#   --extra-ca FILE   Additional CA cert to trust (repeatable)
#   --api-key KEY     API key to protect the GPU service (recommended for internet-facing deployments)
#   --cpu             Use CPU-only Dockerfile (no NVIDIA GPU required)
#   --build           Build image from source (default: build, since no pre-built GPU image is published)
#   --port PORT       Host port to expose (default: 443 with Caddy, 8000 without)
#
# Examples:
#   # GPU on LAN with custom CA
#   ./scripts/generate-certs.sh gpu.local
#   ./scripts/setup-gpu-host.sh --domain gpu.local --custom-ca certs/ --api-key my-secret-key
#
#   # GPU on public internet with Let's Encrypt
#   ./scripts/setup-gpu-host.sh --domain gpu.example.com --api-key my-secret-key
#
#   # GPU on LAN, IP access only (self-signed cert)
#   ./scripts/setup-gpu-host.sh --api-key my-secret-key
#
#   # CPU-only mode (no NVIDIA GPU)
#   ./scripts/setup-gpu-host.sh --cpu --api-key my-secret-key
#
# After setup, configure the main Reflector instance to use this GPU:
#   In server/.env on the Reflector machine:
#     TRANSCRIPT_BACKEND=modal
#     TRANSCRIPT_URL=https://gpu.example.com
#     TRANSCRIPT_MODAL_API_KEY=my-secret-key
#     DIARIZATION_BACKEND=modal
#     DIARIZATION_URL=https://gpu.example.com
#     DIARIZATION_MODAL_API_KEY=my-secret-key
#     TRANSLATION_BACKEND=modal
#     TRANSLATE_URL=https://gpu.example.com
#     TRANSLATION_MODAL_API_KEY=my-secret-key
#
# DNS Resolution:
#   - Public domain: Create a DNS A record pointing to this machine's public IP.
#   - Internal domain (e.g., gpu.local): Add to /etc/hosts on both machines:
#       <GPU_MACHINE_IP> gpu.local
#   - IP-only: Use the machine's IP directly in TRANSCRIPT_URL/DIARIZATION_URL.
#     The Reflector backend must trust the CA or accept self-signed certs.
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

GPU_DIR="$ROOT_DIR/gpu/self_hosted"
OS="$(uname -s)"

# --- Colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

info() { echo -e "${CYAN}==>${NC} $*"; }
ok()   { echo -e "${GREEN} ✓${NC} $*"; }
warn() { echo -e "${YELLOW} !${NC} $*"; }
err()  { echo -e "${RED} ✗${NC} $*" >&2; }

# --- Parse arguments ---
CUSTOM_DOMAIN=""
CUSTOM_CA=""
EXTRA_CA_FILES=()
API_KEY=""
USE_CPU=false
HOST_PORT=""
SKIP_NEXT=false
ARGS=("$@")
for i in "${!ARGS[@]}"; do
    if [[ "$SKIP_NEXT" == "true" ]]; then
        SKIP_NEXT=false
        continue
    fi
    arg="${ARGS[$i]}"
    case "$arg" in
        --domain)
            next_i=$((i + 1))
            if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then
                err "--domain requires a domain name"
                exit 1
            fi
            CUSTOM_DOMAIN="${ARGS[$next_i]}"
            SKIP_NEXT=true ;;
        --custom-ca)
            next_i=$((i + 1))
            if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then
                err "--custom-ca requires a path to a directory or PEM certificate file"
                exit 1
            fi
            CUSTOM_CA="${ARGS[$next_i]}"
            SKIP_NEXT=true ;;
        --extra-ca)
            next_i=$((i + 1))
            if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then
                err "--extra-ca requires a path to a PEM certificate file"
                exit 1
            fi
            if [[ ! -f "${ARGS[$next_i]}" ]]; then
                err "--extra-ca file not found: ${ARGS[$next_i]}"
                exit 1
            fi
            EXTRA_CA_FILES+=("${ARGS[$next_i]}")
            SKIP_NEXT=true ;;
        --api-key)
            next_i=$((i + 1))
            if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then
                err "--api-key requires a key value"
                exit 1
            fi
            API_KEY="${ARGS[$next_i]}"
            SKIP_NEXT=true ;;
        --cpu)
            USE_CPU=true ;;
        --port)
            next_i=$((i + 1))
            if [[ $next_i -ge ${#ARGS[@]} ]] || [[ "${ARGS[$next_i]}" == --* ]]; then
                err "--port requires a port number"
                exit 1
            fi
            HOST_PORT="${ARGS[$next_i]}"
            SKIP_NEXT=true ;;
        --build)
            ;; # Always build from source for GPU, flag accepted for compatibility
        *)
            err "Unknown argument: $arg"
            err "Usage: $0 [--domain DOMAIN] [--custom-ca PATH] [--extra-ca FILE] [--api-key KEY] [--cpu] [--port PORT]"
            exit 1
            ;;
    esac
done
# --- Resolve CA paths ---
CA_CERT_PATH=""
TLS_CERT_PATH=""
TLS_KEY_PATH=""
USE_CUSTOM_CA=false
USE_CADDY=false

if [[ -n "$CUSTOM_CA" ]] || [[ -n "${EXTRA_CA_FILES[0]+x}" ]]; then
    USE_CUSTOM_CA=true
fi

if [[ -n "$CUSTOM_CA" ]]; then
    CUSTOM_CA="${CUSTOM_CA%/}"
    if [[ -d "$CUSTOM_CA" ]]; then
        [[ -f "$CUSTOM_CA/ca.crt" ]] || { err "$CUSTOM_CA/ca.crt not found"; exit 1; }
        CA_CERT_PATH="$CUSTOM_CA/ca.crt"
        if [[ -f "$CUSTOM_CA/server.pem" ]] && [[ -f "$CUSTOM_CA/server-key.pem" ]]; then
            TLS_CERT_PATH="$CUSTOM_CA/server.pem"
            TLS_KEY_PATH="$CUSTOM_CA/server-key.pem"
        elif [[ -f "$CUSTOM_CA/server.pem" ]] || [[ -f "$CUSTOM_CA/server-key.pem" ]]; then
            warn "Found only one of server.pem/server-key.pem — both needed for TLS. Skipping."
        fi
    elif [[ -f "$CUSTOM_CA" ]]; then
        CA_CERT_PATH="$CUSTOM_CA"
    else
        err "--custom-ca path not found: $CUSTOM_CA"
        exit 1
    fi
elif [[ -n "${EXTRA_CA_FILES[0]+x}" ]]; then
    CA_CERT_PATH="${EXTRA_CA_FILES[0]}"
    unset 'EXTRA_CA_FILES[0]'
    EXTRA_CA_FILES=("${EXTRA_CA_FILES[@]+"${EXTRA_CA_FILES[@]}"}")
fi

# Caddy if we have a domain or TLS certs
if [[ -n "$CUSTOM_DOMAIN" ]] || [[ -n "$TLS_CERT_PATH" ]]; then
    USE_CADDY=true
fi

# Default port
if [[ -z "$HOST_PORT" ]]; then
    if [[ "$USE_CADDY" == "true" ]]; then
        HOST_PORT="443"
    else
        HOST_PORT="8000"
    fi
fi
# Detect primary IP
PRIMARY_IP=""
if [[ "$OS" == "Linux" ]]; then
    PRIMARY_IP=$(hostname -I 2>/dev/null | awk '{print $1}' || true)
    if [[ "$PRIMARY_IP" == "127."* ]] || [[ -z "$PRIMARY_IP" ]]; then
        PRIMARY_IP=$(ip -4 route get 1 2>/dev/null | sed -n 's/.*src \([0-9.]*\).*/\1/p' || true)
    fi
fi

# --- Display config ---
echo ""
echo "=========================================="
echo "  Reflector — Standalone GPU Host Setup"
echo "=========================================="
echo ""
echo "  Mode:   $(if [[ "$USE_CPU" == "true" ]]; then echo "CPU-only"; else echo "NVIDIA GPU"; fi)"
echo "  Caddy:  $USE_CADDY"
[[ -n "$CUSTOM_DOMAIN" ]] && echo "  Domain: $CUSTOM_DOMAIN"
[[ "$USE_CUSTOM_CA" == "true" ]] && echo "  CA:     Custom"
[[ -n "$TLS_CERT_PATH" ]] && echo "  TLS:    Custom cert"
[[ -n "$API_KEY" ]] && echo "  Auth:   API key protected"
[[ -z "$API_KEY" ]] && echo "  Auth:   NONE (open access — use --api-key for production!)"
echo "  Port:   $HOST_PORT"
echo ""
# --- Prerequisites ---
info "Checking prerequisites"

if ! command -v docker &>/dev/null; then
    err "Docker not found. Install Docker first."
    exit 1
fi
ok "Docker available"

if ! docker compose version &>/dev/null; then
    err "Docker Compose V2 not found."
    exit 1
fi
ok "Docker Compose V2 available"

if [[ "$USE_CPU" != "true" ]]; then
    if ! docker info 2>/dev/null | grep -qi nvidia; then
        warn "NVIDIA runtime not detected in Docker. GPU mode may fail."
        warn "Install nvidia-container-toolkit if you have an NVIDIA GPU."
    else
        ok "NVIDIA Docker runtime available"
    fi
fi
# --- Stage certificates ---
CERTS_DIR="$ROOT_DIR/certs"
if [[ "$USE_CUSTOM_CA" == "true" ]]; then
    info "Staging certificates"
    mkdir -p "$CERTS_DIR"

    if [[ -n "$CA_CERT_PATH" ]]; then
        local_ca_dest="$CERTS_DIR/ca.crt"
        # Compare inodes so the copy is skipped when source and destination
        # are the same file (cp would fail with "are the same file")
        src_id=$(ls -i "$CA_CERT_PATH" 2>/dev/null | awk '{print $1}')
        dst_id=$(ls -i "$local_ca_dest" 2>/dev/null | awk '{print $1}')
        if [[ "$src_id" != "$dst_id" ]] || [[ -z "$dst_id" ]]; then
            cp "$CA_CERT_PATH" "$local_ca_dest"
        fi
        chmod 644 "$local_ca_dest"
        ok "CA certificate staged"

        # Append extra CAs
        for extra_ca in "${EXTRA_CA_FILES[@]+"${EXTRA_CA_FILES[@]}"}"; do
            echo "" >> "$local_ca_dest"
            cat "$extra_ca" >> "$local_ca_dest"
            ok "Appended extra CA: $extra_ca"
        done
    fi

    if [[ -n "$TLS_CERT_PATH" ]]; then
        cert_dest="$CERTS_DIR/server.pem"
        key_dest="$CERTS_DIR/server-key.pem"
        src_id=$(ls -i "$TLS_CERT_PATH" 2>/dev/null | awk '{print $1}')
        dst_id=$(ls -i "$cert_dest" 2>/dev/null | awk '{print $1}')
        if [[ "$src_id" != "$dst_id" ]] || [[ -z "$dst_id" ]]; then
            cp "$TLS_CERT_PATH" "$cert_dest"
            cp "$TLS_KEY_PATH" "$key_dest"
        fi
        chmod 644 "$cert_dest"
        chmod 600 "$key_dest"
        ok "TLS cert/key staged"
    fi
fi
# --- Build profiles and compose command ---
COMPOSE_FILE="$ROOT_DIR/docker-compose.gpu-host.yml"
COMPOSE_PROFILES=()
GPU_SERVICE="gpu"

if [[ "$USE_CPU" == "true" ]]; then
    COMPOSE_PROFILES+=("cpu")
    GPU_SERVICE="cpu"
else
    COMPOSE_PROFILES+=("gpu")
fi
if [[ "$USE_CADDY" == "true" ]]; then
    COMPOSE_PROFILES+=("caddy")
fi

# Compose command helper
compose_cmd() {
    local profiles="" files="-f $COMPOSE_FILE"
    if [[ "$USE_CUSTOM_CA" == "true" ]] && [[ -f "$ROOT_DIR/docker-compose.gpu-ca.yml" ]]; then
        files="$files -f $ROOT_DIR/docker-compose.gpu-ca.yml"
    fi
    for p in "${COMPOSE_PROFILES[@]}"; do
        profiles="$profiles --profile $p"
    done
    docker compose $files $profiles "$@"
}
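# e.g. with --custom-ca in GPU mode, "compose_cmd up -d" expands to:
#   docker compose -f docker-compose.gpu-host.yml -f docker-compose.gpu-ca.yml --profile gpu up -d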
# Generate CA compose override if needed (mounts certs into containers)
if [[ "$USE_CUSTOM_CA" == "true" ]]; then
    info "Generating docker-compose.gpu-ca.yml override"
    ca_override="$ROOT_DIR/docker-compose.gpu-ca.yml"
    cat > "$ca_override" << 'CAEOF'
# Generated by setup-gpu-host.sh — custom CA trust.
# Do not edit manually; re-run setup-gpu-host.sh with --custom-ca to regenerate.
services:
  gpu:
    volumes:
      - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro
  cpu:
    volumes:
      - ./certs/ca.crt:/usr/local/share/ca-certificates/custom-ca.crt:ro
CAEOF

    if [[ -n "$TLS_CERT_PATH" ]]; then
        cat >> "$ca_override" << 'CADDYCAEOF'
  caddy:
    volumes:
      - ./certs:/etc/caddy/certs:ro
CADDYCAEOF
    fi
    ok "Generated docker-compose.gpu-ca.yml"
else
    rm -f "$ROOT_DIR/docker-compose.gpu-ca.yml"
fi
# --- Generate Caddyfile ---
if [[ "$USE_CADDY" == "true" ]]; then
    info "Generating Caddyfile.gpu-host"

    CADDYFILE="$ROOT_DIR/Caddyfile.gpu-host"

    if [[ -n "$TLS_CERT_PATH" ]] && [[ -n "$CUSTOM_DOMAIN" ]]; then
        cat > "$CADDYFILE" << CADDYEOF
# Generated by setup-gpu-host.sh — Custom TLS cert for $CUSTOM_DOMAIN
$CUSTOM_DOMAIN {
    tls /etc/caddy/certs/server.pem /etc/caddy/certs/server-key.pem
    reverse_proxy transcription:8000
}
CADDYEOF
        ok "Caddyfile: custom TLS for $CUSTOM_DOMAIN"
    elif [[ -n "$CUSTOM_DOMAIN" ]]; then
        cat > "$CADDYFILE" << CADDYEOF
# Generated by setup-gpu-host.sh — Let's Encrypt for $CUSTOM_DOMAIN
$CUSTOM_DOMAIN {
    reverse_proxy transcription:8000
}
CADDYEOF
        ok "Caddyfile: Let's Encrypt for $CUSTOM_DOMAIN"
    else
        cat > "$CADDYFILE" << 'CADDYEOF'
# Generated by setup-gpu-host.sh — self-signed cert for IP access
:443 {
    tls internal
    reverse_proxy transcription:8000
}
CADDYEOF
        ok "Caddyfile: self-signed cert for IP access"
    fi
fi
# --- Generate .env ---
info "Generating GPU service .env"

GPU_ENV="$ROOT_DIR/.env.gpu-host"
cat > "$GPU_ENV" << EOF
# Generated by setup-gpu-host.sh
# HuggingFace token for pyannote diarization models
HF_TOKEN=${HF_TOKEN:-}
# API key to protect the GPU service (set via --api-key)
REFLECTOR_GPU_APIKEY=${API_KEY:-}
# Port configuration
GPU_HOST_PORT=${HOST_PORT}
CADDY_HTTPS_PORT=${HOST_PORT}
EOF

if [[ -z "${HF_TOKEN:-}" ]]; then
    warn "HF_TOKEN not set. Diarization requires a HuggingFace token."
    warn "Set it: export HF_TOKEN=your-token-here and re-run, or edit .env.gpu-host"
fi

ok "Generated .env.gpu-host"

# --- Build and start ---
info "Building $GPU_SERVICE image (first build downloads ML models — may take a while)..."
compose_cmd --env-file "$GPU_ENV" build "$GPU_SERVICE"
ok "$GPU_SERVICE image built"

info "Starting services..."
compose_cmd --env-file "$GPU_ENV" up -d
ok "Services started"
# --- Wait for health ---
info "Waiting for GPU service to be healthy (model loading takes 1-2 minutes)..."
local_url="http://localhost:8000"
for i in $(seq 1 40); do
    if curl -sf "$local_url/docs" >/dev/null 2>&1; then
        ok "GPU service is healthy!"
        break
    fi
    if [[ $i -eq 40 ]]; then
        err "GPU service did not become healthy after 5 minutes."
        err "Check logs: docker compose -f docker-compose.gpu-host.yml logs gpu"
        exit 1
    fi
    sleep 8
done
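# Manual check (same endpoint the loop above polls; 40 tries x 8s is roughly 5 minutes):
#   curl -sf http://localhost:8000/docs >/dev/null && echo healthy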
# --- Summary ---
echo ""
echo "=========================================="
echo -e "  ${GREEN}GPU service is running!${NC}"
echo "=========================================="
echo ""

if [[ "$USE_CADDY" == "true" ]]; then
    if [[ -n "$CUSTOM_DOMAIN" ]]; then
        echo "  URL: https://$CUSTOM_DOMAIN"
    elif [[ -n "$PRIMARY_IP" ]]; then
        echo "  URL: https://$PRIMARY_IP"
    else
        echo "  URL: https://localhost"
    fi
else
    if [[ -n "$PRIMARY_IP" ]]; then
        echo "  URL: http://$PRIMARY_IP:$HOST_PORT"
    else
        echo "  URL: http://localhost:$HOST_PORT"
    fi
fi

echo "  Health: curl <URL>/docs"
[[ -n "$API_KEY" ]] && echo "  API key: $API_KEY"
echo ""
echo "  Configure the main Reflector instance (in server/.env):"
echo ""

local_gpu_url=""
if [[ "$USE_CADDY" == "true" ]]; then
    if [[ -n "$CUSTOM_DOMAIN" ]]; then
        local_gpu_url="https://$CUSTOM_DOMAIN"
    elif [[ -n "$PRIMARY_IP" ]]; then
        local_gpu_url="https://$PRIMARY_IP"
    else
        local_gpu_url="https://localhost"
    fi
else
    if [[ -n "$PRIMARY_IP" ]]; then
        local_gpu_url="http://$PRIMARY_IP:$HOST_PORT"
    else
        local_gpu_url="http://localhost:$HOST_PORT"
    fi
fi

echo "    TRANSCRIPT_BACKEND=modal"
echo "    TRANSCRIPT_URL=$local_gpu_url"
[[ -n "$API_KEY" ]] && echo "    TRANSCRIPT_MODAL_API_KEY=$API_KEY"
echo "    DIARIZATION_BACKEND=modal"
echo "    DIARIZATION_URL=$local_gpu_url"
[[ -n "$API_KEY" ]] && echo "    DIARIZATION_MODAL_API_KEY=$API_KEY"
echo "    TRANSLATION_BACKEND=modal"
echo "    TRANSLATE_URL=$local_gpu_url"
[[ -n "$API_KEY" ]] && echo "    TRANSLATION_MODAL_API_KEY=$API_KEY"
echo ""

if [[ "$USE_CUSTOM_CA" == "true" ]]; then
    echo "  The Reflector instance must also trust this CA."
    echo "  On the Reflector machine, run setup-selfhosted.sh with:"
    echo "    --extra-ca /path/to/this-machines-ca.crt"
    echo ""
fi

echo "  DNS Resolution:"
if [[ -n "$CUSTOM_DOMAIN" ]]; then
    echo "    Ensure '$CUSTOM_DOMAIN' resolves to this machine's IP."
    echo "    Public:   Create a DNS A record."
    echo "    Internal: Add to /etc/hosts on the Reflector machine:"
    echo "      ${PRIMARY_IP:-<GPU_IP>} $CUSTOM_DOMAIN"
else
    echo "    Use this machine's IP directly in TRANSCRIPT_URL/DIARIZATION_URL."
fi
echo ""
echo "  To stop:   docker compose -f docker-compose.gpu-host.yml down"
echo "  To re-run: ./scripts/setup-gpu-host.sh $*"
echo "  Logs:      docker compose -f docker-compose.gpu-host.yml logs -f gpu"
echo ""
File diff suppressed because it is too large
@@ -86,11 +86,23 @@ LLM_API_KEY=not-needed
## Context size for summary generation (tokens)
LLM_CONTEXT_WINDOW=16000

## =======================================================
## Audio Padding
##
## backends: pyav (in-process PyAV), modal (HTTP API client)
## Default is "pyav" — no external service needed.
## Set to "modal" when using Modal.com or the self-hosted gpu/self_hosted/ container.
## =======================================================
#PADDING_BACKEND=pyav
#PADDING_BACKEND=modal
#PADDING_URL=https://xxxxx--reflector-padding-web.modal.run
#PADDING_MODAL_API_KEY=xxxxx
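## Example for a self-hosted GPU container (illustrative; the values mirror
## the selfhosted .env shown later in this diff):
#PADDING_BACKEND=modal
#PADDING_URL=http://transcription:8000
#PADDING_MODAL_API_KEY=selfhosted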
## =======================================================
## Diarization
##
## Only available on modal
## To allow diarization, you need to expose expose the files to be dowloded by the pipeline
## backends: modal (HTTP API), pyannote (in-process pyannote.audio)
## To allow diarization, you need to expose the files to be downloaded by the pipeline
## =======================================================
DIARIZATION_ENABLED=false
DIARIZATION_BACKEND=modal
@@ -137,6 +149,10 @@ TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
#DAILYCO_STORAGE_AWS_REGION=us-west-2
# Worker credentials for reading/deleting from Daily's recording bucket
# Required when transcript storage is separate from Daily's bucket (e.g., selfhosted with Garage)
#DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
#DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key

## Whereby (optional separate bucket)
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby
@@ -32,23 +32,46 @@ AUTH_BACKEND=none
 
 # =======================================================
 # Specialized Models (Transcription, Diarization, Translation)
-# These run in the gpu/cpu container — NOT an LLM.
-# The "modal" backend means "HTTP API client" — it talks to
-# the self-hosted container, not Modal.com cloud.
+# These do NOT use an LLM. Configured per mode by the setup script:
+#
+# --gpu mode: modal backends → GPU container (http://transcription:8000)
+# --cpu mode: whisper/pyannote/marian/pyav → in-process ML on server/worker
+# --hosted mode: modal backends → user-provided remote GPU service URL
 # =======================================================
 
+# --- --gpu mode (default) ---
 TRANSCRIPT_BACKEND=modal
 TRANSCRIPT_URL=http://transcription:8000
 TRANSCRIPT_MODAL_API_KEY=selfhosted
 
 DIARIZATION_ENABLED=true
 DIARIZATION_BACKEND=modal
 DIARIZATION_URL=http://transcription:8000
 
 TRANSLATION_BACKEND=modal
 TRANSLATE_URL=http://transcription:8000
+PADDING_BACKEND=modal
+PADDING_URL=http://transcription:8000
 
-# HuggingFace token — optional, for gated models (e.g. pyannote).
-# Falls back to public S3 model bundle if not set.
+# --- --cpu mode (set by setup script) ---
+# TRANSCRIPT_BACKEND=whisper
+# DIARIZATION_BACKEND=pyannote
+# TRANSLATION_BACKEND=marian
+# PADDING_BACKEND=pyav
+
+# --- --hosted mode (set by setup script) ---
+# TRANSCRIPT_BACKEND=modal
+# TRANSCRIPT_URL=https://your-gpu-service.example.com
+# DIARIZATION_BACKEND=modal
+# DIARIZATION_URL=https://your-gpu-service.example.com
+# ... (all URLs point to one remote service)
+
+# Whisper model sizes for local transcription (--cpu mode)
+# Options: "tiny", "base", "small", "medium", "large-v2"
+# WHISPER_CHUNK_MODEL=tiny
+# WHISPER_FILE_MODEL=tiny
+
+# HuggingFace token — for gated models (e.g. pyannote diarization).
+# Required for --gpu and --cpu modes; falls back to public S3 bundle if not set.
+# Not needed for --hosted mode (remote service handles its own auth).
 # HF_TOKEN=hf_xxxxx
 
 # =======================================================
@@ -93,15 +116,42 @@ TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
 # =======================================================
 # Daily.co Live Rooms (Optional)
-# Enable real-time meeting rooms with Daily.co integration.
-# Requires a Daily.co account: https://www.daily.co/
+# Configure these BEFORE running setup-selfhosted.sh and the
+# script will auto-detect and start Hatchet workflow services.
+#
+# Prerequisites:
+# 1. Daily.co account: https://www.daily.co/
+# 2. API key: Dashboard → Developers → API Keys
+# 3. S3 bucket for recordings: https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket
+# 4. IAM role ARN for Daily.co to write recordings to your bucket
+#
+# After configuring, run: ./scripts/setup-selfhosted.sh <your-flags>
+# The script will detect DAILY_API_KEY and automatically:
+# - Start Hatchet workflow engine + CPU/LLM workers
+# - Generate a Hatchet API token
+# - Enable FEATURE_ROOMS in the frontend
 # =======================================================
-# DEFAULT_VIDEO_PLATFORM=daily
 # DAILY_API_KEY=your-daily-api-key
 # DAILY_SUBDOMAIN=your-subdomain
-# DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
+# DEFAULT_VIDEO_PLATFORM=daily
+# DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
+# DAILYCO_STORAGE_AWS_REGION=us-east-1
+# DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::role/DailyCoAccess
+# Worker credentials for reading/deleting from Daily's recording bucket
+# Required when transcript storage is separate from Daily's bucket (e.g., selfhosted with Garage)
+# DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
+# DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
+# DAILY_WEBHOOK_SECRET=your-daily-webhook-secret # optional, for faster recording discovery
 
 # =======================================================
 # Hatchet Workflow Engine (Auto-configured for Daily.co)
 # Required for Daily.co multitrack recording processing.
+# The setup script generates HATCHET_CLIENT_TOKEN automatically.
+# Do not set these manually unless you know what you're doing.
 # =======================================================
 # HATCHET_CLIENT_TOKEN=<auto-generated-by-script>
 # HATCHET_CLIENT_SERVER_URL=http://hatchet:8888
 # HATCHET_CLIENT_HOST_PORT=hatchet:7077
 
 # =======================================================
 # Feature Flags
@@ -6,7 +6,7 @@ ENV PYTHONUNBUFFERED=1 \
 
 # builder install base dependencies
 WORKDIR /tmp
-RUN apt-get update && apt-get install -y curl && apt-get clean
+RUN apt-get update && apt-get install -y curl ffmpeg ca-certificates && apt-get clean
 ADD https://astral.sh/uv/install.sh /uv-installer.sh
 RUN sh /uv-installer.sh && rm /uv-installer.sh
 ENV PATH="/root/.local/bin/:$PATH"
@@ -18,7 +18,7 @@ COPY pyproject.toml uv.lock README.md /app/
 RUN uv sync --compile-bytecode --locked
 
 # bootstrap
-COPY alembic.ini runserver.sh /app/
+COPY alembic.ini docker-entrypoint.sh runserver.sh /app/
 COPY images /app/images
 COPY migrations /app/migrations
 COPY reflector /app/reflector
@@ -35,4 +35,6 @@ RUN if [ "$(uname -m)" = "aarch64" ] && [ ! -f /usr/lib/libgomp.so.1 ]; then \
 # Pre-check just to make sure the image will not fail
 RUN uv run python -c "import silero_vad.model"
 
-CMD ["./runserver.sh"]
+RUN chmod +x /app/docker-entrypoint.sh
+
+CMD ["./docker-entrypoint.sh"]
server/docker-entrypoint.sh (new file, 25 lines)
@@ -0,0 +1,25 @@
#!/bin/bash
set -e

# Custom CA certificate injection
# If a CA cert is mounted at this path (via docker-compose.ca.yml),
# add it to the system trust store and configure all Python SSL libraries.
CUSTOM_CA_PATH="/usr/local/share/ca-certificates/custom-ca.crt"

if [ -s "$CUSTOM_CA_PATH" ]; then
    echo "[entrypoint] Custom CA certificate detected, updating trust store..."
    update-ca-certificates 2>/dev/null

    # update-ca-certificates creates a combined bundle (system + custom CAs)
    COMBINED_BUNDLE="/etc/ssl/certs/ca-certificates.crt"
    export SSL_CERT_FILE="$COMBINED_BUNDLE"
    export REQUESTS_CA_BUNDLE="$COMBINED_BUNDLE"
    export CURL_CA_BUNDLE="$COMBINED_BUNDLE"
    # Note: GRPC_DEFAULT_SSL_ROOTS_FILE_PATH is intentionally NOT set here.
    # Setting it causes grpcio to attempt TLS on internal Hatchet connections
    # that run without TLS (SERVER_GRPC_INSECURE=t), resulting in handshake failures.
    # If you need gRPC with custom CA, set GRPC_DEFAULT_SSL_ROOTS_FILE_PATH explicitly.
    echo "[entrypoint] CA trust store updated (SSL_CERT_FILE=$COMBINED_BUNDLE)"
fi

exec ./runserver.sh
@@ -419,3 +419,18 @@ User-room broadcasts to `user:{user_id}`:
 - `TRANSCRIPT_STATUS`
 - `TRANSCRIPT_FINAL_TITLE`
 - `TRANSCRIPT_DURATION`
+
+## Failed Runs Monitor (Hatchet Cron)
+
+A `FailedRunsMonitor` Hatchet cron workflow runs hourly (`0 * * * *`) and checks for failed pipeline runs
+(DiarizationPipeline, FilePipeline, LivePostProcessingPipeline) in the last hour. For each failed run,
+it renders a DAG status overview and posts it to Zulip.
+
+**Required env vars** (all must be set to enable):
+- `ZULIP_REALM` — Zulip server hostname
+- `ZULIP_API_KEY` — Zulip bot API key
+- `ZULIP_BOT_EMAIL` — Zulip bot email
+- `ZULIP_DAG_STREAM` — Zulip stream for alerts
+- `ZULIP_DAG_TOPIC` — Zulip topic for alerts
+
+If any of these are unset, the monitor workflow is not registered with the Hatchet worker.
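For reference, the worker-side gate described above appears in full in the llm worker diff further down in this changeset; reduced to a minimal sketch (assuming `settings` exposes the five Zulip fields as optional strings), it is a plain all() check before registering the workflow:

    from reflector.settings import settings

    zulip_dag_enabled = all(
        [
            settings.ZULIP_REALM,
            settings.ZULIP_API_KEY,
            settings.ZULIP_BOT_EMAIL,
            settings.ZULIP_DAG_STREAM,
            settings.ZULIP_DAG_TOPIC,
        ]
    )
    if zulip_dag_enabled:
        # failed_runs_monitor is imported lazily in the real worker code
        workflows.append(failed_runs_monitor)  # registers the hourly cron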
@@ -0,0 +1,47 @@
"""add soft delete fields to transcript and recording

Revision ID: 501c73a6b0d5
Revises: e1f093f7f124
Create Date: 2026-03-19 00:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

revision: str = "501c73a6b0d5"
down_revision: Union[str, None] = "e1f093f7f124"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.add_column(
        "transcript",
        sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
    )
    op.add_column(
        "recording",
        sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
    )
    op.create_index(
        "idx_transcript_not_deleted",
        "transcript",
        ["id"],
        postgresql_where=sa.text("deleted_at IS NULL"),
    )
    op.create_index(
        "idx_recording_not_deleted",
        "recording",
        ["id"],
        postgresql_where=sa.text("deleted_at IS NULL"),
    )


def downgrade() -> None:
    op.drop_index("idx_recording_not_deleted", table_name="recording")
    op.drop_index("idx_transcript_not_deleted", table_name="transcript")
    op.drop_column("recording", "deleted_at")
    op.drop_column("transcript", "deleted_at")
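The two partial indexes only cover rows where deleted_at IS NULL, which is exactly the predicate live-row reads carry after this change (see the db layer diffs below). A minimal SQLAlchemy Core sketch of the pattern, using the `transcripts` table object this repo defines (the id value is a placeholder):

    from datetime import datetime, timezone

    from reflector.db.transcripts import transcripts

    # Reads of live rows add this filter, so the partial index can serve them:
    live = transcripts.select().where(transcripts.c.deleted_at.is_(None))

    # Deletion becomes an UPDATE that stamps deleted_at, never a DELETE:
    soft_delete = (
        transcripts.update()
        .where(transcripts.c.id == "some-transcript-id")  # placeholder id
        .values(deleted_at=datetime.now(timezone.utc))
    )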
@@ -0,0 +1,29 @@
"""add email_recipients to meeting

Revision ID: a2b3c4d5e6f7
Revises: 501c73a6b0d5
Create Date: 2026-03-20 00:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects.postgresql import JSONB

revision: str = "a2b3c4d5e6f7"
down_revision: Union[str, None] = "501c73a6b0d5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.add_column(
        "meeting",
        sa.Column("email_recipients", JSONB, nullable=True),
    )


def downgrade() -> None:
    op.drop_column("meeting", "email_recipients")
@@ -0,0 +1,28 @@
"""add email_transcript_to to room

Revision ID: b4c7e8f9a012
Revises: a2b3c4d5e6f7
Create Date: 2026-03-24 00:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

revision: str = "b4c7e8f9a012"
down_revision: Union[str, None] = "a2b3c4d5e6f7"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.add_column(
        "room",
        sa.Column("email_transcript_to", sa.String(), nullable=True),
    )


def downgrade() -> None:
    op.drop_column("room", "email_transcript_to")
@@ -27,7 +27,7 @@ dependencies = [
     "protobuf>=4.24.3",
     "celery>=5.3.4",
     "redis>=5.0.1",
-    "python-jose[cryptography]>=3.3.0",
+    "pyjwt[crypto]>=2.8.0",
     "python-multipart>=0.0.6",
     "transformers>=4.36.2",
     "jsonschema>=4.23.0",
@@ -38,8 +38,10 @@ dependencies = [
     "pytest-env>=1.1.5",
     "webvtt-py>=0.5.0",
     "icalendar>=6.0.0",
-    "hatchet-sdk>=0.47.0",
+    "hatchet-sdk==1.22.16",
     "pydantic>=2.12.5",
+    "aiosmtplib>=3.0.0",
+    "email-validator>=2.0.0",
 ]
 
 [dependency-groups]
@@ -71,9 +73,12 @@ local = [
     "faster-whisper>=0.10.0",
 ]
 silero-vad = [
-    "silero-vad>=5.1.2",
+    "silero-vad==5.1.2",
     "torch>=2.8.0",
     "torchaudio>=2.8.0",
+    "pyannote.audio==3.4.0",
+    "pytorch-lightning<2.6",
+    "librosa==0.10.1",
 ]
 
 [tool.uv]
@@ -113,9 +118,10 @@ source = ["reflector"]
 ENVIRONMENT = "pytest"
 DATABASE_URL = "postgresql://test_user:test_password@localhost:15432/reflector_test"
 AUTH_BACKEND = "jwt"
+HATCHET_CLIENT_TOKEN = "test-dummy-token"
 
 [tool.pytest.ini_options]
-addopts = "-ra -q --disable-pytest-warnings --cov --cov-report html -v"
+addopts = "-ra -q --disable-pytest-warnings --cov --cov-report html -v --ignore=tests/integration"
 testpaths = ["tests"]
 asyncio_mode = "auto"
 markers = [
server/reflector/_warnings_filter.py (new file, 13 lines)
@@ -0,0 +1,13 @@
"""
Suppress known dependency warnings. Import this before any reflector/hatchet_sdk
imports that pull in pydantic (e.g. llama_index) to hide UnsupportedFieldAttributeWarning
about validate_default.
"""

import warnings

warnings.filterwarnings(
    "ignore",
    message=".*validate_default.*",
    category=UserWarning,
)
@@ -13,18 +13,21 @@ from reflector.events import subscribers_shutdown, subscribers_startup
 from reflector.logger import logger
 from reflector.metrics import metrics_init
 from reflector.settings import settings
 from reflector.views.config import router as config_router
+from reflector.views.daily import router as daily_router
 from reflector.views.meetings import router as meetings_router
 from reflector.views.rooms import router as rooms_router
 from reflector.views.rtc_offer import router as rtc_offer_router
 from reflector.views.transcripts import router as transcripts_router
 from reflector.views.transcripts_audio import router as transcripts_audio_router
+from reflector.views.transcripts_download import router as transcripts_download_router
 from reflector.views.transcripts_participants import (
     router as transcripts_participants_router,
 )
 from reflector.views.transcripts_process import router as transcripts_process_router
 from reflector.views.transcripts_speaker import router as transcripts_speaker_router
 from reflector.views.transcripts_upload import router as transcripts_upload_router
 from reflector.views.transcripts_video import router as transcripts_video_router
 from reflector.views.transcripts_webrtc import router as transcripts_webrtc_router
 from reflector.views.transcripts_websocket import router as transcripts_websocket_router
 from reflector.views.user import router as user_router
@@ -97,12 +100,15 @@ app.include_router(transcripts_audio_router, prefix="/v1")
 app.include_router(transcripts_participants_router, prefix="/v1")
 app.include_router(transcripts_speaker_router, prefix="/v1")
 app.include_router(transcripts_upload_router, prefix="/v1")
+app.include_router(transcripts_download_router, prefix="/v1")
 app.include_router(transcripts_video_router, prefix="/v1")
 app.include_router(transcripts_websocket_router, prefix="/v1")
 app.include_router(transcripts_webrtc_router, prefix="/v1")
 app.include_router(transcripts_process_router, prefix="/v1")
 app.include_router(user_router, prefix="/v1")
 app.include_router(user_api_keys_router, prefix="/v1")
 app.include_router(user_ws_router, prefix="/v1")
 app.include_router(config_router, prefix="/v1")
 app.include_router(zulip_router, prefix="/v1")
 app.include_router(whereby_router, prefix="/v1")
+app.include_router(daily_router, prefix="/v1/daily")
@@ -12,8 +12,10 @@ AccessTokenInfo = auth_module.AccessTokenInfo
 authenticated = auth_module.authenticated
 current_user = auth_module.current_user
 current_user_optional = auth_module.current_user_optional
+current_user_optional_if_public_mode = auth_module.current_user_optional_if_public_mode
 parse_ws_bearer_token = auth_module.parse_ws_bearer_token
 current_user_ws_optional = auth_module.current_user_ws_optional
+verify_raw_token = auth_module.verify_raw_token
 
 # Optional router (e.g. for /auth/login in password backend)
 router = getattr(auth_module, "router", None)
@@ -4,8 +4,8 @@ from fastapi import Depends, HTTPException
 
 if TYPE_CHECKING:
     from fastapi import WebSocket
+import jwt
 from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
-from jose import JWTError, jwt
 from pydantic import BaseModel
 
 from reflector.db.user_api_keys import user_api_keys_controller
@@ -54,7 +54,7 @@ class JWTAuth:
                 audience=jwt_audience,
             )
             return payload
-        except JWTError as e:
+        except jwt.PyJWTError as e:
             logger.error(f"JWT error: {e}")
             raise
@@ -94,7 +94,7 @@ async def _authenticate_user(
             )
 
             user_infos.append(UserInfo(sub=user.id, email=email))
-        except JWTError as e:
+        except jwt.PyJWTError as e:
             logger.error(f"JWT error: {e}")
             raise HTTPException(status_code=401, detail="Invalid authentication")
@@ -129,6 +129,17 @@ async def current_user_optional(
     return await _authenticate_user(jwt_token, api_key, jwtauth)
 
 
+async def current_user_optional_if_public_mode(
+    jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
+    api_key: Annotated[Optional[str], Depends(api_key_header)],
+    jwtauth: JWTAuth = Depends(),
+) -> Optional[UserInfo]:
+    user = await _authenticate_user(jwt_token, api_key, jwtauth)
+    if user is None and not settings.PUBLIC_MODE:
+        raise HTTPException(status_code=401, detail="Not authenticated")
+    return user
+
+
 def parse_ws_bearer_token(
     websocket: "WebSocket",
 ) -> tuple[Optional[str], Optional[str]]:
@@ -144,3 +155,8 @@ async def current_user_ws_optional(websocket: "WebSocket") -> Optional[UserInfo]:
     if not token:
         return None
     return await _authenticate_user(token, None, JWTAuth())
+
+
+async def verify_raw_token(token: str) -> Optional[UserInfo]:
+    """Verify a raw JWT token string (used for query-param auth fallback)."""
+    return await _authenticate_user(token, None, JWTAuth())
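The jose to PyJWT migration above preserves the decode flow; as a standalone sketch of the PyJWT calls now in use (the key, algorithm, and audience values are placeholders):

    import jwt  # PyJWT, per the pyproject change above

    def verify(token: str, public_key: str, jwt_audience: str) -> dict:
        try:
            return jwt.decode(
                token,
                public_key,
                algorithms=["RS256"],
                audience=jwt_audience,
            )
        except jwt.PyJWTError:
            # base class for ExpiredSignatureError, InvalidAudienceError, etc.
            raise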
@@ -21,9 +21,19 @@ def current_user_optional():
     return None
 
 
+def current_user_optional_if_public_mode():
+    # auth_none means no authentication at all — always public
+    return None
+
+
 def parse_ws_bearer_token(websocket):
     return None, None
 
 
 async def current_user_ws_optional(websocket):
     return None
+
+
+async def verify_raw_token(token):
+    """Verify a raw JWT token string (used for query-param auth fallback)."""
+    return None
@@ -9,9 +9,9 @@ from collections import defaultdict
 from datetime import datetime, timedelta, timezone
 from typing import TYPE_CHECKING, Annotated, Optional
 
+import jwt
 from fastapi import APIRouter, Depends, HTTPException, Request
 from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
-from jose import JWTError, jwt
 from pydantic import BaseModel
 
 from reflector.auth.password_utils import verify_password
@@ -110,7 +110,7 @@ async def _authenticate_user(
             user_id = payload["sub"]
             email = payload.get("email")
             user_infos.append(UserInfo(sub=user_id, email=email))
-        except JWTError as e:
+        except jwt.PyJWTError as e:
             logger.error(f"JWT error: {e}")
             raise HTTPException(status_code=401, detail="Invalid authentication")
@@ -150,6 +150,16 @@ async def current_user_optional(
     return await _authenticate_user(jwt_token, api_key)
 
 
+async def current_user_optional_if_public_mode(
+    jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
+    api_key: Annotated[Optional[str], Depends(api_key_header)],
+) -> Optional[UserInfo]:
+    user = await _authenticate_user(jwt_token, api_key)
+    if user is None and not settings.PUBLIC_MODE:
+        raise HTTPException(status_code=401, detail="Not authenticated")
+    return user
+
+
 # --- WebSocket auth (same pattern as auth_jwt.py) ---
 def parse_ws_bearer_token(
     websocket: "WebSocket",
@@ -168,6 +178,11 @@ async def current_user_ws_optional(websocket: "WebSocket") -> Optional[UserInfo]:
     return await _authenticate_user(token, None)
 
 
+async def verify_raw_token(token: str) -> Optional[UserInfo]:
+    """Verify a raw JWT token string (used for query-param auth fallback)."""
+    return await _authenticate_user(token, None)
+
+
 # --- Login router ---
 router = APIRouter(prefix="/auth", tags=["auth"])
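A hypothetical route wiring for the new dependency (the endpoint itself is not part of this diff): in PUBLIC_MODE anonymous requests resolve to None, otherwise they fail with 401 before the handler runs:

    from fastapi import APIRouter, Depends

    from reflector.auth import current_user_optional_if_public_mode

    router = APIRouter()

    @router.get("/example")  # illustrative path only
    async def example(user=Depends(current_user_optional_if_public_mode)):
        # user is None only when settings.PUBLIC_MODE allows anonymous access
        return {"authenticated": user is not None}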
@@ -1,3 +1,4 @@
+from contextlib import asynccontextmanager
 from datetime import datetime, timedelta
 from typing import Any, Literal
@@ -66,6 +67,8 @@ meetings = sa.Table(
     # Daily.co composed video (Brady Bunch grid layout) - Daily.co only, not Whereby
     sa.Column("daily_composed_video_s3_key", sa.String, nullable=True),
     sa.Column("daily_composed_video_duration", sa.Integer, nullable=True),
+    # Email recipients for transcript notification
+    sa.Column("email_recipients", JSONB, nullable=True),
     sa.Index("idx_meeting_room_id", "room_id"),
     sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
 )
@@ -116,6 +119,8 @@ class Meeting(BaseModel):
     # Daily.co composed video (Brady Bunch grid) - Daily.co only
     daily_composed_video_s3_key: str | None = None
     daily_composed_video_duration: int | None = None
+    # Email recipients for transcript notification
+    email_recipients: list[str] | None = None
 
 
 class MeetingController:
@@ -388,6 +393,24 @@ class MeetingController:
         # If was_null=False, the WHERE clause prevented the update
         return was_null
 
+    @asynccontextmanager
+    async def transaction(self):
+        """A context manager for database transaction."""
+        async with get_database().transaction(isolation="serializable"):
+            yield
+
+    async def add_email_recipient(self, meeting_id: str, email: str) -> list[str]:
+        """Add an email to the meeting's email_recipients list (no duplicates)."""
+        async with self.transaction():
+            meeting = await self.get_by_id(meeting_id)
+            if not meeting:
+                raise ValueError(f"Meeting {meeting_id} not found")
+            current = meeting.email_recipients or []
+            if email not in current:
+                current.append(email)
+                await self.update_meeting(meeting_id, email_recipients=current)
+            return current
+
     async def increment_num_clients(self, meeting_id: str) -> None:
         """Atomically increment participant count."""
         query = (
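The serializable transaction exists because add_email_recipient is a read-modify-write on a JSONB list; without it, two participants subscribing at once could each read the same list and silently drop the other's address. A sketch of the intended call site (the module-level controller instance name is an assumption, mirroring recordings_controller and transcripts_controller elsewhere in this diff; ids and addresses are placeholders):

    from reflector.db.meetings import meetings_controller  # assumed instance name

    recipients = await meetings_controller.add_email_recipient("meeting-123", "a@example.com")
    recipients = await meetings_controller.add_email_recipient("meeting-123", "a@example.com")
    # second call is a no-op: recipients == ["a@example.com"], duplicates never appended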
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timezone
 from typing import Literal
 
 import sqlalchemy as sa
@@ -24,6 +24,7 @@ recordings = sa.Table(
     ),
     sa.Column("meeting_id", sa.String),
     sa.Column("track_keys", sa.JSON, nullable=True),
+    sa.Column("deleted_at", sa.DateTime(timezone=True), nullable=True),
     sa.Index("idx_recording_meeting_id", "meeting_id"),
 )
@@ -40,6 +41,7 @@ class Recording(BaseModel):
     # track_keys can be empty list [] if recording finished but no audio was captured (silence/muted)
     # None means not a multitrack recording, [] means multitrack with no tracks
     track_keys: list[str] | None = None
+    deleted_at: datetime | None = None
 
     @property
     def is_multitrack(self) -> bool:
@@ -69,7 +71,11 @@ class RecordingController:
         return Recording(**result) if result else None
 
     async def remove_by_id(self, id: str) -> None:
-        query = recordings.delete().where(recordings.c.id == id)
+        query = (
+            recordings.update()
+            .where(recordings.c.id == id)
+            .values(deleted_at=datetime.now(timezone.utc))
+        )
         await get_database().execute(query)
 
     async def set_meeting_id(
@@ -114,6 +120,7 @@ class RecordingController:
         .where(
             recordings.c.bucket_name == bucket_name,
             recordings.c.track_keys.isnot(None),
+            recordings.c.deleted_at.is_(None),
             or_(
                 transcripts.c.id.is_(None),
                 transcripts.c.status == "error",
@@ -63,6 +63,7 @@ rooms = sqlalchemy.Table(
         nullable=False,
         server_default=sqlalchemy.sql.false(),
     ),
+    sqlalchemy.Column("email_transcript_to", sqlalchemy.String, nullable=True),
     sqlalchemy.Index("idx_room_is_shared", "is_shared"),
     sqlalchemy.Index("idx_room_ics_enabled", "ics_enabled"),
 )
@@ -92,6 +93,7 @@ class Room(BaseModel):
     ics_last_etag: str | None = None
     platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
     skip_consent: bool = False
+    email_transcript_to: str | None = None
 
 
 class RoomController:
@@ -147,6 +149,7 @@ class RoomController:
         ics_enabled: bool = False,
         platform: Platform = settings.DEFAULT_VIDEO_PLATFORM,
         skip_consent: bool = False,
+        email_transcript_to: str | None = None,
     ):
         """
         Add a new room
@@ -172,6 +175,7 @@ class RoomController:
             "ics_enabled": ics_enabled,
             "platform": platform,
             "skip_consent": skip_consent,
+            "email_transcript_to": email_transcript_to,
         }
 
         room = Room(**room_data)
@@ -387,6 +387,8 @@ class SearchController:
             transcripts.join(rooms, transcripts.c.room_id == rooms.c.id, isouter=True)
         )
 
+        base_query = base_query.where(transcripts.c.deleted_at.is_(None))
+
         if params.query_text is not None:
             # because already initialized based on params.query_text presence above
             assert search_query is not None
@@ -91,6 +91,7 @@ transcripts = sqlalchemy.Table(
     sqlalchemy.Column("webvtt", sqlalchemy.Text),
     # Hatchet workflow run ID for resumption of failed workflows
     sqlalchemy.Column("workflow_run_id", sqlalchemy.String),
+    sqlalchemy.Column("deleted_at", sqlalchemy.DateTime(timezone=True), nullable=True),
     sqlalchemy.Column(
         "change_seq",
         sqlalchemy.BigInteger,
@@ -238,6 +239,7 @@ class Transcript(BaseModel):
     webvtt: str | None = None
     workflow_run_id: str | None = None  # Hatchet workflow run ID for resumption
     change_seq: int | None = None
+    deleted_at: datetime | None = None
 
     @field_serializer("created_at", when_used="json")
     def serialize_datetime(self, dt: datetime) -> str:
@@ -418,6 +420,8 @@ class TranscriptController:
             rooms, transcripts.c.room_id == rooms.c.id, isouter=True
         )
 
+        query = query.where(transcripts.c.deleted_at.is_(None))
+
         if user_id:
             query = query.where(
                 or_(transcripts.c.user_id == user_id, rooms.c.is_shared)
@@ -500,7 +504,10 @@ class TranscriptController:
         """
         Get transcripts by room_id (direct access without joins)
         """
-        query = transcripts.select().where(transcripts.c.room_id == room_id)
+        query = transcripts.select().where(
+            transcripts.c.room_id == room_id,
+            transcripts.c.deleted_at.is_(None),
+        )
         if "user_id" in kwargs:
             query = query.where(transcripts.c.user_id == kwargs["user_id"])
         if "order_by" in kwargs:
@@ -531,8 +538,11 @@ class TranscriptController:
         if not result:
             raise HTTPException(status_code=404, detail="Transcript not found")
 
-        # if the transcript is anonymous, share mode is not checked
         transcript = Transcript(**result)
+        if transcript.deleted_at is not None:
+            raise HTTPException(status_code=404, detail="Transcript not found")
+
+        # if the transcript is anonymous, share mode is not checked
         if transcript.user_id is None:
             return transcript
@@ -632,56 +642,49 @@ class TranscriptController:
         user_id: str | None = None,
     ) -> None:
         """
-        Remove a transcript by id
+        Soft-delete a transcript by id.
+
+        Sets deleted_at on the transcript and its associated recording.
+        All files (S3 and local) are preserved for later retrieval.
         """
         transcript = await self.get_by_id(transcript_id)
         if not transcript:
             return
         if user_id is not None and transcript.user_id != user_id:
             return
-        if transcript.audio_location == "storage" and not transcript.audio_deleted:
-            try:
-                await get_transcripts_storage().delete_file(
-                    transcript.storage_audio_path
-                )
-            except Exception as e:
-                logger.warning(
-                    "Failed to delete transcript audio from storage",
-                    exc_info=e,
-                    transcript_id=transcript.id,
-                )
-        transcript.unlink()
+        if transcript.deleted_at is not None:
+            return
+
+        now = datetime.now(timezone.utc)
+
+        # Soft-delete the associated recording (keeps S3 files intact)
         if transcript.recording_id:
             try:
-                recording = await recordings_controller.get_by_id(
-                    transcript.recording_id
-                )
-                if recording:
-                    try:
-                        await get_transcripts_storage().delete_file(
-                            recording.object_key, bucket=recording.bucket_name
-                        )
-                    except Exception as e:
-                        logger.warning(
-                            "Failed to delete recording object from S3",
-                            exc_info=e,
-                            recording_id=transcript.recording_id,
-                        )
-                    await recordings_controller.remove_by_id(transcript.recording_id)
+                await recordings_controller.remove_by_id(transcript.recording_id)
             except Exception as e:
                 logger.warning(
-                    "Failed to delete recording row",
+                    "Failed to soft-delete recording",
                     exc_info=e,
                     recording_id=transcript.recording_id,
                 )
-        query = transcripts.delete().where(transcripts.c.id == transcript_id)
+
+        # Soft-delete the transcript (keeps all files intact)
+        query = (
+            transcripts.update()
+            .where(transcripts.c.id == transcript_id)
+            .values(deleted_at=now)
+        )
         await get_database().execute(query)
 
     async def remove_by_recording_id(self, recording_id: str):
         """
-        Remove a transcript by recording_id
+        Soft-delete a transcript by recording_id
         """
-        query = transcripts.delete().where(transcripts.c.recording_id == recording_id)
+        query = (
+            transcripts.update()
+            .where(transcripts.c.recording_id == recording_id)
+            .values(deleted_at=datetime.now(timezone.utc))
+        )
         await get_database().execute(query)
 
     @staticmethod
@@ -697,6 +700,18 @@ class TranscriptController:
             return False
         return user_id and transcript.user_id == user_id
 
+    @staticmethod
+    def check_can_mutate(transcript: Transcript, user_id: str | None) -> None:
+        """
+        Raises HTTP 403 if the user cannot mutate the transcript.
+
+        Policy:
+        - Anonymous transcripts (user_id is None) are editable by anyone
+        - Owned transcripts can only be mutated by their owner
+        """
+        if transcript.user_id is not None and transcript.user_id != user_id:
+            raise HTTPException(status_code=403, detail="Not authorized")
+
     @asynccontextmanager
     async def transaction(self):
         """
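A sketch of how a mutating endpoint would use the new guard (the endpoint shape is illustrative, not part of this diff):

    from fastapi import HTTPException

    from reflector.db.transcripts import transcripts_controller

    async def rename_transcript(transcript_id: str, user_id: str | None, new_title: str):
        transcript = await transcripts_controller.get_by_id(transcript_id)
        if not transcript:
            raise HTTPException(status_code=404, detail="Transcript not found")
        # raises HTTPException(403) unless the transcript is anonymous or owned by user_id
        transcripts_controller.check_can_mutate(transcript, user_id)
        # ...perform the mutation...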
server/reflector/email.py (new file, 84 lines)
@@ -0,0 +1,84 @@
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import aiosmtplib
import structlog

from reflector.db.transcripts import Transcript
from reflector.settings import settings

logger = structlog.get_logger(__name__)


def is_email_configured() -> bool:
    return bool(settings.SMTP_HOST and settings.SMTP_FROM_EMAIL)


def get_transcript_url(transcript: Transcript) -> str:
    return f"{settings.UI_BASE_URL}/transcripts/{transcript.id}"


def _build_plain_text(transcript: Transcript, url: str) -> str:
    title = transcript.title or "Unnamed recording"
    lines = [
        f"Your transcript is ready: {title}",
        "",
        f"View it here: {url}",
    ]
    if transcript.short_summary:
        lines.extend(["", "Summary:", transcript.short_summary])
    return "\n".join(lines)


def _build_html(transcript: Transcript, url: str) -> str:
    title = transcript.title or "Unnamed recording"
    summary_html = ""
    if transcript.short_summary:
        summary_html = f"<p style='color:#555;'>{transcript.short_summary}</p>"

    return f"""\
<div style="font-family:sans-serif;max-width:600px;margin:0 auto;">
<h2>Your transcript is ready</h2>
<p><strong>{title}</strong></p>
{summary_html}
<p><a href="{url}" style="display:inline-block;padding:10px 20px;background:#4A90D9;color:#fff;text-decoration:none;border-radius:4px;">View Transcript</a></p>
<p style="color:#999;font-size:12px;">This email was sent because you requested to receive the transcript from a meeting.</p>
</div>"""


async def send_transcript_email(to_emails: list[str], transcript: Transcript) -> int:
    """Send transcript notification to all emails. Returns count sent."""
    if not is_email_configured() or not to_emails:
        return 0

    url = get_transcript_url(transcript)
    title = transcript.title or "Unnamed recording"
    sent = 0

    for email_addr in to_emails:
        msg = MIMEMultipart("alternative")
        msg["Subject"] = f"Transcript Ready: {title}"
        msg["From"] = settings.SMTP_FROM_EMAIL
        msg["To"] = email_addr

        msg.attach(MIMEText(_build_plain_text(transcript, url), "plain"))
        msg.attach(MIMEText(_build_html(transcript, url), "html"))

        try:
            await aiosmtplib.send(
                msg,
                hostname=settings.SMTP_HOST,
                port=settings.SMTP_PORT,
                username=settings.SMTP_USERNAME,
                password=settings.SMTP_PASSWORD,
                start_tls=settings.SMTP_USE_TLS,
            )
            sent += 1
        except Exception:
            logger.exception(
                "Failed to send transcript email",
                to=email_addr,
                transcript_id=transcript.id,
            )

    return sent
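Expected call pattern for the module (the transcript object is a reflector.db.transcripts.Transcript; the address is a placeholder):

    from reflector.email import is_email_configured, send_transcript_email

    if is_email_configured():  # SMTP_HOST and SMTP_FROM_EMAIL both set
        sent = await send_transcript_email(["alice@example.com"], transcript)
        # `sent` counts successful deliveries; per-recipient failures are
        # logged by the module rather than raised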
@@ -21,11 +21,27 @@ class TaskName(StrEnum):
     CLEANUP_CONSENT = "cleanup_consent"
     POST_ZULIP = "post_zulip"
     SEND_WEBHOOK = "send_webhook"
+    SEND_EMAIL = "send_email"
     PAD_TRACK = "pad_track"
     TRANSCRIBE_TRACK = "transcribe_track"
     DETECT_CHUNK_TOPIC = "detect_chunk_topic"
     GENERATE_DETAILED_SUMMARY = "generate_detailed_summary"
+
+    # File pipeline tasks
+    EXTRACT_AUDIO = "extract_audio"
+    UPLOAD_AUDIO = "upload_audio"
+    TRANSCRIBE = "transcribe"
+    DIARIZE = "diarize"
+    ASSEMBLE_TRANSCRIPT = "assemble_transcript"
+    GENERATE_SUMMARIES = "generate_summaries"
+
+    # Live post-processing pipeline tasks
+    WAVEFORM = "waveform"
+    CONVERT_MP3 = "convert_mp3"
+    UPLOAD_MP3 = "upload_mp3"
+    REMOVE_UPLOAD = "remove_upload"
+    FINAL_SUMMARIES = "final_summaries"
 
 
 # Rate limit key for LLM API calls (shared across all LLM-calling tasks)
 LLM_RATE_LIMIT_KEY = "llm"
@@ -39,5 +55,12 @@ TIMEOUT_MEDIUM = (
     300  # Single LLM calls, waveform generation (5m for slow LLM responses)
 )
 TIMEOUT_LONG = 180  # Action items (larger context LLM)
-TIMEOUT_AUDIO = 720  # Audio processing: padding, mixdown
-TIMEOUT_HEAVY = 600  # Transcription, fan-out LLM tasks
+TIMEOUT_TITLE = 300  # generate_title (single LLM call; doc: reduce from 600s)
+TIMEOUT_AUDIO = 720  # Audio processing: padding, mixdown (Hatchet execution_timeout)
+TIMEOUT_AUDIO_HTTP = (
+    660  # httpx timeout for pad_track — below 720 so Hatchet doesn't race
+)
+TIMEOUT_HEAVY = 1200  # Transcription, fan-out LLM tasks (Hatchet execution_timeout)
+TIMEOUT_HEAVY_HTTP = (
+    1150  # httpx timeout for transcribe_track — below 1200 so Hatchet doesn't race
+)
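The paired *_HTTP constants keep the client-side timeout strictly below the Hatchet execution_timeout so the HTTP call fails first and the task can surface a clean, retryable error instead of being killed mid-flight. A sketch of the intended pairing (the URL, endpoint, and payload are placeholders, not part of this diff):

    import httpx

    from reflector.hatchet.constants import TIMEOUT_AUDIO_HTTP

    async def pad_remote(track_url: str) -> None:
        payload = {"audio_url": track_url}  # illustrative request body
        # 660s httpx timeout < 720s TIMEOUT_AUDIO execution_timeout
        async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO_HTTP) as client:
            resp = await client.post("https://gpu.example.com/pad", json=payload)
            resp.raise_for_status()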
server/reflector/hatchet/error_classification.py (new file, 74 lines)
@@ -0,0 +1,74 @@
"""Classify exceptions as non-retryable for Hatchet workflows.

When a task raises NonRetryableException (or an exception classified as
non-retryable and re-raised as such), Hatchet stops immediately — no further
retries. Used by with_error_handling to avoid wasting retries on config errors,
auth failures, corrupt data, etc.
"""

# Optional dependencies: only classify if the exception type is available.
# This avoids a hard dependency on openai/av/botocore for code paths that don't use them.
try:
    import openai
except ImportError:
    openai = None  # type: ignore[assignment]

try:
    import av
except ImportError:
    av = None  # type: ignore[assignment]

try:
    from botocore.exceptions import ClientError as BotoClientError
except ImportError:
    BotoClientError = None  # type: ignore[misc, assignment]

from hatchet_sdk import NonRetryableException
from httpx import HTTPStatusError

from reflector.llm import LLMParseError

# HTTP status codes that won't change on retry (auth, not found, payment, payload)
NON_RETRYABLE_HTTP_STATUSES = {401, 402, 403, 404, 413}
NON_RETRYABLE_S3_CODES = {"AccessDenied", "NoSuchBucket", "NoSuchKey"}


def is_non_retryable(e: BaseException) -> bool:
    """Return True if the exception should stop Hatchet retries immediately.

    Hard failures (config, auth, missing resource, corrupt data) return True.
    Transient errors (timeouts, 5xx, 429, connection) return False.
    """
    if isinstance(e, NonRetryableException):
        return True

    # Config/input errors
    if isinstance(e, (ValueError, TypeError)):
        return True

    # HTTP status codes that won't change on retry
    if isinstance(e, HTTPStatusError):
        return e.response.status_code in NON_RETRYABLE_HTTP_STATUSES

    # OpenAI auth errors
    if openai is not None and isinstance(e, openai.AuthenticationError):
        return True

    # LLM parse failures (already retried internally)
    if isinstance(e, LLMParseError):
        return True

    # S3 permission/existence errors
    if BotoClientError is not None and isinstance(e, BotoClientError):
        code = e.response.get("Error", {}).get("Code", "")
        return code in NON_RETRYABLE_S3_CODES

    # Corrupt audio (PyAV) — AVError in some versions; fallback to InvalidDataError
    if av is not None:
        av_error = getattr(av, "AVError", None) or getattr(
            getattr(av, "error", None), "InvalidDataError", None
        )
        if av_error is not None and isinstance(e, av_error):
            return True

    return False
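with_error_handling (updated later in this document) is the main consumer; the control flow reduces to this sketch, where do_step stands in for any wrapped task body:

    from hatchet_sdk import NonRetryableException

    from reflector.hatchet.error_classification import is_non_retryable

    async def run_step():
        try:
            await do_step()  # placeholder for the wrapped task
        except Exception as e:
            if is_non_retryable(e):
                raise NonRetryableException(str(e)) from e  # Hatchet stops retrying
            raise  # transient: let Hatchet retry with backoff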
@@ -7,6 +7,7 @@ Configuration:
 - Worker affinity: pool=cpu-heavy
 """
 
+import reflector._warnings_filter  # noqa: F401 -- side effect: suppress pydantic validate_default warning
 from reflector.hatchet.client import HatchetClientManager
 from reflector.hatchet.workflows.daily_multitrack_pipeline import (
     daily_multitrack_pipeline,
@@ -5,14 +5,18 @@ Handles: all tasks except mixdown_tracks (transcription, LLM inference, orchestr
 
 import asyncio
 
+import reflector._warnings_filter  # noqa: F401 -- side effect: suppress pydantic validate_default warning
 from reflector.hatchet.client import HatchetClientManager
 from reflector.hatchet.workflows.daily_multitrack_pipeline import (
     daily_multitrack_pipeline,
 )
+from reflector.hatchet.workflows.file_pipeline import file_pipeline
+from reflector.hatchet.workflows.live_post_pipeline import live_post_pipeline
 from reflector.hatchet.workflows.subject_processing import subject_workflow
 from reflector.hatchet.workflows.topic_chunk_processing import topic_chunk_workflow
 from reflector.hatchet.workflows.track_processing import track_workflow
 from reflector.logger import logger
+from reflector.settings import settings
 
 SLOTS = 10
 WORKER_NAME = "llm-worker-pool"
@@ -31,6 +35,38 @@ def main():
             error=str(e),
         )
 
+    workflows = [
+        daily_multitrack_pipeline,
+        file_pipeline,
+        live_post_pipeline,
+        topic_chunk_workflow,
+        subject_workflow,
+        track_workflow,
+    ]
+
+    _zulip_dag_enabled = all(
+        [
+            settings.ZULIP_REALM,
+            settings.ZULIP_API_KEY,
+            settings.ZULIP_BOT_EMAIL,
+            settings.ZULIP_DAG_STREAM,
+            settings.ZULIP_DAG_TOPIC,
+        ]
+    )
+    if _zulip_dag_enabled:
+        from reflector.hatchet.workflows.failed_runs_monitor import (  # noqa: PLC0415
+            failed_runs_monitor,
+        )
+
+        workflows.append(failed_runs_monitor)
+        logger.info(
+            "FailedRunsMonitor cron enabled",
+            stream=settings.ZULIP_DAG_STREAM,
+            topic=settings.ZULIP_DAG_TOPIC,
+        )
+    else:
+        logger.info("FailedRunsMonitor cron disabled (Zulip DAG not configured)")
+
     logger.info(
         "Starting Hatchet LLM worker pool (all tasks except mixdown)",
         worker_name=WORKER_NAME,
@@ -44,12 +80,7 @@ def main():
         labels={
             "pool": POOL,
         },
-        workflows=[
-            daily_multitrack_pipeline,
-            topic_chunk_workflow,
-            subject_workflow,
-            track_workflow,
-        ],
+        workflows=workflows,
     )
 
     try:
@@ -27,11 +27,13 @@ from hatchet_sdk import (
     ConcurrencyExpression,
     ConcurrencyLimitStrategy,
     Context,
+    NonRetryableException,
 )
 from hatchet_sdk.labels import DesiredWorkerLabel
 from pydantic import BaseModel
 
 from reflector.dailyco_api.client import DailyApiClient
+from reflector.email import is_email_configured, send_transcript_email
 from reflector.hatchet.broadcast import (
     append_event_and_broadcast,
     set_status_and_broadcast,
@@ -43,11 +45,14 @@ from reflector.hatchet.constants import (
     TIMEOUT_LONG,
     TIMEOUT_MEDIUM,
     TIMEOUT_SHORT,
+    TIMEOUT_TITLE,
     TaskName,
 )
+from reflector.hatchet.error_classification import is_non_retryable
 from reflector.hatchet.workflows.models import (
     ActionItemsResult,
     ConsentResult,
+    EmailResult,
     FinalizeResult,
     MixdownResult,
     PaddedTrackInfo,
@@ -79,7 +84,7 @@ from reflector.hatchet.workflows.topic_chunk_processing import (
 from reflector.hatchet.workflows.track_processing import TrackInput, track_workflow
 from reflector.logger import logger
 from reflector.pipelines import topic_processing
-from reflector.processors import AudioFileWriterProcessor
+from reflector.processors.audio_mixdown_auto import AudioMixdownAutoProcessor
 from reflector.processors.summary.models import ActionItemsResponse
 from reflector.processors.summary.prompts import (
     RECAP_PROMPT,
@@ -90,15 +95,10 @@ from reflector.processors.summary.summary_builder import SummaryBuilder
 from reflector.processors.types import TitleSummary, Word
 from reflector.processors.types import Transcript as TranscriptType
 from reflector.settings import settings
-from reflector.storage.storage_aws import AwsStorage
 from reflector.utils.audio_constants import (
     PRESIGNED_URL_EXPIRATION_SECONDS,
     WAVEFORM_SEGMENTS,
 )
-from reflector.utils.audio_mixdown import (
-    detect_sample_rate_from_tracks,
-    mixdown_tracks_pyav,
-)
 from reflector.utils.audio_waveform import get_audio_waveform
 from reflector.utils.daily import (
     filter_cam_audio_tracks,
@@ -117,6 +117,7 @@ class PipelineInput(BaseModel):
     bucket_name: NonEmptyString
     transcript_id: NonEmptyString
     room_id: NonEmptyString | None = None
+    source_platform: str = "daily"
 
 
 hatchet = HatchetClientManager.get_client()
@@ -170,15 +171,10 @@ async def set_workflow_error_status(transcript_id: NonEmptyString) -> bool:
 
 
 def _spawn_storage():
-    """Create fresh storage instance."""
-    # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
-    return AwsStorage(
-        aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
-        aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
-        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
-        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
-        aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
-    )
+    """Create fresh storage instance for writing to our transcript bucket."""
+    from reflector.storage import get_transcripts_storage  # noqa: PLC0415
+
+    return get_transcripts_storage()
 
 
 class Loggable(Protocol):
@@ -221,6 +217,13 @@ def make_audio_progress_logger(
 R = TypeVar("R")
 
 
+def _successful_run_results(
+    results: list[dict[str, Any] | BaseException],
+) -> list[dict[str, Any]]:
+    """Return only successful (non-exception) results from aio_run_many(return_exceptions=True)."""
+    return [r for r in results if not isinstance(r, BaseException)]
+
+
 def with_error_handling(
     step_name: TaskName, set_error_status: bool = True
 ) -> Callable[
@@ -248,8 +251,12 @@ def with_error_handling(
                 error=str(e),
                 exc_info=True,
             )
-            if set_error_status:
-                await set_workflow_error_status(input.transcript_id)
+            if is_non_retryable(e):
+                # Hard fail: stop retries, set error status, fail workflow
+                if set_error_status:
+                    await set_workflow_error_status(input.transcript_id)
+                raise NonRetryableException(str(e)) from e
+            # Transient: do not set error status — Hatchet will retry
             raise
 
         return wrapper  # type: ignore[return-value]
@@ -258,7 +265,10 @@
 
 
 @daily_multitrack_pipeline.task(
-    execution_timeout=timedelta(seconds=TIMEOUT_SHORT), retries=3
+    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
+    retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=10,
 )
 @with_error_handling(TaskName.GET_RECORDING)
 async def get_recording(input: PipelineInput, ctx: Context) -> RecordingResult:
@@ -295,7 +305,9 @@ async def get_recording(input: PipelineInput, ctx: Context) -> RecordingResult:
     ctx.log(
         f"get_recording: calling Daily.co API for recording_id={input.recording_id}..."
     )
-    async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client:
+    async with DailyApiClient(
+        api_key=settings.DAILY_API_KEY, base_url=settings.DAILY_API_URL
+    ) as client:
         recording = await client.get_recording(input.recording_id)
     ctx.log(f"get_recording: Daily.co API returned successfully")
@@ -314,6 +326,8 @@ async def get_recording(input: PipelineInput, ctx: Context) -> RecordingResult:
     parents=[get_recording],
     execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
     retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=10,
 )
 @with_error_handling(TaskName.GET_PARTICIPANTS)
 async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsResult:
@@ -360,7 +374,9 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsResult:
         settings.DAILY_API_KEY, "DAILY_API_KEY is required"
     )
 
-    async with DailyApiClient(api_key=daily_api_key) as client:
+    async with DailyApiClient(
+        api_key=daily_api_key, base_url=settings.DAILY_API_URL
+    ) as client:
         participants = await client.get_meeting_participants(mtg_session_id)
 
     id_to_name = {}
@@ -417,6 +433,8 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsResult:
     parents=[get_participants],
     execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
     retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
 )
 @with_error_handling(TaskName.PROCESS_TRACKS)
 async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
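With backoff_factor=2.0 the retry delay grows exponentially per attempt and is clamped by backoff_max_seconds. Assuming the usual factor**attempt delay model (the exact Hatchet formula and jitter are not shown in this diff), the schedule for the retries=3, backoff_max_seconds=10 configuration above works out to:

    # assumed delay model: min(backoff_factor ** attempt, backoff_max_seconds)
    delays = [min(2.0**attempt, 10.0) for attempt in (1, 2, 3)]
    print(delays)  # [2.0, 4.0, 8.0] seconds across the three retries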
@@ -434,12 +452,13 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
                 bucket_name=input.bucket_name,
                 transcript_id=input.transcript_id,
                 language=source_language,
+                source_platform=input.source_platform,
             )
         )
         for i, track in enumerate(input.tracks)
     ]
 
-    results = await track_workflow.aio_run_many(bulk_runs)
+    results = await track_workflow.aio_run_many(bulk_runs, return_exceptions=True)
 
     target_language = participants_result.target_language
 
@@ -447,7 +466,18 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
     padded_tracks = []
    created_padded_files = set()
 
-    for result in results:
+    for i, result in enumerate(results):
+        if isinstance(result, BaseException):
+            logger.error(
+                "[Hatchet] process_tracks: track workflow failed, failing step",
+                transcript_id=input.transcript_id,
+                track_index=i,
+                error=str(result),
+            )
+            ctx.log(f"process_tracks: track {i} failed ({result}), failing step")
+            raise ValueError(
+                f"Track {i} workflow failed after retries: {result!s}"
+            ) from result
         transcribe_result = TranscribeTrackResult(**result[TaskName.TRANSCRIBE_TRACK])
         track_words.append(transcribe_result.words)
@@ -485,7 +515,9 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
 @daily_multitrack_pipeline.task(
     parents=[process_tracks],
     execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
-    retries=3,
+    retries=2,
+    backoff_factor=2.0,
+    backoff_max_seconds=15,
     desired_worker_labels={
         "pool": DesiredWorkerLabel(
             value="cpu-heavy",
@@ -503,7 +535,7 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
 )
 @with_error_handling(TaskName.MIXDOWN_TRACKS)
 async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
-    """Mix all padded tracks into single audio file using PyAV (same as Celery)."""
+    """Mix all padded tracks into single audio file via configured backend."""
     ctx.log("mixdown_tracks: mixing padded tracks into single audio file")
 
     track_result = ctx.task_output(process_tracks)
@@ -543,37 +575,33 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
     if not valid_urls:
         raise ValueError("No valid padded tracks to mixdown")
 
-    target_sample_rate = detect_sample_rate_from_tracks(valid_urls, logger=logger)
-    if not target_sample_rate:
-        logger.error("Mixdown failed - no decodable audio frames found")
-        raise ValueError("No decodable audio frames in any track")
-
-    output_path = tempfile.mktemp(suffix=".mp3")
-    duration_ms_callback_capture_container = [0.0]
-
-    async def capture_duration(d):
-        duration_ms_callback_capture_container[0] = d
-
-    writer = AudioFileWriterProcessor(path=output_path, on_duration=capture_duration)
-
-    await mixdown_tracks_pyav(
-        valid_urls,
-        writer,
-        target_sample_rate,
-        offsets_seconds=None,
-        logger=logger,
-        progress_callback=make_audio_progress_logger(ctx, TaskName.MIXDOWN_TRACKS),
-        expected_duration_sec=recording_duration if recording_duration > 0 else None,
-    )
-    await writer.flush()
-
-    file_size = Path(output_path).stat().st_size
     storage_path = f"{input.transcript_id}/audio.mp3"
 
-    with open(output_path, "rb") as mixed_file:
-        await storage.put_file(storage_path, mixed_file)
+    # Generate presigned PUT URL for the output (used by modal backend;
+    # pyav backend ignores it and writes locally instead)
+    output_url = await storage.get_file_url(
+        storage_path,
+        operation="put_object",
+        expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
+    )
 
-    Path(output_path).unlink(missing_ok=True)
+    processor = AudioMixdownAutoProcessor()
+    result = await processor.mixdown_tracks(
+        valid_urls, output_url, offsets_seconds=None
+    )
+
+    if result.output_path:
+        # Pyav backend wrote locally — upload to storage ourselves
+        output_file = Path(result.output_path)
+        with open(output_file, "rb") as mixed_file:
+            await storage.put_file(storage_path, mixed_file)
+        output_file.unlink(missing_ok=True)
+        # Clean up the temp directory the pyav processor created
+        try:
+            output_file.parent.rmdir()
+        except OSError:
+            pass
+    # else: modal backend already uploaded to output_url
 
     async with fresh_db_connection():
         from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
@@ -584,11 +612,11 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
             transcript, {"audio_location": "storage"}
         )
 
-    ctx.log(f"mixdown_tracks complete: uploaded {file_size} bytes to {storage_path}")
+    ctx.log(f"mixdown_tracks complete: {result.size} bytes to {storage_path}")
 
     return MixdownResult(
         audio_key=storage_path,
-        duration=duration_ms_callback_capture_container[0],
+        duration=result.duration_ms,
         tracks_mixed=len(valid_urls),
     )
@@ -597,6 +625,8 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
     parents=[mixdown_tracks],
     execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
     retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=10,
 )
 @with_error_handling(TaskName.GENERATE_WAVEFORM)
 async def generate_waveform(input: PipelineInput, ctx: Context) -> WaveformResult:
@@ -665,6 +695,8 @@ async def generate_waveform(input: PipelineInput, ctx: Context) -> WaveformResult:
     parents=[process_tracks],
     execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
     retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
 )
 @with_error_handling(TaskName.DETECT_TOPICS)
 async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
@@ -726,11 +758,22 @@ async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
         for chunk in chunks
     ]
 
-    results = await topic_chunk_workflow.aio_run_many(bulk_runs)
+    results = await topic_chunk_workflow.aio_run_many(bulk_runs, return_exceptions=True)
 
-    topic_chunks = [
-        TopicChunkResult(**result[TaskName.DETECT_CHUNK_TOPIC]) for result in results
-    ]
+    topic_chunks: list[TopicChunkResult] = []
+    for i, result in enumerate(results):
+        if isinstance(result, BaseException):
+            logger.error(
+                "[Hatchet] detect_topics: chunk workflow failed, failing step",
+                transcript_id=input.transcript_id,
+                chunk_index=i,
+                error=str(result),
+            )
+            ctx.log(f"detect_topics: chunk {i} failed ({result}), failing step")
+            raise ValueError(
+                f"Topic chunk {i} workflow failed after retries: {result!s}"
+            ) from result
+        topic_chunks.append(TopicChunkResult(**result[TaskName.DETECT_CHUNK_TOPIC]))
 
     async with fresh_db_connection():
         transcript = await transcripts_controller.get_by_id(input.transcript_id)
@@ -768,8 +811,10 @@ async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
 
 @daily_multitrack_pipeline.task(
     parents=[detect_topics],
-    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
+    execution_timeout=timedelta(seconds=TIMEOUT_TITLE),
     retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=15,
 )
 @with_error_handling(TaskName.GENERATE_TITLE)
 async def generate_title(input: PipelineInput, ctx: Context) -> TitleResult:
@@ -834,7 +879,9 @@ async def generate_title(input: PipelineInput, ctx: Context) -> TitleResult:
 @daily_multitrack_pipeline.task(
     parents=[detect_topics],
     execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
-    retries=3,
+    retries=5,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
 )
 @with_error_handling(TaskName.EXTRACT_SUBJECTS)
 async def extract_subjects(input: PipelineInput, ctx: Context) -> SubjectsResult:
@@ -913,6 +960,8 @@ async def extract_subjects(input: PipelineInput, ctx: Context) -> SubjectsResult:
     parents=[extract_subjects],
     execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
     retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
 )
 @with_error_handling(TaskName.PROCESS_SUBJECTS)
 async def process_subjects(input: PipelineInput, ctx: Context) -> ProcessSubjectsResult:
@@ -939,12 +988,24 @@ async def process_subjects(input: PipelineInput, ctx: Context) -> ProcessSubjectsResult:
         for i, subject in enumerate(subjects)
     ]
 
-    results = await subject_workflow.aio_run_many(bulk_runs)
+    results = await subject_workflow.aio_run_many(bulk_runs, return_exceptions=True)
 
-    subject_summaries = [
-        SubjectSummaryResult(**result[TaskName.GENERATE_DETAILED_SUMMARY])
-        for result in results
-    ]
+    subject_summaries: list[SubjectSummaryResult] = []
+    for i, result in enumerate(results):
+        if isinstance(result, BaseException):
+            logger.error(
+                "[Hatchet] process_subjects: subject workflow failed, failing step",
+                transcript_id=input.transcript_id,
+                subject_index=i,
+                error=str(result),
+            )
+            ctx.log(f"process_subjects: subject {i} failed ({result}), failing step")
+            raise ValueError(
+                f"Subject {i} workflow failed after retries: {result!s}"
+            ) from result
+        subject_summaries.append(
+            SubjectSummaryResult(**result[TaskName.GENERATE_DETAILED_SUMMARY])
+        )
 
     ctx.log(f"process_subjects complete: {len(subject_summaries)} summaries")
 
@@ -955,6 +1016,8 @@ async def process_subjects(input: PipelineInput, ctx: Context) -> ProcessSubjectsResult:
|
||||
parents=[process_subjects],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.GENERATE_RECAP)
|
||||
async def generate_recap(input: PipelineInput, ctx: Context) -> RecapResult:
|
||||
@@ -1044,6 +1107,8 @@ async def generate_recap(input: PipelineInput, ctx: Context) -> RecapResult:
|
||||
parents=[extract_subjects],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_LONG),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.IDENTIFY_ACTION_ITEMS)
|
||||
async def identify_action_items(
|
||||
@@ -1112,6 +1177,8 @@ async def identify_action_items(
|
||||
parents=[process_tracks, generate_title, generate_recap, identify_action_items],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=5,
|
||||
)
|
||||
@with_error_handling(TaskName.FINALIZE)
|
||||
async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
|
||||
@@ -1181,7 +1248,11 @@ async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
|
||||
|
||||
|
||||
@daily_multitrack_pipeline.task(
|
||||
parents=[finalize], execution_timeout=timedelta(seconds=TIMEOUT_SHORT), retries=3
|
||||
parents=[finalize],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=10,
|
||||
)
|
||||
@with_error_handling(TaskName.CLEANUP_CONSENT, set_error_status=False)
|
||||
async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
@@ -1195,7 +1266,10 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
)
|
||||
from reflector.db.recordings import recordings_controller # noqa: PLC0415
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
|
||||
from reflector.storage import get_transcripts_storage # noqa: PLC0415
|
||||
from reflector.storage import ( # noqa: PLC0415
|
||||
get_source_storage,
|
||||
get_transcripts_storage,
|
||||
)
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if not transcript:
|
||||
@@ -1245,7 +1319,7 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
deletion_errors = []
|
||||
|
||||
if input_track_keys and input.bucket_name:
|
||||
master_storage = get_transcripts_storage()
|
||||
master_storage = get_source_storage(input.source_platform)
|
||||
for key in input_track_keys:
|
||||
try:
|
||||
await master_storage.delete_file(key, bucket=input.bucket_name)
|
||||
@@ -1284,6 +1358,8 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
parents=[cleanup_consent],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.POST_ZULIP, set_error_status=False)
|
||||
async def post_zulip(input: PipelineInput, ctx: Context) -> ZulipResult:
|
||||
@@ -1311,6 +1387,8 @@ async def post_zulip(input: PipelineInput, ctx: Context) -> ZulipResult:
|
||||
parents=[cleanup_consent],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.SEND_WEBHOOK, set_error_status=False)
|
||||
async def send_webhook(input: PipelineInput, ctx: Context) -> WebhookResult:
|
||||
@@ -1379,3 +1457,95 @@ async def send_webhook(input: PipelineInput, ctx: Context) -> WebhookResult:
|
||||
except Exception as e:
|
||||
ctx.log(f"send_webhook unexpected error, continuing anyway: {e}")
|
||||
return WebhookResult(webhook_sent=False)
|
||||
|
||||
|
||||
@daily_multitrack_pipeline.task(
|
||||
parents=[cleanup_consent],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.SEND_EMAIL, set_error_status=False)
|
||||
async def send_email(input: PipelineInput, ctx: Context) -> EmailResult:
|
||||
"""Send transcript email to collected recipients."""
|
||||
ctx.log(f"send_email: transcript_id={input.transcript_id}")
|
||||
|
||||
if not is_email_configured():
|
||||
ctx.log("send_email skipped (SMTP not configured)")
|
||||
return EmailResult(skipped=True)
|
||||
|
||||
async with fresh_db_connection():
|
||||
from reflector.db.meetings import meetings_controller # noqa: PLC0415
|
||||
from reflector.db.recordings import recordings_controller # noqa: PLC0415
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if not transcript:
|
||||
ctx.log("send_email skipped (transcript not found)")
|
||||
return EmailResult(skipped=True)
|
||||
|
||||
meeting = None
|
||||
if transcript.meeting_id:
|
||||
meeting = await meetings_controller.get_by_id(transcript.meeting_id)
|
||||
if not meeting and transcript.recording_id:
|
||||
recording = await recordings_controller.get_by_id(transcript.recording_id)
|
||||
if recording and recording.meeting_id:
|
||||
meeting = await meetings_controller.get_by_id(recording.meeting_id)
|
||||
|
||||
recipients = (
|
||||
list(meeting.email_recipients)
|
||||
if meeting and meeting.email_recipients
|
||||
else []
|
||||
)
|
||||
|
||||
# Also check room-level email
|
||||
from reflector.db.rooms import rooms_controller # noqa: PLC0415
|
||||
|
||||
if transcript.room_id:
|
||||
room = await rooms_controller.get_by_id(transcript.room_id)
|
||||
if room and room.email_transcript_to:
|
||||
if room.email_transcript_to not in recipients:
|
||||
recipients.append(room.email_transcript_to)
|
||||
|
||||
if not recipients:
|
||||
ctx.log("send_email skipped (no email recipients)")
|
||||
return EmailResult(skipped=True)
|
||||
|
||||
# For room-level emails, do NOT change share_mode (only set public if meeting had recipients)
|
||||
if meeting and meeting.email_recipients:
|
||||
await transcripts_controller.update(transcript, {"share_mode": "public"})
|
||||
|
||||
count = await send_transcript_email(recipients, transcript)
|
||||
ctx.log(f"send_email complete: sent {count} emails")
|
||||
|
||||
return EmailResult(emails_sent=count)
|
||||
|
||||
|
||||
async def on_workflow_failure(input: PipelineInput, ctx: Context) -> None:
|
||||
"""Run when the workflow is truly dead (all retries exhausted).
|
||||
|
||||
Sets transcript status to 'error' only if it is not already 'ended'.
|
||||
Post-finalize tasks (cleanup_consent, post_zulip, send_webhook) use
|
||||
set_error_status=False; if one of them fails, we must not overwrite
|
||||
the 'ended' status that finalize already set.
|
||||
"""
|
||||
async with fresh_db_connection():
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if transcript and transcript.status == "ended":
|
||||
logger.info(
|
||||
"[Hatchet] on_workflow_failure: transcript already ended, skipping error status (failure was post-finalize)",
|
||||
transcript_id=input.transcript_id,
|
||||
)
|
||||
ctx.log(
|
||||
"on_workflow_failure: transcript already ended, skipping error status"
|
||||
)
|
||||
return
|
||||
await set_workflow_error_status(input.transcript_id)
|
||||
|
||||
|
||||
@daily_multitrack_pipeline.on_failure_task()
|
||||
async def _register_on_workflow_failure(input: PipelineInput, ctx: Context) -> None:
|
||||
await on_workflow_failure(input, ctx)
|
||||
|
||||
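A note on the two fan-out changes above (detect_topics and process_subjects): switching to `aio_run_many(bulk_runs, return_exceptions=True)` means a failed child run comes back as an exception object instead of raising out of the call and discarding the sibling results. A minimal sketch of the shared pattern, assuming `return_exceptions=True` mirrors `asyncio.gather` semantics; the helper name and the `task_name` parameter are illustrative, not part of the diff:

    # Sketch of the fail-fast fan-out pattern used by detect_topics and
    # process_subjects above. Assumes aio_run_many(..., return_exceptions=True)
    # returns BaseException instances in place of failed child results.
    async def run_children_or_fail(child_workflow, bulk_runs, task_name: str) -> list[dict]:
        results = await child_workflow.aio_run_many(bulk_runs, return_exceptions=True)
        outputs: list[dict] = []
        for i, result in enumerate(results):
            if isinstance(result, BaseException):
                # One child exhausted its retries: fail the parent step with a
                # clear error rather than crashing later on a partial list.
                raise ValueError(f"child run {i} failed after retries: {result!s}") from result
            outputs.append(result[task_name])  # each child result maps task name -> output
        return outputs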
109
server/reflector/hatchet/workflows/failed_runs_monitor.py
Normal file
@@ -0,0 +1,109 @@
"""
Hatchet cron workflow: FailedRunsMonitor

Runs hourly, queries Hatchet for failed pipeline runs in the last hour,
and posts details to Zulip for visibility.

Only registered with the worker when Zulip DAG settings are configured.
"""

from datetime import datetime, timedelta, timezone

from hatchet_sdk import Context
from hatchet_sdk.clients.rest.models import V1TaskStatus

from reflector.hatchet.client import HatchetClientManager
from reflector.logger import logger
from reflector.settings import settings
from reflector.tools.render_hatchet_run import render_run_detail
from reflector.zulip import send_message_to_zulip

MONITORED_PIPELINES = {
    "DiarizationPipeline",
    "FilePipeline",
    "LivePostProcessingPipeline",
}

LOOKBACK_HOURS = 1

hatchet = HatchetClientManager.get_client()

failed_runs_monitor = hatchet.workflow(
    name="FailedRunsMonitor",
    on_crons=["0 * * * *"],
)


async def _check_failed_runs() -> dict:
    """Core logic: query for failed pipeline runs and post each to Zulip.

    Extracted from the Hatchet task for testability.
    """
    now = datetime.now(tz=timezone.utc)
    since = now - timedelta(hours=LOOKBACK_HOURS)

    client = HatchetClientManager.get_client()

    try:
        result = await client.runs.aio_list(
            statuses=[V1TaskStatus.FAILED],
            since=since,
            until=now,
            limit=200,
        )
    except Exception:
        logger.exception("[FailedRunsMonitor] Failed to list runs from Hatchet")
        return {"checked": 0, "reported": 0, "error": "failed to list runs"}

    rows = result.rows or []

    # Filter to main pipelines only (skip child workflows like TrackProcessing, etc.)
    failed_main_runs = [run for run in rows if run.workflow_name in MONITORED_PIPELINES]

    if not failed_main_runs:
        logger.info(
            "[FailedRunsMonitor] No failed pipeline runs in the last hour",
            total_failed=len(rows),
            since=since.isoformat(),
        )
        return {"checked": len(rows), "reported": 0}

    logger.info(
        "[FailedRunsMonitor] Found failed pipeline runs",
        count=len(failed_main_runs),
        since=since.isoformat(),
    )

    reported = 0
    for run in failed_main_runs:
        try:
            details = await client.runs.aio_get(run.workflow_run_external_id)
            content = render_run_detail(details)
            await send_message_to_zulip(
                settings.ZULIP_DAG_STREAM,
                settings.ZULIP_DAG_TOPIC,
                content,
            )
            reported += 1
        except Exception:
            logger.exception(
                "[FailedRunsMonitor] Failed to report run",
                workflow_run_id=run.workflow_run_external_id,
                workflow_name=run.workflow_name,
            )

    logger.info(
        "[FailedRunsMonitor] Finished reporting",
        reported=reported,
        total_failed_main=len(failed_main_runs),
    )
    return {"checked": len(rows), "reported": reported}


@failed_runs_monitor.task(
    execution_timeout=timedelta(seconds=120),
    retries=1,
)
async def check_failed_runs(input, ctx: Context) -> dict:
    """Hatchet task entry point — delegates to _check_failed_runs."""
    return await _check_failed_runs()
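Because `_check_failed_runs` is deliberately split out of the Hatchet task for testability, it can be exercised without a running worker. A rough test sketch, assuming pytest-asyncio is available and that `HatchetClientManager.get_client` is patchable at the module path above; nothing here is taken from the repository's actual test suite:

    # Rough sketch: unit-test _check_failed_runs with a mocked Hatchet client.
    from unittest.mock import AsyncMock, MagicMock, patch

    import pytest


    @pytest.mark.asyncio
    async def test_no_failed_runs_reports_nothing():
        fake_client = MagicMock()
        # aio_list returns an object whose .rows is empty, so nothing is reported
        fake_client.runs.aio_list = AsyncMock(return_value=MagicMock(rows=[]))

        with patch(
            "reflector.hatchet.workflows.failed_runs_monitor.HatchetClientManager.get_client",
            return_value=fake_client,
        ):
            from reflector.hatchet.workflows.failed_runs_monitor import (
                _check_failed_runs,
            )

            assert await _check_failed_runs() == {"checked": 0, "reported": 0}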
951
server/reflector/hatchet/workflows/file_pipeline.py
Normal file
@@ -0,0 +1,951 @@
"""
Hatchet workflow: FilePipeline

Processing pipeline for file uploads and Whereby recordings.
Orchestrates: extract audio → upload → transcribe/diarize/waveform (parallel)
→ assemble → detect topics → title/summaries (parallel) → finalize
→ cleanup consent → post zulip / send webhook.

Note: This file uses deferred imports (inside functions/tasks) intentionally.
Hatchet workers run in forked processes; fresh imports per task ensure DB connections
are not shared across forks, avoiding connection pooling issues.
"""

import json
from datetime import timedelta
from pathlib import Path

from hatchet_sdk import Context
from pydantic import BaseModel

from reflector.email import is_email_configured, send_transcript_email
from reflector.hatchet.broadcast import (
    append_event_and_broadcast,
    set_status_and_broadcast,
)
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import (
    TIMEOUT_HEAVY,
    TIMEOUT_MEDIUM,
    TIMEOUT_SHORT,
    TIMEOUT_TITLE,
    TaskName,
)
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
    fresh_db_connection,
    set_workflow_error_status,
    with_error_handling,
)
from reflector.hatchet.workflows.models import (
    ConsentResult,
    EmailResult,
    TitleResult,
    TopicsResult,
    WaveformResult,
    WebhookResult,
    ZulipResult,
)
from reflector.logger import logger
from reflector.pipelines import topic_processing
from reflector.settings import settings
from reflector.utils.audio_constants import WAVEFORM_SEGMENTS
from reflector.utils.audio_waveform import get_audio_waveform


class FilePipelineInput(BaseModel):
    transcript_id: str
    room_id: str | None = None


# --- Result models specific to file pipeline ---


class ExtractAudioResult(BaseModel):
    audio_path: str
    duration_ms: float = 0.0


class UploadAudioResult(BaseModel):
    audio_url: str
    audio_path: str


class TranscribeResult(BaseModel):
    words: list[dict]
    translation: str | None = None


class DiarizeResult(BaseModel):
    diarization: list[dict] | None = None


class AssembleTranscriptResult(BaseModel):
    assembled: bool


class SummariesResult(BaseModel):
    generated: bool


class FinalizeResult(BaseModel):
    status: str


hatchet = HatchetClientManager.get_client()

file_pipeline = hatchet.workflow(name="FilePipeline", input_validator=FilePipelineInput)


@file_pipeline.task(
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.EXTRACT_AUDIO)
async def extract_audio(input: FilePipelineInput, ctx: Context) -> ExtractAudioResult:
    """Extract audio from upload file, convert to MP3."""
    ctx.log(f"extract_audio: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        await set_status_and_broadcast(input.transcript_id, "processing", logger=logger)

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            raise ValueError(f"Transcript {input.transcript_id} not found")

        # Clear transcript as we're going to regenerate everything
        await transcripts_controller.update(
            transcript,
            {
                "events": [],
                "topics": [],
            },
        )

        # Find upload file
        audio_file = next(transcript.data_path.glob("upload.*"), None)
        if not audio_file:
            audio_file = next(transcript.data_path.glob("audio.*"), None)
        if not audio_file:
            raise ValueError("No audio file found to process")

        ctx.log(f"extract_audio: processing {audio_file}")

        # Extract audio and write as MP3
        import av  # noqa: PLC0415

        from reflector.processors import AudioFileWriterProcessor  # noqa: PLC0415

        duration_ms_container = [0.0]

        async def capture_duration(d):
            duration_ms_container[0] = d

        mp3_writer = AudioFileWriterProcessor(
            path=transcript.audio_mp3_filename,
            on_duration=capture_duration,
        )
        input_container = av.open(str(audio_file))
        for frame in input_container.decode(audio=0):
            await mp3_writer.push(frame)
        await mp3_writer.flush()
        input_container.close()

        duration_ms = duration_ms_container[0]
        audio_path = str(transcript.audio_mp3_filename)

        # Persist duration to database and broadcast to websocket clients
        from reflector.db.transcripts import TranscriptDuration  # noqa: PLC0415
        from reflector.db.transcripts import transcripts_controller as tc

        await tc.update(transcript, {"duration": duration_ms})
        await append_event_and_broadcast(
            input.transcript_id,
            transcript,
            "DURATION",
            TranscriptDuration(duration=duration_ms),
            logger=logger,
        )

    ctx.log(f"extract_audio complete: {audio_path}, duration={duration_ms}ms")
    return ExtractAudioResult(audio_path=audio_path, duration_ms=duration_ms)


@file_pipeline.task(
    parents=[extract_audio],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.UPLOAD_AUDIO)
async def upload_audio(input: FilePipelineInput, ctx: Context) -> UploadAudioResult:
    """Upload audio to S3/storage, return audio_url."""
    ctx.log(f"upload_audio: starting for transcript_id={input.transcript_id}")

    extract_result = ctx.task_output(extract_audio)
    audio_path = extract_result.audio_path

    from reflector.storage import get_transcripts_storage  # noqa: PLC0415

    storage = get_transcripts_storage()
    if not storage:
        raise ValueError(
            "Storage backend required for file processing. "
            "Configure TRANSCRIPT_STORAGE_* settings."
        )

    with open(audio_path, "rb") as f:
        audio_data = f.read()

    storage_path = f"file_pipeline/{input.transcript_id}/audio.mp3"
    await storage.put_file(storage_path, audio_data)
    audio_url = await storage.get_file_url(storage_path)

    ctx.log(f"upload_audio complete: {audio_url}")
    return UploadAudioResult(audio_url=audio_url, audio_path=audio_path)


@file_pipeline.task(
    parents=[upload_audio],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.TRANSCRIBE)
async def transcribe(input: FilePipelineInput, ctx: Context) -> TranscribeResult:
    """Transcribe the audio file using the configured backend."""
    ctx.log(f"transcribe: starting for transcript_id={input.transcript_id}")

    upload_result = ctx.task_output(upload_audio)
    audio_url = upload_result.audio_url

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            raise ValueError(f"Transcript {input.transcript_id} not found")
        source_language = transcript.source_language

    from reflector.pipelines.transcription_helpers import (  # noqa: PLC0415
        transcribe_file_with_processor,
    )

    result = await transcribe_file_with_processor(audio_url, source_language)

    ctx.log(f"transcribe complete: {len(result.words)} words")
    return TranscribeResult(
        words=[w.model_dump() for w in result.words],
        translation=result.translation,
    )


@file_pipeline.task(
    parents=[upload_audio],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.DIARIZE)
async def diarize(input: FilePipelineInput, ctx: Context) -> DiarizeResult:
    """Diarize the audio file (speaker identification)."""
    ctx.log(f"diarize: starting for transcript_id={input.transcript_id}")

    if not settings.DIARIZATION_BACKEND:
        ctx.log("diarize: diarization disabled, skipping")
        return DiarizeResult(diarization=None)

    upload_result = ctx.task_output(upload_audio)
    audio_url = upload_result.audio_url

    from reflector.processors.file_diarization import (  # noqa: PLC0415
        FileDiarizationInput,
    )
    from reflector.processors.file_diarization_auto import (  # noqa: PLC0415
        FileDiarizationAutoProcessor,
    )

    processor = FileDiarizationAutoProcessor()
    input_data = FileDiarizationInput(audio_url=audio_url)

    result = None

    async def capture_result(diarization_output):
        nonlocal result
        result = diarization_output.diarization

    try:
        processor.on(capture_result)
        await processor.push(input_data)
        await processor.flush()
    except Exception as e:
        logger.error(f"Diarization failed: {e}")
        return DiarizeResult(diarization=None)

    ctx.log(f"diarize complete: {len(result) if result else 0} segments")
    return DiarizeResult(diarization=list(result) if result else None)


@file_pipeline.task(
    parents=[upload_audio],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.GENERATE_WAVEFORM)
async def generate_waveform(input: FilePipelineInput, ctx: Context) -> WaveformResult:
    """Generate audio waveform visualization."""
    ctx.log(f"generate_waveform: starting for transcript_id={input.transcript_id}")

    upload_result = ctx.task_output(upload_audio)
    audio_path = upload_result.audio_path

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptWaveform,
        transcripts_controller,
    )

    waveform = get_audio_waveform(
        path=Path(audio_path), segments_count=WAVEFORM_SEGMENTS
    )

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript:
            transcript.data_path.mkdir(parents=True, exist_ok=True)
            with open(transcript.audio_waveform_filename, "w") as f:
                json.dump(waveform, f)

            waveform_data = TranscriptWaveform(waveform=waveform)
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "WAVEFORM",
                waveform_data,
                logger=logger,
            )

    ctx.log("generate_waveform complete")
    return WaveformResult(waveform_generated=True)


@file_pipeline.task(
    parents=[transcribe, diarize, generate_waveform],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.ASSEMBLE_TRANSCRIPT)
async def assemble_transcript(
    input: FilePipelineInput, ctx: Context
) -> AssembleTranscriptResult:
    """Merge transcription + diarization results."""
    ctx.log(f"assemble_transcript: starting for transcript_id={input.transcript_id}")

    transcribe_result = ctx.task_output(transcribe)
    diarize_result = ctx.task_output(diarize)

    from reflector.processors.transcript_diarization_assembler import (  # noqa: PLC0415
        TranscriptDiarizationAssemblerInput,
        TranscriptDiarizationAssemblerProcessor,
    )
    from reflector.processors.types import (  # noqa: PLC0415
        DiarizationSegment,
        Word,
    )
    from reflector.processors.types import (  # noqa: PLC0415
        Transcript as TranscriptType,
    )

    words = [Word(**w) for w in transcribe_result.words]
    transcript_data = TranscriptType(
        words=words, translation=transcribe_result.translation
    )

    diarization = None
    if diarize_result.diarization:
        diarization = [DiarizationSegment(**s) for s in diarize_result.diarization]

    processor = TranscriptDiarizationAssemblerProcessor()
    assembler_input = TranscriptDiarizationAssemblerInput(
        transcript=transcript_data, diarization=diarization or []
    )

    diarized_transcript = None

    async def capture_result(transcript):
        nonlocal diarized_transcript
        diarized_transcript = transcript

    processor.on(capture_result)
    await processor.push(assembler_input)
    await processor.flush()

    if not diarized_transcript:
        raise ValueError("No diarized transcript captured")

    # Save the assembled transcript events to the database
    async with fresh_db_connection():
        from reflector.db.transcripts import (  # noqa: PLC0415
            TranscriptText,
            transcripts_controller,
        )

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript:
            assembled_text = diarized_transcript.text if diarized_transcript else ""
            assembled_translation = (
                diarized_transcript.translation if diarized_transcript else None
            )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "TRANSCRIPT",
                TranscriptText(text=assembled_text, translation=assembled_translation),
                logger=logger,
            )

    ctx.log("assemble_transcript complete")
    return AssembleTranscriptResult(assembled=True)


@file_pipeline.task(
    parents=[assemble_transcript],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.DETECT_TOPICS)
async def detect_topics(input: FilePipelineInput, ctx: Context) -> TopicsResult:
    """Detect topics from the assembled transcript."""
    ctx.log(f"detect_topics: starting for transcript_id={input.transcript_id}")

    # Re-read the transcript to get the diarized words
    transcribe_result = ctx.task_output(transcribe)
    diarize_result = ctx.task_output(diarize)

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.transcript_diarization_assembler import (  # noqa: PLC0415
        TranscriptDiarizationAssemblerInput,
        TranscriptDiarizationAssemblerProcessor,
    )
    from reflector.processors.types import (  # noqa: PLC0415
        DiarizationSegment,
        Word,
    )
    from reflector.processors.types import (  # noqa: PLC0415
        Transcript as TranscriptType,
    )

    words = [Word(**w) for w in transcribe_result.words]
    transcript_data = TranscriptType(
        words=words, translation=transcribe_result.translation
    )

    diarization = None
    if diarize_result.diarization:
        diarization = [DiarizationSegment(**s) for s in diarize_result.diarization]

    # Re-assemble to get the diarized transcript for topic detection
    processor = TranscriptDiarizationAssemblerProcessor()
    assembler_input = TranscriptDiarizationAssemblerInput(
        transcript=transcript_data, diarization=diarization or []
    )

    diarized_transcript = None

    async def capture_result(transcript):
        nonlocal diarized_transcript
        diarized_transcript = transcript

    processor.on(capture_result)
    await processor.push(assembler_input)
    await processor.flush()

    if not diarized_transcript:
        raise ValueError("No diarized transcript for topic detection")

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            raise ValueError(f"Transcript {input.transcript_id} not found")
        target_language = transcript.target_language

        empty_pipeline = topic_processing.EmptyPipeline(logger=logger)

        async def on_topic_callback(data):
            topic = TranscriptTopic(
                title=data.title,
                summary=data.summary,
                timestamp=data.timestamp,
                transcript=data.transcript.text
                if hasattr(data.transcript, "text")
                else "",
                words=data.transcript.words
                if hasattr(data.transcript, "words")
                else [],
            )
            await transcripts_controller.upsert_topic(transcript, topic)
            await append_event_and_broadcast(
                input.transcript_id, transcript, "TOPIC", topic, logger=logger
            )

        topics = await topic_processing.detect_topics(
            diarized_transcript,
            target_language,
            on_topic_callback=on_topic_callback,
            empty_pipeline=empty_pipeline,
        )

    ctx.log(f"detect_topics complete: {len(topics)} topics")
    return TopicsResult(topics=topics)


@file_pipeline.task(
    parents=[detect_topics],
    execution_timeout=timedelta(seconds=TIMEOUT_TITLE),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.GENERATE_TITLE)
async def generate_title(input: FilePipelineInput, ctx: Context) -> TitleResult:
    """Generate meeting title using LLM."""
    ctx.log(f"generate_title: starting for transcript_id={input.transcript_id}")

    topics_result = ctx.task_output(detect_topics)
    topics = topics_result.topics

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptFinalTitle,
        transcripts_controller,
    )

    empty_pipeline = topic_processing.EmptyPipeline(logger=logger)
    title_result = None

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            raise ValueError(f"Transcript {input.transcript_id} not found")

        async def on_title_callback(data):
            nonlocal title_result
            title_result = data.title
            final_title = TranscriptFinalTitle(title=data.title)
            if not transcript.title:
                await transcripts_controller.update(
                    transcript, {"title": final_title.title}
                )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "FINAL_TITLE",
                final_title,
                logger=logger,
            )

        await topic_processing.generate_title(
            topics,
            on_title_callback=on_title_callback,
            empty_pipeline=empty_pipeline,
            logger=logger,
        )

    ctx.log(f"generate_title complete: '{title_result}'")
    return TitleResult(title=title_result)


@file_pipeline.task(
    parents=[detect_topics],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.GENERATE_SUMMARIES)
async def generate_summaries(input: FilePipelineInput, ctx: Context) -> SummariesResult:
    """Generate long/short summaries and action items."""
    ctx.log(f"generate_summaries: starting for transcript_id={input.transcript_id}")

    topics_result = ctx.task_output(detect_topics)
    topics = topics_result.topics

    from reflector.db.transcripts import (  # noqa: PLC0415
        TranscriptActionItems,
        TranscriptFinalLongSummary,
        TranscriptFinalShortSummary,
        transcripts_controller,
    )

    empty_pipeline = topic_processing.EmptyPipeline(logger=logger)

    async with fresh_db_connection():
        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            raise ValueError(f"Transcript {input.transcript_id} not found")

        async def on_long_summary_callback(data):
            final_long = TranscriptFinalLongSummary(long_summary=data.long_summary)
            await transcripts_controller.update(
                transcript, {"long_summary": final_long.long_summary}
            )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "FINAL_LONG_SUMMARY",
                final_long,
                logger=logger,
            )

        async def on_short_summary_callback(data):
            final_short = TranscriptFinalShortSummary(short_summary=data.short_summary)
            await transcripts_controller.update(
                transcript, {"short_summary": final_short.short_summary}
            )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "FINAL_SHORT_SUMMARY",
                final_short,
                logger=logger,
            )

        async def on_action_items_callback(data):
            action_items = TranscriptActionItems(action_items=data.action_items)
            await transcripts_controller.update(
                transcript, {"action_items": action_items.action_items}
            )
            await append_event_and_broadcast(
                input.transcript_id,
                transcript,
                "ACTION_ITEMS",
                action_items,
                logger=logger,
            )

        await topic_processing.generate_summaries(
            topics,
            transcript,
            on_long_summary_callback=on_long_summary_callback,
            on_short_summary_callback=on_short_summary_callback,
            on_action_items_callback=on_action_items_callback,
            empty_pipeline=empty_pipeline,
            logger=logger,
        )

    ctx.log("generate_summaries complete")
    return SummariesResult(generated=True)


@file_pipeline.task(
    parents=[generate_title, generate_summaries],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=5,
)
@with_error_handling(TaskName.FINALIZE)
async def finalize(input: FilePipelineInput, ctx: Context) -> FinalizeResult:
    """Set transcript status to 'ended' and broadcast."""
    ctx.log("finalize: setting status to 'ended'")

    async with fresh_db_connection():
        await set_status_and_broadcast(input.transcript_id, "ended", logger=logger)

    ctx.log("finalize complete")
    return FinalizeResult(status="COMPLETED")


@file_pipeline.task(
    parents=[finalize],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.CLEANUP_CONSENT, set_error_status=False)
async def cleanup_consent(input: FilePipelineInput, ctx: Context) -> ConsentResult:
    """Check consent and delete audio files if any participant denied."""
    ctx.log(f"cleanup_consent: transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.db.meetings import (  # noqa: PLC0415
            meeting_consent_controller,
            meetings_controller,
        )
        from reflector.db.recordings import recordings_controller  # noqa: PLC0415
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
        from reflector.storage import get_transcripts_storage  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            ctx.log("cleanup_consent: transcript not found")
            return ConsentResult()

        consent_denied = False
        recording = None
        if transcript.recording_id:
            recording = await recordings_controller.get_by_id(transcript.recording_id)
            if recording and recording.meeting_id:
                meeting = await meetings_controller.get_by_id(recording.meeting_id)
                if meeting:
                    consent_denied = await meeting_consent_controller.has_any_denial(
                        meeting.id
                    )

        if not consent_denied:
            ctx.log("cleanup_consent: consent approved, keeping all files")
            return ConsentResult()

        ctx.log("cleanup_consent: consent denied, deleting audio files")

        deletion_errors = []
        if recording and recording.bucket_name:
            keys_to_delete = []
            if recording.track_keys:
                keys_to_delete = recording.track_keys
            elif recording.object_key:
                keys_to_delete = [recording.object_key]

            master_storage = get_transcripts_storage()
            for key in keys_to_delete:
                try:
                    await master_storage.delete_file(key, bucket=recording.bucket_name)
                    ctx.log(f"Deleted recording file: {recording.bucket_name}/{key}")
                except Exception as e:
                    error_msg = f"Failed to delete {key}: {e}"
                    logger.error(error_msg, exc_info=True)
                    deletion_errors.append(error_msg)

        if transcript.audio_location == "storage":
            storage = get_transcripts_storage()
            try:
                await storage.delete_file(transcript.storage_audio_path)
                ctx.log(f"Deleted processed audio: {transcript.storage_audio_path}")
            except Exception as e:
                error_msg = f"Failed to delete processed audio: {e}"
                logger.error(error_msg, exc_info=True)
                deletion_errors.append(error_msg)

        try:
            if (
                hasattr(transcript, "audio_mp3_filename")
                and transcript.audio_mp3_filename
            ):
                transcript.audio_mp3_filename.unlink(missing_ok=True)
            if (
                hasattr(transcript, "audio_wav_filename")
                and transcript.audio_wav_filename
            ):
                transcript.audio_wav_filename.unlink(missing_ok=True)
        except Exception as e:
            error_msg = f"Failed to delete local audio files: {e}"
            logger.error(error_msg, exc_info=True)
            deletion_errors.append(error_msg)

        if deletion_errors:
            logger.warning(
                "[Hatchet] cleanup_consent completed with errors",
                transcript_id=input.transcript_id,
                error_count=len(deletion_errors),
            )
        else:
            await transcripts_controller.update(transcript, {"audio_deleted": True})
            ctx.log("cleanup_consent: all audio deleted successfully")

        return ConsentResult()


@file_pipeline.task(
    parents=[cleanup_consent],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.POST_ZULIP, set_error_status=False)
async def post_zulip(input: FilePipelineInput, ctx: Context) -> ZulipResult:
    """Post notification to Zulip."""
    ctx.log(f"post_zulip: transcript_id={input.transcript_id}")

    if not settings.ZULIP_REALM:
        ctx.log("post_zulip skipped (Zulip not configured)")
        return ZulipResult(zulip_message_id=None, skipped=True)

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
        from reflector.zulip import post_transcript_notification  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript:
            message_id = await post_transcript_notification(transcript)
            ctx.log(f"post_zulip complete: zulip_message_id={message_id}")
        else:
            message_id = None

    return ZulipResult(zulip_message_id=message_id)


@file_pipeline.task(
    parents=[cleanup_consent],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.SEND_WEBHOOK, set_error_status=False)
async def send_webhook(input: FilePipelineInput, ctx: Context) -> WebhookResult:
    """Send completion webhook to external service."""
    ctx.log(f"send_webhook: transcript_id={input.transcript_id}")

    if not input.room_id:
        ctx.log("send_webhook skipped (no room_id)")
        return WebhookResult(webhook_sent=False, skipped=True)

    async with fresh_db_connection():
        from reflector.db.rooms import rooms_controller  # noqa: PLC0415
        from reflector.utils.webhook import (  # noqa: PLC0415
            fetch_transcript_webhook_payload,
            send_webhook_request,
        )

        room = await rooms_controller.get_by_id(input.room_id)
        if not room or not room.webhook_url:
            ctx.log("send_webhook skipped (no webhook_url configured)")
            return WebhookResult(webhook_sent=False, skipped=True)

        payload = await fetch_transcript_webhook_payload(
            transcript_id=input.transcript_id,
            room_id=input.room_id,
        )

        if isinstance(payload, str):
            ctx.log(f"send_webhook skipped (could not build payload): {payload}")
            return WebhookResult(webhook_sent=False, skipped=True)

        import httpx  # noqa: PLC0415

        try:
            response = await send_webhook_request(
                url=room.webhook_url,
                payload=payload,
                event_type="transcript.completed",
                webhook_secret=room.webhook_secret,
                timeout=30.0,
            )
            ctx.log(f"send_webhook complete: status_code={response.status_code}")
            return WebhookResult(webhook_sent=True, response_code=response.status_code)
        except httpx.HTTPStatusError as e:
            ctx.log(f"send_webhook failed (HTTP {e.response.status_code}), continuing")
            return WebhookResult(
                webhook_sent=False, response_code=e.response.status_code
            )
        except (httpx.ConnectError, httpx.TimeoutException) as e:
            ctx.log(f"send_webhook failed ({e}), continuing")
            return WebhookResult(webhook_sent=False)
        except Exception as e:
            ctx.log(f"send_webhook unexpected error: {e}")
            return WebhookResult(webhook_sent=False)


@file_pipeline.task(
    parents=[cleanup_consent],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.SEND_EMAIL, set_error_status=False)
async def send_email(input: FilePipelineInput, ctx: Context) -> EmailResult:
    """Send transcript email to collected recipients."""
    ctx.log(f"send_email: transcript_id={input.transcript_id}")

    if not is_email_configured():
        ctx.log("send_email skipped (SMTP not configured)")
        return EmailResult(skipped=True)

    async with fresh_db_connection():
        from reflector.db.meetings import meetings_controller  # noqa: PLC0415
        from reflector.db.recordings import recordings_controller  # noqa: PLC0415
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            ctx.log("send_email skipped (transcript not found)")
            return EmailResult(skipped=True)

        # Try transcript.meeting_id first, then fall back to recording.meeting_id
        meeting = None
        if transcript.meeting_id:
            meeting = await meetings_controller.get_by_id(transcript.meeting_id)
        if not meeting and transcript.recording_id:
            recording = await recordings_controller.get_by_id(transcript.recording_id)
            if recording and recording.meeting_id:
                meeting = await meetings_controller.get_by_id(recording.meeting_id)

        recipients = (
            list(meeting.email_recipients)
            if meeting and meeting.email_recipients
            else []
        )

        # Also check room-level email
        from reflector.db.rooms import rooms_controller  # noqa: PLC0415

        if transcript.room_id:
            room = await rooms_controller.get_by_id(transcript.room_id)
            if room and room.email_transcript_to:
                if room.email_transcript_to not in recipients:
                    recipients.append(room.email_transcript_to)

        if not recipients:
            ctx.log("send_email skipped (no email recipients)")
            return EmailResult(skipped=True)

        # For room-level emails, do NOT change share_mode (only set public if meeting had recipients)
        if meeting and meeting.email_recipients:
            await transcripts_controller.update(transcript, {"share_mode": "public"})

        count = await send_transcript_email(recipients, transcript)
        ctx.log(f"send_email complete: sent {count} emails")

        return EmailResult(emails_sent=count)


# --- On failure handler ---


async def on_workflow_failure(input: FilePipelineInput, ctx: Context) -> None:
    """Set transcript status to 'error' only if not already 'ended'."""
    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript and transcript.status == "ended":
            logger.info(
                "[Hatchet] FilePipeline on_workflow_failure: transcript already ended, skipping error status",
                transcript_id=input.transcript_id,
            )
            ctx.log(
                "on_workflow_failure: transcript already ended, skipping error status"
            )
            return
        await set_workflow_error_status(input.transcript_id)


@file_pipeline.on_failure_task()
async def _register_on_workflow_failure(input: FilePipelineInput, ctx: Context) -> None:
    await on_workflow_failure(input, ctx)
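For orientation, a DAG defined this way is normally started from application code rather than run directly. A hedged sketch of enqueuing one run; only `aio_run_many` appears in this diff, so the `aio_run` call below is an assumption about the hatchet_sdk API rather than something the source confirms:

    # Hedged sketch: enqueue a FilePipeline run for a single transcript.
    # Assumes hatchet_sdk workflow objects expose aio_run and that a worker
    # has registered file_pipeline.
    from reflector.hatchet.workflows.file_pipeline import (
        FilePipelineInput,
        file_pipeline,
    )


    async def start_file_processing(transcript_id: str, room_id: str | None = None) -> None:
        await file_pipeline.aio_run(
            FilePipelineInput(transcript_id=transcript_id, room_id=room_id)
        )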
454
server/reflector/hatchet/workflows/live_post_pipeline.py
Normal file
@@ -0,0 +1,454 @@
"""
Hatchet workflow: LivePostProcessingPipeline

Post-processing pipeline for live WebRTC meetings.
Triggered after a live meeting ends. Orchestrates:
Left branch: waveform → convert_mp3 → upload_mp3 → remove_upload → diarize → cleanup_consent
Right branch: generate_title (parallel with left branch)
Fan-in: final_summaries → post_zulip → send_webhook

Note: This file uses deferred imports (inside functions/tasks) intentionally.
Hatchet workers run in forked processes; fresh imports per task ensure DB connections
are not shared across forks, avoiding connection pooling issues.
"""

from datetime import timedelta

from hatchet_sdk import Context
from pydantic import BaseModel

from reflector.email import is_email_configured, send_transcript_email
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import (
    TIMEOUT_HEAVY,
    TIMEOUT_MEDIUM,
    TIMEOUT_SHORT,
    TIMEOUT_TITLE,
    TaskName,
)
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
    fresh_db_connection,
    set_workflow_error_status,
    with_error_handling,
)
from reflector.hatchet.workflows.models import (
    ConsentResult,
    EmailResult,
    TitleResult,
    WaveformResult,
    WebhookResult,
    ZulipResult,
)
from reflector.logger import logger
from reflector.settings import settings


class LivePostPipelineInput(BaseModel):
    transcript_id: str
    room_id: str | None = None


# --- Result models specific to live post pipeline ---


class ConvertMp3Result(BaseModel):
    converted: bool


class UploadMp3Result(BaseModel):
    uploaded: bool


class RemoveUploadResult(BaseModel):
    removed: bool


class DiarizeResult(BaseModel):
    diarized: bool


class FinalSummariesResult(BaseModel):
    generated: bool


hatchet = HatchetClientManager.get_client()

live_post_pipeline = hatchet.workflow(
    name="LivePostProcessingPipeline", input_validator=LivePostPipelineInput
)


@live_post_pipeline.task(
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.WAVEFORM)
async def waveform(input: LivePostPipelineInput, ctx: Context) -> WaveformResult:
    """Generate waveform visualization from recorded audio."""
    ctx.log(f"waveform: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            PipelineMainWaveform,
        )

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            raise ValueError(f"Transcript {input.transcript_id} not found")

        runner = PipelineMainWaveform(transcript_id=transcript.id)
        await runner.run()

    ctx.log("waveform complete")
    return WaveformResult(waveform_generated=True)


@live_post_pipeline.task(
    execution_timeout=timedelta(seconds=TIMEOUT_TITLE),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.GENERATE_TITLE)
async def generate_title(input: LivePostPipelineInput, ctx: Context) -> TitleResult:
    """Generate meeting title from topics (runs in parallel with audio chain)."""
    ctx.log(f"generate_title: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            PipelineMainTitle,
        )

        runner = PipelineMainTitle(transcript_id=input.transcript_id)
        await runner.run()

    ctx.log("generate_title complete")
    return TitleResult(title=None)


@live_post_pipeline.task(
    parents=[waveform],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.CONVERT_MP3)
async def convert_mp3(input: LivePostPipelineInput, ctx: Context) -> ConvertMp3Result:
    """Convert WAV recording to MP3."""
    ctx.log(f"convert_mp3: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            pipeline_convert_to_mp3,
        )

        await pipeline_convert_to_mp3(transcript_id=input.transcript_id)

    ctx.log("convert_mp3 complete")
    return ConvertMp3Result(converted=True)


@live_post_pipeline.task(
    parents=[convert_mp3],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.UPLOAD_MP3)
async def upload_mp3(input: LivePostPipelineInput, ctx: Context) -> UploadMp3Result:
    """Upload MP3 to external storage."""
    ctx.log(f"upload_mp3: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            pipeline_upload_mp3,
        )

        await pipeline_upload_mp3(transcript_id=input.transcript_id)

    ctx.log("upload_mp3 complete")
    return UploadMp3Result(uploaded=True)


@live_post_pipeline.task(
    parents=[upload_mp3],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=5,
)
@with_error_handling(TaskName.REMOVE_UPLOAD)
async def remove_upload(
    input: LivePostPipelineInput, ctx: Context
) -> RemoveUploadResult:
    """Remove the original upload file."""
    ctx.log(f"remove_upload: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            pipeline_remove_upload,
        )

        await pipeline_remove_upload(transcript_id=input.transcript_id)

    ctx.log("remove_upload complete")
    return RemoveUploadResult(removed=True)


@live_post_pipeline.task(
    parents=[remove_upload],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.DIARIZE)
async def diarize(input: LivePostPipelineInput, ctx: Context) -> DiarizeResult:
    """Run diarization on the recorded audio."""
    ctx.log(f"diarize: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            pipeline_diarization,
        )

        await pipeline_diarization(transcript_id=input.transcript_id)

    ctx.log("diarize complete")
    return DiarizeResult(diarized=True)


@live_post_pipeline.task(
    parents=[diarize],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.CLEANUP_CONSENT, set_error_status=False)
async def cleanup_consent(input: LivePostPipelineInput, ctx: Context) -> ConsentResult:
    """Check consent and delete audio files if any participant denied."""
    ctx.log(f"cleanup_consent: transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            cleanup_consent as _cleanup_consent,
        )

        await _cleanup_consent(transcript_id=input.transcript_id)

    ctx.log("cleanup_consent complete")
    return ConsentResult()


@live_post_pipeline.task(
    parents=[cleanup_consent, generate_title],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.FINAL_SUMMARIES)
async def final_summaries(
    input: LivePostPipelineInput, ctx: Context
) -> FinalSummariesResult:
    """Generate final summaries (fan-in after audio chain + title)."""
    ctx.log(f"final_summaries: starting for transcript_id={input.transcript_id}")

    async with fresh_db_connection():
        from reflector.pipelines.main_live_pipeline import (  # noqa: PLC0415
            pipeline_summaries,
        )

        await pipeline_summaries(transcript_id=input.transcript_id)

    ctx.log("final_summaries complete")
    return FinalSummariesResult(generated=True)


@live_post_pipeline.task(
    parents=[final_summaries],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.POST_ZULIP, set_error_status=False)
async def post_zulip(input: LivePostPipelineInput, ctx: Context) -> ZulipResult:
    """Post notification to Zulip."""
    ctx.log(f"post_zulip: transcript_id={input.transcript_id}")

    if not settings.ZULIP_REALM:
        ctx.log("post_zulip skipped (Zulip not configured)")
        return ZulipResult(zulip_message_id=None, skipped=True)

    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
        from reflector.zulip import post_transcript_notification  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript:
            message_id = await post_transcript_notification(transcript)
|
||||
ctx.log(f"post_zulip complete: zulip_message_id={message_id}")
|
||||
else:
|
||||
message_id = None
|
||||
|
||||
return ZulipResult(zulip_message_id=message_id)
|
||||
|
||||
|
||||
@live_post_pipeline.task(
|
||||
parents=[final_summaries],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.SEND_WEBHOOK, set_error_status=False)
|
||||
async def send_webhook(input: LivePostPipelineInput, ctx: Context) -> WebhookResult:
|
||||
"""Send completion webhook to external service."""
|
||||
ctx.log(f"send_webhook: transcript_id={input.transcript_id}")
|
||||
|
||||
if not input.room_id:
|
||||
ctx.log("send_webhook skipped (no room_id)")
|
||||
return WebhookResult(webhook_sent=False, skipped=True)
|
||||
|
||||
async with fresh_db_connection():
|
||||
from reflector.db.rooms import rooms_controller # noqa: PLC0415
|
||||
from reflector.utils.webhook import ( # noqa: PLC0415
|
||||
fetch_transcript_webhook_payload,
|
||||
send_webhook_request,
|
||||
)
|
||||
|
||||
room = await rooms_controller.get_by_id(input.room_id)
|
||||
if not room or not room.webhook_url:
|
||||
ctx.log("send_webhook skipped (no webhook_url configured)")
|
||||
return WebhookResult(webhook_sent=False, skipped=True)
|
||||
|
||||
payload = await fetch_transcript_webhook_payload(
|
||||
transcript_id=input.transcript_id,
|
||||
room_id=input.room_id,
|
||||
)
|
||||
|
||||
if isinstance(payload, str):
|
||||
ctx.log(f"send_webhook skipped (could not build payload): {payload}")
|
||||
return WebhookResult(webhook_sent=False, skipped=True)
|
||||
|
||||
import httpx # noqa: PLC0415
|
||||
|
||||
try:
|
||||
response = await send_webhook_request(
|
||||
url=room.webhook_url,
|
||||
payload=payload,
|
||||
event_type="transcript.completed",
|
||||
webhook_secret=room.webhook_secret,
|
||||
timeout=30.0,
|
||||
)
|
||||
ctx.log(f"send_webhook complete: status_code={response.status_code}")
|
||||
return WebhookResult(webhook_sent=True, response_code=response.status_code)
|
||||
except httpx.HTTPStatusError as e:
|
||||
ctx.log(f"send_webhook failed (HTTP {e.response.status_code}), continuing")
|
||||
return WebhookResult(
|
||||
webhook_sent=False, response_code=e.response.status_code
|
||||
)
|
||||
except (httpx.ConnectError, httpx.TimeoutException) as e:
|
||||
ctx.log(f"send_webhook failed ({e}), continuing")
|
||||
return WebhookResult(webhook_sent=False)
|
||||
except Exception as e:
|
||||
ctx.log(f"send_webhook unexpected error: {e}")
|
||||
return WebhookResult(webhook_sent=False)
|
||||
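The task above only shows the sender; nothing in this diff reveals how send_webhook_request signs the request. Purely as a hedged sketch of what a transcript.completed receiver might look like — the X-Reflector-Signature header name and the hex HMAC-SHA256-of-body scheme are assumptions, not confirmed by this code:

    import hashlib
    import hmac

    from fastapi import FastAPI, Header, HTTPException, Request

    app = FastAPI()
    WEBHOOK_SECRET = "value-of-room.webhook_secret"  # hypothetical shared secret


    @app.post("/hooks/reflector")
    async def reflector_hook(request: Request, x_reflector_signature: str = Header("")):
        body = await request.body()
        # Assumed scheme: hex HMAC-SHA256 of the raw request body
        expected = hmac.new(WEBHOOK_SECRET.encode(), body, hashlib.sha256).hexdigest()
        if not hmac.compare_digest(expected, x_reflector_signature):
            raise HTTPException(status_code=401, detail="bad signature")
        return {"ok": True}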

@live_post_pipeline.task(
    parents=[final_summaries],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=15,
)
@with_error_handling(TaskName.SEND_EMAIL, set_error_status=False)
async def send_email(input: LivePostPipelineInput, ctx: Context) -> EmailResult:
    """Send transcript email to collected recipients."""
    ctx.log(f"send_email: transcript_id={input.transcript_id}")

    if not is_email_configured():
        ctx.log("send_email skipped (SMTP not configured)")
        return EmailResult(skipped=True)

    async with fresh_db_connection():
        from reflector.db.meetings import meetings_controller  # noqa: PLC0415
        from reflector.db.recordings import recordings_controller  # noqa: PLC0415
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if not transcript:
            ctx.log("send_email skipped (transcript not found)")
            return EmailResult(skipped=True)

        meeting = None
        if transcript.meeting_id:
            meeting = await meetings_controller.get_by_id(transcript.meeting_id)
        if not meeting and transcript.recording_id:
            recording = await recordings_controller.get_by_id(transcript.recording_id)
            if recording and recording.meeting_id:
                meeting = await meetings_controller.get_by_id(recording.meeting_id)

        recipients = (
            list(meeting.email_recipients)
            if meeting and meeting.email_recipients
            else []
        )

        # Also check room-level email
        from reflector.db.rooms import rooms_controller  # noqa: PLC0415

        if transcript.room_id:
            room = await rooms_controller.get_by_id(transcript.room_id)
            if room and room.email_transcript_to:
                if room.email_transcript_to not in recipients:
                    recipients.append(room.email_transcript_to)

        if not recipients:
            ctx.log("send_email skipped (no email recipients)")
            return EmailResult(skipped=True)

        # For room-level emails, do NOT change share_mode (only set public if meeting had recipients)
        if meeting and meeting.email_recipients:
            await transcripts_controller.update(transcript, {"share_mode": "public"})

        count = await send_transcript_email(recipients, transcript)
        ctx.log(f"send_email complete: sent {count} emails")

    return EmailResult(emails_sent=count)


# --- On failure handler ---


async def on_workflow_failure(input: LivePostPipelineInput, ctx: Context) -> None:
    """Set transcript status to 'error' only if not already 'ended'."""
    async with fresh_db_connection():
        from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415

        transcript = await transcripts_controller.get_by_id(input.transcript_id)
        if transcript and transcript.status == "ended":
            logger.info(
                "[Hatchet] LivePostProcessingPipeline on_workflow_failure: transcript already ended",
                transcript_id=input.transcript_id,
            )
            ctx.log(
                "on_workflow_failure: transcript already ended, skipping error status"
            )
            return
        await set_workflow_error_status(input.transcript_id)


@live_post_pipeline.on_failure_task()
async def _register_on_workflow_failure(
    input: LivePostPipelineInput, ctx: Context
) -> None:
    await on_workflow_failure(input, ctx)
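For orientation, the parents= declarations above resolve to the DAG sketched below; the trigger helper reuses the exact HatchetClientManager.start_workflow call that pipeline_post makes later in this diff (the name trigger_live_post itself is illustrative, not from the source):

    # DAG reconstructed from the parents= arguments:
    #   waveform -> convert_mp3 -> upload_mp3 -> remove_upload -> diarize -> cleanup_consent
    #   generate_title                        (no parents; runs in parallel with the audio chain)
    #   {cleanup_consent, generate_title} -> final_summaries
    #   final_summaries -> post_zulip, send_webhook, send_email   (independent fan-out)

    from reflector.hatchet.client import HatchetClientManager


    async def trigger_live_post(transcript_id: str, room_id: str | None = None) -> None:
        # Same workflow name and input shape as pipeline_post below.
        await HatchetClientManager.start_workflow(
            "LivePostProcessingPipeline",
            {"transcript_id": transcript_id, "room_id": room_id},
            additional_metadata={"transcript_id": transcript_id},
        )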
@@ -170,3 +170,10 @@ class WebhookResult(BaseModel):
     webhook_sent: bool
     skipped: bool = False
     response_code: int | None = None
+
+
+class EmailResult(BaseModel):
+    """Result from send_email task."""
+
+    emails_sent: int = 0
+    skipped: bool = False
@@ -24,6 +24,7 @@ class PaddingInput(BaseModel):
     s3_key: str
     bucket_name: str
     transcript_id: str
+    source_platform: str = "daily"


 hatchet = HatchetClientManager.get_client()
@@ -33,7 +34,12 @@ padding_workflow = hatchet.workflow(
 )


-@padding_workflow.task(execution_timeout=timedelta(seconds=TIMEOUT_AUDIO), retries=3)
+@padding_workflow.task(
+    execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
+    retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
+)
 async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
     """Pad audio track with silence based on WebM container start_time."""
     ctx.log(f"pad_track: track {input.track_index}, s3_key={input.s3_key}")
@@ -45,20 +51,14 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
     )

     try:
-        # Create fresh storage instance to avoid aioboto3 fork issues
-        from reflector.settings import settings  # noqa: PLC0415
-        from reflector.storage.storage_aws import AwsStorage  # noqa: PLC0415
-
-        # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
-        storage = AwsStorage(
-            aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
-            aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
-            aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
-            aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
-            aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
-        )
+        from reflector.storage import (  # noqa: PLC0415
+            get_source_storage,
+            get_transcripts_storage,
+        )

-        source_url = await storage.get_file_url(
+        # Source reads: use platform-specific credentials
+        source_storage = get_source_storage(input.source_platform)
+        source_url = await source_storage.get_file_url(
             input.s3_key,
             operation="get_object",
             expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
@@ -96,52 +96,28 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:

     storage_path = f"file_pipeline_hatchet/{input.transcript_id}/tracks/padded_{input.track_index}.webm"

-    # Presign PUT URL for output (Modal will upload directly)
-    output_url = await storage.get_file_url(
+    # Output writes: use transcript storage (our own bucket)
+    output_storage = get_transcripts_storage()
+    output_url = await output_storage.get_file_url(
         storage_path,
         operation="put_object",
         expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
     )

-    import httpx  # noqa: PLC0415
-
-    from reflector.processors.audio_padding_modal import (  # noqa: PLC0415
-        AudioPaddingModalProcessor,
+    from reflector.processors.audio_padding_auto import (  # noqa: PLC0415
+        AudioPaddingAutoProcessor,
     )

-    try:
-        processor = AudioPaddingModalProcessor()
-        result = await processor.pad_track(
-            track_url=source_url,
-            output_url=output_url,
-            start_time_seconds=start_time_seconds,
-            track_index=input.track_index,
-        )
-        file_size = result.size
-
-        ctx.log(f"pad_track: Modal returned size={file_size}")
-    except httpx.HTTPStatusError as e:
-        error_detail = e.response.text if hasattr(e.response, "text") else str(e)
-        logger.error(
-            "[Hatchet] Modal padding HTTP error",
-            transcript_id=input.transcript_id,
-            track_index=input.track_index,
-            status_code=e.response.status_code if hasattr(e, "response") else None,
-            error=error_detail,
-            exc_info=True,
-        )
-        raise Exception(
-            f"Modal padding failed: HTTP {e.response.status_code}"
-        ) from e
-    except httpx.TimeoutException as e:
-        logger.error(
-            "[Hatchet] Modal padding timeout",
-            transcript_id=input.transcript_id,
-            track_index=input.track_index,
-            error=str(e),
-            exc_info=True,
-        )
-        raise Exception("Modal padding timeout") from e
+    processor = AudioPaddingAutoProcessor()
+    result = await processor.pad_track(
+        track_url=source_url,
+        output_url=output_url,
+        start_time_seconds=start_time_seconds,
+        track_index=input.track_index,
+    )
+    file_size = result.size
+
+    ctx.log(f"pad_track: padding returned size={file_size}")

     logger.info(
         "[Hatchet] pad_track complete",
@@ -13,7 +13,7 @@ from hatchet_sdk.rate_limit import RateLimit
 from pydantic import BaseModel

 from reflector.hatchet.client import HatchetClientManager
-from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, TIMEOUT_MEDIUM
+from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, TIMEOUT_HEAVY
 from reflector.hatchet.workflows.models import SubjectSummaryResult
 from reflector.logger import logger
 from reflector.processors.summary.prompts import (
@@ -41,8 +41,10 @@ subject_workflow = hatchet.workflow(


 @subject_workflow.task(
-    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
-    retries=3,
+    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
+    retries=5,
+    backoff_factor=2.0,
+    backoff_max_seconds=60,
     rate_limits=[RateLimit(static_key=LLM_RATE_LIMIT_KEY, units=2)],
 )
 async def generate_detailed_summary(
@@ -50,7 +50,9 @@ topic_chunk_workflow = hatchet.workflow(

 @topic_chunk_workflow.task(
     execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
-    retries=3,
+    retries=5,
+    backoff_factor=2.0,
+    backoff_max_seconds=60,
     rate_limits=[RateLimit(static_key=LLM_RATE_LIMIT_KEY, units=1)],
 )
 async def detect_chunk_topic(input: TopicChunkInput, ctx: Context) -> TopicChunkResult:
@@ -36,6 +36,7 @@ class TrackInput(BaseModel):
     bucket_name: str
     transcript_id: str
     language: str = "en"
+    source_platform: str = "daily"


 hatchet = HatchetClientManager.get_client()
@@ -43,7 +44,12 @@ hatchet = HatchetClientManager.get_client()
 track_workflow = hatchet.workflow(name="TrackProcessing", input_validator=TrackInput)


-@track_workflow.task(execution_timeout=timedelta(seconds=TIMEOUT_AUDIO), retries=3)
+@track_workflow.task(
+    execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
+    retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
+)
 async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
     """Pad single audio track with silence for alignment.
@@ -59,20 +65,14 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
     )

     try:
-        # Create fresh storage instance to avoid aioboto3 fork issues
-        # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
-        from reflector.settings import settings  # noqa: PLC0415
-        from reflector.storage.storage_aws import AwsStorage  # noqa: PLC0415
-
-        storage = AwsStorage(
-            aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
-            aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
-            aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
-            aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
-            aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
-        )
+        from reflector.storage import (  # noqa: PLC0415
+            get_source_storage,
+            get_transcripts_storage,
+        )

-        source_url = await storage.get_file_url(
+        # Source reads: use platform-specific credentials
+        source_storage = get_source_storage(input.source_platform)
+        source_url = await source_storage.get_file_url(
             input.s3_key,
             operation="get_object",
             expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
@@ -99,18 +99,19 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:

     storage_path = f"file_pipeline_hatchet/{input.transcript_id}/tracks/padded_{input.track_index}.webm"

-    # Presign PUT URL for output (Modal uploads directly)
-    output_url = await storage.get_file_url(
+    # Output writes: use transcript storage (our own bucket)
+    output_storage = get_transcripts_storage()
+    output_url = await output_storage.get_file_url(
         storage_path,
         operation="put_object",
         expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
     )

-    from reflector.processors.audio_padding_modal import (  # noqa: PLC0415
-        AudioPaddingModalProcessor,
+    from reflector.processors.audio_padding_auto import (  # noqa: PLC0415
+        AudioPaddingAutoProcessor,
     )

-    processor = AudioPaddingModalProcessor()
+    processor = AudioPaddingAutoProcessor()
     result = await processor.pad_track(
         track_url=source_url,
         output_url=output_url,
@@ -141,7 +142,11 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:


 @track_workflow.task(
-    parents=[pad_track], execution_timeout=timedelta(seconds=TIMEOUT_HEAVY), retries=3
+    parents=[pad_track],
+    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
+    retries=3,
+    backoff_factor=2.0,
+    backoff_max_seconds=30,
 )
 async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackResult:
     """Transcribe audio track using GPU (Modal.com) or local Whisper."""
@@ -161,18 +166,18 @@ async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackResult:
         raise ValueError("Missing padded_key from pad_track")

     # Presign URL on demand (avoids stale URLs on workflow replay)
-    # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
-    from reflector.settings import settings  # noqa: PLC0415
-    from reflector.storage.storage_aws import AwsStorage  # noqa: PLC0415
-
-    storage = AwsStorage(
-        aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
-        aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
-        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
-        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
-        aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
-    )
+    from reflector.storage import (  # noqa: PLC0415
+        get_source_storage,
+        get_transcripts_storage,
+    )

+    # If bucket_name is set, file is still in the platform's source bucket (no padding applied).
+    # If bucket_name is None, padded file was written to our transcript storage.
+    if bucket_name:
+        storage = get_source_storage(input.source_platform)
+    else:
+        storage = get_transcripts_storage()

     audio_url = await storage.get_file_url(
         padded_key,
         operation="get_object",
@@ -65,10 +65,25 @@ class LLM:
     async def get_response(
         self, prompt: str, texts: list[str], tone_name: str | None = None
     ) -> str:
-        """Get a text response using TreeSummarize for non-function-calling models"""
-        summarizer = TreeSummarize(verbose=False)
-        response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
-        return str(response).strip()
+        """Get a text response using TreeSummarize for non-function-calling models.
+
+        Uses the same retry() wrapper as get_structured_response for transient
+        network errors (connection, timeout, OSError) with exponential backoff.
+        """
+
+        async def _call():
+            summarizer = TreeSummarize(verbose=False)
+            response = await summarizer.aget_response(
+                prompt, texts, tone_name=tone_name
+            )
+            return str(response).strip()
+
+        return await retry(_call)(
+            retry_attempts=3,
+            retry_backoff_interval=1.0,
+            retry_backoff_max=30.0,
+            retry_ignore_exc_types=(ConnectionError, TimeoutError, OSError),
+        )

     async def get_structured_response(
         self,
@@ -17,7 +17,7 @@ from contextlib import asynccontextmanager
 from typing import Generic

 import av
-from celery import chord, current_task, group, shared_task
+from celery import current_task, shared_task
 from pydantic import BaseModel
 from structlog import BoundLogger as Logger

@@ -397,7 +397,9 @@ class PipelineMainLive(PipelineMainBase):
         # when the pipeline ends, connect to the post pipeline
         logger.info("Pipeline main live ended", transcript_id=self.transcript_id)
         logger.info("Scheduling pipeline main post", transcript_id=self.transcript_id)
-        pipeline_post(transcript_id=self.transcript_id)
+        transcript = await transcripts_controller.get_by_id(self.transcript_id)
+        room_id = transcript.room_id if transcript else None
+        await pipeline_post(transcript_id=self.transcript_id, room_id=room_id)


 class PipelineMainDiarization(PipelineMainBase[AudioDiarizationInput]):
@@ -792,29 +794,20 @@ async def task_pipeline_post_to_zulip(*, transcript_id: str):
     await pipeline_post_to_zulip(transcript_id=transcript_id)


-def pipeline_post(*, transcript_id: str):
+async def pipeline_post(*, transcript_id: str, room_id: str | None = None):
     """
-    Run the post pipeline
+    Run the post pipeline via Hatchet.
     """
-    chain_mp3_and_diarize = (
-        task_pipeline_waveform.si(transcript_id=transcript_id)
-        | task_pipeline_convert_to_mp3.si(transcript_id=transcript_id)
-        | task_pipeline_upload_mp3.si(transcript_id=transcript_id)
-        | task_pipeline_remove_upload.si(transcript_id=transcript_id)
-        | task_pipeline_diarization.si(transcript_id=transcript_id)
-        | task_cleanup_consent.si(transcript_id=transcript_id)
-    )
-    chain_title_preview = task_pipeline_title.si(transcript_id=transcript_id)
-    chain_final_summaries = task_pipeline_final_summaries.si(
-        transcript_id=transcript_id
-    )
+    from reflector.hatchet.client import HatchetClientManager  # noqa: PLC0415

-    chain = chord(
-        group(chain_mp3_and_diarize, chain_title_preview),
-        chain_final_summaries,
-    ) | task_pipeline_post_to_zulip.si(transcript_id=transcript_id)
-
-    return chain.delay()
+    await HatchetClientManager.start_workflow(
+        "LivePostProcessingPipeline",
+        {
+            "transcript_id": str(transcript_id),
+            "room_id": str(room_id) if room_id else None,
+        },
+        additional_metadata={"transcript_id": str(transcript_id)},
+    )
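One call-site consequence of this Celery-to-Hatchet switch worth flagging: pipeline_post changed from a synchronous function into a coroutine, so every caller must now await it (as the PipelineMainLive hunk above does):

    # Before (Celery): synchronous, fire-and-forget, returned chain.delay()
    # pipeline_post(transcript_id=tid)
    #
    # After (Hatchet): coroutine, must be awaited, and accepts room_id
    # await pipeline_post(transcript_id=tid, room_id=room_id)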
@get_transcript
@@ -4,6 +4,10 @@ from .audio_diarization_auto import AudioDiarizationAutoProcessor  # noqa: F401
 from .audio_downscale import AudioDownscaleProcessor  # noqa: F401
 from .audio_file_writer import AudioFileWriterProcessor  # noqa: F401
 from .audio_merge import AudioMergeProcessor  # noqa: F401
+from .audio_mixdown import AudioMixdownProcessor  # noqa: F401
+from .audio_mixdown_auto import AudioMixdownAutoProcessor  # noqa: F401
+from .audio_padding import AudioPaddingProcessor  # noqa: F401
+from .audio_padding_auto import AudioPaddingAutoProcessor  # noqa: F401
 from .audio_transcript import AudioTranscriptProcessor  # noqa: F401
 from .audio_transcript_auto import AudioTranscriptAutoProcessor  # noqa: F401
 from .base import (  # noqa: F401
86 server/reflector/processors/_audio_download.py Normal file
@@ -0,0 +1,86 @@
"""
Shared audio download utility for local processors.

Downloads audio from a URL to a temporary file for in-process ML inference.
"""

import asyncio
import os
import tempfile
from pathlib import Path

import requests

from reflector.logger import logger

S3_TIMEOUT = 60


async def download_audio_to_temp(url: str) -> Path:
    """Download audio from URL to a temporary file.

    The caller is responsible for deleting the temp file after use.

    Args:
        url: Presigned URL or public URL to download audio from.

    Returns:
        Path to the downloaded temporary file.
    """
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, _download_blocking, url)


def _download_blocking(url: str) -> Path:
    """Blocking download implementation."""
    log = logger.bind(url=url[:80])
    log.info("Downloading audio to temp file")

    response = requests.get(url, stream=True, timeout=S3_TIMEOUT)
    response.raise_for_status()

    # Determine extension from content-type or URL
    ext = _detect_extension(url, response.headers.get("content-type", ""))

    fd, tmp_path = tempfile.mkstemp(suffix=ext)
    try:
        total_bytes = 0
        with os.fdopen(fd, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
                    total_bytes += len(chunk)
        log.info("Audio downloaded", bytes=total_bytes, path=tmp_path)
        return Path(tmp_path)
    except Exception:
        # Clean up on failure
        try:
            os.unlink(tmp_path)
        except OSError:
            pass
        raise


def _detect_extension(url: str, content_type: str) -> str:
    """Detect audio file extension from URL or content-type."""
    # Try URL path first
    path = url.split("?")[0]  # Strip query params
    for ext in (".wav", ".mp3", ".mp4", ".m4a", ".webm", ".ogg", ".flac"):
        if path.lower().endswith(ext):
            return ext

    # Try content-type
    ct_map = {
        "audio/wav": ".wav",
        "audio/x-wav": ".wav",
        "audio/mpeg": ".mp3",
        "audio/mp4": ".m4a",
        "audio/webm": ".webm",
        "audio/ogg": ".ogg",
        "audio/flac": ".flac",
    }
    for ct, ext in ct_map.items():
        if ct in content_type.lower():
            return ext

    return ".audio"
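A few illustrative cases for _detect_extension, derived directly from the mapping above (note that presigned query strings are stripped before the suffix check):

    assert _detect_extension("https://b.s3.amazonaws.com/t/a.webm?X-Amz-Expires=600", "") == ".webm"
    assert _detect_extension("https://host/stream", "audio/mpeg") == ".mp3"
    assert _detect_extension("https://host/blob", "application/octet-stream") == ".audio"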
76 server/reflector/processors/_marian_translator_service.py Normal file
@@ -0,0 +1,76 @@
"""
MarianMT translation service.

Singleton service that loads HuggingFace MarianMT translation models
and reuses them across all MarianMT translator processor instances.

Ported from gpu/self_hosted/app/services/translator.py for in-process use.
"""

import logging
import threading

from transformers import MarianMTModel, MarianTokenizer, pipeline

logger = logging.getLogger(__name__)


class MarianTranslatorService:
    """MarianMT text translation service for in-process use."""

    def __init__(self):
        self._pipeline = None
        self._current_pair = None
        self._lock = threading.Lock()

    def load(self, source_language: str = "en", target_language: str = "fr"):
        """Load the translation model for a specific language pair."""
        model_name = self._resolve_model_name(source_language, target_language)
        logger.info(
            "Loading MarianMT model: %s (%s -> %s)",
            model_name,
            source_language,
            target_language,
        )
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        self._pipeline = pipeline("translation", model=model, tokenizer=tokenizer)
        self._current_pair = (source_language.lower(), target_language.lower())

    def _resolve_model_name(self, src: str, tgt: str) -> str:
        """Resolve language pair to MarianMT model name."""
        pair = (src.lower(), tgt.lower())
        mapping = {
            ("en", "fr"): "Helsinki-NLP/opus-mt-en-fr",
            ("fr", "en"): "Helsinki-NLP/opus-mt-fr-en",
            ("en", "es"): "Helsinki-NLP/opus-mt-en-es",
            ("es", "en"): "Helsinki-NLP/opus-mt-es-en",
            ("en", "de"): "Helsinki-NLP/opus-mt-en-de",
            ("de", "en"): "Helsinki-NLP/opus-mt-de-en",
        }
        return mapping.get(pair, "Helsinki-NLP/opus-mt-en-fr")

    def translate(self, text: str, source_language: str, target_language: str) -> dict:
        """Translate text between languages.

        Args:
            text: Text to translate.
            source_language: Source language code (e.g. "en").
            target_language: Target language code (e.g. "fr").

        Returns:
            dict with "text" key containing {source_language: original, target_language: translated}.
        """
        pair = (source_language.lower(), target_language.lower())
        if self._pipeline is None or self._current_pair != pair:
            self.load(source_language, target_language)
        with self._lock:
            results = self._pipeline(
                text, src_lang=source_language, tgt_lang=target_language
            )
        translated = results[0]["translation_text"] if results else ""
        return {"text": {source_language: text, target_language: translated}}


# Module-level singleton — shared across all MarianMT translator processors
translator_service = MarianTranslatorService()
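A minimal usage sketch of the singleton, following the contract documented in translate() above; the model downloads from HuggingFace on first call, and note that unmapped language pairs silently fall back to the en-to-fr model:

    result = translator_service.translate("The meeting starts at noon.", "en", "fr")
    print(result["text"]["fr"])  # translated text
    print(result["text"]["en"])  # original text, echoed back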
133 server/reflector/processors/_pyannote_diarization_service.py Normal file
@@ -0,0 +1,133 @@
"""
Pyannote diarization service using pyannote.audio.

Singleton service that loads the pyannote speaker diarization model once
and reuses it across all pyannote diarization processor instances.

Ported from gpu/self_hosted/app/services/diarizer.py for in-process use.
"""

import logging
import tarfile
import threading
from pathlib import Path
from urllib.request import urlopen

import torch
import torchaudio
import yaml
from pyannote.audio import Pipeline

from reflector.settings import settings

logger = logging.getLogger(__name__)

S3_BUNDLE_URL = "https://reflector-public.s3.us-east-1.amazonaws.com/pyannote-speaker-diarization-3.1.tar.gz"
BUNDLE_CACHE_DIR = Path.home() / ".cache" / "pyannote-bundle"


def _ensure_model(cache_dir: Path) -> str:
    """Download and extract S3 model bundle if not cached."""
    model_dir = cache_dir / "pyannote-speaker-diarization-3.1"
    config_path = model_dir / "config.yaml"

    if config_path.exists():
        logger.info("Using cached model bundle at %s", model_dir)
        return str(model_dir)

    cache_dir.mkdir(parents=True, exist_ok=True)
    tarball_path = cache_dir / "model.tar.gz"

    logger.info("Downloading model bundle from %s", S3_BUNDLE_URL)
    with urlopen(S3_BUNDLE_URL) as response, open(tarball_path, "wb") as f:
        while chunk := response.read(8192):
            f.write(chunk)

    logger.info("Extracting model bundle")
    with tarfile.open(tarball_path, "r:gz") as tar:
        tar.extractall(path=cache_dir, filter="data")
    tarball_path.unlink()

    _patch_config(model_dir, cache_dir)
    return str(model_dir)


def _patch_config(model_dir: Path, cache_dir: Path) -> None:
    """Rewrite config.yaml to reference local pytorch_model.bin paths."""
    config_path = model_dir / "config.yaml"
    with open(config_path) as f:
        config = yaml.safe_load(f)

    config["pipeline"]["params"]["segmentation"] = str(
        cache_dir / "pyannote-segmentation-3.0" / "pytorch_model.bin"
    )
    config["pipeline"]["params"]["embedding"] = str(
        cache_dir / "pyannote-wespeaker-voxceleb-resnet34-LM" / "pytorch_model.bin"
    )

    with open(config_path, "w") as f:
        yaml.dump(config, f)

    logger.info("Patched config.yaml with local model paths")


class PyannoteDiarizationService:
    """Pyannote speaker diarization service for in-process use."""

    def __init__(self):
        self._pipeline = None
        self._device = "cpu"
        self._lock = threading.Lock()

    def load(self):
        self._device = "cuda" if torch.cuda.is_available() else "cpu"
        hf_token = settings.HF_TOKEN

        if hf_token:
            logger.info("Loading pyannote model from HuggingFace (HF_TOKEN set)")
            self._pipeline = Pipeline.from_pretrained(
                "pyannote/speaker-diarization-3.1",
                use_auth_token=hf_token,
            )
        else:
            logger.info("HF_TOKEN not set — loading model from S3 bundle")
            model_path = _ensure_model(BUNDLE_CACHE_DIR)
            config_path = Path(model_path) / "config.yaml"
            self._pipeline = Pipeline.from_pretrained(str(config_path))

        self._pipeline.to(torch.device(self._device))

    def diarize_file(self, file_path: str, timestamp: float = 0.0) -> dict:
        """Run speaker diarization on an audio file.

        Args:
            file_path: Path to the audio file.
            timestamp: Offset to add to all segment timestamps.

        Returns:
            dict with "diarization" key containing list of
            {"start": float, "end": float, "speaker": int} segments.
        """
        if self._pipeline is None:
            self.load()
        waveform, sample_rate = torchaudio.load(file_path)
        with self._lock:
            diarization = self._pipeline(
                {"waveform": waveform, "sample_rate": sample_rate}
            )
        segments = []
        for diarization_segment, _, speaker in diarization.itertracks(yield_label=True):
            segments.append(
                {
                    "start": round(timestamp + diarization_segment.start, 3),
                    "end": round(timestamp + diarization_segment.end, 3),
                    "speaker": int(speaker[-2:])
                    if speaker and speaker[-2:].isdigit()
                    else 0,
                }
            )
        return {"diarization": segments}


# Module-level singleton — shared across all pyannote diarization processors
diarization_service = PyannoteDiarizationService()
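A minimal usage sketch, assuming a local WAV file is available; load() runs lazily on the first call, and the timestamp offset shifts all returned segment boundaries:

    result = diarization_service.diarize_file("recording.wav", timestamp=0.0)
    for seg in result["diarization"]:
        print(f'{seg["start"]:.3f}-{seg["end"]:.3f}  speaker {seg["speaker"]}')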
37 server/reflector/processors/audio_diarization_pyannote.py Normal file
@@ -0,0 +1,37 @@
"""
Pyannote audio diarization processor using pyannote.audio in-process.

Downloads audio from URL, runs pyannote diarization locally,
and returns speaker segments. No HTTP backend needed.
"""

import asyncio
import os

from reflector.processors._audio_download import download_audio_to_temp
from reflector.processors._pyannote_diarization_service import diarization_service
from reflector.processors.audio_diarization import AudioDiarizationProcessor
from reflector.processors.audio_diarization_auto import AudioDiarizationAutoProcessor
from reflector.processors.types import AudioDiarizationInput


class AudioDiarizationPyannoteProcessor(AudioDiarizationProcessor):
    INPUT_TYPE = AudioDiarizationInput

    async def _diarize(self, data: AudioDiarizationInput):
        """Run pyannote diarization on audio from URL."""
        tmp_path = await download_audio_to_temp(data.audio_url)
        try:
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(
                None, diarization_service.diarize_file, str(tmp_path)
            )
            return result["diarization"]
        finally:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass


AudioDiarizationAutoProcessor.register("pyannote", AudioDiarizationPyannoteProcessor)
27 server/reflector/processors/audio_mixdown.py Normal file
@@ -0,0 +1,27 @@
"""
Base class for audio mixdown processors.
"""

from pydantic import BaseModel


class MixdownResponse(BaseModel):
    size: int
    duration_ms: float = 0.0
    cancelled: bool = False
    # Local file path (pyav sets this; modal leaves None)
    output_path: str | None = None


class AudioMixdownProcessor:
    """Base class for audio mixdown processors."""

    async def mixdown_tracks(
        self,
        track_urls: list[str],
        output_url: str,
        target_sample_rate: int | None = None,
        offsets_seconds: list[float] | None = None,
    ) -> MixdownResponse:
        raise NotImplementedError
32 server/reflector/processors/audio_mixdown_auto.py Normal file
@@ -0,0 +1,32 @@
import importlib

from reflector.processors.audio_mixdown import AudioMixdownProcessor
from reflector.settings import settings


class AudioMixdownAutoProcessor(AudioMixdownProcessor):
    _registry = {}

    @classmethod
    def register(cls, name, kclass):
        cls._registry[name] = kclass

    def __new__(cls, name: str | None = None, **kwargs):
        if name is None:
            name = settings.MIXDOWN_BACKEND
        if name not in cls._registry:
            module_name = f"reflector.processors.audio_mixdown_{name}"
            importlib.import_module(module_name)

        # gather specific configuration for the processor
        # search `MIXDOWN_XXX_YYY`, push to constructor as `xxx_yyy`
        config = {}
        name_upper = name.upper()
        settings_prefix = "MIXDOWN_"
        config_prefix = f"{settings_prefix}{name_upper}_"
        for key, value in settings:
            if key.startswith(config_prefix):
                config_name = key[len(settings_prefix) :].lower()
                config[config_name] = value

        return cls._registry[name](**config | kwargs)
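To make the registry/config convention concrete: with MIXDOWN_BACKEND=modal, the loop above turns every MIXDOWN_MODAL_* setting into a constructor kwarg (the env values below are illustrative only):

    # settings: MIXDOWN_BACKEND=modal, MIXDOWN_MODAL_API_KEY=sk-example
    # The stripped prefix is "MIXDOWN_", not "MIXDOWN_MODAL_", so the kwarg
    # keeps the backend name: MIXDOWN_MODAL_API_KEY -> modal_api_key=...
    processor = AudioMixdownAutoProcessor()       # resolves to AudioMixdownModalProcessor
    explicit = AudioMixdownAutoProcessor("pyav")  # or force a backend by name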
110 server/reflector/processors/audio_mixdown_modal.py Normal file
@@ -0,0 +1,110 @@
"""
Modal.com backend for audio mixdown.
"""

import asyncio
import os

import httpx

from reflector.hatchet.constants import TIMEOUT_HEAVY_HTTP
from reflector.logger import logger
from reflector.processors.audio_mixdown import AudioMixdownProcessor, MixdownResponse
from reflector.processors.audio_mixdown_auto import AudioMixdownAutoProcessor


class AudioMixdownModalProcessor(AudioMixdownProcessor):
    """Audio mixdown processor using Modal.com/self-hosted backend via HTTP."""

    def __init__(
        self, mixdown_url: str | None = None, modal_api_key: str | None = None
    ):
        self.mixdown_url = mixdown_url or os.getenv("MIXDOWN_URL")
        if not self.mixdown_url:
            raise ValueError(
                "MIXDOWN_URL required to use AudioMixdownModalProcessor. "
                "Set MIXDOWN_URL environment variable or pass mixdown_url parameter."
            )

        self.modal_api_key = modal_api_key or os.getenv("MODAL_API_KEY")

    async def mixdown_tracks(
        self,
        track_urls: list[str],
        output_url: str,
        target_sample_rate: int | None = None,
        offsets_seconds: list[float] | None = None,
    ) -> MixdownResponse:
        """Mix audio tracks via remote Modal/self-hosted backend.

        Args:
            track_urls: Presigned GET URLs for source audio tracks
            output_url: Presigned PUT URL for output MP3
            target_sample_rate: Sample rate for output (Hz), auto-detected if None
            offsets_seconds: Optional per-track delays in seconds for alignment
        """
        valid_count = len([u for u in track_urls if u])
        log = logger.bind(track_count=valid_count)
        log.info("Sending Modal mixdown HTTP request")

        url = f"{self.mixdown_url}/mixdown"

        headers = {}
        if self.modal_api_key:
            headers["Authorization"] = f"Bearer {self.modal_api_key}"

        # Scale timeout with track count: base TIMEOUT_HEAVY_HTTP + 60s per track beyond 2
        extra_timeout = max(0, (valid_count - 2)) * 60
        timeout = TIMEOUT_HEAVY_HTTP + extra_timeout

        try:
            async with httpx.AsyncClient(timeout=timeout) as client:
                response = await client.post(
                    url,
                    headers=headers,
                    json={
                        "track_urls": track_urls,
                        "output_url": output_url,
                        "target_sample_rate": target_sample_rate,
                        "offsets_seconds": offsets_seconds,
                    },
                    follow_redirects=True,
                )

            if response.status_code != 200:
                error_body = response.text
                log.error(
                    "Modal mixdown API error",
                    status_code=response.status_code,
                    error_body=error_body,
                )

            response.raise_for_status()
            result = response.json()

            # Check if work was cancelled
            if result.get("cancelled"):
                log.warning("Modal mixdown was cancelled by disconnect detection")
                raise asyncio.CancelledError(
                    "Mixdown cancelled due to client disconnect"
                )

            log.info("Modal mixdown complete", size=result["size"])
            return MixdownResponse(**result)
        except asyncio.CancelledError:
            log.warning(
                "Modal mixdown cancelled (Hatchet timeout, disconnect detected on Modal side)"
            )
            raise
        except httpx.TimeoutException as e:
            log.error("Modal mixdown timeout", error=str(e), exc_info=True)
            raise Exception(f"Modal mixdown timeout: {e}") from e
        except httpx.HTTPStatusError as e:
            log.error("Modal mixdown HTTP error", error=str(e), exc_info=True)
            raise Exception(f"Modal mixdown HTTP error: {e}") from e
        except Exception as e:
            log.error("Modal mixdown unexpected error", error=str(e), exc_info=True)
            raise


AudioMixdownAutoProcessor.register("modal", AudioMixdownModalProcessor)
101 server/reflector/processors/audio_mixdown_pyav.py Normal file
@@ -0,0 +1,101 @@
"""
PyAV audio mixdown processor.

Mixes N tracks in-process using the existing utility from reflector.utils.audio_mixdown.
Writes to a local temp file (does NOT upload to S3 — the pipeline handles upload).
"""

import os
import tempfile

from reflector.logger import logger
from reflector.processors.audio_file_writer import AudioFileWriterProcessor
from reflector.processors.audio_mixdown import AudioMixdownProcessor, MixdownResponse
from reflector.processors.audio_mixdown_auto import AudioMixdownAutoProcessor
from reflector.utils.audio_mixdown import (
    detect_sample_rate_from_tracks,
    mixdown_tracks_pyav,
)


class AudioMixdownPyavProcessor(AudioMixdownProcessor):
    """Audio mixdown processor using PyAV (no HTTP backend).

    Writes the mixed output to a local temp file and returns its path
    in MixdownResponse.output_path. The caller is responsible for
    uploading the file and cleaning it up.
    """

    async def mixdown_tracks(
        self,
        track_urls: list[str],
        output_url: str,
        target_sample_rate: int | None = None,
        offsets_seconds: list[float] | None = None,
    ) -> MixdownResponse:
        log = logger.bind(track_count=len(track_urls))
        log.info("Starting local PyAV mixdown")

        valid_urls = [url for url in track_urls if url]
        if not valid_urls:
            raise ValueError("No valid track URLs provided")

        # Auto-detect sample rate if not provided
        if target_sample_rate is None:
            target_sample_rate = detect_sample_rate_from_tracks(
                valid_urls, logger=logger
            )
            if not target_sample_rate:
                raise ValueError("No decodable audio frames in any track")

        # Write to temp MP3 file
        temp_dir = tempfile.mkdtemp()
        output_path = os.path.join(temp_dir, "mixed.mp3")
        duration_ms_container = [0.0]

        async def capture_duration(d):
            duration_ms_container[0] = d

        writer = AudioFileWriterProcessor(
            path=output_path, on_duration=capture_duration
        )

        try:
            await mixdown_tracks_pyav(
                valid_urls,
                writer,
                target_sample_rate,
                offsets_seconds=offsets_seconds,
                logger=logger,
            )
            await writer.flush()

            file_size = os.path.getsize(output_path)
            log.info(
                "Local mixdown complete",
                size=file_size,
                duration_ms=duration_ms_container[0],
            )

            return MixdownResponse(
                size=file_size,
                duration_ms=duration_ms_container[0],
                output_path=output_path,
            )

        except Exception as e:
            # Cleanup on failure
            if os.path.exists(output_path):
                try:
                    os.unlink(output_path)
                except Exception:
                    pass
            try:
                os.rmdir(temp_dir)
            except Exception:
                pass
            log.error("Local mixdown failed", error=str(e), exc_info=True)
            raise


AudioMixdownAutoProcessor.register("pyav", AudioMixdownPyavProcessor)
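A usage sketch under the contract in the docstring above: the PyAV backend never touches output_url and instead hands back a local output_path that the caller must upload and remove (the track URLs and the upload() helper here are placeholders, not from the source):

    processor = AudioMixdownPyavProcessor()
    resp = await processor.mixdown_tracks(
        track_urls=["https://example/track0.webm", "https://example/track1.webm"],
        output_url="",  # unused by the PyAV backend; kept for interface parity
    )
    try:
        upload(resp.output_path)  # hypothetical upload step done by the pipeline
    finally:
        os.unlink(resp.output_path)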
23 server/reflector/processors/audio_padding.py Normal file
@@ -0,0 +1,23 @@
"""
Base class for audio padding processors.
"""

from pydantic import BaseModel


class PaddingResponse(BaseModel):
    size: int
    cancelled: bool = False


class AudioPaddingProcessor:
    """Base class for audio padding processors."""

    async def pad_track(
        self,
        track_url: str,
        output_url: str,
        start_time_seconds: float,
        track_index: int,
    ) -> PaddingResponse:
        raise NotImplementedError
32 server/reflector/processors/audio_padding_auto.py Normal file
@@ -0,0 +1,32 @@
import importlib

from reflector.processors.audio_padding import AudioPaddingProcessor
from reflector.settings import settings


class AudioPaddingAutoProcessor(AudioPaddingProcessor):
    _registry = {}

    @classmethod
    def register(cls, name, kclass):
        cls._registry[name] = kclass

    def __new__(cls, name: str | None = None, **kwargs):
        if name is None:
            name = settings.PADDING_BACKEND
        if name not in cls._registry:
            module_name = f"reflector.processors.audio_padding_{name}"
            importlib.import_module(module_name)

        # gather specific configuration for the processor
        # search `PADDING_XXX_YYY`, push to constructor as `xxx_yyy`
        config = {}
        name_upper = name.upper()
        settings_prefix = "PADDING_"
        config_prefix = f"{settings_prefix}{name_upper}_"
        for key, value in settings:
            if key.startswith(config_prefix):
                config_name = key[len(settings_prefix) :].lower()
                config[config_name] = value

        return cls._registry[name](**config | kwargs)
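AudioPaddingAutoProcessor mirrors the mixdown registry above, keyed on settings.PADDING_BACKEND and PADDING_<NAME>_* settings; as with mixdown, the backend can also be forced by name:

    processor = AudioPaddingAutoProcessor()    # backend from settings.PADDING_BACKEND
    local = AudioPaddingAutoProcessor("pyav")  # force the in-process PyAV backend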
@@ -6,18 +6,14 @@
 import asyncio
 import os

 import httpx
-from pydantic import BaseModel

-from reflector.hatchet.constants import TIMEOUT_AUDIO
+from reflector.hatchet.constants import TIMEOUT_AUDIO_HTTP
 from reflector.logger import logger
+from reflector.processors.audio_padding import AudioPaddingProcessor, PaddingResponse
+from reflector.processors.audio_padding_auto import AudioPaddingAutoProcessor


-class PaddingResponse(BaseModel):
-    size: int
-    cancelled: bool = False
-
-
-class AudioPaddingModalProcessor:
+class AudioPaddingModalProcessor(AudioPaddingProcessor):
     """Audio padding processor using Modal.com CPU backend via HTTP."""

     def __init__(
@@ -64,7 +60,7 @@ class AudioPaddingModalProcessor:
             headers["Authorization"] = f"Bearer {self.modal_api_key}"

         try:
-            async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO) as client:
+            async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO_HTTP) as client:
                 response = await client.post(
                     url,
                     headers=headers,
@@ -111,3 +107,6 @@ class AudioPaddingModalProcessor:
         except Exception as e:
             log.error("Modal padding unexpected error", error=str(e), exc_info=True)
             raise
+
+
+AudioPaddingAutoProcessor.register("modal", AudioPaddingModalProcessor)
133 server/reflector/processors/audio_padding_pyav.py Normal file
@@ -0,0 +1,133 @@
"""
PyAV audio padding processor.

Pads audio tracks with silence directly in-process (no HTTP).
Reuses the shared PyAV utilities from reflector.utils.audio_padding.
"""

import asyncio
import os
import tempfile

import av

from reflector.logger import logger
from reflector.processors.audio_padding import AudioPaddingProcessor, PaddingResponse
from reflector.processors.audio_padding_auto import AudioPaddingAutoProcessor
from reflector.utils.audio_padding import apply_audio_padding_to_file

S3_TIMEOUT = 60


class AudioPaddingPyavProcessor(AudioPaddingProcessor):
    """Audio padding processor using PyAV (no HTTP backend)."""

    async def pad_track(
        self,
        track_url: str,
        output_url: str,
        start_time_seconds: float,
        track_index: int,
    ) -> PaddingResponse:
        """Pad audio track with silence via PyAV.

        Args:
            track_url: Presigned GET URL for source audio track
            output_url: Presigned PUT URL for output WebM
            start_time_seconds: Amount of silence to prepend
            track_index: Track index for logging
        """
        if not track_url:
            raise ValueError("track_url cannot be empty")
        if start_time_seconds <= 0:
            raise ValueError(
                f"start_time_seconds must be positive, got {start_time_seconds}"
            )

        log = logger.bind(track_index=track_index, padding_seconds=start_time_seconds)
        log.info("Starting local PyAV padding")

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            self._pad_track_blocking,
            track_url,
            output_url,
            start_time_seconds,
            track_index,
        )

    def _pad_track_blocking(
        self,
        track_url: str,
        output_url: str,
        start_time_seconds: float,
        track_index: int,
    ) -> PaddingResponse:
        """Blocking padding work: download, pad with PyAV, upload."""
        import requests

        log = logger.bind(track_index=track_index, padding_seconds=start_time_seconds)
        temp_dir = tempfile.mkdtemp()
        input_path = None
        output_path = None

        try:
            # Download source audio
            log.info("Downloading track for local padding")
            response = requests.get(track_url, stream=True, timeout=S3_TIMEOUT)
            response.raise_for_status()

            input_path = os.path.join(temp_dir, "track.webm")
            total_bytes = 0
            with open(input_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        total_bytes += len(chunk)
            log.info("Track downloaded", bytes=total_bytes)

            # Apply padding using shared PyAV utility
            output_path = os.path.join(temp_dir, "padded.webm")
            with av.open(input_path) as in_container:
                apply_audio_padding_to_file(
                    in_container,
                    output_path,
                    start_time_seconds,
                    track_index,
                    logger=logger,
                )

            file_size = os.path.getsize(output_path)
            log.info("Local padding complete", size=file_size)

            # Upload padded track
            log.info("Uploading padded track to S3")
            with open(output_path, "rb") as f:
                upload_response = requests.put(output_url, data=f, timeout=S3_TIMEOUT)
            upload_response.raise_for_status()
            log.info("Upload complete", size=file_size)

            return PaddingResponse(size=file_size)

        except Exception as e:
            log.error("Local padding failed", error=str(e), exc_info=True)
            raise
        finally:
            if input_path and os.path.exists(input_path):
                try:
                    os.unlink(input_path)
                except Exception as e:
                    log.warning("Failed to cleanup input file", error=str(e))
            if output_path and os.path.exists(output_path):
                try:
                    os.unlink(output_path)
                except Exception as e:
                    log.warning("Failed to cleanup output file", error=str(e))
            try:
                os.rmdir(temp_dir)
            except Exception as e:
                log.warning("Failed to cleanup temp directory", error=str(e))


AudioPaddingAutoProcessor.register("pyav", AudioPaddingPyavProcessor)
@@ -3,13 +3,17 @@ from faster_whisper import WhisperModel
 from reflector.processors.audio_transcript import AudioTranscriptProcessor
 from reflector.processors.audio_transcript_auto import AudioTranscriptAutoProcessor
 from reflector.processors.types import AudioFile, Transcript, Word
+from reflector.settings import settings


 class AudioTranscriptWhisperProcessor(AudioTranscriptProcessor):
     def __init__(self):
         super().__init__()
         self.model = WhisperModel(
-            "tiny", device="cpu", compute_type="float32", num_workers=12
+            settings.WHISPER_CHUNK_MODEL,
+            device="cpu",
+            compute_type="float32",
+            num_workers=12,
         )

     async def _transcript(self, data: AudioFile):
server/reflector/processors/file_diarization_pyannote.py (new file, 39 lines)
@@ -0,0 +1,39 @@
"""
Pyannote file diarization processor using pyannote.audio in-process.

Downloads audio from URL, runs pyannote diarization locally,
and returns speaker segments. No HTTP backend needed.
"""

import asyncio
import os

from reflector.processors._audio_download import download_audio_to_temp
from reflector.processors._pyannote_diarization_service import diarization_service
from reflector.processors.file_diarization import (
    FileDiarizationInput,
    FileDiarizationOutput,
    FileDiarizationProcessor,
)
from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor


class FileDiarizationPyannoteProcessor(FileDiarizationProcessor):
    async def _diarize(self, data: FileDiarizationInput):
        """Run pyannote diarization on file from URL."""
        self.logger.info(f"Starting pyannote diarization from {data.audio_url}")
        tmp_path = await download_audio_to_temp(data.audio_url)
        try:
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(
                None, diarization_service.diarize_file, str(tmp_path)
            )
            return FileDiarizationOutput(diarization=result["diarization"])
        finally:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass


FileDiarizationAutoProcessor.register("pyannote", FileDiarizationPyannoteProcessor)
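The register() call above follows the repo's *AutoProcessor backend-registry convention. The real FileDiarizationAutoProcessor is not part of this diff, so the sketch below is an illustrative assumption of how such a name-to-class registry typically works, not the repo's actual implementation:

class AutoProcessorRegistry:
    _backends: dict[str, type] = {}

    @classmethod
    def register(cls, name: str, processor_cls: type) -> None:
        cls._backends[name] = processor_cls

    @classmethod
    def create(cls, name: str, **kwargs):
        # a settings value like DIARIZATION_BACKEND selects the backend by name
        return cls._backends[name](**kwargs)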
server/reflector/processors/file_transcript_whisper.py (new file, 275 lines)
@@ -0,0 +1,275 @@
"""
Local file transcription processor using faster-whisper with Silero VAD pipeline.

Downloads audio from URL, segments it using Silero VAD, transcribes each
segment with faster-whisper, and merges results. No HTTP backend needed.

VAD pipeline ported from gpu/self_hosted/app/services/transcriber.py.
"""

import asyncio
import os
import shutil
import subprocess
import threading
from typing import Generator

import numpy as np
from silero_vad import VADIterator, load_silero_vad

from reflector.processors._audio_download import download_audio_to_temp
from reflector.processors.file_transcript import (
    FileTranscriptInput,
    FileTranscriptProcessor,
)
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
from reflector.processors.types import Transcript, Word
from reflector.settings import settings

SAMPLE_RATE = 16000

VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}


class FileTranscriptWhisperProcessor(FileTranscriptProcessor):
    """Transcribe complete audio files using local faster-whisper with VAD."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._model = None
        self._lock = threading.Lock()

    def _ensure_model(self):
        """Lazy-load the whisper model on first use."""
        if self._model is not None:
            return

        import faster_whisper
        import torch

        device = "cuda" if torch.cuda.is_available() else "cpu"
        compute_type = "float16" if device == "cuda" else "int8"
        model_name = settings.WHISPER_FILE_MODEL

        self.logger.info(
            "Loading whisper model",
            model=model_name,
            device=device,
            compute_type=compute_type,
        )
        self._model = faster_whisper.WhisperModel(
            model_name,
            device=device,
            compute_type=compute_type,
            num_workers=1,
        )

    async def _transcript(self, data: FileTranscriptInput):
        """Download file, run VAD segmentation, transcribe each segment."""
        tmp_path = await download_audio_to_temp(data.audio_url)
        try:
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(
                None,
                self._transcribe_file_blocking,
                str(tmp_path),
                data.language,
            )
            return result
        finally:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass

    def _transcribe_file_blocking(self, file_path: str, language: str) -> Transcript:
        """Blocking transcription with VAD pipeline."""
        self._ensure_model()

        audio_array = _load_audio_via_ffmpeg(file_path, SAMPLE_RATE)

        # VAD segmentation → batch merging
        merged_batches: list[tuple[float, float]] = []
        batch_start = None
        batch_end = None
        max_duration = VAD_CONFIG["batch_max_duration"]

        for seg_start, seg_end in _vad_segments(audio_array):
            if batch_start is None:
                batch_start, batch_end = seg_start, seg_end
                continue
            if seg_end - batch_start <= max_duration:
                batch_end = seg_end
            else:
                merged_batches.append((batch_start, batch_end))
                batch_start, batch_end = seg_start, seg_end

        if batch_start is not None and batch_end is not None:
            merged_batches.append((batch_start, batch_end))

        # If no speech detected, try transcribing the whole file
        if not merged_batches:
            return self._transcribe_whole_file(file_path, language)

        # Transcribe each batch
        all_words = []
        for start_time, end_time in merged_batches:
            s_idx = int(start_time * SAMPLE_RATE)
            e_idx = int(end_time * SAMPLE_RATE)
            segment = audio_array[s_idx:e_idx]
            segment = _pad_audio(segment, SAMPLE_RATE)

            with self._lock:
                segments, _ = self._model.transcribe(
                    segment,
                    language=language,
                    beam_size=5,
                    word_timestamps=True,
                    vad_filter=True,
                    vad_parameters={"min_silence_duration_ms": 500},
                )
                segments = list(segments)

            for seg in segments:
                for w in seg.words:
                    all_words.append(
                        {
                            "word": w.word,
                            "start": round(float(w.start) + start_time, 2),
                            "end": round(float(w.end) + start_time, 2),
                        }
                    )

        all_words = _enforce_word_timing_constraints(all_words)

        words = [
            Word(text=w["word"], start=w["start"], end=w["end"]) for w in all_words
        ]
        words.sort(key=lambda w: w.start)
        return Transcript(words=words)

    def _transcribe_whole_file(self, file_path: str, language: str) -> Transcript:
        """Fallback: transcribe entire file without VAD segmentation."""
        with self._lock:
            segments, _ = self._model.transcribe(
                file_path,
                language=language,
                beam_size=5,
                word_timestamps=True,
                vad_filter=True,
                vad_parameters={"min_silence_duration_ms": 500},
            )
            segments = list(segments)

        words = []
        for seg in segments:
            for w in seg.words:
                words.append(
                    Word(
                        text=w.word,
                        start=round(float(w.start), 2),
                        end=round(float(w.end), 2),
                    )
                )
        return Transcript(words=words)


# --- VAD helpers (ported from gpu/self_hosted/app/services/transcriber.py) ---
# IMPORTANT: This VAD segment logic is duplicated for deployment isolation.
# If you modify this, consider updating the GPU service copy as well:
# - gpu/self_hosted/app/services/transcriber.py
# - gpu/modal_deployments/reflector_transcriber.py
# - gpu/modal_deployments/reflector_transcriber_parakeet.py


def _load_audio_via_ffmpeg(
    input_path: str, sample_rate: int = SAMPLE_RATE
) -> np.ndarray:
    """Load audio file via ffmpeg, converting to mono float32 at target sample rate."""
    ffmpeg_bin = shutil.which("ffmpeg") or "ffmpeg"
    cmd = [
        ffmpeg_bin,
        "-nostdin",
        "-threads",
        "1",
        "-i",
        input_path,
        "-f",
        "f32le",
        "-acodec",
        "pcm_f32le",
        "-ac",
        "1",
        "-ar",
        str(sample_rate),
        "pipe:1",
    ]
    proc = subprocess.run(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
    )
    return np.frombuffer(proc.stdout, dtype=np.float32)


def _vad_segments(
    audio_array: np.ndarray,
    sample_rate: int = SAMPLE_RATE,
    window_size: int = VAD_CONFIG["window_size"],
) -> Generator[tuple[float, float], None, None]:
    """Detect speech segments using Silero VAD."""
    vad_model = load_silero_vad(onnx=False)
    iterator = VADIterator(vad_model, sampling_rate=sample_rate)
    start = None

    for i in range(0, len(audio_array), window_size):
        chunk = audio_array[i : i + window_size]
        if len(chunk) < window_size:
            chunk = np.pad(chunk, (0, window_size - len(chunk)), mode="constant")
        speech = iterator(chunk)
        if not speech:
            continue
        if "start" in speech:
            start = speech["start"]
            continue
        if "end" in speech and start is not None:
            end = speech["end"]
            yield (start / float(SAMPLE_RATE), end / float(SAMPLE_RATE))
            start = None

    # Handle case where audio ends while speech is still active
    if start is not None:
        audio_duration = len(audio_array) / float(sample_rate)
        yield (start / float(SAMPLE_RATE), audio_duration)

    iterator.reset_states()


def _pad_audio(audio_array: np.ndarray, sample_rate: int = SAMPLE_RATE) -> np.ndarray:
    """Pad short audio with silence for VAD compatibility."""
    audio_duration = len(audio_array) / sample_rate
    if audio_duration < VAD_CONFIG["silence_padding"]:
        silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
        silence = np.zeros(silence_samples, dtype=np.float32)
        return np.concatenate([audio_array, silence])
    return audio_array


def _enforce_word_timing_constraints(words: list[dict]) -> list[dict]:
    """Ensure no word end time exceeds the next word's start time."""
    if len(words) <= 1:
        return words
    enforced: list[dict] = []
    for i, word in enumerate(words):
        current = dict(word)
        if i < len(words) - 1:
            next_start = words[i + 1]["start"]
            if current["end"] > next_start:
                current["end"] = next_start
        enforced.append(current)
    return enforced


FileTranscriptAutoProcessor.register("whisper", FileTranscriptWhisperProcessor)
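The batch-merging loop in _transcribe_file_blocking greedily extends a batch while the span measured from the batch start stays within batch_max_duration (30 s). A self-contained walk-through of that rule with made-up segment timings:

# VAD segments as (start, end) in seconds; values are illustrative
segments = [(0.0, 8.0), (9.0, 20.0), (21.0, 29.0), (31.0, 40.0)]
max_duration = 30.0

batches, batch_start, batch_end = [], None, None
for seg_start, seg_end in segments:
    if batch_start is None:
        batch_start, batch_end = seg_start, seg_end
        continue
    if seg_end - batch_start <= max_duration:
        batch_end = seg_end  # extend the current batch
    else:
        batches.append((batch_start, batch_end))  # flush and start a new batch
        batch_start, batch_end = seg_start, seg_end
if batch_start is not None:
    batches.append((batch_start, batch_end))

print(batches)  # [(0.0, 29.0), (31.0, 40.0)]

The first three segments fit inside one 30-second window and merge; the fourth would stretch the window to 40 s, so it starts a fresh batch.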
server/reflector/processors/transcript_translator_marian.py (new file, 50 lines)
@@ -0,0 +1,50 @@
"""
MarianMT transcript translator processor using HuggingFace MarianMT in-process.

Translates transcript text using HuggingFace MarianMT models
locally. No HTTP backend needed.
"""

import asyncio

from reflector.processors._marian_translator_service import translator_service
from reflector.processors.transcript_translator import TranscriptTranslatorProcessor
from reflector.processors.transcript_translator_auto import (
    TranscriptTranslatorAutoProcessor,
)
from reflector.processors.types import TranslationLanguages


class TranscriptTranslatorMarianProcessor(TranscriptTranslatorProcessor):
    """Translate transcript text using MarianMT models."""

    async def _translate(self, text: str) -> str | None:
        source_language = self.get_pref("audio:source_language", "en")
        target_language = self.get_pref("audio:target_language", "en")

        languages = TranslationLanguages()
        assert languages.is_supported(target_language)

        self.logger.debug(f"MarianMT translate {text=}")

        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(
            None,
            translator_service.translate,
            text,
            source_language,
            target_language,
        )

        if target_language in result["text"]:
            translation = result["text"][target_language]
        else:
            translation = None

        self.logger.debug(f"Translation result: {text=}, {translation=}")
        return translation


TranscriptTranslatorAutoProcessor.register(
    "marian", TranscriptTranslatorMarianProcessor
)
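The in-repo translator_service is not shown in this diff. For orientation, a hedged sketch of direct MarianMT translation with HuggingFace transformers; the model name is an assumed example pair, not something the repo necessarily uses:

from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-fr"  # assumed en->fr pair, illustrative only
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(["Hello, how are you?"], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))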
@@ -10,7 +10,6 @@ from dataclasses import dataclass
 from typing import Literal, Union, assert_never

 import celery
-from celery.result import AsyncResult
 from hatchet_sdk.clients.rest.exceptions import ApiException, NotFoundException
 from hatchet_sdk.clients.rest.models import V1TaskStatus

@@ -18,7 +17,6 @@ from reflector.db.recordings import recordings_controller
 from reflector.db.transcripts import Transcript, transcripts_controller
 from reflector.hatchet.client import HatchetClientManager
 from reflector.logger import logger
-from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
 from reflector.utils.string import NonEmptyString


@@ -40,6 +38,7 @@ class MultitrackProcessingConfig:
     track_keys: list[str]
     recording_id: NonEmptyString | None = None
     room_id: NonEmptyString | None = None
+    source_platform: str = "daily"
     mode: Literal["multitrack"] = "multitrack"


@@ -104,11 +103,8 @@ async def validate_transcript_for_processing(
     ):
         return ValidationNotReady(detail="Recording is not ready for processing")

-    # Check Celery tasks
+    # Check Celery tasks (multitrack still uses Celery for some paths)
     if task_is_scheduled_or_active(
-        "reflector.pipelines.main_file_pipeline.task_pipeline_file_process",
-        transcript_id=transcript.id,
-    ) or task_is_scheduled_or_active(
         "reflector.pipelines.main_multitrack_pipeline.task_pipeline_multitrack_process",
         transcript_id=transcript.id,
     ):

@@ -174,11 +170,8 @@ async def prepare_transcript_processing(validation: ValidationOk) -> PrepareResu
 async def dispatch_transcript_processing(
     config: ProcessingConfig, force: bool = False
-) -> AsyncResult | None:
-    """Dispatch transcript processing to appropriate backend (Hatchet or Celery).
-
-    Returns AsyncResult for Celery tasks, None for Hatchet workflows.
-    """
+) -> None:
+    """Dispatch transcript processing to Hatchet workflow engine."""
     if isinstance(config, MultitrackProcessingConfig):
         # Multitrack processing always uses Hatchet (no Celery fallback)
         # First check if we can replay (outside transaction since it's read-only)

@@ -256,6 +249,7 @@ async def dispatch_transcript_processing(
                 "bucket_name": config.bucket_name,
                 "transcript_id": config.transcript_id,
                 "room_id": config.room_id,
+                "source_platform": config.source_platform,
             },
             additional_metadata={
                 "transcript_id": config.transcript_id,

@@ -273,7 +267,21 @@ async def dispatch_transcript_processing(
         return None

     elif isinstance(config, FileProcessingConfig):
-        return task_pipeline_file_process.delay(transcript_id=config.transcript_id)
+        # File processing uses Hatchet workflow
+        workflow_id = await HatchetClientManager.start_workflow(
+            workflow_name="FilePipeline",
+            input_data={"transcript_id": config.transcript_id},
+            additional_metadata={"transcript_id": config.transcript_id},
+        )
+
+        transcript = await transcripts_controller.get_by_id(config.transcript_id)
+        if transcript:
+            await transcripts_controller.update(
+                transcript, {"workflow_run_id": workflow_id}
+            )
+
+        logger.info("File pipeline dispatched via Hatchet", workflow_id=workflow_id)
+        return None
     else:
         assert_never(config)
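The dispatch function relies on typing.assert_never for exhaustive matching over the config union: if a new config variant is added without a branch, a type checker flags the else arm. A minimal standalone illustration of that pattern (the dataclasses here are hypothetical; requires Python 3.11+ for assert_never):

from dataclasses import dataclass
from typing import Union, assert_never


@dataclass
class FileConfig:
    transcript_id: str


@dataclass
class MultitrackConfig:
    track_keys: list[str]


Config = Union[FileConfig, MultitrackConfig]


def dispatch(cfg: Config) -> None:
    if isinstance(cfg, FileConfig):
        print("file:", cfg.transcript_id)
    elif isinstance(cfg, MultitrackConfig):
        print("multitrack:", len(cfg.track_keys), "tracks")
    else:
        # unreachable if all variants are handled; type checker enforces it
        assert_never(cfg)


dispatch(FileConfig("t-123"))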
@@ -40,14 +40,24 @@ class Settings(BaseSettings):
     # backends: silero, frames
     AUDIO_CHUNKER_BACKEND: str = "frames"

+    # HuggingFace token for gated models (pyannote diarization in --cpu mode)
+    HF_TOKEN: str | None = None
+
     # Audio Transcription
     # backends:
     # - whisper: in-process model loading (no HTTP, runs in same process)
     # - modal: HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
     TRANSCRIPT_BACKEND: str = "whisper"

+    # Whisper model sizes for local transcription
+    # Options: "tiny", "base", "small", "medium", "large-v2"
+    WHISPER_CHUNK_MODEL: str = "tiny"
+    WHISPER_FILE_MODEL: str = "tiny"
     TRANSCRIPT_URL: str | None = None
     TRANSCRIPT_TIMEOUT: int = 90
-    TRANSCRIPT_FILE_TIMEOUT: int = 600
+    TRANSCRIPT_FILE_TIMEOUT: int = (
+        540  # Below Hatchet TIMEOUT_HEAVY (600) to avoid timeout race
+    )

     # Audio Transcription: modal backend
     TRANSCRIPT_MODAL_API_KEY: str | None = None

@@ -73,6 +83,9 @@ class Settings(BaseSettings):
     DAILYCO_STORAGE_AWS_BUCKET_NAME: str | None = None
     DAILYCO_STORAGE_AWS_REGION: str | None = None
     DAILYCO_STORAGE_AWS_ROLE_ARN: str | None = None
+    # Worker credentials for reading/deleting from Daily's recording bucket
+    DAILYCO_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
+    DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None

     # Translate into the target language
     TRANSLATION_BACKEND: str = "passthrough"

@@ -97,7 +110,7 @@ class Settings(BaseSettings):
     )

     # Diarization
-    # backend: modal — HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
+    # backends: modal — HTTP API client, pyannote — in-process pyannote.audio
     DIARIZATION_ENABLED: bool = True
     DIARIZATION_BACKEND: str = "modal"
     DIARIZATION_URL: str | None = None

@@ -106,10 +119,22 @@ class Settings(BaseSettings):
     # Diarization: modal backend
     DIARIZATION_MODAL_API_KEY: str | None = None

-    # Audio Padding (Modal.com backend)
+    # Audio Padding
+    # backends:
+    # - pyav: in-process PyAV padding (no HTTP, runs in same process)
+    # - modal: HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
+    PADDING_BACKEND: str = "pyav"
     PADDING_URL: str | None = None
     PADDING_MODAL_API_KEY: str | None = None

+    # Audio Mixdown
+    # backends:
+    # - pyav: in-process PyAV mixdown (no HTTP, runs in same process)
+    # - modal: HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
+    MIXDOWN_BACKEND: str = "pyav"
+    MIXDOWN_URL: str | None = None
+    MIXDOWN_MODAL_API_KEY: str | None = None
+
     # Sentry
     SENTRY_DSN: str | None = None

@@ -163,6 +188,7 @@ class Settings(BaseSettings):
     )

     # Daily.co integration
+    DAILY_API_URL: str = "https://api.daily.co/v1"
     DAILY_API_KEY: str | None = None
     DAILY_WEBHOOK_SECRET: str | None = None
     DAILY_SUBDOMAIN: str | None = None

@@ -176,6 +202,16 @@ class Settings(BaseSettings):
     ZULIP_REALM: str | None = None
     ZULIP_API_KEY: str | None = None
     ZULIP_BOT_EMAIL: str | None = None
+    ZULIP_DAG_STREAM: str | None = None
+    ZULIP_DAG_TOPIC: str | None = None
+
+    # Email / SMTP integration (for transcript email notifications)
+    SMTP_HOST: str | None = None
+    SMTP_PORT: int = 587
+    SMTP_USERNAME: str | None = None
+    SMTP_PASSWORD: str | None = None
+    SMTP_FROM_EMAIL: str | None = None
+    SMTP_USE_TLS: bool = True

     # Hatchet workflow orchestration (always enabled for multitrack processing)
     HATCHET_CLIENT_TOKEN: str | None = None
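These fields follow the pydantic BaseSettings convention, where each field can be overridden by an environment variable of the same name (matched case-insensitively by default). A minimal sketch of that behavior using the pydantic-settings package; the two-field Settings class below is illustrative, not the repo's full class, and the repo may equally be on pydantic v1's built-in BaseSettings with the same semantics:

import os

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    TRANSCRIPT_BACKEND: str = "whisper"
    WHISPER_FILE_MODEL: str = "tiny"


# environment variables override the declared defaults at instantiation time
os.environ["WHISPER_FILE_MODEL"] = "base"
print(Settings().WHISPER_FILE_MODEL)  # -> "base"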
@@ -17,6 +17,49 @@ def get_transcripts_storage() -> Storage:
     )


+def get_source_storage(platform: str) -> Storage:
+    """Get storage for reading/deleting source recording files from the platform's bucket.
+
+    Returns an AwsStorage configured with the platform's worker credentials
+    (access keys), or falls back to get_transcripts_storage() when platform-specific
+    credentials aren't configured (e.g., single-bucket setups).
+
+    Args:
+        platform: Recording platform name ("daily", "whereby", or other).
+    """
+    if platform == "daily":
+        if (
+            settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID
+            and settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY
+            and settings.DAILYCO_STORAGE_AWS_BUCKET_NAME
+        ):
+            from reflector.storage.storage_aws import AwsStorage
+
+            return AwsStorage(
+                aws_bucket_name=settings.DAILYCO_STORAGE_AWS_BUCKET_NAME,
+                aws_region=settings.DAILYCO_STORAGE_AWS_REGION or "us-east-1",
+                aws_access_key_id=settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID,
+                aws_secret_access_key=settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY,
+            )
+
+    elif platform == "whereby":
+        if (
+            settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID
+            and settings.WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY
+            and settings.WHEREBY_STORAGE_AWS_BUCKET_NAME
+        ):
+            from reflector.storage.storage_aws import AwsStorage
+
+            return AwsStorage(
+                aws_bucket_name=settings.WHEREBY_STORAGE_AWS_BUCKET_NAME,
+                aws_region=settings.WHEREBY_STORAGE_AWS_REGION or "us-east-1",
+                aws_access_key_id=settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID,
+                aws_secret_access_key=settings.WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY,
+            )
+
+    return get_transcripts_storage()
+
+
 def get_whereby_storage() -> Storage:
     """
     Get storage config for Whereby (for passing to Whereby API).

@@ -47,6 +90,9 @@ def get_dailyco_storage() -> Storage:
     """
     Get storage config for Daily.co (for passing to Daily API).

+    Uses role_arn only — access keys are excluded because they're for
+    worker reads (get_source_storage), not for the Daily API.
+
     Usage:
         daily_storage = get_dailyco_storage()
         daily_api.create_meeting(

@@ -57,13 +103,15 @@ def get_dailyco_storage() -> Storage:

     Do NOT use for our file operations - use get_transcripts_storage() instead.
     """
-    # Fail fast if platform-specific config missing
     if not settings.DAILYCO_STORAGE_AWS_BUCKET_NAME:
         raise ValueError(
             "DAILYCO_STORAGE_AWS_BUCKET_NAME required for Daily.co with AWS storage"
         )

-    return Storage.get_instance(
-        name="aws",
-        settings_prefix="DAILYCO_STORAGE_",
+    from reflector.storage.storage_aws import AwsStorage
+
+    return AwsStorage(
+        aws_bucket_name=settings.DAILYCO_STORAGE_AWS_BUCKET_NAME,
+        aws_region=settings.DAILYCO_STORAGE_AWS_REGION or "us-east-1",
+        aws_role_arn=settings.DAILYCO_STORAGE_AWS_ROLE_ARN,
     )
@@ -116,9 +116,12 @@ class Storage:
         expires_in: int = 3600,
         *,
         bucket: str | None = None,
+        extra_params: dict | None = None,
     ) -> str:
         """Generate presigned URL. bucket: override instance default if provided."""
-        return await self._get_file_url(filename, operation, expires_in, bucket=bucket)
+        return await self._get_file_url(
+            filename, operation, expires_in, bucket=bucket, extra_params=extra_params
+        )

     async def _get_file_url(
         self,

@@ -127,6 +130,7 @@ class Storage:
         expires_in: int = 3600,
         *,
         bucket: str | None = None,
+        extra_params: dict | None = None,
     ) -> str:
         raise NotImplementedError


@@ -170,16 +170,23 @@ class AwsStorage(Storage):
         expires_in: int = 3600,
         *,
         bucket: str | None = None,
+        extra_params: dict | None = None,
     ) -> str:
         actual_bucket = bucket or self._bucket_name
         folder = self.aws_folder
         s3filename = f"{folder}/{filename}" if folder else filename
+        params = {}
+        if extra_params:
+            params.update(extra_params)
+        # Always set Bucket/Key after extra_params to prevent overrides
+        params["Bucket"] = actual_bucket
+        params["Key"] = s3filename
         async with self.session.client(
             "s3", config=self.boto_config, endpoint_url=self._endpoint_url
         ) as client:
             presigned_url = await client.generate_presigned_url(
                 operation,
-                Params={"Bucket": actual_bucket, "Key": s3filename},
+                Params=params,
                 ExpiresIn=expires_in,
             )
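The extra_params merge above lets callers add presign-time overrides (for example a response content disposition) while Bucket and Key always win. A synchronous boto3 sketch of the same call shape; bucket and key are placeholders, credentials are assumed to be configured, and the repo itself uses an async client with an identical generate_presigned_url signature:

import boto3

client = boto3.client("s3")

# caller-supplied extras, e.g. force a download filename on GET
extra_params = {"ResponseContentDisposition": 'attachment; filename="audio.mp3"'}

params = dict(extra_params)
params["Bucket"] = "my-bucket"      # set after the merge so it cannot be overridden
params["Key"] = "transcripts/audio.mp3"

url = client.generate_presigned_url("get_object", Params=params, ExpiresIn=3600)
print(url)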
server/reflector/tools/deleted_transcripts.py (new file, 257 lines)
@@ -0,0 +1,257 @@
#!/usr/bin/env python
"""
CLI tool for managing soft-deleted transcripts.

Usage:
    uv run python -m reflector.tools.deleted_transcripts list
    uv run python -m reflector.tools.deleted_transcripts files <transcript_id>
    uv run python -m reflector.tools.deleted_transcripts download <transcript_id> [--output-dir ./]
"""

import argparse
import asyncio
import json
import os

import structlog

from reflector.db import get_database
from reflector.db.meetings import meetings_controller
from reflector.db.recordings import recordings_controller
from reflector.db.transcripts import Transcript, transcripts
from reflector.storage import get_source_storage, get_transcripts_storage

logger = structlog.get_logger(__name__)


async def list_deleted():
    """List all soft-deleted transcripts."""
    database = get_database()
    await database.connect()
    try:
        query = (
            transcripts.select()
            .where(transcripts.c.deleted_at.isnot(None))
            .order_by(transcripts.c.deleted_at.desc())
        )
        results = await database.fetch_all(query)

        if not results:
            print("No deleted transcripts found.")
            return

        print(
            f"{'ID':<40} {'Title':<40} {'Deleted At':<28} {'Recording ID':<40} {'Meeting ID'}"
        )
        print("-" * 180)
        for row in results:
            t = Transcript(**row)
            title = (t.title or "")[:38]
            deleted = t.deleted_at.isoformat() if t.deleted_at else ""
            print(
                f"{t.id:<40} {title:<40} {deleted:<28} {t.recording_id or '':<40} {t.meeting_id or ''}"
            )

        print(f"\nTotal: {len(results)} deleted transcript(s)")
    finally:
        await database.disconnect()


async def list_files(transcript_id: str):
    """List all S3 keys associated with a deleted transcript."""
    database = get_database()
    await database.connect()
    try:
        query = transcripts.select().where(transcripts.c.id == transcript_id)
        result = await database.fetch_one(query)
        if not result:
            print(f"Transcript {transcript_id} not found.")
            return

        t = Transcript(**result)
        if t.deleted_at is None:
            print(f"Transcript {transcript_id} is not deleted.")
            return

        print(f"Transcript: {t.id}")
        print(f"Title: {t.title}")
        print(f"Deleted at: {t.deleted_at}")
        print()

        files = []

        # Transcript audio
        if t.audio_location == "storage" and not t.audio_deleted:
            files.append(("Transcript audio", t.storage_audio_path, None))

        # Recording files
        if t.recording_id:
            recording = await recordings_controller.get_by_id(t.recording_id)
            if recording:
                if recording.object_key:
                    files.append(
                        (
                            "Recording object_key",
                            recording.object_key,
                            recording.bucket_name,
                        )
                    )
                if recording.track_keys:
                    for i, key in enumerate(recording.track_keys):
                        files.append((f"Track {i}", key, recording.bucket_name))

        # Cloud video
        if t.meeting_id:
            meeting = await meetings_controller.get_by_id(t.meeting_id)
            if meeting and meeting.daily_composed_video_s3_key:
                files.append(("Cloud video", meeting.daily_composed_video_s3_key, None))

        if not files:
            print("No associated files found.")
            return

        print(f"{'Type':<25} {'Bucket':<30} {'S3 Key'}")
        print("-" * 120)
        for label, key, bucket in files:
            print(f"{label:<25} {bucket or '(default)':<30} {key}")

        # Generate presigned URLs
        print("\nPresigned URLs (valid for 1 hour):")
        print("-" * 120)
        storage = get_transcripts_storage()
        for label, key, bucket in files:
            try:
                url = await storage.get_file_url(key, bucket=bucket, expires_in=3600)
                print(f"{label}: {url}")
            except Exception as e:
                print(f"{label}: ERROR - {e}")
    finally:
        await database.disconnect()


async def download_files(transcript_id: str, output_dir: str):
    """Download all files associated with a deleted transcript."""
    database = get_database()
    await database.connect()
    try:
        query = transcripts.select().where(transcripts.c.id == transcript_id)
        result = await database.fetch_one(query)
        if not result:
            print(f"Transcript {transcript_id} not found.")
            return

        t = Transcript(**result)
        if t.deleted_at is None:
            print(f"Transcript {transcript_id} is not deleted.")
            return

        dest = os.path.join(output_dir, t.id)
        os.makedirs(dest, exist_ok=True)

        storage = get_transcripts_storage()

        # Download transcript audio
        if t.audio_location == "storage" and not t.audio_deleted:
            try:
                data = await storage.get_file(t.storage_audio_path)
                path = os.path.join(dest, "audio.mp3")
                with open(path, "wb") as f:
                    f.write(data)
                print(f"Downloaded: {path}")
            except Exception as e:
                print(f"Failed to download audio: {e}")

        # Download recording files
        if t.recording_id:
            recording = await recordings_controller.get_by_id(t.recording_id)
            if recording and recording.track_keys:
                tracks_dir = os.path.join(dest, "tracks")
                os.makedirs(tracks_dir, exist_ok=True)
                for i, key in enumerate(recording.track_keys):
                    try:
                        data = await storage.get_file(key, bucket=recording.bucket_name)
                        filename = os.path.basename(key) or f"track_{i}"
                        path = os.path.join(tracks_dir, filename)
                        with open(path, "wb") as f:
                            f.write(data)
                        print(f"Downloaded: {path}")
                    except Exception as e:
                        print(f"Failed to download track {i}: {e}")

        # Download cloud video
        if t.meeting_id:
            meeting = await meetings_controller.get_by_id(t.meeting_id)
            if meeting and meeting.daily_composed_video_s3_key:
                try:
                    source_storage = get_source_storage("daily")
                    data = await source_storage.get_file(
                        meeting.daily_composed_video_s3_key
                    )
                    path = os.path.join(dest, "cloud_video.mp4")
                    with open(path, "wb") as f:
                        f.write(data)
                    print(f"Downloaded: {path}")
                except Exception as e:
                    print(f"Failed to download cloud video: {e}")

        # Write metadata
        metadata = {
            "id": t.id,
            "title": t.title,
            "created_at": t.created_at.isoformat() if t.created_at else None,
            "deleted_at": t.deleted_at.isoformat() if t.deleted_at else None,
            "duration": t.duration,
            "source_language": t.source_language,
            "target_language": t.target_language,
            "short_summary": t.short_summary,
            "long_summary": t.long_summary,
            "topics": [topic.model_dump() for topic in t.topics] if t.topics else [],
            "participants": [p.model_dump() for p in t.participants]
            if t.participants
            else [],
            "action_items": t.action_items,
            "webvtt": t.webvtt,
            "recording_id": t.recording_id,
            "meeting_id": t.meeting_id,
        }
        path = os.path.join(dest, "metadata.json")
        with open(path, "w") as f:
            json.dump(metadata, f, indent=2, default=str)
        print(f"Downloaded: {path}")

        print(f"\nAll files saved to: {dest}")
    finally:
        await database.disconnect()


def main():
    parser = argparse.ArgumentParser(description="Manage soft-deleted transcripts")
    subparsers = parser.add_subparsers(dest="command", required=True)

    subparsers.add_parser("list", help="List all deleted transcripts")

    files_parser = subparsers.add_parser(
        "files", help="List S3 keys for a deleted transcript"
    )
    files_parser.add_argument("transcript_id", help="Transcript ID")

    download_parser = subparsers.add_parser(
        "download", help="Download files for a deleted transcript"
    )
    download_parser.add_argument("transcript_id", help="Transcript ID")
    download_parser.add_argument(
        "--output-dir", default=".", help="Output directory (default: .)"
    )

    args = parser.parse_args()

    if args.command == "list":
        asyncio.run(list_deleted())
    elif args.command == "files":
        asyncio.run(list_files(args.transcript_id))
    elif args.command == "download":
        asyncio.run(download_files(args.transcript_id, args.output_dir))


if __name__ == "__main__":
    main()
@@ -7,7 +7,6 @@ import asyncio
 import json
 import shutil
 import sys
-import time
 from pathlib import Path
 from typing import Any, Dict, List, Literal, Tuple
 from urllib.parse import unquote, urlparse

@@ -15,10 +14,8 @@ from urllib.parse import unquote, urlparse
 from botocore.exceptions import BotoCoreError, ClientError, NoCredentialsError

 from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller
 from reflector.hatchet.client import HatchetClientManager
 from reflector.logger import logger
-from reflector.pipelines.main_file_pipeline import (
-    task_pipeline_file_process as task_pipeline_file_process,
-)
 from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipeline_post
 from reflector.pipelines.main_live_pipeline import (
     pipeline_process as live_pipeline_process,

@@ -237,29 +234,22 @@ async def process_live_pipeline(
     # assert documented behaviour: after process, the pipeline isn't ended. this is the reason of calling pipeline_post
     assert pre_final_transcript.status != "ended"

-    # at this point, diarization is running but we have no access to it. run diarization in parallel - one will hopefully win after polling
-    result = live_pipeline_post(transcript_id=transcript_id)
-
-    # result.ready() blocks even without await; it mutates result also
-    while not result.ready():
-        print(f"Status: {result.state}")
-        time.sleep(2)
+    # Trigger post-processing via Hatchet (fire-and-forget)
+    await live_pipeline_post(transcript_id=transcript_id)
+    print("Live post-processing pipeline triggered via Hatchet", file=sys.stderr)


 async def process_file_pipeline(
     transcript_id: TranscriptId,
 ):
-    """Process audio/video file using the optimized file pipeline"""
+    """Process audio/video file using the optimized file pipeline via Hatchet"""

-    # task_pipeline_file_process is a Celery task, need to use .delay() for async execution
-    result = task_pipeline_file_process.delay(transcript_id=transcript_id)
-
-    # Wait for the Celery task to complete
-    while not result.ready():
-        print(f"File pipeline status: {result.state}", file=sys.stderr)
-        time.sleep(2)
-
-    logger.info("File pipeline processing complete")
+    await HatchetClientManager.start_workflow(
+        "FilePipeline",
+        {"transcript_id": str(transcript_id)},
+        additional_metadata={"transcript_id": str(transcript_id)},
+    )
+    print("File pipeline triggered via Hatchet", file=sys.stderr)


@@ -293,7 +283,16 @@ async def process(

         await handler(transcript_id)

-        await extract_result_from_entry(transcript_id, output_path)
+        if pipeline == "file":
+            # File pipeline is async via Hatchet — results not available immediately.
+            # Use reflector.tools.process_transcript with --sync for polling.
+            print(
+                f"File pipeline dispatched for transcript {transcript_id}. "
+                f"Results will be available once the Hatchet workflow completes.",
+                file=sys.stderr,
+            )
+        else:
+            await extract_result_from_entry(transcript_id, output_path)
     finally:
         await database.disconnect()
Some files were not shown because too many files have changed in this diff.