Mirror of https://github.com/Monadical-SAS/reflector.git
Synced 2025-12-20 20:29:06 +00:00

Compare commits (35 commits)
| SHA1 |
|---|
| c8024484b3 |
| 28f87c09dc |
| dabf7251db |
| b51b7aa917 |
| a8983b4e7e |
| fe47c46489 |
| a2bb6a27d6 |
| 7f0b728991 |
| 692895c859 |
| d63040e2fd |
| 8d696aa775 |
| f6ca07505f |
| 3aef926203 |
| 0b2c82227d |
| 689c8075cc |
| 201671368a |
| 86d5e26224 |
| 9bec39808f |
| 86ac23868b |
| c442a62787 |
| 8e438ca285 |
| 11731c9d38 |
| 4287f8b8ae |
| 3e47c2c057 |
| 616092a9bb |
| 18ed713369 |
| 2801ab3643 |
| b20cad76e6 |
| 28a7258e45 |
| a9a4f32324 |
| 857e035562 |
| 34a3f5618c |
| 1473fd82dc |
| 372202b0e1 |
| d20aac66c4 |
CHANGELOG.md (81 lines changed)
@@ -1,5 +1,86 @@
# Changelog

## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02)

### Bug Fixes

* align daily room settings ([#759](https://github.com/Monadical-SAS/reflector/issues/759)) ([28f87c0](https://github.com/Monadical-SAS/reflector/commit/28f87c09dc459846873d0dde65b03e3d7b2b9399))

## [0.22.2](https://github.com/Monadical-SAS/reflector/compare/v0.22.1...v0.22.2) (2025-12-02)

### Bug Fixes

* daily auto refresh fix ([#755](https://github.com/Monadical-SAS/reflector/issues/755)) ([fe47c46](https://github.com/Monadical-SAS/reflector/commit/fe47c46489c5aa0cc538109f7559cc9accb35c01))
* Skip mixdown for multitrack ([#760](https://github.com/Monadical-SAS/reflector/issues/760)) ([b51b7aa](https://github.com/Monadical-SAS/reflector/commit/b51b7aa9176c1a53ba57ad99f5e976c804a1e80c))

## [0.22.1](https://github.com/Monadical-SAS/reflector/compare/v0.22.0...v0.22.1) (2025-11-27)

### Bug Fixes

* participants update from daily ([#749](https://github.com/Monadical-SAS/reflector/issues/749)) ([7f0b728](https://github.com/Monadical-SAS/reflector/commit/7f0b728991c1b9f9aae702c96297eae63b561ef5))

## [0.22.0](https://github.com/Monadical-SAS/reflector/compare/v0.21.0...v0.22.0) (2025-11-26)

### Features

* Multitrack segmentation ([#747](https://github.com/Monadical-SAS/reflector/issues/747)) ([d63040e](https://github.com/Monadical-SAS/reflector/commit/d63040e2fdc07e7b272e85a39eb2411cd6a14798))

## [0.21.0](https://github.com/Monadical-SAS/reflector/compare/v0.20.0...v0.21.0) (2025-11-26)

### Features

* add transcript format parameter to GET endpoint ([#709](https://github.com/Monadical-SAS/reflector/issues/709)) ([f6ca075](https://github.com/Monadical-SAS/reflector/commit/f6ca07505f34483b02270a2ef3bd809e9d2e1045))

## [0.20.0](https://github.com/Monadical-SAS/reflector/compare/v0.19.0...v0.20.0) (2025-11-25)

### Features

* link transcript participants ([#737](https://github.com/Monadical-SAS/reflector/issues/737)) ([9bec398](https://github.com/Monadical-SAS/reflector/commit/9bec39808fc6322612d8b87e922a6f7901fc01c1))
* transcript restart script ([#742](https://github.com/Monadical-SAS/reflector/issues/742)) ([86d5e26](https://github.com/Monadical-SAS/reflector/commit/86d5e26224bb55a0f1cc785aeda52065bb92ee6f))

## [0.19.0](https://github.com/Monadical-SAS/reflector/compare/v0.18.0...v0.19.0) (2025-11-25)

### Features

* dailyco api module ([#725](https://github.com/Monadical-SAS/reflector/issues/725)) ([4287f8b](https://github.com/Monadical-SAS/reflector/commit/4287f8b8aeee60e51db7539f4dcbda5f6e696bd8))
* dailyco poll ([#730](https://github.com/Monadical-SAS/reflector/issues/730)) ([8e438ca](https://github.com/Monadical-SAS/reflector/commit/8e438ca285152bd48fdc42767e706fb448d3525c))
* multitrack cli ([#735](https://github.com/Monadical-SAS/reflector/issues/735)) ([11731c9](https://github.com/Monadical-SAS/reflector/commit/11731c9d38439b04e93b1c3afbd7090bad11a11f))

### Bug Fixes

* default platform fix ([#736](https://github.com/Monadical-SAS/reflector/issues/736)) ([c442a62](https://github.com/Monadical-SAS/reflector/commit/c442a627873ca667656eeaefb63e54ab10b8d19e))
* parakeet vad not getting the end timestamp ([#728](https://github.com/Monadical-SAS/reflector/issues/728)) ([18ed713](https://github.com/Monadical-SAS/reflector/commit/18ed7133693653ef4ddac6c659a8c14b320d1657))
* start raw tracks recording ([#729](https://github.com/Monadical-SAS/reflector/issues/729)) ([3e47c2c](https://github.com/Monadical-SAS/reflector/commit/3e47c2c0573504858e0d2e1798b6ed31f16b4a5d))

## [0.18.0](https://github.com/Monadical-SAS/reflector/compare/v0.17.0...v0.18.0) (2025-11-14)

### Features

* daily QOL: participants dictionary ([#721](https://github.com/Monadical-SAS/reflector/issues/721)) ([b20cad7](https://github.com/Monadical-SAS/reflector/commit/b20cad76e69fb6a76405af299a005f1ddcf60eae))

### Bug Fixes

* add proccessing page to file upload and reprocessing ([#650](https://github.com/Monadical-SAS/reflector/issues/650)) ([28a7258](https://github.com/Monadical-SAS/reflector/commit/28a7258e45317b78e60e6397be2bc503647eaace))
* copy transcript ([#674](https://github.com/Monadical-SAS/reflector/issues/674)) ([a9a4f32](https://github.com/Monadical-SAS/reflector/commit/a9a4f32324f66c838e081eee42bb9502f38c1db1))

## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13)

### Features

* add API key management UI ([#716](https://github.com/Monadical-SAS/reflector/issues/716)) ([372202b](https://github.com/Monadical-SAS/reflector/commit/372202b0e1a86823900b0aa77be1bfbc2893d8a1))
* daily.co support as alternative to whereby ([#691](https://github.com/Monadical-SAS/reflector/issues/691)) ([1473fd8](https://github.com/Monadical-SAS/reflector/commit/1473fd82dc472c394cbaa2987212ad662a74bcac))

## [0.16.0](https://github.com/Monadical-SAS/reflector/compare/v0.15.0...v0.16.0) (2025-10-24)
@@ -168,6 +168,12 @@ You can manually process an audio file by calling the process tool:
uv run python -m reflector.tools.process path/to/audio.wav
```

## Reprocessing any transcription

```bash
uv run -m reflector.tools.process_transcript 81ec38d1-9dd7-43d2-b3f8-51f4d34a07cd --sync
```

## Build-time env variables

Next.js projects typically use NEXT_PUBLIC_-prefixed build-time variables. We don't have those because we need to serve a customizable prebuilt Docker container.
docs/transcript.md (new file, 241 lines)
@@ -0,0 +1,241 @@
# Transcript Formats

The Reflector API provides multiple output formats for transcript data through the `transcript_format` query parameter on the GET `/v1/transcripts/{id}` endpoint.

## Overview

When retrieving a transcript, you can specify the desired format using the `transcript_format` query parameter. The API supports four formats optimized for different use cases:

- **text** - Plain text with speaker names (default)
- **text-timestamped** - Timestamped text with speaker names
- **webvtt-named** - WebVTT subtitle format with participant names
- **json** - Structured JSON segments with full metadata

All formats include participant information when available, resolving speaker IDs to actual names.

## Query Parameter Usage

```
GET /v1/transcripts/{id}?transcript_format={format}
```

### Parameters

- `transcript_format` (optional): The desired output format
  - Type: `"text" | "text-timestamped" | "webvtt-named" | "json"`
  - Default: `"text"`
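For example, a minimal Python sketch of such a request using `httpx` (the base URL, transcript ID, and any auth headers are placeholders; deployments differ):

```python
import httpx

def get_transcript(base_url: str, transcript_id: str, fmt: str = "text") -> dict:
    # GET /v1/transcripts/{id}?transcript_format={format}
    resp = httpx.get(
        f"{base_url}/v1/transcripts/{transcript_id}",
        params={"transcript_format": fmt},
    )
    resp.raise_for_status()
    return resp.json()

# e.g. get_transcript("https://reflector.example.com", "transcript_123", "json")
```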
## Format Descriptions

### Text Format (`text`)

**Use case:** Simple, human-readable transcript for display or export.

**Format:** Speaker names followed by their dialogue, one line per segment.

**Example:**
```
John Smith: Hello everyone
Jane Doe: Hi there
John Smith: How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "text",
  "transcript": "John Smith: Hello everyone\nJane Doe: Hi there\nJohn Smith: How are you today?",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### Text Timestamped Format (`text-timestamped`)

**Use case:** Transcript with timing information for navigation or reference.

**Format:** `[MM:SS]` timestamp prefix before each speaker and dialogue.

**Example:**
```
[00:00] John Smith: Hello everyone
[00:05] Jane Doe: Hi there
[00:12] John Smith: How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=text-timestamped
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "text-timestamped",
  "transcript": "[00:00] John Smith: Hello everyone\n[00:05] Jane Doe: Hi there\n[00:12] John Smith: How are you today?",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### WebVTT Named Format (`webvtt-named`)

**Use case:** Subtitle files for video players, accessibility tools, or video editing.

**Format:** Standard WebVTT subtitle format with voice tags using participant names.

**Example:**
```
WEBVTT

00:00:00.000 --> 00:00:05.000
<v John Smith>Hello everyone

00:00:05.000 --> 00:00:12.000
<v Jane Doe>Hi there

00:00:12.000 --> 00:00:18.000
<v John Smith>How are you today?
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=webvtt-named
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "webvtt-named",
  "transcript": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\n<v John Smith>Hello everyone\n\n...",
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

### JSON Format (`json`)

**Use case:** Programmatic access with full timing and speaker metadata.

**Format:** Array of segment objects with speaker information, text content, and precise timing.

**Example:**
```json
[
  {
    "speaker": 0,
    "speaker_name": "John Smith",
    "text": "Hello everyone",
    "start": 0.0,
    "end": 5.0
  },
  {
    "speaker": 1,
    "speaker_name": "Jane Doe",
    "text": "Hi there",
    "start": 5.0,
    "end": 12.0
  },
  {
    "speaker": 0,
    "speaker_name": "John Smith",
    "text": "How are you today?",
    "start": 12.0,
    "end": 18.0
  }
]
```

**Request:**
```bash
GET /v1/transcripts/{id}?transcript_format=json
```

**Response:**
```json
{
  "id": "transcript_123",
  "name": "Meeting Recording",
  "transcript_format": "json",
  "transcript": [
    {
      "speaker": 0,
      "speaker_name": "John Smith",
      "text": "Hello everyone",
      "start": 0.0,
      "end": 5.0
    },
    {
      "speaker": 1,
      "speaker_name": "Jane Doe",
      "text": "Hi there",
      "start": 5.0,
      "end": 12.0
    }
  ],
  "participants": [
    {"id": "p1", "speaker": 0, "name": "John Smith"},
    {"id": "p2", "speaker": 1, "name": "Jane Doe"}
  ],
  ...
}
```

## Response Structure

All formats return the same base transcript metadata, with an additional `transcript_format` field and a format-specific `transcript` field:

### Common Fields

- `id`: Transcript identifier
- `user_id`: Owner user ID (if authenticated)
- `name`: Transcript name
- `status`: Processing status
- `locked`: Whether transcript is locked for editing
- `duration`: Total duration in seconds
- `title`: Auto-generated or custom title
- `short_summary`: Brief summary
- `long_summary`: Detailed summary
- `created_at`: Creation timestamp
- `share_mode`: Access control setting
- `source_language`: Original audio language
- `target_language`: Translation target language
- `reviewed`: Whether transcript has been reviewed
- `meeting_id`: Associated meeting ID (if applicable)
- `source_kind`: Source type (live, file, room)
- `room_id`: Associated room ID (if applicable)
- `audio_deleted`: Whether audio has been deleted
- `participants`: Array of participant objects with speaker mappings

### Format-Specific Fields

- `transcript_format`: The format identifier (discriminator field)
- `transcript`: The formatted transcript content (string for text/webvtt formats, array for json format)

## Speaker Name Resolution

All formats resolve speaker IDs to participant names when available:

- If a participant exists for the speaker ID, their name is used
- If no participant exists, a default name like "Speaker 0" is generated
- Speaker IDs are integers (0, 1, 2, etc.) assigned during diarization
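A minimal sketch of this resolution rule (field names taken from the participant objects shown above):

```python
def resolve_speaker_name(speaker: int, participants: list[dict]) -> str:
    # Use the participant's name when a mapping exists for this speaker ID...
    for p in participants:
        if p.get("speaker") == speaker:
            return p["name"]
    # ...otherwise fall back to a generated default like "Speaker 0".
    return f"Speaker {speaker}"
```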
@@ -81,9 +81,9 @@ image = (
         "cuda-python==12.8.0",
         "fastapi==0.115.12",
         "numpy<2",
-        "librosa==0.10.1",
+        "librosa==0.11.0",
         "requests",
-        "silero-vad==5.1.0",
+        "silero-vad==6.2.0",
         "torch",
     )
     .entrypoint([])  # silence chatty logs by container on start

@@ -306,6 +306,7 @@ class TranscriberParakeetFile:
     ) -> Generator[TimeSegment, None, None]:
         """Generate speech segments using VAD with start/end sample indices"""
         vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
+        audio_duration = len(audio_array) / float(SAMPLERATE)
         window_size = VAD_CONFIG["window_size"]
         start = None

@@ -332,6 +333,10 @@ class TranscriberParakeetFile:
                 yield TimeSegment(start_time, end_time)
                 start = None

+        if start is not None:
+            start_time = start / float(SAMPLERATE)
+            yield TimeSegment(start_time, audio_duration)
+
        vad_iterator.reset_states()

    def batch_speech_segments(
server/docs/video-platforms/README.md (new file, 236 lines)
@@ -0,0 +1,236 @@
# Reflector Architecture: Whereby + Daily.co Recording Storage

## System Overview

```mermaid
graph TB
    subgraph "Actors"
        APP[Our App<br/>Reflector]
        WHEREBY[Whereby Service<br/>External]
        DAILY[Daily.co Service<br/>External]
    end

    subgraph "AWS S3 Buckets"
        TRANSCRIPT_BUCKET[Transcript Bucket<br/>reflector-transcripts<br/>Output: Processed MP3s]
        WHEREBY_BUCKET[Whereby Bucket<br/>reflector-whereby-recordings<br/>Input: Raw MP4s]
        DAILY_BUCKET[Daily.co Bucket<br/>reflector-dailyco-recordings<br/>Input: Raw WebM tracks]
    end

    subgraph "AWS Infrastructure"
        SQS[SQS Queue<br/>Whereby notifications]
    end

    subgraph "Database"
        DB[(PostgreSQL<br/>Recordings, Transcripts, Meetings)]
    end

    APP -->|Write processed| TRANSCRIPT_BUCKET
    APP -->|Read/Delete| WHEREBY_BUCKET
    APP -->|Read/Delete| DAILY_BUCKET
    APP -->|Poll| SQS
    APP -->|Store metadata| DB

    WHEREBY -->|Write recordings| WHEREBY_BUCKET
    WHEREBY_BUCKET -->|S3 Event| SQS
    WHEREBY -->|Participant webhooks<br/>room.client.joined/left| APP

    DAILY -->|Write recordings| DAILY_BUCKET
    DAILY -->|Recording webhook<br/>recording.ready-to-download| APP
```

**Note on Webhook vs S3 Event for Recording Processing:**
- **Whereby**: Uses S3 Events → SQS for recording availability (S3 as source of truth, no race conditions)
- **Daily.co**: Uses webhooks for recording availability (more immediate, built-in reliability)
- **Both**: Use webhooks for participant tracking (real-time updates)

## Credentials & Permissions

```mermaid
graph LR
    subgraph "Master Credentials"
        MASTER[TRANSCRIPT_STORAGE_AWS_*<br/>Access Key ID + Secret]
    end

    subgraph "Whereby Upload Credentials"
        WHEREBY_CREDS[AWS_WHEREBY_ACCESS_KEY_*<br/>Access Key ID + Secret]
    end

    subgraph "Daily.co Upload Role"
        DAILY_ROLE[DAILY_STORAGE_AWS_ROLE_ARN<br/>IAM Role ARN]
    end

    subgraph "Our App Uses"
        MASTER -->|Read/Write/Delete| TRANSCRIPT_BUCKET[Transcript Bucket]
        MASTER -->|Read/Delete| WHEREBY_BUCKET[Whereby Bucket]
        MASTER -->|Read/Delete| DAILY_BUCKET[Daily.co Bucket]
        MASTER -->|Poll/Delete| SQS[SQS Queue]
    end

    subgraph "We Give To Services"
        WHEREBY_CREDS -->|Passed in API call| WHEREBY_SERVICE[Whereby Service]
        WHEREBY_SERVICE -->|Write Only| WHEREBY_BUCKET

        DAILY_ROLE -->|Passed in API call| DAILY_SERVICE[Daily.co Service]
        DAILY_SERVICE -->|Assume Role| DAILY_ROLE
        DAILY_SERVICE -->|Write Only| DAILY_BUCKET
    end
```

# Video Platform Recording Integration

This document explains how Reflector receives and identifies multitrack audio recordings from different video platforms.

## Platform Comparison

| Platform | Delivery Method | Track Identification |
|----------|----------------|---------------------|
| **Daily.co** | Webhook | Explicit track list in payload |
| **Whereby** | SQS (S3 notifications) | Single file per notification |

---

## Daily.co

**Note:** Primary discovery is via polling (`poll_daily_recordings`), with webhooks as backup.

Daily.co uses **webhooks** to notify Reflector when recordings are ready.

### How It Works

1. **Daily.co sends webhook** when a recording is ready
   - Event type: `recording.ready-to-download`
   - Endpoint: `/v1/daily/webhook` (`reflector/views/daily.py:46-102`)

2. **Webhook payload explicitly includes track list**:
   ```json
   {
     "recording_id": "7443ee0a-dab1-40eb-b316-33d6c0d5ff88",
     "room_name": "daily-20251020193458",
     "tracks": [
       {
         "type": "audio",
         "s3Key": "monadical/daily-20251020193458/1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922",
         "size": 831843
       },
       {
         "type": "audio",
         "s3Key": "monadical/daily-20251020193458/1760988935484-a37c35e3-6f8e-4274-a482-e9d0f102a732-cam-audio-1760988943823",
         "size": 408438
       },
       {
         "type": "video",
         "s3Key": "monadical/daily-20251020193458/...-video.webm",
         "size": 30000000
       }
     ]
   }
   ```

3. **System extracts audio tracks** (`daily.py:211`):
   ```python
   track_keys = [t.s3Key for t in tracks if t.type == "audio"]
   ```

4. **Triggers multitrack processing** (`daily.py:213-218`):
   ```python
   process_multitrack_recording.delay(
       bucket_name=bucket_name,    # reflector-dailyco-local
       room_name=room_name,        # daily-20251020193458
       recording_id=recording_id,  # 7443ee0a-dab1-40eb-b316-33d6c0d5ff88
       track_keys=track_keys       # Only audio s3Keys
   )
   ```

### Key Advantage: No Ambiguity

Even though multiple meetings may share the same S3 bucket/folder (`monadical/`), **there's no ambiguity** because:
- Each webhook payload contains the exact `s3Key` list for that specific `recording_id`
- There is no need to scan folders or guess which files belong together
- Each track's s3Key includes the room timestamp subfolder (e.g., `daily-20251020193458/`)

The room name includes a timestamp (`daily-20251020193458`) to keep recordings organized, but **the webhook's explicit track list is what prevents mixing files from different meetings**.

### Track Timeline Extraction

Daily.co provides timing information in two places:

**1. PyAV WebM Metadata (current approach)**:
```python
# Read from WebM container stream metadata
stream.start_time = 8.130s  # Meeting-relative timing
```

**2. Filename Timestamps (alternative approach, commit 3bae9076)**:
```
Filename format: {recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm
Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm

Parse timestamps:
- recording_start_ts: 1760988935484 (Unix ms)
- track_start_ts: 1760988935922 (Unix ms)
- offset: (1760988935922 - 1760988935484) / 1000 = 0.438s
```
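For reference, a small sketch of that parse (the filename layout is taken from the example above; the helper name is ours):

```python
def filename_offset_seconds(s3_key: str) -> float:
    # "{recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm" (Unix ms)
    name = s3_key.rsplit("/", 1)[-1].removesuffix(".webm")
    recording_start_ms = int(name.split("-", 1)[0])
    track_start_ms = int(name.rsplit("-", 1)[-1])
    return (track_start_ms - recording_start_ms) / 1000.0

# e.g. "...-cam-audio-1760988935922.webm" -> (1760988935922 - 1760988935484) / 1000 = 0.438
```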
**Time Difference (PyAV vs Filename)**:
```
Track 0:
  Filename offset: 438ms
  PyAV metadata:   229ms
  Difference:      209ms

Track 1:
  Filename offset: 8339ms
  PyAV metadata:   8130ms
  Difference:      209ms
```

The **consistent 209ms delta** suggests a network/encoding delay between file upload initiation (filename) and the actual audio stream start (metadata).

**The current implementation uses PyAV metadata** because:
- It is more accurate (it represents when audio actually started)
- Padding BEFORE transcription produces correct Whisper timestamps automatically
- No manual offset adjustment is needed during transcript merge

### Why Re-encoding During Padding

Padding involves re-encoding as a side effect, and that re-encoding matters for Daily.co + Whisper:

**Problem:** Daily.co skips frames in recordings when the microphone is muted or paused
- WebM containers have gaps where audio frames should be
- Whisper doesn't understand these gaps and produces incorrect timestamps
- Example: 5s of audio with 2s muted → the file has frames for only 3s, so Whisper thinks the duration is 3s

**Solution:** Re-encoding via a PyAV filter graph (`adelay` + `aresample`)
- Restores missing frames as silence
- Produces a continuous audio stream without gaps
- Whisper then sees the correct duration and produces accurate timestamps

**Why combined with padding:**
- We are already re-encoding for padding (adding initial silence)
- It is more performant to do both operations in a single PyAV pipeline
- Padded values are needed for mixdown anyway (creating the final MP3)

Implementation: `main_multitrack_pipeline.py:_apply_audio_padding_streaming()`
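To illustrate, a rough sketch of a combined pad-and-gap-fill pass with a PyAV filter graph; this is our own simplification, not the actual `_apply_audio_padding_streaming()` code, and the exact PyAV calls should be checked against the installed version:

```python
import av

def pad_with_silence(src_path: str, dst_path: str, delay_ms: int) -> None:
    """Prepend delay_ms of silence and fill muted-gap frames in one re-encode."""
    with av.open(src_path) as src, av.open(dst_path, "w") as dst:
        in_stream = src.streams.audio[0]
        out_stream = dst.add_stream("mp3", rate=in_stream.rate)

        graph = av.filter.Graph()
        buf = graph.add_abuffer(template=in_stream)
        # adelay expects one delay value (ms) per channel
        delay = graph.add("adelay", "|".join([str(delay_ms)] * in_stream.channels))
        # async=1 makes aresample insert silence for timestamp gaps
        resample = graph.add("aresample", "async=1")
        sink = graph.add("abuffersink")
        buf.link_to(delay)
        delay.link_to(resample)
        resample.link_to(sink)
        graph.configure()

        for frame in src.decode(in_stream):
            graph.push(frame)
            while True:
                try:
                    out = graph.pull()
                except av.error.FFmpegError:  # needs more input / end of stream
                    break
                dst.mux(out_stream.encode(out))
        dst.mux(out_stream.encode(None))  # flush the encoder
```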
---

## Whereby (SQS-based)

Whereby uses **AWS SQS** (via S3 notifications) to notify Reflector when files are uploaded.

### How It Works

1. **Whereby uploads recording** to S3
2. **S3 sends notification** to SQS queue (one notification per file)
3. **Reflector polls SQS queue** (`worker/process.py:process_messages()`)
4. **System processes single file** (`worker/process.py:process_recording()`); a sketch of this loop follows
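A minimal sketch of that polling loop using boto3 (the queue URL and `process_recording` handler are placeholders; the real implementation lives in `worker/process.py`):

```python
import json

import boto3

sqs = boto3.client("sqs")

def process_recording(bucket: str, key: str) -> None:
    ...  # placeholder for the real handler in worker/process.py

def poll_once(queue_url: str) -> None:
    # Long-poll the queue for S3 "object created" notifications
    resp = sqs.receive_message(
        QueueUrl=queue_url, MaxNumberOfMessages=10, WaitTimeSeconds=20
    )
    for msg in resp.get("Messages", []):
        body = json.loads(msg["Body"])
        # One S3 event record per uploaded file
        for record in body.get("Records", []):
            bucket = record["s3"]["bucket"]["name"]
            key = record["s3"]["object"]["key"]
            process_recording(bucket, key)
        sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=msg["ReceiptHandle"])
```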
### Key Difference from Daily.co

**Whereby (SQS):** The system receives an S3 notification that "file X was created"; it only knows about one file at a time and would need to scan the folder to find related files.

**Daily.co (Webhook):** Daily explicitly tells the system which files belong together in the webhook payload.

---
@@ -71,3 +71,30 @@ DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run
## Sentry DSN configuration
#SENTRY_DSN=

## =======================================================
## Video Platform Configuration
## =======================================================

## Whereby
#WHEREBY_API_KEY=your-whereby-api-key
#WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret
#WHEREBY_STORAGE_AWS_ACCESS_KEY_ID=your-aws-key
#WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret
#AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/...

## Daily.co
#DAILY_API_KEY=your-daily-api-key
#DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
#DAILY_SUBDOMAIN=your-subdomain
#DAILY_WEBHOOK_UUID= # Auto-populated by recreate_daily_webhook.py script
#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
#DAILYCO_STORAGE_AWS_REGION=us-west-2

## Whereby (optional separate bucket)
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby
#WHEREBY_STORAGE_AWS_REGION=us-east-1

## Platform Configuration
#DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms
@@ -0,0 +1,50 @@
"""add_platform_support

Revision ID: 1e49625677e4
Revises: 9e3f7b2a4c8e
Create Date: 2025-10-08 13:17:29.943612

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "1e49625677e4"
down_revision: Union[str, None] = "9e3f7b2a4c8e"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    """Add platform field with default 'whereby' for backward compatibility."""
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column(
                "platform",
                sa.String(),
                nullable=True,
                server_default=None,
            )
        )

    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column(
                "platform",
                sa.String(),
                nullable=False,
                server_default="whereby",
            )
        )


def downgrade() -> None:
    """Remove platform field."""
    with op.batch_alter_table("meeting", schema=None) as batch_op:
        batch_op.drop_column("platform")

    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.drop_column("platform")
@@ -0,0 +1,79 @@
"""add daily participant session table with immutable left_at

Revision ID: 2b92a1b03caa
Revises: f8294b31f022
Create Date: 2025-11-13 20:29:30.486577

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "2b92a1b03caa"
down_revision: Union[str, None] = "f8294b31f022"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Create table
    op.create_table(
        "daily_participant_session",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("meeting_id", sa.String(), nullable=False),
        sa.Column("room_id", sa.String(), nullable=False),
        sa.Column("session_id", sa.String(), nullable=False),
        sa.Column("user_id", sa.String(), nullable=True),
        sa.Column("user_name", sa.String(), nullable=False),
        sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
        sa.ForeignKeyConstraint(["meeting_id"], ["meeting.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["room_id"], ["room.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
    )
    with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
        batch_op.create_index(
            "idx_daily_session_meeting_left", ["meeting_id", "left_at"], unique=False
        )
        batch_op.create_index("idx_daily_session_room", ["room_id"], unique=False)

    # Create trigger function to prevent left_at from being updated once set
    op.execute("""
        CREATE OR REPLACE FUNCTION prevent_left_at_update()
        RETURNS TRIGGER AS $$
        BEGIN
            IF OLD.left_at IS NOT NULL THEN
                RAISE EXCEPTION 'left_at is immutable once set';
            END IF;
            RETURN NEW;
        END;
        $$ LANGUAGE plpgsql;
    """)

    # Create trigger
    op.execute("""
        CREATE TRIGGER prevent_left_at_update_trigger
        BEFORE UPDATE ON daily_participant_session
        FOR EACH ROW
        EXECUTE FUNCTION prevent_left_at_update();
    """)


def downgrade() -> None:
    # Drop trigger
    op.execute(
        "DROP TRIGGER IF EXISTS prevent_left_at_update_trigger ON daily_participant_session;"
    )

    # Drop trigger function
    op.execute("DROP FUNCTION IF EXISTS prevent_left_at_update();")

    # Drop indexes and table
    with op.batch_alter_table("daily_participant_session", schema=None) as batch_op:
        batch_op.drop_index("idx_daily_session_room")
        batch_op.drop_index("idx_daily_session_meeting_left")

    op.drop_table("daily_participant_session")
@@ -0,0 +1,30 @@
"""Make room platform non-nullable with dynamic default

Revision ID: 5d6b9df9b045
Revises: 2b92a1b03caa
Create Date: 2025-11-21 13:22:25.756584

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "5d6b9df9b045"
down_revision: Union[str, None] = "2b92a1b03caa"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.execute("UPDATE room SET platform = 'whereby' WHERE platform IS NULL")

    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.alter_column("platform", existing_type=sa.String(), nullable=False)


def downgrade() -> None:
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.alter_column("platform", existing_type=sa.String(), nullable=True)
server/migrations/versions/bbafedfa510c_add_user_table.py (new file, 38 lines)
@@ -0,0 +1,38 @@
"""add user table

Revision ID: bbafedfa510c
Revises: 5d6b9df9b045
Create Date: 2025-11-19 21:06:30.543262

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "bbafedfa510c"
down_revision: Union[str, None] = "5d6b9df9b045"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.create_table(
        "user",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("email", sa.String(), nullable=False),
        sa.Column("authentik_uid", sa.String(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), nullable=False),
        sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )

    with op.batch_alter_table("user", schema=None) as batch_op:
        batch_op.create_index("idx_user_authentik_uid", ["authentik_uid"], unique=True)
        batch_op.create_index("idx_user_email", ["email"], unique=False)


def downgrade() -> None:
    op.drop_table("user")
server/migrations/versions/f8294b31f022_add_track_keys.py (new file, 28 lines)
@@ -0,0 +1,28 @@
"""add_track_keys

Revision ID: f8294b31f022
Revises: 1e49625677e4
Create Date: 2025-10-27 18:52:17.589167

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "f8294b31f022"
down_revision: Union[str, None] = "1e49625677e4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    with op.batch_alter_table("recording", schema=None) as batch_op:
        batch_op.add_column(sa.Column("track_keys", sa.JSON(), nullable=True))


def downgrade() -> None:
    with op.batch_alter_table("recording", schema=None) as batch_op:
        batch_op.drop_column("track_keys")
@@ -12,6 +12,7 @@ from reflector.events import subscribers_shutdown, subscribers_startup
 from reflector.logger import logger
 from reflector.metrics import metrics_init
 from reflector.settings import settings
+from reflector.views.daily import router as daily_router
 from reflector.views.meetings import router as meetings_router
 from reflector.views.rooms import router as rooms_router
 from reflector.views.rtc_offer import router as rtc_offer_router

@@ -96,6 +97,7 @@ app.include_router(user_api_keys_router, prefix="/v1")
 app.include_router(user_ws_router, prefix="/v1")
 app.include_router(zulip_router, prefix="/v1")
 app.include_router(whereby_router, prefix="/v1")
+app.include_router(daily_router, prefix="/v1/daily")
 add_pagination(app)

 # prepare celery
@@ -6,8 +6,10 @@ from jose import JWTError, jwt
 from pydantic import BaseModel

 from reflector.db.user_api_keys import user_api_keys_controller
+from reflector.db.users import user_controller
 from reflector.logger import logger
 from reflector.settings import settings
+from reflector.utils import generate_uuid4

 oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
 api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

@@ -74,9 +76,21 @@ async def _authenticate_user(
     if jwt_token:
         try:
             payload = jwtauth.verify_token(jwt_token)
-            sub = payload["sub"]
+            authentik_uid = payload["sub"]
             email = payload["email"]
-            user_infos.append(UserInfo(sub=sub, email=email))
+
+            user = await user_controller.get_by_authentik_uid(authentik_uid)
+            if not user:
+                logger.info(
+                    f"Creating new user on first login: {authentik_uid} ({email})"
+                )
+                user = await user_controller.create_or_update(
+                    id=generate_uuid4(),
+                    authentik_uid=authentik_uid,
+                    email=email,
+                )
+
+            user_infos.append(UserInfo(sub=user.id, email=email))
         except JWTError as e:
             logger.error(f"JWT error: {e}")
             raise HTTPException(status_code=401, detail="Invalid authentication")
server/reflector/dailyco_api/README.md (new file, 6 lines)
@@ -0,0 +1,6 @@
Anything about Daily.co API interaction:

- webhook event shapes
- REST API client

No existing REST API client was found in the wild; the official library is for driving a video call as a bot.
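A minimal usage sketch of the client this package exports (the API key and room name are placeholders; `room.url` assumes the field described in the client's docstrings):

```python
import asyncio

from reflector.dailyco_api import CreateRoomRequest, DailyApiClient

async def main() -> None:
    async with DailyApiClient(api_key="your_api_key") as client:
        room = await client.create_room(CreateRoomRequest(name="demo-room"))
        print(room.url)  # assumed field on RoomResponse

asyncio.run(main())
```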
server/reflector/dailyco_api/__init__.py (new file, 108 lines)
@@ -0,0 +1,108 @@
"""
Daily.co API Module
"""

# Client
from .client import DailyApiClient, DailyApiError

# Request models
from .requests import (
    CreateMeetingTokenRequest,
    CreateRoomRequest,
    CreateWebhookRequest,
    MeetingTokenProperties,
    RecordingsBucketConfig,
    RoomProperties,
    UpdateWebhookRequest,
)

# Response models
from .responses import (
    MeetingParticipant,
    MeetingParticipantsResponse,
    MeetingResponse,
    MeetingTokenResponse,
    RecordingResponse,
    RecordingS3Info,
    RoomPresenceParticipant,
    RoomPresenceResponse,
    RoomResponse,
    WebhookResponse,
)

# Webhook utilities
from .webhook_utils import (
    extract_room_name,
    parse_participant_joined,
    parse_participant_left,
    parse_recording_error,
    parse_recording_ready,
    parse_recording_started,
    parse_webhook_payload,
    verify_webhook_signature,
)

# Webhook models
from .webhooks import (
    DailyTrack,
    DailyWebhookEvent,
    DailyWebhookEventUnion,
    ParticipantJoinedEvent,
    ParticipantJoinedPayload,
    ParticipantLeftEvent,
    ParticipantLeftPayload,
    RecordingErrorEvent,
    RecordingErrorPayload,
    RecordingReadyEvent,
    RecordingReadyToDownloadPayload,
    RecordingStartedEvent,
    RecordingStartedPayload,
)

__all__ = [
    # Client
    "DailyApiClient",
    "DailyApiError",
    # Requests
    "CreateRoomRequest",
    "RoomProperties",
    "RecordingsBucketConfig",
    "CreateMeetingTokenRequest",
    "MeetingTokenProperties",
    "CreateWebhookRequest",
    "UpdateWebhookRequest",
    # Responses
    "RoomResponse",
    "RoomPresenceResponse",
    "RoomPresenceParticipant",
    "MeetingParticipantsResponse",
    "MeetingParticipant",
    "MeetingResponse",
    "RecordingResponse",
    "RecordingS3Info",
    "MeetingTokenResponse",
    "WebhookResponse",
    # Webhooks
    "DailyWebhookEvent",
    "DailyWebhookEventUnion",
    "DailyTrack",
    "ParticipantJoinedEvent",
    "ParticipantJoinedPayload",
    "ParticipantLeftEvent",
    "ParticipantLeftPayload",
    "RecordingStartedEvent",
    "RecordingStartedPayload",
    "RecordingReadyEvent",
    "RecordingReadyToDownloadPayload",
    "RecordingErrorEvent",
    "RecordingErrorPayload",
    # Webhook utilities
    "verify_webhook_signature",
    "extract_room_name",
    "parse_webhook_payload",
    "parse_participant_joined",
    "parse_participant_left",
    "parse_recording_started",
    "parse_recording_ready",
    "parse_recording_error",
]
server/reflector/dailyco_api/client.py (new file, 573 lines)
@@ -0,0 +1,573 @@
"""
Daily.co API Client

Complete async client for Daily.co REST API with Pydantic models.

Reference: https://docs.daily.co/reference/rest-api
"""

from http import HTTPStatus
from typing import Any

import httpx
import structlog

from reflector.utils.string import NonEmptyString

from .requests import (
    CreateMeetingTokenRequest,
    CreateRoomRequest,
    CreateWebhookRequest,
    UpdateWebhookRequest,
)
from .responses import (
    MeetingParticipantsResponse,
    MeetingResponse,
    MeetingTokenResponse,
    RecordingResponse,
    RoomPresenceResponse,
    RoomResponse,
    WebhookResponse,
)

logger = structlog.get_logger(__name__)


class DailyApiError(Exception):
    """Daily.co API error with full request/response context."""

    def __init__(self, operation: str, response: httpx.Response):
        self.operation = operation
        self.response = response
        self.status_code = response.status_code
        self.response_body = response.text
        self.url = str(response.url)
        self.request_body = (
            response.request.content.decode() if response.request.content else None
        )

        super().__init__(
            f"Daily.co API error: {operation} failed with status {self.status_code}"
        )


class DailyApiClient:
    """
    Complete async client for Daily.co REST API.

    Usage:
        # Direct usage
        client = DailyApiClient(api_key="your_api_key")
        room = await client.create_room(CreateRoomRequest(name="my-room"))
        await client.close()  # Clean up when done

        # Context manager (recommended)
        async with DailyApiClient(api_key="your_api_key") as client:
            room = await client.create_room(CreateRoomRequest(name="my-room"))
    """

    BASE_URL = "https://api.daily.co/v1"
    DEFAULT_TIMEOUT = 10.0

    def __init__(
        self,
        api_key: NonEmptyString,
        webhook_secret: NonEmptyString | None = None,
        timeout: float = DEFAULT_TIMEOUT,
        base_url: NonEmptyString | None = None,
    ):
        """
        Initialize Daily.co API client.

        Args:
            api_key: Daily.co API key (Bearer token)
            webhook_secret: Base64-encoded HMAC secret for webhook verification.
                Must match the 'hmac' value provided when creating webhooks.
                Generate with: base64.b64encode(os.urandom(32)).decode()
            timeout: Default request timeout in seconds
            base_url: Override base URL (for testing)
        """
        self.api_key = api_key
        self.webhook_secret = webhook_secret
        self.timeout = timeout
        self.base_url = base_url or self.BASE_URL

        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }

        self._client: httpx.AsyncClient | None = None

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()

    async def _get_client(self) -> httpx.AsyncClient:
        if self._client is None:
            self._client = httpx.AsyncClient(timeout=self.timeout)
        return self._client

    async def close(self):
        if self._client is not None:
            await self._client.aclose()
            self._client = None

    async def _handle_response(
        self, response: httpx.Response, operation: str
    ) -> dict[str, Any]:
        """
        Handle API response with error logging.

        Args:
            response: HTTP response
            operation: Operation name for logging (e.g., "create_room")

        Returns:
            Parsed JSON response

        Raises:
            DailyApiError: If request failed with full context
        """
        if response.status_code >= 400:
            logger.error(
                f"Daily.co API error: {operation}",
                status_code=response.status_code,
                response_body=response.text,
                request_body=response.request.content.decode()
                if response.request.content
                else None,
                url=str(response.url),
            )
            raise DailyApiError(operation, response)

        return response.json()
    # ============================================================================
    # ROOMS
    # ============================================================================

    async def create_room(self, request: CreateRoomRequest) -> RoomResponse:
        """
        Create a new Daily.co room.

        Reference: https://docs.daily.co/reference/rest-api/rooms/create-room

        Args:
            request: Room creation request with name, privacy, and properties

        Returns:
            Created room data including URL and ID

        Raises:
            DailyApiError: If the API request fails
        """
        client = await self._get_client()
        response = await client.post(
            f"{self.base_url}/rooms",
            headers=self.headers,
            json=request.model_dump(exclude_none=True),
        )

        data = await self._handle_response(response, "create_room")
        return RoomResponse(**data)

    async def get_room(self, room_name: NonEmptyString) -> RoomResponse:
        """
        Get room configuration.

        Args:
            room_name: Daily.co room name

        Returns:
            Room configuration data

        Raises:
            DailyApiError: If the API request fails
        """
        client = await self._get_client()
        response = await client.get(
            f"{self.base_url}/rooms/{room_name}",
            headers=self.headers,
        )

        data = await self._handle_response(response, "get_room")
        return RoomResponse(**data)

    async def get_room_presence(
        self, room_name: NonEmptyString
    ) -> RoomPresenceResponse:
        """
        Get current participants in a room (real-time presence).

        Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence

        Args:
            room_name: Daily.co room name

        Returns:
            List of currently present participants with join time and duration

        Raises:
            DailyApiError: If the API request fails
        """
        client = await self._get_client()
        response = await client.get(
            f"{self.base_url}/rooms/{room_name}/presence",
            headers=self.headers,
        )

        data = await self._handle_response(response, "get_room_presence")
        return RoomPresenceResponse(**data)

    async def delete_room(self, room_name: NonEmptyString) -> None:
        """
        Delete a room (idempotent - succeeds even if room doesn't exist).

        Reference: https://docs.daily.co/reference/rest-api/rooms/delete-room

        Args:
            room_name: Daily.co room name

        Raises:
            DailyApiError: If the API request fails (except 404)
        """
        client = await self._get_client()
        response = await client.delete(
            f"{self.base_url}/rooms/{room_name}",
            headers=self.headers,
        )

        # Idempotent delete - 404 means already deleted
        if response.status_code == HTTPStatus.NOT_FOUND:
            logger.debug("Room not found (already deleted)", room_name=room_name)
            return

        await self._handle_response(response, "delete_room")

    # ============================================================================
    # MEETINGS
    # ============================================================================

    async def get_meeting(self, meeting_id: NonEmptyString) -> MeetingResponse:
        """
        Get full meeting information including participants.

        Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information

        Args:
            meeting_id: Daily.co meeting/session ID

        Returns:
            Meeting metadata including room, duration, participants, and status

        Raises:
            DailyApiError: If the API request fails
        """
        client = await self._get_client()
        response = await client.get(
            f"{self.base_url}/meetings/{meeting_id}",
            headers=self.headers,
        )

        data = await self._handle_response(response, "get_meeting")
        return MeetingResponse(**data)

    async def get_meeting_participants(
        self,
        meeting_id: NonEmptyString,
        limit: int | None = None,
        joined_after: NonEmptyString | None = None,
        joined_before: NonEmptyString | None = None,
    ) -> MeetingParticipantsResponse:
        """
        Get historical participant data from a completed meeting (paginated).

        Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants

        Args:
            meeting_id: Daily.co meeting/session ID
            limit: Maximum number of participant records to return
            joined_after: Return participants who joined after this participant_id
            joined_before: Return participants who joined before this participant_id

        Returns:
            List of participants with join times and duration

        Raises:
            DailyApiError: If the API request fails (404 when no more participants)

        Note:
            For pagination, use joined_after with the last participant_id from the previous response.
            Returns 404 when no more participants remain.
        """
        params = {}
        if limit is not None:
            params["limit"] = limit
        if joined_after is not None:
            params["joined_after"] = joined_after
        if joined_before is not None:
            params["joined_before"] = joined_before

        client = await self._get_client()
        response = await client.get(
            f"{self.base_url}/meetings/{meeting_id}/participants",
            headers=self.headers,
            params=params,
        )

        data = await self._handle_response(response, "get_meeting_participants")
        return MeetingParticipantsResponse(**data)
# ============================================================================
|
||||
# RECORDINGS
|
||||
# ============================================================================
|
||||
|
||||
async def get_recording(self, recording_id: NonEmptyString) -> RecordingResponse:
|
||||
"""
|
||||
https://docs.daily.co/reference/rest-api/recordings/get-recording-information
|
||||
Get recording metadata and status.
|
||||
"""
|
||||
client = await self._get_client()
|
||||
response = await client.get(
|
||||
f"{self.base_url}/recordings/{recording_id}",
|
||||
headers=self.headers,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "get_recording")
|
||||
return RecordingResponse(**data)
|
||||
|
||||
async def list_recordings(
|
||||
self,
|
||||
room_name: NonEmptyString | None = None,
|
||||
starting_after: str | None = None,
|
||||
ending_before: str | None = None,
|
||||
limit: int = 100,
|
||||
) -> list[RecordingResponse]:
|
||||
"""
|
||||
List recordings with optional filters.
|
||||
|
||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
||||
|
||||
Args:
|
||||
room_name: Filter by room name
|
||||
starting_after: Pagination cursor - recording ID to start after
|
||||
ending_before: Pagination cursor - recording ID to end before
|
||||
limit: Max results per page (default 100, max 100)
|
||||
|
||||
Note: starting_after/ending_before are pagination cursors (recording IDs),
|
||||
NOT time filters. API returns recordings in reverse chronological order.
|
||||
"""
|
||||
client = await self._get_client()
|
||||
|
||||
params = {"limit": limit}
|
||||
if room_name:
|
||||
params["room_name"] = room_name
|
||||
if starting_after:
|
||||
params["starting_after"] = starting_after
|
||||
if ending_before:
|
||||
params["ending_before"] = ending_before
|
||||
|
||||
response = await client.get(
|
||||
f"{self.base_url}/recordings",
|
||||
headers=self.headers,
|
||||
params=params,
|
||||
)
|
||||
|
||||
data = await self._handle_response(response, "list_recordings")
|
||||
|
||||
if not isinstance(data, dict) or "data" not in data:
|
||||
logger.error(
|
||||
"Daily.co API returned unexpected format for list_recordings",
|
||||
data_type=type(data).__name__,
|
||||
data_keys=list(data.keys()) if isinstance(data, dict) else None,
|
||||
data_sample=str(data)[:500],
|
||||
room_name=room_name,
|
||||
operation="list_recordings",
|
||||
)
|
||||
raise httpx.HTTPStatusError(
|
||||
message=f"Unexpected response format from list_recordings: {type(data).__name__}",
|
||||
request=response.request,
|
||||
response=response,
|
||||
)
|
||||
|
||||
return [RecordingResponse(**r) for r in data["data"]]
|
||||
|
||||
# ============================================================================
|
||||
# MEETING TOKENS
|
||||
    # ============================================================================

    async def create_meeting_token(
        self, request: CreateMeetingTokenRequest
    ) -> MeetingTokenResponse:
        """
        Create a meeting token for participant authentication.

        Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token

        Args:
            request: Token properties including room name, user_id, permissions

        Returns:
            JWT meeting token

        Raises:
            httpx.HTTPStatusError: If API request fails
        """
        client = await self._get_client()
        response = await client.post(
            f"{self.base_url}/meeting-tokens",
            headers=self.headers,
            json=request.model_dump(exclude_none=True),
        )

        data = await self._handle_response(response, "create_meeting_token")
        return MeetingTokenResponse(**data)

    # ============================================================================
    # WEBHOOKS
    # ============================================================================

    async def list_webhooks(self) -> list[WebhookResponse]:
        """
        List all configured webhooks for this account.

        Reference: https://docs.daily.co/reference/rest-api/webhooks

        Returns:
            List of webhook configurations

        Raises:
            httpx.HTTPStatusError: If API request fails
        """
        client = await self._get_client()
        response = await client.get(
            f"{self.base_url}/webhooks",
            headers=self.headers,
        )

        data = await self._handle_response(response, "list_webhooks")

        # Daily.co returns array directly (not paginated)
        if isinstance(data, list):
            return [WebhookResponse(**wh) for wh in data]

        # Future-proof: handle potential pagination envelope
        if isinstance(data, dict) and "data" in data:
            return [WebhookResponse(**wh) for wh in data["data"]]

        logger.warning("Unexpected webhook list response format", data=data)
        return []

    async def create_webhook(self, request: CreateWebhookRequest) -> WebhookResponse:
        """
        Create a new webhook subscription.

        Reference: https://docs.daily.co/reference/rest-api/webhooks

        Args:
            request: Webhook configuration with URL, event types, and HMAC secret

        Returns:
            Created webhook with UUID and state

        Raises:
            httpx.HTTPStatusError: If API request fails
        """
        client = await self._get_client()
        response = await client.post(
            f"{self.base_url}/webhooks",
            headers=self.headers,
            json=request.model_dump(exclude_none=True),
        )

        data = await self._handle_response(response, "create_webhook")
        return WebhookResponse(**data)

    async def update_webhook(
        self, webhook_uuid: NonEmptyString, request: UpdateWebhookRequest
    ) -> WebhookResponse:
        """
        Update webhook configuration.

        Note: Daily.co may not support PATCH for all fields.
        The common pattern is delete + recreate.

        Reference: https://docs.daily.co/reference/rest-api/webhooks

        Args:
            webhook_uuid: Webhook UUID to update
            request: Updated webhook configuration

        Returns:
            Updated webhook configuration

        Raises:
            httpx.HTTPStatusError: If API request fails
        """
        client = await self._get_client()
        response = await client.patch(
            f"{self.base_url}/webhooks/{webhook_uuid}",
            headers=self.headers,
            json=request.model_dump(exclude_none=True),
        )

        data = await self._handle_response(response, "update_webhook")
        return WebhookResponse(**data)

    async def delete_webhook(self, webhook_uuid: NonEmptyString) -> None:
        """
        Delete a webhook.

        Reference: https://docs.daily.co/reference/rest-api/webhooks

        Args:
            webhook_uuid: Webhook UUID to delete

        Raises:
            httpx.HTTPStatusError: If webhook not found or deletion fails
        """
        client = await self._get_client()
        response = await client.delete(
            f"{self.base_url}/webhooks/{webhook_uuid}",
            headers=self.headers,
        )

        await self._handle_response(response, "delete_webhook")

    # ============================================================================
    # HELPER METHODS
    # ============================================================================

    async def find_webhook_by_url(self, url: NonEmptyString) -> WebhookResponse | None:
        """
        Find a webhook by its URL.

        Args:
            url: Webhook endpoint URL to search for

        Returns:
            Webhook if found, None otherwise
        """
        webhooks = await self.list_webhooks()
        for webhook in webhooks:
            if webhook.url == url:
                return webhook
        return None

    async def find_webhooks_by_pattern(
        self, pattern: NonEmptyString
    ) -> list[WebhookResponse]:
        """
        Find webhooks matching a URL pattern (e.g., 'ngrok').

        Args:
            pattern: String to match in webhook URLs

        Returns:
            List of matching webhooks
        """
        webhooks = await self.list_webhooks()
        return [wh for wh in webhooks if pattern in wh.url]
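A minimal usage sketch, not part of the diff: idempotently bootstrapping a webhook with the helpers above. It assumes `api` is an instance of this client; the endpoint URL and HMAC secret are placeholders.

async def ensure_webhook(api) -> WebhookResponse:
    # Check-then-create keeps this safe to run on every startup
    url = "https://example.invalid/daily/webhook"  # hypothetical endpoint
    existing = await api.find_webhook_by_url(url)
    if existing is not None:
        return existing
    return await api.create_webhook(
        CreateWebhookRequest(
            url=url,
            eventTypes=["recording.ready-to-download", "recording.error"],
            hmac="base64-encoded-secret",  # hypothetical
        )
    )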
server/reflector/dailyco_api/requests.py (new file, 162 lines)
@@ -0,0 +1,162 @@
"""
Daily.co API Request Models

Reference: https://docs.daily.co/reference/rest-api
"""

from typing import List, Literal

from pydantic import BaseModel, Field

from reflector.utils.string import NonEmptyString


class RecordingsBucketConfig(BaseModel):
    """
    S3 bucket configuration for raw-tracks recordings.

    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
    """

    bucket_name: NonEmptyString = Field(description="S3 bucket name")
    bucket_region: NonEmptyString = Field(description="AWS region (e.g., 'us-east-1')")
    assume_role_arn: NonEmptyString = Field(
        description="AWS IAM role ARN that Daily.co will assume to write recordings"
    )
    allow_api_access: bool = Field(
        default=True,
        description="Whether to allow API access to recording metadata",
    )


class RoomProperties(BaseModel):
    """
    Room configuration properties.
    """

    enable_recording: Literal["cloud", "local", "raw-tracks"] | None = Field(
        default=None,
        description="Recording mode: 'cloud' for mixed, 'local' for local recording, 'raw-tracks' for multitrack, None to disable",
    )
    enable_chat: bool = Field(default=True, description="Enable in-meeting chat")
    enable_screenshare: bool = Field(default=True, description="Enable screen sharing")
    enable_knocking: bool = Field(
        default=False,
        description="Enable knocking for private rooms (allows participants to request access)",
    )
    start_video_off: bool = Field(
        default=False, description="Start with video off for all participants"
    )
    start_audio_off: bool = Field(
        default=False, description="Start with audio muted for all participants"
    )
    exp: int | None = Field(
        None, description="Room expiration timestamp (Unix epoch seconds)"
    )
    recordings_bucket: RecordingsBucketConfig | None = Field(
        None, description="S3 bucket configuration for raw-tracks recordings"
    )


class CreateRoomRequest(BaseModel):
    """
    Request to create a new Daily.co room.

    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
    """

    name: NonEmptyString = Field(description="Room name (must be unique within domain)")
    privacy: Literal["public", "private"] = Field(
        default="public", description="Room privacy setting"
    )
    properties: RoomProperties = Field(
        default_factory=RoomProperties, description="Room configuration properties"
    )


class MeetingTokenProperties(BaseModel):
    """
    Properties for meeting token creation.

    Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
    """

    room_name: NonEmptyString = Field(description="Room name this token is valid for")
    user_id: NonEmptyString | None = Field(
        None, description="User identifier to associate with token"
    )
    is_owner: bool = Field(
        default=False, description="Grant owner privileges to token holder"
    )
    start_cloud_recording: bool = Field(
        default=False, description="Automatically start cloud recording on join"
    )
    enable_recording_ui: bool = Field(
        default=True, description="Show recording controls in UI"
    )
    eject_at_token_exp: bool = Field(
        default=False, description="Eject participant when token expires"
    )
    nbf: int | None = Field(
        None, description="Not-before timestamp (Unix epoch seconds)"
    )
    exp: int | None = Field(
        None, description="Expiration timestamp (Unix epoch seconds)"
    )


class CreateMeetingTokenRequest(BaseModel):
    """
    Request to create a meeting token for participant authentication.

    Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
    """

    properties: MeetingTokenProperties = Field(description="Token properties")


class CreateWebhookRequest(BaseModel):
    """
    Request to create a webhook subscription.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    url: NonEmptyString = Field(description="Webhook endpoint URL (must be HTTPS)")
    eventTypes: List[
        Literal[
            "participant.joined",
            "participant.left",
            "recording.started",
            "recording.ready-to-download",
            "recording.error",
        ]
    ] = Field(
        description="Array of event types to subscribe to (only events we handle)"
    )
    hmac: NonEmptyString = Field(
        description="Base64-encoded HMAC secret for webhook signature verification"
    )
    basicAuth: NonEmptyString | None = Field(
        None, description="Optional basic auth credentials for webhook endpoint"
    )


class UpdateWebhookRequest(BaseModel):
    """
    Request to update an existing webhook.

    Note: The Daily.co API may not support PATCH for webhooks.
    The common pattern is to delete and recreate.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    url: NonEmptyString | None = Field(None, description="New webhook endpoint URL")
    eventTypes: List[NonEmptyString] | None = Field(
        None, description="New array of event types"
    )
    hmac: NonEmptyString | None = Field(None, description="New HMAC secret")
    basicAuth: NonEmptyString | None = Field(
        None, description="New basic auth credentials"
    )
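To show how these request models compose, here is a sketch of a raw-tracks room request; every value below is a placeholder, not project configuration.

room_request = CreateRoomRequest(
    name="weekly-sync",  # hypothetical room name
    privacy="private",
    properties=RoomProperties(
        enable_recording="raw-tracks",
        enable_knocking=True,
        recordings_bucket=RecordingsBucketConfig(
            bucket_name="example-recordings",
            bucket_region="us-east-1",
            assume_role_arn="arn:aws:iam::123456789012:role/example-daily-writer",
        ),
    ),
)
# exclude_none=True drops unset optionals before POSTing, matching the client above
payload = room_request.model_dump(exclude_none=True)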
server/reflector/dailyco_api/responses.py (new file, 193 lines)
@@ -0,0 +1,193 @@
"""
Daily.co API Response Models
"""

from typing import Any, Dict, List, Literal

from pydantic import BaseModel, Field

from reflector.dailyco_api.webhooks import DailyTrack
from reflector.utils.string import NonEmptyString

# not documented in daily; we fill it according to observations
RecordingStatus = Literal["in-progress", "finished"]


class RoomResponse(BaseModel):
    """
    Response from room creation or retrieval.

    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
    """

    id: NonEmptyString = Field(description="Unique room identifier (UUID)")
    name: NonEmptyString = Field(description="Room name used in URLs")
    api_created: bool = Field(description="Whether room was created via API")
    privacy: Literal["public", "private"] = Field(description="Room privacy setting")
    url: NonEmptyString = Field(description="Full room URL")
    created_at: NonEmptyString = Field(description="ISO 8601 creation timestamp")
    config: Dict[NonEmptyString, Any] = Field(
        default_factory=dict, description="Room configuration properties"
    )


class RoomPresenceParticipant(BaseModel):
    """
    Participant presence information in a room.

    Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
    """

    room: NonEmptyString = Field(description="Room name")
    id: NonEmptyString = Field(description="Participant session ID")
    userId: NonEmptyString | None = Field(None, description="User ID if provided")
    userName: NonEmptyString | None = Field(None, description="User display name")
    joinTime: NonEmptyString = Field(description="ISO 8601 join timestamp")
    duration: int = Field(description="Duration in room (seconds)")


class RoomPresenceResponse(BaseModel):
    """
    Response from room presence endpoint.

    Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
    """

    total_count: int = Field(
        description="Total number of participants currently in room"
    )
    data: List[RoomPresenceParticipant] = Field(
        default_factory=list, description="Array of participant presence data"
    )


class MeetingParticipant(BaseModel):
    """
    Historical participant data from a meeting.

    Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
    """

    user_id: NonEmptyString | None = Field(None, description="User identifier")
    participant_id: NonEmptyString = Field(description="Participant session identifier")
    user_name: NonEmptyString | None = Field(None, description="User display name")
    join_time: int = Field(description="Join timestamp (Unix epoch seconds)")
    duration: int = Field(description="Duration in meeting (seconds)")


class MeetingParticipantsResponse(BaseModel):
    """
    Response from meeting participants endpoint.

    Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants
    """

    data: List[MeetingParticipant] = Field(
        default_factory=list, description="Array of participant data"
    )


class MeetingResponse(BaseModel):
    """
    Response from meeting information endpoint.

    Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information
    """

    id: NonEmptyString = Field(description="Meeting session identifier (UUID)")
    room: NonEmptyString = Field(description="Room name where meeting occurred")
    start_time: int = Field(
        description="Meeting start Unix timestamp (~15s granularity)"
    )
    duration: int = Field(description="Total meeting duration in seconds")
    ongoing: bool = Field(description="Whether meeting is currently active")
    max_participants: int = Field(description="Peak concurrent participant count")
    participants: List[MeetingParticipant] = Field(
        default_factory=list, description="Array of participant session data"
    )


class RecordingS3Info(BaseModel):
    """
    S3 bucket information for a recording.

    Reference: https://docs.daily.co/reference/rest-api/recordings
    """

    bucket_name: NonEmptyString
    bucket_region: NonEmptyString
    endpoint: NonEmptyString | None = None


class RecordingResponse(BaseModel):
    """
    Response from recording retrieval endpoint.

    Reference: https://docs.daily.co/reference/rest-api/recordings
    """

    id: NonEmptyString = Field(description="Recording identifier")
    room_name: NonEmptyString = Field(description="Room where recording occurred")
    start_ts: int = Field(description="Recording start timestamp (Unix epoch seconds)")
    status: RecordingStatus = Field(
        description="Recording status ('in-progress' or 'finished')"
    )
    max_participants: int | None = Field(
        None, description="Maximum participants during recording (may be missing)"
    )
    duration: int = Field(description="Recording duration in seconds")
    share_token: NonEmptyString | None = Field(
        None, description="Token for sharing recording"
    )
    s3: RecordingS3Info | None = Field(None, description="S3 bucket information")
    tracks: list[DailyTrack] = Field(
        default_factory=list,
        description="Track list for raw-tracks recordings (always array, never null)",
    )
    # this is not a mistake but a deliberate Daily.co naming decision
    mtgSessionId: NonEmptyString | None = Field(
        None, description="Meeting session identifier (may be missing)"
    )


class MeetingTokenResponse(BaseModel):
    """
    Response from meeting token creation.

    Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token
    """

    token: NonEmptyString = Field(
        description="JWT meeting token for participant authentication"
    )


class WebhookResponse(BaseModel):
    """
    Response from webhook creation or retrieval.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    uuid: NonEmptyString = Field(description="Unique webhook identifier")
    url: NonEmptyString = Field(description="Webhook endpoint URL")
    hmac: NonEmptyString | None = Field(
        None, description="Base64-encoded HMAC secret for signature verification"
    )
    basicAuth: NonEmptyString | None = Field(
        None, description="Basic auth credentials if configured"
    )
    eventTypes: List[NonEmptyString] = Field(
        default_factory=list,
        description="Array of event types (e.g., ['recording.started', 'participant.joined'])",
    )
    state: Literal["ACTIVE", "FAILED"] = Field(
        description="Webhook state - FAILED after 3+ consecutive failures"
    )
    failedCount: int = Field(default=0, description="Number of consecutive failures")
    lastMomentPushed: NonEmptyString | None = Field(
        None, description="ISO 8601 timestamp of last successful push"
    )
    domainId: NonEmptyString = Field(description="Daily.co domain/account identifier")
    createdAt: NonEmptyString = Field(description="ISO 8601 creation timestamp")
    updatedAt: NonEmptyString = Field(description="ISO 8601 last update timestamp")
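Because `tracks` is always a list on `RecordingResponse` (never null), callers can filter it without guards. A small sketch, assuming `recording` is a parsed `RecordingResponse`:

audio_keys = [t.s3Key for t in recording.tracks if t.type == "audio"]
if not audio_keys:
    # either a mixed "cloud" recording, or a raw-tracks session with no audio
    ...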
server/reflector/dailyco_api/webhook_utils.py (new file, 228 lines)
@@ -0,0 +1,228 @@
"""
Daily.co Webhook Utilities

Utilities for verifying and parsing Daily.co webhook events.

Reference: https://docs.daily.co/reference/rest-api/webhooks
"""

import base64
import hmac
from hashlib import sha256

import structlog

from .webhooks import (
    DailyWebhookEvent,
    ParticipantJoinedPayload,
    ParticipantLeftPayload,
    RecordingErrorPayload,
    RecordingReadyToDownloadPayload,
    RecordingStartedPayload,
)

logger = structlog.get_logger(__name__)


def verify_webhook_signature(
    body: bytes,
    signature: str,
    timestamp: str,
    webhook_secret: str,
) -> bool:
    """
    Verify Daily.co webhook signature using HMAC-SHA256.

    Daily.co signature verification:
    1. Base64-decode the webhook secret
    2. Create signed content: timestamp + '.' + body
    3. Compute HMAC-SHA256(secret, signed_content)
    4. Base64-encode the result
    5. Compare with provided signature using constant-time comparison

    Reference: https://docs.daily.co/reference/rest-api/webhooks

    Args:
        body: Raw request body bytes
        signature: X-Webhook-Signature header value
        timestamp: X-Webhook-Timestamp header value
        webhook_secret: Base64-encoded HMAC secret

    Returns:
        True if signature is valid, False otherwise

    Example:
        >>> body = b'{"version":"1.0.0","type":"participant.joined",...}'
        >>> signature = "abc123..."
        >>> timestamp = "1234567890"
        >>> secret = "your-base64-secret"
        >>> is_valid = verify_webhook_signature(body, signature, timestamp, secret)
    """
    if not signature or not timestamp or not webhook_secret:
        logger.warning(
            "Missing required data for webhook verification",
            has_signature=bool(signature),
            has_timestamp=bool(timestamp),
            has_secret=bool(webhook_secret),
        )
        return False

    try:
        secret_bytes = base64.b64decode(webhook_secret)
        signed_content = timestamp.encode() + b"." + body
        expected = hmac.new(secret_bytes, signed_content, sha256).digest()
        expected_b64 = base64.b64encode(expected).decode()

        # Constant-time comparison to prevent timing attacks
        return hmac.compare_digest(expected_b64, signature)

    except (base64.binascii.Error, ValueError, TypeError, UnicodeDecodeError) as e:
        logger.error(
            "Webhook signature verification failed",
            error=str(e),
            error_type=type(e).__name__,
        )
        return False


def extract_room_name(event: DailyWebhookEvent) -> str | None:
    """
    Extract room name from Daily.co webhook event payload.

    Args:
        event: Parsed webhook event

    Returns:
        Room name if present and a string, None otherwise

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> room_name = extract_room_name(event)
    """
    room = event.payload.get("room_name")
    # Ensure we return a string, not any falsy value that might be in payload
    return room if isinstance(room, str) else None


def parse_participant_joined(event: DailyWebhookEvent) -> ParticipantJoinedPayload:
    """
    Parse participant.joined webhook event payload.

    Args:
        event: Webhook event with type "participant.joined"

    Returns:
        Parsed participant joined payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    return ParticipantJoinedPayload(**event.payload)


def parse_participant_left(event: DailyWebhookEvent) -> ParticipantLeftPayload:
    """
    Parse participant.left webhook event payload.

    Args:
        event: Webhook event with type "participant.left"

    Returns:
        Parsed participant left payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    return ParticipantLeftPayload(**event.payload)


def parse_recording_started(event: DailyWebhookEvent) -> RecordingStartedPayload:
    """
    Parse recording.started webhook event payload.

    Args:
        event: Webhook event with type "recording.started"

    Returns:
        Parsed recording started payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    return RecordingStartedPayload(**event.payload)


def parse_recording_ready(
    event: DailyWebhookEvent,
) -> RecordingReadyToDownloadPayload:
    """
    Parse recording.ready-to-download webhook event payload.

    This event is sent when raw-tracks recordings are complete and uploaded to S3.
    The payload includes a 'tracks' array with individual audio/video files.

    Args:
        event: Webhook event with type "recording.ready-to-download"

    Returns:
        Parsed recording ready payload with tracks array

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> if event.type == "recording.ready-to-download":
        ...     payload = parse_recording_ready(event)
        ...     audio_tracks = [t for t in payload.tracks if t.type == "audio"]
    """
    return RecordingReadyToDownloadPayload(**event.payload)


def parse_recording_error(event: DailyWebhookEvent) -> RecordingErrorPayload:
    """
    Parse recording.error webhook event payload.

    Args:
        event: Webhook event with type "recording.error"

    Returns:
        Parsed recording error payload

    Raises:
        pydantic.ValidationError: If payload doesn't match expected schema
    """
    return RecordingErrorPayload(**event.payload)


WEBHOOK_PARSERS = {
    "participant.joined": parse_participant_joined,
    "participant.left": parse_participant_left,
    "recording.started": parse_recording_started,
    "recording.ready-to-download": parse_recording_ready,
    "recording.error": parse_recording_error,
}


def parse_webhook_payload(event: DailyWebhookEvent):
    """
    Parse webhook event payload based on event type.

    Args:
        event: Webhook event

    Returns:
        Typed payload model based on event type, or raw dict if unknown

    Example:
        >>> event = DailyWebhookEvent(**webhook_payload)
        >>> payload = parse_webhook_payload(event)
        >>> if isinstance(payload, ParticipantJoinedPayload):
        ...     print(f"User {payload.user_name} joined")
    """
    parser = WEBHOOK_PARSERS.get(event.type)
    if parser:
        return parser(event)
    else:
        logger.warning("Unknown webhook event type", event_type=event.type)
        return event.payload
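Tying the pieces together, a hedged sketch of a receiving endpoint. The FastAPI wiring, route path, and secret constant are assumptions; the header names come from the argument docs above.

from fastapi import FastAPI, HTTPException, Request

app = FastAPI()
WEBHOOK_SECRET = "base64-encoded-secret"  # hypothetical; normally from settings

@app.post("/daily/webhook")
async def daily_webhook(request: Request):
    body = await request.body()
    if not verify_webhook_signature(
        body,
        request.headers.get("X-Webhook-Signature", ""),
        request.headers.get("X-Webhook-Timestamp", ""),
        WEBHOOK_SECRET,
    ):
        raise HTTPException(status_code=401, detail="invalid signature")
    event = DailyWebhookEvent.model_validate_json(body)
    payload = parse_webhook_payload(event)  # typed payload, or raw dict if unknown
    return {"ok": True}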
server/reflector/dailyco_api/webhooks.py (new file, 271 lines)
@@ -0,0 +1,271 @@
"""
Daily.co Webhook Event Models

Reference: https://docs.daily.co/reference/rest-api/webhooks
"""

from typing import Annotated, Any, Dict, Literal, Union

from pydantic import BaseModel, Field, field_validator

from reflector.utils.string import NonEmptyString


def normalize_timestamp_to_int(v):
    """
    Normalize float timestamps to int by truncating the decimal part.

    Daily.co sometimes sends timestamps as floats (e.g., 1708972279.96).
    Pydantic expects int for fields typed as `int`.
    """
    if v is None:
        return v
    if isinstance(v, float):
        return int(v)
    return v


WebhookEventType = Literal[
    "participant.joined",
    "participant.left",
    "recording.started",
    "recording.ready-to-download",
    "recording.error",
]


class DailyTrack(BaseModel):
    """
    Individual audio or video track from a multitrack recording.

    Reference: https://docs.daily.co/reference/rest-api/recordings
    """

    type: Literal["audio", "video"]
    s3Key: NonEmptyString = Field(description="S3 object key for the track file")
    size: int = Field(description="File size in bytes")


class DailyWebhookEvent(BaseModel):
    """
    Base structure for all Daily.co webhook events.
    All events share five common fields documented below.

    Reference: https://docs.daily.co/reference/rest-api/webhooks
    """

    version: NonEmptyString = Field(
        description="Represents the version of the event. This uses semantic versioning to inform a consumer if the payload has introduced any breaking changes"
    )
    type: WebhookEventType = Field(
        description="Represents the type of the event described in the payload"
    )
    id: NonEmptyString = Field(
        description="An identifier representing this specific event"
    )
    payload: Dict[NonEmptyString, Any] = Field(
        description="An object representing the event, whose fields are described in the corresponding payload class"
    )
    event_ts: int = Field(
        description="Documents when the webhook itself was sent. This timestamp is different from the time of the event the webhook describes. For example, a recording.started event will contain a start_ts timestamp of when the actual recording started, and a slightly later event_ts timestamp indicating when the webhook event was sent"
    )

    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )


class ParticipantJoinedPayload(BaseModel):
    """
    Payload for participant.joined webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-joined
    """

    room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
    session_id: NonEmptyString = Field(description="Daily.co session identifier")
    user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
    user_name: NonEmptyString | None = Field(None, description="User display name")
    joined_at: int = Field(description="Join timestamp in Unix epoch seconds")

    _normalize_joined_at = field_validator("joined_at", mode="before")(
        normalize_timestamp_to_int
    )


class ParticipantLeftPayload(BaseModel):
    """
    Payload for participant.left webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-left
    """

    room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
    session_id: NonEmptyString = Field(description="Daily.co session identifier")
    user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
    user_name: NonEmptyString | None = Field(None, description="User display name")
    joined_at: int = Field(description="Join timestamp in Unix epoch seconds")
    duration: int | None = Field(
        None, description="Duration of participation in seconds"
    )

    _normalize_joined_at = field_validator("joined_at", mode="before")(
        normalize_timestamp_to_int
    )


class RecordingStartedPayload(BaseModel):
    """
    Payload for recording.started webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-started
    """

    room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
    recording_id: NonEmptyString = Field(description="Recording identifier")
    start_ts: int | None = Field(None, description="Recording start timestamp")

    _normalize_start_ts = field_validator("start_ts", mode="before")(
        normalize_timestamp_to_int
    )


class RecordingReadyToDownloadPayload(BaseModel):
    """
    Payload for recording.ready-to-download webhook event.
    This is sent when raw-tracks recordings are complete and uploaded to S3.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-ready-to-download
    """

    type: Literal["cloud", "raw-tracks"] = Field(
        description="The type of recording that was generated"
    )
    recording_id: NonEmptyString = Field(
        description="An ID identifying the recording that was generated"
    )
    room_name: NonEmptyString = Field(
        description="The name of the room where the recording was made"
    )
    start_ts: int = Field(
        description="The Unix epoch time in seconds representing when the recording started"
    )
    status: Literal["finished"] = Field(
        description="The status of the given recording (always 'finished' in ready-to-download webhook, see RecordingStatus in responses.py for full API statuses)"
    )
    max_participants: int = Field(
        description="The number of participants on the call that were recorded"
    )
    duration: int = Field(description="The duration in seconds of the call")
    s3_key: NonEmptyString = Field(
        description="The location of the recording in the provided S3 bucket"
    )
    share_token: NonEmptyString | None = Field(
        None, description="Share token; observed in payloads but not documented by Daily.co"
    )
    tracks: list[DailyTrack] | None = Field(
        None,
        description="If the recording is a raw-tracks recording, a tracks field will be provided. If role permissions have been removed, the tracks field may be null",
    )

    _normalize_start_ts = field_validator("start_ts", mode="before")(
        normalize_timestamp_to_int
    )


class RecordingErrorPayload(BaseModel):
    """
    Payload for recording.error webhook event.

    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-error
    """

    action: Literal["clourd-recording-err", "cloud-recording-error"] = Field(
        description="A string describing the event that was emitted (both variants are documented)"
    )
    error_msg: NonEmptyString = Field(description="The error message returned")
    instance_id: NonEmptyString = Field(
        description="The recording instance ID that was passed into the start recording command"
    )
    room_name: NonEmptyString = Field(
        description="The name of the room where the recording was made"
    )
    timestamp: int = Field(
        description="The Unix epoch time in seconds representing when the error was emitted"
    )

    _normalize_timestamp = field_validator("timestamp", mode="before")(
        normalize_timestamp_to_int
    )


class ParticipantJoinedEvent(BaseModel):
    version: NonEmptyString
    type: Literal["participant.joined"]
    id: NonEmptyString
    payload: ParticipantJoinedPayload
    event_ts: int

    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )


class ParticipantLeftEvent(BaseModel):
    version: NonEmptyString
    type: Literal["participant.left"]
    id: NonEmptyString
    payload: ParticipantLeftPayload
    event_ts: int

    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )


class RecordingStartedEvent(BaseModel):
    version: NonEmptyString
    type: Literal["recording.started"]
    id: NonEmptyString
    payload: RecordingStartedPayload
    event_ts: int

    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )


class RecordingReadyEvent(BaseModel):
    version: NonEmptyString
    type: Literal["recording.ready-to-download"]
    id: NonEmptyString
    payload: RecordingReadyToDownloadPayload
    event_ts: int

    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )


class RecordingErrorEvent(BaseModel):
    version: NonEmptyString
    type: Literal["recording.error"]
    id: NonEmptyString
    payload: RecordingErrorPayload
    event_ts: int

    _normalize_event_ts = field_validator("event_ts", mode="before")(
        normalize_timestamp_to_int
    )


DailyWebhookEventUnion = Annotated[
    Union[
        ParticipantJoinedEvent,
        ParticipantLeftEvent,
        RecordingStartedEvent,
        RecordingReadyEvent,
        RecordingErrorEvent,
    ],
    Field(discriminator="type"),
]
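A sketch of parsing a raw body straight into the discriminated union via pydantic v2's TypeAdapter; the helper name is hypothetical.

from pydantic import TypeAdapter

_event_adapter = TypeAdapter(DailyWebhookEventUnion)

def parse_event(raw_body: bytes):
    # Dispatches on the "type" discriminator; payload comes back fully typed
    event = _event_adapter.validate_json(raw_body)
    if isinstance(event, RecordingReadyEvent):
        tracks = event.payload.tracks or []  # tracks may be None on this payload
    return event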
server/reflector/db/__init__.py
@@ -25,11 +25,13 @@ def get_database() -> databases.Database:

    # import models
    import reflector.db.calendar_events  # noqa
+    import reflector.db.daily_participant_sessions  # noqa
    import reflector.db.meetings  # noqa
    import reflector.db.recordings  # noqa
    import reflector.db.rooms  # noqa
    import reflector.db.transcripts  # noqa
    import reflector.db.user_api_keys  # noqa
+    import reflector.db.users  # noqa

    kwargs = {}
    if "postgres" not in settings.DATABASE_URL:
server/reflector/db/daily_participant_sessions.py (new file, 229 lines)
@@ -0,0 +1,229 @@
"""Daily.co participant session tracking.

Stores webhook data for participant.joined and participant.left events to provide
historical session information (the Daily.co API only returns current participants).
"""

from datetime import datetime

import sqlalchemy as sa
from pydantic import BaseModel
from sqlalchemy.dialects.postgresql import insert

from reflector.db import get_database, metadata
from reflector.utils.string import NonEmptyString

daily_participant_sessions = sa.Table(
    "daily_participant_session",
    metadata,
    sa.Column("id", sa.String, primary_key=True),
    sa.Column(
        "meeting_id",
        sa.String,
        sa.ForeignKey("meeting.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column(
        "room_id",
        sa.String,
        sa.ForeignKey("room.id", ondelete="CASCADE"),
        nullable=False,
    ),
    sa.Column("session_id", sa.String, nullable=False),
    sa.Column("user_id", sa.String, nullable=True),
    sa.Column("user_name", sa.String, nullable=False),
    sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False),
    sa.Column("left_at", sa.DateTime(timezone=True), nullable=True),
    sa.Index("idx_daily_session_meeting_left", "meeting_id", "left_at"),
    sa.Index("idx_daily_session_room", "room_id"),
)


class DailyParticipantSession(BaseModel):
    """Daily.co participant session record.

    Tracks when a participant joined and left a meeting. Populated from webhooks:
    - participant.joined: creates a record with left_at=None
    - participant.left: updates the record with left_at

    ID format: {meeting_id}:{user_id}:{joined_at_ms}
    - Ensures idempotency (duplicate webhooks don't create duplicates)
    - Allows the same user to rejoin (different joined_at = different session)

    Duration is calculated as left_at - joined_at (not stored).
    """

    id: NonEmptyString
    meeting_id: NonEmptyString
    room_id: NonEmptyString
    session_id: NonEmptyString  # Daily.co's session_id (identifies room session)
    user_id: NonEmptyString | None = None
    user_name: str
    joined_at: datetime
    left_at: datetime | None = None


class DailyParticipantSessionController:
    """Controller for Daily.co participant session persistence."""

    async def get_by_id(self, id: str) -> DailyParticipantSession | None:
        """Get a session by its ID."""
        query = daily_participant_sessions.select().where(
            daily_participant_sessions.c.id == id
        )
        result = await get_database().fetch_one(query)
        return DailyParticipantSession(**result) if result else None

    async def get_open_session(
        self, meeting_id: NonEmptyString, session_id: NonEmptyString
    ) -> DailyParticipantSession | None:
        """Get the open (not yet left) session for a user in a meeting."""
        query = daily_participant_sessions.select().where(
            sa.and_(
                daily_participant_sessions.c.meeting_id == meeting_id,
                daily_participant_sessions.c.session_id == session_id,
                daily_participant_sessions.c.left_at.is_(None),
            )
        )
        results = await get_database().fetch_all(query)

        if len(results) > 1:
            raise ValueError(
                f"Multiple open sessions for daily session {session_id} in meeting {meeting_id}: "
                f"found {len(results)} sessions"
            )

        return DailyParticipantSession(**results[0]) if results else None

    async def upsert_joined(self, session: DailyParticipantSession) -> None:
        """Insert or update when a participant.joined webhook arrives.

        Idempotent: duplicate webhooks with the same ID are safely ignored.
        Out-of-order: if the left webhook arrived first, preserves left_at.
        """
        query = insert(daily_participant_sessions).values(**session.model_dump())
        query = query.on_conflict_do_update(
            index_elements=["id"],
            set_={"user_name": session.user_name},
        )
        await get_database().execute(query)

    async def upsert_left(self, session: DailyParticipantSession) -> None:
        """Update the session when a participant.left webhook arrives.

        Finds the open session for this user in this meeting and updates left_at.
        This works around a Daily.co webhook timestamp inconsistency (joined_at
        differs by ~4ms between the joined and left webhooks).

        Handles three cases:
        1. Normal flow: open session exists -> updates left_at
        2. Out-of-order: left arrives first -> creates a new record with left data
        3. Duplicate: left arrives again -> idempotent (DB trigger prevents left_at modification)
        """
        if session.left_at is None:
            raise ValueError("left_at is required for upsert_left")

        if session.left_at <= session.joined_at:
            raise ValueError(
                f"left_at ({session.left_at}) must be after joined_at ({session.joined_at})"
            )

        # Find existing open session (works around timestamp mismatch in webhooks)
        existing = await self.get_open_session(session.meeting_id, session.session_id)

        if existing:
            # Update existing open session
            query = (
                daily_participant_sessions.update()
                .where(daily_participant_sessions.c.id == existing.id)
                .values(left_at=session.left_at)
            )
            await get_database().execute(query)
        else:
            # Out-of-order or first webhook: insert new record
            query = insert(daily_participant_sessions).values(**session.model_dump())
            query = query.on_conflict_do_nothing(index_elements=["id"])
            await get_database().execute(query)

    async def get_by_meeting(self, meeting_id: str) -> list[DailyParticipantSession]:
        """Get all participant sessions for a meeting (active and ended)."""
        query = daily_participant_sessions.select().where(
            daily_participant_sessions.c.meeting_id == meeting_id
        )
        results = await get_database().fetch_all(query)
        return [DailyParticipantSession(**result) for result in results]

    async def get_active_by_meeting(
        self, meeting_id: str
    ) -> list[DailyParticipantSession]:
        """Get only active (not left) participant sessions for a meeting."""
        query = daily_participant_sessions.select().where(
            sa.and_(
                daily_participant_sessions.c.meeting_id == meeting_id,
                daily_participant_sessions.c.left_at.is_(None),
            )
        )
        results = await get_database().fetch_all(query)
        return [DailyParticipantSession(**result) for result in results]

    async def get_all_sessions_for_meeting(
        self, meeting_id: NonEmptyString
    ) -> dict[NonEmptyString, DailyParticipantSession]:
        query = daily_participant_sessions.select().where(
            daily_participant_sessions.c.meeting_id == meeting_id
        )
        results = await get_database().fetch_all(query)
        # TODO: DailySessionId custom type
        return {row["session_id"]: DailyParticipantSession(**row) for row in results}

    async def batch_upsert_sessions(
        self, sessions: list[DailyParticipantSession]
    ) -> None:
        """Upsert multiple sessions in a single query.

        Uses ON CONFLICT for idempotency. Updates user_name on conflict since
        participants may change it during a meeting.
        """
        if not sessions:
            return

        values = [session.model_dump() for session in sessions]
        query = insert(daily_participant_sessions).values(values)
        query = query.on_conflict_do_update(
            index_elements=["id"],
            set_={
                # Preserve existing left_at to prevent race conditions
                "left_at": sa.func.coalesce(
                    daily_participant_sessions.c.left_at,
                    query.excluded.left_at,
                ),
                "user_name": query.excluded.user_name,
            },
        )
        await get_database().execute(query)

    async def batch_close_sessions(
        self, session_ids: list[NonEmptyString], left_at: datetime
    ) -> None:
        """Mark multiple sessions as left in a single query.

        Only updates sessions where left_at is NULL (protects already-closed sessions).

        A left_at mismatch for already-closed sessions is ignored; if it ever
        happens, it is assumed to be inconsequential.
        """
        if not session_ids:
            return

        query = (
            daily_participant_sessions.update()
            .where(
                sa.and_(
                    daily_participant_sessions.c.id.in_(session_ids),
                    daily_participant_sessions.c.left_at.is_(None),
                )
            )
            .values(left_at=left_at)
        )
        await get_database().execute(query)


daily_participant_sessions_controller = DailyParticipantSessionController()
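A sketch of the joined-webhook flow implied by the docstrings above; the handler name is hypothetical and the meeting lookup is elided.

from datetime import datetime, timezone

def make_session_id(meeting_id: str, user_id: str, joined_at_s: int) -> str:
    # ID format from the model docstring: {meeting_id}:{user_id}:{joined_at_ms}
    return f"{meeting_id}:{user_id}:{joined_at_s * 1000}"

async def on_participant_joined(meeting, payload):
    # `payload` is a parsed ParticipantJoinedPayload; `meeting` is assumed to
    # carry id and room_id
    session = DailyParticipantSession(
        id=make_session_id(meeting.id, payload.user_id, payload.joined_at),
        meeting_id=meeting.id,
        room_id=meeting.room_id,
        session_id=payload.session_id,
        user_id=payload.user_id,
        user_name=payload.user_name or "",
        joined_at=datetime.fromtimestamp(payload.joined_at, tz=timezone.utc),
    )
    await daily_participant_sessions_controller.upsert_joined(session)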
server/reflector/db/meetings.py
@@ -7,7 +7,9 @@ from sqlalchemy.dialects.postgresql import JSONB

from reflector.db import get_database, metadata
from reflector.db.rooms import Room
+from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
from reflector.utils import generate_uuid4
+from reflector.utils.string import assert_equal

meetings = sa.Table(
    "meeting",
@@ -55,6 +57,12 @@ meetings = sa.Table(
        ),
    ),
    sa.Column("calendar_metadata", JSONB),
+    sa.Column(
+        "platform",
+        sa.String,
+        nullable=False,
+        server_default=assert_equal(WHEREBY_PLATFORM, "whereby"),
+    ),
    sa.Index("idx_meeting_room_id", "room_id"),
    sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
)
@@ -94,13 +102,14 @@ class Meeting(BaseModel):
    is_locked: bool = False
    room_mode: Literal["normal", "group"] = "normal"
    recording_type: Literal["none", "local", "cloud"] = "cloud"
-    recording_trigger: Literal[
+    recording_trigger: Literal[  # whereby-specific
        "none", "prompt", "automatic", "automatic-2nd-participant"
    ] = "automatic-2nd-participant"
    num_clients: int = 0
    is_active: bool = True
    calendar_event_id: str | None = None
    calendar_metadata: dict[str, Any] | None = None
+    platform: Platform = WHEREBY_PLATFORM


class MeetingController:
@@ -130,14 +139,19 @@ class MeetingController:
            recording_trigger=room.recording_trigger,
            calendar_event_id=calendar_event_id,
            calendar_metadata=calendar_metadata,
+            platform=room.platform,
        )
        query = meetings.insert().values(**meeting.model_dump())
        await get_database().execute(query)
        return meeting

-    async def get_all_active(self) -> list[Meeting]:
-        query = meetings.select().where(meetings.c.is_active)
-        return await get_database().fetch_all(query)
+    async def get_all_active(self, platform: str | None = None) -> list[Meeting]:
+        conditions = [meetings.c.is_active]
+        if platform is not None:
+            conditions.append(meetings.c.platform == platform)
+        query = meetings.select().where(sa.and_(*conditions))
+        results = await get_database().fetch_all(query)
+        return [Meeting(**result) for result in results]

    async def get_by_room_name(
        self,
@@ -147,16 +161,14 @@ class MeetingController:
        Get a meeting by room name.
        For backward compatibility, returns the most recent meeting.
        """
-        end_date = getattr(meetings.c, "end_date")
        query = (
            meetings.select()
            .where(meetings.c.room_name == room_name)
-            .order_by(end_date.desc())
+            .order_by(meetings.c.end_date.desc())
        )
        result = await get_database().fetch_one(query)
        if not result:
            return None
-
        return Meeting(**result)

    async def get_active(self, room: Room, current_time: datetime) -> Meeting | None:
@@ -179,7 +191,6 @@ class MeetingController:
        result = await get_database().fetch_one(query)
        if not result:
            return None
-
        return Meeting(**result)

    async def get_all_active_for_room(
@@ -219,17 +230,27 @@ class MeetingController:
            return None
        return Meeting(**result)

-    async def get_by_id(self, meeting_id: str, **kwargs) -> Meeting | None:
+    async def get_by_id(
+        self, meeting_id: str, room: Room | None = None
+    ) -> Meeting | None:
        query = meetings.select().where(meetings.c.id == meeting_id)
+
+        if room:
+            query = query.where(meetings.c.room_id == room.id)
+
        result = await get_database().fetch_one(query)
        if not result:
            return None
        return Meeting(**result)

-    async def get_by_calendar_event(self, calendar_event_id: str) -> Meeting | None:
+    async def get_by_calendar_event(
+        self, calendar_event_id: str, room: Room
+    ) -> Meeting | None:
        query = meetings.select().where(
            meetings.c.calendar_event_id == calendar_event_id
        )
+        if room:
+            query = query.where(meetings.c.room_id == room.id)
        result = await get_database().fetch_one(query)
        if not result:
            return None
@@ -239,6 +260,28 @@ class MeetingController:
        query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs)
        await get_database().execute(query)

+    async def increment_num_clients(self, meeting_id: str) -> None:
+        """Atomically increment participant count."""
+        query = (
+            meetings.update()
+            .where(meetings.c.id == meeting_id)
+            .values(num_clients=meetings.c.num_clients + 1)
+        )
+        await get_database().execute(query)
+
+    async def decrement_num_clients(self, meeting_id: str) -> None:
+        """Atomically decrement participant count (min 0)."""
+        query = (
+            meetings.update()
+            .where(meetings.c.id == meeting_id)
+            .values(
+                num_clients=sa.case(
+                    (meetings.c.num_clients > 0, meetings.c.num_clients - 1), else_=0
+                )
+            )
+        )
+        await get_database().execute(query)


class MeetingConsentController:
    async def get_by_meeting_id(self, meeting_id: str) -> list[MeetingConsent]:
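The counter helpers clamp in SQL rather than read-modify-write in Python, so concurrent webhook deliveries cannot drive the count negative. A usage sketch, assuming a module-level `meetings_controller` instance in the style of the other controllers here:

# participant.joined handler (wiring hypothetical)
await meetings_controller.increment_num_clients(meeting.id)
# participant.left handler
await meetings_controller.decrement_num_clients(meeting.id)
# decrement issues, roughly (illustrative SQL, not from the migrations):
#   UPDATE meeting
#   SET num_clients = CASE WHEN num_clients > 0 THEN num_clients - 1 ELSE 0 END
#   WHERE id = :meeting_id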
server/reflector/db/recordings.py
@@ -21,6 +21,7 @@ recordings = sa.Table(
        server_default="pending",
    ),
    sa.Column("meeting_id", sa.String),
+    sa.Column("track_keys", sa.JSON, nullable=True),
    sa.Index("idx_recording_meeting_id", "meeting_id"),
)

@@ -28,10 +29,20 @@ recordings = sa.Table(
class Recording(BaseModel):
    id: str = Field(default_factory=generate_uuid4)
    bucket_name: str
+    # for single-track
    object_key: str
    recorded_at: datetime
    status: Literal["pending", "processing", "completed", "failed"] = "pending"
    meeting_id: str | None = None
+    # for multitrack reprocessing
+    # track_keys can be empty list [] if recording finished but no audio was captured (silence/muted)
+    # None means not a multitrack recording, [] means multitrack with no tracks
+    track_keys: list[str] | None = None
+
+    @property
+    def is_multitrack(self) -> bool:
+        """True if recording has separate audio tracks (1+ tracks counts as multitrack)."""
+        return self.track_keys is not None and len(self.track_keys) > 0


class RecordingController:
@@ -40,12 +51,14 @@ class RecordingController:
        await get_database().execute(query)
        return recording

-    async def get_by_id(self, id: str) -> Recording:
+    async def get_by_id(self, id: str) -> Recording | None:
        query = recordings.select().where(recordings.c.id == id)
        result = await get_database().fetch_one(query)
        return Recording(**result) if result else None

-    async def get_by_object_key(self, bucket_name: str, object_key: str) -> Recording:
+    async def get_by_object_key(
+        self, bucket_name: str, object_key: str
+    ) -> Recording | None:
        query = recordings.select().where(
            recordings.c.bucket_name == bucket_name,
            recordings.c.object_key == object_key,
@@ -57,5 +70,14 @@ class RecordingController:
        query = recordings.delete().where(recordings.c.id == id)
        await get_database().execute(query)

+    # no check for existence
+    async def get_by_ids(self, recording_ids: list[str]) -> list[Recording]:
+        if not recording_ids:
+            return []
+
+        query = recordings.select().where(recordings.c.id.in_(recording_ids))
+        results = await get_database().fetch_all(query)
+        return [Recording(**row) for row in results]


recordings_controller = RecordingController()
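A quick sketch of the track_keys tri-state spelled out in the comments above; field values are placeholders.

from datetime import datetime, timezone

now = datetime.now(timezone.utc)
single = Recording(bucket_name="b", object_key="k", recorded_at=now)  # track_keys=None: not multitrack
silent = Recording(bucket_name="b", object_key="k", recorded_at=now, track_keys=[])  # multitrack, no audio captured
multi = Recording(bucket_name="b", object_key="k", recorded_at=now, track_keys=["t0.webm", "t1.webm"])
assert not single.is_multitrack and not silent.is_multitrack and multi.is_multitrack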
server/reflector/db/rooms.py
@@ -9,6 +9,8 @@ from pydantic import BaseModel, Field
from sqlalchemy.sql import false, or_

from reflector.db import get_database, metadata
+from reflector.schemas.platform import Platform
+from reflector.settings import settings
from reflector.utils import generate_uuid4

rooms = sqlalchemy.Table(
@@ -50,6 +52,11 @@ rooms = sqlalchemy.Table(
    ),
    sqlalchemy.Column("ics_last_sync", sqlalchemy.DateTime(timezone=True)),
    sqlalchemy.Column("ics_last_etag", sqlalchemy.Text),
+    sqlalchemy.Column(
+        "platform",
+        sqlalchemy.String,
+        nullable=False,
+    ),
    sqlalchemy.Index("idx_room_is_shared", "is_shared"),
    sqlalchemy.Index("idx_room_ics_enabled", "ics_enabled"),
)
@@ -66,7 +73,7 @@ class Room(BaseModel):
    is_locked: bool = False
    room_mode: Literal["normal", "group"] = "normal"
    recording_type: Literal["none", "local", "cloud"] = "cloud"
-    recording_trigger: Literal[
+    recording_trigger: Literal[  # whereby-specific
        "none", "prompt", "automatic", "automatic-2nd-participant"
    ] = "automatic-2nd-participant"
    is_shared: bool = False
@@ -77,6 +84,7 @@ class Room(BaseModel):
    ics_enabled: bool = False
    ics_last_sync: datetime | None = None
    ics_last_etag: str | None = None
+    platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)


class RoomController:
@@ -130,6 +138,7 @@ class RoomController:
        ics_url: str | None = None,
        ics_fetch_interval: int = 300,
        ics_enabled: bool = False,
+        platform: Platform = settings.DEFAULT_VIDEO_PLATFORM,
    ):
        """
        Add a new room
@@ -137,23 +146,26 @@ class RoomController:
        if webhook_url and not webhook_secret:
            webhook_secret = secrets.token_urlsafe(32)

-        room = Room(
-            name=name,
-            user_id=user_id,
-            zulip_auto_post=zulip_auto_post,
-            zulip_stream=zulip_stream,
-            zulip_topic=zulip_topic,
-            is_locked=is_locked,
-            room_mode=room_mode,
-            recording_type=recording_type,
-            recording_trigger=recording_trigger,
-            is_shared=is_shared,
-            webhook_url=webhook_url,
-            webhook_secret=webhook_secret,
-            ics_url=ics_url,
-            ics_fetch_interval=ics_fetch_interval,
-            ics_enabled=ics_enabled,
-        )
+        room_data = {
+            "name": name,
+            "user_id": user_id,
+            "zulip_auto_post": zulip_auto_post,
+            "zulip_stream": zulip_stream,
+            "zulip_topic": zulip_topic,
+            "is_locked": is_locked,
+            "room_mode": room_mode,
+            "recording_type": recording_type,
+            "recording_trigger": recording_trigger,
+            "is_shared": is_shared,
+            "webhook_url": webhook_url,
+            "webhook_secret": webhook_secret,
+            "ics_url": ics_url,
+            "ics_fetch_interval": ics_fetch_interval,
+            "ics_enabled": ics_enabled,
+            "platform": platform,
+        }
+
+        room = Room(**room_data)
        query = rooms.insert().values(**room.model_dump())
        try:
            await get_database().execute(query)
server/reflector/db/transcripts.py
@@ -21,7 +21,7 @@ from reflector.db.utils import is_postgresql
from reflector.logger import logger
from reflector.processors.types import Word as ProcessorWord
from reflector.settings import settings
-from reflector.storage import get_recordings_storage, get_transcripts_storage
+from reflector.storage import get_transcripts_storage
from reflector.utils import generate_uuid4
from reflector.utils.webvtt import topics_to_webvtt

@@ -186,6 +186,7 @@ class TranscriptParticipant(BaseModel):
    id: str = Field(default_factory=generate_uuid4)
    speaker: int | None
    name: str
+    user_id: str | None = None


class Transcript(BaseModel):
@@ -623,7 +624,9 @@ class TranscriptController:
        )
        if recording:
            try:
-                await get_recordings_storage().delete_file(recording.object_key)
+                await get_transcripts_storage().delete_file(
+                    recording.object_key, bucket=recording.bucket_name
+                )
            except Exception as e:
                logger.warning(
                    "Failed to delete recording object from S3",
@@ -725,11 +728,13 @@ class TranscriptController:
        """
        Download audio from storage
        """
-        transcript.audio_mp3_filename.write_bytes(
-            await get_transcripts_storage().get_file(
-                transcript.storage_audio_path,
-            )
-        )
+        storage = get_transcripts_storage()
+        try:
+            with open(transcript.audio_mp3_filename, "wb") as f:
+                await storage.stream_to_fileobj(transcript.storage_audio_path, f)
+        except Exception:
+            transcript.audio_mp3_filename.unlink(missing_ok=True)
+            raise

    async def upsert_participant(
        self,
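The replacement streams the object to disk instead of buffering it whole with write_bytes, and removes any partial file on failure. The same guard pattern in isolation (a sketch; only `stream_to_fileobj` is the project API used above):

from pathlib import Path

async def download(storage, key: str, dest: Path) -> None:
    try:
        with open(dest, "wb") as f:
            await storage.stream_to_fileobj(key, f)
    except Exception:
        dest.unlink(missing_ok=True)  # don't leave a truncated file behind
        raise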
server/reflector/db/user_api_keys.py
@@ -84,7 +84,8 @@ class UserApiKeyController:
            (user_api_keys.c.id == key_id) & (user_api_keys.c.user_id == user_id)
        )
        result = await get_database().execute(query)
-        return result > 0
+        # asyncpg returns None for DELETE, consider it success if no exception
+        return result is None or result > 0


user_api_keys_controller = UserApiKeyController()
server/reflector/db/users.py (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
"""User table for storing Authentik user information."""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import sqlalchemy
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from reflector.db import get_database, metadata
|
||||
from reflector.utils import generate_uuid4
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
users = sqlalchemy.Table(
|
||||
"user",
|
||||
metadata,
|
||||
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
|
||||
sqlalchemy.Column("email", sqlalchemy.String, nullable=False),
|
||||
sqlalchemy.Column("authentik_uid", sqlalchemy.String, nullable=False),
|
||||
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
|
||||
sqlalchemy.Column("updated_at", sqlalchemy.DateTime(timezone=True), nullable=False),
|
||||
sqlalchemy.Index("idx_user_authentik_uid", "authentik_uid", unique=True),
|
||||
sqlalchemy.Index("idx_user_email", "email", unique=False),
|
||||
)
|
||||
|
||||
|
||||
class User(BaseModel):
|
||||
id: NonEmptyString = Field(default_factory=generate_uuid4)
|
||||
email: NonEmptyString
|
||||
authentik_uid: NonEmptyString
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
|
||||
|
||||
class UserController:
|
||||
@staticmethod
|
||||
async def get_by_id(user_id: NonEmptyString) -> User | None:
|
||||
query = users.select().where(users.c.id == user_id)
|
||||
result = await get_database().fetch_one(query)
|
||||
return User(**result) if result else None
|
||||
|
||||
@staticmethod
|
||||
async def get_by_authentik_uid(authentik_uid: NonEmptyString) -> User | None:
|
||||
query = users.select().where(users.c.authentik_uid == authentik_uid)
|
||||
result = await get_database().fetch_one(query)
|
||||
return User(**result) if result else None
|
||||
|
||||
@staticmethod
|
||||
async def get_by_email(email: NonEmptyString) -> User | None:
|
||||
query = users.select().where(users.c.email == email)
|
||||
result = await get_database().fetch_one(query)
|
||||
return User(**result) if result else None
|
||||
|
||||
@staticmethod
|
||||
async def create_or_update(
|
||||
id: NonEmptyString, authentik_uid: NonEmptyString, email: NonEmptyString
|
||||
) -> User:
|
||||
existing = await UserController.get_by_authentik_uid(authentik_uid)
|
||||
now = datetime.now(timezone.utc)
|
||||
|
||||
if existing:
|
||||
query = (
|
||||
users.update()
|
||||
.where(users.c.authentik_uid == authentik_uid)
|
||||
.values(email=email, updated_at=now)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
return User(
|
||||
id=existing.id,
|
||||
authentik_uid=authentik_uid,
|
||||
email=email,
|
||||
created_at=existing.created_at,
|
||||
updated_at=now,
|
||||
)
|
||||
else:
|
||||
user = User(
|
||||
id=id,
|
||||
authentik_uid=authentik_uid,
|
||||
email=email,
|
||||
created_at=now,
|
||||
updated_at=now,
|
||||
)
|
||||
query = users.insert().values(**user.model_dump())
|
||||
await get_database().execute(query)
|
||||
return user
|
||||
|
||||
@staticmethod
|
||||
async def list_all() -> list[User]:
|
||||
query = users.select().order_by(users.c.created_at.desc())
|
||||
results = await get_database().fetch_all(query)
|
||||
return [User(**r) for r in results]
|
||||
|
||||
|
||||
user_controller = UserController()
|
||||
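
create_or_update keys on authentik_uid, so repeated logins update the stored email in place instead of inserting a duplicate row; a usage sketch (values illustrative):

    user = await UserController.create_or_update(
        id="a1b2c3",            # only used on the first insert
        authentik_uid="ak-123",
        email="dev@example.com",
    )
    # calling again with a new email returns the same user id, email updated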
@@ -1,3 +1,4 @@
+import logging
 from typing import Type, TypeVar

 from llama_index.core import Settings
@@ -5,7 +6,7 @@ from llama_index.core.output_parsers import PydanticOutputParser
 from llama_index.core.program import LLMTextCompletionProgram
 from llama_index.core.response_synthesizers import TreeSummarize
 from llama_index.llms.openai_like import OpenAILike
-from pydantic import BaseModel
+from pydantic import BaseModel, ValidationError

 T = TypeVar("T", bound=BaseModel)

@@ -61,6 +62,8 @@ class LLM:
         tone_name: str | None = None,
     ) -> T:
         """Get structured output from LLM for non-function-calling models"""
+        logger = logging.getLogger(__name__)
+
         summarizer = TreeSummarize(verbose=True)
         response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)

@@ -76,8 +79,25 @@ class LLM:
             "Please structure the above information in the following JSON format:"
         )

-        output = await program.acall(
-            analysis=str(response), format_instructions=format_instructions
-        )
+        try:
+            output = await program.acall(
+                analysis=str(response), format_instructions=format_instructions
+            )
+        except ValidationError as e:
+            # Extract the raw JSON from the error details
+            errors = e.errors()
+            if errors and "input" in errors[0]:
+                raw_json = errors[0]["input"]
+                logger.error(
+                    f"JSON validation failed for {output_cls.__name__}. "
+                    f"Full raw JSON output:\n{raw_json}\n"
+                    f"Validation errors: {errors}"
+                )
+            else:
+                logger.error(
+                    f"JSON validation failed for {output_cls.__name__}. "
+                    f"Validation errors: {errors}"
+                )
+            raise

         return output
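
The except branch relies on pydantic v2 exposing the offending raw value under the "input" key of each errors() entry; a standalone illustration:

    from pydantic import BaseModel, ValidationError

    class Demo(BaseModel):
        title: str

    try:
        Demo.model_validate({"title": 123})
    except ValidationError as e:
        print(e.errors()[0]["input"])  # -> 123, the raw value that failed validation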
 1  server/reflector/pipelines/__init__.py  Normal file
@@ -0,0 +1 @@
"""Pipeline modules for audio processing."""
@@ -23,23 +23,18 @@ from reflector.db.transcripts import (
     transcripts_controller,
 )
 from reflector.logger import logger
+from reflector.pipelines import topic_processing
 from reflector.pipelines.main_live_pipeline import (
     PipelineMainBase,
     broadcast_to_sockets,
     task_cleanup_consent,
     task_pipeline_post_to_zulip,
 )
-from reflector.processors import (
-    AudioFileWriterProcessor,
-    TranscriptFinalSummaryProcessor,
-    TranscriptFinalTitleProcessor,
-    TranscriptTopicDetectorProcessor,
-)
+from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
+from reflector.processors import AudioFileWriterProcessor
 from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
 from reflector.processors.file_diarization import FileDiarizationInput
 from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor
-from reflector.processors.file_transcript import FileTranscriptInput
-from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
 from reflector.processors.transcript_diarization_assembler import (
     TranscriptDiarizationAssemblerInput,
     TranscriptDiarizationAssemblerProcessor,
@@ -56,19 +51,6 @@ from reflector.storage import get_transcripts_storage
 from reflector.worker.webhook import send_transcript_webhook


-class EmptyPipeline:
-    """Empty pipeline for processors that need a pipeline reference"""
-
-    def __init__(self, logger: structlog.BoundLogger):
-        self.logger = logger
-
-    def get_pref(self, k, d=None):
-        return d
-
-    async def emit(self, event):
-        pass
-
-
 class PipelineMainFile(PipelineMainBase):
     """
     Optimized file processing pipeline.
@@ -81,7 +63,7 @@ class PipelineMainFile(PipelineMainBase):
     def __init__(self, transcript_id: str):
         super().__init__(transcript_id=transcript_id)
         self.logger = logger.bind(transcript_id=self.transcript_id)
-        self.empty_pipeline = EmptyPipeline(logger=self.logger)
+        self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger)

     def _handle_gather_exceptions(self, results: list, operation: str) -> None:
         """Handle exceptions from asyncio.gather with return_exceptions=True"""
@@ -262,24 +244,7 @@

     async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
         """Transcribe complete file"""
-        processor = FileTranscriptAutoProcessor()
-        input_data = FileTranscriptInput(audio_url=audio_url, language=language)
-
-        # Store result for retrieval
-        result: TranscriptType | None = None
-
-        async def capture_result(transcript):
-            nonlocal result
-            result = transcript
-
-        processor.on(capture_result)
-        await processor.push(input_data)
-        await processor.flush()
-
-        if not result:
-            raise ValueError("No transcript captured")
-
-        return result
+        return await transcribe_file_with_processor(audio_url, language)

     async def diarize_file(self, audio_url: str) -> list[DiarizationSegment] | None:
         """Get diarization for file"""
@@ -322,63 +287,31 @@
     async def detect_topics(
         self, transcript: TranscriptType, target_language: str
     ) -> list[TitleSummary]:
         """Detect topics from complete transcript"""
-        chunk_size = 300
-        topics: list[TitleSummary] = []
-
-        async def on_topic(topic: TitleSummary):
-            topics.append(topic)
-            return await self.on_topic(topic)
-
-        topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
-        topic_detector.set_pipeline(self.empty_pipeline)
-
-        for i in range(0, len(transcript.words), chunk_size):
-            chunk_words = transcript.words[i : i + chunk_size]
-            if not chunk_words:
-                continue
-
-            chunk_transcript = TranscriptType(
-                words=chunk_words, translation=transcript.translation
-            )
-
-            await topic_detector.push(chunk_transcript)
-
-        await topic_detector.flush()
-        return topics
+        return await topic_processing.detect_topics(
+            transcript,
+            target_language,
+            on_topic_callback=self.on_topic,
+            empty_pipeline=self.empty_pipeline,
+        )

     async def generate_title(self, topics: list[TitleSummary]):
         """Generate title from topics"""
-        if not topics:
-            self.logger.warning("No topics for title generation")
-            return
-
-        processor = TranscriptFinalTitleProcessor(callback=self.on_title)
-        processor.set_pipeline(self.empty_pipeline)
-
-        for topic in topics:
-            await processor.push(topic)
-
-        await processor.flush()
+        return await topic_processing.generate_title(
+            topics,
+            on_title_callback=self.on_title,
+            empty_pipeline=self.empty_pipeline,
+            logger=self.logger,
+        )

     async def generate_summaries(self, topics: list[TitleSummary]):
         """Generate long and short summaries from topics"""
-        if not topics:
-            self.logger.warning("No topics for summary generation")
-            return
-
         transcript = await self.get_transcript()
-        processor = TranscriptFinalSummaryProcessor(
-            transcript=transcript,
-            callback=self.on_long_summary,
-            on_short_summary=self.on_short_summary,
+        return await topic_processing.generate_summaries(
+            topics,
+            transcript,
+            on_long_summary_callback=self.on_long_summary,
+            on_short_summary_callback=self.on_short_summary,
+            empty_pipeline=self.empty_pipeline,
+            logger=self.logger,
         )
-        processor.set_pipeline(self.empty_pipeline)
-
-        for topic in topics:
-            await processor.push(topic)
-
-        await processor.flush()


 @shared_task
@@ -17,7 +17,6 @@ from contextlib import asynccontextmanager
 from typing import Generic

 import av
-import boto3
 from celery import chord, current_task, group, shared_task
 from pydantic import BaseModel
 from structlog import BoundLogger as Logger
@@ -584,6 +583,7 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):

     consent_denied = False
     recording = None
+    meeting = None
     try:
         if transcript.recording_id:
             recording = await recordings_controller.get_by_id(transcript.recording_id)
@@ -594,8 +594,8 @@
                     meeting.id
                 )
     except Exception as e:
-        logger.error(f"Failed to get fetch consent: {e}", exc_info=e)
-        consent_denied = True
+        logger.error(f"Failed to fetch consent: {e}", exc_info=e)
+        raise

     if not consent_denied:
         logger.info("Consent approved, keeping all files")
@@ -603,25 +603,24 @@

     logger.info("Consent denied, cleaning up all related audio files")

-    if recording and recording.bucket_name and recording.object_key:
-        s3_whereby = boto3.client(
-            "s3",
-            aws_access_key_id=settings.AWS_WHEREBY_ACCESS_KEY_ID,
-            aws_secret_access_key=settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
-        )
-        try:
-            s3_whereby.delete_object(
-                Bucket=recording.bucket_name, Key=recording.object_key
-            )
-            logger.info(
-                f"Deleted original Whereby recording: {recording.bucket_name}/{recording.object_key}"
-            )
-        except Exception as e:
-            logger.error(f"Failed to delete Whereby recording: {e}", exc_info=e)
+    deletion_errors = []
+    if recording and recording.bucket_name:
+        keys_to_delete = []
+        if recording.track_keys:
+            keys_to_delete = recording.track_keys
+        elif recording.object_key:
+            keys_to_delete = [recording.object_key]
+
+        master_storage = get_transcripts_storage()
+        for key in keys_to_delete:
+            try:
+                await master_storage.delete_file(key, bucket=recording.bucket_name)
+                logger.info(f"Deleted recording file: {recording.bucket_name}/{key}")
+            except Exception as e:
+                error_msg = f"Failed to delete {key}: {e}"
+                logger.error(error_msg, exc_info=e)
+                deletion_errors.append(error_msg)

-    # non-transactional, files marked for deletion not actually deleted is possible
-    await transcripts_controller.update(transcript, {"audio_deleted": True})
     # 2. Delete processed audio from transcript storage S3 bucket
     if transcript.audio_location == "storage":
         storage = get_transcripts_storage()
         try:
@@ -630,18 +629,28 @@ async def cleanup_consent(transcript: Transcript, logger: Logger):
                 f"Deleted processed audio from storage: {transcript.storage_audio_path}"
             )
         except Exception as e:
-            logger.error(f"Failed to delete processed audio: {e}", exc_info=e)
+            error_msg = f"Failed to delete processed audio: {e}"
+            logger.error(error_msg, exc_info=e)
+            deletion_errors.append(error_msg)

     # 3. Delete local audio files
     try:
         if hasattr(transcript, "audio_mp3_filename") and transcript.audio_mp3_filename:
             transcript.audio_mp3_filename.unlink(missing_ok=True)
         if hasattr(transcript, "audio_wav_filename") and transcript.audio_wav_filename:
            transcript.audio_wav_filename.unlink(missing_ok=True)
     except Exception as e:
-        logger.error(f"Failed to delete local audio files: {e}", exc_info=e)
+        error_msg = f"Failed to delete local audio files: {e}"
+        logger.error(error_msg, exc_info=e)
+        deletion_errors.append(error_msg)

-    logger.info("Consent cleanup done")
+    if deletion_errors:
+        logger.warning(
+            f"Consent cleanup completed with {len(deletion_errors)} errors",
+            errors=deletion_errors,
+        )
+    else:
+        await transcripts_controller.update(transcript, {"audio_deleted": True})
+        logger.info("Consent cleanup done - all audio deleted")


 @get_transcript
 803  server/reflector/pipelines/main_multitrack_pipeline.py  Normal file
@@ -0,0 +1,803 @@
import asyncio
import math
import tempfile
from fractions import Fraction
from pathlib import Path

import av
from av.audio.resampler import AudioResampler
from celery import chain, shared_task

from reflector.asynctask import asynctask
from reflector.dailyco_api import MeetingParticipantsResponse
from reflector.db.transcripts import (
    Transcript,
    TranscriptParticipant,
    TranscriptStatus,
    TranscriptWaveform,
    transcripts_controller,
)
from reflector.logger import logger
from reflector.pipelines import topic_processing
from reflector.pipelines.main_file_pipeline import task_send_webhook_if_needed
from reflector.pipelines.main_live_pipeline import (
    PipelineMainBase,
    broadcast_to_sockets,
    task_cleanup_consent,
    task_pipeline_post_to_zulip,
)
from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
from reflector.processors import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.processors.types import TitleSummary
from reflector.processors.types import Transcript as TranscriptType
from reflector.settings import settings
from reflector.storage import Storage, get_transcripts_storage
from reflector.utils.daily import (
    filter_cam_audio_tracks,
    parse_daily_recording_filename,
)
from reflector.utils.string import NonEmptyString
from reflector.video_platforms.factory import create_platform_client

# Audio encoding constants
OPUS_STANDARD_SAMPLE_RATE = 48000
OPUS_DEFAULT_BIT_RATE = 128000

# Storage operation constants
PRESIGNED_URL_EXPIRATION_SECONDS = 7200  # 2 hours


class PipelineMainMultitrack(PipelineMainBase):
    def __init__(self, transcript_id: str):
        super().__init__(transcript_id=transcript_id)
        self.logger = logger.bind(transcript_id=self.transcript_id)
        self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger)

    async def pad_track_for_transcription(
        self,
        track_url: NonEmptyString,
        track_idx: int,
        storage: Storage,
    ) -> NonEmptyString:
        """
        Pad a single track with silence based on stream metadata start_time.
        Downloads from S3 presigned URL, processes via PyAV using tempfile, uploads to S3.
        Returns presigned URL of padded track (or original URL if no padding needed).

        Memory usage:
        - Pattern: fixed_overhead(2-5MB) for PyAV codec/filters
        - PyAV streams input efficiently (no full download, verified)
        - Output written to tempfile (disk-based, not memory)
        - Upload streams from file handle (boto3 chunks, typically 5-10MB)

        Daily.co raw-tracks timing - Two approaches:

        CURRENT APPROACH (PyAV metadata):
        The WebM stream.start_time field encodes MEETING-RELATIVE timing:
        - t=0: When Daily.co recording started (first participant joined)
        - start_time=8.13s: This participant's track began 8.13s after recording started
        - Purpose: Enables track alignment without external manifest files

        This is NOT:
        - Stream-internal offset (first packet timestamp relative to stream start)
        - Absolute/wall-clock time
        - Recording duration

        ALTERNATIVE APPROACH (filename parsing):
        Daily.co filenames contain Unix timestamps (milliseconds):
        Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}.webm
        Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm

        Can calculate offset: (track_start_ts - recording_start_ts) / 1000
        - Track 0: (1760988935922 - 1760988935484) / 1000 = 0.438s
        - Track 1: (1760988943823 - 1760988935484) / 1000 = 8.339s

        TIME DIFFERENCE: PyAV metadata vs filename timestamps differ by ~209ms:
        - Track 0: filename=438ms, metadata=229ms (diff: 209ms)
        - Track 1: filename=8339ms, metadata=8130ms (diff: 209ms)

        Consistent delta suggests network/encoding delay. PyAV metadata is ground truth
        (represents when audio stream actually started vs when file upload initiated).

        Example with 2 participants:
        Track A: start_time=0.2s → Joined 200ms after recording began
        Track B: start_time=8.1s → Joined 8.1 seconds later

        After padding:
        Track A: [0.2s silence] + [speech...]
        Track B: [8.1s silence] + [speech...]

        Whisper transcription timestamps are now synchronized:
        Track A word at 5.0s → happened at meeting t=5.0s
        Track B word at 10.0s → happened at meeting t=10.0s

        Merging just sorts by timestamp - no offset calculation needed.

        Padding incidentally involves re-encoding, which matters when we work
        with Daily.co + Whisper: Daily.co returns recordings with skipped
        frames, e.g. when the microphone is muted. Whisper doesn't account for
        those gaps and ignores them, causing timestamp issues in transcription.
        Re-encoding restores those frames. We do padding and re-encoding
        together because it's convenient and more performant: we need the
        padded audio for the mixed MP3 anyway.
        """
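        # Worked example of the filename fallback described above, using the
        # docstring's example timestamps: track 1 starts at
        # (1760988943823 - 1760988935484) / 1000 = 8.339 s, so it would be
        # padded with ~8.3 s of leading silence.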

        transcript = await self.get_transcript()

        try:
            # PyAV streams input from S3 URL efficiently (2-5MB fixed overhead for codec/filters)
            with av.open(track_url) as in_container:
                start_time_seconds = self._extract_stream_start_time_from_container(
                    in_container, track_idx
                )

                if start_time_seconds <= 0:
                    self.logger.info(
                        f"Track {track_idx} requires no padding (start_time={start_time_seconds}s)",
                        track_idx=track_idx,
                    )
                    return track_url

                # Use tempfile instead of BytesIO for better memory efficiency
                # Reduces peak memory usage during encoding/upload
                with tempfile.NamedTemporaryFile(
                    suffix=".webm", delete=False
                ) as temp_file:
                    temp_path = temp_file.name

                try:
                    self._apply_audio_padding_to_file(
                        in_container, temp_path, start_time_seconds, track_idx
                    )

                    storage_path = (
                        f"file_pipeline/{transcript.id}/tracks/padded_{track_idx}.webm"
                    )

                    # Upload using file handle for streaming
                    with open(temp_path, "rb") as padded_file:
                        await storage.put_file(storage_path, padded_file)
                finally:
                    # Clean up temp file
                    Path(temp_path).unlink(missing_ok=True)

                padded_url = await storage.get_file_url(
                    storage_path,
                    operation="get_object",
                    expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
                )

                self.logger.info(
                    f"Successfully padded track {track_idx}",
                    track_idx=track_idx,
                    start_time_seconds=start_time_seconds,
                    padded_url=padded_url,
                )

                return padded_url

        except Exception as e:
            self.logger.error(
                f"Failed to process track {track_idx}",
                track_idx=track_idx,
                url=track_url,
                error=str(e),
                exc_info=True,
            )
            raise Exception(
                f"Track {track_idx} padding failed - transcript would have incorrect timestamps"
            ) from e

    def _extract_stream_start_time_from_container(
        self, container, track_idx: int
    ) -> float:
        """
        Extract meeting-relative start time from WebM stream metadata.
        Uses PyAV to read stream.start_time from WebM container.
        More accurate than filename timestamps by ~209ms due to network/encoding delays.
        """
        start_time_seconds = 0.0
        try:
            audio_streams = [s for s in container.streams if s.type == "audio"]
            stream = audio_streams[0] if audio_streams else container.streams[0]

            # 1) Try stream-level start_time (most reliable for Daily.co tracks)
            if stream.start_time is not None and stream.time_base is not None:
                start_time_seconds = float(stream.start_time * stream.time_base)

            # 2) Fallback to container-level start_time (in av.time_base units)
            if (start_time_seconds <= 0) and (container.start_time is not None):
                start_time_seconds = float(container.start_time * av.time_base)

            # 3) Fallback to first packet DTS in stream.time_base
            if start_time_seconds <= 0:
                for packet in container.demux(stream):
                    if packet.dts is not None:
                        start_time_seconds = float(packet.dts * stream.time_base)
                        break
        except Exception as e:
            self.logger.warning(
                "PyAV metadata read failed; assuming 0 start_time",
                track_idx=track_idx,
                error=str(e),
            )
            start_time_seconds = 0.0

        self.logger.info(
            f"Track {track_idx} stream metadata: start_time={start_time_seconds:.3f}s",
            track_idx=track_idx,
        )
        return start_time_seconds

    def _apply_audio_padding_to_file(
        self,
        in_container,
        output_path: str,
        start_time_seconds: float,
        track_idx: int,
    ) -> None:
        """Apply silence padding to audio track using PyAV filter graph, writing to file"""
        delay_ms = math.floor(start_time_seconds * 1000)

        self.logger.info(
            f"Padding track {track_idx} with {delay_ms}ms delay using PyAV",
            track_idx=track_idx,
            delay_ms=delay_ms,
        )

        try:
            with av.open(output_path, "w", format="webm") as out_container:
                in_stream = next(
                    (s for s in in_container.streams if s.type == "audio"), None
                )
                if in_stream is None:
                    raise Exception("No audio stream in input")

                out_stream = out_container.add_stream(
                    "libopus", rate=OPUS_STANDARD_SAMPLE_RATE
                )
                out_stream.bit_rate = OPUS_DEFAULT_BIT_RATE
                graph = av.filter.Graph()

                abuf_args = (
                    f"time_base=1/{OPUS_STANDARD_SAMPLE_RATE}:"
                    f"sample_rate={OPUS_STANDARD_SAMPLE_RATE}:"
                    f"sample_fmt=s16:"
                    f"channel_layout=stereo"
                )
                src = graph.add("abuffer", args=abuf_args, name="src")
                aresample_f = graph.add("aresample", args="async=1", name="ares")
                # adelay requires one delay value per channel separated by '|'
                delays_arg = f"{delay_ms}|{delay_ms}"
                adelay_f = graph.add(
                    "adelay", args=f"delays={delays_arg}:all=1", name="delay"
                )
                sink = graph.add("abuffersink", name="sink")

                src.link_to(aresample_f)
                aresample_f.link_to(adelay_f)
                adelay_f.link_to(sink)
                graph.configure()

                resampler = AudioResampler(
                    format="s16", layout="stereo", rate=OPUS_STANDARD_SAMPLE_RATE
                )
                # Decode -> resample -> push through graph -> encode Opus
                for frame in in_container.decode(in_stream):
                    out_frames = resampler.resample(frame) or []
                    for rframe in out_frames:
                        rframe.sample_rate = OPUS_STANDARD_SAMPLE_RATE
                        rframe.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
                        src.push(rframe)

                        while True:
                            try:
                                f_out = sink.pull()
                            except Exception:
                                break
                            f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
                            f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
                            for packet in out_stream.encode(f_out):
                                out_container.mux(packet)

                src.push(None)
                while True:
                    try:
                        f_out = sink.pull()
                    except Exception:
                        break
                    f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
                    f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
                    for packet in out_stream.encode(f_out):
                        out_container.mux(packet)

                for packet in out_stream.encode(None):
                    out_container.mux(packet)
        except Exception as e:
            self.logger.error(
                "PyAV padding failed for track",
                track_idx=track_idx,
                delay_ms=delay_ms,
                error=str(e),
                exc_info=True,
            )
            raise
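    # For reference, the padding graph above corresponds roughly to this ffmpeg
    # invocation (a debugging sketch only, not used by the pipeline; flags are
    # assumed equivalents of the filter graph):
    #   ffmpeg -i in.webm -af "aresample=async=1,adelay=8130|8130:all=1" \
    #          -c:a libopus -b:a 128k out.webm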
    async def mixdown_tracks(
        self,
        track_urls: list[str],
        writer: AudioFileWriterProcessor,
        offsets_seconds: list[float] | None = None,
    ) -> None:
        """Multi-track mixdown using PyAV filter graph (amix), reading from S3 presigned URLs"""

        target_sample_rate: int | None = None
        for url in track_urls:
            if not url:
                continue
            container = None
            try:
                container = av.open(url)
                for frame in container.decode(audio=0):
                    target_sample_rate = frame.sample_rate
                    break
            except Exception:
                continue
            finally:
                if container is not None:
                    container.close()
            if target_sample_rate:
                break

        if not target_sample_rate:
            self.logger.error("Mixdown failed - no decodable audio frames found")
            raise Exception("Mixdown failed: No decodable audio frames in any track")

        # Build PyAV filter graph:
        #   N abuffer (s32/stereo)
        #     -> optional adelay per input (for alignment)
        #     -> amix (s32)
        #     -> aformat (s32)
        #     -> sink
        graph = av.filter.Graph()
        inputs = []
        valid_track_urls = [url for url in track_urls if url]
        input_offsets_seconds = None
        if offsets_seconds is not None:
            input_offsets_seconds = [
                offsets_seconds[i] for i, url in enumerate(track_urls) if url
            ]
        for idx, url in enumerate(valid_track_urls):
            args = (
                f"time_base=1/{target_sample_rate}:"
                f"sample_rate={target_sample_rate}:"
                f"sample_fmt=s32:"
                f"channel_layout=stereo"
            )
            in_ctx = graph.add("abuffer", args=args, name=f"in{idx}")
            inputs.append(in_ctx)

        if not inputs:
            self.logger.error("Mixdown failed - no valid inputs for graph")
            raise Exception("Mixdown failed: No valid inputs for filter graph")

        mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix")

        fmt = graph.add(
            "aformat",
            args=(
                f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}"
            ),
            name="fmt",
        )

        sink = graph.add("abuffersink", name="out")

        # Optional per-input delay before mixing
        delays_ms: list[int] = []
        if input_offsets_seconds is not None:
            base = min(input_offsets_seconds) if input_offsets_seconds else 0.0
            delays_ms = [
                max(0, int(round((o - base) * 1000))) for o in input_offsets_seconds
            ]
        else:
            delays_ms = [0 for _ in inputs]
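        # Worked example: offsets [0.438, 8.339] -> base = 0.438 ->
        # delays_ms = [0, 7901]; the earliest track anchors t=0 and the
        # remaining tracks are delayed relative to it before mixing.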
        for idx, in_ctx in enumerate(inputs):
            delay_ms = delays_ms[idx] if idx < len(delays_ms) else 0
            if delay_ms > 0:
                # adelay requires one value per channel; use same for stereo
                adelay = graph.add(
                    "adelay",
                    args=f"delays={delay_ms}|{delay_ms}:all=1",
                    name=f"delay{idx}",
                )
                in_ctx.link_to(adelay)
                adelay.link_to(mixer, 0, idx)
            else:
                in_ctx.link_to(mixer, 0, idx)
        mixer.link_to(fmt)
        fmt.link_to(sink)
        graph.configure()

        containers = []
        try:
            # Open all containers with cleanup guaranteed
            for i, url in enumerate(valid_track_urls):
                try:
                    c = av.open(url)
                    containers.append(c)
                except Exception as e:
                    self.logger.warning(
                        "Mixdown: failed to open container from URL",
                        input=i,
                        url=url,
                        error=str(e),
                    )

            if not containers:
                self.logger.error("Mixdown failed - no valid containers opened")
                raise Exception("Mixdown failed: Could not open any track containers")

            decoders = [c.decode(audio=0) for c in containers]
            active = [True] * len(decoders)
            resamplers = [
                AudioResampler(format="s32", layout="stereo", rate=target_sample_rate)
                for _ in decoders
            ]

            while any(active):
                for i, (dec, is_active) in enumerate(zip(decoders, active)):
                    if not is_active:
                        continue
                    try:
                        frame = next(dec)
                    except StopIteration:
                        active[i] = False
                        continue

                    if frame.sample_rate != target_sample_rate:
                        continue
                    out_frames = resamplers[i].resample(frame) or []
                    for rf in out_frames:
                        rf.sample_rate = target_sample_rate
                        rf.time_base = Fraction(1, target_sample_rate)
                        inputs[i].push(rf)

                while True:
                    try:
                        mixed = sink.pull()
                    except Exception:
                        break
                    mixed.sample_rate = target_sample_rate
                    mixed.time_base = Fraction(1, target_sample_rate)
                    await writer.push(mixed)

            for in_ctx in inputs:
                in_ctx.push(None)
            while True:
                try:
                    mixed = sink.pull()
                except Exception:
                    break
                mixed.sample_rate = target_sample_rate
                mixed.time_base = Fraction(1, target_sample_rate)
                await writer.push(mixed)
        finally:
            # Cleanup all containers, even if processing failed
            for c in containers:
                if c is not None:
                    try:
                        c.close()
                    except Exception:
                        pass  # Best effort cleanup

    @broadcast_to_sockets
    async def set_status(self, transcript_id: str, status: TranscriptStatus):
        async with self.lock_transaction():
            return await transcripts_controller.set_status(transcript_id, status)

    async def on_waveform(self, data):
        async with self.transaction():
            waveform = TranscriptWaveform(waveform=data)
            transcript = await self.get_transcript()
            return await transcripts_controller.append_event(
                transcript=transcript, event="WAVEFORM", data=waveform
            )

    async def update_participants_from_daily(
        self, transcript: Transcript, track_keys: list[str]
    ) -> None:
        """Update transcript participants with user_id and names from Daily.co API."""
        if not transcript.recording_id:
            return

        try:
            async with create_platform_client("daily") as daily_client:
                id_to_name = {}
                id_to_user_id = {}

                try:
                    rec_details = await daily_client.get_recording(
                        transcript.recording_id
                    )
                    mtg_session_id = rec_details.mtgSessionId
                    if mtg_session_id:
                        try:
                            payload: MeetingParticipantsResponse = (
                                await daily_client.get_meeting_participants(
                                    mtg_session_id
                                )
                            )
                            for p in payload.data:
                                pid = p.participant_id
                                name = p.user_name
                                user_id = p.user_id
                                if name:
                                    id_to_name[pid] = name
                                if user_id:
                                    id_to_user_id[pid] = user_id
                        except Exception as e:
                            self.logger.warning(
                                "Failed to fetch Daily meeting participants",
                                error=str(e),
                                mtg_session_id=mtg_session_id,
                                exc_info=True,
                            )
                    else:
                        self.logger.warning(
                            "No mtgSessionId found for recording; participant names may be generic",
                            recording_id=transcript.recording_id,
                        )
                except Exception as e:
                    self.logger.warning(
                        "Failed to fetch Daily recording details",
                        error=str(e),
                        recording_id=transcript.recording_id,
                        exc_info=True,
                    )
                    return

                cam_audio_keys = filter_cam_audio_tracks(track_keys)

                for idx, key in enumerate(cam_audio_keys):
                    try:
                        parsed = parse_daily_recording_filename(key)
                        participant_id = parsed.participant_id
                    except ValueError as e:
                        self.logger.error(
                            "Failed to parse Daily recording filename",
                            error=str(e),
                            key=key,
                            exc_info=True,
                        )
                        continue

                    default_name = f"Speaker {idx}"
                    name = id_to_name.get(participant_id, default_name)
                    user_id = id_to_user_id.get(participant_id)

                    participant = TranscriptParticipant(
                        id=participant_id, speaker=idx, name=name, user_id=user_id
                    )
                    await transcripts_controller.upsert_participant(
                        transcript, participant
                    )

        except Exception as e:
            self.logger.warning(
                "Failed to map participant names", error=str(e), exc_info=True
            )

    async def process(self, bucket_name: str, track_keys: list[str]):
        transcript = await self.get_transcript()
        async with self.transaction():
            await transcripts_controller.update(
                transcript,
                {
                    "events": [],
                    "topics": [],
                    "participants": [],
                },
            )

        await self.update_participants_from_daily(transcript, track_keys)

        source_storage = get_transcripts_storage()
        transcript_storage = source_storage

        track_urls: list[str] = []
        for key in track_keys:
            url = await source_storage.get_file_url(
                key,
                operation="get_object",
                expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
                bucket=bucket_name,
            )
            track_urls.append(url)
            self.logger.info(
                f"Generated presigned URL for track from {bucket_name}",
                key=key,
            )

        created_padded_files = set()
        padded_track_urls: list[str] = []
        for idx, url in enumerate(track_urls):
            padded_url = await self.pad_track_for_transcription(
                url, idx, transcript_storage
            )
            padded_track_urls.append(padded_url)
            if padded_url != url:
                storage_path = f"file_pipeline/{transcript.id}/tracks/padded_{idx}.webm"
                created_padded_files.add(storage_path)
            self.logger.info(f"Track {idx} processed, padded URL: {padded_url}")

        transcript.data_path.mkdir(parents=True, exist_ok=True)

        if settings.SKIP_MIXDOWN:
            self.logger.warning(
                "SKIP_MIXDOWN enabled: Skipping mixdown and waveform generation. "
                "UI will have no audio playback or waveform.",
                num_tracks=len(padded_track_urls),
                transcript_id=transcript.id,
            )
        else:
            mp3_writer = AudioFileWriterProcessor(
                path=str(transcript.audio_mp3_filename),
                on_duration=self.on_duration,
            )
            await self.mixdown_tracks(
                padded_track_urls, mp3_writer, offsets_seconds=None
            )
            await mp3_writer.flush()

            if not transcript.audio_mp3_filename.exists():
                raise Exception(
                    "Mixdown failed - no MP3 file generated. Cannot proceed without playable audio."
                )

            storage_path = f"{transcript.id}/audio.mp3"
            # Use file handle streaming to avoid loading entire MP3 into memory
            mp3_size = transcript.audio_mp3_filename.stat().st_size
            with open(transcript.audio_mp3_filename, "rb") as mp3_file:
                await transcript_storage.put_file(storage_path, mp3_file)
            mp3_url = await transcript_storage.get_file_url(storage_path)

            await transcripts_controller.update(
                transcript, {"audio_location": "storage"}
            )

            self.logger.info(
                "Uploaded mixed audio to storage",
                storage_path=storage_path,
                size=mp3_size,
                url=mp3_url,
            )

            self.logger.info("Generating waveform from mixed audio")
            waveform_processor = AudioWaveformProcessor(
                audio_path=transcript.audio_mp3_filename,
                waveform_path=transcript.audio_waveform_filename,
                on_waveform=self.on_waveform,
            )
            waveform_processor.set_pipeline(self.empty_pipeline)
            await waveform_processor.flush()
            self.logger.info("Waveform generated successfully")

        speaker_transcripts: list[TranscriptType] = []
        for idx, padded_url in enumerate(padded_track_urls):
            if not padded_url:
                continue

            t = await self.transcribe_file(padded_url, transcript.source_language)

            if not t.words:
                self.logger.debug(f"no words in track {idx}")
                # not skipping, it may be silence or indistinguishable mumbling

            for w in t.words:
                w.speaker = idx

            speaker_transcripts.append(t)
            self.logger.info(
                f"Track {idx} transcribed successfully with {len(t.words)} words",
                track_idx=idx,
            )

        valid_track_count = len([url for url in padded_track_urls if url])
        if valid_track_count > 0 and len(speaker_transcripts) != valid_track_count:
            raise Exception(
                f"Only {len(speaker_transcripts)}/{valid_track_count} tracks transcribed successfully. "
                f"All tracks must succeed to avoid incomplete transcripts."
            )

        if not speaker_transcripts:
            raise Exception("No valid track transcriptions")

        self.logger.info(f"Cleaning up {len(created_padded_files)} temporary S3 files")
        cleanup_tasks = []
        for storage_path in created_padded_files:
            cleanup_tasks.append(transcript_storage.delete_file(storage_path))

        if cleanup_tasks:
            cleanup_results = await asyncio.gather(
                *cleanup_tasks, return_exceptions=True
            )
            for storage_path, result in zip(created_padded_files, cleanup_results):
                if isinstance(result, Exception):
                    self.logger.warning(
                        "Failed to cleanup temporary padded track",
                        storage_path=storage_path,
                        error=str(result),
                    )

        merged_words = []
        for t in speaker_transcripts:
            merged_words.extend(t.words)
        merged_words.sort(
            key=lambda w: w.start if hasattr(w, "start") and w.start is not None else 0
        )

        merged_transcript = TranscriptType(words=merged_words, translation=None)

        await self.on_transcript(merged_transcript)

        topics = await self.detect_topics(merged_transcript, transcript.target_language)
        await asyncio.gather(
            self.generate_title(topics),
            self.generate_summaries(topics),
            return_exceptions=False,
        )

        await self.set_status(transcript.id, "ended")

    async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType:
        return await transcribe_file_with_processor(audio_url, language)

    async def detect_topics(
        self, transcript: TranscriptType, target_language: str
    ) -> list[TitleSummary]:
        return await topic_processing.detect_topics(
            transcript,
            target_language,
            on_topic_callback=self.on_topic,
            empty_pipeline=self.empty_pipeline,
        )

    async def generate_title(self, topics: list[TitleSummary]):
        return await topic_processing.generate_title(
            topics,
            on_title_callback=self.on_title,
            empty_pipeline=self.empty_pipeline,
            logger=self.logger,
        )

    async def generate_summaries(self, topics: list[TitleSummary]):
        transcript = await self.get_transcript()
        return await topic_processing.generate_summaries(
            topics,
            transcript,
            on_long_summary_callback=self.on_long_summary,
            on_short_summary_callback=self.on_short_summary,
            empty_pipeline=self.empty_pipeline,
            logger=self.logger,
        )


@shared_task
@asynctask
async def task_pipeline_multitrack_process(
    *, transcript_id: str, bucket_name: str, track_keys: list[str]
):
    pipeline = PipelineMainMultitrack(transcript_id=transcript_id)
    try:
        await pipeline.set_status(transcript_id, "processing")
        await pipeline.process(bucket_name, track_keys)
    except Exception:
        await pipeline.set_status(transcript_id, "error")
        raise

    post_chain = chain(
        task_cleanup_consent.si(transcript_id=transcript_id),
        task_pipeline_post_to_zulip.si(transcript_id=transcript_id),
        task_send_webhook_if_needed.si(transcript_id=transcript_id),
    )
    post_chain.delay()
 109  server/reflector/pipelines/topic_processing.py  Normal file
@@ -0,0 +1,109 @@
"""
Topic processing utilities
==========================

Shared topic detection, title generation, and summarization logic
used across file and multitrack pipelines.
"""

from typing import Callable

import structlog

from reflector.db.transcripts import Transcript
from reflector.processors import (
    TranscriptFinalSummaryProcessor,
    TranscriptFinalTitleProcessor,
    TranscriptTopicDetectorProcessor,
)
from reflector.processors.types import TitleSummary
from reflector.processors.types import Transcript as TranscriptType


class EmptyPipeline:
    def __init__(self, logger: structlog.BoundLogger):
        self.logger = logger

    def get_pref(self, k, d=None):
        return d

    async def emit(self, event):
        pass


async def detect_topics(
    transcript: TranscriptType,
    target_language: str,
    *,
    on_topic_callback: Callable,
    empty_pipeline: EmptyPipeline,
) -> list[TitleSummary]:
    chunk_size = 300
    topics: list[TitleSummary] = []

    async def on_topic(topic: TitleSummary):
        topics.append(topic)
        return await on_topic_callback(topic)

    topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic)
    topic_detector.set_pipeline(empty_pipeline)

    for i in range(0, len(transcript.words), chunk_size):
        chunk_words = transcript.words[i : i + chunk_size]
        if not chunk_words:
            continue

        chunk_transcript = TranscriptType(
            words=chunk_words, translation=transcript.translation
        )

        await topic_detector.push(chunk_transcript)

    await topic_detector.flush()
    return topics


async def generate_title(
    topics: list[TitleSummary],
    *,
    on_title_callback: Callable,
    empty_pipeline: EmptyPipeline,
    logger: structlog.BoundLogger,
):
    if not topics:
        logger.warning("No topics for title generation")
        return

    processor = TranscriptFinalTitleProcessor(callback=on_title_callback)
    processor.set_pipeline(empty_pipeline)

    for topic in topics:
        await processor.push(topic)

    await processor.flush()


async def generate_summaries(
    topics: list[TitleSummary],
    transcript: Transcript,
    *,
    on_long_summary_callback: Callable,
    on_short_summary_callback: Callable,
    empty_pipeline: EmptyPipeline,
    logger: structlog.BoundLogger,
):
    if not topics:
        logger.warning("No topics for summary generation")
        return

    processor = TranscriptFinalSummaryProcessor(
        transcript=transcript,
        callback=on_long_summary_callback,
        on_short_summary=on_short_summary_callback,
    )
    processor.set_pipeline(empty_pipeline)

    for topic in topics:
        await processor.push(topic)

    await processor.flush()
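
A minimal wiring sketch for the detector, feeding 300-word chunks from a full transcript (the no-op callback and logger are illustrative):

    import structlog
    from reflector.pipelines import topic_processing

    async def collect(topic):
        pass  # pipelines forward this to their own on_topic handlers

    async def run(transcript):
        pipeline = topic_processing.EmptyPipeline(logger=structlog.get_logger())
        return await topic_processing.detect_topics(
            transcript,
            "en",
            on_topic_callback=collect,
            empty_pipeline=pipeline,
        )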
 34  server/reflector/pipelines/transcription_helpers.py  Normal file
@@ -0,0 +1,34 @@
from reflector.processors.file_transcript import FileTranscriptInput
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
from reflector.processors.types import Transcript as TranscriptType


async def transcribe_file_with_processor(
    audio_url: str,
    language: str,
    processor_name: str | None = None,
) -> TranscriptType:
    processor = (
        FileTranscriptAutoProcessor(name=processor_name)
        if processor_name
        else FileTranscriptAutoProcessor()
    )
    input_data = FileTranscriptInput(audio_url=audio_url, language=language)

    result: TranscriptType | None = None

    async def capture_result(transcript):
        nonlocal result
        result = transcript

    processor.on(capture_result)
    await processor.push(input_data)
    await processor.flush()

    if not result:
        processor_label = processor_name or "default"
        raise ValueError(
            f"No transcript captured from {processor_label} processor for audio: {audio_url}"
        )

    return result
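
With the helper, both pipelines' transcribe_file methods collapse to a single awaited call; e.g. (URL illustrative):

    t = await transcribe_file_with_processor("https://example.com/track0.webm", "en")
    print(len(t.words))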
@@ -165,6 +165,7 @@ class SummaryBuilder:
         self.llm: LLM = llm
         self.model_name: str = llm.model_name
         self.logger = logger or structlog.get_logger()
+        self.participant_instructions: str | None = None
         if filename:
             self.read_transcript_from_file(filename)

@@ -191,14 +192,61 @@ class SummaryBuilder:
         self, prompt: str, output_cls: Type[T], tone_name: str | None = None
     ) -> T:
         """Generic function to get structured output from LLM for non-function-calling models."""
+        # Add participant instructions to the prompt if available
+        enhanced_prompt = self._enhance_prompt_with_participants(prompt)
         return await self.llm.get_structured_response(
-            prompt, [self.transcript], output_cls, tone_name=tone_name
+            enhanced_prompt, [self.transcript], output_cls, tone_name=tone_name
         )

+    async def _get_response(
+        self, prompt: str, texts: list[str], tone_name: str | None = None
+    ) -> str:
+        """Get text response with automatic participant instructions injection."""
+        enhanced_prompt = self._enhance_prompt_with_participants(prompt)
+        return await self.llm.get_response(enhanced_prompt, texts, tone_name=tone_name)
+
+    def _enhance_prompt_with_participants(self, prompt: str) -> str:
+        """Add participant instructions to any prompt if participants are known."""
+        if self.participant_instructions:
+            self.logger.debug("Adding participant instructions to prompt")
+            return f"{prompt}\n\n{self.participant_instructions}"
+        return prompt
+
+    # ----------------------------------------------------------------------------
+    # Participants
+    # ----------------------------------------------------------------------------
+
+    def set_known_participants(self, participants: list[str]) -> None:
+        """
+        Set known participants directly without LLM identification.
+        This is used when participants are already identified and stored.
+        They are appended at the end of the transcript, providing more context for the assistant.
+        """
+        if not participants:
+            self.logger.warning("No participants provided")
+            return
+
+        self.logger.info(
+            "Using known participants",
+            participants=participants,
+        )
+
+        participants_md = self.format_list_md(participants)
+        self.transcript += f"\n\n# Participants\n\n{participants_md}"
+
+        # Set instructions that will be automatically added to all prompts
+        participants_list = ", ".join(participants)
+        self.participant_instructions = dedent(
+            f"""
+            # IMPORTANT: Participant Names
+            The following participants are identified in this conversation: {participants_list}
+
+            You MUST use these specific participant names when referring to people in your response.
+            Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc.
+            Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested...").
+            """
+        ).strip()
+
     async def identify_participants(self) -> None:
         """
         From a transcript, try to identify the participants using TreeSummarize with structured output.
@@ -232,6 +280,19 @@ class SummaryBuilder:
         if unique_participants:
             participants_md = self.format_list_md(unique_participants)
             self.transcript += f"\n\n# Participants\n\n{participants_md}"
+
+            # Set instructions that will be automatically added to all prompts
+            participants_list = ", ".join(unique_participants)
+            self.participant_instructions = dedent(
+                f"""
+                # IMPORTANT: Participant Names
+                The following participants are identified in this conversation: {participants_list}
+
+                You MUST use these specific participant names when referring to people in your response.
+                Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc.
+                Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested...").
+                """
+            ).strip()
         else:
             self.logger.warning("No participants identified in the transcript")

@@ -318,13 +379,13 @@ class SummaryBuilder:
         for subject in self.subjects:
             detailed_prompt = DETAILED_SUBJECT_PROMPT_TEMPLATE.format(subject=subject)

-            detailed_response = await self.llm.get_response(
+            detailed_response = await self._get_response(
                 detailed_prompt, [self.transcript], tone_name="Topic assistant"
             )

             paragraph_prompt = PARAGRAPH_SUMMARY_PROMPT

-            paragraph_response = await self.llm.get_response(
+            paragraph_response = await self._get_response(
                 paragraph_prompt, [str(detailed_response)], tone_name="Topic summarizer"
             )

@@ -345,7 +406,7 @@ class SummaryBuilder:

         recap_prompt = RECAP_PROMPT

-        recap_response = await self.llm.get_response(
+        recap_response = await self._get_response(
             recap_prompt, [summaries_text], tone_name="Recap summarizer"
         )
@@ -26,7 +26,25 @@ class TranscriptFinalSummaryProcessor(Processor):
     async def get_summary_builder(self, text) -> SummaryBuilder:
         builder = SummaryBuilder(self.llm, logger=self.logger)
         builder.set_transcript(text)
-        await builder.identify_participants()
+
+        # Use known participants if available, otherwise identify them
+        if self.transcript and self.transcript.participants:
+            # Extract participant names from the stored participants
+            participant_names = [p.name for p in self.transcript.participants if p.name]
+            if participant_names:
+                self.logger.info(
+                    f"Using {len(participant_names)} known participants from transcript"
+                )
+                builder.set_known_participants(participant_names)
+            else:
+                self.logger.info(
+                    "Participants field exists but is empty, identifying participants"
+                )
+                await builder.identify_participants()
+        else:
+            self.logger.info("No participants stored, identifying participants")
+            await builder.identify_participants()

         await builder.generate_summary()
         return builder

@@ -49,18 +67,30 @@ class TranscriptFinalSummaryProcessor(Processor):
         speakermap = {}
         if self.transcript:
             speakermap = {
-                participant["speaker"]: participant["name"]
-                for participant in self.transcript.participants
+                p.speaker: p.name
+                for p in (self.transcript.participants or [])
+                if p.speaker is not None and p.name
             }
+            self.logger.info(
+                f"Built speaker map with {len(speakermap)} participants",
+                speakermap=speakermap,
+            )

         # build the transcript as a single string
-        # XXX: unsure if the participants name as replaced directly in speaker ?
+        # Replace speaker IDs with actual participant names if available
         text_transcript = []
+        unique_speakers = set()
         for topic in self.chunks:
             for segment in topic.transcript.as_segments():
                 name = speakermap.get(segment.speaker, f"Speaker {segment.speaker}")
+                unique_speakers.add((segment.speaker, name))
                 text_transcript.append(f"{name}: {segment.text}")

+        self.logger.info(
+            f"Built transcript with {len(unique_speakers)} unique speakers",
+            speakers=list(unique_speakers),
+        )
+
         text_transcript = "\n".join(text_transcript)

         last_chunk = self.chunks[-1]
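
The rebuilt speaker map keys each track index to a display name, so speaker IDs render as people; e.g. (names illustrative):

    speakermap = {0: "Alice", 1: "Bob"}
    # segment.speaker == 1 renders as "Bob: <text>" instead of "Speaker 1: <text>"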
@@ -1,6 +1,6 @@
 from textwrap import dedent

-from pydantic import BaseModel, Field
+from pydantic import AliasChoices, BaseModel, Field

 from reflector.llm import LLM
 from reflector.processors.base import Processor

@@ -36,15 +36,13 @@ class TopicResponse(BaseModel):
    title: str = Field(
        description="A descriptive title for the topic being discussed",
-       validation_alias="Title",
+       validation_alias=AliasChoices("title", "Title"),
    )
    summary: str = Field(
        description="A concise 1-2 sentence summary of the discussion",
-       validation_alias="Summary",
+       validation_alias=AliasChoices("summary", "Summary"),
    )

    model_config = {"populate_by_name": True}


class TranscriptTopicDetectorProcessor(Processor):
    """
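
The AliasChoices change makes the model tolerant to LLM output casing. A minimal standalone sketch of the effect (assuming pydantic v2; not part of the diff):

    from pydantic import AliasChoices, BaseModel, Field

    class TopicResponse(BaseModel):
        title: str = Field(validation_alias=AliasChoices("title", "Title"))
        summary: str = Field(validation_alias=AliasChoices("summary", "Summary"))

        model_config = {"populate_by_name": True}

    # Both casings now validate; with validation_alias="Title" only the second would.
    TopicResponse.model_validate({"title": "Roadmap", "summary": "Q1 planning."})
    TopicResponse.model_validate({"Title": "Roadmap", "Summary": "Q1 planning."})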
@@ -1,6 +1,7 @@
 import io
 import re
 import tempfile
+from collections import defaultdict
 from pathlib import Path
 from typing import Annotated, TypedDict

@@ -16,6 +17,17 @@ class DiarizationSegment(TypedDict):

 PUNC_RE = re.compile(r"[.;:?!…]")
+SENTENCE_END_RE = re.compile(r"[.?!…]$")
+
+# Max segment length for words_to_segments() - breaks on any punctuation (. ; : ? ! …)
+# when segment exceeds this limit. Used for non-multitrack recordings.
+MAX_SEGMENT_CHARS = 120
+
+# Max segment length for words_to_segments_by_sentence() - only breaks on sentence-ending
+# punctuation (. ? ! …) when segment exceeds this limit. Higher threshold allows complete
+# sentences in multitrack recordings where speakers overlap.
+# similar number to server/reflector/processors/transcript_liner.py
+MAX_SENTENCE_SEGMENT_CHARS = 1000


 class AudioFile(BaseModel):
@@ -76,7 +88,6 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:
    # but separate if the speaker changes, or if the punctuation is a . , ; : ? !
    segments = []
    current_segment = None
-   MAX_SEGMENT_LENGTH = 120

    for word in words:
        if current_segment is None:

@@ -106,7 +117,7 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:
            current_segment.end = word.end

        have_punc = PUNC_RE.search(word.text)
-       if have_punc and (len(current_segment.text) > MAX_SEGMENT_LENGTH):
+       if have_punc and (len(current_segment.text) > MAX_SEGMENT_CHARS):
            segments.append(current_segment)
            current_segment = None

@@ -116,6 +127,70 @@ def words_to_segments(words: list[Word]) -> list[TranscriptSegment]:
    return segments


+def words_to_segments_by_sentence(words: list[Word]) -> list[TranscriptSegment]:
+    """Group words by speaker, then split into sentences.
+
+    For multitrack recordings where words from different speakers are interleaved
+    by timestamp, this function first groups all words by speaker, then creates
+    segments based on sentence boundaries within each speaker's words.
+
+    This produces cleaner output than words_to_segments() which breaks on every
+    speaker change, resulting in many tiny segments when speakers overlap.
+    """
+    if not words:
+        return []
+
+    # Group words by speaker, preserving order within each speaker
+    by_speaker: dict[int, list[Word]] = defaultdict(list)
+    for w in words:
+        by_speaker[w.speaker].append(w)
+
+    segments: list[TranscriptSegment] = []
+
+    for speaker, speaker_words in by_speaker.items():
+        current_text = ""
+        current_start: float | None = None
+        current_end: float = 0.0
+
+        for word in speaker_words:
+            if current_start is None:
+                current_start = word.start
+
+            current_text += word.text
+            current_end = word.end
+
+            # Check for sentence end or max length
+            is_sentence_end = SENTENCE_END_RE.search(word.text.strip())
+            is_too_long = len(current_text) >= MAX_SENTENCE_SEGMENT_CHARS
+
+            if is_sentence_end or is_too_long:
+                segments.append(
+                    TranscriptSegment(
+                        text=current_text,
+                        start=current_start,
+                        end=current_end,
+                        speaker=speaker,
+                    )
+                )
+                current_text = ""
+                current_start = None
+
+        # Flush remaining words for this speaker
+        if current_text and current_start is not None:
+            segments.append(
+                TranscriptSegment(
+                    text=current_text,
+                    start=current_start,
+                    end=current_end,
+                    speaker=speaker,
+                )
+            )
+
+    # Sort segments by start time
+    segments.sort(key=lambda s: s.start)
+    return segments
+
+
 class Transcript(BaseModel):
    translation: str | None = None
    words: list[Word] = []

@@ -154,7 +229,9 @@ class Transcript(BaseModel):
            word.start += offset
            word.end += offset

-   def as_segments(self) -> list[TranscriptSegment]:
+   def as_segments(self, is_multitrack: bool = False) -> list[TranscriptSegment]:
+       if is_multitrack:
+           return words_to_segments_by_sentence(self.words)
        return words_to_segments(self.words)
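
A brief usage sketch of the new flag (illustrative values; Word construction assumed from the fields used above):

    # Two speakers overlap: the default segmentation breaks at every speaker
    # change, the multitrack path keeps each speaker's sentence whole.
    words = [
        Word(text="Hello ", start=0.0, end=0.4, speaker=0),
        Word(text="hi ", start=0.2, end=0.5, speaker=1),
        Word(text="there.", start=0.5, end=0.9, speaker=0),
    ]
    transcript = Transcript(words=words)

    transcript.as_segments()                    # three tiny segments (speaker changes twice)
    transcript.as_segments(is_multitrack=True)  # "Hello there." for speaker 0, "hi " for speaker 1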
server/reflector/schemas/platform.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from typing import Literal

Platform = Literal["whereby", "daily"]
WHEREBY_PLATFORM: Platform = "whereby"
DAILY_PLATFORM: Platform = "daily"
server/reflector/schemas/transcript_formats.py (new file, 17 lines)
@@ -0,0 +1,17 @@
"""Schema definitions for transcript format types and segments."""

from typing import Literal

from pydantic import BaseModel

TranscriptFormat = Literal["text", "text-timestamped", "webvtt-named", "json"]


class TranscriptSegment(BaseModel):
    """A single transcript segment with speaker and timing information."""

    speaker: int
    speaker_name: str
    text: str
    start: float
    end: float
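
These types back the transcript format query parameter mentioned in the changelog; a hypothetical endpoint wiring (route path and handler name are assumptions, not from the diff):

    from fastapi import FastAPI

    from reflector.schemas.transcript_formats import TranscriptFormat

    app = FastAPI()

    @app.get("/v1/transcripts/{transcript_id}")
    async def get_transcript(transcript_id: str, format: TranscriptFormat = "json"):
        # FastAPI validates ?format=... against the four Literal values
        ...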
server/reflector/services/transcript_process.py (new file, 168 lines)
@@ -0,0 +1,168 @@
"""
Transcript processing service - shared logic for HTTP endpoints and Celery tasks.

This module provides result-based error handling that works in both contexts:
- HTTP endpoint: converts errors to HTTPException
- Celery task: converts errors to Exception
"""

from dataclasses import dataclass
from typing import Literal, Union, assert_never

import celery
from celery.result import AsyncResult

from reflector.db.recordings import recordings_controller
from reflector.db.transcripts import Transcript
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_multitrack_pipeline import (
    task_pipeline_multitrack_process,
)
from reflector.utils.string import NonEmptyString


@dataclass
class ProcessError:
    detail: NonEmptyString


@dataclass
class FileProcessingConfig:
    transcript_id: NonEmptyString
    mode: Literal["file"] = "file"


@dataclass
class MultitrackProcessingConfig:
    transcript_id: NonEmptyString
    bucket_name: NonEmptyString
    track_keys: list[str]
    mode: Literal["multitrack"] = "multitrack"


ProcessingConfig = Union[FileProcessingConfig, MultitrackProcessingConfig]
PrepareResult = Union[ProcessingConfig, ProcessError]


@dataclass
class ValidationOk:
    # transcript currently doesnt always have recording_id
    recording_id: NonEmptyString | None
    transcript_id: NonEmptyString


@dataclass
class ValidationLocked:
    detail: NonEmptyString


@dataclass
class ValidationNotReady:
    detail: NonEmptyString


@dataclass
class ValidationAlreadyScheduled:
    detail: NonEmptyString


ValidationError = Union[
    ValidationNotReady, ValidationLocked, ValidationAlreadyScheduled
]
ValidationResult = Union[ValidationOk, ValidationError]


@dataclass
class DispatchOk:
    status: Literal["ok"] = "ok"


@dataclass
class DispatchAlreadyRunning:
    status: Literal["already_running"] = "already_running"


DispatchResult = Union[
    DispatchOk, DispatchAlreadyRunning, ProcessError, ValidationError
]


async def validate_transcript_for_processing(
    transcript: Transcript,
) -> ValidationResult:
    if transcript.locked:
        return ValidationLocked(detail="Recording is locked")

    if transcript.status == "idle":
        return ValidationNotReady(detail="Recording is not ready for processing")

    if task_is_scheduled_or_active(
        "reflector.pipelines.main_file_pipeline.task_pipeline_file_process",
        transcript_id=transcript.id,
    ) or task_is_scheduled_or_active(
        "reflector.pipelines.main_multitrack_pipeline.task_pipeline_multitrack_process",
        transcript_id=transcript.id,
    ):
        return ValidationAlreadyScheduled(detail="already running")

    return ValidationOk(
        recording_id=transcript.recording_id, transcript_id=transcript.id
    )


async def prepare_transcript_processing(validation: ValidationOk) -> PrepareResult:
    """
    Determine processing mode from transcript/recording data.
    """
    bucket_name: str | None = None
    track_keys: list[str] | None = None

    if validation.recording_id:
        recording = await recordings_controller.get_by_id(validation.recording_id)
        if recording:
            bucket_name = recording.bucket_name
            track_keys = recording.track_keys

    if track_keys is not None and len(track_keys) == 0:
        return ProcessError(
            detail="No track keys found, must be either > 0 or None",
        )
    if track_keys is not None and not bucket_name:
        return ProcessError(
            detail="Bucket name must be specified",
        )

    if track_keys:
        return MultitrackProcessingConfig(
            bucket_name=bucket_name,  # type: ignore (validated above)
            track_keys=track_keys,
            transcript_id=validation.transcript_id,
        )

    return FileProcessingConfig(
        transcript_id=validation.transcript_id,
    )


def dispatch_transcript_processing(config: ProcessingConfig) -> AsyncResult:
    if isinstance(config, MultitrackProcessingConfig):
        return task_pipeline_multitrack_process.delay(
            transcript_id=config.transcript_id,
            bucket_name=config.bucket_name,
            track_keys=config.track_keys,
        )
    elif isinstance(config, FileProcessingConfig):
        return task_pipeline_file_process.delay(transcript_id=config.transcript_id)
    else:
        assert_never(config)


def task_is_scheduled_or_active(task_name: str, **kwargs):
    inspect = celery.current_app.control.inspect()

    for worker, tasks in (inspect.scheduled() | inspect.active()).items():
        for task in tasks:
            if task["name"] == task_name and task["kwargs"] == kwargs:
                return True

    return False
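
A sketch of the intended call flow from an HTTP endpoint (handler shape and status codes are assumptions; the service functions are from the file above):

    from fastapi import HTTPException

    async def restart_processing(transcript: Transcript) -> dict:
        validation = await validate_transcript_for_processing(transcript)
        if not isinstance(validation, ValidationOk):
            # locked / not ready / already scheduled
            raise HTTPException(status_code=409, detail=validation.detail)

        config = await prepare_transcript_processing(validation)
        if isinstance(config, ProcessError):
            raise HTTPException(status_code=400, detail=config.detail)

        dispatch_transcript_processing(config)
        return {"status": "ok"}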
@@ -1,6 +1,7 @@
 from pydantic.types import PositiveInt
 from pydantic_settings import BaseSettings, SettingsConfigDict

+from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
 from reflector.utils.string import NonEmptyString

@@ -47,14 +48,17 @@ class Settings(BaseSettings):
    TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
    TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None

-   # Recording storage
-   RECORDING_STORAGE_BACKEND: str | None = None
+   # Platform-specific recording storage (follows {PREFIX}_STORAGE_AWS_{CREDENTIAL} pattern)
+   # Whereby storage configuration
+   WHEREBY_STORAGE_AWS_BUCKET_NAME: str | None = None
+   WHEREBY_STORAGE_AWS_REGION: str | None = None
+   WHEREBY_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
+   WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None

-   # Recording storage configuration for AWS
-   RECORDING_STORAGE_AWS_BUCKET_NAME: str = "recording-bucket"
-   RECORDING_STORAGE_AWS_REGION: str = "us-east-1"
-   RECORDING_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
-   RECORDING_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
+   # Daily.co storage configuration
+   DAILYCO_STORAGE_AWS_BUCKET_NAME: str | None = None
+   DAILYCO_STORAGE_AWS_REGION: str | None = None
+   DAILYCO_STORAGE_AWS_ROLE_ARN: str | None = None

    # Translate into the target language
    TRANSLATION_BACKEND: str = "passthrough"

@@ -124,11 +128,27 @@ class Settings(BaseSettings):
    WHEREBY_API_URL: str = "https://api.whereby.dev/v1"
    WHEREBY_API_KEY: NonEmptyString | None = None
    WHEREBY_WEBHOOK_SECRET: str | None = None
-   AWS_WHEREBY_ACCESS_KEY_ID: str | None = None
-   AWS_WHEREBY_ACCESS_KEY_SECRET: str | None = None
    AWS_PROCESS_RECORDING_QUEUE_URL: str | None = None
    SQS_POLLING_TIMEOUT_SECONDS: int = 60

+   # Daily.co integration
+   DAILY_API_KEY: str | None = None
+   DAILY_WEBHOOK_SECRET: str | None = None
+   DAILY_SUBDOMAIN: str | None = None
+   DAILY_WEBHOOK_UUID: str | None = (
+       None  # Webhook UUID for this environment. Not used by production code
+   )
+
+   # Multitrack processing
+   # SKIP_MIXDOWN: When True, skips audio mixdown and waveform generation.
+   # Transcription still works using individual tracks. Useful for:
+   # - Diagnosing OOM issues in mixdown
+   # - Fast processing when audio playback is not needed
+   # Note: UI will have no audio playback or waveform when enabled.
+   SKIP_MIXDOWN: bool = True
+   # Platform Configuration
+   DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM

    # Zulip integration
    ZULIP_REALM: str | None = None
    ZULIP_API_KEY: str | None = None
@@ -3,6 +3,13 @@ from reflector.settings import settings


 def get_transcripts_storage() -> Storage:
+    """
+    Get storage for processed transcript files (master credentials).
+
+    Also use this for ALL our file operations with bucket override:
+        master = get_transcripts_storage()
+        master.delete_file(key, bucket=recording.bucket_name)
+    """
     assert settings.TRANSCRIPT_STORAGE_BACKEND
     return Storage.get_instance(
         name=settings.TRANSCRIPT_STORAGE_BACKEND,

@@ -10,8 +17,53 @@ def get_transcripts_storage() -> Storage:
    )


-def get_recordings_storage() -> Storage:
+def get_whereby_storage() -> Storage:
+    """
+    Get storage config for Whereby (for passing to Whereby API).
+
+    Usage:
+        whereby_storage = get_whereby_storage()
+        key_id, secret = whereby_storage.key_credentials
+        whereby_api.create_meeting(
+            bucket=whereby_storage.bucket_name,
+            access_key_id=key_id,
+            secret=secret,
+        )
+
+    Do NOT use for our file operations - use get_transcripts_storage() instead.
+    """
+    if not settings.WHEREBY_STORAGE_AWS_BUCKET_NAME:
+        raise ValueError(
+            "WHEREBY_STORAGE_AWS_BUCKET_NAME required for Whereby with AWS storage"
+        )
+
     return Storage.get_instance(
-        name=settings.RECORDING_STORAGE_BACKEND,
-        settings_prefix="RECORDING_STORAGE_",
+        name="aws",
+        settings_prefix="WHEREBY_STORAGE_",
     )
+
+
+def get_dailyco_storage() -> Storage:
+    """
+    Get storage config for Daily.co (for passing to Daily API).
+
+    Usage:
+        daily_storage = get_dailyco_storage()
+        daily_api.create_meeting(
+            bucket=daily_storage.bucket_name,
+            region=daily_storage.region,
+            role_arn=daily_storage.role_credential,
+        )
+
+    Do NOT use for our file operations - use get_transcripts_storage() instead.
+    """
+    # Fail fast if platform-specific config missing
+    if not settings.DAILYCO_STORAGE_AWS_BUCKET_NAME:
+        raise ValueError(
+            "DAILYCO_STORAGE_AWS_BUCKET_NAME required for Daily.co with AWS storage"
+        )
+
+    return Storage.get_instance(
+        name="aws",
+        settings_prefix="DAILYCO_STORAGE_",
+    )
@@ -1,10 +1,23 @@
 import importlib
+from typing import BinaryIO, Union

 from pydantic import BaseModel

 from reflector.settings import settings


+class StorageError(Exception):
+    """Base exception for storage operations."""
+
+    pass
+
+
+class StoragePermissionError(StorageError):
+    """Exception raised when storage operation fails due to permission issues."""
+
+    pass
+
+
 class FileResult(BaseModel):
     filename: str
     url: str

@@ -36,26 +49,113 @@ class Storage:

        return cls._registry[name](**config)

-   async def put_file(self, filename: str, data: bytes) -> FileResult:
-       return await self._put_file(filename, data)
-
-   async def _put_file(self, filename: str, data: bytes) -> FileResult:
+   # Credential properties for API passthrough
+   @property
+   def bucket_name(self) -> str:
+       """Default bucket name for this storage instance."""
        raise NotImplementedError

-   async def delete_file(self, filename: str):
-       return await self._delete_file(filename)
-
-   async def _delete_file(self, filename: str):
+   @property
+   def region(self) -> str:
+       """AWS region for this storage instance."""
        raise NotImplementedError

-   async def get_file_url(self, filename: str) -> str:
-       return await self._get_file_url(filename)
+   @property
+   def access_key_id(self) -> str | None:
+       """AWS access key ID (None for role-based auth). Prefer key_credentials property."""
+       return None

-   async def _get_file_url(self, filename: str) -> str:
+   @property
+   def secret_access_key(self) -> str | None:
+       """AWS secret access key (None for role-based auth). Prefer key_credentials property."""
+       return None
+
+   @property
+   def role_arn(self) -> str | None:
+       """AWS IAM role ARN for role-based auth (None for key-based auth). Prefer role_credential property."""
+       return None
+
+   @property
+   def key_credentials(self) -> tuple[str, str]:
+       """
+       Get (access_key_id, secret_access_key) for key-based auth.
+       Raises ValueError if storage uses IAM role instead.
+       """
+       raise NotImplementedError

-   async def get_file(self, filename: str):
-       return await self._get_file(filename)
-
-   async def _get_file(self, filename: str):
+   @property
+   def role_credential(self) -> str:
+       """
+       Get IAM role ARN for role-based auth.
+       Raises ValueError if storage uses access keys instead.
+       """
        raise NotImplementedError

+   async def put_file(
+       self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None
+   ) -> FileResult:
+       """Upload data. bucket: override instance default if provided."""
+       return await self._put_file(filename, data, bucket=bucket)
+
+   async def _put_file(
+       self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None
+   ) -> FileResult:
+       raise NotImplementedError
+
+   async def delete_file(self, filename: str, *, bucket: str | None = None):
+       """Delete file. bucket: override instance default if provided."""
+       return await self._delete_file(filename, bucket=bucket)
+
+   async def _delete_file(self, filename: str, *, bucket: str | None = None):
+       raise NotImplementedError
+
+   async def get_file_url(
+       self,
+       filename: str,
+       operation: str = "get_object",
+       expires_in: int = 3600,
+       *,
+       bucket: str | None = None,
+   ) -> str:
+       """Generate presigned URL. bucket: override instance default if provided."""
+       return await self._get_file_url(filename, operation, expires_in, bucket=bucket)
+
+   async def _get_file_url(
+       self,
+       filename: str,
+       operation: str = "get_object",
+       expires_in: int = 3600,
+       *,
+       bucket: str | None = None,
+   ) -> str:
+       raise NotImplementedError
+
+   async def get_file(self, filename: str, *, bucket: str | None = None):
+       """Download file. bucket: override instance default if provided."""
+       return await self._get_file(filename, bucket=bucket)
+
+   async def _get_file(self, filename: str, *, bucket: str | None = None):
+       raise NotImplementedError
+
+   async def list_objects(
+       self, prefix: str = "", *, bucket: str | None = None
+   ) -> list[str]:
+       """List object keys. bucket: override instance default if provided."""
+       return await self._list_objects(prefix, bucket=bucket)
+
+   async def _list_objects(
+       self, prefix: str = "", *, bucket: str | None = None
+   ) -> list[str]:
+       raise NotImplementedError
+
+   async def stream_to_fileobj(
+       self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
+   ):
+       """Stream file directly to file object without loading into memory.
+       bucket: override instance default if provided."""
+       return await self._stream_to_fileobj(filename, fileobj, bucket=bucket)
+
+   async def _stream_to_fileobj(
+       self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
+   ):
+       raise NotImplementedError
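
The bucket-override contract in caller terms (a sketch mirroring the get_transcripts_storage() docstring; recording.bucket_name is assumed to come from the recordings table):

    storage = get_transcripts_storage()

    # default bucket of the instance
    await storage.put_file("audio.mp3", b"...")

    # same master credentials, platform-specific bucket
    data = await storage.get_file("track-0.webm", bucket=recording.bucket_name)
    await storage.delete_file("track-0.webm", bucket=recording.bucket_name)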
@@ -1,79 +1,236 @@
+from functools import wraps
+from typing import BinaryIO, Union

 import aioboto3
+from botocore.config import Config
+from botocore.exceptions import ClientError

 from reflector.logger import logger
-from reflector.storage.base import FileResult, Storage
+from reflector.storage.base import FileResult, Storage, StoragePermissionError


+def handle_s3_client_errors(operation_name: str):
+    """Decorator to handle S3 ClientError with bucket-aware messaging.
+
+    Args:
+        operation_name: Human-readable operation name for error messages (e.g., "upload", "delete")
+    """
+
+    def decorator(func):
+        @wraps(func)
+        async def wrapper(self, *args, **kwargs):
+            bucket = kwargs.get("bucket")
+            try:
+                return await func(self, *args, **kwargs)
+            except ClientError as e:
+                error_code = e.response.get("Error", {}).get("Code")
+                if error_code in ("AccessDenied", "NoSuchBucket"):
+                    actual_bucket = bucket or self._bucket_name
+                    bucket_context = (
+                        f"overridden bucket '{actual_bucket}'"
+                        if bucket
+                        else f"default bucket '{actual_bucket}'"
+                    )
+                    raise StoragePermissionError(
+                        f"S3 {operation_name} failed for {bucket_context}: {error_code}. "
+                        f"Check TRANSCRIPT_STORAGE_AWS_* credentials have permission."
+                    ) from e
+                raise
+
+        return wrapper
+
+    return decorator


 class AwsStorage(Storage):
+    """AWS S3 storage with bucket override for multi-platform recording architecture.
+    Master credentials access all buckets via optional bucket parameter in operations."""

    def __init__(
        self,
-       aws_access_key_id: str,
-       aws_secret_access_key: str,
        aws_bucket_name: str,
        aws_region: str,
+       aws_access_key_id: str | None = None,
+       aws_secret_access_key: str | None = None,
+       aws_role_arn: str | None = None,
    ):
-       if not aws_access_key_id:
-           raise ValueError("Storage `aws_storage` require `aws_access_key_id`")
-       if not aws_secret_access_key:
-           raise ValueError("Storage `aws_storage` require `aws_secret_access_key`")
        if not aws_bucket_name:
            raise ValueError("Storage `aws_storage` require `aws_bucket_name`")
        if not aws_region:
            raise ValueError("Storage `aws_storage` require `aws_region`")
+       if not aws_access_key_id and not aws_role_arn:
+           raise ValueError(
+               "Storage `aws_storage` require either `aws_access_key_id` or `aws_role_arn`"
+           )
+       if aws_role_arn and (aws_access_key_id or aws_secret_access_key):
+           raise ValueError(
+               "Storage `aws_storage` cannot use both `aws_role_arn` and access keys"
+           )

        super().__init__()
-       self.aws_bucket_name = aws_bucket_name
+       self._bucket_name = aws_bucket_name
+       self._region = aws_region
+       self._access_key_id = aws_access_key_id
+       self._secret_access_key = aws_secret_access_key
+       self._role_arn = aws_role_arn

        self.aws_folder = ""
        if "/" in aws_bucket_name:
-           self.aws_bucket_name, self.aws_folder = aws_bucket_name.split("/", 1)
+           self._bucket_name, self.aws_folder = aws_bucket_name.split("/", 1)
+       self.boto_config = Config(retries={"max_attempts": 3, "mode": "adaptive"})
        self.session = aioboto3.Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=aws_region,
        )
-       self.base_url = f"https://{aws_bucket_name}.s3.amazonaws.com/"
+       self.base_url = f"https://{self._bucket_name}.s3.amazonaws.com/"

-   async def _put_file(self, filename: str, data: bytes) -> FileResult:
-       bucket = self.aws_bucket_name
-       folder = self.aws_folder
-       logger.info(f"Uploading {filename} to S3 {bucket}/{folder}")
-       s3filename = f"{folder}/{filename}" if folder else filename
-       async with self.session.client("s3") as client:
-           await client.put_object(
-               Bucket=bucket,
-               Key=s3filename,
-               Body=data,
-           )
+   # Implement credential properties
+   @property
+   def bucket_name(self) -> str:
+       return self._bucket_name
+
+   @property
+   def region(self) -> str:
+       return self._region
+
+   @property
+   def access_key_id(self) -> str | None:
+       return self._access_key_id
+
+   @property
+   def secret_access_key(self) -> str | None:
+       return self._secret_access_key
+
+   @property
+   def role_arn(self) -> str | None:
+       return self._role_arn
+
+   @property
+   def key_credentials(self) -> tuple[str, str]:
+       """Get (access_key_id, secret_access_key) for key-based auth."""
+       if self._role_arn:
+           raise ValueError(
+               "Storage uses IAM role authentication. "
+               "Use role_credential property instead of key_credentials."
+           )
+       if not self._access_key_id or not self._secret_access_key:
+           raise ValueError("Storage access key credentials not configured")
+       return (self._access_key_id, self._secret_access_key)

-   async def _get_file_url(self, filename: str) -> FileResult:
-       bucket = self.aws_bucket_name
+   @property
+   def role_credential(self) -> str:
+       """Get IAM role ARN for role-based auth."""
+       if self._access_key_id or self._secret_access_key:
+           raise ValueError(
+               "Storage uses access key authentication. "
+               "Use key_credentials property instead of role_credential."
+           )
+       if not self._role_arn:
+           raise ValueError("Storage IAM role ARN not configured")
+       return self._role_arn
+
+   @handle_s3_client_errors("upload")
+   async def _put_file(
+       self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None
+   ) -> FileResult:
+       actual_bucket = bucket or self._bucket_name
        folder = self.aws_folder
        s3filename = f"{folder}/{filename}" if folder else filename
-       async with self.session.client("s3") as client:
+       logger.info(f"Uploading {filename} to S3 {actual_bucket}/{folder}")
+
+       async with self.session.client("s3", config=self.boto_config) as client:
+           if isinstance(data, bytes):
+               await client.put_object(Bucket=actual_bucket, Key=s3filename, Body=data)
+           else:
+               # boto3 reads file-like object in chunks
+               # avoids creating extra memory copy vs bytes.getvalue() approach
+               await client.upload_fileobj(data, Bucket=actual_bucket, Key=s3filename)
+
+       url = await self._get_file_url(filename, bucket=bucket)
+       return FileResult(filename=filename, url=url)
+
+   @handle_s3_client_errors("presign")
+   async def _get_file_url(
+       self,
+       filename: str,
+       operation: str = "get_object",
+       expires_in: int = 3600,
+       *,
+       bucket: str | None = None,
+   ) -> str:
+       actual_bucket = bucket or self._bucket_name
+       folder = self.aws_folder
+       s3filename = f"{folder}/{filename}" if folder else filename
+       async with self.session.client("s3", config=self.boto_config) as client:
            presigned_url = await client.generate_presigned_url(
-               "get_object",
-               Params={"Bucket": bucket, "Key": s3filename},
-               ExpiresIn=3600,
+               operation,
+               Params={"Bucket": actual_bucket, "Key": s3filename},
+               ExpiresIn=expires_in,
            )

        return presigned_url

-   async def _delete_file(self, filename: str):
-       bucket = self.aws_bucket_name
+   @handle_s3_client_errors("delete")
+   async def _delete_file(self, filename: str, *, bucket: str | None = None):
+       actual_bucket = bucket or self._bucket_name
        folder = self.aws_folder
-       logger.info(f"Deleting {filename} from S3 {bucket}/{folder}")
+       logger.info(f"Deleting {filename} from S3 {actual_bucket}/{folder}")
        s3filename = f"{folder}/{filename}" if folder else filename
-       async with self.session.client("s3") as client:
-           await client.delete_object(Bucket=bucket, Key=s3filename)
+       async with self.session.client("s3", config=self.boto_config) as client:
+           await client.delete_object(Bucket=actual_bucket, Key=s3filename)

-   async def _get_file(self, filename: str):
-       bucket = self.aws_bucket_name
+   @handle_s3_client_errors("download")
+   async def _get_file(self, filename: str, *, bucket: str | None = None):
+       actual_bucket = bucket or self._bucket_name
        folder = self.aws_folder
-       logger.info(f"Downloading {filename} from S3 {bucket}/{folder}")
+       logger.info(f"Downloading {filename} from S3 {actual_bucket}/{folder}")
        s3filename = f"{folder}/{filename}" if folder else filename
-       async with self.session.client("s3") as client:
-           response = await client.get_object(Bucket=bucket, Key=s3filename)
+       async with self.session.client("s3", config=self.boto_config) as client:
+           response = await client.get_object(Bucket=actual_bucket, Key=s3filename)
        return await response["Body"].read()

+   @handle_s3_client_errors("list_objects")
+   async def _list_objects(
+       self, prefix: str = "", *, bucket: str | None = None
+   ) -> list[str]:
+       actual_bucket = bucket or self._bucket_name
+       folder = self.aws_folder
+       # Combine folder and prefix
+       s3prefix = f"{folder}/{prefix}" if folder else prefix
+       logger.info(f"Listing objects from S3 {actual_bucket} with prefix '{s3prefix}'")
+
+       keys = []
+       async with self.session.client("s3", config=self.boto_config) as client:
+           paginator = client.get_paginator("list_objects_v2")
+           async for page in paginator.paginate(Bucket=actual_bucket, Prefix=s3prefix):
+               if "Contents" in page:
+                   for obj in page["Contents"]:
+                       # Strip folder prefix from keys if present
+                       key = obj["Key"]
+                       if folder:
+                           if key.startswith(f"{folder}/"):
+                               key = key[len(folder) + 1 :]
+                           elif key == folder:
+                               # Skip folder marker itself
+                               continue
+                       keys.append(key)
+
+       return keys
+
+   @handle_s3_client_errors("stream")
+   async def _stream_to_fileobj(
+       self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
+   ):
+       """Stream file from S3 directly to file object without loading into memory."""
+       actual_bucket = bucket or self._bucket_name
+       folder = self.aws_folder
+       logger.info(f"Streaming {filename} from S3 {actual_bucket}/{folder}")
+       s3filename = f"{folder}/{filename}" if folder else filename
+       async with self.session.client("s3", config=self.boto_config) as client:
+           await client.download_fileobj(
+               Bucket=actual_bucket, Key=s3filename, Fileobj=fileobj
+           )


 Storage.register("aws", AwsStorage)
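
Callers can now distinguish permission failures from other S3 errors, e.g. (bucket name illustrative):

    from reflector.storage.base import StoragePermissionError

    try:
        data = await storage.get_file("missing.webm", bucket="daily-recordings")
    except StoragePermissionError:
        # AccessDenied/NoSuchBucket: master credentials lack access to this bucket
        ...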
server/reflector/tools/cli_multitrack.py (new file, 347 lines)
@@ -0,0 +1,347 @@
import asyncio
import sys
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Protocol

import structlog
from celery.result import AsyncResult

from reflector.db import get_database
from reflector.db.transcripts import SourceKind, Transcript, transcripts_controller
from reflector.pipelines.main_multitrack_pipeline import (
    task_pipeline_multitrack_process,
)
from reflector.storage import get_transcripts_storage
from reflector.tools.process import (
    extract_result_from_entry,
    parse_s3_url,
    validate_s3_objects,
)

logger = structlog.get_logger(__name__)

DEFAULT_PROCESSING_TIMEOUT_SECONDS = 3600

MAX_ERROR_MESSAGE_LENGTH = 500

TASK_POLL_INTERVAL_SECONDS = 2


class StatusCallback(Protocol):
    def __call__(self, state: str, elapsed_seconds: int) -> None: ...


@dataclass
class MultitrackTaskResult:
    success: bool
    transcript_id: str
    error: Optional[str] = None


async def create_multitrack_transcript(
    bucket_name: str,
    track_keys: List[str],
    source_language: str,
    target_language: str,
    user_id: Optional[str] = None,
) -> Transcript:
    num_tracks = len(track_keys)
    track_word = "track" if num_tracks == 1 else "tracks"
    transcript_name = f"Multitrack ({num_tracks} {track_word})"

    transcript = await transcripts_controller.add(
        transcript_name,
        source_kind=SourceKind.FILE,
        source_language=source_language,
        target_language=target_language,
        user_id=user_id,
    )

    logger.info(
        "Created multitrack transcript",
        transcript_id=transcript.id,
        name=transcript_name,
        bucket=bucket_name,
        num_tracks=len(track_keys),
    )

    return transcript


def submit_multitrack_task(
    transcript_id: str, bucket_name: str, track_keys: List[str]
) -> AsyncResult:
    result = task_pipeline_multitrack_process.delay(
        transcript_id=transcript_id,
        bucket_name=bucket_name,
        track_keys=track_keys,
    )

    logger.info(
        "Multitrack task submitted",
        transcript_id=transcript_id,
        task_id=result.id,
        bucket=bucket_name,
        num_tracks=len(track_keys),
    )

    return result


async def wait_for_task(
    result: AsyncResult,
    transcript_id: str,
    timeout_seconds: int = DEFAULT_PROCESSING_TIMEOUT_SECONDS,
    poll_interval: int = TASK_POLL_INTERVAL_SECONDS,
    status_callback: Optional[StatusCallback] = None,
) -> MultitrackTaskResult:
    start_time = time.time()
    last_status = None

    while not result.ready():
        elapsed = time.time() - start_time
        if elapsed > timeout_seconds:
            error_msg = (
                f"Task {result.id} did not complete within {timeout_seconds}s "
                f"for transcript {transcript_id}"
            )
            logger.error(
                "Task timeout",
                task_id=result.id,
                transcript_id=transcript_id,
                elapsed_seconds=elapsed,
            )
            raise TimeoutError(error_msg)

        if result.state != last_status:
            if status_callback:
                status_callback(result.state, int(elapsed))
            last_status = result.state

        await asyncio.sleep(poll_interval)

    if result.failed():
        error_info = result.info
        traceback_info = getattr(result, "traceback", None)

        logger.error(
            "Multitrack task failed",
            transcript_id=transcript_id,
            task_id=result.id,
            error=str(error_info),
            has_traceback=bool(traceback_info),
        )

        error_detail = str(error_info)
        if traceback_info:
            error_detail += f"\nTraceback:\n{traceback_info}"

        return MultitrackTaskResult(
            success=False, transcript_id=transcript_id, error=error_detail
        )

    logger.info(
        "Multitrack task completed",
        transcript_id=transcript_id,
        task_id=result.id,
        state=result.state,
    )

    return MultitrackTaskResult(success=True, transcript_id=transcript_id)


async def update_transcript_status(
    transcript_id: str,
    status: str,
    error: Optional[str] = None,
    max_error_length: int = MAX_ERROR_MESSAGE_LENGTH,
) -> None:
    database = get_database()
    connected = False

    try:
        await database.connect()
        connected = True

        transcript = await transcripts_controller.get_by_id(transcript_id)
        if transcript:
            update_data: Dict[str, Any] = {"status": status}

            if error:
                if len(error) > max_error_length:
                    error = error[: max_error_length - 3] + "..."
                update_data["error"] = error

            await transcripts_controller.update(transcript, update_data)

            logger.info(
                "Updated transcript status",
                transcript_id=transcript_id,
                status=status,
                has_error=bool(error),
            )
    except Exception as e:
        logger.warning(
            "Failed to update transcript status",
            transcript_id=transcript_id,
            error=str(e),
        )
    finally:
        if connected:
            try:
                await database.disconnect()
            except Exception as e:
                logger.warning(f"Database disconnect failed: {e}")


async def process_multitrack(
    bucket_name: str,
    track_keys: List[str],
    source_language: str,
    target_language: str,
    user_id: Optional[str] = None,
    timeout_seconds: int = DEFAULT_PROCESSING_TIMEOUT_SECONDS,
    status_callback: Optional[StatusCallback] = None,
) -> MultitrackTaskResult:
    """High-level orchestration for multitrack processing."""
    database = get_database()
    transcript = None
    connected = False

    try:
        await database.connect()
        connected = True

        transcript = await create_multitrack_transcript(
            bucket_name=bucket_name,
            track_keys=track_keys,
            source_language=source_language,
            target_language=target_language,
            user_id=user_id,
        )

        result = submit_multitrack_task(
            transcript_id=transcript.id, bucket_name=bucket_name, track_keys=track_keys
        )

    except Exception as e:
        if transcript:
            try:
                await update_transcript_status(
                    transcript_id=transcript.id, status="failed", error=str(e)
                )
            except Exception as update_error:
                logger.error(
                    "Failed to update transcript status after error",
                    original_error=str(e),
                    update_error=str(update_error),
                    transcript_id=transcript.id,
                )
        raise
    finally:
        if connected:
            try:
                await database.disconnect()
            except Exception as e:
                logger.warning(f"Database disconnect failed: {e}")

    # Poll outside database connection
    task_result = await wait_for_task(
        result=result,
        transcript_id=transcript.id,
        timeout_seconds=timeout_seconds,
        poll_interval=2,
        status_callback=status_callback,
    )

    if not task_result.success:
        await update_transcript_status(
            transcript_id=transcript.id, status="failed", error=task_result.error
        )

    return task_result


def print_progress(message: str) -> None:
    """Print progress message to stderr for CLI visibility."""
    print(f"{message}", file=sys.stderr)


def create_status_callback() -> StatusCallback:
    """Create callback for task status updates during polling."""

    def callback(state: str, elapsed_seconds: int) -> None:
        print_progress(
            f"Multitrack pipeline status: {state} (elapsed: {elapsed_seconds}s)"
        )

    return callback


async def process_multitrack_cli(
    s3_urls: List[str],
    source_language: str,
    target_language: str,
    output_path: Optional[str] = None,
) -> None:
    if not s3_urls:
        raise ValueError("At least one track required for multitrack processing")

    bucket_keys = []
    for url in s3_urls:
        try:
            bucket, key = parse_s3_url(url)
            bucket_keys.append((bucket, key))
        except ValueError as e:
            raise ValueError(f"Invalid S3 URL '{url}': {e}") from e

    buckets = set(bucket for bucket, _ in bucket_keys)
    if len(buckets) > 1:
        raise ValueError(
            f"All tracks must be in the same S3 bucket. "
            f"Found {len(buckets)} different buckets: {sorted(buckets)}. "
            f"Please upload all files to a single bucket."
        )

    primary_bucket = bucket_keys[0][0]
    track_keys = [key for _, key in bucket_keys]

    print_progress(
        f"Starting multitrack CLI processing: "
        f"bucket={primary_bucket}, num_tracks={len(track_keys)}, "
        f"source_language={source_language}, target_language={target_language}"
    )

    storage = get_transcripts_storage()
    await validate_s3_objects(storage, bucket_keys)
    print_progress(f"S3 validation complete: {len(bucket_keys)} objects verified")

    result = await process_multitrack(
        bucket_name=primary_bucket,
        track_keys=track_keys,
        source_language=source_language,
        target_language=target_language,
        user_id=None,
        timeout_seconds=3600,
        status_callback=create_status_callback(),
    )

    if not result.success:
        error_msg = (
            f"Multitrack pipeline failed for transcript {result.transcript_id}\n"
        )
        if result.error:
            error_msg += f"Error: {result.error}\n"
        raise RuntimeError(error_msg)

    print_progress(
        f"Multitrack processing complete for transcript {result.transcript_id}"
    )

    database = get_database()
    await database.connect()
    try:
        await extract_result_from_entry(result.transcript_id, output_path)
    finally:
        await database.disconnect()
@@ -9,7 +9,10 @@ import shutil
 import sys
 import time
 from pathlib import Path
-from typing import Any, Dict, List, Literal
+from typing import Any, Dict, List, Literal, Tuple
+from urllib.parse import unquote, urlparse
+
+from botocore.exceptions import BotoCoreError, ClientError, NoCredentialsError

 from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller
 from reflector.logger import logger

@@ -20,10 +23,119 @@ from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipelin
 from reflector.pipelines.main_live_pipeline import (
     pipeline_process as live_pipeline_process,
 )
+from reflector.storage import Storage
+
+
+def validate_s3_bucket_name(bucket: str) -> None:
+    if not bucket:
+        raise ValueError("Bucket name cannot be empty")
+    if len(bucket) > 255:  # Absolute max for any region
+        raise ValueError(f"Bucket name too long: {len(bucket)} characters (max 255)")
+
+
+def validate_s3_key(key: str) -> None:
+    if not key:
+        raise ValueError("S3 key cannot be empty")
+    if len(key) > 1024:
+        raise ValueError(f"S3 key too long: {len(key)} characters (max 1024)")
+
+
+def parse_s3_url(url: str) -> Tuple[str, str]:
+    parsed = urlparse(url)
+
+    if parsed.scheme == "s3":
+        bucket = parsed.netloc
+        key = parsed.path.lstrip("/")
+        if parsed.fragment:
+            logger.debug(
+                "URL fragment ignored (not part of S3 key)",
+                url=url,
+                fragment=parsed.fragment,
+            )
+        if not bucket or not key:
+            raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+        bucket = unquote(bucket)
+        key = unquote(key)
+        validate_s3_bucket_name(bucket)
+        validate_s3_key(key)
+        return bucket, key
+
+    elif parsed.scheme in ("http", "https"):
+        if ".s3." in parsed.netloc or parsed.netloc.endswith(".s3.amazonaws.com"):
+            bucket = parsed.netloc.split(".")[0]
+            key = parsed.path.lstrip("/")
+            if parsed.fragment:
+                logger.debug("URL fragment ignored", url=url, fragment=parsed.fragment)
+            if not bucket or not key:
+                raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+            bucket = unquote(bucket)
+            key = unquote(key)
+            validate_s3_bucket_name(bucket)
+            validate_s3_key(key)
+            return bucket, key
+
+        elif parsed.netloc.startswith("s3.") and "amazonaws.com" in parsed.netloc:
+            path_parts = parsed.path.lstrip("/").split("/", 1)
+            if len(path_parts) != 2:
+                raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+            bucket, key = path_parts
+            if parsed.fragment:
+                logger.debug("URL fragment ignored", url=url, fragment=parsed.fragment)
+            bucket = unquote(bucket)
+            key = unquote(key)
+            validate_s3_bucket_name(bucket)
+            validate_s3_key(key)
+            return bucket, key
+
+        else:
+            raise ValueError(f"Invalid S3 URL format: {url} (not recognized as S3 URL)")
+
+    else:
+        raise ValueError(f"Invalid S3 URL scheme: {url} (must be s3:// or https://)")
+
+
+async def validate_s3_objects(
+    storage: Storage, bucket_keys: List[Tuple[str, str]]
+) -> None:
+    async with storage.session.client("s3") as client:
+
+        async def check_object(bucket: str, key: str) -> None:
+            try:
+                await client.head_object(Bucket=bucket, Key=key)
+            except ClientError as e:
+                error_code = e.response["Error"]["Code"]
+                if error_code in ("404", "NoSuchKey"):
+                    raise ValueError(f"S3 object not found: s3://{bucket}/{key}") from e
+                elif error_code in ("403", "Forbidden", "AccessDenied"):
+                    raise ValueError(
+                        f"Access denied for S3 object: s3://{bucket}/{key}. "
+                        f"Check AWS credentials and permissions"
+                    ) from e
+                else:
+                    raise ValueError(
+                        f"S3 error {error_code} for s3://{bucket}/{key}: "
+                        f"{e.response['Error'].get('Message', 'Unknown error')}"
+                    ) from e
+            except NoCredentialsError as e:
+                raise ValueError(
+                    "AWS credentials not configured. Set AWS_ACCESS_KEY_ID and "
+                    "AWS_SECRET_ACCESS_KEY environment variables"
+                ) from e
+            except BotoCoreError as e:
+                raise ValueError(
+                    f"AWS service error for s3://{bucket}/{key}: {str(e)}"
+                ) from e
+            except Exception as e:
+                raise ValueError(
+                    f"Unexpected error validating s3://{bucket}/{key}: {str(e)}"
+                ) from e

+        await asyncio.gather(
+            *(check_object(bucket, key) for bucket, key in bucket_keys)
+        )


 def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:
     """Convert TranscriptTopic objects to JSON-serializable dicts"""
     serialized = []
     for topic in topics:
         topic_dict = topic.model_dump()

@@ -32,7 +144,6 @@ def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:


 def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
     """Print debug info about speakers found in topics"""
     all_speakers = set()
     for topic_dict in serialized_topics:
         for word in topic_dict.get("words", []):

@@ -47,8 +158,6 @@ def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
 TranscriptId = str


-# common interface for every flow: it needs an Entry in db with specific ceremony (file path + status + actual file in file system)
-# ideally we want to get rid of it at some point
 async def prepare_entry(
     source_path: str,
     source_language: str,

@@ -65,9 +174,7 @@ async def prepare_entry(
         user_id=None,
     )

-    logger.info(
-        f"Created empty transcript {transcript.id} for file {file_path.name} because technically we need an empty transcript before we start transcript"
-    )
+    logger.info(f"Created transcript {transcript.id} for {file_path.name}")

     # pipelines expect files as upload.*

@@ -83,7 +190,6 @@ async def prepare_entry(
     return transcript.id


-# same reason as prepare_entry
 async def extract_result_from_entry(
     transcript_id: TranscriptId, output_path: str
 ) -> None:

@@ -193,13 +299,20 @@ if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Process audio files with speaker diarization"
    )
-   parser.add_argument("source", help="Source file (mp3, wav, mp4...)")
+   parser.add_argument(
+       "source",
+       help="Source file (mp3, wav, mp4...) or comma-separated S3 URLs with --multitrack",
+   )
    parser.add_argument(
        "--pipeline",
-       required=True,
        choices=["live", "file"],
        help="Pipeline type to use for processing (live: streaming/incremental, file: batch/parallel)",
    )
+   parser.add_argument(
+       "--multitrack",
+       action="store_true",
+       help="Process multiple audio tracks from comma-separated S3 URLs",
+   )
    parser.add_argument(
        "--source-language", default="en", help="Source language code (default: en)"
    )

@@ -209,12 +322,40 @@ if __name__ == "__main__":
    parser.add_argument("--output", "-o", help="Output file (output.jsonl)")
    args = parser.parse_args()

-   asyncio.run(
-       process(
-           args.source,
-           args.source_language,
-           args.target_language,
-           args.pipeline,
-           args.output,
-       )
-   )
+   if args.multitrack:
+       if not args.source:
+           parser.error("Source URLs required for multitrack processing")
+
+       s3_urls = [url.strip() for url in args.source.split(",") if url.strip()]
+
+       if not s3_urls:
+           parser.error("At least one S3 URL required for multitrack processing")
+
+       from reflector.tools.cli_multitrack import process_multitrack_cli
+
+       asyncio.run(
+           process_multitrack_cli(
+               s3_urls,
+               args.source_language,
+               args.target_language,
+               args.output,
+           )
+       )
+   else:
+       if not args.pipeline:
+           parser.error("--pipeline is required for single-track processing")
+
+       if "," in args.source:
+           parser.error(
+               "Multiple files detected. Use --multitrack flag for multitrack processing"
+           )
+
+       asyncio.run(
+           process(
+               args.source,
+               args.source_language,
+               args.target_language,
+               args.pipeline,
+               args.output,
+           )
+       )
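
For reference, parse_s3_url() accepts the three URL shapes handled above (bucket/key values illustrative):

    s3://my-bucket/recordings/t0.webm                                -> ("my-bucket", "recordings/t0.webm")
    https://my-bucket.s3.us-east-1.amazonaws.com/recordings/t0.webm  -> ("my-bucket", "recordings/t0.webm")
    https://s3.us-east-1.amazonaws.com/my-bucket/recordings/t0.webm  -> ("my-bucket", "recordings/t0.webm")

and a multitrack run wires it all together (paths illustrative, invocation per the repo's uv convention):

    uv run -m reflector.tools.process \
        "s3://my-bucket/t0.webm,s3://my-bucket/t1.webm" \
        --multitrack --source-language en -o output.jsonl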
server/reflector/tools/process_transcript.py (new file, 127 lines)
@@ -0,0 +1,127 @@
"""
Process transcript by ID - auto-detects multitrack vs file pipeline.

Usage:
    uv run -m reflector.tools.process_transcript <transcript_id>

    # Or via docker:
    docker compose exec server uv run -m reflector.tools.process_transcript <transcript_id>
"""

import argparse
import asyncio
import sys
import time
from typing import Callable

from celery.result import AsyncResult

from reflector.db.transcripts import Transcript, transcripts_controller
from reflector.services.transcript_process import (
    FileProcessingConfig,
    MultitrackProcessingConfig,
    PrepareResult,
    ProcessError,
    ValidationError,
    ValidationResult,
    dispatch_transcript_processing,
    prepare_transcript_processing,
    validate_transcript_for_processing,
)


async def process_transcript_inner(
    transcript: Transcript,
    on_validation: Callable[[ValidationResult], None],
    on_preprocess: Callable[[PrepareResult], None],
) -> AsyncResult:
    validation = await validate_transcript_for_processing(transcript)
    on_validation(validation)
    config = await prepare_transcript_processing(validation)
    on_preprocess(config)
    return dispatch_transcript_processing(config)


async def process_transcript(transcript_id: str, sync: bool = False) -> None:
    """
    Process a transcript by ID, auto-detecting multitrack vs file pipeline.

    Args:
        transcript_id: The transcript UUID
        sync: If True, wait for task completion. If False, dispatch and exit.
    """
    from reflector.db import get_database

    database = get_database()
    await database.connect()

    try:
        transcript = await transcripts_controller.get_by_id(transcript_id)
        if not transcript:
            print(f"Error: Transcript {transcript_id} not found", file=sys.stderr)
            sys.exit(1)

        print(f"Found transcript: {transcript.title or transcript_id}", file=sys.stderr)
        print(f"  Status: {transcript.status}", file=sys.stderr)
        print(f"  Recording ID: {transcript.recording_id or 'None'}", file=sys.stderr)

        def on_validation(validation: ValidationResult) -> None:
            if isinstance(validation, ValidationError):
                print(f"Error: {validation.detail}", file=sys.stderr)
                sys.exit(1)

        def on_preprocess(config: PrepareResult) -> None:
            if isinstance(config, ProcessError):
                print(f"Error: {config.detail}", file=sys.stderr)
                sys.exit(1)
            elif isinstance(config, MultitrackProcessingConfig):
                print(f"Dispatching multitrack pipeline", file=sys.stderr)
                print(f"  Bucket: {config.bucket_name}", file=sys.stderr)
                print(f"  Tracks: {len(config.track_keys)}", file=sys.stderr)
            elif isinstance(config, FileProcessingConfig):
                print(f"Dispatching file pipeline", file=sys.stderr)

        result = await process_transcript_inner(
            transcript, on_validation=on_validation, on_preprocess=on_preprocess
        )

        if sync:
            print("Waiting for task completion...", file=sys.stderr)
            while not result.ready():
                print(f"  Status: {result.state}", file=sys.stderr)
                time.sleep(5)

            if result.successful():
                print("Task completed successfully", file=sys.stderr)
            else:
                print(f"Task failed: {result.result}", file=sys.stderr)
                sys.exit(1)
        else:
            print(
                "Task dispatched (use --sync to wait for completion)", file=sys.stderr
            )

    finally:
        await database.disconnect()


def main():
    parser = argparse.ArgumentParser(
        description="Process transcript by ID - auto-detects multitrack vs file pipeline"
    )
    parser.add_argument(
        "transcript_id",
        help="Transcript UUID to process",
    )
    parser.add_argument(
        "--sync",
        action="store_true",
        help="Wait for task completion instead of just dispatching",
    )

    args = parser.parse_args()
    asyncio.run(process_transcript(args.transcript_id, sync=args.sync))


if __name__ == "__main__":
    main()
92
server/reflector/utils/daily.py
Normal file
92
server/reflector/utils/daily.py
Normal file
@@ -0,0 +1,92 @@
import os
import re
from typing import NamedTuple

from reflector.utils.string import NonEmptyString

DailyRoomName = NonEmptyString


class DailyRecordingFilename(NamedTuple):
    """Parsed components from Daily.co recording filename.

    Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}
    Example: 1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582

    Note: S3 object keys have no extension, but browsers add .webm when downloading
    from the S3 UI due to MIME type headers; keep that in mind if you download manually.
    """

    recording_start_ts: int
    participant_id: str
    track_start_ts: int


def parse_daily_recording_filename(filename: str) -> DailyRecordingFilename:
    """Parse Daily.co recording filename to extract timestamps and participant ID.

    Args:
        filename: Full path or basename of Daily.co recording file
            Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}

    Returns:
        DailyRecordingFilename with parsed components

    Raises:
        ValueError: If filename doesn't match expected format

    Examples:
        >>> parse_daily_recording_filename("1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582")
        DailyRecordingFilename(recording_start_ts=1763152299562, participant_id='12f0b87c-97d4-4dd3-a65c-cee1f854a79c', track_start_ts=1763152314582)
    """
    base = os.path.basename(filename)
    pattern = r"(\d{13,})-([0-9a-fA-F-]{36})-cam-audio-(\d{13,})"
    match = re.search(pattern, base)

    if not match:
        raise ValueError(
            f"Invalid Daily.co recording filename: {filename}. "
            f"Expected format: {{recording_start_ts}}-{{participant_id}}-cam-audio-{{track_start_ts}}"
        )

    recording_start_ts = int(match.group(1))
    participant_id = match.group(2)
    track_start_ts = int(match.group(3))

    return DailyRecordingFilename(
        recording_start_ts=recording_start_ts,
        participant_id=participant_id,
        track_start_ts=track_start_ts,
    )


def recording_lock_key(recording_id: NonEmptyString) -> NonEmptyString:
    return f"recording:{recording_id}"


def filter_cam_audio_tracks(track_keys: list[str]) -> list[str]:
    """Filter track keys to cam-audio tracks only (skip screen-audio, etc.)."""
    return [k for k in track_keys if "cam-audio" in k]


def extract_base_room_name(daily_room_name: DailyRoomName) -> NonEmptyString:
    """
    Extract base room name from Daily.co timestamped room name.

    Daily.co creates rooms with a timestamp suffix: {base_name}-YYYYMMDDHHMMSS
    This function removes the timestamp to get the original room name.

    Examples:
        "daily-20251020193458" → "daily"
        "daily-2-20251020193458" → "daily-2"
        "my-room-name-20251020193458" → "my-room-name"

    Args:
        daily_room_name: Full Daily.co room name with optional timestamp

    Returns:
        Base room name without timestamp suffix
    """
    base_name = daily_room_name.rsplit("-", 1)[0]
    assert base_name, f"Extracted base name is empty from: {daily_room_name}"
    return base_name
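
A hypothetical worked example of the parser: the two timestamps look like Unix epoch milliseconds, so the difference (my inference, not stated in the code) would give a track's offset into the recording:

key = "1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582"
parsed = parse_daily_recording_filename(key)
offset_s = (parsed.track_start_ts - parsed.recording_start_ts) / 1000
print(offset_s)  # 15.02 -> this participant's audio starts ~15s into the recording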
server/reflector/utils/datetime.py (new file, 9 lines)
@@ -0,0 +1,9 @@
from datetime import datetime, timezone


def parse_datetime_with_timezone(iso_string: str) -> datetime:
    """Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive)."""
    dt = datetime.fromisoformat(iso_string)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)
    return dt
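
Quick usage check of the helper:

parse_datetime_with_timezone("2025-01-01T12:00:00")        # naive -> 2025-01-01 12:00:00+00:00
parse_datetime_with_timezone("2025-01-01T12:00:00+02:00")  # aware -> original offset kept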
server/reflector/utils/string.py
@@ -1,4 +1,4 @@
-from typing import Annotated
+from typing import Annotated, TypeVar

 from pydantic import Field, TypeAdapter, constr

@@ -21,3 +21,12 @@ def try_parse_non_empty_string(s: str) -> NonEmptyString | None:
     if not s:
         return None
     return parse_non_empty_string(s)
+
+
+T = TypeVar("T", bound=str)
+
+
+def assert_equal(s1: T, s2: T) -> T:
+    if s1 != s2:
+        raise ValueError(f"assert_equal: {s1} != {s2}")
+    return s1
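
Tiny usage note for the new helper; it returns the value when both sides agree, so it can be threaded inline:

assert_equal("daily", "daily")  # returns "daily"
assert_equal("daily", "other")  # raises ValueError: assert_equal: daily != other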
server/reflector/utils/transcript_formats.py (new file, 133 lines)
@@ -0,0 +1,133 @@
"""Utilities for converting transcript data to various output formats."""

import webvtt

from reflector.db.transcripts import TranscriptParticipant, TranscriptTopic
from reflector.processors.types import (
    Transcript as ProcessorTranscript,
)
from reflector.schemas.transcript_formats import TranscriptSegment
from reflector.utils.webvtt import seconds_to_timestamp


def get_speaker_name(
    speaker: int, participants: list[TranscriptParticipant] | None
) -> str:
    """Get participant name for speaker or default to 'Speaker N'."""
    if participants:
        for participant in participants:
            if participant.speaker == speaker:
                return participant.name
    return f"Speaker {speaker}"


def format_timestamp_mmss(seconds: float | int) -> str:
    """Format seconds as MM:SS timestamp."""
    minutes = int(seconds // 60)
    secs = int(seconds % 60)
    return f"{minutes:02d}:{secs:02d}"


def transcript_to_text(
    topics: list[TranscriptTopic],
    participants: list[TranscriptParticipant] | None,
    is_multitrack: bool = False,
) -> str:
    """Convert transcript topics to plain text with speaker names."""
    lines = []
    for topic in topics:
        if not topic.words:
            continue

        transcript = ProcessorTranscript(words=topic.words)
        segments = transcript.as_segments(is_multitrack)

        for segment in segments:
            speaker_name = get_speaker_name(segment.speaker, participants)
            text = segment.text.strip()
            lines.append(f"{speaker_name}: {text}")

    return "\n".join(lines)


def transcript_to_text_timestamped(
    topics: list[TranscriptTopic],
    participants: list[TranscriptParticipant] | None,
    is_multitrack: bool = False,
) -> str:
    """Convert transcript topics to timestamped text with speaker names."""
    lines = []
    for topic in topics:
        if not topic.words:
            continue

        transcript = ProcessorTranscript(words=topic.words)
        segments = transcript.as_segments(is_multitrack)

        for segment in segments:
            speaker_name = get_speaker_name(segment.speaker, participants)
            timestamp = format_timestamp_mmss(segment.start)
            text = segment.text.strip()
            lines.append(f"[{timestamp}] {speaker_name}: {text}")

    return "\n".join(lines)


def topics_to_webvtt_named(
    topics: list[TranscriptTopic],
    participants: list[TranscriptParticipant] | None,
    is_multitrack: bool = False,
) -> str:
    """Convert transcript topics to WebVTT format with participant names."""
    vtt = webvtt.WebVTT()

    for topic in topics:
        if not topic.words:
            continue

        transcript = ProcessorTranscript(words=topic.words)
        segments = transcript.as_segments(is_multitrack)

        for segment in segments:
            speaker_name = get_speaker_name(segment.speaker, participants)
            text = segment.text.strip()
            text = f"<v {speaker_name}>{text}"

            caption = webvtt.Caption(
                start=seconds_to_timestamp(segment.start),
                end=seconds_to_timestamp(segment.end),
                text=text,
            )
            vtt.captions.append(caption)

    return vtt.content


def transcript_to_json_segments(
    topics: list[TranscriptTopic],
    participants: list[TranscriptParticipant] | None,
    is_multitrack: bool = False,
) -> list[TranscriptSegment]:
    """Convert transcript topics to a flat list of JSON segments."""
    result = []

    for topic in topics:
        if not topic.words:
            continue

        transcript = ProcessorTranscript(words=topic.words)
        segments = transcript.as_segments(is_multitrack)

        for segment in segments:
            speaker_name = get_speaker_name(segment.speaker, participants)
            result.append(
                TranscriptSegment(
                    speaker=segment.speaker,
                    speaker_name=speaker_name,
                    text=segment.text.strip(),
                    start=segment.start,
                    end=segment.end,
                )
            )

    return result
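
A quick sketch of the two leaf helpers; constructing TranscriptParticipant from just (speaker, name) is an assumption, since the model may require more fields:

participants = [TranscriptParticipant(speaker=0, name="John Smith")]
get_speaker_name(0, participants)  # 'John Smith'
get_speaker_name(1, participants)  # 'Speaker 1'
format_timestamp_mmss(125)         # '02:05'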
server/reflector/utils/url.py (new file, 37 lines)
@@ -0,0 +1,37 @@
"""URL manipulation utilities."""

from urllib.parse import parse_qs, urlencode, urlparse, urlunparse


def add_query_param(url: str, key: str, value: str) -> str:
    """
    Add or update a query parameter in a URL.

    Properly handles URLs with or without existing query parameters,
    preserving fragments and encoding special characters.

    Args:
        url: The URL to modify
        key: The query parameter name
        value: The query parameter value

    Returns:
        The URL with the query parameter added or updated

    Examples:
        >>> add_query_param("https://example.com/room", "t", "token123")
        'https://example.com/room?t=token123'

        >>> add_query_param("https://example.com/room?existing=param", "t", "token123")
        'https://example.com/room?existing=param&t=token123'
    """
    parsed = urlparse(url)

    query_params = parse_qs(parsed.query, keep_blank_values=True)

    query_params[key] = [value]

    new_query = urlencode(query_params, doseq=True)

    new_parsed = parsed._replace(query=new_query)
    return urlunparse(new_parsed)
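
One more illustrative case worth noting: fragments survive, because urlparse keeps them in a separate component and urlunparse reassembles them:

add_query_param("https://example.com/room?x=1#agenda", "t", "tok")
# -> 'https://example.com/room?x=1&t=tok#agenda'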
server/reflector/utils/webvtt.py
@@ -13,7 +13,7 @@ VttTimestamp = Annotated[str, "vtt_timestamp"]
 WebVTTStr = Annotated[str, "webvtt_str"]


-def _seconds_to_timestamp(seconds: Seconds) -> VttTimestamp:
+def seconds_to_timestamp(seconds: Seconds) -> VttTimestamp:
     # lib doesn't do that
     hours = int(seconds // 3600)
     minutes = int((seconds % 3600) // 60)
@@ -37,8 +37,8 @@ def words_to_webvtt(words: list[Word]) -> WebVTTStr:
         text = f"<v Speaker{segment.speaker}>{text}"

         caption = webvtt.Caption(
-            start=_seconds_to_timestamp(segment.start),
-            end=_seconds_to_timestamp(segment.end),
+            start=seconds_to_timestamp(segment.start),
+            end=seconds_to_timestamp(segment.end),
             text=text,
         )
         vtt.captions.append(caption)
server/reflector/video_platforms/__init__.py (new file, 11 lines)
@@ -0,0 +1,11 @@
from .base import VideoPlatformClient
from .models import MeetingData, VideoPlatformConfig
from .registry import get_platform_client, register_platform

__all__ = [
    "VideoPlatformClient",
    "VideoPlatformConfig",
    "MeetingData",
    "get_platform_client",
    "register_platform",
]
server/reflector/video_platforms/base.py (new file, 51 lines)
@@ -0,0 +1,51 @@
from abc import ABC, abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Any, Dict, Optional

from ..schemas.platform import Platform
from ..utils.string import NonEmptyString
from .models import MeetingData, SessionData, VideoPlatformConfig

if TYPE_CHECKING:
    from reflector.db.rooms import Room

# the separator doesn't guarantee uniqueness: the room name itself may contain
# further ROOM_PREFIX_SEPARATOR occurrences
ROOM_PREFIX_SEPARATOR = "-"


class VideoPlatformClient(ABC):
    PLATFORM_NAME: Platform

    def __init__(self, config: VideoPlatformConfig):
        self.config = config

    @abstractmethod
    async def create_meeting(
        self, room_name_prefix: NonEmptyString, end_date: datetime, room: "Room"
    ) -> MeetingData:
        pass

    @abstractmethod
    async def get_room_sessions(self, room_name: str) -> list[SessionData]:
        """Get session history for a room."""
        pass

    @abstractmethod
    async def upload_logo(self, room_name: str, logo_path: str) -> bool:
        pass

    @abstractmethod
    def verify_webhook_signature(
        self, body: bytes, signature: str, timestamp: Optional[str] = None
    ) -> bool:
        pass

    def format_recording_config(self, room: "Room") -> Dict[str, Any]:
        if room.recording_type == "cloud" and self.config.s3_bucket:
            return {
                "type": room.recording_type,
                "bucket": self.config.s3_bucket,
                "region": self.config.s3_region,
                "trigger": room.recording_trigger,
            }
        return {"type": room.recording_type}
server/reflector/video_platforms/daily.py (new file, 204 lines)
@@ -0,0 +1,204 @@
from datetime import datetime

from reflector.dailyco_api import (
    CreateMeetingTokenRequest,
    CreateRoomRequest,
    DailyApiClient,
    MeetingParticipantsResponse,
    MeetingTokenProperties,
    RecordingResponse,
    RecordingsBucketConfig,
    RoomPresenceResponse,
    RoomProperties,
    verify_webhook_signature,
)
from reflector.db.daily_participant_sessions import (
    daily_participant_sessions_controller,
)
from reflector.db.rooms import Room
from reflector.logger import logger
from reflector.storage import get_dailyco_storage

from ..dailyco_api.responses import RecordingStatus
from ..schemas.platform import Platform
from ..utils.daily import DailyRoomName
from ..utils.string import NonEmptyString
from .base import ROOM_PREFIX_SEPARATOR, VideoPlatformClient
from .models import MeetingData, RecordingType, SessionData, VideoPlatformConfig


class DailyClient(VideoPlatformClient):
    PLATFORM_NAME: Platform = "daily"
    TIMESTAMP_FORMAT = "%Y%m%d%H%M%S"
    RECORDING_NONE: RecordingType = "none"
    RECORDING_LOCAL: RecordingType = "local"
    RECORDING_CLOUD: RecordingType = "cloud"

    def __init__(self, config: VideoPlatformConfig):
        super().__init__(config)
        self._api_client = DailyApiClient(
            api_key=config.api_key,
            webhook_secret=config.webhook_secret,
            timeout=10.0,
        )

    async def create_meeting(
        self, room_name_prefix: NonEmptyString, end_date: datetime, room: Room
    ) -> MeetingData:
        """
        Daily.co rooms vs meetings:
        - We create a NEW Daily.co room for each Reflector meeting
        - A Daily.co meeting/session starts automatically when the first participant joins
        - The room auto-deletes after its exp time
        - Meeting.room_name stores the timestamped Daily.co room name
        """
        timestamp = datetime.now().strftime(self.TIMESTAMP_FORMAT)
        room_name = f"{room_name_prefix}{ROOM_PREFIX_SEPARATOR}{timestamp}"

        enable_recording = None
        if room.recording_type == self.RECORDING_LOCAL:
            enable_recording = "local"
        elif room.recording_type == self.RECORDING_CLOUD:
            enable_recording = "raw-tracks"

        properties = RoomProperties(
            enable_recording=enable_recording,
            enable_chat=True,
            enable_screenshare=True,
            enable_knocking=room.is_locked,
            start_video_off=False,
            start_audio_off=False,
            exp=int(end_date.timestamp()),
        )

        if room.recording_type == self.RECORDING_CLOUD:
            daily_storage = get_dailyco_storage()
            assert daily_storage.bucket_name, "S3 bucket must be configured"
            properties.recordings_bucket = RecordingsBucketConfig(
                bucket_name=daily_storage.bucket_name,
                bucket_region=daily_storage.region,
                assume_role_arn=daily_storage.role_credential,
                allow_api_access=True,
            )

        request = CreateRoomRequest(
            name=room_name,
            privacy="private" if room.is_locked else "public",
            properties=properties,
        )

        result = await self._api_client.create_room(request)

        return MeetingData(
            meeting_id=result.id,
            room_name=result.name,
            room_url=result.url,
            host_room_url=result.url,
            platform=self.PLATFORM_NAME,
            extra_data=result.model_dump(),
        )

    async def get_room_sessions(self, room_name: str) -> list[SessionData]:
        """Get room session history from the database (webhook-stored sessions).

        Daily.co doesn't provide a historical session API, so we query our own
        database, where participant.joined/left webhooks are stored.
        """
        from reflector.db.meetings import meetings_controller  # noqa: PLC0415

        meeting = await meetings_controller.get_by_room_name(room_name)
        if not meeting:
            return []

        sessions = await daily_participant_sessions_controller.get_by_meeting(
            meeting.id
        )

        return [
            SessionData(
                session_id=s.id,
                started_at=s.joined_at,
                ended_at=s.left_at,
            )
            for s in sessions
        ]

    async def get_room_presence(self, room_name: str) -> RoomPresenceResponse:
        """Get room presence/session data for a Daily.co room."""
        return await self._api_client.get_room_presence(room_name)

    async def get_meeting_participants(
        self, meeting_id: str
    ) -> MeetingParticipantsResponse:
        """Get participant data for a specific Daily.co meeting."""
        return await self._api_client.get_meeting_participants(meeting_id)

    async def get_recording(self, recording_id: str) -> RecordingResponse:
        return await self._api_client.get_recording(recording_id)

    async def list_recordings(
        self,
        room_name: NonEmptyString | None = None,
        starting_after: str | None = None,
        ending_before: str | None = None,
        limit: int = 100,
    ) -> list[RecordingResponse]:
        return await self._api_client.list_recordings(
            room_name=room_name,
            starting_after=starting_after,
            ending_before=ending_before,
            limit=limit,
        )

    async def get_recording_status(
        self, recording_id: NonEmptyString
    ) -> RecordingStatus:
        recording = await self.get_recording(recording_id)
        return recording.status

    async def upload_logo(self, room_name: str, logo_path: str) -> bool:
        return True

    def verify_webhook_signature(
        self, body: bytes, signature: str, timestamp: str | None = None
    ) -> bool:
        """Verify Daily.co webhook signature using the dailyco_api module."""
        if not self.config.webhook_secret:
            logger.warning("Webhook secret not configured")
            return False

        return verify_webhook_signature(
            body=body,
            signature=signature,
            timestamp=timestamp or "",
            webhook_secret=self.config.webhook_secret,
        )

    async def create_meeting_token(
        self,
        room_name: DailyRoomName,
        start_cloud_recording: bool,
        enable_recording_ui: bool,
        user_id: NonEmptyString | None = None,
        is_owner: bool = False,
    ) -> NonEmptyString:
        properties = MeetingTokenProperties(
            room_name=room_name,
            user_id=user_id,
            start_cloud_recording=start_cloud_recording,
            enable_recording_ui=enable_recording_ui,
            is_owner=is_owner,
        )
        request = CreateMeetingTokenRequest(properties=properties)
        result = await self._api_client.create_meeting_token(request)
        return result.token

    async def close(self):
        """Clean up API client resources."""
        await self._api_client.close()

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
server/reflector/video_platforms/factory.py (new file, 53 lines)
@@ -0,0 +1,53 @@
from reflector.settings import settings
from reflector.storage import get_dailyco_storage, get_whereby_storage

from ..schemas.platform import WHEREBY_PLATFORM, Platform
from .base import VideoPlatformClient, VideoPlatformConfig
from .registry import get_platform_client


def get_platform_config(platform: Platform) -> VideoPlatformConfig:
    if platform == WHEREBY_PLATFORM:
        if not settings.WHEREBY_API_KEY:
            raise ValueError(
                "WHEREBY_API_KEY is required when platform='whereby'. "
                "Set WHEREBY_API_KEY environment variable."
            )
        whereby_storage = get_whereby_storage()
        key_id, secret = whereby_storage.key_credentials
        return VideoPlatformConfig(
            api_key=settings.WHEREBY_API_KEY,
            webhook_secret=settings.WHEREBY_WEBHOOK_SECRET or "",
            api_url=settings.WHEREBY_API_URL,
            s3_bucket=whereby_storage.bucket_name,
            s3_region=whereby_storage.region,
            aws_access_key_id=key_id,
            aws_access_key_secret=secret,
        )
    elif platform == "daily":
        if not settings.DAILY_API_KEY:
            raise ValueError(
                "DAILY_API_KEY is required when platform='daily'. "
                "Set DAILY_API_KEY environment variable."
            )
        if not settings.DAILY_SUBDOMAIN:
            raise ValueError(
                "DAILY_SUBDOMAIN is required when platform='daily'. "
                "Set DAILY_SUBDOMAIN environment variable."
            )
        daily_storage = get_dailyco_storage()
        return VideoPlatformConfig(
            api_key=settings.DAILY_API_KEY,
            webhook_secret=settings.DAILY_WEBHOOK_SECRET or "",
            subdomain=settings.DAILY_SUBDOMAIN,
            s3_bucket=daily_storage.bucket_name,
            s3_region=daily_storage.region,
            aws_role_arn=daily_storage.role_credential,
        )
    else:
        raise ValueError(f"Unknown platform: {platform}")


def create_platform_client(platform: Platform) -> VideoPlatformClient:
    config = get_platform_config(platform)
    return get_platform_client(platform, config)
server/reflector/video_platforms/models.py (new file, 60 lines)
@@ -0,0 +1,60 @@
from datetime import datetime
from typing import Any, Dict, Literal, Optional

from pydantic import BaseModel, Field

from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
from reflector.utils.string import NonEmptyString

RecordingType = Literal["none", "local", "cloud"]


class SessionData(BaseModel):
    """Platform-agnostic session data.

    Represents a participant session in a meeting room, regardless of platform.
    Used to determine if a meeting is still active or has ended.
    """

    session_id: NonEmptyString = Field(description="Unique session identifier")
    started_at: datetime = Field(description="When session started (UTC)")
    ended_at: datetime | None = Field(
        description="When session ended (UTC), None if still active"
    )


class MeetingData(BaseModel):
    platform: Platform
    meeting_id: NonEmptyString = Field(
        description="Platform-specific meeting identifier"
    )
    room_url: NonEmptyString = Field(description="URL for participants to join")
    host_room_url: NonEmptyString = Field(
        description="URL for hosts (may be same as room_url)"
    )
    room_name: NonEmptyString = Field(description="Human-readable room name")
    extra_data: Dict[str, Any] = Field(default_factory=dict)

    class Config:
        json_schema_extra = {
            "example": {
                "platform": WHEREBY_PLATFORM,
                "meeting_id": "12345678",
                "room_url": "https://subdomain.whereby.com/room-20251008120000",
                "host_room_url": "https://subdomain.whereby.com/room-20251008120000?roomKey=abc123",
                "room_name": "room-20251008120000",
            }
        }


class VideoPlatformConfig(BaseModel):
    api_key: str
    webhook_secret: str
    api_url: Optional[str] = None
    subdomain: Optional[str] = None  # Whereby/Daily subdomain
    s3_bucket: Optional[str] = None
    s3_region: Optional[str] = None
    # Whereby uses access keys, Daily uses IAM role
    aws_access_key_id: Optional[str] = None
    aws_access_key_secret: Optional[str] = None
    aws_role_arn: Optional[str] = None
server/reflector/video_platforms/registry.py (new file, 35 lines)
@@ -0,0 +1,35 @@
from typing import Dict, Type

from ..schemas.platform import DAILY_PLATFORM, WHEREBY_PLATFORM, Platform
from .base import VideoPlatformClient, VideoPlatformConfig

_PLATFORMS: Dict[Platform, Type[VideoPlatformClient]] = {}


def register_platform(name: Platform, client_class: Type[VideoPlatformClient]):
    _PLATFORMS[name] = client_class


def get_platform_client(
    platform: Platform, config: VideoPlatformConfig
) -> VideoPlatformClient:
    if platform not in _PLATFORMS:
        raise ValueError(f"Unknown video platform: {platform}")

    client_class = _PLATFORMS[platform]
    return client_class(config)


def get_available_platforms() -> list[Platform]:
    return list(_PLATFORMS.keys())


def _register_builtin_platforms():
    from .daily import DailyClient  # noqa: PLC0415
    from .whereby import WherebyClient  # noqa: PLC0415

    register_platform(WHEREBY_PLATFORM, WherebyClient)
    register_platform(DAILY_PLATFORM, DailyClient)


_register_builtin_platforms()
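
A hedged sketch of the registry in use; the config values are placeholders, and the exact Platform literals are assumed from DAILY_PLATFORM/WHEREBY_PLATFORM:

from reflector.video_platforms.models import VideoPlatformConfig
from reflector.video_platforms.registry import (
    get_available_platforms,
    get_platform_client,
)

print(get_available_platforms())  # e.g. ['whereby', 'daily'] once builtins are registered

config = VideoPlatformConfig(api_key="key", webhook_secret="secret")
client = get_platform_client("daily", config)  # -> DailyClient instance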
server/reflector/video_platforms/whereby.py (new file, 170 lines)
@@ -0,0 +1,170 @@
import hmac
import json
import re
import time
from datetime import datetime
from hashlib import sha256
from typing import Optional

import httpx

from reflector.db.rooms import Room
from reflector.storage import get_whereby_storage

from ..schemas.platform import WHEREBY_PLATFORM, Platform
from ..utils.string import NonEmptyString
from .base import VideoPlatformClient
from .models import MeetingData, SessionData, VideoPlatformConfig
from .whereby_utils import whereby_room_name_prefix


class WherebyClient(VideoPlatformClient):
    PLATFORM_NAME: Platform = WHEREBY_PLATFORM
    TIMEOUT = 10  # seconds
    MAX_ELAPSED_TIME = 60 * 1000  # 1 minute in milliseconds

    def __init__(self, config: VideoPlatformConfig):
        super().__init__(config)
        self.headers = {
            "Content-Type": "application/json; charset=utf-8",
            "Authorization": f"Bearer {config.api_key}",
        }

    async def create_meeting(
        self, room_name_prefix: NonEmptyString, end_date: datetime, room: Room
    ) -> MeetingData:
        data = {
            "isLocked": room.is_locked,
            "roomNamePrefix": whereby_room_name_prefix(room_name_prefix),
            "roomNamePattern": "uuid",
            "roomMode": room.room_mode,
            "endDate": end_date.isoformat(),
            "fields": ["hostRoomUrl"],
        }

        if room.recording_type == "cloud":
            # Get storage config for passing credentials to the Whereby API
            whereby_storage = get_whereby_storage()
            key_id, secret = whereby_storage.key_credentials
            data["recording"] = {
                "type": room.recording_type,
                "destination": {
                    "provider": "s3",
                    "bucket": whereby_storage.bucket_name,
                    "accessKeyId": key_id,
                    "accessKeySecret": secret,
                    "fileFormat": "mp4",
                },
                "startTrigger": room.recording_trigger,
            }

        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{self.config.api_url}/meetings",
                headers=self.headers,
                json=data,
                timeout=self.TIMEOUT,
            )
            response.raise_for_status()
            result = response.json()

        return MeetingData(
            meeting_id=result["meetingId"],
            room_name=result["roomName"],
            room_url=result["roomUrl"],
            host_room_url=result["hostRoomUrl"],
            platform=self.PLATFORM_NAME,
            extra_data=result,
        )

    async def get_room_sessions(self, room_name: str) -> list[SessionData]:
        """Get room session history from the Whereby API.

        The API returns a payload of the shape:
        {
            "cursor": "text",
            "results": [
                {
                    "roomSessionId": "e2f29530-46ec-4cee-8b27-e565cb5bb2e9",
                    "roomName": "/room-prefix-793e9ec1-c686-423d-9043-9b7a10c553fd",
                    "startedAt": "2025-01-01T00:00:00.000Z",
                    "endedAt": "2025-01-01T01:00:00.000Z",  # or null if still active
                    "totalParticipantMinutes": 124,
                    "totalRecorderMinutes": 120,
                    "totalStreamerMinutes": 120,
                    "totalUniqueParticipants": 4,
                    "totalUniqueRecorders": 3,
                    "totalUniqueStreamers": 2
                }
            ]
        }
        """
        async with httpx.AsyncClient() as client:
            response = await client.get(
                f"{self.config.api_url}/insights/room-sessions?roomName={room_name}",
                headers=self.headers,
                timeout=self.TIMEOUT,
            )
            response.raise_for_status()
            results = response.json().get("results", [])

        return [
            SessionData(
                session_id=s["roomSessionId"],
                started_at=datetime.fromisoformat(
                    s["startedAt"].replace("Z", "+00:00")
                ),
                ended_at=datetime.fromisoformat(s["endedAt"].replace("Z", "+00:00"))
                if s.get("endedAt")
                else None,
            )
            for s in results
        ]

    async def upload_logo(self, room_name: str, logo_path: str) -> bool:
        async with httpx.AsyncClient() as client:
            with open(logo_path, "rb") as f:
                response = await client.put(
                    f"{self.config.api_url}/rooms/{room_name}/theme/logo",
                    headers={
                        "Authorization": f"Bearer {self.config.api_key}",
                    },
                    timeout=self.TIMEOUT,
                    files={"image": f},
                )
                response.raise_for_status()
        return True

    def verify_webhook_signature(
        self, body: bytes, signature: str, timestamp: Optional[str] = None
    ) -> bool:
        if not signature:
            return False

        matches = re.match(r"t=(.*),v1=(.*)", signature)
        if not matches:
            return False

        ts, sig = matches.groups()

        current_time = int(time.time() * 1000)
        diff_time = current_time - int(ts) * 1000
        if diff_time >= self.MAX_ELAPSED_TIME:
            return False

        body_dict = json.loads(body)
        signed_payload = f"{ts}.{json.dumps(body_dict, separators=(',', ':'))}"
        hmac_obj = hmac.new(
            self.config.webhook_secret.encode("utf-8"),
            signed_payload.encode("utf-8"),
            sha256,
        )
        expected_signature = hmac_obj.hexdigest()

        try:
            return hmac.compare_digest(
                expected_signature.encode("utf-8"), sig.encode("utf-8")
            )
        except Exception:
            return False
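
For illustration, here is how a valid test signature could be generated, mirroring the verification scheme above (the secret and event body are made up):

import hmac
import json
import time
from hashlib import sha256

secret = "test-webhook-secret"
body = json.dumps({"type": "room.client.joined"}).encode("utf-8")
ts = str(int(time.time()))  # seconds; the verifier multiplies by 1000
payload = f"{ts}.{json.dumps(json.loads(body), separators=(',', ':'))}"
v1 = hmac.new(secret.encode("utf-8"), payload.encode("utf-8"), sha256).hexdigest()
header = f"t={ts},v1={v1}"  # verifies when config.webhook_secret == secret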
server/reflector/video_platforms/whereby_utils.py (new file, 38 lines)
@@ -0,0 +1,38 @@
import re
from datetime import datetime

from reflector.utils.datetime import parse_datetime_with_timezone
from reflector.utils.string import NonEmptyString, parse_non_empty_string
from reflector.video_platforms.base import ROOM_PREFIX_SEPARATOR


def parse_whereby_recording_filename(
    object_key: NonEmptyString,
) -> tuple[NonEmptyString, datetime]:
    filename = parse_non_empty_string(object_key.rsplit(".", 1)[0])
    timestamp_pattern = r"(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)"
    match = re.search(timestamp_pattern, filename)
    if not match:
        raise ValueError(f"No ISO timestamp found in filename: {object_key}")
    timestamp_str = match.group(1)
    timestamp_start = match.start(1)
    room_name_part = filename[:timestamp_start]
    if room_name_part.endswith(ROOM_PREFIX_SEPARATOR):
        room_name_part = room_name_part[: -len(ROOM_PREFIX_SEPARATOR)]
    else:
        raise ValueError(
            f"room name {room_name_part} doesn't end with {ROOM_PREFIX_SEPARATOR} in filename: {object_key}"
        )

    return parse_non_empty_string(room_name_part), parse_datetime_with_timezone(
        timestamp_str
    )


def whereby_room_name_prefix(room_name_prefix: NonEmptyString) -> NonEmptyString:
    return room_name_prefix + ROOM_PREFIX_SEPARATOR


# the room name comes with a leading "/" from the Whereby API but lacks it
# elsewhere, e.g. in recording filenames
def room_name_to_whereby_api_room_name(room_name: NonEmptyString) -> NonEmptyString:
    return f"/{room_name}"
server/reflector/views/daily.py (new file, 233 lines)
@@ -0,0 +1,233 @@
import json
from typing import assert_never

from fastapi import APIRouter, HTTPException, Request
from pydantic import TypeAdapter

from reflector.dailyco_api import (
    DailyWebhookEventUnion,
    ParticipantJoinedEvent,
    ParticipantLeftEvent,
    RecordingErrorEvent,
    RecordingReadyEvent,
    RecordingStartedEvent,
)
from reflector.db.meetings import meetings_controller
from reflector.logger import logger as _logger
from reflector.settings import settings
from reflector.video_platforms.factory import create_platform_client
from reflector.worker.process import (
    poll_daily_room_presence_task,
    process_multitrack_recording,
)

router = APIRouter()

logger = _logger.bind(platform="daily")


@router.post("/webhook")
async def webhook(request: Request):
    """Handle Daily webhook events.

    Example webhook payload:
    {
        "version": "1.0.0",
        "type": "recording.ready-to-download",
        "id": "rec-rtd-c3df927c-f738-4471-a2b7-066fa7e95a6b-1692124192",
        "payload": {
            "recording_id": "08fa0b24-9220-44c5-846c-3f116cf8e738",
            "room_name": "Xcm97xRZ08b2dePKb78g",
            "start_ts": 1692124183,
            "status": "finished",
            "max_participants": 1,
            "duration": 9,
            "share_token": "ntDCL5k98Ulq",  # gitleaks:allow
            "s3_key": "api-test-1j8fizhzd30c/Xcm97xRZ08b2dePKb78g/1692124183028"
        },
        "event_ts": 1692124192
    }

    Daily.co circuit-breaker: after 3+ failed responses (4xx/5xx) the webhook
    state goes to FAILED and Daily stops sending events. Reset it with
    scripts/recreate_daily_webhook.py.
    """
    body = await request.body()
    signature = request.headers.get("X-Webhook-Signature", "")
    timestamp = request.headers.get("X-Webhook-Timestamp", "")

    client = create_platform_client("daily")

    if not client.verify_webhook_signature(body, signature, timestamp):
        logger.warning(
            "Invalid webhook signature",
            signature=signature,
            timestamp=timestamp,
            has_body=bool(body),
        )
        raise HTTPException(status_code=401, detail="Invalid webhook signature")

    try:
        body_json = json.loads(body)
    except json.JSONDecodeError:
        raise HTTPException(status_code=422, detail="Invalid JSON")

    if body_json.get("test") == "test":
        logger.info("Received Daily webhook test event")
        return {"status": "ok"}

    event_adapter = TypeAdapter(DailyWebhookEventUnion)
    try:
        event = event_adapter.validate_python(body_json)
    except Exception as e:
        logger.error("Failed to parse webhook event", error=str(e), body=body.decode())
        raise HTTPException(status_code=422, detail="Invalid event format")

    match event:
        case ParticipantJoinedEvent():
            await _handle_participant_joined(event)
        case ParticipantLeftEvent():
            await _handle_participant_left(event)
        case RecordingStartedEvent():
            await _handle_recording_started(event)
        case RecordingReadyEvent():
            await _handle_recording_ready(event)
        case RecordingErrorEvent():
            await _handle_recording_error(event)
        case _:
            assert_never(event)

    return {"status": "ok"}


async def _queue_poll_for_room(
    room_name: str | None,
    event_type: str,
    user_id: str | None,
    session_id: str | None,
    **log_kwargs,
) -> None:
    """Queue poll task for room by name, handling missing room/meeting cases."""
    if not room_name:
        logger.warning(f"{event_type}: no room in payload")
        return

    meeting = await meetings_controller.get_by_room_name(room_name)
    if not meeting:
        logger.warning(f"{event_type}: meeting not found", room_name=room_name)
        return

    poll_daily_room_presence_task.delay(meeting.id)

    logger.info(
        f"{event_type.replace('.', ' ').title()} - poll queued",
        meeting_id=meeting.id,
        room_name=room_name,
        user_id=user_id,
        session_id=session_id,
        **log_kwargs,
    )


async def _handle_participant_joined(event: ParticipantJoinedEvent):
    """Queue poll task for presence reconciliation."""
    await _queue_poll_for_room(
        event.payload.room_name,
        "participant.joined",
        event.payload.user_id,
        event.payload.session_id,
        user_name=event.payload.user_name,
    )


async def _handle_participant_left(event: ParticipantLeftEvent):
    """Queue poll task for presence reconciliation."""
    await _queue_poll_for_room(
        event.payload.room_name,
        "participant.left",
        event.payload.user_id,
        event.payload.session_id,
        duration=event.payload.duration,
    )


async def _handle_recording_started(event: RecordingStartedEvent):
    room_name = event.payload.room_name
    if not room_name:
        logger.warning(
            "recording.started: no room_name in payload", payload=event.payload
        )
        return

    meeting = await meetings_controller.get_by_room_name(room_name)
    if meeting:
        logger.info(
            "Recording started",
            meeting_id=meeting.id,
            room_name=room_name,
            recording_id=event.payload.recording_id,
            platform="daily",
        )
    else:
        logger.warning("recording.started: meeting not found", room_name=room_name)


async def _handle_recording_ready(event: RecordingReadyEvent):
    room_name = event.payload.room_name
    recording_id = event.payload.recording_id
    tracks = event.payload.tracks

    if not tracks:
        logger.warning(
            "recording.ready-to-download: missing tracks",
            room_name=room_name,
            recording_id=recording_id,
            payload=event.payload,
        )
        return

    logger.info(
        "Recording ready for download",
        room_name=room_name,
        recording_id=recording_id,
        num_tracks=len(tracks),
        platform="daily",
    )

    bucket_name = settings.DAILYCO_STORAGE_AWS_BUCKET_NAME
    if not bucket_name:
        logger.error(
            "DAILYCO_STORAGE_AWS_BUCKET_NAME not configured; cannot process Daily recording"
        )
        return

    track_keys = [t.s3Key for t in tracks if t.type == "audio"]

    logger.info(
        "Recording webhook queuing processing",
        recording_id=recording_id,
        room_name=room_name,
    )

    process_multitrack_recording.delay(
        bucket_name=bucket_name,
        daily_room_name=room_name,
        recording_id=recording_id,
        track_keys=track_keys,
    )


async def _handle_recording_error(event: RecordingErrorEvent):
    payload = event.payload
    room_name = payload.room_name

    meeting = await meetings_controller.get_by_room_name(room_name)
    if meeting:
        logger.error(
            "Recording error",
            meeting_id=meeting.id,
            room_name=room_name,
            error=payload.error_msg,
            platform="daily",
        )
    else:
        logger.warning("recording.error: meeting not found", room_name=room_name)
server/reflector/views/rooms.py
@@ -15,9 +15,11 @@ from reflector.db.calendar_events import calendar_events_controller
 from reflector.db.meetings import meetings_controller
 from reflector.db.rooms import rooms_controller
 from reflector.redis_cache import RedisAsyncLock
+from reflector.schemas.platform import Platform
 from reflector.services.ics_sync import ics_sync_service
 from reflector.settings import settings
-from reflector.whereby import create_meeting, upload_logo
+from reflector.utils.url import add_query_param
+from reflector.video_platforms.factory import create_platform_client
 from reflector.worker.webhook import test_webhook

 logger = logging.getLogger(__name__)
@@ -41,6 +43,7 @@ class Room(BaseModel):
     ics_enabled: bool = False
     ics_last_sync: Optional[datetime] = None
     ics_last_etag: Optional[str] = None
+    platform: Platform


 class RoomDetails(Room):
@@ -68,6 +71,7 @@ class Meeting(BaseModel):
     is_active: bool = True
     calendar_event_id: str | None = None
     calendar_metadata: dict[str, Any] | None = None
+    platform: Platform


 class CreateRoom(BaseModel):
@@ -85,6 +89,7 @@ class CreateRoom(BaseModel):
     ics_url: Optional[str] = None
     ics_fetch_interval: int = 300
     ics_enabled: bool = False
+    platform: Platform


 class UpdateRoom(BaseModel):
@@ -102,6 +107,7 @@ class UpdateRoom(BaseModel):
     ics_url: Optional[str] = None
     ics_fetch_interval: Optional[int] = None
     ics_enabled: Optional[bool] = None
+    platform: Optional[Platform] = None


 class CreateRoomMeeting(BaseModel):
@@ -165,14 +171,6 @@ class CalendarEventResponse(BaseModel):
 router = APIRouter()


-def parse_datetime_with_timezone(iso_string: str) -> datetime:
-    """Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive)."""
-    dt = datetime.fromisoformat(iso_string)
-    if dt.tzinfo is None:
-        dt = dt.replace(tzinfo=timezone.utc)
-    return dt
-
-
 @router.get("/rooms", response_model=Page[RoomDetails])
 async def rooms_list(
     user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
@@ -182,13 +180,15 @@ async def rooms_list(

     user_id = user["sub"] if user else None

-    return await apaginate(
+    paginated = await apaginate(
         get_database(),
         await rooms_controller.get_all(
             user_id=user_id, order_by="-created_at", return_query=True
         ),
     )
+
+    return paginated


 @router.get("/rooms/{room_id}", response_model=RoomDetails)
 async def rooms_get(
@@ -214,14 +214,11 @@ async def rooms_get_by_name(
     if not room:
         raise HTTPException(status_code=404, detail="Room not found")

-    # Convert to RoomDetails format (add webhook fields if user is owner)
     room_dict = room.__dict__.copy()
     if user_id == room.user_id:
-        # User is owner, include webhook details if available
         room_dict["webhook_url"] = getattr(room, "webhook_url", None)
         room_dict["webhook_secret"] = getattr(room, "webhook_secret", None)
     else:
-        # Non-owner, hide webhook details
         room_dict["webhook_url"] = None
         room_dict["webhook_secret"] = None

@@ -251,6 +248,7 @@ async def rooms_create(
         ics_url=room.ics_url,
         ics_fetch_interval=room.ics_fetch_interval,
         ics_enabled=room.ics_enabled,
+        platform=room.platform,
     )


@@ -312,22 +310,41 @@ async def rooms_create_meeting(
                 room=room, current_time=current_time
             )

+            if meeting is not None:
+                settings_match = (
+                    meeting.is_locked == room.is_locked
+                    and meeting.room_mode == room.room_mode
+                    and meeting.recording_type == room.recording_type
+                    and meeting.recording_trigger == room.recording_trigger
+                    and meeting.platform == room.platform
+                )
+                if not settings_match:
+                    logger.info(
+                        f"Room settings changed for {room_name}, creating new meeting",
+                        room_id=room.id,
+                        old_meeting_id=meeting.id,
+                    )
+                    meeting = None
+
             if meeting is None:
                 end_date = current_time + timedelta(hours=8)

-                whereby_meeting = await create_meeting("", end_date=end_date, room=room)
+                platform = room.platform
+                client = create_platform_client(platform)

-                await upload_logo(whereby_meeting["roomName"], "./images/logo.png")
+                meeting_data = await client.create_meeting(
+                    room.name, end_date=end_date, room=room
+                )
+
+                await client.upload_logo(meeting_data.room_name, "./images/logo.png")

                 meeting = await meetings_controller.create(
-                    id=whereby_meeting["meetingId"],
-                    room_name=whereby_meeting["roomName"],
-                    room_url=whereby_meeting["roomUrl"],
-                    host_room_url=whereby_meeting["hostRoomUrl"],
-                    start_date=parse_datetime_with_timezone(
-                        whereby_meeting["startDate"]
-                    ),
-                    end_date=parse_datetime_with_timezone(whereby_meeting["endDate"]),
+                    id=meeting_data.meeting_id,
+                    room_name=meeting_data.room_name,
+                    room_url=meeting_data.room_url,
+                    host_room_url=meeting_data.host_room_url,
+                    start_date=current_time,
+                    end_date=end_date,
                     room=room,
                 )
     except LockError:
@@ -336,7 +353,7 @@ async def rooms_create_meeting(
             status_code=503, detail="Meeting creation in progress, please try again"
         )

-    if user_id != room.user_id:
+    if user_id != room.user_id and meeting.platform == "whereby":
         meeting.host_room_url = ""

     return meeting
@@ -490,10 +507,13 @@ async def rooms_list_active_meetings(
         room=room, current_time=current_time
     )

-    # Hide host URLs from non-owners
+    for meeting in meetings:
+        meeting.platform = room.platform
+
     if user_id != room.user_id:
         for meeting in meetings:
-            meeting.host_room_url = ""
+            if meeting.platform == "whereby":
+                meeting.host_room_url = ""

     return meetings

@@ -511,16 +531,11 @@ async def rooms_get_meeting(
     if not room:
         raise HTTPException(status_code=404, detail="Room not found")

-    meeting = await meetings_controller.get_by_id(meeting_id)
+    meeting = await meetings_controller.get_by_id(meeting_id, room=room)
     if not meeting:
         raise HTTPException(status_code=404, detail="Meeting not found")

-    if meeting.room_id != room.id:
-        raise HTTPException(
-            status_code=403, detail="Meeting does not belong to this room"
-        )
-
-    if user_id != room.user_id and not room.is_shared:
+    if user_id != room.user_id and not room.is_shared and meeting.platform == "whereby":
         meeting.host_room_url = ""

     return meeting
@@ -538,16 +553,11 @@ async def rooms_join_meeting(
     if not room:
         raise HTTPException(status_code=404, detail="Room not found")

-    meeting = await meetings_controller.get_by_id(meeting_id)
+    meeting = await meetings_controller.get_by_id(meeting_id, room=room)

     if not meeting:
         raise HTTPException(status_code=404, detail="Meeting not found")

-    if meeting.room_id != room.id:
-        raise HTTPException(
-            status_code=403, detail="Meeting does not belong to this room"
-        )
-
     if not meeting.is_active:
         raise HTTPException(status_code=400, detail="Meeting is not active")

@@ -555,8 +565,16 @@ async def rooms_join_meeting(
     if meeting.end_date <= current_time:
         raise HTTPException(status_code=400, detail="Meeting has ended")

-    # Hide host URL from non-owners
-    if user_id != room.user_id:
-        meeting.host_room_url = ""
+    if meeting.platform == "daily" and user_id is not None:
+        client = create_platform_client(meeting.platform)
+        token = await client.create_meeting_token(
+            meeting.room_name,
+            start_cloud_recording=meeting.recording_type == "cloud",
+            enable_recording_ui=meeting.recording_type == "local",
+            user_id=user_id,
+            is_owner=user_id == room.user_id,
+        )
+        meeting = meeting.model_copy()
+        meeting.room_url = add_query_param(meeting.room_url, "t", token)

     return meeting
server/reflector/views/transcripts.py
@@ -1,14 +1,22 @@
 from datetime import datetime, timedelta, timezone
-from typing import Annotated, Literal, Optional
+from typing import Annotated, Literal, Optional, assert_never

 from fastapi import APIRouter, Depends, HTTPException, Query
 from fastapi_pagination import Page
 from fastapi_pagination.ext.databases import apaginate
 from jose import jwt
-from pydantic import AwareDatetime, BaseModel, Field, constr, field_serializer
+from pydantic import (
+    AwareDatetime,
+    BaseModel,
+    Discriminator,
+    Field,
+    constr,
+    field_serializer,
+)

 import reflector.auth as auth
 from reflector.db import get_database
+from reflector.db.recordings import recordings_controller
 from reflector.db.search import (
     DEFAULT_SEARCH_LIMIT,
     SearchLimit,
@@ -31,7 +39,14 @@ from reflector.db.transcripts import (
 )
 from reflector.processors.types import Transcript as ProcessorTranscript
 from reflector.processors.types import Word
+from reflector.schemas.transcript_formats import TranscriptFormat, TranscriptSegment
 from reflector.settings import settings
+from reflector.utils.transcript_formats import (
+    topics_to_webvtt_named,
+    transcript_to_json_segments,
+    transcript_to_text,
+    transcript_to_text_timestamped,
+)
 from reflector.ws_manager import get_ws_manager
 from reflector.zulip import (
     InvalidMessageError,
@@ -46,6 +61,14 @@ ALGORITHM = "HS256"
 DOWNLOAD_EXPIRE_MINUTES = 60


+async def _get_is_multitrack(transcript) -> bool:
+    """Detect if transcript is from multitrack recording."""
+    if not transcript.recording_id:
+        return False
+    recording = await recordings_controller.get_by_id(transcript.recording_id)
+    return recording is not None and recording.is_multitrack
+
+
 def create_access_token(data: dict, expires_delta: timedelta):
     to_encode = data.copy()
     expire = datetime.now(timezone.utc) + expires_delta
@@ -88,10 +111,84 @@ class GetTranscriptMinimal(BaseModel):
     audio_deleted: bool | None = None


-class GetTranscript(GetTranscriptMinimal):
+class GetTranscriptWithParticipants(GetTranscriptMinimal):
     participants: list[TranscriptParticipant] | None
+
+
+class GetTranscriptWithText(GetTranscriptWithParticipants):
+    """
+    Transcript response with plain text format.
+
+    Format: Speaker names followed by their dialogue, one line per segment.
+    Example:
+        John Smith: Hello everyone
+        Jane Doe: Hi there
+    """
+
+    transcript_format: Literal["text"] = "text"
+    transcript: str
+
+
+class GetTranscriptWithTextTimestamped(GetTranscriptWithParticipants):
+    """
+    Transcript response with timestamped text format.
+
+    Format: [MM:SS] timestamp prefix before each speaker and dialogue.
+    Example:
+        [00:00] John Smith: Hello everyone
+        [00:05] Jane Doe: Hi there
+    """
+
+    transcript_format: Literal["text-timestamped"] = "text-timestamped"
+    transcript: str
+
+
+class GetTranscriptWithWebVTTNamed(GetTranscriptWithParticipants):
+    """
+    Transcript response in WebVTT subtitle format with participant names.
+
+    Format: Standard WebVTT with voice tags using participant names.
+    Example:
+        WEBVTT
+
+        00:00:00.000 --> 00:00:05.000
+        <v John Smith>Hello everyone
+    """
+
+    transcript_format: Literal["webvtt-named"] = "webvtt-named"
+    transcript: str
+
+
+class GetTranscriptWithJSON(GetTranscriptWithParticipants):
+    """
+    Transcript response as structured JSON segments.
+
+    Format: Array of segment objects with speaker info, text, and timing.
+    Example:
+        [
+            {
+                "speaker": 0,
+                "speaker_name": "John Smith",
+                "text": "Hello everyone",
+                "start": 0.0,
+                "end": 5.0
+            }
+        ]
+    """
+
+    transcript_format: Literal["json"] = "json"
+    transcript: list[TranscriptSegment]
+
+
+GetTranscript = Annotated[
+    GetTranscriptWithText
+    | GetTranscriptWithTextTimestamped
+    | GetTranscriptWithWebVTTNamed
+    | GetTranscriptWithJSON,
+    Discriminator("transcript_format"),
+]


 class CreateTranscript(BaseModel):
     name: str
     source_language: str = Field("en")
|
||||
@@ -228,7 +325,7 @@ async def transcripts_search(
    )


@router.post("/transcripts", response_model=GetTranscript)
@router.post("/transcripts", response_model=GetTranscriptWithParticipants)
async def transcripts_create(
    info: CreateTranscript,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
@@ -272,7 +369,7 @@ class GetTranscriptTopic(BaseModel):
    segments: list[GetTranscriptSegmentTopic] = []

    @classmethod
    def from_transcript_topic(cls, topic: TranscriptTopic):
    def from_transcript_topic(cls, topic: TranscriptTopic, is_multitrack: bool = False):
        if not topic.words:
            # In previous version, words were missing
            # Just output a segment with speaker 0
@@ -296,7 +393,7 @@ class GetTranscriptTopic(BaseModel):
                start=segment.start,
                speaker=segment.speaker,
            )
            for segment in transcript.as_segments()
            for segment in transcript.as_segments(is_multitrack)
        ]
        return cls(
            id=topic.id,
@@ -313,8 +410,8 @@ class GetTranscriptTopicWithWords(GetTranscriptTopic):
    words: list[Word] = []

    @classmethod
    def from_transcript_topic(cls, topic: TranscriptTopic):
        instance = super().from_transcript_topic(topic)
    def from_transcript_topic(cls, topic: TranscriptTopic, is_multitrack: bool = False):
        instance = super().from_transcript_topic(topic, is_multitrack)
        if topic.words:
            instance.words = topic.words
        return instance
@@ -329,8 +426,8 @@ class GetTranscriptTopicWithWordsPerSpeaker(GetTranscriptTopic):
    words_per_speaker: list[SpeakerWords] = []

    @classmethod
    def from_transcript_topic(cls, topic: TranscriptTopic):
        instance = super().from_transcript_topic(topic)
    def from_transcript_topic(cls, topic: TranscriptTopic, is_multitrack: bool = False):
        instance = super().from_transcript_topic(topic, is_multitrack)
        if topic.words:
            words_per_speakers = []
            # group words by speaker
@@ -362,14 +459,76 @@ class GetTranscriptTopicWithWordsPerSpeaker(GetTranscriptTopic):
async def transcript_get(
    transcript_id: str,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
    transcript_format: TranscriptFormat = "text",
):
    user_id = user["sub"] if user else None
    return await transcripts_controller.get_by_id_for_http(
    transcript = await transcripts_controller.get_by_id_for_http(
        transcript_id, user_id=user_id
    )

    is_multitrack = await _get_is_multitrack(transcript)

@router.patch("/transcripts/{transcript_id}", response_model=GetTranscript)
    base_data = {
        "id": transcript.id,
        "user_id": transcript.user_id,
        "name": transcript.name,
        "status": transcript.status,
        "locked": transcript.locked,
        "duration": transcript.duration,
        "title": transcript.title,
        "short_summary": transcript.short_summary,
        "long_summary": transcript.long_summary,
        "created_at": transcript.created_at,
        "share_mode": transcript.share_mode,
        "source_language": transcript.source_language,
        "target_language": transcript.target_language,
        "reviewed": transcript.reviewed,
        "meeting_id": transcript.meeting_id,
        "source_kind": transcript.source_kind,
        "room_id": transcript.room_id,
        "audio_deleted": transcript.audio_deleted,
        "participants": transcript.participants,
    }

    if transcript_format == "text":
        return GetTranscriptWithText(
            **base_data,
            transcript_format="text",
            transcript=transcript_to_text(
                transcript.topics, transcript.participants, is_multitrack
            ),
        )
    elif transcript_format == "text-timestamped":
        return GetTranscriptWithTextTimestamped(
            **base_data,
            transcript_format="text-timestamped",
            transcript=transcript_to_text_timestamped(
                transcript.topics, transcript.participants, is_multitrack
            ),
        )
    elif transcript_format == "webvtt-named":
        return GetTranscriptWithWebVTTNamed(
            **base_data,
            transcript_format="webvtt-named",
            transcript=topics_to_webvtt_named(
                transcript.topics, transcript.participants, is_multitrack
            ),
        )
    elif transcript_format == "json":
        return GetTranscriptWithJSON(
            **base_data,
            transcript_format="json",
            transcript=transcript_to_json_segments(
                transcript.topics, transcript.participants, is_multitrack
            ),
        )
    else:
        assert_never(transcript_format)
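
The elif chain above leans on typing.assert_never for exhaustiveness: if a new member is added to TranscriptFormat without a matching branch, type checkers flag the fall-through and the runtime raises instead of silently returning None. A self-contained sketch of the pattern (names are illustrative):

from typing import Literal, assert_never

Format = Literal["text", "json"]

def render(fmt: Format) -> str:
    if fmt == "text":
        return "plain"
    elif fmt == "json":
        return "[]"
    else:
        # mypy/pyright prove this branch unreachable; at runtime an
        # unhandled value raises AssertionError instead of passing silently.
        assert_never(fmt)
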
@router.patch(
    "/transcripts/{transcript_id}", response_model=GetTranscriptWithParticipants
)
async def transcript_update(
    transcript_id: str,
    info: UpdateTranscript,
@@ -419,9 +578,12 @@ async def transcript_get_topics(
        transcript_id, user_id=user_id
    )

    is_multitrack = await _get_is_multitrack(transcript)

    # convert to GetTranscriptTopic
    return [
        GetTranscriptTopic.from_transcript_topic(topic) for topic in transcript.topics
        GetTranscriptTopic.from_transcript_topic(topic, is_multitrack)
        for topic in transcript.topics
    ]


@@ -438,9 +600,11 @@ async def transcript_get_topics_with_words(
        transcript_id, user_id=user_id
    )

    is_multitrack = await _get_is_multitrack(transcript)

    # convert to GetTranscriptTopicWithWords
    return [
        GetTranscriptTopicWithWords.from_transcript_topic(topic)
        GetTranscriptTopicWithWords.from_transcript_topic(topic, is_multitrack)
        for topic in transcript.topics
    ]

@@ -459,13 +623,17 @@ async def transcript_get_topics_with_words_per_speaker(
        transcript_id, user_id=user_id
    )

    is_multitrack = await _get_is_multitrack(transcript)

    # get the topic from the transcript
    topic = next((t for t in transcript.topics if t.id == topic_id), None)
    if not topic:
        raise HTTPException(status_code=404, detail="Topic not found")

    # convert to GetTranscriptTopicWithWordsPerSpeaker
    return GetTranscriptTopicWithWordsPerSpeaker.from_transcript_topic(topic)
    return GetTranscriptTopicWithWordsPerSpeaker.from_transcript_topic(
        topic, is_multitrack
    )


@router.post("/transcripts/{transcript_id}/zulip")

@@ -1,12 +1,20 @@
from typing import Annotated, Optional
from typing import Annotated, Optional, assert_never

import celery
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel

import reflector.auth as auth
from reflector.db.transcripts import transcripts_controller
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.services.transcript_process import (
    ProcessError,
    ValidationAlreadyScheduled,
    ValidationError,
    ValidationLocked,
    ValidationOk,
    dispatch_transcript_processing,
    prepare_transcript_processing,
    validate_transcript_for_processing,
)

router = APIRouter()

@@ -19,38 +27,28 @@ class ProcessStatus(BaseModel):
async def transcript_process(
    transcript_id: str,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
):
) -> ProcessStatus:
    user_id = user["sub"] if user else None
    transcript = await transcripts_controller.get_by_id_for_http(
        transcript_id, user_id=user_id
    )

    if transcript.locked:
        raise HTTPException(status_code=400, detail="Transcript is locked")
    validation = await validate_transcript_for_processing(transcript)
    if isinstance(validation, ValidationLocked):
        raise HTTPException(status_code=400, detail=validation.detail)
    elif isinstance(validation, ValidationError):
        raise HTTPException(status_code=400, detail=validation.detail)
    elif isinstance(validation, ValidationAlreadyScheduled):
        return ProcessStatus(status=validation.detail)
    elif isinstance(validation, ValidationOk):
        pass
    else:
        assert_never(validation)

    if transcript.status == "idle":
        raise HTTPException(
            status_code=400, detail="Recording is not ready for processing"
        )
    config = await prepare_transcript_processing(validation)

    if task_is_scheduled_or_active(
        "reflector.pipelines.main_file_pipeline.task_pipeline_file_process",
        transcript_id=transcript_id,
    ):
        return ProcessStatus(status="already running")

    # schedule a background task process the file
    task_pipeline_file_process.delay(transcript_id=transcript_id)

    return ProcessStatus(status="ok")


def task_is_scheduled_or_active(task_name: str, **kwargs):
    inspect = celery.current_app.control.inspect()

    for worker, tasks in (inspect.scheduled() | inspect.active()).items():
        for task in tasks:
            if task["name"] == task_name and task["kwargs"] == kwargs:
                return True

    return False
    if isinstance(config, ProcessError):
        raise HTTPException(status_code=500, detail=config.detail)
    else:
        dispatch_transcript_processing(config)
        return ProcessStatus(status="ok")

@@ -3,6 +3,7 @@ from typing import Optional
from fastapi import APIRouter, WebSocket

from reflector.auth.auth_jwt import JWTAuth  # type: ignore
from reflector.db.users import user_controller
from reflector.ws_manager import get_ws_manager

router = APIRouter()
@@ -29,7 +30,18 @@ async def user_events_websocket(websocket: WebSocket):

    try:
        payload = JWTAuth().verify_token(token)
        user_id = payload.get("sub")
        authentik_uid = payload.get("sub")

        if authentik_uid:
            user = await user_controller.get_by_authentik_uid(authentik_uid)
            if user:
                user_id = user.id
            else:
                await websocket.close(code=UNAUTHORISED)
                return
        else:
            await websocket.close(code=UNAUTHORISED)
            return
    except Exception:
        await websocket.close(code=UNAUTHORISED)
        return

@@ -1,114 +0,0 @@
import logging
from datetime import datetime

import httpx

from reflector.db.rooms import Room
from reflector.settings import settings
from reflector.utils.string import parse_non_empty_string

logger = logging.getLogger(__name__)


def _get_headers():
    api_key = parse_non_empty_string(
        settings.WHEREBY_API_KEY, "WHEREBY_API_KEY value is required."
    )
    return {
        "Content-Type": "application/json; charset=utf-8",
        "Authorization": f"Bearer {api_key}",
    }


TIMEOUT = 10  # seconds


def _get_whereby_s3_auth():
    errors = []
    try:
        bucket_name = parse_non_empty_string(
            settings.RECORDING_STORAGE_AWS_BUCKET_NAME,
            "RECORDING_STORAGE_AWS_BUCKET_NAME value is required.",
        )
    except Exception as e:
        errors.append(e)
    try:
        key_id = parse_non_empty_string(
            settings.AWS_WHEREBY_ACCESS_KEY_ID,
            "AWS_WHEREBY_ACCESS_KEY_ID value is required.",
        )
    except Exception as e:
        errors.append(e)
    try:
        key_secret = parse_non_empty_string(
            settings.AWS_WHEREBY_ACCESS_KEY_SECRET,
            "AWS_WHEREBY_ACCESS_KEY_SECRET value is required.",
        )
    except Exception as e:
        errors.append(e)
    if len(errors) > 0:
        raise Exception(
            f"Failed to get Whereby auth settings: {', '.join(str(e) for e in errors)}"
        )
    return bucket_name, key_id, key_secret


async def create_meeting(room_name_prefix: str, end_date: datetime, room: Room):
    s3_bucket_name, s3_key_id, s3_key_secret = _get_whereby_s3_auth()
    data = {
        "isLocked": room.is_locked,
        "roomNamePrefix": room_name_prefix,
        "roomNamePattern": "uuid",
        "roomMode": room.room_mode,
        "endDate": end_date.isoformat(),
        "recording": {
            "type": room.recording_type,
            "destination": {
                "provider": "s3",
                "bucket": s3_bucket_name,
                "accessKeyId": s3_key_id,
                "accessKeySecret": s3_key_secret,
                "fileFormat": "mp4",
            },
            "startTrigger": room.recording_trigger,
        },
        "fields": ["hostRoomUrl"],
    }
    async with httpx.AsyncClient() as client:
        response = await client.post(
            f"{settings.WHEREBY_API_URL}/meetings",
            headers=_get_headers(),
            json=data,
            timeout=TIMEOUT,
        )
        if response.status_code == 403:
            logger.warning(
                f"Failed to create meeting: access denied on Whereby: {response.text}"
            )
        response.raise_for_status()
        return response.json()


async def get_room_sessions(room_name: str):
    async with httpx.AsyncClient() as client:
        response = await client.get(
            f"{settings.WHEREBY_API_URL}/insights/room-sessions?roomName={room_name}",
            headers=_get_headers(),
            timeout=TIMEOUT,
        )
        response.raise_for_status()
        return response.json()


async def upload_logo(room_name: str, logo_path: str):
    async with httpx.AsyncClient() as client:
        with open(logo_path, "rb") as f:
            response = await client.put(
                f"{settings.WHEREBY_API_URL}/rooms{room_name}/theme/logo",
                headers={
                    "Authorization": f"Bearer {settings.WHEREBY_API_KEY}",
                },
                timeout=TIMEOUT,
                files={"image": f},
            )
            response.raise_for_status()
@@ -38,6 +38,14 @@ else:
        "task": "reflector.worker.process.reprocess_failed_recordings",
        "schedule": crontab(hour=5, minute=0),  # Midnight EST
    },
    "poll_daily_recordings": {
        "task": "reflector.worker.process.poll_daily_recordings",
        "schedule": 180.0,  # Every 3 minutes (configurable lookback window)
    },
    "trigger_daily_reconciliation": {
        "task": "reflector.worker.process.trigger_daily_reconciliation",
        "schedule": 30.0,  # Every 30 seconds (queues poll tasks for all active meetings)
    },
    "sync_all_ics_calendars": {
        "task": "reflector.worker.ics_sync.sync_all_ics_calendars",
        "schedule": 60.0,  # Run every minute to check which rooms need sync
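
In a celery beat schedule, a bare float is an interval in seconds, while crontab entries fire at wall-clock times (evaluated in the app's configured timezone, which is why the hour=5 UTC entry is annotated "Midnight EST"). A condensed sketch of the two forms (task names are placeholders):

from celery.schedules import crontab

beat_schedule = {
    "every-three-minutes": {"task": "myapp.poll", "schedule": 180.0},
    "nightly": {"task": "myapp.cleanup", "schedule": crontab(hour=5, minute=0)},
}
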
@@ -19,7 +19,7 @@ from reflector.db.meetings import meetings
from reflector.db.recordings import recordings
from reflector.db.transcripts import transcripts, transcripts_controller
from reflector.settings import settings
from reflector.storage import get_recordings_storage
from reflector.storage import get_transcripts_storage

logger = structlog.get_logger(__name__)

@@ -53,8 +53,8 @@ async def delete_single_transcript(
        )
        if recording:
            try:
                await get_recordings_storage().delete_file(
                    recording["object_key"]
                await get_transcripts_storage().delete_file(
                    recording["object_key"], bucket=recording["bucket_name"]
                )
            except Exception as storage_error:
                logger.warning(

@@ -7,10 +7,10 @@ from celery.utils.log import get_task_logger
from reflector.asynctask import asynctask
from reflector.db.calendar_events import calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.rooms import Room, rooms_controller
from reflector.redis_cache import RedisAsyncLock
from reflector.services.ics_sync import SyncStatus, ics_sync_service
from reflector.whereby import create_meeting, upload_logo
from reflector.video_platforms.factory import create_platform_client

logger = structlog.wrap_logger(get_task_logger(__name__))

@@ -86,17 +86,17 @@ def _should_sync(room) -> bool:
MEETING_DEFAULT_DURATION = timedelta(hours=1)


async def create_upcoming_meetings_for_event(event, create_window, room_id, room):
async def create_upcoming_meetings_for_event(event, create_window, room: Room):
    if event.start_time <= create_window:
        return
    existing_meeting = await meetings_controller.get_by_calendar_event(event.id)
    existing_meeting = await meetings_controller.get_by_calendar_event(event.id, room)

    if existing_meeting:
        return

    logger.info(
        "Pre-creating meeting for calendar event",
        room_id=room_id,
        room_id=room.id,
        event_id=event.id,
        event_title=event.title,
    )
@@ -104,20 +104,22 @@ async def create_upcoming_meetings_for_event(event, create_window, room_id, room
    try:
        end_date = event.end_time or (event.start_time + MEETING_DEFAULT_DURATION)

        whereby_meeting = await create_meeting(
            "",
        client = create_platform_client(room.platform)

        meeting_data = await client.create_meeting(
            room.name,
            end_date=end_date,
            room=room,
        )
        await upload_logo(whereby_meeting["roomName"], "./images/logo.png")
        await client.upload_logo(meeting_data.room_name, "./images/logo.png")

        meeting = await meetings_controller.create(
            id=whereby_meeting["meetingId"],
            room_name=whereby_meeting["roomName"],
            room_url=whereby_meeting["roomUrl"],
            host_room_url=whereby_meeting["hostRoomUrl"],
            start_date=datetime.fromisoformat(whereby_meeting["startDate"]),
            end_date=datetime.fromisoformat(whereby_meeting["endDate"]),
            id=meeting_data.meeting_id,
            room_name=meeting_data.room_name,
            room_url=meeting_data.room_url,
            host_room_url=meeting_data.host_room_url,
            start_date=event.start_time,
            end_date=end_date,
            room=room,
            calendar_event_id=event.id,
            calendar_metadata={
@@ -136,7 +138,7 @@ async def create_upcoming_meetings_for_event(event, create_window, room_id, room
    except Exception as e:
        logger.error(
            "Failed to pre-create meeting",
            room_id=room_id,
            room_id=room.id,
            event_id=event.id,
            error=str(e),
        )
@@ -166,9 +168,7 @@ async def create_upcoming_meetings():
                )

                for event in events:
                    await create_upcoming_meetings_for_event(
                        event, create_window, room.id, room
                    )
                    await create_upcoming_meetings_for_event(event, create_window, room)
        logger.info("Completed pre-creation check for upcoming meetings")

    except Exception as e:

@@ -1,6 +1,8 @@
import json
import os
import re
from datetime import datetime, timezone
from typing import List
from urllib.parse import unquote

import av
@@ -9,29 +11,45 @@ import structlog
from celery import shared_task
from celery.utils.log import get_task_logger
from pydantic import ValidationError
from redis.exceptions import LockError

from reflector.dailyco_api import RecordingResponse
from reflector.db.daily_participant_sessions import (
    DailyParticipantSession,
    daily_participant_sessions_controller,
)
from reflector.db.meetings import meetings_controller
from reflector.db.recordings import Recording, recordings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import SourceKind, transcripts_controller
from reflector.db.transcripts import (
    SourceKind,
    transcripts_controller,
)
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_live_pipeline import asynctask
from reflector.redis_cache import get_redis_client
from reflector.pipelines.main_multitrack_pipeline import (
    task_pipeline_multitrack_process,
)
from reflector.pipelines.topic_processing import EmptyPipeline
from reflector.processors import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.redis_cache import RedisAsyncLock
from reflector.settings import settings
from reflector.whereby import get_room_sessions
from reflector.storage import get_transcripts_storage
from reflector.utils.daily import (
    DailyRoomName,
    extract_base_room_name,
    filter_cam_audio_tracks,
    recording_lock_key,
)
from reflector.video_platforms.factory import create_platform_client
from reflector.video_platforms.whereby_utils import (
    parse_whereby_recording_filename,
    room_name_to_whereby_api_room_name,
)

logger = structlog.wrap_logger(get_task_logger(__name__))


def parse_datetime_with_timezone(iso_string: str) -> datetime:
    """Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive)."""
    dt = datetime.fromisoformat(iso_string)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)
    return dt
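
A quick usage sketch of the helper above (values are illustrative):

parse_datetime_with_timezone("2025-12-02T15:30:00")
# naive input  -> datetime(2025, 12, 2, 15, 30, tzinfo=timezone.utc)
parse_datetime_with_timezone("2025-12-02T15:30:00+02:00")
# aware input  -> returned unchanged, offset preserved
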
@shared_task
def process_messages():
    queue_url = settings.AWS_PROCESS_RECORDING_QUEUE_URL
@@ -73,14 +91,16 @@ def process_messages():
        logger.error("process_messages", error=str(e))


# only whereby supported.
@shared_task
@asynctask
async def process_recording(bucket_name: str, object_key: str):
    logger.info("Processing recording: %s/%s", bucket_name, object_key)

    # extract a guid and a datetime from the object key
    room_name = f"/{object_key[:36]}"
    recorded_at = parse_datetime_with_timezone(object_key[37:57])
    room_name_part, recorded_at = parse_whereby_recording_filename(object_key)

    # we store whereby api room names, NOT whereby room names
    room_name = room_name_to_whereby_api_room_name(room_name_part)

    meeting = await meetings_controller.get_by_room_name(room_name)
    room = await rooms_controller.get_by_id(meeting.room_id)
@@ -102,6 +122,7 @@ async def process_recording(bucket_name: str, object_key: str):
            transcript,
            {
                "topics": [],
                "participants": [],
            },
        )
    else:
@@ -121,15 +142,15 @@ async def process_recording(bucket_name: str, object_key: str):
    upload_filename = transcript.data_path / f"upload{extension}"
    upload_filename.parent.mkdir(parents=True, exist_ok=True)

    s3 = boto3.client(
        "s3",
        region_name=settings.TRANSCRIPT_STORAGE_AWS_REGION,
        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
    )
    storage = get_transcripts_storage()

    with open(upload_filename, "wb") as f:
        s3.download_fileobj(bucket_name, object_key, f)
    try:
        with open(upload_filename, "wb") as f:
            await storage.stream_to_fileobj(object_key, f, bucket=bucket_name)
    except Exception:
        # Clean up partial file on stream failure
        upload_filename.unlink(missing_ok=True)
        raise

    container = av.open(upload_filename.as_posix())
    try:
@@ -146,6 +167,335 @@ async def process_recording(bucket_name: str, object_key: str):
    task_pipeline_file_process.delay(transcript_id=transcript.id)


@shared_task
@asynctask
async def process_multitrack_recording(
    bucket_name: str,
    daily_room_name: DailyRoomName,
    recording_id: str,
    track_keys: list[str],
):
    logger.info(
        "Processing multitrack recording",
        bucket=bucket_name,
        room_name=daily_room_name,
        recording_id=recording_id,
        provided_keys=len(track_keys),
    )

    if not track_keys:
        logger.warning("No audio track keys provided")
        return

    lock_key = recording_lock_key(recording_id)
    async with RedisAsyncLock(
        key=lock_key,
        timeout=600,  # 10min for processing (includes API calls, DB writes)
        extend_interval=60,  # Auto-extend every 60s
        skip_if_locked=True,
        blocking=False,
    ) as lock:
        if not lock.acquired:
            logger.warning(
                "Recording processing skipped - lock already held (duplicate task or concurrent worker)",
                recording_id=recording_id,
                lock_key=lock_key,
                reason="duplicate_task_or_concurrent_worker",
            )
            return

        logger.info(
            "Recording worker acquired lock - starting processing",
            recording_id=recording_id,
            lock_key=lock_key,
        )

        await _process_multitrack_recording_inner(
            bucket_name, daily_room_name, recording_id, track_keys
        )
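
The wrapper above is the at-most-once pattern used across these workers: a non-blocking distributed lock that is skipped, not awaited, when another worker already holds it. A minimal sketch of the shape (RedisAsyncLock is the project's own helper; the key string here is illustrative):

async def run_once(recording_id: str) -> None:
    async with RedisAsyncLock(
        key=f"recording_process:{recording_id}",
        timeout=600,          # hard TTL in case the worker dies mid-run
        extend_interval=60,   # heartbeat keeps the lock alive while working
        skip_if_locked=True,
        blocking=False,
    ) as lock:
        if not lock.acquired:
            return  # duplicate delivery; the first worker wins
        ...  # do the work exactly once
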
async def _process_multitrack_recording_inner(
    bucket_name: str,
    daily_room_name: DailyRoomName,
    recording_id: str,
    track_keys: list[str],
):
    """Inner function containing the actual processing logic."""

    tz = timezone.utc
    recorded_at = datetime.now(tz)
    try:
        if track_keys:
            folder = os.path.basename(os.path.dirname(track_keys[0]))
            ts_match = re.search(r"(\d{14})$", folder)
            if ts_match:
                ts = ts_match.group(1)
                recorded_at = datetime.strptime(ts, "%Y%m%d%H%M%S").replace(tzinfo=tz)
    except Exception as e:
        logger.warning(
            f"Could not parse recorded_at from keys, using now() {recorded_at}",
            e,
            exc_info=True,
        )

    meeting = await meetings_controller.get_by_room_name(daily_room_name)

    room_name_base = extract_base_room_name(daily_room_name)

    room = await rooms_controller.get_by_name(room_name_base)
    if not room:
        raise Exception(f"Room not found: {room_name_base}")

    if not meeting:
        raise Exception(f"Meeting not found: {room_name_base}")

    logger.info(
        "Found existing Meeting for recording",
        meeting_id=meeting.id,
        room_name=daily_room_name,
        recording_id=recording_id,
    )

    recording = await recordings_controller.get_by_id(recording_id)
    if not recording:
        object_key_dir = os.path.dirname(track_keys[0]) if track_keys else ""
        recording = await recordings_controller.create(
            Recording(
                id=recording_id,
                bucket_name=bucket_name,
                object_key=object_key_dir,
                recorded_at=recorded_at,
                meeting_id=meeting.id,
                track_keys=track_keys,
            )
        )
    # else: Recording already exists; metadata set at creation time

    transcript = await transcripts_controller.get_by_recording_id(recording.id)
    if not transcript:
        transcript = await transcripts_controller.add(
            "",
            source_kind=SourceKind.ROOM,
            source_language="en",
            target_language="en",
            user_id=room.user_id,
            recording_id=recording.id,
            share_mode="public",
            meeting_id=meeting.id,
            room_id=room.id,
        )

    task_pipeline_multitrack_process.delay(
        transcript_id=transcript.id,
        bucket_name=bucket_name,
        track_keys=filter_cam_audio_tracks(track_keys),
    )
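
The recorded_at recovery above assumes the track keys live in a folder whose name ends with a 14-digit timestamp; a worked example (the key itself is hypothetical):

key = "daily/myroom-20251202153000/abc123-cam-audio.webm"
folder = os.path.basename(os.path.dirname(key))    # "myroom-20251202153000"
ts = re.search(r"(\d{14})$", folder).group(1)      # "20251202153000"
recorded_at = datetime.strptime(ts, "%Y%m%d%H%M%S").replace(tzinfo=timezone.utc)
# -> 2025-12-02 15:30:00+00:00
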
@shared_task
@asynctask
async def poll_daily_recordings():
    """Poll Daily.co API for recordings and process missing ones.

    Fetches the latest recordings from the Daily.co API (default limit 100),
    compares them with the DB, and queues processing for recordings not
    already in the DB.

    For each missing recording, the audio track keys are taken from the API
    response.

    Worker-level locking provides idempotency (see process_multitrack_recording).
    """
    bucket_name = settings.DAILYCO_STORAGE_AWS_BUCKET_NAME
    if not bucket_name:
        logger.debug(
            "DAILYCO_STORAGE_AWS_BUCKET_NAME not configured; skipping recording poll"
        )
        return

    async with create_platform_client("daily") as daily_client:
        # latest 100. TODO cursor-based state
        api_recordings: List[RecordingResponse] = await daily_client.list_recordings()

    if not api_recordings:
        logger.debug(
            "No recordings found from Daily.co API",
        )
        return

    recording_ids = [rec.id for rec in api_recordings]
    existing_recordings = await recordings_controller.get_by_ids(recording_ids)
    existing_ids = {rec.id for rec in existing_recordings}

    missing_recordings = [rec for rec in api_recordings if rec.id not in existing_ids]

    if not missing_recordings:
        logger.debug(
            "All recordings already in DB",
            api_count=len(api_recordings),
            existing_count=len(existing_recordings),
        )
        return

    logger.info(
        "Found recordings missing from DB",
        missing_count=len(missing_recordings),
        total_api_count=len(api_recordings),
        existing_count=len(existing_recordings),
    )

    for recording in missing_recordings:
        if not recording.tracks:
            if recording.status == "finished":
                logger.warning(
                    "Finished recording has no tracks (no audio captured)",
                    recording_id=recording.id,
                    room_name=recording.room_name,
                )
            else:
                logger.debug(
                    "No tracks in recording yet",
                    recording_id=recording.id,
                    room_name=recording.room_name,
                    status=recording.status,
                )
            continue

        track_keys = [t.s3Key for t in recording.tracks if t.type == "audio"]

        if not track_keys:
            logger.warning(
                "No audio tracks found in recording (only video tracks)",
                recording_id=recording.id,
                room_name=recording.room_name,
                total_tracks=len(recording.tracks),
            )
            continue

        logger.info(
            "Queueing missing recording for processing",
            recording_id=recording.id,
            room_name=recording.room_name,
            track_count=len(track_keys),
        )

        process_multitrack_recording.delay(
            bucket_name=bucket_name,
            daily_room_name=recording.room_name,
            recording_id=recording.id,
            track_keys=track_keys,
        )
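
The poll reduces to a one-way diff between the API view and the DB view; a condensed sketch of the comparison (names are illustrative):

api_ids = {rec.id for rec in api_recordings}
db_ids = {rec.id for rec in existing_recordings}
missing = api_ids - db_ids  # upstream but not local -> queue for processing
# DB rows absent from the API are left alone: the API only returns the
# latest page, so absence there does not mean the recording disappeared.
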
async def poll_daily_room_presence(meeting_id: str) -> None:
    """Poll Daily.co room presence and reconcile it with DB sessions. New presence is added; old presence is marked as closed.

    Warning: the Daily API returns only the current state, so presence updates can be missed for participants who joined and left the room quickly. Therefore set(presences) != set(recordings) even if everyone spoke. This is not a problem, but worth noting."""

    async with RedisAsyncLock(
        key=f"meeting_presence_poll:{meeting_id}",
        timeout=120,
        extend_interval=30,
        skip_if_locked=True,
        blocking=False,
    ) as lock:
        if not lock.acquired:
            logger.debug(
                "Concurrent poll skipped (duplicate task)", meeting_id=meeting_id
            )
            return

        meeting = await meetings_controller.get_by_id(meeting_id)
        if not meeting:
            logger.warning("Meeting not found", meeting_id=meeting_id)
            return

        async with create_platform_client("daily") as daily_client:
            try:
                presence = await daily_client.get_room_presence(meeting.room_name)
            except Exception as e:
                logger.error(
                    "Daily.co API fetch failed",
                    meeting_id=meeting.id,
                    room_name=meeting.room_name,
                    error=str(e),
                    exc_info=True,
                )
                return

        api_participants = {p.id: p for p in presence.data}
        db_sessions = (
            await daily_participant_sessions_controller.get_all_sessions_for_meeting(
                meeting.id
            )
        )

        active_session_ids = {
            sid for sid, s in db_sessions.items() if s.left_at is None
        }
        missing_session_ids = set(api_participants.keys()) - active_session_ids
        stale_session_ids = active_session_ids - set(api_participants.keys())

        if missing_session_ids:
            missing_sessions = []
            for session_id in missing_session_ids:
                p = api_participants[session_id]
                session = DailyParticipantSession(
                    id=f"{meeting.id}:{session_id}",
                    meeting_id=meeting.id,
                    room_id=meeting.room_id,
                    session_id=session_id,
                    user_id=p.userId,
                    user_name=p.userName,
                    joined_at=datetime.fromisoformat(p.joinTime),
                    left_at=None,
                )
                missing_sessions.append(session)

            await daily_participant_sessions_controller.batch_upsert_sessions(
                missing_sessions
            )
            logger.info(
                "Sessions added",
                meeting_id=meeting.id,
                count=len(missing_sessions),
            )

        if stale_session_ids:
            composite_ids = [f"{meeting.id}:{sid}" for sid in stale_session_ids]
            await daily_participant_sessions_controller.batch_close_sessions(
                composite_ids,
                left_at=datetime.now(timezone.utc),
            )
            logger.info(
                "Stale sessions closed",
                meeting_id=meeting.id,
                count=len(composite_ids),
            )

        final_active_count = len(api_participants)
        if meeting.num_clients != final_active_count:
            await meetings_controller.update_meeting(
                meeting.id,
                num_clients=final_active_count,
            )
            logger.info(
                "num_clients updated",
                meeting_id=meeting.id,
                old_value=meeting.num_clients,
                new_value=final_active_count,
            )
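
The reconciliation itself is two set differences over session ids; a minimal worked example (ids are made up):

api = {"s1", "s2"}        # sessions Daily currently reports
active = {"s2", "s3"}     # DB sessions with left_at is None
missing = api - active    # {"s1"}: joined since the last poll -> upsert
stale = active - api      # {"s3"}: no longer in the room -> close with left_at=now
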
@shared_task
@asynctask
async def poll_daily_room_presence_task(meeting_id: str) -> None:
    """Celery task wrapper for poll_daily_room_presence.

    Queued by webhooks or the reconciliation timer.
    """
    await poll_daily_room_presence(meeting_id)


@shared_task
@asynctask
async def process_meetings():
@@ -164,139 +514,220 @@ async def process_meetings():
    Uses distributed locking to prevent race conditions when multiple workers
    process the same meeting simultaneously.
    """
    logger.info("Processing meetings")

    meetings = await meetings_controller.get_all_active()
    logger.info(f"Processing {len(meetings)} meetings")
    current_time = datetime.now(timezone.utc)
    redis_client = get_redis_client()
    processed_count = 0
    skipped_count = 0

    for meeting in meetings:
        logger_ = logger.bind(meeting_id=meeting.id, room_name=meeting.room_name)
        lock_key = f"meeting_process_lock:{meeting.id}"
        lock = redis_client.lock(lock_key, timeout=120)
        logger_.info("Processing meeting")

        try:
            if not lock.acquire(blocking=False):
                logger_.debug("Meeting is being processed by another worker, skipping")
                skipped_count += 1
                continue
            async with RedisAsyncLock(
                key=f"meeting_process_lock:{meeting.id}",
                timeout=120,
                extend_interval=30,
                skip_if_locked=True,
                blocking=False,
            ) as lock:
                if not lock.acquired:
                    logger_.debug(
                        "Meeting is being processed by another worker, skipping"
                    )
                    skipped_count += 1
                    continue

            # Process the meeting
            should_deactivate = False
            end_date = meeting.end_date
            if end_date.tzinfo is None:
                end_date = end_date.replace(tzinfo=timezone.utc)
                # Process the meeting
                should_deactivate = False
                end_date = meeting.end_date
                if end_date.tzinfo is None:
                    end_date = end_date.replace(tzinfo=timezone.utc)

            # This API call could be slow, extend lock if needed
            response = await get_room_sessions(meeting.room_name)
                client = create_platform_client(meeting.platform)
                room_sessions = await client.get_room_sessions(meeting.room_name)

            try:
                # Extend lock after slow operation to ensure we still hold it
                lock.extend(120, replace_ttl=True)
            except LockError:
                logger_.warning("Lost lock for meeting, skipping")
                continue

            room_sessions = response.get("results", [])
            has_active_sessions = room_sessions and any(
                rs["endedAt"] is None for rs in room_sessions
            )
            has_had_sessions = bool(room_sessions)

            if has_active_sessions:
                logger_.debug("Meeting still has active sessions, keep it")
            elif has_had_sessions:
                should_deactivate = True
                logger_.info("Meeting ended - all participants left")
            elif current_time > end_date:
                should_deactivate = True
                logger_.info(
                    "Meeting deactivated - scheduled time ended with no participants",
                has_active_sessions = room_sessions and any(
                    s.ended_at is None for s in room_sessions
                )
                has_had_sessions = bool(room_sessions)
                logger_.info(
                    f"found {has_active_sessions} active sessions, had {has_had_sessions}"
                )
            else:
                logger_.debug("Meeting not yet started, keep it")

            if should_deactivate:
                await meetings_controller.update_meeting(meeting.id, is_active=False)
                logger_.info("Meeting is deactivated")
                if has_active_sessions:
                    logger_.debug("Meeting still has active sessions, keep it")
                elif has_had_sessions:
                    should_deactivate = True
                    logger_.info("Meeting ended - all participants left")
                elif current_time > end_date:
                    should_deactivate = True
                    logger_.info(
                        "Meeting deactivated - scheduled time ended with no participants",
                    )
                else:
                    logger_.debug("Meeting not yet started, keep it")

                processed_count += 1
                if should_deactivate:
                    await meetings_controller.update_meeting(
                        meeting.id, is_active=False
                    )
                    logger_.info("Meeting is deactivated")

                processed_count += 1

        except Exception:
            logger_.error("Error processing meeting", exc_info=True)
        finally:
            try:
                lock.release()
            except LockError:
                pass  # Lock already released or expired

    logger.info(
    logger.debug(
        "Processed meetings finished",
        processed_count=processed_count,
        skipped_count=skipped_count,
    )


async def convert_audio_and_waveform(transcript) -> None:
    """Convert WebM to MP3 and generate waveform for Daily.co recordings.

    This bypasses the full file pipeline which would overwrite stub data.
    """
    try:
        logger.info(
            "Converting audio to MP3 and generating waveform",
            transcript_id=transcript.id,
        )

        upload_path = transcript.data_path / "upload.webm"
        mp3_path = transcript.audio_mp3_filename

        # Convert WebM to MP3
        mp3_writer = AudioFileWriterProcessor(path=mp3_path)

        container = av.open(str(upload_path))
        for frame in container.decode(audio=0):
            await mp3_writer.push(frame)
        await mp3_writer.flush()
        container.close()

        logger.info(
            "Converted WebM to MP3",
            transcript_id=transcript.id,
            mp3_size=mp3_path.stat().st_size,
        )

        waveform_processor = AudioWaveformProcessor(
            audio_path=mp3_path,
            waveform_path=transcript.audio_waveform_filename,
        )
        waveform_processor.set_pipeline(EmptyPipeline(logger))
        await waveform_processor.flush()

        logger.info(
            "Generated waveform",
            transcript_id=transcript.id,
            waveform_path=transcript.audio_waveform_filename,
        )

        # Update transcript status to ended (successful)
        await transcripts_controller.update(transcript, {"status": "ended"})

    except Exception as e:
        logger.error(
            "Failed to convert audio or generate waveform",
            transcript_id=transcript.id,
            error=str(e),
        )
        # Keep status as uploaded even if conversion fails
        pass


@shared_task
@asynctask
async def reprocess_failed_recordings():
    """
    Find recordings in the S3 bucket and check if they have proper transcriptions.
    Find recordings in Whereby S3 bucket and check if they have proper transcriptions.
    If not, requeue them for processing.
    """
    logger.info("Checking for recordings that need processing or reprocessing")

    s3 = boto3.client(
        "s3",
        region_name=settings.TRANSCRIPT_STORAGE_AWS_REGION,
        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
    )
    Note: Daily.co recordings are processed via webhooks, not this cron job.
    """
    logger.info("Checking Whereby recordings that need processing or reprocessing")

    if not settings.WHEREBY_STORAGE_AWS_BUCKET_NAME:
        raise ValueError(
            "WHEREBY_STORAGE_AWS_BUCKET_NAME required for Whereby recording reprocessing. "
            "Set WHEREBY_STORAGE_AWS_BUCKET_NAME environment variable."
        )

    storage = get_transcripts_storage()
    bucket_name = settings.WHEREBY_STORAGE_AWS_BUCKET_NAME

    reprocessed_count = 0
    try:
        paginator = s3.get_paginator("list_objects_v2")
        bucket_name = settings.RECORDING_STORAGE_AWS_BUCKET_NAME
        pages = paginator.paginate(Bucket=bucket_name)
        object_keys = await storage.list_objects(prefix="", bucket=bucket_name)

        for page in pages:
            if "Contents" not in page:
        for object_key in object_keys:
            if not object_key.endswith(".mp4"):
                continue

            for obj in page["Contents"]:
                object_key = obj["Key"]
            recording = await recordings_controller.get_by_object_key(
                bucket_name, object_key
            )
            if not recording:
                logger.info(f"Queueing recording for processing: {object_key}")
                process_recording.delay(bucket_name, object_key)
                reprocessed_count += 1
                continue

            if not (object_key.endswith(".mp4")):
                continue

            recording = await recordings_controller.get_by_object_key(
                bucket_name, object_key
            transcript = None
            try:
                transcript = await transcripts_controller.get_by_recording_id(
                    recording.id
                )
            except ValidationError:
                await transcripts_controller.remove_by_recording_id(recording.id)
                logger.warning(
                    f"Removed invalid transcript for recording: {recording.id}"
                )
            if not recording:
                logger.info(f"Queueing recording for processing: {object_key}")
                process_recording.delay(bucket_name, object_key)
                reprocessed_count += 1
                continue

            transcript = None
            try:
                transcript = await transcripts_controller.get_by_recording_id(
                    recording.id
                )
            except ValidationError:
                await transcripts_controller.remove_by_recording_id(recording.id)
                logger.warning(
                    f"Removed invalid transcript for recording: {recording.id}"
                )

            if transcript is None or transcript.status == "error":
                logger.info(f"Queueing recording for processing: {object_key}")
                process_recording.delay(bucket_name, object_key)
                reprocessed_count += 1
            if transcript is None or transcript.status == "error":
                logger.info(f"Queueing recording for processing: {object_key}")
                process_recording.delay(bucket_name, object_key)
                reprocessed_count += 1

    except Exception as e:
        logger.error(f"Error checking S3 bucket: {str(e)}")

    logger.info(f"Reprocessing complete. Requeued {reprocessed_count} recordings")
    return reprocessed_count
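
Summarizing the branch logic above as a decision table (a paraphrase of the code, not new behavior):

# For each .mp4 key found in the Whereby bucket:
#   no Recording row                        -> queue process_recording (first pass)
#   row exists, stored transcript invalid   -> remove transcript, then fall through
#   transcript is None or status == "error" -> queue process_recording (retry)
#   otherwise                               -> already processed, skip
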
@shared_task
@asynctask
async def trigger_daily_reconciliation() -> None:
    """Daily.co pull: queue a presence poll for every active Daily meeting."""
    try:
        active_meetings = await meetings_controller.get_all_active(platform="daily")
        queued_count = 0

        for meeting in active_meetings:
            try:
                poll_daily_room_presence_task.delay(meeting.id)
                queued_count += 1
            except Exception as e:
                logger.error(
                    "Failed to queue reconciliation poll",
                    meeting_id=meeting.id,
                    error=str(e),
                    exc_info=True,
                )
                raise

        if queued_count > 0:
            logger.debug(
                "Reconciliation polls queued",
                count=queued_count,
            )

    except Exception as e:
        logger.error("Reconciliation trigger failed", error=str(e), exc_info=True)
server/scripts/list_daily_webhooks.py (new executable file, 53 lines)
@@ -0,0 +1,53 @@
#!/usr/bin/env python3

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent))

from reflector.dailyco_api import DailyApiClient
from reflector.settings import settings


async def list_webhooks():
    """List all Daily.co webhooks for this account using dailyco_api module."""
    if not settings.DAILY_API_KEY:
        print("Error: DAILY_API_KEY not set")
        return 1

    async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client:
        try:
            webhooks = await client.list_webhooks()

            if not webhooks:
                print("No webhooks found")
                return 0

            print(f"Found {len(webhooks)} webhook(s):\n")

            for webhook in webhooks:
                print("=" * 80)
                print(f"UUID: {webhook.uuid}")
                print(f"URL: {webhook.url}")
                print(f"State: {webhook.state}")
                print(f"Event Types: {', '.join(webhook.eventTypes)}")
                print(
                    f"HMAC Secret: {'✓ Configured' if webhook.hmac else '✗ Not set'}"
                )
                print()

            print("=" * 80)
            print(
                f"\nCurrent DAILY_WEBHOOK_UUID in settings: {settings.DAILY_WEBHOOK_UUID or '(not set)'}"
            )

            return 0

        except Exception as e:
            print(f"Error fetching webhooks: {e}")
            return 1


if __name__ == "__main__":
    sys.exit(asyncio.run(list_webhooks()))
server/scripts/migrate_user_ids.py (new executable file, 292 lines)
@@ -0,0 +1,292 @@
#!/usr/bin/env python3
"""
Manual Migration Script: Migrate user_id from Authentik UID to internal user.id

This script should be run manually AFTER applying the database schema migrations.

Usage:
    AUTHENTIK_API_URL=https://your-authentik-url \
    AUTHENTIK_API_TOKEN=your-token \
    DATABASE_URL=postgresql://... \
    python scripts/migrate_user_ids.py

What this script does:
1. Collects all unique Authentik UIDs currently used in the database
2. Fetches only those users from Authentik API to populate the users table
3. Updates user_id in: user_api_key, transcript, room, meeting_consent
4. Uses user.authentik_uid to lookup the corresponding user.id

The script is idempotent:
- User inserts use ON CONFLICT DO NOTHING (safe if users already exist)
- Update queries only match authentik_uid->uuid pairs (no-op if already migrated)
- Safe to run multiple times without side effects

Prerequisites:
- AUTHENTIK_API_URL environment variable must be set
- AUTHENTIK_API_TOKEN environment variable must be set
- DATABASE_URL environment variable must be set
- Authentik API must be accessible
"""

import asyncio
import os
import sys
from datetime import datetime, timezone
from typing import Any

import httpx
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncConnection, create_async_engine

TABLES_WITH_USER_ID = ["user_api_key", "transcript", "room", "meeting_consent"]
NULLABLE_USER_ID_TABLES = {"transcript", "meeting_consent"}
AUTHENTIK_PAGE_SIZE = 100
HTTP_TIMEOUT = 30.0


class AuthentikClient:
    def __init__(self, api_url: str, api_token: str):
        self.api_url = api_url
        self.api_token = api_token

    def _get_headers(self) -> dict[str, str]:
        return {
            "Authorization": f"Bearer {self.api_token}",
            "Accept": "application/json",
        }

    async def fetch_all_users(self) -> list[dict[str, Any]]:
        all_users = []
        page = 1

        try:
            async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client:
                while True:
                    url = f"{self.api_url}/api/v3/core/users/"
                    params = {
                        "page": page,
                        "page_size": AUTHENTIK_PAGE_SIZE,
                        "include_groups": "false",
                    }

                    print(f" Fetching users from Authentik (page {page})...")
                    response = await client.get(
                        url, headers=self._get_headers(), params=params
                    )
                    response.raise_for_status()
                    data = response.json()

                    results = data.get("results", [])
                    if not results:
                        break

                    all_users.extend(results)
                    print(f" Fetched {len(results)} users from page {page}")

                    if not data.get("next"):
                        break

                    page += 1

            print(f" Total: {len(all_users)} users fetched from Authentik")
            return all_users

        except httpx.HTTPError as e:
            raise Exception(f"Failed to fetch users from Authentik: {e}") from e


async def collect_used_authentik_uids(connection: AsyncConnection) -> set[str]:
    print("\nStep 1: Collecting Authentik UIDs from database tables...")
    used_authentik_uids = set()

    for table in TABLES_WITH_USER_ID:
        result = await connection.execute(
            text(f'SELECT DISTINCT user_id FROM "{table}" WHERE user_id IS NOT NULL')
        )
        authentik_uids = [row[0] for row in result.fetchall()]
        used_authentik_uids.update(authentik_uids)
        print(f" Found {len(authentik_uids)} unique Authentik UIDs in {table}")

    print(f" Total unique user IDs found: {len(used_authentik_uids)}")

    if used_authentik_uids:
        sample_id = next(iter(used_authentik_uids))
        if len(sample_id) == 36 and sample_id.count("-") == 4:
            print(
                f"\n✅ User IDs are already in UUID format (e.g., {sample_id[:20]}...)"
            )
            print("Migration has already been completed!")
            return set()

    return used_authentik_uids


def filter_users_by_authentik_uid(
    authentik_users: list[dict[str, Any]], used_authentik_uids: set[str]
) -> tuple[list[dict[str, Any]], set[str]]:
    used_authentik_users = [
        user for user in authentik_users if user.get("uid") in used_authentik_uids
    ]

    missing_ids = used_authentik_uids - {u.get("uid") for u in used_authentik_users}

    print(
        f" Found {len(used_authentik_users)} matching users in Authentik "
        f"(out of {len(authentik_users)} total)"
    )

    if missing_ids:
        print(
            f" ⚠ Warning: {len(missing_ids)} Authentik UIDs in database not found in Authentik:"
        )
        for user_id in sorted(missing_ids):
            print(f" - {user_id}")

    return used_authentik_users, missing_ids


async def sync_users_to_database(
    connection: AsyncConnection, authentik_users: list[dict[str, Any]]
) -> tuple[int, int]:
    created = 0
    skipped = 0
    now = datetime.now(timezone.utc)

    for authentik_user in authentik_users:
        user_id = authentik_user["uuid"]
        authentik_uid = authentik_user["uid"]
        email = authentik_user.get("email")

        if not email:
            print(f" ⚠ Skipping user {authentik_uid} (no email)")
            skipped += 1
            continue

        result = await connection.execute(
            text("""
                INSERT INTO "user" (id, email, authentik_uid, created_at, updated_at)
                VALUES (:id, :email, :authentik_uid, :created_at, :updated_at)
                ON CONFLICT (id) DO NOTHING
            """),
            {
                "id": user_id,
                "email": email,
                "authentik_uid": authentik_uid,
                "created_at": now,
                "updated_at": now,
            },
        )
        if result.rowcount > 0:
            created += 1

    return created, skipped


async def migrate_all_user_ids(connection: AsyncConnection) -> int:
    print("\nStep 3: Migrating user_id columns from Authentik UID to internal UUID...")
    print("(If no rows are updated, migration may have already been completed)")

    total_updated = 0

    for table in TABLES_WITH_USER_ID:
        null_check = (
            f"AND {table}.user_id IS NOT NULL"
            if table in NULLABLE_USER_ID_TABLES
            else ""
        )

        query = f"""
            UPDATE {table}
            SET user_id = u.id
            FROM "user" u
            WHERE {table}.user_id = u.authentik_uid
            {null_check}
        """

        print(f" Updating {table}.user_id...")
        result = await connection.execute(text(query))
        rows = result.rowcount
        print(f" ✓ Updated {rows} rows")
        total_updated += rows

    return total_updated
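
The join-update is what makes re-runs a no-op: once user_id holds the internal UUID it no longer equals any authentik_uid, so the WHERE clause matches nothing. A worked example for one table (all values made up):

# Before:  transcript.user_id = "ak-3f9c"           (Authentik UID)
#          "user" row: id = "7b2e-uuid", authentik_uid = "ak-3f9c"
# UPDATE transcript SET user_id = u.id
#   FROM "user" u WHERE transcript.user_id = u.authentik_uid;
# After:   transcript.user_id = "7b2e-uuid"
# Re-run:  "7b2e-uuid" matches no authentik_uid -> 0 rows updated.
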
async def run_migration(
    database_url: str, authentik_api_url: str, authentik_api_token: str
) -> None:
    engine = create_async_engine(database_url)

    try:
        async with engine.begin() as connection:
            used_authentik_uids = await collect_used_authentik_uids(connection)
            if not used_authentik_uids:
                print("\n⚠️ No user IDs found in database. Nothing to migrate.")
                print("Migration complete (no-op)!")
                return

            print("\nStep 2: Fetching user data from Authentik and syncing users...")
            print("(This script is idempotent - safe to run multiple times)")
            print(f"Authentik API URL: {authentik_api_url}")

            client = AuthentikClient(authentik_api_url, authentik_api_token)
            authentik_users = await client.fetch_all_users()

            if not authentik_users:
                print("\nERROR: No users returned from Authentik API.")
                print(
                    "Please verify your Authentik configuration and ensure users exist."
                )
                sys.exit(1)

            used_authentik_users, _ = filter_users_by_authentik_uid(
                authentik_users, used_authentik_uids
            )
            created, skipped = await sync_users_to_database(
                connection, used_authentik_users
            )

            if created > 0:
                print(f"✓ Created {created} users from Authentik")
            else:
                print("✓ No new users created (users may already exist)")

            if skipped > 0:
                print(f" ⚠ Skipped {skipped} users without email")

            result = await connection.execute(text('SELECT COUNT(*) FROM "user"'))
            user_count = result.scalar()
            print(f"✓ Users table now has {user_count} users")

            total_updated = await migrate_all_user_ids(connection)

            if total_updated > 0:
                print(f"\n✅ Migration complete! Updated {total_updated} total rows.")
            else:
                print(
                    "\n✅ Migration complete! (No rows updated - migration may have already been completed)"
                )

    except Exception as e:
        print(f"\n❌ ERROR: Migration failed: {e}")
        sys.exit(1)
    finally:
        await engine.dispose()


async def main() -> None:
    database_url = os.getenv("DATABASE_URL")
    authentik_api_url = os.getenv("AUTHENTIK_API_URL")
    authentik_api_token = os.getenv("AUTHENTIK_API_TOKEN")

    if not database_url or not authentik_api_url or not authentik_api_token:
        print(
            "ERROR: DATABASE_URL, AUTHENTIK_API_URL, and AUTHENTIK_API_TOKEN must be set"
        )
        sys.exit(1)

    await run_migration(database_url, authentik_api_url, authentik_api_token)


if __name__ == "__main__":
    asyncio.run(main())
server/scripts/recreate_daily_webhook.py (new file, 127 lines)
@@ -0,0 +1,127 @@
#!/usr/bin/env python3

import asyncio
import sys
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent))

from reflector.dailyco_api import (
    CreateWebhookRequest,
    DailyApiClient,
)
from reflector.settings import settings


async def setup_webhook(webhook_url: str):
    """
    Create or update Daily.co webhook for this environment using dailyco_api module.

    Uses DAILY_WEBHOOK_UUID to identify existing webhook.
    """
    if not settings.DAILY_API_KEY:
        print("Error: DAILY_API_KEY not set")
        return 1

    if not settings.DAILY_WEBHOOK_SECRET:
        print("Error: DAILY_WEBHOOK_SECRET not set")
        return 1

    event_types = [
        "participant.joined",
        "participant.left",
        "recording.started",
        "recording.ready-to-download",
        "recording.error",
    ]

    async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client:
        webhook_uuid = settings.DAILY_WEBHOOK_UUID

        if webhook_uuid:
            print(f"Updating existing webhook {webhook_uuid}...")
            try:
                # Note: Daily.co doesn't support PATCH well, so we delete + recreate
                await client.delete_webhook(webhook_uuid)
                print(f"Deleted old webhook {webhook_uuid}")

                request = CreateWebhookRequest(
                    url=webhook_url,
                    eventTypes=event_types,
                    hmac=settings.DAILY_WEBHOOK_SECRET,
                )
                result = await client.create_webhook(request)

                print(
                    f"✓ Created replacement webhook {result.uuid} (state: {result.state})"
                )
                print(f" URL: {result.url}")

                webhook_uuid = result.uuid

            except Exception as e:
                if hasattr(e, "response") and e.response.status_code == 404:
                    print(f"Webhook {webhook_uuid} not found, creating new one...")
                    webhook_uuid = None  # Fall through to creation
                else:
                    print(f"Error updating webhook: {e}")
                    return 1

        if not webhook_uuid:
            print("Creating new webhook...")
            request = CreateWebhookRequest(
                url=webhook_url,
                eventTypes=event_types,
                hmac=settings.DAILY_WEBHOOK_SECRET,
            )
            result = await client.create_webhook(request)
            webhook_uuid = result.uuid

            print(f"✓ Created webhook {webhook_uuid} (state: {result.state})")
            print(f" URL: {result.url}")
            print()
            print("=" * 60)
            print("IMPORTANT: Add this to your environment variables:")
            print("=" * 60)
            print(f"DAILY_WEBHOOK_UUID: {webhook_uuid}")
            print("=" * 60)
            print()

            # Try to write UUID to .env file
            env_file = Path(__file__).parent.parent / ".env"
            if env_file.exists():
                lines = env_file.read_text().splitlines()
                updated = False

                # Update existing DAILY_WEBHOOK_UUID line or add it
                for i, line in enumerate(lines):
                    if line.startswith("DAILY_WEBHOOK_UUID="):
                        lines[i] = f"DAILY_WEBHOOK_UUID={webhook_uuid}"
                        updated = True
                        break

                if not updated:
                    lines.append(f"DAILY_WEBHOOK_UUID={webhook_uuid}")

                env_file.write_text("\n".join(lines) + "\n")
                print("✓ Also saved to local .env file")
            else:
                print("⚠ Local .env file not found - please add manually")

    return 0


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python recreate_daily_webhook.py <webhook_url>")
        print(
            "Example: python recreate_daily_webhook.py https://example.com/v1/daily/webhook"
        )
        print()
        print("Behavior:")
        print(" - If DAILY_WEBHOOK_UUID set: Deletes old webhook, creates new one")
        print(
            " - If DAILY_WEBHOOK_UUID empty: Creates new webhook, saves UUID to .env"
        )
        sys.exit(1)

    sys.exit(asyncio.run(setup_webhook(sys.argv[1])))
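The hmac field registered above is the secret Daily.co uses to sign webhook deliveries. As a minimal sketch of the receiving side, assuming the common HMAC-SHA256-over-raw-body scheme (Daily.co's exact header layout is not shown in this diff; the server's real handler should be consulted):

    import hashlib
    import hmac

    def verify_daily_signature(body: bytes, signature: str, secret: str) -> bool:
        # Hypothetical check: recompute HMAC-SHA256 of the raw request body and
        # compare in constant time. The actual Daily.co scheme may differ.
        expected = hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
        return hmac.compare_digest(expected, signature)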
@@ -5,6 +5,18 @@ from unittest.mock import patch

import pytest

from reflector.schemas.platform import WHEREBY_PLATFORM


@pytest.fixture(scope="session", autouse=True)
def register_mock_platform():
    from mocks.mock_platform import MockPlatformClient

    from reflector.video_platforms.registry import register_platform

    register_platform(WHEREBY_PLATFORM, MockPlatformClient)
    yield


@pytest.fixture(scope="session", autouse=True)
def settings_configuration():
0
server/tests/mocks/__init__.py
Normal file
104
server/tests/mocks/mock_platform.py
Normal file
@@ -0,0 +1,104 @@
import uuid
from datetime import datetime
from typing import Any, Dict, Literal, Optional

from reflector.db.rooms import Room
from reflector.utils.string import NonEmptyString
from reflector.video_platforms.base import (
    ROOM_PREFIX_SEPARATOR,
    MeetingData,
    SessionData,
    VideoPlatformClient,
    VideoPlatformConfig,
)

MockPlatform = Literal["mock"]


class MockPlatformClient(VideoPlatformClient):
    PLATFORM_NAME: MockPlatform = "mock"

    def __init__(self, config: VideoPlatformConfig):
        super().__init__(config)
        self._rooms: Dict[str, Dict[str, Any]] = {}
        self._webhook_calls: list[Dict[str, Any]] = []

    async def create_meeting(
        self, room_name_prefix: str, end_date: datetime, room: Room
    ) -> MeetingData:
        meeting_id = str(uuid.uuid4())
        room_name = f"{room_name_prefix}{ROOM_PREFIX_SEPARATOR}{meeting_id[:8]}"
        room_url = f"https://mock.video/{room_name}"
        host_room_url = f"{room_url}?host=true"

        self._rooms[room_name] = {
            "id": meeting_id,
            "name": room_name,
            "url": room_url,
            "host_url": host_room_url,
            "end_date": end_date,
            "room": room,
            "participants": [],
            "is_active": True,
        }

        return MeetingData.model_construct(
            meeting_id=meeting_id,
            room_name=room_name,
            room_url=room_url,
            host_room_url=host_room_url,
            platform="whereby",
            extra_data={"mock": True},
        )

    async def get_room_sessions(self, room_name: NonEmptyString) -> list[SessionData]:
        if room_name not in self._rooms:
            return []

        room_data = self._rooms[room_name]
        return [
            SessionData(
                session_id=room_data["id"],
                started_at=datetime.utcnow(),
                ended_at=None if room_data["is_active"] else datetime.utcnow(),
            )
        ]

    async def upload_logo(self, room_name: str, logo_path: str) -> bool:
        if room_name in self._rooms:
            self._rooms[room_name]["logo_path"] = logo_path
            return True
        return False

    def verify_webhook_signature(
        self, body: bytes, signature: str, timestamp: Optional[str] = None
    ) -> bool:
        return signature == "valid"

    def add_participant(
        self, room_name: str, participant_id: str, participant_name: str
    ):
        if room_name in self._rooms:
            self._rooms[room_name]["participants"].append(
                {
                    "id": participant_id,
                    "name": participant_name,
                    "joined_at": datetime.utcnow().isoformat(),
                }
            )

    def trigger_webhook(self, event_type: str, data: Dict[str, Any]):
        self._webhook_calls.append(
            {
                "type": event_type,
                "data": data,
                "timestamp": datetime.utcnow().isoformat(),
            }
        )

    def get_webhook_calls(self) -> list[Dict[str, Any]]:
        return self._webhook_calls.copy()

    def clear_data(self):
        self._rooms.clear()
        self._webhook_calls.clear()
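A quick usage sketch of the mock inside a test body (assuming VideoPlatformConfig() accepts defaults and that a room and end_date come from other fixtures; only methods defined above are exercised):

    # Hypothetical snippet driving MockPlatformClient directly in a test.
    client = MockPlatformClient(VideoPlatformConfig())
    meeting = await client.create_meeting("standup", end_date=end_date, room=room)
    client.add_participant(meeting.room_name, "p-1", "Alice")
    client.trigger_webhook("participant.joined", {"participant_id": "p-1"})
    assert client.verify_webhook_signature(b"{}", "valid")
    assert client.get_webhook_calls()[0]["type"] == "participant.joined"
    client.clear_data()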
@@ -139,14 +139,10 @@ async def test_cleanup_deletes_associated_meeting_and_recording():
    mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7

    # Mock storage deletion
-   with patch("reflector.db.transcripts.get_transcripts_storage") as mock_storage:
+   with patch("reflector.worker.cleanup.get_transcripts_storage") as mock_storage:
        mock_storage.return_value.delete_file = AsyncMock()
        with patch(
            "reflector.worker.cleanup.get_recordings_storage"
        ) as mock_rec_storage:
            mock_rec_storage.return_value.delete_file = AsyncMock()

            result = await cleanup_old_public_data()

    # Check results
    assert result["transcripts_deleted"] == 1
330
server/tests/test_consent_multitrack.py
Normal file
@@ -0,0 +1,330 @@
from datetime import datetime, timezone
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from reflector.db.meetings import (
    MeetingConsent,
    meeting_consent_controller,
    meetings_controller,
)
from reflector.db.recordings import Recording, recordings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import SourceKind, transcripts_controller
from reflector.pipelines.main_live_pipeline import cleanup_consent


@pytest.mark.asyncio
async def test_consent_cleanup_deletes_multitrack_files():
    room = await rooms_controller.add(
        name="Test Room",
        user_id="test-user",
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic",
        is_shared=False,
        platform="daily",
    )

    # Create meeting
    meeting = await meetings_controller.create(
        id="test-multitrack-meeting",
        room_name="test-room-20250101120000",
        room_url="https://test.daily.co/test-room",
        host_room_url="https://test.daily.co/test-room",
        start_date=datetime.now(timezone.utc),
        end_date=datetime.now(timezone.utc),
        room=room,
    )

    track_keys = [
        "recordings/test-room-20250101120000/track-0.webm",
        "recordings/test-room-20250101120000/track-1.webm",
        "recordings/test-room-20250101120000/track-2.webm",
    ]
    recording = await recordings_controller.create(
        Recording(
            bucket_name="test-bucket",
            object_key="recordings/test-room-20250101120000",  # Folder path
            recorded_at=datetime.now(timezone.utc),
            meeting_id=meeting.id,
            track_keys=track_keys,
        )
    )

    # Create transcript
    transcript = await transcripts_controller.add(
        name="Test Multitrack Transcript",
        source_kind=SourceKind.ROOM,
        recording_id=recording.id,
        meeting_id=meeting.id,
    )

    # Add consent denial
    await meeting_consent_controller.upsert(
        MeetingConsent(
            meeting_id=meeting.id,
            user_id="test-user",
            consent_given=False,
            consent_timestamp=datetime.now(timezone.utc),
        )
    )

    # Mock get_transcripts_storage (master credentials with bucket override)
    with patch(
        "reflector.pipelines.main_live_pipeline.get_transcripts_storage"
    ) as mock_get_transcripts_storage:
        mock_master_storage = MagicMock()
        mock_master_storage.delete_file = AsyncMock()
        mock_get_transcripts_storage.return_value = mock_master_storage

        await cleanup_consent(transcript_id=transcript.id)

        # Verify master storage was used with bucket override for all track keys
        assert mock_master_storage.delete_file.call_count == 3
        deleted_keys = []
        for call_args in mock_master_storage.delete_file.call_args_list:
            key = call_args[0][0]
            bucket_kwarg = call_args[1].get("bucket")
            deleted_keys.append(key)
            assert bucket_kwarg == "test-bucket"  # Verify bucket override!
        assert set(deleted_keys) == set(track_keys)

    updated_transcript = await transcripts_controller.get_by_id(transcript.id)
    assert updated_transcript.audio_deleted is True


@pytest.mark.asyncio
async def test_consent_cleanup_handles_missing_track_keys():
    room = await rooms_controller.add(
        name="Test Room 2",
        user_id="test-user",
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic",
        is_shared=False,
        platform="daily",
    )

    # Create meeting
    meeting = await meetings_controller.create(
        id="test-multitrack-meeting-2",
        room_name="test-room-20250101120001",
        room_url="https://test.daily.co/test-room-2",
        host_room_url="https://test.daily.co/test-room-2",
        start_date=datetime.now(timezone.utc),
        end_date=datetime.now(timezone.utc),
        room=room,
    )

    recording = await recordings_controller.create(
        Recording(
            bucket_name="test-bucket",
            object_key="recordings/old-style-recording.mp4",
            recorded_at=datetime.now(timezone.utc),
            meeting_id=meeting.id,
            track_keys=None,
        )
    )

    transcript = await transcripts_controller.add(
        name="Test Old-Style Transcript",
        source_kind=SourceKind.ROOM,
        recording_id=recording.id,
        meeting_id=meeting.id,
    )

    # Add consent denial
    await meeting_consent_controller.upsert(
        MeetingConsent(
            meeting_id=meeting.id,
            user_id="test-user-2",
            consent_given=False,
            consent_timestamp=datetime.now(timezone.utc),
        )
    )

    # Mock get_transcripts_storage (master credentials with bucket override)
    with patch(
        "reflector.pipelines.main_live_pipeline.get_transcripts_storage"
    ) as mock_get_transcripts_storage:
        mock_master_storage = MagicMock()
        mock_master_storage.delete_file = AsyncMock()
        mock_get_transcripts_storage.return_value = mock_master_storage

        await cleanup_consent(transcript_id=transcript.id)

        # Verify master storage was used with bucket override
        assert mock_master_storage.delete_file.call_count == 1
        call_args = mock_master_storage.delete_file.call_args
        assert call_args[0][0] == recording.object_key
        assert call_args[1].get("bucket") == "test-bucket"  # Verify bucket override!


@pytest.mark.asyncio
async def test_consent_cleanup_empty_track_keys_falls_back():
    room = await rooms_controller.add(
        name="Test Room 3",
        user_id="test-user",
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic",
        is_shared=False,
        platform="daily",
    )

    # Create meeting
    meeting = await meetings_controller.create(
        id="test-multitrack-meeting-3",
        room_name="test-room-20250101120002",
        room_url="https://test.daily.co/test-room-3",
        host_room_url="https://test.daily.co/test-room-3",
        start_date=datetime.now(timezone.utc),
        end_date=datetime.now(timezone.utc),
        room=room,
    )

    recording = await recordings_controller.create(
        Recording(
            bucket_name="test-bucket",
            object_key="recordings/fallback-recording.mp4",
            recorded_at=datetime.now(timezone.utc),
            meeting_id=meeting.id,
            track_keys=[],
        )
    )

    transcript = await transcripts_controller.add(
        name="Test Empty Track Keys Transcript",
        source_kind=SourceKind.ROOM,
        recording_id=recording.id,
        meeting_id=meeting.id,
    )

    # Add consent denial
    await meeting_consent_controller.upsert(
        MeetingConsent(
            meeting_id=meeting.id,
            user_id="test-user-3",
            consent_given=False,
            consent_timestamp=datetime.now(timezone.utc),
        )
    )

    # Mock get_transcripts_storage (master credentials with bucket override)
    with patch(
        "reflector.pipelines.main_live_pipeline.get_transcripts_storage"
    ) as mock_get_transcripts_storage:
        mock_master_storage = MagicMock()
        mock_master_storage.delete_file = AsyncMock()
        mock_get_transcripts_storage.return_value = mock_master_storage

        # Run cleanup
        await cleanup_consent(transcript_id=transcript.id)

        # Verify master storage was used with bucket override
        assert mock_master_storage.delete_file.call_count == 1
        call_args = mock_master_storage.delete_file.call_args
        assert call_args[0][0] == recording.object_key
        assert call_args[1].get("bucket") == "test-bucket"  # Verify bucket override!


@pytest.mark.asyncio
async def test_consent_cleanup_partial_failure_doesnt_mark_deleted():
    room = await rooms_controller.add(
        name="Test Room 4",
        user_id="test-user",
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic",
        is_shared=False,
        platform="daily",
    )

    # Create meeting
    meeting = await meetings_controller.create(
        id="test-multitrack-meeting-4",
        room_name="test-room-20250101120003",
        room_url="https://test.daily.co/test-room-4",
        host_room_url="https://test.daily.co/test-room-4",
        start_date=datetime.now(timezone.utc),
        end_date=datetime.now(timezone.utc),
        room=room,
    )

    track_keys = [
        "recordings/test-room-20250101120003/track-0.webm",
        "recordings/test-room-20250101120003/track-1.webm",
        "recordings/test-room-20250101120003/track-2.webm",
    ]
    recording = await recordings_controller.create(
        Recording(
            bucket_name="test-bucket",
            object_key="recordings/test-room-20250101120003",
            recorded_at=datetime.now(timezone.utc),
            meeting_id=meeting.id,
            track_keys=track_keys,
        )
    )

    # Create transcript
    transcript = await transcripts_controller.add(
        name="Test Partial Failure Transcript",
        source_kind=SourceKind.ROOM,
        recording_id=recording.id,
        meeting_id=meeting.id,
    )

    # Add consent denial
    await meeting_consent_controller.upsert(
        MeetingConsent(
            meeting_id=meeting.id,
            user_id="test-user-4",
            consent_given=False,
            consent_timestamp=datetime.now(timezone.utc),
        )
    )

    # Mock get_transcripts_storage (master credentials with bucket override) with partial failure
    with patch(
        "reflector.pipelines.main_live_pipeline.get_transcripts_storage"
    ) as mock_get_transcripts_storage:
        mock_master_storage = MagicMock()

        call_count = 0

        async def delete_side_effect(key, bucket=None):
            nonlocal call_count
            call_count += 1
            if call_count == 2:
                raise Exception("S3 deletion failed")

        mock_master_storage.delete_file = AsyncMock(side_effect=delete_side_effect)
        mock_get_transcripts_storage.return_value = mock_master_storage

        await cleanup_consent(transcript_id=transcript.id)

        # Verify master storage was called with bucket override
        assert mock_master_storage.delete_file.call_count == 3

    updated_transcript = await transcripts_controller.get_by_id(transcript.id)
    assert (
        updated_transcript.audio_deleted is None
        or updated_transcript.audio_deleted is False
    )
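Taken together, these tests pin down the track-key handling in cleanup_consent. Roughly, the behavior they encode (a sketch of the expected logic only; the pipeline's actual code is not part of this diff):

    # Sketch: delete per-track files when track_keys is non-empty, otherwise
    # fall back to the single object_key; mark audio_deleted only if every
    # deletion succeeded.
    keys = recording.track_keys or [recording.object_key]
    storage = get_transcripts_storage()  # master credentials
    all_deleted = True
    for key in keys:
        try:
            # bucket override: tracks live in the recording's own bucket
            await storage.delete_file(key, bucket=recording.bucket_name)
        except Exception:
            all_deleted = False  # partial failure: leave audio_deleted unset
    if all_deleted:
        pass  # mark transcript.audio_deleted = True here (exact controller call not shown in this diff)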
466
server/tests/test_daily_room_presence_polling.py
Normal file
@@ -0,0 +1,466 @@
"""Tests for Daily.co room presence polling functionality.

TDD tests for Task 3.2: Room Presence Polling
- Query Daily.co API for current room participants
- Reconcile with DB sessions (add missing, close stale)
- Update meeting.num_clients if different
- Use batch operations for efficiency
"""

from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, patch

import pytest

from reflector.dailyco_api.responses import (
    RoomPresenceParticipant,
    RoomPresenceResponse,
)
from reflector.db.daily_participant_sessions import DailyParticipantSession
from reflector.db.meetings import Meeting
from reflector.worker.process import poll_daily_room_presence


@pytest.fixture
def mock_meeting():
    """Mock meeting with Daily.co room."""
    return Meeting(
        id="meeting-123",
        room_id="room-456",
        room_name="test-room-20251118120000",
        room_url="https://daily.co/test-room-20251118120000",
        host_room_url="https://daily.co/test-room-20251118120000?t=host-token",
        platform="daily",
        num_clients=2,
        is_active=True,
        start_date=datetime.now(timezone.utc),
        end_date=datetime.now(timezone.utc),
    )


@pytest.fixture
def mock_api_participants():
    """Mock Daily.co API presence response."""
    now = datetime.now(timezone.utc)
    return RoomPresenceResponse(
        total_count=2,
        data=[
            RoomPresenceParticipant(
                room="test-room-20251118120000",
                id="participant-1",
                userName="Alice",
                userId="user-alice",
                joinTime=(now - timedelta(minutes=10)).isoformat(),
                duration=600,
            ),
            RoomPresenceParticipant(
                room="test-room-20251118120000",
                id="participant-2",
                userName="Bob",
                userId="user-bob",
                joinTime=(now - timedelta(minutes=5)).isoformat(),
                duration=300,
            ),
        ],
    )


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.get_all_sessions_for_meeting"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_upsert_sessions"
)
async def test_poll_presence_adds_missing_sessions(
    mock_batch_upsert,
    mock_get_sessions,
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
    mock_api_participants,
):
    """Test that polling creates sessions for participants not in DB."""
    mock_get_by_id.return_value = mock_meeting

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(return_value=mock_api_participants)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    mock_get_sessions.return_value = {}
    mock_batch_upsert.return_value = None

    await poll_daily_room_presence(mock_meeting.id)

    assert mock_batch_upsert.call_count == 1
    sessions = mock_batch_upsert.call_args.args[0]
    assert len(sessions) == 2
    session_ids = {s.session_id for s in sessions}
    assert session_ids == {"participant-1", "participant-2"}


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.get_all_sessions_for_meeting"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_upsert_sessions"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_close_sessions"
)
async def test_poll_presence_closes_stale_sessions(
    mock_batch_close,
    mock_batch_upsert,
    mock_get_sessions,
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
    mock_api_participants,
):
    """Test that polling closes sessions for participants no longer in room."""
    mock_get_by_id.return_value = mock_meeting

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(return_value=mock_api_participants)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    now = datetime.now(timezone.utc)
    mock_get_sessions.return_value = {
        "participant-1": DailyParticipantSession(
            id="meeting-123:participant-1",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-1",
            user_id="user-alice",
            user_name="Alice",
            joined_at=now,
            left_at=None,
        ),
        "participant-stale": DailyParticipantSession(
            id="meeting-123:participant-stale",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-stale",
            user_id="user-stale",
            user_name="Stale User",
            joined_at=now - timedelta(seconds=120),  # Joined 2 minutes ago
            left_at=None,
        ),
    }

    await poll_daily_room_presence(mock_meeting.id)

    assert mock_batch_close.call_count == 1
    composite_ids = mock_batch_close.call_args.args[0]
    left_at = mock_batch_close.call_args.kwargs["left_at"]
    assert len(composite_ids) == 1
    assert "meeting-123:participant-stale" in composite_ids
    assert left_at is not None


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.get_all_sessions_for_meeting"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_upsert_sessions"
)
@patch("reflector.worker.process.meetings_controller.update_meeting")
async def test_poll_presence_updates_num_clients(
    mock_update_meeting,
    mock_batch_upsert,
    mock_get_sessions,
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
    mock_api_participants,
):
    """Test that polling updates num_clients when different from API."""
    meeting_with_wrong_count = mock_meeting
    meeting_with_wrong_count.num_clients = 5
    mock_get_by_id.return_value = meeting_with_wrong_count

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(return_value=mock_api_participants)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    mock_get_sessions.return_value = {}
    mock_batch_upsert.return_value = None

    await poll_daily_room_presence(meeting_with_wrong_count.id)

    assert mock_update_meeting.call_count == 1
    assert mock_update_meeting.call_args.kwargs["num_clients"] == 2


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.get_all_sessions_for_meeting"
)
async def test_poll_presence_no_changes_if_synced(
    mock_get_sessions,
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
    mock_api_participants,
):
    """Test that polling skips updates when DB already synced with API."""
    mock_get_by_id.return_value = mock_meeting

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(return_value=mock_api_participants)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    now = datetime.now(timezone.utc)
    mock_get_sessions.return_value = {
        "participant-1": DailyParticipantSession(
            id="meeting-123:participant-1",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-1",
            user_id="user-alice",
            user_name="Alice",
            joined_at=now,
            left_at=None,
        ),
        "participant-2": DailyParticipantSession(
            id="meeting-123:participant-2",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-2",
            user_id="user-bob",
            user_name="Bob",
            joined_at=now,
            left_at=None,
        ),
    }

    await poll_daily_room_presence(mock_meeting.id)


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.get_all_sessions_for_meeting"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_upsert_sessions"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_close_sessions"
)
async def test_poll_presence_mixed_add_and_remove(
    mock_batch_close,
    mock_batch_upsert,
    mock_get_sessions,
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
):
    """Test that polling handles simultaneous joins and leaves in single poll."""
    mock_get_by_id.return_value = mock_meeting

    now = datetime.now(timezone.utc)

    # API returns: participant-1 and participant-3 (new)
    api_response = RoomPresenceResponse(
        total_count=2,
        data=[
            RoomPresenceParticipant(
                room="test-room-20251118120000",
                id="participant-1",
                userName="Alice",
                userId="user-alice",
                joinTime=(now - timedelta(minutes=10)).isoformat(),
                duration=600,
            ),
            RoomPresenceParticipant(
                room="test-room-20251118120000",
                id="participant-3",
                userName="Charlie",
                userId="user-charlie",
                joinTime=now.isoformat(),
                duration=0,
            ),
        ],
    )

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(return_value=api_response)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    # DB has: participant-1 and participant-2 (left but not in API)
    mock_get_sessions.return_value = {
        "participant-1": DailyParticipantSession(
            id="meeting-123:participant-1",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-1",
            user_id="user-alice",
            user_name="Alice",
            joined_at=now - timedelta(minutes=10),
            left_at=None,
        ),
        "participant-2": DailyParticipantSession(
            id="meeting-123:participant-2",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-2",
            user_id="user-bob",
            user_name="Bob",
            joined_at=now - timedelta(minutes=5),
            left_at=None,
        ),
    }

    mock_batch_upsert.return_value = None
    mock_batch_close.return_value = None

    await poll_daily_room_presence(mock_meeting.id)

    # Verify participant-3 was added (missing in DB)
    assert mock_batch_upsert.call_count == 1
    sessions_added = mock_batch_upsert.call_args.args[0]
    assert len(sessions_added) == 1
    assert sessions_added[0].session_id == "participant-3"
    assert sessions_added[0].user_name == "Charlie"

    # Verify participant-2 was closed (stale in DB)
    assert mock_batch_close.call_count == 1
    composite_ids = mock_batch_close.call_args.args[0]
    assert len(composite_ids) == 1
    assert "meeting-123:participant-2" in composite_ids


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
async def test_poll_presence_handles_api_error(
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
):
    """Test that polling handles Daily.co API errors gracefully."""
    mock_get_by_id.return_value = mock_meeting

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(side_effect=Exception("API error"))
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    await poll_daily_room_presence(mock_meeting.id)


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.get_all_sessions_for_meeting"
)
@patch(
    "reflector.worker.process.daily_participant_sessions_controller.batch_close_sessions"
)
async def test_poll_presence_closes_all_when_room_empty(
    mock_batch_close,
    mock_get_sessions,
    mock_create_client,
    mock_get_by_id,
    mock_meeting,
):
    """Test that polling closes all sessions when room is empty."""
    mock_get_by_id.return_value = mock_meeting

    mock_daily_client = AsyncMock()
    mock_daily_client.get_room_presence = AsyncMock(
        return_value=RoomPresenceResponse(total_count=0, data=[])
    )
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    now = datetime.now(timezone.utc)
    mock_get_sessions.return_value = {
        "participant-1": DailyParticipantSession(
            id="meeting-123:participant-1",
            meeting_id="meeting-123",
            room_id="room-456",
            session_id="participant-1",
            user_id="user-alice",
            user_name="Alice",
            joined_at=now - timedelta(seconds=120),  # Joined 2 minutes ago (beyond grace period)
            left_at=None,
        ),
    }

    await poll_daily_room_presence(mock_meeting.id)

    assert mock_batch_close.call_count == 1
    composite_ids = mock_batch_close.call_args.args[0]
    left_at = mock_batch_close.call_args.kwargs["left_at"]
    assert len(composite_ids) == 1
    assert "meeting-123:participant-1" in composite_ids
    assert left_at is not None


@pytest.mark.asyncio
@patch("reflector.worker.process.RedisAsyncLock")
@patch("reflector.worker.process.meetings_controller.get_by_id")
@patch("reflector.worker.process.create_platform_client")
async def test_poll_presence_skips_if_locked(
    mock_create_client,
    mock_get_by_id,
    mock_redis_lock_class,
    mock_meeting,
):
    """Test that concurrent polling is prevented by Redis lock."""
    mock_get_by_id.return_value = mock_meeting

    # Mock the RedisAsyncLock to simulate lock not acquired
    mock_lock_instance = AsyncMock()
    mock_lock_instance.acquired = False  # Lock not acquired
    mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
    mock_lock_instance.__aexit__ = AsyncMock()

    mock_redis_lock_class.return_value = mock_lock_instance

    mock_daily_client = AsyncMock()
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    await poll_daily_room_presence(mock_meeting.id)

    # Verify RedisAsyncLock was instantiated
    assert mock_redis_lock_class.call_count == 1
    # Verify get_room_presence was NOT called (lock not acquired, so function returned early)
    assert mock_daily_client.get_room_presence.call_count == 0
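Read together, these tests describe a reconcile pass that is a set difference in both directions, guarded by a Redis lock and a short join grace period. A sketch of the idea (GRACE and make_session are assumed names; the real task body in reflector.worker.process is not shown in this diff):

    # Sketch of the reconciliation the tests above pin down.
    now = datetime.now(timezone.utc)
    api_ids = {p.id for p in presence.data}
    db_sessions = await daily_participant_sessions_controller.get_all_sessions_for_meeting(
        meeting.id
    )

    # Add participants the DB has not seen yet.
    missing = [make_session(meeting, p) for p in presence.data if p.id not in db_sessions]
    if missing:
        await daily_participant_sessions_controller.batch_upsert_sessions(missing)

    # Close open sessions no longer present, skipping very recent joins
    # (the "grace period" the fixtures reference).
    stale = [
        s.id
        for sid, s in db_sessions.items()
        if sid not in api_ids and s.left_at is None and now - s.joined_at > GRACE
    ]
    if stale:
        await daily_participant_sessions_controller.batch_close_sessions(stale, left_at=now)

    # Keep the cached participant count in sync (positional args assumed).
    if meeting.num_clients != presence.total_count:
        await meetings_controller.update_meeting(meeting.id, num_clients=presence.total_count)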
@@ -127,18 +127,27 @@ async def mock_storage():
    from reflector.storage.base import Storage

    class TestStorage(Storage):
-       async def _put_file(self, path, data):
+       async def _put_file(self, path, data, bucket=None):
            return None

-       async def _get_file_url(self, path):
+       async def _get_file_url(
+           self,
+           path,
+           operation: str = "get_object",
+           expires_in: int = 3600,
+           bucket=None,
+       ):
            return f"http://test-storage/{path}"

-       async def _get_file(self, path):
+       async def _get_file(self, path, bucket=None):
            return b"test_audio_data"

-       async def _delete_file(self, path):
+       async def _delete_file(self, path, bucket=None):
            return None

+       async def _stream_to_fileobj(self, path, fileobj, bucket=None):
+           fileobj.write(b"test_audio_data")
+
    storage = TestStorage()
    # Add mock tracking for verification
    storage._put_file = AsyncMock(side_effect=storage._put_file)
@@ -181,7 +190,7 @@ async def mock_waveform_processor():
async def mock_topic_detector():
    """Mock TranscriptTopicDetectorProcessor"""
    with patch(
-       "reflector.pipelines.main_file_pipeline.TranscriptTopicDetectorProcessor"
+       "reflector.pipelines.topic_processing.TranscriptTopicDetectorProcessor"
    ) as mock_topic_class:
        mock_topic = AsyncMock()
        mock_topic.set_pipeline = MagicMock()
@@ -218,7 +227,7 @@ async def mock_topic_detector():
async def mock_title_processor():
    """Mock TranscriptFinalTitleProcessor"""
    with patch(
-       "reflector.pipelines.main_file_pipeline.TranscriptFinalTitleProcessor"
+       "reflector.pipelines.topic_processing.TranscriptFinalTitleProcessor"
    ) as mock_title_class:
        mock_title = AsyncMock()
        mock_title.set_pipeline = MagicMock()
@@ -247,7 +256,7 @@ async def mock_title_processor():
async def mock_summary_processor():
    """Mock TranscriptFinalSummaryProcessor"""
    with patch(
-       "reflector.pipelines.main_file_pipeline.TranscriptFinalSummaryProcessor"
+       "reflector.pipelines.topic_processing.TranscriptFinalSummaryProcessor"
    ) as mock_summary_class:
        mock_summary = AsyncMock()
        mock_summary.set_pipeline = MagicMock()
193
server/tests/test_poll_daily_recordings.py
Normal file
@@ -0,0 +1,193 @@
"""Tests for poll_daily_recordings task."""

from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, patch

import pytest

from reflector.dailyco_api.responses import RecordingResponse
from reflector.dailyco_api.webhooks import DailyTrack


# Import the unwrapped async function for testing
# The function is decorated with @shared_task and @asynctask,
# but we need to test the underlying async implementation
def _get_poll_daily_recordings_fn():
    """Get the underlying async function without Celery/asynctask decorators."""
    from reflector.worker import process

    # Access the actual async function before decorators
    fn = process.poll_daily_recordings
    # Get through both decorator layers
    if hasattr(fn, "__wrapped__"):
        fn = fn.__wrapped__
    if hasattr(fn, "__wrapped__"):
        fn = fn.__wrapped__
    return fn


@pytest.fixture
def mock_recording_response():
    """Mock Daily.co API recording response with tracks."""
    now = datetime.now(timezone.utc)
    return [
        RecordingResponse(
            id="rec-123",
            room_name="test-room-20251118120000",
            start_ts=int((now - timedelta(hours=1)).timestamp()),
            status="finished",
            max_participants=2,
            duration=3600,
            share_token="share-token-123",
            tracks=[
                DailyTrack(type="audio", s3Key="track1.webm", size=1024),
                DailyTrack(type="audio", s3Key="track2.webm", size=2048),
            ],
        ),
        RecordingResponse(
            id="rec-456",
            room_name="test-room-20251118130000",
            start_ts=int((now - timedelta(hours=2)).timestamp()),
            status="finished",
            max_participants=3,
            duration=7200,
            share_token="share-token-456",
            tracks=[
                DailyTrack(type="audio", s3Key="track1.webm", size=1024),
            ],
        ),
    ]


@pytest.mark.asyncio
@patch("reflector.worker.process.settings")
@patch("reflector.worker.process.create_platform_client")
@patch("reflector.worker.process.recordings_controller.get_by_ids")
@patch("reflector.worker.process.process_multitrack_recording.delay")
async def test_poll_daily_recordings_processes_missing_recordings(
    mock_process_delay,
    mock_get_recordings,
    mock_create_client,
    mock_settings,
    mock_recording_response,
):
    """Test that poll_daily_recordings queues processing for recordings not in DB."""
    mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "test-bucket"

    # Mock Daily.co API client
    mock_daily_client = AsyncMock()
    mock_daily_client.list_recordings = AsyncMock(return_value=mock_recording_response)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    # Mock DB controller - no existing recordings
    mock_get_recordings.return_value = []

    # Execute - call the unwrapped async function
    poll_fn = _get_poll_daily_recordings_fn()
    await poll_fn()

    # Verify Daily.co API was called without time parameters (uses default limit=100)
    assert mock_daily_client.list_recordings.call_count == 1
    call_kwargs = mock_daily_client.list_recordings.call_args.kwargs

    # Should not have time-based parameters (uses cursor-based pagination)
    assert "start_time" not in call_kwargs
    assert "end_time" not in call_kwargs

    # Verify processing was queued for both missing recordings
    assert mock_process_delay.call_count == 2

    # Verify the processing calls have correct parameters
    calls = mock_process_delay.call_args_list
    assert calls[0].kwargs["bucket_name"] == "test-bucket"
    assert calls[0].kwargs["recording_id"] == "rec-123"
    assert calls[0].kwargs["daily_room_name"] == "test-room-20251118120000"
    assert calls[0].kwargs["track_keys"] == ["track1.webm", "track2.webm"]

    assert calls[1].kwargs["bucket_name"] == "test-bucket"
    assert calls[1].kwargs["recording_id"] == "rec-456"
    assert calls[1].kwargs["daily_room_name"] == "test-room-20251118130000"
    assert calls[1].kwargs["track_keys"] == ["track1.webm"]


@pytest.mark.asyncio
@patch("reflector.worker.process.settings")
@patch("reflector.worker.process.create_platform_client")
@patch("reflector.worker.process.recordings_controller.get_by_ids")
@patch("reflector.worker.process.process_multitrack_recording.delay")
async def test_poll_daily_recordings_skips_existing_recordings(
    mock_process_delay,
    mock_get_recordings,
    mock_create_client,
    mock_settings,
    mock_recording_response,
):
    """Test that poll_daily_recordings skips recordings already in DB."""
    mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "test-bucket"

    # Mock Daily.co API client
    mock_daily_client = AsyncMock()
    mock_daily_client.list_recordings = AsyncMock(return_value=mock_recording_response)
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    # Mock DB controller - all recordings already exist
    from reflector.db.recordings import Recording

    mock_get_recordings.return_value = [
        Recording(
            id="rec-123",
            bucket_name="test-bucket",
            object_key="",
            recorded_at=datetime.now(timezone.utc),
            meeting_id="meeting-1",
        ),
        Recording(
            id="rec-456",
            bucket_name="test-bucket",
            object_key="",
            recorded_at=datetime.now(timezone.utc),
            meeting_id="meeting-1",
        ),
    ]

    # Execute - call the unwrapped async function
    poll_fn = _get_poll_daily_recordings_fn()
    await poll_fn()

    # Verify Daily.co API was called
    assert mock_daily_client.list_recordings.call_count == 1

    # Verify NO processing was queued (all recordings already exist)
    assert mock_process_delay.call_count == 0


@pytest.mark.asyncio
@patch("reflector.worker.process.settings")
@patch("reflector.worker.process.create_platform_client")
async def test_poll_daily_recordings_skips_when_bucket_not_configured(
    mock_create_client,
    mock_settings,
):
    """Test that poll_daily_recordings returns early when bucket is not configured."""
    # No bucket configured
    mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = None

    # Mock should not be called
    mock_daily_client = AsyncMock()
    mock_create_client.return_value.__aenter__ = AsyncMock(
        return_value=mock_daily_client
    )
    mock_create_client.return_value.__aexit__ = AsyncMock()

    # Execute - call the unwrapped async function
    poll_fn = _get_poll_daily_recordings_fn()
    await poll_fn()

    # Verify API was never called
    mock_daily_client.list_recordings.assert_not_called()
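The task under test follows a list-compare-queue pattern. Roughly (a sketch only; the real task in reflector.worker.process is not shown in this diff, and the create_platform_client argument is assumed):

    # Sketch of the poll loop the tests above encode.
    if not settings.DAILYCO_STORAGE_AWS_BUCKET_NAME:
        return  # nothing to do without a configured bucket
    async with create_platform_client("daily") as client:
        recordings = await client.list_recordings()  # cursor-based, default limit=100
    existing = {r.id for r in await recordings_controller.get_by_ids([r.id for r in recordings])}
    for rec in recordings:
        if rec.id in existing:
            continue  # already ingested, skip
        process_multitrack_recording.delay(
            bucket_name=settings.DAILYCO_STORAGE_AWS_BUCKET_NAME,
            recording_id=rec.id,
            daily_room_name=rec.room_name,
            track_keys=[t.s3Key for t in rec.tracks],
        )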
@@ -159,3 +159,78 @@ def test_processor_transcript_segment():
    assert segments[3].start == 30.72
    assert segments[4].start == 31.56
    assert segments[5].start == 32.38


def test_processor_transcript_segment_multitrack_interleaved():
    """Test as_segments(is_multitrack=True) with interleaved speakers.

    Multitrack recordings have words from different speakers sorted by start time,
    causing frequent speaker alternation. The multitrack mode should group by
    speaker first, then split into sentences.
    """
    from reflector.processors.types import Transcript, Word

    # Simulate real multitrack data: words sorted by start time, speakers interleave
    # Speaker 0 says: "Hello there."
    # Speaker 1 says: "I'm good."
    # When sorted by time, words interleave
    transcript = Transcript(
        words=[
            Word(text="Hello ", start=0.0, end=0.5, speaker=0),
            Word(text="I'm ", start=0.5, end=0.8, speaker=1),
            Word(text="there.", start=0.5, end=1.0, speaker=0),
            Word(text="good.", start=1.0, end=1.5, speaker=1),
        ]
    )

    # Default behavior (is_multitrack=False): breaks on every speaker change = 4 segments
    segments_default = transcript.as_segments(is_multitrack=False)
    assert len(segments_default) == 4

    # Multitrack behavior: groups by speaker, then sentences = 2 segments
    segments_multitrack = transcript.as_segments(is_multitrack=True)
    assert len(segments_multitrack) == 2

    # Check content - sorted by start time
    assert segments_multitrack[0].speaker == 0
    assert segments_multitrack[0].text == "Hello there."
    assert segments_multitrack[0].start == 0.0
    assert segments_multitrack[0].end == 1.0

    assert segments_multitrack[1].speaker == 1
    assert segments_multitrack[1].text == "I'm good."
    assert segments_multitrack[1].start == 0.5
    assert segments_multitrack[1].end == 1.5


def test_processor_transcript_segment_multitrack_overlapping_timestamps():
    """Test multitrack with exactly overlapping timestamps (real Daily.co data pattern)."""
    from reflector.processors.types import Transcript, Word

    # Real pattern from transcript 38d84d57: words with identical timestamps
    transcript = Transcript(
        words=[
            Word(text="speaking ", start=6.71, end=7.11, speaker=0),
            Word(text="Speaking ", start=6.71, end=7.11, speaker=1),
            Word(text="at ", start=7.11, end=7.27, speaker=0),
            Word(text="at ", start=7.11, end=7.27, speaker=1),
            Word(text="the ", start=7.27, end=7.43, speaker=0),
            Word(text="the ", start=7.27, end=7.43, speaker=1),
            Word(text="same ", start=7.43, end=7.59, speaker=0),
            Word(text="same ", start=7.43, end=7.59, speaker=1),
            Word(text="time.", start=7.59, end=8.0, speaker=0),
            Word(text="time.", start=7.59, end=8.0, speaker=1),
        ]
    )

    # Default: 10 segments (one per speaker change)
    segments_default = transcript.as_segments(is_multitrack=False)
    assert len(segments_default) == 10

    # Multitrack: 2 segments (one per speaker sentence)
    segments_multitrack = transcript.as_segments(is_multitrack=True)
    assert len(segments_multitrack) == 2

    # Both should have complete sentences
    assert "speaking at the same time." in segments_multitrack[0].text
    assert "Speaking at the same time." in segments_multitrack[1].text
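The grouping the first test describes can be sketched independently of Transcript.as_segments (a sketch only; the real implementation lives in reflector.processors.types, likely also splits on long time gaps, and handles more cases):

    # Sketch: group words per speaker, split each speaker's stream into
    # sentences on terminal punctuation, then sort segments by start time.
    def multitrack_segments(words):
        by_speaker = {}
        for w in words:
            by_speaker.setdefault(w.speaker, []).append(w)
        segments = []
        for speaker, ws in by_speaker.items():
            sentence = []
            for w in ws:
                sentence.append(w)
                if w.text.rstrip().endswith((".", "?", "!")):
                    segments.append((speaker, "".join(x.text for x in sentence).strip(),
                                     sentence[0].start, sentence[-1].end))
                    sentence = []
            if sentence:  # flush a trailing unterminated sentence
                segments.append((speaker, "".join(x.text for x in sentence).strip(),
                                 sentence[0].start, sentence[-1].end))
        return sorted(segments, key=lambda s: s[2])

On the interleaved fixture above this yields exactly the two expected segments: (0, "Hello there.", 0.0, 1.0) and (1, "I'm good.", 0.5, 1.5).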
@@ -48,6 +48,7 @@ async def test_create_room_with_ics_fields(authenticated_client):
            "ics_url": "https://calendar.example.com/test.ics",
            "ics_fetch_interval": 600,
            "ics_enabled": True,
+           "platform": "daily",
        },
    )
    assert response.status_code == 200
@@ -75,6 +76,7 @@ async def test_update_room_ics_configuration(authenticated_client):
            "is_shared": False,
            "webhook_url": "",
            "webhook_secret": "",
+           "platform": "daily",
        },
    )
    assert response.status_code == 200
@@ -111,6 +113,7 @@ async def test_trigger_ics_sync(authenticated_client):
        is_shared=False,
        ics_url="https://calendar.example.com/api.ics",
        ics_enabled=True,
+       platform="daily",
    )

    cal = Calendar()
@@ -154,6 +157,7 @@ async def test_trigger_ics_sync_unauthorized(client):
        is_shared=False,
        ics_url="https://calendar.example.com/api.ics",
        ics_enabled=True,
+       platform="daily",
    )

    response = await client.post(f"/rooms/{room.name}/ics/sync")
@@ -176,6 +180,7 @@ async def test_trigger_ics_sync_not_configured(authenticated_client):
        recording_trigger="automatic-2nd-participant",
        is_shared=False,
        ics_enabled=False,
+       platform="daily",
    )

    response = await client.post(f"/rooms/{room.name}/ics/sync")
@@ -200,6 +205,7 @@ async def test_get_ics_status(authenticated_client):
        ics_url="https://calendar.example.com/status.ics",
        ics_enabled=True,
        ics_fetch_interval=300,
+       platform="daily",
    )

    now = datetime.now(timezone.utc)
@@ -231,6 +237,7 @@ async def test_get_ics_status_unauthorized(client):
        is_shared=False,
        ics_url="https://calendar.example.com/status.ics",
        ics_enabled=True,
+       platform="daily",
    )

    response = await client.get(f"/rooms/{room.name}/ics/status")
@@ -252,6 +259,7 @@ async def test_list_room_meetings(authenticated_client):
        recording_type="cloud",
        recording_trigger="automatic-2nd-participant",
        is_shared=False,
+       platform="daily",
    )

    now = datetime.now(timezone.utc)
@@ -298,6 +306,7 @@ async def test_list_room_meetings_non_owner(client):
        recording_type="cloud",
        recording_trigger="automatic-2nd-participant",
        is_shared=False,
+       platform="daily",
    )

    event = CalendarEvent(
@@ -334,6 +343,7 @@ async def test_list_upcoming_meetings(authenticated_client):
        recording_type="cloud",
        recording_trigger="automatic-2nd-participant",
        is_shared=False,
+       platform="daily",
    )

    now = datetime.now(timezone.utc)
136
server/tests/test_s3_url_parser.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""Tests for S3 URL parsing functionality in reflector.tools.process"""
|
||||
|
||||
import pytest
|
||||
|
||||
from reflector.tools.process import parse_s3_url
|
||||
|
||||
|
||||
class TestParseS3URL:
|
||||
"""Test cases for parse_s3_url function"""
|
||||
|
||||
def test_parse_s3_protocol(self):
|
||||
"""Test parsing s3:// protocol URLs"""
|
||||
bucket, key = parse_s3_url("s3://my-bucket/path/to/file.webm")
|
||||
assert bucket == "my-bucket"
|
||||
assert key == "path/to/file.webm"
|
||||
|
||||
def test_parse_s3_protocol_deep_path(self):
|
||||
"""Test s3:// with deeply nested paths"""
|
||||
bucket, key = parse_s3_url("s3://bucket-name/very/deep/path/to/audio.mp4")
|
||||
assert bucket == "bucket-name"
|
||||
assert key == "very/deep/path/to/audio.mp4"
|
||||
|
||||
def test_parse_https_subdomain_format(self):
|
||||
"""Test parsing https://bucket.s3.amazonaws.com/key format"""
|
||||
bucket, key = parse_s3_url("https://my-bucket.s3.amazonaws.com/path/file.webm")
|
||||
assert bucket == "my-bucket"
|
||||
assert key == "path/file.webm"
|
||||
|
||||
    def test_parse_https_regional_subdomain(self):
        """Test parsing regional endpoint with subdomain"""
        bucket, key = parse_s3_url(
            "https://my-bucket.s3.us-west-2.amazonaws.com/path/file.webm"
        )
        assert bucket == "my-bucket"
        assert key == "path/file.webm"

    def test_parse_https_path_style(self):
        """Test parsing https://s3.amazonaws.com/bucket/key format"""
        bucket, key = parse_s3_url("https://s3.amazonaws.com/my-bucket/path/file.webm")
        assert bucket == "my-bucket"
        assert key == "path/file.webm"

    def test_parse_https_regional_path_style(self):
        """Test parsing regional endpoint with path style"""
        bucket, key = parse_s3_url(
            "https://s3.us-east-1.amazonaws.com/my-bucket/path/file.webm"
        )
        assert bucket == "my-bucket"
        assert key == "path/file.webm"

    def test_parse_url_encoded_keys(self):
        """Test parsing URL-encoded keys"""
        bucket, key = parse_s3_url(
            "s3://my-bucket/path%20with%20spaces/file%2Bname.webm"
        )
        assert bucket == "my-bucket"
        assert key == "path with spaces/file+name.webm"  # Should be decoded

    def test_parse_url_encoded_https(self):
        """Test URL-encoded keys with HTTPS format"""
        bucket, key = parse_s3_url(
            "https://my-bucket.s3.amazonaws.com/file%20with%20spaces.webm"
        )
        assert bucket == "my-bucket"
        assert key == "file with spaces.webm"

    def test_invalid_url_no_scheme(self):
        """Test that URLs without scheme raise ValueError"""
        with pytest.raises(ValueError, match="Invalid S3 URL scheme"):
            parse_s3_url("my-bucket/path/file.webm")

    def test_invalid_url_wrong_scheme(self):
        """Test that non-S3 schemes raise ValueError"""
        with pytest.raises(ValueError, match="Invalid S3 URL scheme"):
            parse_s3_url("ftp://my-bucket/path/file.webm")

    def test_invalid_s3_missing_bucket(self):
        """Test s3:// URL without bucket raises ValueError"""
        with pytest.raises(ValueError, match="missing bucket or key"):
            parse_s3_url("s3:///path/file.webm")

    def test_invalid_s3_missing_key(self):
        """Test s3:// URL without key raises ValueError"""
        with pytest.raises(ValueError, match="missing bucket or key"):
            parse_s3_url("s3://my-bucket/")

    def test_invalid_s3_empty_key(self):
        """Test s3:// URL with empty key raises ValueError"""
        with pytest.raises(ValueError, match="missing bucket or key"):
            parse_s3_url("s3://my-bucket")

    def test_invalid_https_not_s3(self):
        """Test HTTPS URL that's not S3 raises ValueError"""
        with pytest.raises(ValueError, match="not recognized as S3 URL"):
            parse_s3_url("https://example.com/path/file.webm")

    def test_invalid_https_subdomain_missing_key(self):
        """Test HTTPS subdomain format without key raises ValueError"""
        with pytest.raises(ValueError, match="missing bucket or key"):
            parse_s3_url("https://my-bucket.s3.amazonaws.com/")

    def test_invalid_https_path_style_missing_parts(self):
        """Test HTTPS path style with missing bucket/key raises ValueError"""
        with pytest.raises(ValueError, match="missing bucket or key"):
            parse_s3_url("https://s3.amazonaws.com/")

    def test_bucket_with_dots(self):
        """Test parsing bucket names with dots"""
        bucket, key = parse_s3_url("s3://my.bucket.name/path/file.webm")
        assert bucket == "my.bucket.name"
        assert key == "path/file.webm"

    def test_bucket_with_hyphens(self):
        """Test parsing bucket names with hyphens"""
        bucket, key = parse_s3_url("s3://my-bucket-name-123/path/file.webm")
        assert bucket == "my-bucket-name-123"
        assert key == "path/file.webm"

    def test_key_with_special_chars(self):
        """Test keys with various special characters"""
        # Note: # is treated as URL fragment separator, not part of key
        bucket, key = parse_s3_url("s3://bucket/2024-01-01_12:00:00/file.webm")
        assert bucket == "bucket"
        assert key == "2024-01-01_12:00:00/file.webm"

    def test_fragment_handling(self):
        """Test that URL fragments are properly ignored"""
        bucket, key = parse_s3_url("s3://bucket/path/to/file.webm#fragment123")
        assert bucket == "bucket"
        assert key == "path/to/file.webm"  # Fragment not included

    def test_http_scheme_s3_url(self):
        """Test that HTTP (not HTTPS) S3 URLs are supported"""
        bucket, key = parse_s3_url("http://my-bucket.s3.amazonaws.com/path/file.webm")
        assert bucket == "my-bucket"
        assert key == "path/file.webm"
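These cases pin down the parser's contract end to end: the s3:// scheme, virtual-hosted and path-style HTTPS forms, percent-decoding, fragment stripping, and the three error messages. For orientation, a minimal sketch that satisfies all of them using only urllib.parse (illustrative only; the actual reflector helper may be implemented differently):

from urllib.parse import unquote, urlparse

def parse_s3_url(url: str) -> tuple[str, str]:
    """Return (bucket, key) for s3://, virtual-hosted, and path-style URLs."""
    parsed = urlparse(url)  # urlparse already strips the #fragment
    if parsed.scheme == "s3":
        bucket, key = parsed.netloc, parsed.path.lstrip("/")
    elif parsed.scheme in ("http", "https"):
        host = parsed.netloc
        if host == "s3.amazonaws.com" or (
            host.startswith("s3.") and host.endswith(".amazonaws.com")
        ):
            # path style: https://s3[.region].amazonaws.com/bucket/key
            bucket, _, key = parsed.path.lstrip("/").partition("/")
        elif ".s3." in host and host.endswith(".amazonaws.com"):
            # virtual-hosted style: https://bucket.s3[.region].amazonaws.com/key
            bucket, key = host.split(".s3.", 1)[0], parsed.path.lstrip("/")
        else:
            raise ValueError(f"not recognized as S3 URL: {url}")
    else:
        raise ValueError(f"Invalid S3 URL scheme: {url}")
    if not bucket or not key:
        raise ValueError(f"URL missing bucket or key: {url}")
    return bucket, unquote(key)  # %20 -> space, %2B -> +, literal + preserved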
321
server/tests/test_storage.py
Normal file
@@ -0,0 +1,321 @@
"""Tests for storage abstraction layer."""

import io
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from botocore.exceptions import ClientError

from reflector.storage.base import StoragePermissionError
from reflector.storage.storage_aws import AwsStorage


@pytest.mark.asyncio
async def test_aws_storage_stream_to_fileobj():
    """Test that AWS storage can stream directly to a file object without loading into memory."""
    # Setup
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock download_fileobj to write data
    async def mock_download(Bucket, Key, Fileobj, **kwargs):
        Fileobj.write(b"chunk1chunk2")

    mock_client = AsyncMock()
    mock_client.download_fileobj = AsyncMock(side_effect=mock_download)
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    # Patch the session client
    with patch.object(storage.session, "client", return_value=mock_client):
        # Create a file-like object to stream to
        output = io.BytesIO()

        # Act - stream to file object
        await storage.stream_to_fileobj("test-file.mp4", output, bucket="test-bucket")

        # Assert
        mock_client.download_fileobj.assert_called_once_with(
            Bucket="test-bucket", Key="test-file.mp4", Fileobj=output
        )

        # Check that data was written to output
        output.seek(0)
        assert output.read() == b"chunk1chunk2"


@pytest.mark.asyncio
async def test_aws_storage_stream_to_fileobj_with_folder():
    """Test streaming with folder prefix in bucket name."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket/recordings",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    async def mock_download(Bucket, Key, Fileobj, **kwargs):
        Fileobj.write(b"data")

    mock_client = AsyncMock()
    mock_client.download_fileobj = AsyncMock(side_effect=mock_download)
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        output = io.BytesIO()
        await storage.stream_to_fileobj("file.mp4", output, bucket="other-bucket")

        # Should use folder prefix from instance config
        mock_client.download_fileobj.assert_called_once_with(
            Bucket="other-bucket", Key="recordings/file.mp4", Fileobj=output
        )


@pytest.mark.asyncio
async def test_storage_base_class_stream_to_fileobj():
    """Test that base Storage class has stream_to_fileobj method."""
    from reflector.storage.base import Storage

    # Verify method exists in base class
    assert hasattr(Storage, "stream_to_fileobj")

    # Create a mock storage instance
    storage = MagicMock(spec=Storage)
    storage.stream_to_fileobj = AsyncMock()

    # Should be callable
    await storage.stream_to_fileobj("file.mp4", io.BytesIO())
    storage.stream_to_fileobj.assert_called_once()


@pytest.mark.asyncio
async def test_aws_storage_stream_uses_download_fileobj():
    """Test that download_fileobj is called correctly."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    async def mock_download(Bucket, Key, Fileobj, **kwargs):
        Fileobj.write(b"data")

    mock_client = AsyncMock()
    mock_client.download_fileobj = AsyncMock(side_effect=mock_download)
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        output = io.BytesIO()
        await storage.stream_to_fileobj("test.mp4", output)

        # Verify download_fileobj was called with correct parameters
        mock_client.download_fileobj.assert_called_once_with(
            Bucket="test-bucket", Key="test.mp4", Fileobj=output
        )


@pytest.mark.asyncio
async def test_aws_storage_handles_access_denied_error():
    """Test that AccessDenied errors are caught and wrapped in StoragePermissionError."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock ClientError with AccessDenied
    error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}}
    mock_client = AsyncMock()
    mock_client.put_object = AsyncMock(
        side_effect=ClientError(error_response, "PutObject")
    )
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        with pytest.raises(StoragePermissionError) as exc_info:
            await storage.put_file("test.txt", b"data")

        # Verify error message contains expected information
        error_msg = str(exc_info.value)
        assert "AccessDenied" in error_msg
        assert "default bucket 'test-bucket'" in error_msg
        assert "S3 upload failed" in error_msg


@pytest.mark.asyncio
async def test_aws_storage_handles_no_such_bucket_error():
    """Test that NoSuchBucket errors are caught and wrapped in StoragePermissionError."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock ClientError with NoSuchBucket
    error_response = {
        "Error": {
            "Code": "NoSuchBucket",
            "Message": "The specified bucket does not exist",
        }
    }
    mock_client = AsyncMock()
    mock_client.delete_object = AsyncMock(
        side_effect=ClientError(error_response, "DeleteObject")
    )
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        with pytest.raises(StoragePermissionError) as exc_info:
            await storage.delete_file("test.txt")

        # Verify error message contains expected information
        error_msg = str(exc_info.value)
        assert "NoSuchBucket" in error_msg
        assert "default bucket 'test-bucket'" in error_msg
        assert "S3 delete failed" in error_msg


@pytest.mark.asyncio
async def test_aws_storage_error_message_with_bucket_override():
    """Test that error messages correctly show overridden bucket."""
    storage = AwsStorage(
        aws_bucket_name="default-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock ClientError with AccessDenied
    error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}}
    mock_client = AsyncMock()
    mock_client.get_object = AsyncMock(
        side_effect=ClientError(error_response, "GetObject")
    )
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        with pytest.raises(StoragePermissionError) as exc_info:
            await storage.get_file("test.txt", bucket="override-bucket")

        # Verify error message shows overridden bucket, not default
        error_msg = str(exc_info.value)
        assert "overridden bucket 'override-bucket'" in error_msg
        assert "default-bucket" not in error_msg
        assert "S3 download failed" in error_msg


@pytest.mark.asyncio
async def test_aws_storage_reraises_non_handled_errors():
    """Test that non-AccessDenied/NoSuchBucket errors are re-raised as-is."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock ClientError with different error code
    error_response = {
        "Error": {"Code": "InternalError", "Message": "Internal Server Error"}
    }
    mock_client = AsyncMock()
    mock_client.put_object = AsyncMock(
        side_effect=ClientError(error_response, "PutObject")
    )
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        # Should raise ClientError, not StoragePermissionError
        with pytest.raises(ClientError) as exc_info:
            await storage.put_file("test.txt", b"data")

        # Verify it's the original ClientError
        assert exc_info.value.response["Error"]["Code"] == "InternalError"


@pytest.mark.asyncio
async def test_aws_storage_presign_url_handles_errors():
    """Test that presigned URL generation handles permission errors."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock ClientError with AccessDenied during presign operation
    error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}}
    mock_client = AsyncMock()
    mock_client.generate_presigned_url = AsyncMock(
        side_effect=ClientError(error_response, "GeneratePresignedUrl")
    )
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        with pytest.raises(StoragePermissionError) as exc_info:
            await storage.get_file_url("test.txt")

        # Verify error message
        error_msg = str(exc_info.value)
        assert "S3 presign failed" in error_msg
        assert "AccessDenied" in error_msg


@pytest.mark.asyncio
async def test_aws_storage_list_objects_handles_errors():
    """Test that list_objects handles permission errors."""
    storage = AwsStorage(
        aws_bucket_name="test-bucket",
        aws_region="us-east-1",
        aws_access_key_id="test-key",
        aws_secret_access_key="test-secret",
    )

    # Mock ClientError during list operation
    error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}}
    mock_paginator = MagicMock()

    async def mock_paginate(*args, **kwargs):
        raise ClientError(error_response, "ListObjectsV2")
        yield  # Make it an async generator

    mock_paginator.paginate = mock_paginate

    mock_client = AsyncMock()
    mock_client.get_paginator = MagicMock(return_value=mock_paginator)
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        with pytest.raises(StoragePermissionError) as exc_info:
            await storage.list_objects(prefix="test/")

        error_msg = str(exc_info.value)
        assert "S3 list_objects failed" in error_msg
        assert "AccessDenied" in error_msg


def test_aws_storage_constructor_rejects_mixed_auth():
    """Test that constructor rejects both role_arn and access keys."""
    with pytest.raises(ValueError, match="cannot use both.*role_arn.*access keys"):
        AwsStorage(
            aws_bucket_name="test-bucket",
            aws_region="us-east-1",
            aws_access_key_id="test-key",
            aws_secret_access_key="test-secret",
            aws_role_arn="arn:aws:iam::123456789012:role/test-role",
        )
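The error-handling tests above all exercise one pattern: AccessDenied and NoSuchBucket become a StoragePermissionError carrying an operation label and a bucket description, while every other ClientError propagates untouched. A compact sketch of such a wrapper, inferred from the assertions (the real AwsStorage methods may inline this differently):

from botocore.exceptions import ClientError

from reflector.storage.base import StoragePermissionError

# assumption drawn from the tests: only these codes are translated
WRAPPED_ERROR_CODES = {"AccessDenied", "NoSuchBucket"}

async def s3_call(op_label: str, bucket_desc: str, coro):
    """Await an S3 operation, translating permission-type failures.

    op_label is e.g. "S3 upload failed"; bucket_desc is e.g.
    "default bucket 'x'" or "overridden bucket 'y'", matching the
    message fragments the tests assert on.
    """
    try:
        return await coro
    except ClientError as e:
        code = e.response["Error"]["Code"]
        if code in WRAPPED_ERROR_CODES:
            raise StoragePermissionError(
                f"{op_label} ({code}) on {bucket_desc}"
            ) from e
        raise  # anything else (e.g. InternalError) is re-raised as-is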
779
server/tests/test_transcript_formats.py
Normal file
@@ -0,0 +1,779 @@
"""Tests for transcript format conversion functionality."""

import pytest

from reflector.db.transcripts import TranscriptParticipant, TranscriptTopic
from reflector.processors.types import Word
from reflector.utils.transcript_formats import (
    format_timestamp_mmss,
    get_speaker_name,
    topics_to_webvtt_named,
    transcript_to_json_segments,
    transcript_to_text,
    transcript_to_text_timestamped,
)


@pytest.mark.asyncio
async def test_get_speaker_name_with_participants():
    """Test speaker name resolution with participants list."""
    participants = [
        TranscriptParticipant(id="1", speaker=0, name="John Smith"),
        TranscriptParticipant(id="2", speaker=1, name="Jane Doe"),
    ]

    assert get_speaker_name(0, participants) == "John Smith"
    assert get_speaker_name(1, participants) == "Jane Doe"
    assert get_speaker_name(2, participants) == "Speaker 2"


@pytest.mark.asyncio
async def test_get_speaker_name_without_participants():
    """Test speaker name resolution without participants list."""
    assert get_speaker_name(0, None) == "Speaker 0"
    assert get_speaker_name(1, None) == "Speaker 1"
    assert get_speaker_name(5, []) == "Speaker 5"


@pytest.mark.asyncio
async def test_format_timestamp_mmss():
    """Test timestamp formatting to MM:SS."""
    assert format_timestamp_mmss(0) == "00:00"
    assert format_timestamp_mmss(5) == "00:05"
    assert format_timestamp_mmss(65) == "01:05"
    assert format_timestamp_mmss(125.7) == "02:05"
    assert format_timestamp_mmss(3661) == "61:01"
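The last two assertions document the rounding and overflow behavior: fractional seconds truncate, and minutes are not wrapped into hours. A two-liner consistent with those cases (illustrative, not necessarily the shipped implementation):

def format_timestamp_mmss(seconds: float) -> str:
    total = int(seconds)  # truncate fractions: 125.7 -> 125
    return f"{total // 60:02d}:{total % 60:02d}"  # 3661 -> "61:01", no hour wrap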
@pytest.mark.asyncio
async def test_transcript_to_text():
    """Test plain text format conversion."""
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[
                Word(text="Hello", start=0.0, end=1.0, speaker=0),
                Word(text=" world.", start=1.0, end=2.0, speaker=0),
            ],
        ),
        TranscriptTopic(
            id="2",
            title="Topic 2",
            summary="Summary 2",
            timestamp=2.0,
            words=[
                Word(text="How", start=2.0, end=3.0, speaker=1),
                Word(text=" are", start=3.0, end=4.0, speaker=1),
                Word(text=" you?", start=4.0, end=5.0, speaker=1),
            ],
        ),
    ]

    participants = [
        TranscriptParticipant(id="1", speaker=0, name="John Smith"),
        TranscriptParticipant(id="2", speaker=1, name="Jane Doe"),
    ]

    result = transcript_to_text(topics, participants)
    lines = result.split("\n")

    assert len(lines) == 2
    assert lines[0] == "John Smith: Hello world."
    assert lines[1] == "Jane Doe: How are you?"


@pytest.mark.asyncio
async def test_transcript_to_text_timestamped():
    """Test timestamped text format conversion."""
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[
                Word(text="Hello", start=0.0, end=1.0, speaker=0),
                Word(text=" world.", start=1.0, end=2.0, speaker=0),
            ],
        ),
        TranscriptTopic(
            id="2",
            title="Topic 2",
            summary="Summary 2",
            timestamp=65.0,
            words=[
                Word(text="How", start=65.0, end=66.0, speaker=1),
                Word(text=" are", start=66.0, end=67.0, speaker=1),
                Word(text=" you?", start=67.0, end=68.0, speaker=1),
            ],
        ),
    ]

    participants = [
        TranscriptParticipant(id="1", speaker=0, name="John Smith"),
        TranscriptParticipant(id="2", speaker=1, name="Jane Doe"),
    ]

    result = transcript_to_text_timestamped(topics, participants)
    lines = result.split("\n")

    assert len(lines) == 2
    assert lines[0] == "[00:00] John Smith: Hello world."
    assert lines[1] == "[01:05] Jane Doe: How are you?"


@pytest.mark.asyncio
async def test_topics_to_webvtt_named():
    """Test WebVTT format conversion with participant names."""
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[
                Word(text="Hello", start=0.0, end=1.0, speaker=0),
                Word(text=" world.", start=1.0, end=2.0, speaker=0),
            ],
        ),
    ]

    participants = [
        TranscriptParticipant(id="1", speaker=0, name="John Smith"),
    ]

    result = topics_to_webvtt_named(topics, participants)

    assert result.startswith("WEBVTT")
    assert "<v John Smith>" in result
    assert "00:00:00.000 --> 00:00:02.000" in result
    assert "Hello world." in result


@pytest.mark.asyncio
async def test_transcript_to_json_segments():
    """Test JSON segments format conversion."""
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[
                Word(text="Hello", start=0.0, end=1.0, speaker=0),
                Word(text=" world.", start=1.0, end=2.0, speaker=0),
            ],
        ),
        TranscriptTopic(
            id="2",
            title="Topic 2",
            summary="Summary 2",
            timestamp=2.0,
            words=[
                Word(text="How", start=2.0, end=3.0, speaker=1),
                Word(text=" are", start=3.0, end=4.0, speaker=1),
                Word(text=" you?", start=4.0, end=5.0, speaker=1),
            ],
        ),
    ]

    participants = [
        TranscriptParticipant(id="1", speaker=0, name="John Smith"),
        TranscriptParticipant(id="2", speaker=1, name="Jane Doe"),
    ]

    result = transcript_to_json_segments(topics, participants)

    assert len(result) == 2
    assert result[0].speaker == 0
    assert result[0].speaker_name == "John Smith"
    assert result[0].text == "Hello world."
    assert result[0].start == 0.0
    assert result[0].end == 2.0

    assert result[1].speaker == 1
    assert result[1].speaker_name == "Jane Doe"
    assert result[1].text == "How are you?"
    assert result[1].start == 2.0
    assert result[1].end == 5.0


@pytest.mark.asyncio
async def test_transcript_formats_with_empty_topics():
    """Test format conversion with empty topics list."""
    topics = []
    participants = []

    assert transcript_to_text(topics, participants) == ""
    assert transcript_to_text_timestamped(topics, participants) == ""
    assert "WEBVTT" in topics_to_webvtt_named(topics, participants)
    assert transcript_to_json_segments(topics, participants) == []


@pytest.mark.asyncio
async def test_transcript_formats_with_empty_words():
    """Test format conversion with topics containing no words."""
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[],
        ),
    ]
    participants = []

    assert transcript_to_text(topics, participants) == ""
    assert transcript_to_text_timestamped(topics, participants) == ""
    assert "WEBVTT" in topics_to_webvtt_named(topics, participants)
    assert transcript_to_json_segments(topics, participants) == []


@pytest.mark.asyncio
async def test_transcript_formats_with_multiple_speakers():
    """Test format conversion with multiple speaker changes."""
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[
                Word(text="Hello", start=0.0, end=1.0, speaker=0),
                Word(text=" there.", start=1.0, end=2.0, speaker=0),
                Word(text="Hi", start=2.0, end=3.0, speaker=1),
                Word(text=" back.", start=3.0, end=4.0, speaker=1),
                Word(text="Good", start=4.0, end=5.0, speaker=0),
                Word(text=" morning.", start=5.0, end=6.0, speaker=0),
            ],
        ),
    ]

    participants = [
        TranscriptParticipant(id="1", speaker=0, name="Alice"),
        TranscriptParticipant(id="2", speaker=1, name="Bob"),
    ]

    text_result = transcript_to_text(topics, participants)
    lines = text_result.split("\n")
    assert len(lines) == 3
    assert "Alice: Hello there." in lines[0]
    assert "Bob: Hi back." in lines[1]
    assert "Alice: Good morning." in lines[2]

    json_result = transcript_to_json_segments(topics, participants)
    assert len(json_result) == 3
    assert json_result[0].speaker_name == "Alice"
    assert json_result[1].speaker_name == "Bob"
    assert json_result[2].speaker_name == "Alice"


@pytest.mark.asyncio
async def test_transcript_formats_with_overlapping_speakers_multitrack():
    """Test format conversion for multitrack recordings with truly interleaved words.

    Multitrack recordings have words from different speakers sorted by start time,
    causing frequent speaker alternation. This tests the sentence-based segmentation
    that groups each speaker's words into complete sentences.
    """
    # Real multitrack data: words sorted by start time, speakers interleave
    # Alice says: "Hello there." (0.0-1.0)
    # Bob says: "I'm good." (0.5-1.5)
    # When sorted by time, words interleave: Hello, I'm, there., good.
    topics = [
        TranscriptTopic(
            id="1",
            title="Topic 1",
            summary="Summary 1",
            timestamp=0.0,
            words=[
                Word(text="Hello ", start=0.0, end=0.5, speaker=0),
                Word(text="I'm ", start=0.5, end=0.8, speaker=1),
                Word(text="there.", start=0.5, end=1.0, speaker=0),
                Word(text="good.", start=1.0, end=1.5, speaker=1),
            ],
        ),
    ]

    participants = [
        TranscriptParticipant(id="1", speaker=0, name="Alice"),
        TranscriptParticipant(id="2", speaker=1, name="Bob"),
    ]

    # With is_multitrack=True, should produce 2 segments (one per speaker sentence)
    # not 4 segments (one per speaker change)
    webvtt_result = topics_to_webvtt_named(topics, participants, is_multitrack=True)
    expected_webvtt = """WEBVTT

00:00:00.000 --> 00:00:01.000
<v Alice>Hello there.

00:00:00.500 --> 00:00:01.500
<v Bob>I'm good.
"""
    assert webvtt_result == expected_webvtt

    text_result = transcript_to_text(topics, participants, is_multitrack=True)
    lines = text_result.split("\n")
    assert len(lines) == 2
    assert "Alice: Hello there." in lines[0]
    assert "Bob: I'm good." in lines[1]

    timestamped_result = transcript_to_text_timestamped(
        topics, participants, is_multitrack=True
    )
    timestamped_lines = timestamped_result.split("\n")
    assert len(timestamped_lines) == 2
    assert "[00:00] Alice: Hello there." in timestamped_lines[0]
    assert "[00:00] Bob: I'm good." in timestamped_lines[1]

    segments = transcript_to_json_segments(topics, participants, is_multitrack=True)
    assert len(segments) == 2
    assert segments[0].speaker_name == "Alice"
    assert segments[0].text == "Hello there."
    assert segments[1].speaker_name == "Bob"
    assert segments[1].text == "I'm good."
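The multitrack test above is the interesting one: because per-participant tracks are merged and sorted by start time, words from different speakers interleave, and naive speaker-change segmentation would fragment the text into four pieces. One way to obtain the two-segment result the test expects is to group words per speaker and close a segment at sentence-final punctuation; a sketch under exactly those assumptions (the shipped segmentation may be more sophisticated):

from collections import defaultdict

def sentence_segments(words):
    """Group interleaved multitrack words into per-speaker sentences."""
    by_speaker = defaultdict(list)
    for w in words:  # words arrive globally sorted by start time
        by_speaker[w.speaker].append(w)

    segments = []
    for speaker, ws in by_speaker.items():
        current = []
        for w in ws:
            current.append(w)
            if w.text.rstrip().endswith((".", "!", "?")):  # sentence boundary
                segments.append((speaker, current))
                current = []
        if current:  # trailing words without closing punctuation
            segments.append((speaker, current))

    # keep the output chronological by each segment's first word
    segments.sort(key=lambda seg: seg[1][0].start)
    return segments

Applied to the test data, speaker 0's "Hello " and "there." form one segment starting at 0.0 and speaker 1's "I'm " and "good." one segment starting at 0.5, matching the expected WebVTT cues.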
@pytest.mark.asyncio
async def test_api_transcript_format_text(client):
    """Test GET /transcripts/{id} with transcript_format=text."""
    response = await client.post("/transcripts", json={"name": "Test transcript"})
    assert response.status_code == 200
    tid = response.json()["id"]

    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    transcript = await transcripts_controller.get_by_id(tid)

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(
                    id="1", speaker=0, name="John Smith"
                ).model_dump(),
                TranscriptParticipant(id="2", speaker=1, name="Jane Doe").model_dump(),
            ]
        },
    )

    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello", start=0, end=1, speaker=0),
                Word(text=" world.", start=1, end=2, speaker=0),
            ],
        ),
    )

    response = await client.get(f"/transcripts/{tid}?transcript_format=text")
    assert response.status_code == 200
    data = response.json()

    assert data["transcript_format"] == "text"
    assert "transcript" in data
    assert "John Smith: Hello world." in data["transcript"]


@pytest.mark.asyncio
async def test_api_transcript_format_text_timestamped(client):
    """Test GET /transcripts/{id} with transcript_format=text-timestamped."""
    response = await client.post("/transcripts", json={"name": "Test transcript"})
    assert response.status_code == 200
    tid = response.json()["id"]

    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    transcript = await transcripts_controller.get_by_id(tid)

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(
                    id="1", speaker=0, name="John Smith"
                ).model_dump(),
            ]
        },
    )

    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello", start=65, end=66, speaker=0),
                Word(text=" world.", start=66, end=67, speaker=0),
            ],
        ),
    )

    response = await client.get(
        f"/transcripts/{tid}?transcript_format=text-timestamped"
    )
    assert response.status_code == 200
    data = response.json()

    assert data["transcript_format"] == "text-timestamped"
    assert "transcript" in data
    assert "[01:05] John Smith: Hello world." in data["transcript"]


@pytest.mark.asyncio
async def test_api_transcript_format_webvtt_named(client):
    """Test GET /transcripts/{id} with transcript_format=webvtt-named."""
    response = await client.post("/transcripts", json={"name": "Test transcript"})
    assert response.status_code == 200
    tid = response.json()["id"]

    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    transcript = await transcripts_controller.get_by_id(tid)

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(
                    id="1", speaker=0, name="John Smith"
                ).model_dump(),
            ]
        },
    )

    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello", start=0, end=1, speaker=0),
                Word(text=" world.", start=1, end=2, speaker=0),
            ],
        ),
    )

    response = await client.get(f"/transcripts/{tid}?transcript_format=webvtt-named")
    assert response.status_code == 200
    data = response.json()

    assert data["transcript_format"] == "webvtt-named"
    assert "transcript" in data
    assert "WEBVTT" in data["transcript"]
    assert "<v John Smith>" in data["transcript"]


@pytest.mark.asyncio
async def test_api_transcript_format_json(client):
    """Test GET /transcripts/{id} with transcript_format=json."""
    response = await client.post("/transcripts", json={"name": "Test transcript"})
    assert response.status_code == 200
    tid = response.json()["id"]

    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    transcript = await transcripts_controller.get_by_id(tid)

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(
                    id="1", speaker=0, name="John Smith"
                ).model_dump(),
            ]
        },
    )

    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello", start=0, end=1, speaker=0),
                Word(text=" world.", start=1, end=2, speaker=0),
            ],
        ),
    )

    response = await client.get(f"/transcripts/{tid}?transcript_format=json")
    assert response.status_code == 200
    data = response.json()

    assert data["transcript_format"] == "json"
    assert "transcript" in data
    assert isinstance(data["transcript"], list)
    assert len(data["transcript"]) == 1
    assert data["transcript"][0]["speaker"] == 0
    assert data["transcript"][0]["speaker_name"] == "John Smith"
    assert data["transcript"][0]["text"] == "Hello world."


@pytest.mark.asyncio
async def test_api_transcript_format_default_is_text(client):
    """Test GET /transcripts/{id} defaults to text format."""
    response = await client.post("/transcripts", json={"name": "Test transcript"})
    assert response.status_code == 200
    tid = response.json()["id"]

    from reflector.db.transcripts import TranscriptTopic, transcripts_controller
    from reflector.processors.types import Word

    transcript = await transcripts_controller.get_by_id(tid)

    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello", start=0, end=1, speaker=0),
            ],
        ),
    )

    response = await client.get(f"/transcripts/{tid}")
    assert response.status_code == 200
    data = response.json()

    assert data["transcript_format"] == "text"
    assert "transcript" in data


@pytest.mark.asyncio
async def test_api_topics_endpoint_multitrack_segmentation(client):
    """Test GET /transcripts/{id}/topics uses sentence-based segmentation for multitrack.

    This tests the fix for TASKS2.md - ensuring /topics endpoints correctly detect
    multitrack recordings and use sentence-based segmentation instead of fragmenting
    on every speaker change.
    """
    from datetime import datetime, timezone

    from reflector.db.recordings import Recording, recordings_controller
    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    # Create a multitrack recording (has track_keys)
    recording = Recording(
        bucket_name="test-bucket",
        object_key="test-key",
        recorded_at=datetime.now(timezone.utc),
        track_keys=["track1.webm", "track2.webm"],  # This makes it multitrack
    )
    await recordings_controller.create(recording)

    # Create transcript linked to the recording
    transcript = await transcripts_controller.add(
        name="Multitrack Test",
        source_kind="file",
        recording_id=recording.id,
    )

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(id="1", speaker=0, name="Alice").model_dump(),
                TranscriptParticipant(id="2", speaker=1, name="Bob").model_dump(),
            ]
        },
    )

    # Add interleaved words (as they appear in real multitrack data)
    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello ", start=0.0, end=0.5, speaker=0),
                Word(text="I'm ", start=0.5, end=0.8, speaker=1),
                Word(text="there.", start=0.5, end=1.0, speaker=0),
                Word(text="good.", start=1.0, end=1.5, speaker=1),
            ],
        ),
    )

    # Test /topics endpoint
    response = await client.get(f"/transcripts/{transcript.id}/topics")
    assert response.status_code == 200
    data = response.json()

    assert len(data) == 1
    topic = data[0]

    # Key assertion: multitrack should produce 2 segments (one per speaker sentence)
    # Not 4 segments (one per speaker change)
    assert len(topic["segments"]) == 2

    # Check content
    segment_texts = [s["text"] for s in topic["segments"]]
    assert "Hello there." in segment_texts
    assert "I'm good." in segment_texts


@pytest.mark.asyncio
async def test_api_topics_endpoint_non_multitrack_segmentation(client):
    """Test GET /transcripts/{id}/topics uses default segmentation for non-multitrack.

    Ensures backward compatibility - transcripts without multitrack recordings
    should continue using the default speaker-change-based segmentation.
    """
    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    # Create transcript WITHOUT recording (defaulted as not multitrack) TODO better heuristic
    response = await client.post("/transcripts", json={"name": "Test transcript"})
    assert response.status_code == 200
    tid = response.json()["id"]

    transcript = await transcripts_controller.get_by_id(tid)

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(id="1", speaker=0, name="Alice").model_dump(),
                TranscriptParticipant(id="2", speaker=1, name="Bob").model_dump(),
            ]
        },
    )

    # Add interleaved words
    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello ", start=0.0, end=0.5, speaker=0),
                Word(text="I'm ", start=0.5, end=0.8, speaker=1),
                Word(text="there.", start=0.5, end=1.0, speaker=0),
                Word(text="good.", start=1.0, end=1.5, speaker=1),
            ],
        ),
    )

    # Test /topics endpoint
    response = await client.get(f"/transcripts/{tid}/topics")
    assert response.status_code == 200
    data = response.json()

    assert len(data) == 1
    topic = data[0]

    # Non-multitrack: should produce 4 segments (one per speaker change)
    assert len(topic["segments"]) == 4


@pytest.mark.asyncio
async def test_api_topics_with_words_endpoint_multitrack(client):
    """Test GET /transcripts/{id}/topics/with-words uses multitrack segmentation."""
    from datetime import datetime, timezone

    from reflector.db.recordings import Recording, recordings_controller
    from reflector.db.transcripts import (
        TranscriptParticipant,
        TranscriptTopic,
        transcripts_controller,
    )
    from reflector.processors.types import Word

    # Create multitrack recording
    recording = Recording(
        bucket_name="test-bucket",
        object_key="test-key-2",
        recorded_at=datetime.now(timezone.utc),
        track_keys=["track1.webm", "track2.webm"],
    )
    await recordings_controller.create(recording)

    transcript = await transcripts_controller.add(
        name="Multitrack Test 2",
        source_kind="file",
        recording_id=recording.id,
    )

    await transcripts_controller.update(
        transcript,
        {
            "participants": [
                TranscriptParticipant(id="1", speaker=0, name="Alice").model_dump(),
                TranscriptParticipant(id="2", speaker=1, name="Bob").model_dump(),
            ]
        },
    )

    await transcripts_controller.upsert_topic(
        transcript,
        TranscriptTopic(
            title="Topic 1",
            summary="Summary 1",
            timestamp=0,
            words=[
                Word(text="Hello ", start=0.0, end=0.5, speaker=0),
                Word(text="I'm ", start=0.5, end=0.8, speaker=1),
                Word(text="there.", start=0.5, end=1.0, speaker=0),
                Word(text="good.", start=1.0, end=1.5, speaker=1),
            ],
        ),
    )

    response = await client.get(f"/transcripts/{transcript.id}/topics/with-words")
    assert response.status_code == 200
    data = response.json()

    assert len(data) == 1
    topic = data[0]

    # Should have 2 segments (multitrack sentence-based)
    assert len(topic["segments"]) == 2
    # Should also have words field
    assert "words" in topic
    assert len(topic["words"]) == 4
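Both /topics tests hinge on the same detection rule: a transcript whose linked recording has track_keys is treated as multitrack, and everything else falls back to speaker-change segmentation. A sketch of that check as the tests imply it (the endpoint's actual code may structure this differently; the function name here is illustrative):

from reflector.db.recordings import recordings_controller

async def transcript_is_multitrack(transcript) -> bool:
    """True when the linked recording has per-participant track_keys."""
    if not transcript.recording_id:
        return False  # no recording at all: use speaker-change segmentation
    recording = await recordings_controller.get_by_id(transcript.recording_id)
    return bool(recording and recording.track_keys)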
@@ -1,5 +1,6 @@
import asyncio
import time
from unittest.mock import patch

import pytest
from httpx import ASGITransport, AsyncClient
@@ -101,3 +102,113 @@ async def test_transcript_process(
     assert response.status_code == 200
     assert len(response.json()) == 1
     assert "Hello world. How are you today?" in response.json()[0]["transcript"]
+
+
+@pytest.mark.usefixtures("setup_database")
+@pytest.mark.asyncio
+async def test_whereby_recording_uses_file_pipeline(client):
+    """Test that Whereby recordings (bucket_name but no track_keys) use file pipeline"""
+    from datetime import datetime, timezone
+
+    from reflector.db.recordings import Recording, recordings_controller
+    from reflector.db.transcripts import transcripts_controller
+
+    # Create transcript with Whereby recording (has bucket_name, no track_keys)
+    transcript = await transcripts_controller.add(
+        "",
+        source_kind="room",
+        source_language="en",
+        target_language="en",
+        user_id="test-user",
+        share_mode="public",
+    )
+
+    recording = await recordings_controller.create(
+        Recording(
+            bucket_name="whereby-bucket",
+            object_key="test-recording.mp4",  # gitleaks:allow
+            meeting_id="test-meeting",
+            recorded_at=datetime.now(timezone.utc),
+            track_keys=None,  # Whereby recordings have no track_keys
+        )
+    )
+
+    await transcripts_controller.update(
+        transcript, {"recording_id": recording.id, "status": "uploaded"}
+    )
+
+    with (
+        patch(
+            "reflector.services.transcript_process.task_pipeline_file_process"
+        ) as mock_file_pipeline,
+        patch(
+            "reflector.services.transcript_process.task_pipeline_multitrack_process"
+        ) as mock_multitrack_pipeline,
+    ):
+        response = await client.post(f"/transcripts/{transcript.id}/process")
+
+        assert response.status_code == 200
+        assert response.json()["status"] == "ok"
+
+        # Whereby recordings should use file pipeline
+        mock_file_pipeline.delay.assert_called_once_with(transcript_id=transcript.id)
+        mock_multitrack_pipeline.delay.assert_not_called()
+
+
+@pytest.mark.usefixtures("setup_database")
+@pytest.mark.asyncio
+async def test_dailyco_recording_uses_multitrack_pipeline(client):
+    """Test that Daily.co recordings (bucket_name + track_keys) use multitrack pipeline"""
+    from datetime import datetime, timezone
+
+    from reflector.db.recordings import Recording, recordings_controller
+    from reflector.db.transcripts import transcripts_controller
+
+    # Create transcript with Daily.co multitrack recording
+    transcript = await transcripts_controller.add(
+        "",
+        source_kind="room",
+        source_language="en",
+        target_language="en",
+        user_id="test-user",
+        share_mode="public",
+    )
+
+    track_keys = [
+        "recordings/test-room/track1.webm",
+        "recordings/test-room/track2.webm",
+    ]
+    recording = await recordings_controller.create(
+        Recording(
+            bucket_name="daily-bucket",
+            object_key="recordings/test-room",
+            meeting_id="test-meeting",
+            track_keys=track_keys,
+            recorded_at=datetime.now(timezone.utc),
+        )
+    )
+
+    await transcripts_controller.update(
+        transcript, {"recording_id": recording.id, "status": "uploaded"}
+    )
+
+    with (
+        patch(
+            "reflector.services.transcript_process.task_pipeline_file_process"
+        ) as mock_file_pipeline,
+        patch(
+            "reflector.services.transcript_process.task_pipeline_multitrack_process"
+        ) as mock_multitrack_pipeline,
+    ):
+        response = await client.post(f"/transcripts/{transcript.id}/process")
+
+        assert response.status_code == 200
+        assert response.json()["status"] == "ok"
+
+        # Daily.co multitrack recordings should use multitrack pipeline
+        mock_multitrack_pipeline.delay.assert_called_once_with(
+            transcript_id=transcript.id,
+            bucket_name="daily-bucket",
+            track_keys=track_keys,
+        )
+        mock_file_pipeline.delay.assert_not_called()
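These two tests pin the dispatch rule for POST /transcripts/{id}/process: a recording with track_keys selects the multitrack Celery task, otherwise the single-file task runs. Sketched from the assertions alone (the service code may add more checks; the function name is illustrative):

def dispatch_pipeline(transcript, recording):
    """Route processing by recording shape, per the two tests above."""
    if recording is not None and recording.track_keys:
        # Daily.co: one audio track per participant
        task_pipeline_multitrack_process.delay(
            transcript_id=transcript.id,
            bucket_name=recording.bucket_name,
            track_keys=recording.track_keys,
        )
    else:
        # Whereby single mixed recording (and plain uploads)
        task_pipeline_file_process.delay(transcript_id=transcript.id)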
@@ -22,13 +22,16 @@ async def test_recording_deleted_with_transcript():
        recording_id=recording.id,
    )

-    with patch("reflector.db.transcripts.get_recordings_storage") as mock_get_storage:
+    with patch("reflector.db.transcripts.get_transcripts_storage") as mock_get_storage:
        storage_instance = mock_get_storage.return_value
        storage_instance.delete_file = AsyncMock()

        await transcripts_controller.remove_by_id(transcript.id)

-        storage_instance.delete_file.assert_awaited_once_with(recording.object_key)
+        # Should be called with bucket override
+        storage_instance.delete_file.assert_awaited_once_with(
+            recording.object_key, bucket=recording.bucket_name
+        )

    assert await recordings_controller.get_by_id(recording.id) is None
    assert await transcripts_controller.get_by_id(transcript.id) is None
@@ -120,7 +120,15 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
    host, port = appserver_ws_user
    base_ws = f"http://{host}:{port}/v1/events"

-    token = _make_dummy_jwt("user-abc")
+    # Create a test user in the database
+    from reflector.db.users import user_controller
+
+    test_uid = "user-abc"
+    user = await user_controller.create_or_update(
+        id="test-user-id-abc", authentik_uid=test_uid, email="user-abc@example.com"
+    )
+
+    token = _make_dummy_jwt(test_uid)
    subprotocols = ["bearer", token]

    # Connect and then trigger an event via HTTP create

@@ -132,12 +140,13 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
    from reflector.auth import current_user, current_user_optional

    # Override auth dependencies so HTTP request is performed as the same user
+    # Use the internal user.id (not the Authentik UID)
    app.dependency_overrides[current_user] = lambda: {
-        "sub": "user-abc",
+        "sub": user.id,
        "email": "user-abc@example.com",
    }
    app.dependency_overrides[current_user_optional] = lambda: {
-        "sub": "user-abc",
+        "sub": user.id,
        "email": "user-abc@example.com",
    }
64
server/tests/test_utils_daily.py
Normal file
@@ -0,0 +1,64 @@
import pytest

from reflector.utils.daily import extract_base_room_name, parse_daily_recording_filename


@pytest.mark.parametrize(
    "daily_room_name,expected",
    [
        ("daily-20251020193458", "daily"),
        ("daily-2-20251020193458", "daily-2"),
        ("my-room-name-20251020193458", "my-room-name"),
        ("room-with-numbers-123-20251020193458", "room-with-numbers-123"),
        ("x-20251020193458", "x"),
    ],
)
def test_extract_base_room_name(daily_room_name, expected):
    assert extract_base_room_name(daily_room_name) == expected


@pytest.mark.parametrize(
    "filename,expected_recording_ts,expected_participant_id,expected_track_ts",
    [
        (
            "1763152299562-12f0b87c-97d4-4dd3-a65c-cee1f854a79c-cam-audio-1763152314582",
            1763152299562,
            "12f0b87c-97d4-4dd3-a65c-cee1f854a79c",
            1763152314582,
        ),
        (
            "1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922",
            1760988935484,
            "52f7f48b-fbab-431f-9a50-87b9abfc8255",
            1760988935922,
        ),
        (
            "1760988935484-a37c35e3-6f8e-4274-a482-e9d0f102a732-cam-audio-1760988943823",
            1760988935484,
            "a37c35e3-6f8e-4274-a482-e9d0f102a732",
            1760988943823,
        ),
        (
            "path/to/1763151171834-b6719a43-4481-483a-a8fc-2ae18b69283c-cam-audio-1763151180561",
            1763151171834,
            "b6719a43-4481-483a-a8fc-2ae18b69283c",
            1763151180561,
        ),
    ],
)
def test_parse_daily_recording_filename(
    filename, expected_recording_ts, expected_participant_id, expected_track_ts
):
    result = parse_daily_recording_filename(filename)

    assert result.recording_start_ts == expected_recording_ts
    assert result.participant_id == expected_participant_id
    assert result.track_start_ts == expected_track_ts


def test_parse_daily_recording_filename_invalid():
    with pytest.raises(ValueError, match="Invalid Daily.co recording filename"):
        parse_daily_recording_filename("invalid-filename")

    with pytest.raises(ValueError, match="Invalid Daily.co recording filename"):
        parse_daily_recording_filename("123-not-a-uuid-cam-audio-456")
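The parametrized cases fix the filename grammar: <recording start ms>-<participant UUID>-cam-audio-<track start ms>, optionally behind a path prefix. A regex sketch that passes these cases (the real helper and its return type may differ; the dataclass here is an illustrative stand-in):

import re
from dataclasses import dataclass

_FILENAME_RE = re.compile(
    r"(\d+)-"  # recording start timestamp, ms
    r"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})"  # participant UUID
    r"-cam-audio-(\d+)"  # track start timestamp, ms
)

@dataclass
class ParsedTrackFilename:  # hypothetical result type for this sketch
    recording_start_ts: int
    participant_id: str
    track_start_ts: int

def parse_daily_recording_filename(filename: str) -> ParsedTrackFilename:
    # drop any directory prefix, then require the whole basename to match
    m = _FILENAME_RE.fullmatch(filename.rsplit("/", 1)[-1])
    if not m:
        raise ValueError(f"Invalid Daily.co recording filename: {filename}")
    return ParsedTrackFilename(int(m[1]), m[2], int(m[3]))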
63
server/tests/test_utils_url.py
Normal file
@@ -0,0 +1,63 @@
"""Tests for URL utility functions."""

from reflector.utils.url import add_query_param


class TestAddQueryParam:
    """Test the add_query_param function."""

    def test_add_param_to_url_without_query(self):
        """Should add query param with ? to URL without existing params."""
        url = "https://example.com/room"
        result = add_query_param(url, "t", "token123")
        assert result == "https://example.com/room?t=token123"

    def test_add_param_to_url_with_existing_query(self):
        """Should add query param with & to URL with existing params."""
        url = "https://example.com/room?existing=param"
        result = add_query_param(url, "t", "token123")
        assert result == "https://example.com/room?existing=param&t=token123"

    def test_add_param_to_url_with_multiple_existing_params(self):
        """Should add query param to URL with multiple existing params."""
        url = "https://example.com/room?param1=value1&param2=value2"
        result = add_query_param(url, "t", "token123")
        assert (
            result == "https://example.com/room?param1=value1&param2=value2&t=token123"
        )

    def test_add_param_with_special_characters(self):
        """Should properly encode special characters in param value."""
        url = "https://example.com/room"
        result = add_query_param(url, "name", "hello world")
        assert result == "https://example.com/room?name=hello+world"

    def test_add_param_to_url_with_fragment(self):
        """Should preserve URL fragment when adding query param."""
        url = "https://example.com/room#section"
        result = add_query_param(url, "t", "token123")
        assert result == "https://example.com/room?t=token123#section"

    def test_add_param_to_url_with_query_and_fragment(self):
        """Should preserve fragment when adding param to URL with existing query."""
        url = "https://example.com/room?existing=param#section"
        result = add_query_param(url, "t", "token123")
        assert result == "https://example.com/room?existing=param&t=token123#section"

    def test_add_param_overwrites_existing_param(self):
        """Should overwrite existing param with same name."""
        url = "https://example.com/room?t=oldtoken"
        result = add_query_param(url, "t", "newtoken")
        assert result == "https://example.com/room?t=newtoken"

    def test_url_without_scheme(self):
        """Should handle URLs without scheme (relative URLs)."""
        url = "/room/path"
        result = add_query_param(url, "t", "token123")
        assert result == "/room/path?t=token123"

    def test_empty_url(self):
        """Should handle empty URL."""
        url = ""
        result = add_query_param(url, "t", "token123")
        assert result == "?t=token123"
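Everything this class asserts (? vs. & joining, same-name overwrite, fragment preservation, plus-encoding of spaces) falls out of the standard library's URL tooling. A sketch that satisfies the suite (the real reflector.utils.url helper may differ):

from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

def add_query_param(url: str, name: str, value: str) -> str:
    parts = urlsplit(url)  # keeps the #fragment separate from the query
    query = dict(parse_qsl(parts.query))
    query[name] = value  # same-name params are overwritten, as the tests expect
    return urlunsplit(parts._replace(query=urlencode(query)))  # spaces -> '+'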
@@ -22,9 +22,10 @@ AUTHENTIK_CLIENT_SECRET=your-client-secret-here

# API URLs
API_URL=http://127.0.0.1:1250
+SERVER_API_URL=http://server:1250
WEBSOCKET_URL=ws://127.0.0.1:1250
AUTH_CALLBACK_URL=http://localhost:3000/auth-callback

# Sentry
# SENTRY_DSN=https://your-dsn@sentry.io/project-id
# SENTRY_IGNORE_API_RESOLUTION_ERROR=1
@@ -1,5 +1,5 @@
"use client";
-import React, { useState, useEffect } from "react";
+import React, { useState, useEffect, useMemo } from "react";
import {
  Flex,
  Spinner,
@@ -235,15 +235,26 @@ export default function TranscriptBrowser() {

  const pageSize = 20;

+  // must be json-able
+  const searchFilters = useMemo(
+    () => ({
+      q: urlSearchQuery,
+      extras: {
+        room_id: urlRoomId || undefined,
+        source_kind: urlSourceKind || undefined,
+      },
+    }),
+    [urlSearchQuery, urlRoomId, urlSourceKind],
+  );
+
  const {
    data: searchData,
    isLoading: searchLoading,
    refetch: reloadSearch,
-  } = useTranscriptsSearch(urlSearchQuery, {
+  } = useTranscriptsSearch(searchFilters.q, {
    limit: pageSize,
    offset: paginationPageTo0Based(page) * pageSize,
-    room_id: urlRoomId || undefined,
-    source_kind: urlSourceKind || undefined,
+    ...searchFilters.extras,
  });

  const results = searchData?.results || [];
@@ -255,6 +266,12 @@ export default function TranscriptBrowser() {

  const totalPages = getTotalPages(totalResults, pageSize);

+  // reset pagination when search results change (detected by total change; good enough approximation)
+  useEffect(() => {
+    // operation is idempotent
+    setPage(FIRST_PAGE).then(() => {});
+  }, [JSON.stringify(searchFilters)]);
+
  const userName = useUserName();
  const [deletionLoading, setDeletionLoading] = useState(false);
  const cancelRef = React.useRef(null);
@@ -78,6 +78,14 @@ export default async function AppLayout({
          )}
          {featureEnabled("requireLogin") ? (
            <>
+              ·
+              <Link
+                href="/settings/api-keys"
+                as={NextLink}
+                className="font-light px-2"
+              >
+                Settings
+              </Link>
              ·
              <UserInfo />
            </>
@@ -67,6 +67,11 @@ const recordingTypeOptions: SelectOption[] = [
  { label: "Cloud", value: "cloud" },
];

+const platformOptions: SelectOption[] = [
+  { label: "Whereby", value: "whereby" },
+  { label: "Daily", value: "daily" },
+];
+
const roomInitialState = {
  name: "",
  zulipAutoPost: false,

@@ -82,6 +87,7 @@
  icsUrl: "",
  icsEnabled: false,
  icsFetchInterval: 5,
+  platform: "whereby",
};

export default function RoomsList() {

@@ -99,6 +105,11 @@ export default function RoomsList() {
  const recordingTypeCollection = createListCollection({
    items: recordingTypeOptions,
  });

+  const platformCollection = createListCollection({
+    items: platformOptions,
+  });
+
  const [roomInput, setRoomInput] = useState<null | typeof roomInitialState>(
    null,
  );
@@ -143,15 +154,24 @@ export default function RoomsList() {
|
||||
zulipStream: detailedEditedRoom.zulip_stream,
|
||||
zulipTopic: detailedEditedRoom.zulip_topic,
|
||||
isLocked: detailedEditedRoom.is_locked,
|
||||
roomMode: detailedEditedRoom.room_mode,
|
||||
roomMode:
|
||||
detailedEditedRoom.platform === "daily"
|
||||
? "group"
|
||||
: detailedEditedRoom.room_mode,
|
||||
recordingType: detailedEditedRoom.recording_type,
|
||||
recordingTrigger: detailedEditedRoom.recording_trigger,
|
||||
recordingTrigger:
|
||||
detailedEditedRoom.platform === "daily"
|
||||
? detailedEditedRoom.recording_type === "cloud"
|
||||
? "automatic-2nd-participant"
|
||||
: "none"
|
||||
: detailedEditedRoom.recording_trigger,
|
||||
isShared: detailedEditedRoom.is_shared,
|
||||
webhookUrl: detailedEditedRoom.webhook_url || "",
|
||||
webhookSecret: detailedEditedRoom.webhook_secret || "",
|
||||
icsUrl: detailedEditedRoom.ics_url || "",
|
||||
icsEnabled: detailedEditedRoom.ics_enabled || false,
|
||||
icsFetchInterval: detailedEditedRoom.ics_fetch_interval || 5,
|
||||
platform: detailedEditedRoom.platform,
|
||||
}
|
||||
: null,
|
||||
[detailedEditedRoom],
|
||||
@@ -277,21 +297,32 @@ export default function RoomsList() {
      return;
    }

    const platform: "whereby" | "daily" | null =
      room.platform === "whereby" || room.platform === "daily"
        ? room.platform
        : null;

    const roomData = {
      name: room.name,
      zulip_auto_post: room.zulipAutoPost,
      zulip_stream: room.zulipStream,
      zulip_topic: room.zulipTopic,
      is_locked: room.isLocked,
      room_mode: room.roomMode,
      room_mode: platform === "daily" ? "group" : room.roomMode,
      recording_type: room.recordingType,
      recording_trigger: room.recordingTrigger,
      recording_trigger:
        platform === "daily"
          ? room.recordingType === "cloud"
            ? "automatic-2nd-participant"
            : "none"
          : room.recordingTrigger,
      is_shared: room.isShared,
      webhook_url: room.webhookUrl,
      webhook_secret: room.webhookSecret,
      ics_url: room.icsUrl,
      ics_enabled: room.icsEnabled,
      ics_fetch_interval: room.icsFetchInterval,
      platform,
    };

    if (isEditing) {
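The inline `platform` narrowing in this hunk is the standard way to turn a loose string into the `"whereby" | "daily"` union before building the payload. A reusable type-guard version, illustrative rather than from the repo:

```tsx
const PLATFORMS = ["whereby", "daily"] as const;
type Platform = (typeof PLATFORMS)[number];

function isPlatform(value: string): value is Platform {
  return (PLATFORMS as readonly string[]).includes(value);
}

// Same result as the inline ternary in the diff:
// const platform: Platform | null =
//   isPlatform(room.platform) ? room.platform : null;
```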
@@ -339,15 +370,21 @@ export default function RoomsList() {
      zulipStream: roomData.zulip_stream,
      zulipTopic: roomData.zulip_topic,
      isLocked: roomData.is_locked,
      roomMode: roomData.room_mode,
      roomMode: roomData.platform === "daily" ? "group" : roomData.room_mode, // Daily always uses 2-200
      recordingType: roomData.recording_type,
      recordingTrigger: roomData.recording_trigger,
      recordingTrigger:
        roomData.platform === "daily"
          ? roomData.recording_type === "cloud"
            ? "automatic-2nd-participant"
            : "none"
          : roomData.recording_trigger,
      isShared: roomData.is_shared,
      webhookUrl: roomData.webhook_url || "",
      webhookSecret: roomData.webhook_secret || "",
      icsUrl: roomData.ics_url || "",
      icsEnabled: roomData.ics_enabled || false,
      icsFetchInterval: roomData.ics_fetch_interval || 5,
      platform: roomData.platform,
    });
    setEditRoomId(roomId);
    setIsEditing(true);
@@ -482,6 +519,48 @@ export default function RoomsList() {
              )}
            </Field.Root>

            <Field.Root mt={4}>
              <Field.Label>Platform</Field.Label>
              <Select.Root
                value={[room.platform]}
                onValueChange={(e) => {
                  const newPlatform = e.value[0] as "whereby" | "daily";
                  const updates: Partial<typeof room> = {
                    platform: newPlatform,
                  };
                  if (newPlatform === "daily") {
                    updates.roomMode = "group";
                    updates.recordingTrigger =
                      room.recordingType === "cloud"
                        ? "automatic-2nd-participant"
                        : "none";
                  }
                  setRoomInput({ ...room, ...updates });
                }}
                collection={platformCollection}
              >
                <Select.HiddenSelect />
                <Select.Control>
                  <Select.Trigger>
                    <Select.ValueText placeholder="Select platform" />
                  </Select.Trigger>
                  <Select.IndicatorGroup>
                    <Select.Indicator />
                  </Select.IndicatorGroup>
                </Select.Control>
                <Select.Positioner>
                  <Select.Content>
                    {platformOptions.map((option) => (
                      <Select.Item key={option.value} item={option}>
                        {option.label}
                        <Select.ItemIndicator />
                      </Select.Item>
                    ))}
                  </Select.Content>
                </Select.Positioner>
              </Select.Root>
            </Field.Root>

            <Field.Root mt={4}>
              <Checkbox.Root
                name="isLocked"
@@ -512,6 +591,7 @@ export default function RoomsList() {
                  setRoomInput({ ...room, roomMode: e.value[0] })
                }
                collection={roomModeCollection}
                disabled={room.platform === "daily"}
              >
                <Select.HiddenSelect />
                <Select.Control>
@@ -538,16 +618,26 @@ export default function RoomsList() {
              <Field.Label>Recording type</Field.Label>
              <Select.Root
                value={[room.recordingType]}
                onValueChange={(e) =>
                  setRoomInput({
                    ...room,
                    recordingType: e.value[0],
                    recordingTrigger:
                      e.value[0] !== "cloud"
                        ? "none"
                        : room.recordingTrigger,
                  })
                }
                onValueChange={(e) => {
                  const newRecordingType = e.value[0];
                  const updates: Partial<typeof room> = {
                    recordingType: newRecordingType,
                  };
                  // For Daily: if cloud, use automatic; otherwise none
                  if (room.platform === "daily") {
                    updates.recordingTrigger =
                      newRecordingType === "cloud"
                        ? "automatic-2nd-participant"
                        : "none";
                  } else {
                    // For Whereby: if not cloud, set to none
                    updates.recordingTrigger =
                      newRecordingType !== "cloud"
                        ? "none"
                        : room.recordingTrigger;
                  }
                  setRoomInput({ ...room, ...updates });
                }}
                collection={recordingTypeCollection}
              >
                <Select.HiddenSelect />
@@ -572,7 +662,7 @@ export default function RoomsList() {
              </Select.Root>
            </Field.Root>
            <Field.Root mt={4}>
              <Field.Label>Cloud recording start trigger</Field.Label>
              <Field.Label>Recording start trigger</Field.Label>
              <Select.Root
                value={[room.recordingTrigger]}
                onValueChange={(e) =>
@@ -582,7 +672,11 @@ export default function RoomsList() {
                  })
                }
                collection={recordingTriggerCollection}
                disabled={room.recordingType !== "cloud"}
                disabled={
                  room.recordingType !== "cloud" ||
                  (room.platform === "daily" &&
                    room.recordingType === "cloud")
                }
              >
                <Select.HiddenSelect />
                <Select.Control>
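One nit in the new `disabled` expression: when `room.platform === "daily"`, the inner `room.recordingType === "cloud"` check is redundant, because the first disjunct already covers every non-cloud recording type. The condition therefore reduces to a simpler equivalent (a suggested simplification, not part of the commit):

```tsx
// The trigger select is editable only for Whereby rooms with cloud recording.
const triggerDisabled =
  room.recordingType !== "cloud" || room.platform === "daily";
```

Daily rooms keep the control disabled because their trigger is pinned to `automatic-2nd-participant` (or `none`) by the handlers above.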
341 www/app/(app)/settings/api-keys/page.tsx Normal file
@@ -0,0 +1,341 @@
"use client";
import React, { useState, useRef } from "react";
import {
  Box,
  Button,
  Heading,
  Stack,
  Text,
  Input,
  Table,
  Flex,
  IconButton,
  Code,
  Dialog,
} from "@chakra-ui/react";
import { LuTrash2, LuCopy, LuPlus } from "react-icons/lu";
import { useQueryClient } from "@tanstack/react-query";
import { $api } from "../../../lib/apiClient";
import { toaster } from "../../../components/ui/toaster";

interface CreateApiKeyResponse {
  id: string;
  user_id: string;
  name: string | null;
  created_at: string;
  key: string;
}

export default function ApiKeysPage() {
  const [newKeyName, setNewKeyName] = useState("");
  const [isCreating, setIsCreating] = useState(false);
  const [createdKey, setCreatedKey] = useState<CreateApiKeyResponse | null>(
    null,
  );
  const [keyToDelete, setKeyToDelete] = useState<string | null>(null);
  const queryClient = useQueryClient();
  const cancelRef = useRef<HTMLButtonElement>(null);

  const { data: apiKeys, isLoading } = $api.useQuery(
    "get",
    "/v1/user/api-keys",
  );

  const createKeyMutation = $api.useMutation("post", "/v1/user/api-keys", {
    onSuccess: (data) => {
      setCreatedKey(data);
      setNewKeyName("");
      setIsCreating(false);
      queryClient.invalidateQueries({ queryKey: ["get", "/v1/user/api-keys"] });
      toaster.create({
        duration: 5000,
        render: () => (
          <Box bg="green.500" color="white" px={4} py={3} borderRadius="md">
            <Text fontWeight="bold">API key created</Text>
            <Text fontSize="sm">
              Make sure to copy it now - you won't see it again!
            </Text>
          </Box>
        ),
      });

      setTimeout(() => {
        const keyElement = document.querySelector(".api-key-code");
        if (keyElement) {
          const range = document.createRange();
          range.selectNodeContents(keyElement);
          const selection = window.getSelection();
          selection?.removeAllRanges();
          selection?.addRange(range);
        }
      }, 100);
    },
    onError: () => {
      toaster.create({
        duration: 3000,
        render: () => (
          <Box bg="red.500" color="white" px={4} py={3} borderRadius="md">
            <Text fontWeight="bold">Error</Text>
            <Text fontSize="sm">Failed to create API key</Text>
          </Box>
        ),
      });
    },
  });

  const deleteKeyMutation = $api.useMutation(
    "delete",
    "/v1/user/api-keys/{key_id}",
    {
      onSuccess: () => {
        queryClient.invalidateQueries({
          queryKey: ["get", "/v1/user/api-keys"],
        });
        toaster.create({
          duration: 3000,
          render: () => (
            <Box bg="green.500" color="white" px={4} py={3} borderRadius="md">
              <Text fontWeight="bold">API key deleted</Text>
            </Box>
          ),
        });
      },
      onError: () => {
        toaster.create({
          duration: 3000,
          render: () => (
            <Box bg="red.500" color="white" px={4} py={3} borderRadius="md">
              <Text fontWeight="bold">Error</Text>
              <Text fontSize="sm">Failed to delete API key</Text>
            </Box>
          ),
        });
      },
    },
  );

  const handleCreateKey = () => {
    createKeyMutation.mutate({
      body: { name: newKeyName || null },
    });
  };

  const handleCopyKey = (key: string) => {
    navigator.clipboard.writeText(key);
    toaster.create({
      duration: 2000,
      render: () => (
        <Box bg="green.500" color="white" px={4} py={3} borderRadius="md">
          <Text fontWeight="bold">Copied to clipboard</Text>
        </Box>
      ),
    });
  };

  const handleDeleteRequest = (keyId: string) => {
    setKeyToDelete(keyId);
  };

  const confirmDelete = () => {
    if (keyToDelete) {
      deleteKeyMutation.mutate({
        params: { path: { key_id: keyToDelete } },
      });
      setKeyToDelete(null);
    }
  };

  const formatDate = (dateString: string) => {
    return new Date(dateString).toLocaleDateString("en-US", {
      year: "numeric",
      month: "short",
      day: "numeric",
      hour: "2-digit",
      minute: "2-digit",
    });
  };

  return (
    <Box maxW="800px" w="100%" mx="auto" p={8}>
      <Heading mb={2}>API Keys</Heading>
      <Text color="gray.600" mb={6}>
        Manage your API keys for programmatic access to Reflector
      </Text>

      {/* Show newly created key */}
      {createdKey && (
        <Box
          mb={6}
          p={4}
          bg="green.50"
          borderWidth={1}
          borderColor="green.200"
          borderRadius="md"
        >
          <Flex justify="space-between" align="start" mb={2}>
            <Heading size="sm" color="green.800">
              API Key Created
            </Heading>
            <Button
              size="sm"
              variant="ghost"
              onClick={() => setCreatedKey(null)}
            >
              ×
            </Button>
          </Flex>
          <Text mb={2} fontSize="sm" color="green.700">
            Make sure to copy your API key now. You won't be able to see it
            again!
          </Text>
          <Flex gap={2} align="center">
            <Code
              p={2}
              flex={1}
              fontSize="sm"
              bg="white"
              className="api-key-code"
            >
              {createdKey.key}
            </Code>
            <IconButton
              aria-label="Copy API key"
              size="sm"
              onClick={() => handleCopyKey(createdKey.key)}
            >
              <LuCopy />
            </IconButton>
          </Flex>
        </Box>
      )}

      {/* Create new key */}
      <Box mb={8} p={6} borderWidth={1} borderRadius="md">
        <Heading size="md" mb={4}>
          Create New API Key
        </Heading>
        {!isCreating ? (
          <Button onClick={() => setIsCreating(true)} colorPalette="blue">
            <LuPlus /> Create API Key
          </Button>
        ) : (
          <Stack gap={4}>
            <Box>
              <Text mb={2}>Name (optional)</Text>
              <Input
                placeholder="e.g., Production API Key"
                value={newKeyName}
                onChange={(e) => setNewKeyName(e.target.value)}
              />
            </Box>
            <Flex gap={2}>
              <Button
                onClick={handleCreateKey}
                colorPalette="blue"
                loading={createKeyMutation.isPending}
              >
                Create
              </Button>
              <Button
                onClick={() => {
                  setIsCreating(false);
                  setNewKeyName("");
                }}
                variant="outline"
              >
                Cancel
              </Button>
            </Flex>
          </Stack>
        )}
      </Box>

      {/* List of API keys */}
      <Box>
        <Heading size="md" mb={4}>
          Your API Keys
        </Heading>
        {isLoading ? (
          <Text>Loading...</Text>
        ) : !apiKeys || apiKeys.length === 0 ? (
          <Text color="gray.600">
            No API keys yet. Create one to get started.
          </Text>
        ) : (
          <Table.Root>
            <Table.Header>
              <Table.Row>
                <Table.ColumnHeader>Name</Table.ColumnHeader>
                <Table.ColumnHeader>Created</Table.ColumnHeader>
                <Table.ColumnHeader>Actions</Table.ColumnHeader>
              </Table.Row>
            </Table.Header>
            <Table.Body>
              {apiKeys.map((key) => (
                <Table.Row key={key.id}>
                  <Table.Cell>
                    {key.name || <Text color="gray.500">Unnamed</Text>}
                  </Table.Cell>
                  <Table.Cell>{formatDate(key.created_at)}</Table.Cell>
                  <Table.Cell>
                    <IconButton
                      aria-label="Delete API key"
                      size="sm"
                      colorPalette="red"
                      variant="ghost"
                      onClick={() => handleDeleteRequest(key.id)}
                      loading={
                        deleteKeyMutation.isPending &&
                        deleteKeyMutation.variables?.params?.path?.key_id ===
                          key.id
                      }
                    >
                      <LuTrash2 />
                    </IconButton>
                  </Table.Cell>
                </Table.Row>
              ))}
            </Table.Body>
          </Table.Root>
        )}
      </Box>

      {/* Delete confirmation dialog */}
      <Dialog.Root
        open={!!keyToDelete}
        onOpenChange={(e) => {
          if (!e.open) setKeyToDelete(null);
        }}
        initialFocusEl={() => cancelRef.current}
      >
        <Dialog.Backdrop />
        <Dialog.Positioner>
          <Dialog.Content>
            <Dialog.Header fontSize="lg" fontWeight="bold">
              Delete API Key
            </Dialog.Header>
            <Dialog.Body>
              <Text>
                Are you sure you want to delete this API key? This action cannot
                be undone.
              </Text>
            </Dialog.Body>
            <Dialog.Footer>
              <Button
                ref={cancelRef}
                onClick={() => setKeyToDelete(null)}
                variant="outline"
                colorPalette="gray"
              >
                Cancel
              </Button>
              <Button colorPalette="red" onClick={confirmDelete} ml={3}>
                Delete
              </Button>
            </Dialog.Footer>
          </Dialog.Content>
        </Dialog.Positioner>
      </Dialog.Root>
    </Box>
  );
}
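The new page only creates, lists, and deletes keys; consuming one happens outside the browser. A minimal sketch of a programmatic call with a freshly created key. Both the endpoint path and the `Authorization: Bearer` scheme are assumptions here, since this diff shows the key-management endpoints but not how the backend expects a key to be presented:

```ts
// Hypothetical consumer script; REFLECTOR_URL, the endpoint, and the
// bearer-token header are placeholders, not confirmed by this diff.
const REFLECTOR_URL = "https://reflector.example.com";

async function listTranscripts(apiKey: string): Promise<unknown> {
  const res = await fetch(`${REFLECTOR_URL}/v1/transcripts`, {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  return res.json();
}
```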
@@ -10,7 +10,15 @@ import FinalSummary from "./finalSummary";
import TranscriptTitle from "../transcriptTitle";
import Player from "../player";
import { useRouter } from "next/navigation";
import { Box, Flex, Grid, GridItem, Skeleton, Text } from "@chakra-ui/react";
import {
  Box,
  Flex,
  Grid,
  GridItem,
  Skeleton,
  Text,
  Spinner,
} from "@chakra-ui/react";
import { useTranscriptGet } from "../../../lib/apiHooks";
import { TranscriptStatus } from "../../../lib/transcript";

@@ -28,6 +36,7 @@ export default function TranscriptDetails(details: TranscriptDetails) {
    "idle",
    "recording",
    "processing",
    "uploaded",
  ] satisfies TranscriptStatus[] as TranscriptStatus[];

  const transcript = useTranscriptGet(transcriptId);
@@ -45,15 +54,55 @@ export default function TranscriptDetails(details: TranscriptDetails) {
    useState<HTMLDivElement | null>(null);

  useEffect(() => {
    if (waiting) {
      const newUrl = "/transcripts/" + params.transcriptId + "/record";
    if (!waiting || !transcript.data) return;

    const status = transcript.data.status;
    let newUrl: string | null = null;

    if (status === "processing" || status === "uploaded") {
      newUrl = `/transcripts/${params.transcriptId}/processing`;
    } else if (status === "recording") {
      newUrl = `/transcripts/${params.transcriptId}/record`;
    } else if (status === "idle") {
      newUrl =
        transcript.data.source_kind === "file"
          ? `/transcripts/${params.transcriptId}/upload`
          : `/transcripts/${params.transcriptId}/record`;
    }

    if (newUrl) {
      // Shallow redirection does not work on NextJS 13
      // https://github.com/vercel/next.js/discussions/48110
      // https://github.com/vercel/next.js/discussions/49540
      router.replace(newUrl);
      // history.replaceState({}, "", newUrl);
    }
  }, [waiting]);
  }, [waiting, transcript.data?.status, transcript.data?.source_kind]);

  if (waiting) {
    return (
      <Box>
        <Box
          w="full"
          background="gray.bg"
          border={"2px solid"}
          borderColor={"gray.bg"}
          borderRadius={8}
          p={6}
          minH="100%"
          display="flex"
          alignItems="center"
          justifyContent="center"
        >
          <Flex direction="column" align="center" gap={3}>
            <Spinner size="xl" color="blue.500" />
            <Text color="gray.600" textAlign="center">
              Loading transcript...
            </Text>
          </Flex>
        </Box>
      </Box>
    );
  }

  if (transcript.error || topics?.error) {
    return (
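The rewritten effect above is a status-to-route dispatch. Factoring it into a pure function (an illustrative refactor, not part of the commit) makes the mapping testable without mounting the component:

```tsx
// Statuses taken from the `waiting` list in this diff; anything else returns
// null, meaning the details view can stay where it is.
function routeForStatus(
  id: string,
  status: string,
  sourceKind: string,
): string | null {
  switch (status) {
    case "processing":
    case "uploaded":
      return `/transcripts/${id}/processing`;
    case "recording":
      return `/transcripts/${id}/record`;
    case "idle":
      return sourceKind === "file"
        ? `/transcripts/${id}/upload`
        : `/transcripts/${id}/record`;
    default:
      return null;
  }
}
```

The effect then collapses to `const url = routeForStatus(...); if (url) router.replace(url);`, with the same dependency list as in the diff.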
@@ -68,15 +117,6 @@ export default function TranscriptDetails(details: TranscriptDetails) {
    return <Modal title="Loading" text={"Loading transcript..."} />;
  }

  if (mp3.error) {
    return (
      <Modal
        title="Transcription error"
        text={`There was an error loading the recording. Error: ${mp3.error}`}
      />
    );
  }

  return (
    <>
      <Grid
@@ -98,7 +138,12 @@ export default function TranscriptDetails(details: TranscriptDetails) {
        />
      ) : !mp3.loading && (waveform.error || mp3.error) ? (
        <Box p={4} bg="red.100" borderRadius="md">
          <Text>Error loading this recording</Text>
          <Text>
            Error loading{" "}
            {[waveform.error && "waveform", mp3.error && "mp3"]
              .filter(Boolean)
              .join(" and ")}
          </Text>
        </Box>
      ) : (
        <Skeleton h={14} />
Some files were not shown because too many files have changed in this diff.