mirror of https://github.com/Monadical-SAS/reflector.git
synced 2026-03-24 07:56:46 +00:00

Compare commits (28 commits):

22a50bb94d, 504ca74184, a455b8090a, 6b0292d5f0, 304315daaf, 7845f679c3, c155f66982,
a682846645, 4235ab4293, f5ec2d28cf, ac46c60a7c, 1d1a520be9, 9e64d52461, 0931095f49,
4d915e2a9f, 045eae8ff2, f6cc03286b, 7f9ce7f13a, 66772efbfe, d79ec4149a, 69f7cce0fd,
4fb60955d4, f428b9e3f2, 25bcdb16a8, 5d547586ef, 815e87056d, bc6bb63c32, e7dd8b57d1
.github/workflows/selfhost-script.yml (vendored, new file, +36)

@@ -0,0 +1,36 @@
+# Validates the self-hosted setup script: runs with --cpu and --garage,
+# brings up services, runs health checks, then tears down.
+name: Selfhost script (CPU + Garage)
+
+on:
+  workflow_dispatch: {}
+  push:
+    branches:
+      - main
+  pull_request: {}
+
+jobs:
+  selfhost-cpu-garage:
+    runs-on: ubuntu-latest
+    timeout-minutes: 25
+    concurrency:
+      group: selfhost-${{ github.ref }}
+      cancel-in-progress: true
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Run setup-selfhosted.sh (CPU + Garage)
+        run: |
+          ./scripts/setup-selfhosted.sh --cpu --garage
+
+      - name: Quick health checks
+        run: |
+          curl -sf http://localhost:1250/health && echo " Server OK"
+          curl -sf http://localhost:3000 > /dev/null && echo " Frontend OK"
+          curl -sf http://localhost:3903/metrics > /dev/null && echo " Garage admin OK"
+
+      - name: Teardown
+        if: always()
+        run: |
+          docker compose -f docker-compose.selfhosted.yml --profile cpu --profile garage down -v --remove-orphans 2>/dev/null || true
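The same probes are handy for checking a local install by hand. A minimal sketch, assuming the default ports used in the workflow above (1250 server, 3000 frontend, 3903 Garage admin):

```bash
#!/usr/bin/env bash
# Quick local health check for a selfhosted stack started with --cpu --garage.
set -euo pipefail

check() {
  # $1 = label, $2 = URL; -sf makes curl silent and fail on HTTP errors
  if curl -sf "$2" > /dev/null; then
    echo "$1 OK"
  else
    echo "$1 FAILED ($2)" >&2
    exit 1
  fi
}

check "Server"       http://localhost:1250/health
check "Frontend"     http://localhost:3000
check "Garage admin" http://localhost:3903/metrics
```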
.github/workflows/test_server.yml (vendored, 6)

@@ -34,7 +34,7 @@ jobs:
         uv run -m pytest -v tests

   docker-amd64:
-    runs-on: linux-amd64
+    runs-on: [linux-amd64]
     concurrency:
       group: docker-amd64-${{ github.ref }}
       cancel-in-progress: true
@@ -52,12 +52,14 @@ jobs:
           github-token: ${{ secrets.GHA_CACHE_TOKEN }}

   docker-arm64:
-    runs-on: linux-arm64
+    runs-on: [linux-arm64]
     concurrency:
       group: docker-arm64-${{ github.ref }}
       cancel-in-progress: true
     steps:
       - uses: actions/checkout@v4
+      - name: Wait for Docker daemon
+        run: while ! docker version; do sleep 1; done
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
       - name: Build ARM64
.gitignore (vendored, 2)

@@ -3,6 +3,7 @@ server/.env
 server/.env.production
 .env
 Caddyfile
+.env.hatchet
 server/exportdanswer
 .vercel
 .env*.local
@@ -20,7 +21,6 @@ CLAUDE.local.md
 www/.env.development
 www/.env.production
 .playwright-mcp
-docs/pnpm-lock.yaml
 .secrets
 opencode.json
@@ -6,7 +6,7 @@ repos:
   - id: format
     name: run format
     language: system
-    entry: bash -c 'source "$HOME/.nvm/nvm.sh" && cd www && pnpm format'
+    entry: bash -c 'if [ -f "$HOME/.nvm/nvm.sh" ]; then source "$HOME/.nvm/nvm.sh"; fi; cd www && pnpm format'
    pass_filenames: false
     files: ^www/
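The new entry only sources nvm when it exists, so the hook also works on machines where pnpm is already on PATH. A quick way to verify the hook command outside pre-commit, copied verbatim from the entry above:

```bash
# Run the format hook's command directly; the guard makes the nvm step optional.
bash -c 'if [ -f "$HOME/.nvm/nvm.sh" ]; then source "$HOME/.nvm/nvm.sh"; fi; cd www && pnpm format'
```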
CHANGELOG.md (42)

@@ -1,5 +1,47 @@
 # Changelog

+## [0.38.1](https://github.com/GreyhavenHQ/reflector/compare/v0.38.0...v0.38.1) (2026-03-06)
+
+### Bug Fixes
+
+* pin hatchet sdk version ([#903](https://github.com/GreyhavenHQ/reflector/issues/903)) ([504ca74](https://github.com/GreyhavenHQ/reflector/commit/504ca74184211eda9020d0b38ba7bd2b55d09991))
+
+## [0.38.0](https://github.com/GreyhavenHQ/reflector/compare/v0.37.0...v0.38.0) (2026-03-06)
+
+### Features
+
+* 3-mode selfhosted refactoring (--gpu, --cpu, --hosted) + audio token auth fallback ([#896](https://github.com/GreyhavenHQ/reflector/issues/896)) ([a682846](https://github.com/GreyhavenHQ/reflector/commit/a6828466456407c808302e9eb8dc4b4f0614dd6f))
+
+### Bug Fixes
+
+* improve hatchet workflow reliability ([#900](https://github.com/GreyhavenHQ/reflector/issues/900)) ([c155f66](https://github.com/GreyhavenHQ/reflector/commit/c155f669825e8e2a6e929821a1ef0bd94237dc11))
+
+## [0.37.0](https://github.com/GreyhavenHQ/reflector/compare/v0.36.0...v0.37.0) (2026-03-03)
+
+### Features
+
+* enable daily co in selfhosted + only schedule tasks when necessary ([#883](https://github.com/GreyhavenHQ/reflector/issues/883)) ([045eae8](https://github.com/GreyhavenHQ/reflector/commit/045eae8ff2014a7b83061045e3c8cb25cce9d60a))
+
+### Bug Fixes
+
+* aws storage construction ([#895](https://github.com/GreyhavenHQ/reflector/issues/895)) ([f5ec2d2](https://github.com/GreyhavenHQ/reflector/commit/f5ec2d28cfa2de9b2b4aeec81966737b740689c2))
+* remaining dependabot security issues ([#890](https://github.com/GreyhavenHQ/reflector/issues/890)) ([0931095](https://github.com/GreyhavenHQ/reflector/commit/0931095f49e61216e651025ce92be460e6a9df9e))
+* test selfhosted script ([#892](https://github.com/GreyhavenHQ/reflector/issues/892)) ([4d915e2](https://github.com/GreyhavenHQ/reflector/commit/4d915e2a9fe9f05f31cbd0018d9c2580daf7854f))
+* upgrade to nextjs 16 ([#888](https://github.com/GreyhavenHQ/reflector/issues/888)) ([f6cc032](https://github.com/GreyhavenHQ/reflector/commit/f6cc03286baf3e3a115afd3b22ae993ad7a4b7e3))
+
+## [0.35.1](https://github.com/GreyhavenHQ/reflector/compare/v0.35.0...v0.35.1) (2026-02-25)
+
+### Bug Fixes
+
+* enable sentry on frontend ([#876](https://github.com/GreyhavenHQ/reflector/issues/876)) ([bc6bb63](https://github.com/GreyhavenHQ/reflector/commit/bc6bb63c32dc84be5d3b00388618d53f04f64e35))
+* switch structured output to tool-call with reflection retry ([#879](https://github.com/GreyhavenHQ/reflector/issues/879)) ([5d54758](https://github.com/GreyhavenHQ/reflector/commit/5d547586ef0f54514d1d65aacca8e57869013a82))
+
 ## [0.35.0](https://github.com/Monadical-SAS/reflector/compare/v0.34.0...v0.35.0) (2026-02-23)
@@ -6,7 +6,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
 Reflector is an AI-powered audio transcription and meeting analysis platform with real-time processing capabilities. The system consists of:

-- **Frontend**: Next.js 14 React application (`www/`) with Chakra UI, real-time WebSocket integration
+- **Frontend**: Next.js 16 React application (`www/`) with Chakra UI, real-time WebSocket integration
 - **Backend**: Python FastAPI server (`server/`) with async database operations and background processing
 - **Processing**: GPU-accelerated ML pipeline for transcription, diarization, summarization via Modal.com
 - **Infrastructure**: Redis, PostgreSQL/SQLite, Celery workers, WebRTC streaming
README.md (10)

@@ -34,6 +34,8 @@ Reflector is an AI-powered audio transcription and meeting analysis platform tha
 </tr>
 </table>

+<p align="center" style="font-size: 1.5em; font-weight: bold;">By <a href="https://greyhaven.co">Greyhaven</a></p>
+
 ## What is Reflector?

 Reflector is a web application that utilizes local models to process audio content, providing:
@@ -210,7 +212,13 @@ All new contributions should be made in a separate branch, and goes through a Pu
 ## Future Plans

 - **Daily.co integration with multitrack processing**: Support for Daily.co live rooms with per-participant audio tracks for improved diarization and transcription quality.
 - **Multi-language support enhancement**: Default language selection per room/user, automatic language detection improvements, multi-language diarization, and RTL language UI support
 - **Jitsi integration**: Self-hosted video conferencing rooms with no external API keys, full control over video infrastructure, and enhanced privacy
 - **Calendar integration**: Google Calendar and Microsoft Outlook synchronization, automatic meeting room creation, and post-meeting transcript delivery
 - **Enhanced analytics**: Meeting insights dashboard, speaker participation metrics, topic trends over time, and team collaboration patterns
 - **Advanced AI features**: Real-time sentiment analysis, emotion detection, meeting quality scores, and automated coaching suggestions
 - **Integration ecosystem**: Slack/Teams notifications, CRM integration (Salesforce, HubSpot), project management tools (Jira, Asana), and knowledge bases (Notion, Confluence)
 - **Performance improvements**: WebAssembly for client-side processing, edge computing support, and network optimization

 ## Legacy Documentation
docker-compose.selfhosted.yml

@@ -1,16 +1,20 @@
 # Self-hosted production Docker Compose — single file for everything.
 #
-# Usage: ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
-#    or: docker compose -f docker-compose.selfhosted.yml --profile gpu [--profile ollama-gpu] [--profile garage] [--profile caddy] up -d
+# Usage: ./scripts/setup-selfhosted.sh <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--garage] [--caddy]
+#    or: docker compose -f docker-compose.selfhosted.yml [--profile gpu] [--profile ollama-gpu] [--profile garage] [--profile caddy] up -d
 #
-# Specialized models (pick ONE — required):
-#   --profile gpu        NVIDIA GPU for transcription/diarization/translation
-#   --profile cpu        CPU-only for transcription/diarization/translation
+# ML processing modes (pick ONE — required):
+#   --gpu      NVIDIA GPU container for transcription/diarization/translation (profile: gpu)
+#   --cpu      In-process CPU processing on server/worker (no ML container needed)
+#   --hosted   Remote GPU service URL (no ML container needed)
 #
 # Local LLM (optional — for summarization/topics):
 #   --profile ollama-gpu   Local Ollama with NVIDIA GPU
 #   --profile ollama-cpu   Local Ollama on CPU only
 #
+# Daily.co multitrack processing (auto-detected from server/.env):
+#   --profile dailyco      Hatchet workflow engine + CPU/LLM workers
+#
 # Other optional services:
 #   --profile garage   Local S3-compatible storage (Garage)
 #   --profile caddy    Reverse proxy with auto-SSL
@@ -32,7 +36,7 @@ services:
     restart: unless-stopped
     ports:
       - "127.0.0.1:1250:1250"
-      - "50000-50100:50000-50100/udp"
+      - "51000-51100:51000-51100/udp"
     env_file:
       - ./server/.env
     environment:
@@ -42,18 +46,11 @@ services:
       REDIS_HOST: redis
       CELERY_BROKER_URL: redis://redis:6379/1
       CELERY_RESULT_BACKEND: redis://redis:6379/1
-      HATCHET_CLIENT_SERVER_URL: ""
-      HATCHET_CLIENT_HOST_PORT: ""
-      # Specialized models via gpu/cpu container (aliased as "transcription")
-      TRANSCRIPT_BACKEND: modal
-      TRANSCRIPT_URL: http://transcription:8000
-      TRANSCRIPT_MODAL_API_KEY: selfhosted
-      DIARIZATION_BACKEND: modal
-      DIARIZATION_URL: http://transcription:8000
-      TRANSLATION_BACKEND: modal
-      TRANSLATE_URL: http://transcription:8000
+      # ML backend config comes from env_file (server/.env), set per-mode by setup script
+      # HF_TOKEN needed for in-process pyannote diarization (--cpu mode)
+      HF_TOKEN: ${HF_TOKEN:-}
       # WebRTC: fixed UDP port range for ICE candidates (mapped above)
-      WEBRTC_PORT_RANGE: "50000-50100"
+      WEBRTC_PORT_RANGE: "51000-51100"
     depends_on:
       postgres:
         condition: service_healthy
@@ -76,15 +73,8 @@ services:
       REDIS_HOST: redis
       CELERY_BROKER_URL: redis://redis:6379/1
       CELERY_RESULT_BACKEND: redis://redis:6379/1
-      HATCHET_CLIENT_SERVER_URL: ""
-      HATCHET_CLIENT_HOST_PORT: ""
-      TRANSCRIPT_BACKEND: modal
-      TRANSCRIPT_URL: http://transcription:8000
-      TRANSCRIPT_MODAL_API_KEY: selfhosted
-      DIARIZATION_BACKEND: modal
-      DIARIZATION_URL: http://transcription:8000
-      TRANSLATION_BACKEND: modal
-      TRANSLATE_URL: http://transcription:8000
+      # ML backend config comes from env_file (server/.env), set per-mode by setup script
+      HF_TOKEN: ${HF_TOKEN:-}
     depends_on:
       postgres:
         condition: service_healthy
@@ -147,12 +137,14 @@ services:
   postgres:
     image: postgres:17-alpine
     restart: unless-stopped
+    command: ["postgres", "-c", "max_connections=200"]
     environment:
       POSTGRES_USER: reflector
       POSTGRES_PASSWORD: reflector
       POSTGRES_DB: reflector
     volumes:
       - postgres_data:/var/lib/postgresql/data
+      - ./server/docker/init-hatchet-db.sql:/docker-entrypoint-initdb.d/init-hatchet-db.sql:ro
    healthcheck:
       test: ["CMD-SHELL", "pg_isready -U reflector"]
       interval: 30s
@@ -161,7 +153,10 @@ services:

   # ===========================================================
   # Specialized model containers (transcription, diarization, translation)
-  # Both gpu and cpu get alias "transcription" so server config never changes.
+  # Only the gpu profile is activated by the setup script (--gpu mode).
+  # The cpu service definition is kept for manual/standalone use but is
+  # NOT activated by --cpu mode (which uses in-process local backends).
+  # Both services get alias "transcription" so server config never changes.
   # ===========================================================

   gpu:
@@ -305,6 +300,87 @@ services:
       - web
       - server

+  # ===========================================================
+  # Hatchet + Daily.co workers (optional — for Daily.co multitrack processing)
+  # Auto-enabled when DAILY_API_KEY is configured in server/.env
+  # ===========================================================
+
+  hatchet:
+    image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest
+    profiles: [dailyco]
+    restart: on-failure
+    depends_on:
+      postgres:
+        condition: service_healthy
+    ports:
+      - "8888:8888"
+      - "7078:7077"
+    env_file:
+      - ./.env.hatchet
+    environment:
+      DATABASE_URL: "postgresql://reflector:reflector@postgres:5432/hatchet?sslmode=disable&connect_timeout=30"
+      SERVER_AUTH_COOKIE_INSECURE: "t"
+      SERVER_GRPC_BIND_ADDRESS: "0.0.0.0"
+      SERVER_GRPC_INSECURE: "t"
+      SERVER_GRPC_BROADCAST_ADDRESS: hatchet:7077
+      SERVER_GRPC_PORT: "7077"
+      SERVER_AUTH_SET_EMAIL_VERIFIED: "t"
+      SERVER_INTERNAL_CLIENT_INTERNAL_GRPC_BROADCAST_ADDRESS: hatchet:7077
+    volumes:
+      - hatchet_config:/config
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8888/api/live"]
+      interval: 30s
+      timeout: 10s
+      retries: 5
+      start_period: 30s
+
+  hatchet-worker-cpu:
+    build:
+      context: ./server
+      dockerfile: Dockerfile
+    image: monadicalsas/reflector-backend:latest
+    profiles: [dailyco]
+    restart: unless-stopped
+    env_file:
+      - ./server/.env
+    environment:
+      ENTRYPOINT: hatchet-worker-cpu
+      DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
+      REDIS_HOST: redis
+      CELERY_BROKER_URL: redis://redis:6379/1
+      CELERY_RESULT_BACKEND: redis://redis:6379/1
+      HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
+      HATCHET_CLIENT_HOST_PORT: hatchet:7077
+    depends_on:
+      hatchet:
+        condition: service_healthy
+    volumes:
+      - server_data:/app/data
+
+  hatchet-worker-llm:
+    build:
+      context: ./server
+      dockerfile: Dockerfile
+    image: monadicalsas/reflector-backend:latest
+    profiles: [dailyco]
+    restart: unless-stopped
+    env_file:
+      - ./server/.env
+    environment:
+      ENTRYPOINT: hatchet-worker-llm
+      DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
+      REDIS_HOST: redis
+      CELERY_BROKER_URL: redis://redis:6379/1
+      CELERY_RESULT_BACKEND: redis://redis:6379/1
+      HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
+      HATCHET_CLIENT_HOST_PORT: hatchet:7077
+    depends_on:
+      hatchet:
+        condition: service_healthy
+    volumes:
+      - server_data:/app/data
+
 volumes:
   postgres_data:
   redis_data:
@@ -315,6 +391,7 @@ volumes:
   ollama_data:
   caddy_data:
   caddy_config:
+  hatchet_config:

 networks:
   default:
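Since the compose file no longer hardcodes ML backends, each mode's values now live in server/.env. A minimal sketch of what --cpu mode relies on, using the in-process backend names from the offline-processing doc later in this compare; this is illustrative, not the setup script's exact output, and the HF_TOKEN value is a placeholder:

```bash
# Sketch: write the in-process (--cpu) ML backend settings into server/.env.
# The setup script may also set timeouts and storage keys not shown here.
cat >> server/.env <<'EOF'
TRANSCRIPT_BACKEND=whisper
DIARIZATION_BACKEND=pyannote
TRANSLATION_BACKEND=marian
HF_TOKEN=hf_xxx   # placeholder; required for in-process pyannote diarization
EOF
```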
@@ -83,25 +83,22 @@ services:
     ports:
       - 6379:6379
   web:
-    image: node:22-alpine
+    build:
+      context: ./www
+      dockerfile: Dockerfile
     ports:
       - "3000:3000"
-    command: sh -c "corepack enable && pnpm install && pnpm dev"
     restart: unless-stopped
-    working_dir: /app
     volumes:
       - ./www:/app/
       - /app/node_modules
       - next_cache:/app/.next
     env_file:
       - ./www/.env.local
     environment:
-      - NODE_ENV=development
-      - SERVER_API_URL=http://host.docker.internal:1250
+      NODE_ENV: development
+      SERVER_API_URL: http://host.docker.internal:1250
+      KV_URL: redis://redis:6379
     extra_hosts:
       - "host.docker.internal:host-gateway"
     depends_on:
-      - server
+      redis:
+        condition: service_started

   postgres:
     image: postgres:17
docs/.dockerignore (new file, +7)

@@ -0,0 +1,7 @@
+node_modules
+build
+.git
+.gitignore
+*.log
+.DS_Store
+.env*
@@ -1,14 +1,17 @@
-FROM node:18-alpine AS builder
+FROM node:20-alpine AS builder
 WORKDIR /app

 # Install curl for fetching OpenAPI spec
 RUN apk add --no-cache curl

-# Copy package files
-COPY package*.json ./
+# Enable pnpm
+RUN corepack enable && corepack prepare pnpm@latest --activate
+
+# Copy package files and lockfile
+COPY package.json pnpm-lock.yaml* ./

 # Install dependencies
-RUN npm ci
+RUN pnpm install --frozen-lockfile

 # Copy source
 COPY . .
@@ -21,7 +24,7 @@ RUN mkdir -p ./static && curl -sf "${OPENAPI_URL}" -o ./static/openapi.json || e
 RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts

 # Build static site (skip prebuild hook by calling docusaurus directly)
-RUN npx docusaurus build
+RUN pnpm exec docusaurus build

 # Production image
 FROM nginx:alpine
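To try the docs image locally, the usual nginx pattern applies. A minimal sketch; the `reflector-docs` tag is chosen here for illustration, and the final stage is nginx:alpine listening on port 80 inside the container:

```bash
# Build the docs site image from the docs/ directory and serve it on :8080.
docker build -t reflector-docs ./docs
docker run --rm -p 8080:80 reflector-docs
# Then browse http://localhost:8080
```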
@@ -5,13 +5,13 @@ This website is built using [Docusaurus](https://docusaurus.io/), a modern stati
 ### Installation

 ```
-$ yarn
+$ pnpm install
 ```

 ### Local Development

 ```
-$ yarn start
+$ pnpm start
 ```

 This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
@@ -19,7 +19,7 @@ This command starts a local development server and opens up a browser window. Mo
 ### Build

 ```
-$ yarn build
+$ pnpm build
 ```

 This command generates static content into the `build` directory and can be served using any static contents hosting service.
@@ -29,13 +29,13 @@ This command generates static content into the `build` directory and can be serv
 Using SSH:

 ```
-$ USE_SSH=true yarn deploy
+$ USE_SSH=true pnpm deploy
 ```

 Not using SSH:

 ```
-$ GIT_USER=<Your GitHub username> yarn deploy
+$ GIT_USER=<Your GitHub username> pnpm deploy
 ```

 If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
@@ -254,15 +254,15 @@ Reflector can run completely offline:
 Control where each step happens:

 ```yaml
-# All local processing
-TRANSCRIPT_BACKEND=local
-DIARIZATION_BACKEND=local
-TRANSLATION_BACKEND=local
+# All in-process processing
+TRANSCRIPT_BACKEND=whisper
+DIARIZATION_BACKEND=pyannote
+TRANSLATION_BACKEND=marian

 # Hybrid approach
-TRANSCRIPT_BACKEND=modal       # Fast GPU processing
-DIARIZATION_BACKEND=local      # Sensitive speaker data
-TRANSLATION_BACKEND=modal      # Non-sensitive translation
+TRANSCRIPT_BACKEND=modal       # Fast GPU processing
+DIARIZATION_BACKEND=pyannote   # Sensitive speaker data
+TRANSLATION_BACKEND=modal      # Non-sensitive translation
 ```

 ### Storage Options
@@ -11,7 +11,7 @@ Reflector is built as a modern, scalable, microservices-based application design
 ### Frontend Application

-The user interface is built with **Next.js 15** using the App Router pattern, providing:
+The user interface is built with **Next.js 16** using the App Router pattern, providing:

 - Server-side rendering for optimal performance
 - Real-time WebSocket connections for live transcription
@@ -36,14 +36,15 @@ This creates `docs/static/openapi.json` (should be ~70KB) which will be copied d
 The Dockerfile is already in `docs/Dockerfile`:

 ```dockerfile
-FROM node:18-alpine AS builder
+FROM node:20-alpine AS builder
 WORKDIR /app

-# Copy package files
-COPY package*.json ./
+# Enable pnpm and copy package files + lockfile
+RUN corepack enable && corepack prepare pnpm@latest --activate
+COPY package.json pnpm-lock.yaml* ./

-# Inshall dependencies
-RUN npm ci
+# Install dependencies
+RUN pnpm install --frozen-lockfile

 # Copy source (includes static/openapi.json if pre-fetched)
 COPY . .
@@ -52,7 +53,7 @@ COPY . .
 RUN sed -i "s/onBrokenLinks: 'throw'/onBrokenLinks: 'warn'/g" docusaurus.config.ts

 # Build static site
-RUN npx docusaurus build
+RUN pnpm exec docusaurus build

 FROM nginx:alpine
 COPY --from=builder /app/build /usr/share/nginx/html
@@ -46,7 +46,7 @@ Reflector consists of three main components:
 Ready to deploy Reflector? Head over to our [Installation Guide](./installation/overview) to set up your own instance.

-For a quick overview of how Reflector processes audio, check out our [Pipeline Documentation](./pipelines/overview).
+For a quick overview of how Reflector processes audio, check out our [Pipeline Documentation](./concepts/pipeline).

 ## Open Source
@@ -124,11 +124,11 @@ const config: Config = {
       items: [
         {
           label: 'Architecture',
-          to: '/docs/reference/architecture/overview',
+          to: '/docs/concepts/overview',
         },
         {
           label: 'Pipelines',
-          to: '/docs/pipelines/overview',
+          to: '/docs/concepts/pipeline',
         },
         {
           label: 'Roadmap',
docs/package-lock.json (generated, 23526): file diff suppressed because it is too large
docs/package.json

@@ -14,26 +14,26 @@
     "write-heading-ids": "docusaurus write-heading-ids",
     "typecheck": "tsc",
     "fetch-openapi": "./scripts/fetch-openapi.sh",
-    "gen-api-docs": "npm run fetch-openapi && docusaurus gen-api-docs reflector",
-    "prebuild": "npm run fetch-openapi"
+    "gen-api-docs": "pnpm run fetch-openapi && docusaurus gen-api-docs reflector",
+    "prebuild": "pnpm run fetch-openapi"
   },
   "dependencies": {
-    "@docusaurus/core": "3.6.3",
-    "@docusaurus/preset-classic": "3.6.3",
-    "@mdx-js/react": "^3.0.0",
-    "clsx": "^2.0.0",
-    "docusaurus-plugin-openapi-docs": "^4.5.1",
-    "docusaurus-theme-openapi-docs": "^4.5.1",
-    "@docusaurus/theme-mermaid": "3.6.3",
-    "prism-react-renderer": "^2.3.0",
-    "react": "^18.0.0",
-    "react-dom": "^18.0.0"
+    "@docusaurus/core": "3.9.2",
+    "@docusaurus/preset-classic": "3.9.2",
+    "@docusaurus/theme-mermaid": "3.9.2",
+    "@mdx-js/react": "^3.1.1",
+    "clsx": "^2.1.1",
+    "docusaurus-plugin-openapi-docs": "^4.7.1",
+    "docusaurus-theme-openapi-docs": "^4.7.1",
+    "prism-react-renderer": "^2.4.1",
+    "react": "^19.2.4",
+    "react-dom": "^19.2.4"
   },
   "devDependencies": {
-    "@docusaurus/module-type-aliases": "3.6.3",
-    "@docusaurus/tsconfig": "3.6.3",
-    "@docusaurus/types": "3.6.3",
-    "typescript": "~5.6.2"
+    "@docusaurus/module-type-aliases": "3.9.2",
+    "@docusaurus/tsconfig": "3.9.2",
+    "@docusaurus/types": "3.9.2",
+    "typescript": "~5.9.3"
   },
   "browserslist": {
     "production": [
@@ -49,5 +49,16 @@
   },
   "engines": {
     "node": ">=18.0"
-  }
+  },
+  "pnpm": {
+    "overrides": {
+      "minimatch@<3.1.4": "3.1.5",
+      "minimatch@>=5.0.0 <5.1.8": "5.1.8",
+      "minimatch@>=9.0.0 <9.0.7": "9.0.7",
+      "lodash@<4.17.23": "4.17.23",
+      "js-yaml@<4.1.1": "4.1.1",
+      "gray-matter": "github:jonschlinkert/gray-matter#234163e",
+      "serialize-javascript": "7.0.4"
+    }
+  }
 }
docs/pnpm-lock.yaml (generated, new file, 13976): file diff suppressed because it is too large

docs/static/openapi.json (vendored, 4151): file diff suppressed because it is too large
@@ -39,7 +39,6 @@ Deploy Reflector on a single server with everything running in Docker. Transcrip
 - **Any S3-compatible provider** (Backblaze B2, Cloudflare R2, DigitalOcean Spaces, etc.): same fields + custom endpoint URL

 **Optional add-ons (configure after initial setup):**
 - **Daily.co** (live meeting rooms): Requires a Daily.co account (https://www.daily.co/), API key, subdomain, and an AWS S3 bucket + IAM Role for recording storage. See [Enabling Daily.co Live Rooms](#enabling-dailyco-live-rooms) below.
 - **Authentik** (user authentication): Requires an Authentik instance with an OAuth2/OIDC application configured for Reflector. See [Enabling Authentication](#enabling-authentication-authentik) below.

 ## Quick Start
@@ -54,9 +53,12 @@ cd reflector
 # Same but without a domain (self-signed cert, access via IP):
 ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy

-# CPU-only (same, but slower):
+# CPU-only (in-process ML, no GPU container):
 ./scripts/setup-selfhosted.sh --cpu --ollama-cpu --garage --caddy

+# Remote GPU service (your own hosted GPU, no local ML container):
+./scripts/setup-selfhosted.sh --hosted --garage --caddy
+
 # With password authentication (single admin user):
 ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --password mysecretpass

@@ -66,14 +68,15 @@ cd reflector

 That's it. The script generates env files and secrets, starts all containers, waits for health checks, and prints the URL.

-## Specialized Models (Required)
+## ML Processing Modes (Required)

-Pick `--gpu` or `--cpu`. This determines how **transcription, diarization, and translation** run:
+Pick `--gpu`, `--cpu`, or `--hosted`. This determines how **transcription, diarization, translation, and audio padding** run:

 | Flag | What it does | Requires |
 |------|-------------|----------|
-| `--gpu` | NVIDIA GPU acceleration for ML models | NVIDIA GPU + drivers + `nvidia-container-toolkit` |
-| `--cpu` | CPU-only (slower but works without GPU) | 8+ cores, 32GB+ RAM recommended |
+| `--gpu` | NVIDIA GPU container for ML models | NVIDIA GPU + drivers + `nvidia-container-toolkit` |
+| `--cpu` | In-process CPU processing on server/worker (no ML container) | 8+ cores, 16GB+ RAM (32GB recommended for large files) |
+| `--hosted` | Remote GPU service URL (no local ML container) | A running GPU service instance (e.g. `gpu/self_hosted/`) |

 ## Local LLM (Optional)

@@ -86,6 +89,21 @@ Optionally add `--ollama-gpu` or `--ollama-cpu` for a **local Ollama instance**
 | `--llm-model MODEL` | Choose which Ollama model to download (default: `qwen2.5:14b`) | `--ollama-gpu` or `--ollama-cpu` |
 | *(omitted)* | User configures external LLM (OpenAI, Anthropic, etc.) | LLM API key |

+### macOS / Apple Silicon
+
+`--ollama-gpu` requires an NVIDIA GPU and **does not work on macOS**. Docker on macOS cannot access Apple GPU acceleration, so the containerized Ollama will run on CPU only regardless of the flag used.
+
+For the best performance on Mac, we recommend running Ollama **natively outside Docker** (install from https://ollama.com) — this gives Ollama direct access to Apple Metal GPU acceleration. Then omit `--ollama-gpu`/`--ollama-cpu` from the setup script and point the backend to your local Ollama instance:
+
+```env
+# In server/.env
+LLM_URL=http://host.docker.internal:11434/v1
+LLM_MODEL=qwen2.5:14b
+LLM_API_KEY=not-needed
+```
+
+`--ollama-cpu` does work on macOS but will be significantly slower than a native Ollama install with Metal acceleration.
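A quick way to confirm that containers can actually reach a natively installed Ollama before relying on it. This is a sketch: `/v1/models` is the model-listing path of Ollama's OpenAI-compatible API, and it assumes curl is available inside the server image:

```bash
# From the host: is Ollama up at its default port?
curl -sf http://localhost:11434/v1/models

# From inside a container (matches the LLM_URL above): can Docker reach it?
docker compose -f docker-compose.selfhosted.yml exec server \
  curl -sf http://host.docker.internal:11434/v1/models
```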
 ### Choosing an Ollama model

 The default model is `qwen2.5:14b` (~9GB download, good multilingual support and summary quality). Override with `--llm-model`:
@@ -116,9 +134,11 @@ Browse all available models at https://ollama.com/library.

 - **`--gpu --ollama-gpu`**: Best for servers with NVIDIA GPU. Fully self-contained, no external API keys needed.
 - **`--cpu --ollama-cpu`**: No GPU available but want everything self-contained. Slower but works.
+- **`--hosted --ollama-cpu`**: Remote GPU for ML, local CPU for LLM. Great when you have a separate GPU server.
 - **`--gpu --ollama-cpu`**: GPU for transcription, CPU for LLM. Saves GPU VRAM for ML models.
 - **`--gpu`**: Have NVIDIA GPU but prefer a cloud LLM (faster/better summaries with GPT-4, Claude, etc.).
 - **`--cpu`**: No GPU, prefer cloud LLM. Slowest transcription but best summary quality.
+- **`--hosted`**: Remote GPU, cloud LLM. No local ML at all.

 ## Other Optional Flags

@@ -146,8 +166,9 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr
 4. **Generate `www/.env`** — Auto-detects server IP, sets URLs
 5. **Storage setup** — Either initializes Garage (bucket, keys, permissions) or prompts for external S3 credentials
 6. **Caddyfile** — Generates domain-specific (Let's Encrypt) or IP-specific (self-signed) configuration
-7. **Build & start** — Always builds GPU/CPU model image from source. With `--build`, also builds backend and frontend from source; otherwise pulls prebuilt images from the registry
-8. **Health checks** — Waits for each service, pulls Ollama model if needed, warns about missing LLM config
+7. **Build & start** — For `--gpu`, builds the GPU model image from source. For `--cpu` and `--hosted`, no ML container is built. With `--build`, also builds backend and frontend from source; otherwise pulls prebuilt images from the registry
+8. **Auto-detects video platforms** — If `DAILY_API_KEY` is found in `server/.env`, generates `.env.hatchet` (dashboard URL/cookie config), starts Hatchet workflow engine, and generates an API token. If any video platform is configured, enables the Rooms feature
+9. **Health checks** — Waits for each service, pulls Ollama model if needed, warns about missing LLM config

 > For a deeper dive into each step, see [How the Self-Hosted Setup Works](selfhosted-architecture.md).

@@ -166,12 +187,23 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr
 | `ADMIN_PASSWORD_HASH` | PBKDF2 hash for password auth | *(unset)* |
 | `WEBRTC_HOST` | IP advertised in WebRTC ICE candidates | Auto-detected (server IP) |
 | `TRANSCRIPT_URL` | Specialized model endpoint | `http://transcription:8000` |
 | `PADDING_BACKEND` | Audio padding backend (`pyav` or `modal`) | `modal` (selfhosted), `pyav` (default) |
 | `PADDING_URL` | Audio padding endpoint (when `PADDING_BACKEND=modal`) | `http://transcription:8000` |
 | `LLM_URL` | OpenAI-compatible LLM endpoint | Auto-set for Ollama modes |
 | `LLM_API_KEY` | LLM API key | `not-needed` for Ollama |
 | `LLM_MODEL` | LLM model name | `qwen2.5:14b` for Ollama (override with `--llm-model`) |
 | `CELERY_BEAT_POLL_INTERVAL` | Override all worker polling intervals (seconds). `0` = use individual defaults | `300` (selfhosted), `0` (other) |
 | `TRANSCRIPT_STORAGE_BACKEND` | Storage backend | `aws` |
 | `TRANSCRIPT_STORAGE_AWS_*` | S3 credentials | Auto-set for Garage |
 | `DAILY_API_KEY` | Daily.co API key (enables live rooms) | *(unset)* |
 | `DAILY_SUBDOMAIN` | Daily.co subdomain | *(unset)* |
 | `DAILYCO_STORAGE_AWS_ACCESS_KEY_ID` | AWS access key for reading Daily's recording bucket | *(unset)* |
 | `DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY` | AWS secret key for reading Daily's recording bucket | *(unset)* |
 | `HATCHET_CLIENT_TOKEN` | Hatchet API token (auto-generated) | *(unset)* |
 | `HATCHET_CLIENT_SERVER_URL` | Hatchet server URL | Auto-set when Daily.co configured |
 | `HATCHET_CLIENT_HOST_PORT` | Hatchet gRPC address | Auto-set when Daily.co configured |
 | `TRANSCRIPT_FILE_TIMEOUT` | HTTP timeout (seconds) for file transcription requests | `600` (`3600` in CPU mode) |
 | `DIARIZATION_FILE_TIMEOUT` | HTTP timeout (seconds) for file diarization requests | `600` (`3600` in CPU mode) |

 ### Frontend Environment (`www/.env`)

@@ -183,6 +215,7 @@ Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse pr
 | `NEXTAUTH_SECRET` | Auth secret | Auto-generated |
 | `FEATURE_REQUIRE_LOGIN` | Require authentication | `false` |
 | `AUTH_PROVIDER` | Auth provider (`authentik` or `credentials`) | *(unset)* |
+| `FEATURE_ROOMS` | Enable meeting rooms UI | Auto-set when video platform configured |

 ## Storage Options

@@ -341,20 +374,84 @@ By default, authentication is disabled (`AUTH_BACKEND=none`, `FEATURE_REQUIRE_LO

 ## Enabling Daily.co Live Rooms

-Daily.co enables real-time meeting rooms with automatic recording and transcription.
+Daily.co enables real-time meeting rooms with automatic recording and per-participant
+audio tracks for improved diarization. When configured, the setup script automatically
+starts the Hatchet workflow engine for multitrack recording processing.

+### Prerequisites
+
+- **Daily.co account** — Sign up at https://www.daily.co/
+- **API key** — From Daily.co Dashboard → Developers → API Keys
+- **Subdomain** — The `yourname` part of `yourname.daily.co`
+- **AWS S3 bucket** — For Daily.co to store recordings. See [Daily.co recording storage docs](https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket)
+- **IAM role ARN** — An AWS IAM role that Daily.co assumes to write recordings to your bucket
+
+### Setup
+
+1. Configure Daily.co env vars in `server/.env` **before** running the setup script:
+
-1. Create a [Daily.co](https://www.daily.co/) account
-2. Add to `server/.env`:
 ```env
+DEFAULT_VIDEO_PLATFORM=daily
 DAILY_API_KEY=your-daily-api-key
 DAILY_SUBDOMAIN=your-subdomain
 DAILY_WEBHOOK_SECRET=your-webhook-secret
-DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
-DEFAULT_VIDEO_PLATFORM=daily
+DAILYCO_STORAGE_AWS_BUCKET_NAME=your-recordings-bucket
 DAILYCO_STORAGE_AWS_REGION=us-east-1
-DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::role/DailyCoAccess
+DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::123456789:role/DailyCoAccess
+# Worker credentials for reading/deleting recordings from Daily's S3 bucket.
+# Required when transcript storage is separate from Daily's bucket
+# (e.g., selfhosted with Garage or a different S3 account).
+DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
+DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
 ```
-3. Restart the server: `docker compose -f docker-compose.selfhosted.yml restart server worker`

+> **Important:** The `DAILYCO_STORAGE_AWS_ACCESS_KEY_ID` and `SECRET_ACCESS_KEY` are AWS IAM
+> credentials that allow the Hatchet workers to **read and delete** recording files from Daily's
+> S3 bucket. These are separate from the `ROLE_ARN` (which Daily's API uses to *write* recordings).
+> Without these keys, multitrack processing will fail with 404 errors when transcript storage
+> (e.g., Garage) uses different credentials than the Daily recording bucket.
+
+2. Run the setup script as normal:
+
+```bash
+./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
+```
+
+The script detects `DAILY_API_KEY` and automatically:
+- Starts the Hatchet workflow engine (`hatchet` container)
+- Starts Hatchet CPU and LLM workers (`hatchet-worker-cpu`, `hatchet-worker-llm`)
+- Generates a `HATCHET_CLIENT_TOKEN` and saves it to `server/.env`
+- Sets `HATCHET_CLIENT_SERVER_URL` and `HATCHET_CLIENT_HOST_PORT`
+- Enables `FEATURE_ROOMS=true` in `www/.env`
+- Registers Daily.co beat tasks (recording polling, presence reconciliation)
+
+3. (Optional) For faster recording discovery, configure a Daily.co webhook:
+- In the Daily.co dashboard, add a webhook pointing to `https://your-domain/v1/daily/webhook`
+- Set `DAILY_WEBHOOK_SECRET` in `server/.env` (the signing secret from Daily.co)
+- Without webhooks, the system polls the Daily.co API every 15 seconds
+
+### What Gets Started
+
+| Service | Purpose |
+|---------|---------|
+| `hatchet` | Workflow orchestration engine (manages multitrack processing pipelines) |
+| `hatchet-worker-cpu` | CPU-heavy audio tasks (track mixdown, waveform generation) |
+| `hatchet-worker-llm` | Transcription, LLM inference (summaries, topics, titles), orchestration |
+
+### Hatchet Dashboard
+
+The Hatchet workflow engine includes a web dashboard for monitoring workflow runs and debugging. The setup script auto-generates `.env.hatchet` at the project root with the dashboard URL and cookie domain configuration. This file is git-ignored.
+
+- **With Caddy**: Accessible at `https://your-domain:8888` (TLS via Caddy)
+- **Without Caddy**: Accessible at `http://your-ip:8888` (direct port mapping)
+
+### Conditional Beat Tasks
+
+Beat tasks are registered based on which services are configured:
+
+- **Whereby tasks** (only if `WHEREBY_API_KEY` or `AWS_PROCESS_RECORDING_QUEUE_URL`): `process_messages`, `reprocess_failed_recordings`
+- **Daily.co tasks** (only if `DAILY_API_KEY`): `poll_daily_recordings`, `trigger_daily_reconciliation`, `reprocess_failed_daily_recordings`
+- **Platform tasks** (if any video platform configured): `process_meetings`, `sync_all_ics_calendars`, `create_upcoming_meetings`
+- **Always registered**: `cleanup_old_public_data` (if `PUBLIC_MODE`), `healthcheck_ping` (if `HEALTHCHECK_URL`)

 ## Enabling Real Domain with Let's Encrypt

@@ -449,6 +546,15 @@ docker compose -f docker-compose.selfhosted.yml logs server --tail 50
 For self-signed certs, your browser will warn. Click Advanced > Proceed.
 For Let's Encrypt, ensure ports 80/443 are open and DNS is pointed correctly.

+### File processing timeout on CPU
+
+CPU transcription and diarization are significantly slower than GPU. A 20-minute audio file can take 20-40 minutes to process on CPU. The setup script automatically sets `TRANSCRIPT_FILE_TIMEOUT=3600` and `DIARIZATION_FILE_TIMEOUT=3600` (1 hour) for `--cpu` mode. If you still hit timeouts with very long files, increase these values in `server/.env`:
+
+```bash
+# Increase to 2 hours for files over 1 hour
+TRANSCRIPT_FILE_TIMEOUT=7200
+DIARIZATION_FILE_TIMEOUT=7200
+```
+
+Then restart the worker: `docker compose -f docker-compose.selfhosted.yml restart worker`

 ### Summaries/topics not generating
 Check LLM configuration:
 ```bash
@@ -504,16 +610,29 @@ The setup script is idempotent — it won't overwrite existing secrets or env va
       │            │            │
       v            v            v
 ┌───────────┐  ┌─────────┐  ┌─────────┐
-│transcription│ │postgres │ │ redis   │
-│ (gpu/cpu) │  │  :5432  │  │  :6379  │
-│   :8000   │  └─────────┘  └─────────┘
-└───────────┘
+│ ML models │  │postgres │  │ redis   │
+│ (varies)  │  │  :5432  │  │  :6379  │
+└───────────┘  └─────────┘  └─────────┘
       │
 ┌─────┴─────┐  ┌─────────┐
 │  ollama   │  │ garage  │
 │ (optional)│  │(optional│
 │  :11435   │  │   S3)   │
 └───────────┘  └─────────┘
+
+┌───────────────────────────────────┐
+│ Hatchet (optional — Daily.co)     │
+│ ┌─────────┐  ┌───────────────┐    │
+│ │ hatchet │  │ hatchet-worker│    │
+│ │  :8888  │──│  -cpu / -llm  │    │
+│ └─────────┘  └───────────────┘    │
+└───────────────────────────────────┘
+
+ML models box varies by mode:
+  --gpu:    Local GPU container (transcription:8000)
+  --cpu:    In-process on server/worker (no container)
+  --hosted: Remote GPU service (user URL)
 ```

-All services communicate over Docker's internal network. Only Caddy (if enabled) exposes ports to the internet.
+All services communicate over Docker's internal network. Only Caddy (if enabled) exposes ports to the internet. Hatchet services are only started when `DAILY_API_KEY` is configured.
gpu/self_hosted/app/main.py

@@ -3,6 +3,7 @@ from contextlib import asynccontextmanager
 from fastapi import FastAPI

 from .routers.diarization import router as diarization_router
+from .routers.padding import router as padding_router
 from .routers.transcription import router as transcription_router
 from .routers.translation import router as translation_router
 from .services.transcriber import WhisperService
@@ -27,4 +28,5 @@ def create_app() -> FastAPI:
     app.include_router(transcription_router)
     app.include_router(translation_router)
     app.include_router(diarization_router)
+    app.include_router(padding_router)
     return app
gpu/self_hosted/app/routers/padding.py (new file, +199)

@@ -0,0 +1,199 @@
+"""
+Audio padding endpoint for selfhosted GPU service.
+
+CPU-intensive audio padding service for adding silence to audio tracks.
+Uses PyAV filter graph (adelay) for precise track synchronization.
+
+IMPORTANT: This padding logic is duplicated from server/reflector/utils/audio_padding.py
+for deployment isolation (self_hosted can't import from server/reflector/). If you modify
+the PyAV filter graph or padding algorithm, you MUST update both:
+- gpu/self_hosted/app/routers/padding.py (this file)
+- server/reflector/utils/audio_padding.py
+
+Constants duplicated from server/reflector/utils/audio_constants.py for same reason.
+"""
+
+import logging
+import math
+import os
+import tempfile
+from fractions import Fraction
+
+import av
+import requests
+from av.audio.resampler import AudioResampler
+from fastapi import APIRouter, Depends, HTTPException
+from pydantic import BaseModel
+
+from ..auth import apikey_auth
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(tags=["padding"])
+
+# ref B0F71CE8-FC59-4AA5-8414-DAFB836DB711
+OPUS_STANDARD_SAMPLE_RATE = 48000
+OPUS_DEFAULT_BIT_RATE = 128000
+
+S3_TIMEOUT = 60
+
+
+class PaddingRequest(BaseModel):
+    track_url: str
+    output_url: str
+    start_time_seconds: float
+    track_index: int
+
+
+class PaddingResponse(BaseModel):
+    size: int
+    cancelled: bool = False
+
+
+@router.post("/pad", dependencies=[Depends(apikey_auth)], response_model=PaddingResponse)
+def pad_track(req: PaddingRequest):
+    """Pad audio track with silence using PyAV adelay filter graph."""
+    if not req.track_url:
+        raise HTTPException(status_code=400, detail="track_url cannot be empty")
+    if not req.output_url:
+        raise HTTPException(status_code=400, detail="output_url cannot be empty")
+    if req.start_time_seconds <= 0:
+        raise HTTPException(
+            status_code=400,
+            detail=f"start_time_seconds must be positive, got {req.start_time_seconds}",
+        )
+    if req.start_time_seconds > 18000:
+        raise HTTPException(
+            status_code=400,
+            detail="start_time_seconds exceeds maximum 18000s (5 hours)",
+        )
+
+    logger.info(
+        "Padding request: track %d, delay=%.3fs", req.track_index, req.start_time_seconds
+    )
+
+    temp_dir = tempfile.mkdtemp()
+    input_path = None
+    output_path = None
+
+    try:
+        # Download source audio
+        logger.info("Downloading track for padding")
+        response = requests.get(req.track_url, stream=True, timeout=S3_TIMEOUT)
+        response.raise_for_status()
+
+        input_path = os.path.join(temp_dir, "track.webm")
+        total_bytes = 0
+        with open(input_path, "wb") as f:
+            for chunk in response.iter_content(chunk_size=8192):
+                if chunk:
+                    f.write(chunk)
+                    total_bytes += len(chunk)
+        logger.info("Track downloaded: %d bytes", total_bytes)
+
+        # Apply padding using PyAV
+        output_path = os.path.join(temp_dir, "padded.webm")
+        delay_ms = math.floor(req.start_time_seconds * 1000)
+        logger.info("Padding track %d with %dms delay using PyAV", req.track_index, delay_ms)
+
+        in_container = av.open(input_path)
+        in_stream = next((s for s in in_container.streams if s.type == "audio"), None)
+        if in_stream is None:
+            in_container.close()
+            raise HTTPException(status_code=400, detail="No audio stream in input")
+
+        with av.open(output_path, "w", format="webm") as out_container:
+            out_stream = out_container.add_stream("libopus", rate=OPUS_STANDARD_SAMPLE_RATE)
+            out_stream.bit_rate = OPUS_DEFAULT_BIT_RATE
+            graph = av.filter.Graph()
+
+            abuf_args = (
+                f"time_base=1/{OPUS_STANDARD_SAMPLE_RATE}:"
+                f"sample_rate={OPUS_STANDARD_SAMPLE_RATE}:"
+                f"sample_fmt=s16:"
+                f"channel_layout=stereo"
+            )
+            src = graph.add("abuffer", args=abuf_args, name="src")
+            aresample_f = graph.add("aresample", args="async=1", name="ares")
+            delays_arg = f"{delay_ms}|{delay_ms}"
+            adelay_f = graph.add(
+                "adelay", args=f"delays={delays_arg}:all=1", name="delay"
+            )
+            sink = graph.add("abuffersink", name="sink")
+
+            src.link_to(aresample_f)
+            aresample_f.link_to(adelay_f)
+            adelay_f.link_to(sink)
+            graph.configure()
+
+            resampler = AudioResampler(
+                format="s16", layout="stereo", rate=OPUS_STANDARD_SAMPLE_RATE
+            )
+
+            for frame in in_container.decode(in_stream):
+                out_frames = resampler.resample(frame) or []
+                for rframe in out_frames:
+                    rframe.sample_rate = OPUS_STANDARD_SAMPLE_RATE
+                    rframe.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
+                    src.push(rframe)
+
+                    while True:
+                        try:
+                            f_out = sink.pull()
+                        except Exception:
+                            break
+                        f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
+                        f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
+                        for packet in out_stream.encode(f_out):
+                            out_container.mux(packet)
+
+            # Flush filter graph
+            src.push(None)
+            while True:
+                try:
+                    f_out = sink.pull()
+                except Exception:
+                    break
+                f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE
+                f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE)
+                for packet in out_stream.encode(f_out):
+                    out_container.mux(packet)
+
+            # Flush encoder
+            for packet in out_stream.encode(None):
+                out_container.mux(packet)
+
+        in_container.close()
+
+        file_size = os.path.getsize(output_path)
+        logger.info("Padding complete: %d bytes", file_size)
+
+        # Upload padded track
+        logger.info("Uploading padded track to S3")
+        with open(output_path, "rb") as f:
+            upload_response = requests.put(req.output_url, data=f, timeout=S3_TIMEOUT)
+            upload_response.raise_for_status()
+        logger.info("Upload complete: %d bytes", file_size)
+
+        return PaddingResponse(size=file_size)
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error("Padding failed for track %d: %s", req.track_index, e, exc_info=True)
+        raise HTTPException(status_code=500, detail=f"Padding failed: {e}") from e
+    finally:
+        if input_path and os.path.exists(input_path):
+            try:
+                os.unlink(input_path)
+            except Exception as e:
+                logger.warning("Failed to cleanup input file: %s", e)
+        if output_path and os.path.exists(output_path):
+            try:
+                os.unlink(output_path)
+            except Exception as e:
+                logger.warning("Failed to cleanup output file: %s", e)
+        try:
+            os.rmdir(temp_dir)
+        except Exception as e:
+            logger.warning("Failed to cleanup temp directory: %s", e)
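A sketch of calling the new endpoint once the selfhosted GPU service is running, from inside the compose network where the service is aliased `transcription`. The URLs are illustrative presigned S3 URLs, and the auth header name/scheme is an assumption (match whatever `apikey_auth` in `gpu/self_hosted/app/auth.py` expects); the body fields come from `PaddingRequest` above:

```bash
# Hypothetical invocation: pad track 0 with 2.5s of leading silence.
# track_url must be readable (GET) and output_url writable (PUT),
# e.g. presigned S3 URLs. Auth header is an assumption; see app/auth.py.
curl -sf -X POST http://transcription:8000/pad \
  -H "Authorization: Bearer selfhosted" \
  -H "Content-Type: application/json" \
  -d '{
        "track_url": "https://example.com/presigned-get/track.webm",
        "output_url": "https://example.com/presigned-put/padded.webm",
        "start_time_seconds": 2.5,
        "track_index": 0
      }'
# Expected response shape: {"size": <bytes of padded webm>, "cancelled": false}
```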
gpu/self_hosted/pyproject.toml

@@ -11,9 +11,11 @@ dependencies = [
     "faster-whisper>=1.1.0",
     "librosa==0.10.1",
     "numpy<2",
-    "silero-vad==5.1.0",
+    "silero-vad==5.1.2",
     "transformers>=4.35.0",
     "sentencepiece",
-    "pyannote.audio==3.1.0",
+    "pyannote.audio==3.4.0",
+    "pytorch-lightning<2.6",
     "torchaudio>=2.3.0",
+    "av>=13.1.0",
 ]
423
gpu/self_hosted/uv.lock
generated
423
gpu/self_hosted/uv.lock
generated
@@ -1,5 +1,5 @@
|
||||
version = 1
|
||||
revision = 2
|
||||
revision = 3
|
||||
requires-python = ">=3.12"
|
||||
|
||||
[[package]]
|
||||
@@ -13,7 +13,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "aiohttp"
|
||||
version = "3.12.15"
|
||||
version = "3.13.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiohappyeyeballs" },
|
||||
@@ -24,42 +24,76 @@ dependencies = [
|
||||
{ name = "propcache" },
|
||||
{ name = "yarl" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/63/97/77cb2450d9b35f517d6cf506256bf4f5bda3f93a66b4ad64ba7fc917899c/aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7", size = 702333, upload-time = "2025-07-29T05:50:46.507Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/83/6d/0544e6b08b748682c30b9f65640d006e51f90763b41d7c546693bc22900d/aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444", size = 476948, upload-time = "2025-07-29T05:50:48.067Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/1d/c8c40e611e5094330284b1aea8a4b02ca0858f8458614fa35754cab42b9c/aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d", size = 469787, upload-time = "2025-07-29T05:50:49.669Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/7d/b76438e70319796bfff717f325d97ce2e9310f752a267bfdf5192ac6082b/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c", size = 1716590, upload-time = "2025-07-29T05:50:51.368Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/b1/60370d70cdf8b269ee1444b390cbd72ce514f0d1cd1a715821c784d272c9/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0", size = 1699241, upload-time = "2025-07-29T05:50:53.628Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/2b/4968a7b8792437ebc12186db31523f541943e99bda8f30335c482bea6879/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab", size = 1754335, upload-time = "2025-07-29T05:50:55.394Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/c1/49524ed553f9a0bec1a11fac09e790f49ff669bcd14164f9fab608831c4d/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb", size = 1800491, upload-time = "2025-07-29T05:50:57.202Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/5e/3bf5acea47a96a28c121b167f5ef659cf71208b19e52a88cdfa5c37f1fcc/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545", size = 1719929, upload-time = "2025-07-29T05:50:59.192Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/39/94/8ae30b806835bcd1cba799ba35347dee6961a11bd507db634516210e91d8/aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c", size = 1635733, upload-time = "2025-07-29T05:51:01.394Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/46/06cdef71dd03acd9da7f51ab3a9107318aee12ad38d273f654e4f981583a/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd", size = 1696790, upload-time = "2025-07-29T05:51:03.657Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/90/6b4cfaaf92ed98d0ec4d173e78b99b4b1a7551250be8937d9d67ecb356b4/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f", size = 1718245, upload-time = "2025-07-29T05:51:05.911Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/e6/2593751670fa06f080a846f37f112cbe6f873ba510d070136a6ed46117c6/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d", size = 1658899, upload-time = "2025-07-29T05:51:07.753Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/28/c15bacbdb8b8eb5bf39b10680d129ea7410b859e379b03190f02fa104ffd/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519", size = 1738459, upload-time = "2025-07-29T05:51:09.56Z" },
{ url = "https://files.pythonhosted.org/packages/00/de/c269cbc4faa01fb10f143b1670633a8ddd5b2e1ffd0548f7aa49cb5c70e2/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea", size = 1766434, upload-time = "2025-07-29T05:51:11.423Z" },
{ url = "https://files.pythonhosted.org/packages/52/b0/4ff3abd81aa7d929b27d2e1403722a65fc87b763e3a97b3a2a494bfc63bc/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3", size = 1726045, upload-time = "2025-07-29T05:51:13.689Z" },
{ url = "https://files.pythonhosted.org/packages/71/16/949225a6a2dd6efcbd855fbd90cf476052e648fb011aa538e3b15b89a57a/aiohttp-3.12.15-cp312-cp312-win32.whl", hash = "sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1", size = 423591, upload-time = "2025-07-29T05:51:15.452Z" },
{ url = "https://files.pythonhosted.org/packages/2b/d8/fa65d2a349fe938b76d309db1a56a75c4fb8cc7b17a398b698488a939903/aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34", size = 450266, upload-time = "2025-07-29T05:51:17.239Z" },
{ url = "https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" },
{ url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" },
{ url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" },
{ url = "https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" },
{ url = "https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" },
{ url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" },
{ url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" },
{ url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" },
{ url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" },
{ url = "https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, upload-time = "2025-07-29T05:51:37.215Z" },
{ url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" },
{ url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" },
{ url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" },
{ url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" },
{ url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = "2025-07-29T05:51:48.203Z" },
{ url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" },
{ url = "https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" },
{ url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" },
{ url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" },
{ url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" },
{ url = "https://files.pythonhosted.org/packages/8d/a8/5a35dc56a06a2c90d4742cbf35294396907027f80eea696637945a106f25/aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29", size = 1737839, upload-time = "2026-01-03T17:30:19.422Z" },
{ url = "https://files.pythonhosted.org/packages/bf/62/4b9eeb331da56530bf2e198a297e5303e1c1ebdceeb00fe9b568a65c5a0c/aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3", size = 1703932, upload-time = "2026-01-03T17:30:21.756Z" },
{ url = "https://files.pythonhosted.org/packages/7c/f6/af16887b5d419e6a367095994c0b1332d154f647e7dc2bd50e61876e8e3d/aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d", size = 1771906, upload-time = "2026-01-03T17:30:23.932Z" },
{ url = "https://files.pythonhosted.org/packages/ce/83/397c634b1bcc24292fa1e0c7822800f9f6569e32934bdeef09dae7992dfb/aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463", size = 1871020, upload-time = "2026-01-03T17:30:26Z" },
{ url = "https://files.pythonhosted.org/packages/86/f6/a62cbbf13f0ac80a70f71b1672feba90fdb21fd7abd8dbf25c0105fb6fa3/aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc", size = 1755181, upload-time = "2026-01-03T17:30:27.554Z" },
{ url = "https://files.pythonhosted.org/packages/0a/87/20a35ad487efdd3fba93d5843efdfaa62d2f1479eaafa7453398a44faf13/aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf", size = 1561794, upload-time = "2026-01-03T17:30:29.254Z" },
{ url = "https://files.pythonhosted.org/packages/de/95/8fd69a66682012f6716e1bc09ef8a1a2a91922c5725cb904689f112309c4/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033", size = 1697900, upload-time = "2026-01-03T17:30:31.033Z" },
{ url = "https://files.pythonhosted.org/packages/e5/66/7b94b3b5ba70e955ff597672dad1691333080e37f50280178967aff68657/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f", size = 1728239, upload-time = "2026-01-03T17:30:32.703Z" },
{ url = "https://files.pythonhosted.org/packages/47/71/6f72f77f9f7d74719692ab65a2a0252584bf8d5f301e2ecb4c0da734530a/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679", size = 1740527, upload-time = "2026-01-03T17:30:34.695Z" },
{ url = "https://files.pythonhosted.org/packages/fa/b4/75ec16cbbd5c01bdaf4a05b19e103e78d7ce1ef7c80867eb0ace42ff4488/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423", size = 1554489, upload-time = "2026-01-03T17:30:36.864Z" },
{ url = "https://files.pythonhosted.org/packages/52/8f/bc518c0eea29f8406dcf7ed1f96c9b48e3bc3995a96159b3fc11f9e08321/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce", size = 1767852, upload-time = "2026-01-03T17:30:39.433Z" },
{ url = "https://files.pythonhosted.org/packages/9d/f2/a07a75173124f31f11ea6f863dc44e6f09afe2bca45dd4e64979490deab1/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a", size = 1722379, upload-time = "2026-01-03T17:30:41.081Z" },
{ url = "https://files.pythonhosted.org/packages/3c/4a/1a3fee7c21350cac78e5c5cef711bac1b94feca07399f3d406972e2d8fcd/aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046", size = 428253, upload-time = "2026-01-03T17:30:42.644Z" },
{ url = "https://files.pythonhosted.org/packages/d9/b7/76175c7cb4eb73d91ad63c34e29fc4f77c9386bba4a65b53ba8e05ee3c39/aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57", size = 455407, upload-time = "2026-01-03T17:30:44.195Z" },
{ url = "https://files.pythonhosted.org/packages/97/8a/12ca489246ca1faaf5432844adbfce7ff2cc4997733e0af120869345643a/aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c", size = 734190, upload-time = "2026-01-03T17:30:45.832Z" },
{ url = "https://files.pythonhosted.org/packages/32/08/de43984c74ed1fca5c014808963cc83cb00d7bb06af228f132d33862ca76/aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9", size = 491783, upload-time = "2026-01-03T17:30:47.466Z" },
{ url = "https://files.pythonhosted.org/packages/17/f8/8dd2cf6112a5a76f81f81a5130c57ca829d101ad583ce57f889179accdda/aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3", size = 490704, upload-time = "2026-01-03T17:30:49.373Z" },
{ url = "https://files.pythonhosted.org/packages/6d/40/a46b03ca03936f832bc7eaa47cfbb1ad012ba1be4790122ee4f4f8cba074/aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf", size = 1720652, upload-time = "2026-01-03T17:30:50.974Z" },
{ url = "https://files.pythonhosted.org/packages/f7/7e/917fe18e3607af92657e4285498f500dca797ff8c918bd7d90b05abf6c2a/aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6", size = 1692014, upload-time = "2026-01-03T17:30:52.729Z" },
{ url = "https://files.pythonhosted.org/packages/71/b6/cefa4cbc00d315d68973b671cf105b21a609c12b82d52e5d0c9ae61d2a09/aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d", size = 1759777, upload-time = "2026-01-03T17:30:54.537Z" },
{ url = "https://files.pythonhosted.org/packages/fb/e3/e06ee07b45e59e6d81498b591fc589629be1553abb2a82ce33efe2a7b068/aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261", size = 1861276, upload-time = "2026-01-03T17:30:56.512Z" },
{ url = "https://files.pythonhosted.org/packages/7c/24/75d274228acf35ceeb2850b8ce04de9dd7355ff7a0b49d607ee60c29c518/aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0", size = 1743131, upload-time = "2026-01-03T17:30:58.256Z" },
{ url = "https://files.pythonhosted.org/packages/04/98/3d21dde21889b17ca2eea54fdcff21b27b93f45b7bb94ca029c31ab59dc3/aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730", size = 1556863, upload-time = "2026-01-03T17:31:00.445Z" },
{ url = "https://files.pythonhosted.org/packages/9e/84/da0c3ab1192eaf64782b03971ab4055b475d0db07b17eff925e8c93b3aa5/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91", size = 1682793, upload-time = "2026-01-03T17:31:03.024Z" },
{ url = "https://files.pythonhosted.org/packages/ff/0f/5802ada182f575afa02cbd0ec5180d7e13a402afb7c2c03a9aa5e5d49060/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3", size = 1716676, upload-time = "2026-01-03T17:31:04.842Z" },
{ url = "https://files.pythonhosted.org/packages/3f/8c/714d53bd8b5a4560667f7bbbb06b20c2382f9c7847d198370ec6526af39c/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4", size = 1733217, upload-time = "2026-01-03T17:31:06.868Z" },
{ url = "https://files.pythonhosted.org/packages/7d/79/e2176f46d2e963facea939f5be2d26368ce543622be6f00a12844d3c991f/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998", size = 1552303, upload-time = "2026-01-03T17:31:08.958Z" },
{ url = "https://files.pythonhosted.org/packages/ab/6a/28ed4dea1759916090587d1fe57087b03e6c784a642b85ef48217b0277ae/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0", size = 1763673, upload-time = "2026-01-03T17:31:10.676Z" },
{ url = "https://files.pythonhosted.org/packages/e8/35/4a3daeb8b9fab49240d21c04d50732313295e4bd813a465d840236dd0ce1/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591", size = 1721120, upload-time = "2026-01-03T17:31:12.575Z" },
{ url = "https://files.pythonhosted.org/packages/bc/9f/d643bb3c5fb99547323e635e251c609fbbc660d983144cfebec529e09264/aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf", size = 427383, upload-time = "2026-01-03T17:31:14.382Z" },
{ url = "https://files.pythonhosted.org/packages/4e/f1/ab0395f8a79933577cdd996dd2f9aa6014af9535f65dddcf88204682fe62/aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e", size = 453899, upload-time = "2026-01-03T17:31:15.958Z" },
{ url = "https://files.pythonhosted.org/packages/99/36/5b6514a9f5d66f4e2597e40dea2e3db271e023eb7a5d22defe96ba560996/aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808", size = 737238, upload-time = "2026-01-03T17:31:17.909Z" },
{ url = "https://files.pythonhosted.org/packages/f7/49/459327f0d5bcd8c6c9ca69e60fdeebc3622861e696490d8674a6d0cb90a6/aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415", size = 492292, upload-time = "2026-01-03T17:31:19.919Z" },
{ url = "https://files.pythonhosted.org/packages/e8/0b/b97660c5fd05d3495b4eb27f2d0ef18dc1dc4eff7511a9bf371397ff0264/aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f", size = 493021, upload-time = "2026-01-03T17:31:21.636Z" },
{ url = "https://files.pythonhosted.org/packages/54/d4/438efabdf74e30aeceb890c3290bbaa449780583b1270b00661126b8aae4/aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6", size = 1717263, upload-time = "2026-01-03T17:31:23.296Z" },
{ url = "https://files.pythonhosted.org/packages/71/f2/7bddc7fd612367d1459c5bcf598a9e8f7092d6580d98de0e057eb42697ad/aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687", size = 1669107, upload-time = "2026-01-03T17:31:25.334Z" },
{ url = "https://files.pythonhosted.org/packages/00/5a/1aeaecca40e22560f97610a329e0e5efef5e0b5afdf9f857f0d93839ab2e/aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26", size = 1760196, upload-time = "2026-01-03T17:31:27.394Z" },
{ url = "https://files.pythonhosted.org/packages/f8/f8/0ff6992bea7bd560fc510ea1c815f87eedd745fe035589c71ce05612a19a/aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a", size = 1843591, upload-time = "2026-01-03T17:31:29.238Z" },
{ url = "https://files.pythonhosted.org/packages/e3/d1/e30e537a15f53485b61f5be525f2157da719819e8377298502aebac45536/aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1", size = 1720277, upload-time = "2026-01-03T17:31:31.053Z" },
{ url = "https://files.pythonhosted.org/packages/84/45/23f4c451d8192f553d38d838831ebbc156907ea6e05557f39563101b7717/aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25", size = 1548575, upload-time = "2026-01-03T17:31:32.87Z" },
{ url = "https://files.pythonhosted.org/packages/6a/ed/0a42b127a43712eda7807e7892c083eadfaf8429ca8fb619662a530a3aab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603", size = 1679455, upload-time = "2026-01-03T17:31:34.76Z" },
{ url = "https://files.pythonhosted.org/packages/2e/b5/c05f0c2b4b4fe2c9d55e73b6d3ed4fd6c9dc2684b1d81cbdf77e7fad9adb/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a", size = 1687417, upload-time = "2026-01-03T17:31:36.699Z" },
{ url = "https://files.pythonhosted.org/packages/c9/6b/915bc5dad66aef602b9e459b5a973529304d4e89ca86999d9d75d80cbd0b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926", size = 1729968, upload-time = "2026-01-03T17:31:38.622Z" },
{ url = "https://files.pythonhosted.org/packages/11/3b/e84581290a9520024a08640b63d07673057aec5ca548177a82026187ba73/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba", size = 1545690, upload-time = "2026-01-03T17:31:40.57Z" },
{ url = "https://files.pythonhosted.org/packages/f5/04/0c3655a566c43fd647c81b895dfe361b9f9ad6d58c19309d45cff52d6c3b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c", size = 1746390, upload-time = "2026-01-03T17:31:42.857Z" },
{ url = "https://files.pythonhosted.org/packages/1f/53/71165b26978f719c3419381514c9690bd5980e764a09440a10bb816ea4ab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43", size = 1702188, upload-time = "2026-01-03T17:31:44.984Z" },
{ url = "https://files.pythonhosted.org/packages/29/a7/cbe6c9e8e136314fa1980da388a59d2f35f35395948a08b6747baebb6aa6/aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1", size = 433126, upload-time = "2026-01-03T17:31:47.463Z" },
{ url = "https://files.pythonhosted.org/packages/de/56/982704adea7d3b16614fc5936014e9af85c0e34b58f9046655817f04306e/aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984", size = 459128, upload-time = "2026-01-03T17:31:49.2Z" },
{ url = "https://files.pythonhosted.org/packages/6c/2a/3c79b638a9c3d4658d345339d22070241ea341ed4e07b5ac60fb0f418003/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c", size = 769512, upload-time = "2026-01-03T17:31:51.134Z" },
{ url = "https://files.pythonhosted.org/packages/29/b9/3e5014d46c0ab0db8707e0ac2711ed28c4da0218c358a4e7c17bae0d8722/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592", size = 506444, upload-time = "2026-01-03T17:31:52.85Z" },
{ url = "https://files.pythonhosted.org/packages/90/03/c1d4ef9a054e151cd7839cdc497f2638f00b93cbe8043983986630d7a80c/aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f", size = 510798, upload-time = "2026-01-03T17:31:54.91Z" },
{ url = "https://files.pythonhosted.org/packages/ea/76/8c1e5abbfe8e127c893fe7ead569148a4d5a799f7cf958d8c09f3eedf097/aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29", size = 1868835, upload-time = "2026-01-03T17:31:56.733Z" },
{ url = "https://files.pythonhosted.org/packages/8e/ac/984c5a6f74c363b01ff97adc96a3976d9c98940b8969a1881575b279ac5d/aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc", size = 1720486, upload-time = "2026-01-03T17:31:58.65Z" },
{ url = "https://files.pythonhosted.org/packages/b2/9a/b7039c5f099c4eb632138728828b33428585031a1e658d693d41d07d89d1/aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2", size = 1847951, upload-time = "2026-01-03T17:32:00.989Z" },
{ url = "https://files.pythonhosted.org/packages/3c/02/3bec2b9a1ba3c19ff89a43a19324202b8eb187ca1e928d8bdac9bbdddebd/aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587", size = 1941001, upload-time = "2026-01-03T17:32:03.122Z" },
{ url = "https://files.pythonhosted.org/packages/37/df/d879401cedeef27ac4717f6426c8c36c3091c6e9f08a9178cc87549c537f/aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8", size = 1797246, upload-time = "2026-01-03T17:32:05.255Z" },
{ url = "https://files.pythonhosted.org/packages/8d/15/be122de1f67e6953add23335c8ece6d314ab67c8bebb3f181063010795a7/aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632", size = 1627131, upload-time = "2026-01-03T17:32:07.607Z" },
{ url = "https://files.pythonhosted.org/packages/12/12/70eedcac9134cfa3219ab7af31ea56bc877395b1ac30d65b1bc4b27d0438/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64", size = 1795196, upload-time = "2026-01-03T17:32:09.59Z" },
{ url = "https://files.pythonhosted.org/packages/32/11/b30e1b1cd1f3054af86ebe60df96989c6a414dd87e27ad16950eee420bea/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0", size = 1782841, upload-time = "2026-01-03T17:32:11.445Z" },
{ url = "https://files.pythonhosted.org/packages/88/0d/d98a9367b38912384a17e287850f5695c528cff0f14f791ce8ee2e4f7796/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56", size = 1795193, upload-time = "2026-01-03T17:32:13.705Z" },
{ url = "https://files.pythonhosted.org/packages/43/a5/a2dfd1f5ff5581632c7f6a30e1744deda03808974f94f6534241ef60c751/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72", size = 1621979, upload-time = "2026-01-03T17:32:15.965Z" },
{ url = "https://files.pythonhosted.org/packages/fa/f0/12973c382ae7c1cccbc4417e129c5bf54c374dfb85af70893646e1f0e749/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df", size = 1822193, upload-time = "2026-01-03T17:32:18.219Z" },
{ url = "https://files.pythonhosted.org/packages/3c/5f/24155e30ba7f8c96918af1350eb0663e2430aad9e001c0489d89cd708ab1/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa", size = 1769801, upload-time = "2026-01-03T17:32:20.25Z" },
{ url = "https://files.pythonhosted.org/packages/eb/f8/7314031ff5c10e6ece114da79b338ec17eeff3a079e53151f7e9f43c4723/aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767", size = 466523, upload-time = "2026-01-03T17:32:22.215Z" },
{ url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" },
]

[[package]]
@@ -89,6 +123,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/39/4a/4c61d4c84cfd9befb6fa08a702535b27b21fff08c946bc2f6139decbf7f7/alembic-1.16.5-py3-none-any.whl", hash = "sha256:e845dfe090c5ffa7b92593ae6687c5cb1a101e91fa53868497dbd79847f9dbe3", size = 247355, upload-time = "2025-08-27T18:02:07.37Z" },
]

[[package]]
name = "annotated-doc"
version = "0.0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
]

[[package]]
name = "annotated-types"
version = "0.7.0"
@@ -460,16 +503,18 @@ wheels = [

[[package]]
name = "fastapi"
version = "0.116.1"
version = "0.133.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-doc" },
{ name = "pydantic" },
{ name = "starlette" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" }
sdist = { url = "https://files.pythonhosted.org/packages/22/6f/0eafed8349eea1fa462238b54a624c8b408cd1ba2795c8e64aa6c34f8ab7/fastapi-0.133.1.tar.gz", hash = "sha256:ed152a45912f102592976fde6cbce7dae1a8a1053da94202e51dd35d184fadd6", size = 378741, upload-time = "2026-02-25T18:18:17.398Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" },
{ url = "https://files.pythonhosted.org/packages/d2/c9/a175a7779f3599dfa4adfc97a6ce0e157237b3d7941538604aadaf97bfb6/fastapi-0.133.1-py3-none-any.whl", hash = "sha256:658f34ba334605b1617a65adf2ea6461901bdb9af3a3080d63ff791ecf7dc2e2", size = 109029, upload-time = "2026-02-25T18:18:18.578Z" },
]

[package.optional-dependencies]
@@ -478,6 +523,8 @@ standard = [
{ name = "fastapi-cli", extra = ["standard"] },
{ name = "httpx" },
{ name = "jinja2" },
{ name = "pydantic-extra-types" },
{ name = "pydantic-settings" },
{ name = "python-multipart" },
{ name = "uvicorn", extra = ["standard"] },
]
@@ -539,11 +586,11 @@ wheels = [

[[package]]
name = "filelock"
version = "3.19.1"
version = "3.20.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" }
sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" },
{ url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" },
]

[[package]]
@@ -557,43 +604,43 @@ wheels = [

[[package]]
name = "fonttools"
version = "4.59.2"
version = "4.60.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/0d/a5/fba25f9fbdab96e26dedcaeeba125e5f05a09043bf888e0305326e55685b/fonttools-4.59.2.tar.gz", hash = "sha256:e72c0749b06113f50bcb80332364c6be83a9582d6e3db3fe0b280f996dc2ef22", size = 3540889, upload-time = "2025-08-27T16:40:30.97Z" }
sdist = { url = "https://files.pythonhosted.org/packages/3e/c4/db6a7b5eb0656534c3aa2596c2c5e18830d74f1b9aa5aa8a7dff63a0b11d/fonttools-4.60.2.tar.gz", hash = "sha256:d29552e6b155ebfc685b0aecf8d429cb76c14ab734c22ef5d3dea6fdf800c92c", size = 3562254, upload-time = "2025-12-09T13:38:11.835Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ba/3d/1f45db2df51e7bfa55492e8f23f383d372200be3a0ded4bf56a92753dd1f/fonttools-4.59.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82906d002c349cad647a7634b004825a7335f8159d0d035ae89253b4abf6f3ea", size = 2769711, upload-time = "2025-08-27T16:39:04.423Z" },
{ url = "https://files.pythonhosted.org/packages/29/df/cd236ab32a8abfd11558f296e064424258db5edefd1279ffdbcfd4fd8b76/fonttools-4.59.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a10c1bd7644dc58f8862d8ba0cf9fb7fef0af01ea184ba6ce3f50ab7dfe74d5a", size = 2340225, upload-time = "2025-08-27T16:39:06.143Z" },
{ url = "https://files.pythonhosted.org/packages/98/12/b6f9f964fe6d4b4dd4406bcbd3328821c3de1f909ffc3ffa558fe72af48c/fonttools-4.59.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:738f31f23e0339785fd67652a94bc69ea49e413dfdb14dcb8c8ff383d249464e", size = 4912766, upload-time = "2025-08-27T16:39:08.138Z" },
{ url = "https://files.pythonhosted.org/packages/73/78/82bde2f2d2c306ef3909b927363170b83df96171f74e0ccb47ad344563cd/fonttools-4.59.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ec99f9bdfee9cdb4a9172f9e8fd578cce5feb231f598909e0aecf5418da4f25", size = 4955178, upload-time = "2025-08-27T16:39:10.094Z" },
{ url = "https://files.pythonhosted.org/packages/92/77/7de766afe2d31dda8ee46d7e479f35c7d48747e558961489a2d6e3a02bd4/fonttools-4.59.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0476ea74161322e08c7a982f83558a2b81b491509984523a1a540baf8611cc31", size = 4897898, upload-time = "2025-08-27T16:39:12.087Z" },
{ url = "https://files.pythonhosted.org/packages/c5/77/ce0e0b905d62a06415fda9f2b2e109a24a5db54a59502b769e9e297d2242/fonttools-4.59.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:95922a922daa1f77cc72611747c156cfb38030ead72436a2c551d30ecef519b9", size = 5049144, upload-time = "2025-08-27T16:39:13.84Z" },
{ url = "https://files.pythonhosted.org/packages/d9/ea/870d93aefd23fff2e07cbeebdc332527868422a433c64062c09d4d5e7fe6/fonttools-4.59.2-cp312-cp312-win32.whl", hash = "sha256:39ad9612c6a622726a6a130e8ab15794558591f999673f1ee7d2f3d30f6a3e1c", size = 2206473, upload-time = "2025-08-27T16:39:15.854Z" },
{ url = "https://files.pythonhosted.org/packages/61/c4/e44bad000c4a4bb2e9ca11491d266e857df98ab6d7428441b173f0fe2517/fonttools-4.59.2-cp312-cp312-win_amd64.whl", hash = "sha256:980fd7388e461b19a881d35013fec32c713ffea1fc37aef2f77d11f332dfd7da", size = 2254706, upload-time = "2025-08-27T16:39:17.893Z" },
{ url = "https://files.pythonhosted.org/packages/13/7b/d0d3b9431642947b5805201fbbbe938a47b70c76685ef1f0cb5f5d7140d6/fonttools-4.59.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:381bde13216ba09489864467f6bc0c57997bd729abfbb1ce6f807ba42c06cceb", size = 2761563, upload-time = "2025-08-27T16:39:20.286Z" },
{ url = "https://files.pythonhosted.org/packages/76/be/fc5fe58dd76af7127b769b68071dbc32d4b95adc8b58d1d28d42d93c90f2/fonttools-4.59.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f33839aa091f7eef4e9078f5b7ab1b8ea4b1d8a50aeaef9fdb3611bba80869ec", size = 2335671, upload-time = "2025-08-27T16:39:22.027Z" },
{ url = "https://files.pythonhosted.org/packages/f2/9f/bf231c2a3fac99d1d7f1d89c76594f158693f981a4aa02be406e9f036832/fonttools-4.59.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6235fc06bcbdb40186f483ba9d5d68f888ea68aa3c8dac347e05a7c54346fbc8", size = 4893967, upload-time = "2025-08-27T16:39:23.664Z" },
{ url = "https://files.pythonhosted.org/packages/26/a9/d46d2ad4fcb915198504d6727f83aa07f46764c64f425a861aa38756c9fd/fonttools-4.59.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83ad6e5d06ef3a2884c4fa6384a20d6367b5cfe560e3b53b07c9dc65a7020e73", size = 4951986, upload-time = "2025-08-27T16:39:25.379Z" },
{ url = "https://files.pythonhosted.org/packages/07/90/1cc8d7dd8f707dfeeca472b82b898d3add0ebe85b1f645690dcd128ee63f/fonttools-4.59.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d029804c70fddf90be46ed5305c136cae15800a2300cb0f6bba96d48e770dde0", size = 4891630, upload-time = "2025-08-27T16:39:27.494Z" },
{ url = "https://files.pythonhosted.org/packages/d8/04/f0345b0d9fe67d65aa8d3f2d4cbf91d06f111bc7b8d802e65914eb06194d/fonttools-4.59.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:95807a3b5e78f2714acaa26a33bc2143005cc05c0217b322361a772e59f32b89", size = 5035116, upload-time = "2025-08-27T16:39:29.406Z" },
{ url = "https://files.pythonhosted.org/packages/d7/7d/5ba5eefffd243182fbd067cdbfeb12addd4e5aec45011b724c98a344ea33/fonttools-4.59.2-cp313-cp313-win32.whl", hash = "sha256:b3ebda00c3bb8f32a740b72ec38537d54c7c09f383a4cfefb0b315860f825b08", size = 2204907, upload-time = "2025-08-27T16:39:31.42Z" },
{ url = "https://files.pythonhosted.org/packages/ea/a9/be7219fc64a6026cc0aded17fa3720f9277001c185434230bd351bf678e6/fonttools-4.59.2-cp313-cp313-win_amd64.whl", hash = "sha256:a72155928d7053bbde499d32a9c77d3f0f3d29ae72b5a121752481bcbd71e50f", size = 2253742, upload-time = "2025-08-27T16:39:33.079Z" },
{ url = "https://files.pythonhosted.org/packages/fc/c7/486580d00be6fa5d45e41682e5ffa5c809f3d25773c6f39628d60f333521/fonttools-4.59.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d09e487d6bfbe21195801323ba95c91cb3523f0fcc34016454d4d9ae9eaa57fe", size = 2762444, upload-time = "2025-08-27T16:39:34.759Z" },
{ url = "https://files.pythonhosted.org/packages/d3/9b/950ea9b7b764ceb8d18645c62191e14ce62124d8e05cb32a4dc5e65fde0b/fonttools-4.59.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:dec2f22486d7781087b173799567cffdcc75e9fb2f1c045f05f8317ccce76a3e", size = 2333256, upload-time = "2025-08-27T16:39:40.777Z" },
{ url = "https://files.pythonhosted.org/packages/9b/4d/8ee9d563126de9002eede950cde0051be86cc4e8c07c63eca0c9fc95734a/fonttools-4.59.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1647201af10993090120da2e66e9526c4e20e88859f3e34aa05b8c24ded2a564", size = 4834846, upload-time = "2025-08-27T16:39:42.885Z" },
{ url = "https://files.pythonhosted.org/packages/03/26/f26d947b0712dce3d118e92ce30ca88f98938b066498f60d0ee000a892ae/fonttools-4.59.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47742c33fe65f41eabed36eec2d7313a8082704b7b808752406452f766c573fc", size = 4930871, upload-time = "2025-08-27T16:39:44.818Z" },
{ url = "https://files.pythonhosted.org/packages/fc/7f/ebe878061a5a5e6b6502f0548489e01100f7e6c0049846e6546ba19a3ab4/fonttools-4.59.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:92ac2d45794f95d1ad4cb43fa07e7e3776d86c83dc4b9918cf82831518165b4b", size = 4876971, upload-time = "2025-08-27T16:39:47.027Z" },
{ url = "https://files.pythonhosted.org/packages/eb/0d/0d22e3a20ac566836098d30718092351935487e3271fd57385db1adb2fde/fonttools-4.59.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:fa9ecaf2dcef8941fb5719e16322345d730f4c40599bbf47c9753de40eb03882", size = 4987478, upload-time = "2025-08-27T16:39:48.774Z" },
{ url = "https://files.pythonhosted.org/packages/3b/a3/960cc83182a408ffacc795e61b5f698c6f7b0cfccf23da4451c39973f3c8/fonttools-4.59.2-cp314-cp314-win32.whl", hash = "sha256:a8d40594982ed858780e18a7e4c80415af65af0f22efa7de26bdd30bf24e1e14", size = 2208640, upload-time = "2025-08-27T16:39:50.592Z" },
{ url = "https://files.pythonhosted.org/packages/d8/74/55e5c57c414fa3965fee5fc036ed23f26a5c4e9e10f7f078a54ff9c7dfb7/fonttools-4.59.2-cp314-cp314-win_amd64.whl", hash = "sha256:9cde8b6a6b05f68516573523f2013a3574cb2c75299d7d500f44de82ba947b80", size = 2258457, upload-time = "2025-08-27T16:39:52.611Z" },
{ url = "https://files.pythonhosted.org/packages/e1/dc/8e4261dc591c5cfee68fecff3ffee2a9b29e1edc4c4d9cbafdc5aefe74ee/fonttools-4.59.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:036cd87a2dbd7ef72f7b68df8314ced00b8d9973aee296f2464d06a836aeb9a9", size = 2829901, upload-time = "2025-08-27T16:39:55.014Z" },
{ url = "https://files.pythonhosted.org/packages/fb/05/331538dcf21fd6331579cd628268150e85210d0d2bdae20f7598c2b36c05/fonttools-4.59.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:14870930181493b1d740b6f25483e20185e5aea58aec7d266d16da7be822b4bb", size = 2362717, upload-time = "2025-08-27T16:39:56.843Z" },
{ url = "https://files.pythonhosted.org/packages/60/ae/d26428ca9ede809c0a93f0af91f44c87433dc0251e2aec333da5ed00d38f/fonttools-4.59.2-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7ff58ea1eb8fc7e05e9a949419f031890023f8785c925b44d6da17a6a7d6e85d", size = 4835120, upload-time = "2025-08-27T16:39:59.06Z" },
{ url = "https://files.pythonhosted.org/packages/07/c4/0f6ac15895de509e07688cb1d45f1ae583adbaa0fa5a5699d73f3bd58ca0/fonttools-4.59.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6dee142b8b3096514c96ad9e2106bf039e2fe34a704c587585b569a36df08c3c", size = 5071115, upload-time = "2025-08-27T16:40:01.009Z" },
{ url = "https://files.pythonhosted.org/packages/b2/b6/147a711b7ecf7ea39f9da9422a55866f6dd5747c2f36b3b0a7a7e0c6820b/fonttools-4.59.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8991bdbae39cf78bcc9cd3d81f6528df1f83f2e7c23ccf6f990fa1f0b6e19708", size = 4943905, upload-time = "2025-08-27T16:40:03.179Z" },
{ url = "https://files.pythonhosted.org/packages/5b/4e/2ab19006646b753855e2b02200fa1cabb75faa4eeca4ef289f269a936974/fonttools-4.59.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:53c1a411b7690042535a4f0edf2120096a39a506adeb6c51484a232e59f2aa0c", size = 4960313, upload-time = "2025-08-27T16:40:05.45Z" },
{ url = "https://files.pythonhosted.org/packages/98/3d/df77907e5be88adcca93cc2cee00646d039da220164be12bee028401e1cf/fonttools-4.59.2-cp314-cp314t-win32.whl", hash = "sha256:59d85088e29fa7a8f87d19e97a1beae2a35821ee48d8ef6d2c4f965f26cb9f8a", size = 2269719, upload-time = "2025-08-27T16:40:07.553Z" },
{ url = "https://files.pythonhosted.org/packages/2d/a0/d4c4bc5b50275449a9a908283b567caa032a94505fe1976e17f994faa6be/fonttools-4.59.2-cp314-cp314t-win_amd64.whl", hash = "sha256:7ad5d8d8cc9e43cb438b3eb4a0094dd6d4088daa767b0a24d52529361fd4c199", size = 2333169, upload-time = "2025-08-27T16:40:09.656Z" },
{ url = "https://files.pythonhosted.org/packages/65/a4/d2f7be3c86708912c02571db0b550121caab8cd88a3c0aacb9cfa15ea66e/fonttools-4.59.2-py3-none-any.whl", hash = "sha256:8bd0f759020e87bb5d323e6283914d9bf4ae35a7307dafb2cbd1e379e720ad37", size = 1132315, upload-time = "2025-08-27T16:40:28.984Z" },
{ url = "https://files.pythonhosted.org/packages/c0/30/530c9eddcd1c39219dc0aaede2b5a4c8ab80e0bb88d1b3ffc12944c4aac3/fonttools-4.60.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e0164b7609d2b5c5dd4e044b8085b7bd7ca7363ef8c269a4ab5b5d4885a426b2", size = 2847196, upload-time = "2025-12-09T13:36:33.262Z" },
{ url = "https://files.pythonhosted.org/packages/19/2f/4077a482836d5bbe3bc9dac1c004d02ee227cf04ed62b0a2dfc41d4f0dfd/fonttools-4.60.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1dd3d9574fc595c1e97faccae0f264dc88784ddf7fbf54c939528378bacc0033", size = 2395842, upload-time = "2025-12-09T13:36:35.47Z" },
{ url = "https://files.pythonhosted.org/packages/dd/05/aae5bb99c5398f8ed4a8b784f023fd9dd3568f0bd5d5b21e35b282550f11/fonttools-4.60.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:98d0719f1b11c2817307d2da2e94296a3b2a3503f8d6252a101dca3ee663b917", size = 4949713, upload-time = "2025-12-09T13:36:37.874Z" },
{ url = "https://files.pythonhosted.org/packages/b4/37/49067349fc78ff0efbf09fadefe80ddf41473ca8f8a25400e3770da38328/fonttools-4.60.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d3ea26957dd07209f207b4fff64c702efe5496de153a54d3b91007ec28904dd", size = 4999907, upload-time = "2025-12-09T13:36:39.853Z" },
{ url = "https://files.pythonhosted.org/packages/16/31/d0f11c758bd0db36b664c92a0f9dfdcc2d7313749aa7d6629805c6946f21/fonttools-4.60.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ee301273b0850f3a515299f212898f37421f42ff9adfc341702582ca5073c13", size = 4939717, upload-time = "2025-12-09T13:36:43.075Z" },
{ url = "https://files.pythonhosted.org/packages/d9/bc/1cff0d69522e561bf1b99bee7c3911c08c25e919584827c3454a64651ce9/fonttools-4.60.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c6eb4694cc3b9c03b7c01d65a9cf35b577f21aa6abdbeeb08d3114b842a58153", size = 5089205, upload-time = "2025-12-09T13:36:45.468Z" },
{ url = "https://files.pythonhosted.org/packages/05/e6/fb174f0069b7122e19828c551298bfd34fdf9480535d2a6ac2ed37afacd3/fonttools-4.60.2-cp312-cp312-win32.whl", hash = "sha256:57f07b616c69c244cc1a5a51072eeef07dddda5ebef9ca5c6e9cf6d59ae65b70", size = 2264674, upload-time = "2025-12-09T13:36:49.238Z" },
{ url = "https://files.pythonhosted.org/packages/75/57/6552ffd6b582d3e6a9f01780c5275e6dfff1e70ca146101733aa1c12a129/fonttools-4.60.2-cp312-cp312-win_amd64.whl", hash = "sha256:310035802392f1fe5a7cf43d76f6ff4a24c919e4c72c0352e7b8176e2584b8a0", size = 2314701, upload-time = "2025-12-09T13:36:51.09Z" },
{ url = "https://files.pythonhosted.org/packages/2e/e4/8381d0ca6b6c6c484660b03517ec5b5b81feeefca3808726dece36c652a9/fonttools-4.60.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2bb5fd231e56ccd7403212636dcccffc96c5ae0d6f9e4721fa0a32cb2e3ca432", size = 2842063, upload-time = "2025-12-09T13:36:53.468Z" },
{ url = "https://files.pythonhosted.org/packages/b4/2c/4367117ee8ff4f4374787a1222da0bd413d80cf3522111f727a7b8f80d1d/fonttools-4.60.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:536b5fab7b6fec78ccf59b5c59489189d9d0a8b0d3a77ed1858be59afb096696", size = 2393792, upload-time = "2025-12-09T13:36:55.742Z" },
{ url = "https://files.pythonhosted.org/packages/49/b7/a76b6dffa193869e54e32ca2f9abb0d0e66784bc8a24e6f86eb093015481/fonttools-4.60.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6b9288fc38252ac86a9570f19313ecbc9ff678982e0f27c757a85f1f284d3400", size = 4924020, upload-time = "2025-12-09T13:36:58.229Z" },
{ url = "https://files.pythonhosted.org/packages/bd/4e/0078200e2259f0061c86a74075f507d64c43dd2ab38971956a5c0012d344/fonttools-4.60.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93fcb420791d839ef592eada2b69997c445d0ce9c969b5190f2e16828ec10607", size = 4980070, upload-time = "2025-12-09T13:37:00.311Z" },
{ url = "https://files.pythonhosted.org/packages/85/1f/d87c85a11cb84852c975251581862681e4a0c1c3bd456c648792203f311b/fonttools-4.60.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7916a381b094db4052ac284255186aebf74c5440248b78860cb41e300036f598", size = 4921411, upload-time = "2025-12-09T13:37:02.345Z" },
{ url = "https://files.pythonhosted.org/packages/75/c0/7efad650f5ed8e317c2633133ef3c64917e7adf2e4e2940c798f5d57ec6e/fonttools-4.60.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58c8c393d5e16b15662cfc2d988491940458aa87894c662154f50c7b49440bef", size = 5063465, upload-time = "2025-12-09T13:37:04.836Z" },
{ url = "https://files.pythonhosted.org/packages/18/a8/750518c4f8cdd79393b386bc81226047ade80239e58c6c9f5dbe1fdd8ea1/fonttools-4.60.2-cp313-cp313-win32.whl", hash = "sha256:19c6e0afd8b02008caa0aa08ab896dfce5d0bcb510c49b2c499541d5cb95a963", size = 2263443, upload-time = "2025-12-09T13:37:06.762Z" },
{ url = "https://files.pythonhosted.org/packages/b8/22/026c60376f165981f80a0e90bd98a79ae3334e9d89a3d046c4d2e265c724/fonttools-4.60.2-cp313-cp313-win_amd64.whl", hash = "sha256:6a500dc59e11b2338c2dba1f8cf11a4ae8be35ec24af8b2628b8759a61457b76", size = 2313800, upload-time = "2025-12-09T13:37:08.713Z" },
{ url = "https://files.pythonhosted.org/packages/7e/ab/7cf1f5204e1366ddf9dc5cdc2789b571feb9eebcee0e3463c3f457df5f52/fonttools-4.60.2-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9387c532acbe323bbf2a920f132bce3c408a609d5f9dcfc6532fbc7e37f8ccbb", size = 2841690, upload-time = "2025-12-09T13:37:10.696Z" },
{ url = "https://files.pythonhosted.org/packages/00/3c/0bf83c6f863cc8b934952567fa2bf737cfcec8fc4ffb59b3f93820095f89/fonttools-4.60.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6f1c824185b5b8fb681297f315f26ae55abb0d560c2579242feea8236b1cfef", size = 2392191, upload-time = "2025-12-09T13:37:12.954Z" },
{ url = "https://files.pythonhosted.org/packages/00/f0/40090d148b8907fbea12e9bdf1ff149f30cdf1769e3b2c3e0dbf5106b88d/fonttools-4.60.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:55a3129d1e4030b1a30260f1b32fe76781b585fb2111d04a988e141c09eb6403", size = 4873503, upload-time = "2025-12-09T13:37:15.142Z" },
{ url = "https://files.pythonhosted.org/packages/dc/e0/d8b13f99e58b8c293781288ba62fe634f1f0697c9c4c0ae104d3215f3a10/fonttools-4.60.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b196e63753abc33b3b97a6fd6de4b7c4fef5552c0a5ba5e562be214d1e9668e0", size = 4968493, upload-time = "2025-12-09T13:37:18.272Z" },
{ url = "https://files.pythonhosted.org/packages/46/c5/960764d12c92bc225f02401d3067048cb7b282293d9e48e39fe2b0ec38a9/fonttools-4.60.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:de76c8d740fb55745f3b154f0470c56db92ae3be27af8ad6c2e88f1458260c9a", size = 4920015, upload-time = "2025-12-09T13:37:20.334Z" },
{ url = "https://files.pythonhosted.org/packages/4b/ab/839d8caf253d1eef3653ef4d34427d0326d17a53efaec9eb04056b670fff/fonttools-4.60.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6ba6303225c95998c9fda2d410aa792c3d2c1390a09df58d194b03e17583fa25", size = 5031165, upload-time = "2025-12-09T13:37:23.57Z" },
{ url = "https://files.pythonhosted.org/packages/de/bf/3bc862796a6841cbe0725bb5512d272239b809dba631a4b0301df885e62d/fonttools-4.60.2-cp314-cp314-win32.whl", hash = "sha256:0a89728ce10d7c816fedaa5380c06d2793e7a8a634d7ce16810e536c22047384", size = 2267526, upload-time = "2025-12-09T13:37:25.821Z" },
{ url = "https://files.pythonhosted.org/packages/fc/a1/c1909cacf00c76dc37b4743451561fbaaf7db4172c22a6d9394081d114c3/fonttools-4.60.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa8446e6ab8bd778b82cb1077058a2addba86f30de27ab9cc18ed32b34bc8667", size = 2319096, upload-time = "2025-12-09T13:37:28.058Z" },
{ url = "https://files.pythonhosted.org/packages/29/b3/f66e71433f08e3a931b2b31a665aeed17fcc5e6911fc73529c70a232e421/fonttools-4.60.2-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:4063bc81ac5a4137642865cb63dd270e37b3cd1f55a07c0d6e41d072699ccca2", size = 2925167, upload-time = "2025-12-09T13:37:30.348Z" },
{ url = "https://files.pythonhosted.org/packages/2e/13/eeb491ff743594bbd0bee6e49422c03a59fe9c49002d3cc60eeb77414285/fonttools-4.60.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:ebfdb66fa69732ed604ab8e2a0431e6deff35e933a11d73418cbc7823d03b8e1", size = 2430923, upload-time = "2025-12-09T13:37:32.817Z" },
{ url = "https://files.pythonhosted.org/packages/b2/e5/db609f785e460796e53c4dbc3874a5f4948477f27beceb5e2d24b2537666/fonttools-4.60.2-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50b10b3b1a72d1d54c61b0e59239e1a94c0958f4a06a1febf97ce75388dd91a4", size = 4877729, upload-time = "2025-12-09T13:37:35.858Z" },
{ url = "https://files.pythonhosted.org/packages/5f/d6/85e4484dd4bfb03fee7bd370d65888cccbd3dee2681ee48c869dd5ccb23f/fonttools-4.60.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:beae16891a13b4a2ddec9b39b4de76092a3025e4d1c82362e3042b62295d5e4d", size = 5096003, upload-time = "2025-12-09T13:37:37.862Z" },
{ url = "https://files.pythonhosted.org/packages/30/49/1a98e44b71030b83d2046f981373b80571868259d98e6dae7bc20099dac6/fonttools-4.60.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:522f017fdb3766fd5d2d321774ef351cc6ce88ad4e6ac9efe643e4a2b9d528db", size = 4974410, upload-time = "2025-12-09T13:37:40.166Z" },
{ url = "https://files.pythonhosted.org/packages/42/07/d6f775d950ee8a841012472c7303f8819423d8cc3b4530915de7265ebfa2/fonttools-4.60.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:82cceceaf9c09a965a75b84a4b240dd3768e596ffb65ef53852681606fe7c9ba", size = 5002036, upload-time = "2025-12-09T13:37:42.639Z" },
{ url = "https://files.pythonhosted.org/packages/73/f6/ba6458f83ce1a9f8c3b17bd8f7b8a2205a126aac1055796b7e7cfebbd38f/fonttools-4.60.2-cp314-cp314t-win32.whl", hash = "sha256:bbfbc918a75437fe7e6d64d1b1e1f713237df1cf00f3a36dedae910b2ba01cee", size = 2330985, upload-time = "2025-12-09T13:37:45.157Z" },
{ url = "https://files.pythonhosted.org/packages/91/24/fea0ba4d3a32d4ed1103a1098bfd99dc78b5fe3bb97202920744a37b73dc/fonttools-4.60.2-cp314-cp314t-win_amd64.whl", hash = "sha256:0e5cd9b0830f6550d58c84f3ab151a9892b50c4f9d538c5603c0ce6fff2eb3f1", size = 2396226, upload-time = "2025-12-09T13:37:47.355Z" },
{ url = "https://files.pythonhosted.org/packages/79/6c/10280af05b44fafd1dff69422805061fa1af29270bc52dce031ac69540bf/fonttools-4.60.2-py3-none-any.whl", hash = "sha256:73cf92eeda67cf6ff10c8af56fc8f4f07c1647d989a979be9e388a49be26552a", size = 1144610, upload-time = "2025-12-09T13:38:09.5Z" },
]

[[package]]
@@ -679,27 +726,30 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" },
{ url = "https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" },
{ url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" },
{ url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" },
{ url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" },
{ url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" },
{ url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" },
{ url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" },
{ url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" },
{ url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, upload-time = "2025-11-04T12:42:17.175Z" },
{ url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" },
{ url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" },
{ url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" },
{ url = "https://files.pythonhosted.org/packages/f7/0b/bc13f787394920b23073ca3b6c4a7a21396301ed75a655bcb47196b50e6e/greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc", size = 655191, upload-time = "2025-08-07T13:45:29.752Z" },
{ url = "https://files.pythonhosted.org/packages/f2/d6/6adde57d1345a8d0f14d31e4ab9c23cfe8e2cd39c3baf7674b4b0338d266/greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a", size = 649516, upload-time = "2025-08-07T13:53:16.314Z" },
{ url = "https://files.pythonhosted.org/packages/7f/3b/3a3328a788d4a473889a2d403199932be55b1b0060f4ddd96ee7cdfcad10/greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504", size = 652169, upload-time = "2025-08-07T13:18:32.861Z" },
{ url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" },
{ url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" },
{ url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" },
{ url = "https://files.pythonhosted.org/packages/1c/53/f9c440463b3057485b8594d7a638bed53ba531165ef0ca0e6c364b5cc807/greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b", size = 1564759, upload-time = "2025-11-04T12:42:19.395Z" },
{ url = "https://files.pythonhosted.org/packages/47/e4/3bb4240abdd0a8d23f4f88adec746a3099f0d86bfedb623f063b2e3b4df0/greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929", size = 1634288, upload-time = "2025-11-04T12:42:21.174Z" },
{ url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" },
{ url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" },
{ url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" },
{ url = "https://files.pythonhosted.org/packages/c0/aa/687d6b12ffb505a4447567d1f3abea23bd20e73a5bed63871178e0831b7a/greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5", size = 699218, upload-time = "2025-08-07T13:45:30.969Z" },
{ url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" },
{ url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" },
{ url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" },
{ url = "https://files.pythonhosted.org/packages/23/6e/74407aed965a4ab6ddd93a7ded3180b730d281c77b765788419484cdfeef/greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269", size = 1612508, upload-time = "2025-11-04T12:42:23.427Z" },
{ url = "https://files.pythonhosted.org/packages/0d/da/343cd760ab2f92bac1845ca07ee3faea9fe52bee65f7bcb19f16ad7de08b/greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681", size = 1680760, upload-time = "2025-11-04T12:42:25.341Z" },
{ url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" },
]
@@ -1519,68 +1569,71 @@ wheels = [
[[package]]
name = "pillow"
version = "11.3.0"
version = "12.1.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" },
{ url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" },
{ url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" },
{ url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" },
{ url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" },
{ url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" },
{ url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" },
{ url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" },
{ url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" },
{ url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" },
{ url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" },
{ url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
{ url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
{ url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
{ url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
{ url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
{ url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
{ url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
{ url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
{ url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
{ url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
{ url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
{ url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
{ url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
{ url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
{ url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
{ url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
{ url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
{ url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
{ url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
{ url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
{ url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
{ url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
{ url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
{ url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
{ url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
{ url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" },
{ url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" },
{ url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" },
{ url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" },
{ url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" },
{ url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" },
{ url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" },
{ url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" },
{ url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" },
{ url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" },
{ url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" },
{ url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" },
{ url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" },
{ url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" },
{ url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" },
{ url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" },
{ url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" },
{ url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" },
{ url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" },
{ url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" },
{ url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" },
{ url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
{ url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" },
{ url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" },
{ url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" },
{ url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" },
{ url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" },
{ url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" },
{ url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" },
{ url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" },
{ url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" },
{ url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" },
{ url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" },
{ url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" },
{ url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" },
{ url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" },
{ url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" },
{ url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" },
{ url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" },
{ url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" },
{ url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" },
{ url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" },
{ url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" },
{ url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" },
{ url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" },
{ url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" },
{ url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" },
{ url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" },
{ url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" },
{ url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" },
{ url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" },
{ url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" },
{ url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" },
{ url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" },
{ url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" },
{ url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" },
{ url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" },
{ url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" },
{ url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" },
{ url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" },
{ url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" },
{ url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" },
{ url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" },
{ url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" },
{ url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" },
{ url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" },
{ url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" },
{ url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" },
{ url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" },
{ url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" },
{ url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" },
{ url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" },
{ url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" },
{ url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" },
{ url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" },
{ url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" },
{ url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" },
{ url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" },
{ url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" },
{ url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" },
{ url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" },
{ url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" },
{ url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" },
]
[[package]]
@@ -1674,21 +1727,22 @@ wheels = [
[[package]]
name = "protobuf"
version = "6.32.0"
version = "6.33.5"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/c0/df/fb4a8eeea482eca989b51cffd274aac2ee24e825f0bf3cbce5281fa1567b/protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2", size = 440614, upload-time = "2025-08-14T21:21:25.015Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ba/25/7c72c307aafc96fa87062aa6291d9f7c94836e43214d43722e86037aac02/protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c", size = 444465, upload-time = "2026-01-29T21:51:33.494Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/33/18/df8c87da2e47f4f1dcc5153a81cd6bca4e429803f4069a299e236e4dd510/protobuf-6.32.0-cp310-abi3-win32.whl", hash = "sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741", size = 424409, upload-time = "2025-08-14T21:21:12.366Z" },
{ url = "https://files.pythonhosted.org/packages/e1/59/0a820b7310f8139bd8d5a9388e6a38e1786d179d6f33998448609296c229/protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e", size = 435735, upload-time = "2025-08-14T21:21:15.046Z" },
{ url = "https://files.pythonhosted.org/packages/cc/5b/0d421533c59c789e9c9894683efac582c06246bf24bb26b753b149bd88e4/protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0", size = 426449, upload-time = "2025-08-14T21:21:16.687Z" },
{ url = "https://files.pythonhosted.org/packages/ec/7b/607764ebe6c7a23dcee06e054fd1de3d5841b7648a90fd6def9a3bb58c5e/protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1", size = 322869, upload-time = "2025-08-14T21:21:18.282Z" },
{ url = "https://files.pythonhosted.org/packages/40/01/2e730bd1c25392fc32e3268e02446f0d77cb51a2c3a8486b1798e34d5805/protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c", size = 322009, upload-time = "2025-08-14T21:21:19.893Z" },
{ url = "https://files.pythonhosted.org/packages/9c/f2/80ffc4677aac1bc3519b26bc7f7f5de7fce0ee2f7e36e59e27d8beb32dd1/protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783", size = 169287, upload-time = "2025-08-14T21:21:23.515Z" },
{ url = "https://files.pythonhosted.org/packages/b1/79/af92d0a8369732b027e6d6084251dd8e782c685c72da161bd4a2e00fbabb/protobuf-6.33.5-cp310-abi3-win32.whl", hash = "sha256:d71b040839446bac0f4d162e758bea99c8251161dae9d0983a3b88dee345153b", size = 425769, upload-time = "2026-01-29T21:51:21.751Z" },
{ url = "https://files.pythonhosted.org/packages/55/75/bb9bc917d10e9ee13dee8607eb9ab963b7cf8be607c46e7862c748aa2af7/protobuf-6.33.5-cp310-abi3-win_amd64.whl", hash = "sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c", size = 437118, upload-time = "2026-01-29T21:51:24.022Z" },
{ url = "https://files.pythonhosted.org/packages/a2/6b/e48dfc1191bc5b52950246275bf4089773e91cb5ba3592621723cdddca62/protobuf-6.33.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a5cb85982d95d906df1e2210e58f8e4f1e3cdc088e52c921a041f9c9a0386de5", size = 427766, upload-time = "2026-01-29T21:51:25.413Z" },
{ url = "https://files.pythonhosted.org/packages/4e/b1/c79468184310de09d75095ed1314b839eb2f72df71097db9d1404a1b2717/protobuf-6.33.5-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:9b71e0281f36f179d00cbcb119cb19dec4d14a81393e5ea220f64b286173e190", size = 324638, upload-time = "2026-01-29T21:51:26.423Z" },
{ url = "https://files.pythonhosted.org/packages/c5/f5/65d838092fd01c44d16037953fd4c2cc851e783de9b8f02b27ec4ffd906f/protobuf-6.33.5-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8afa18e1d6d20af15b417e728e9f60f3aa108ee76f23c3b2c07a2c3b546d3afd", size = 339411, upload-time = "2026-01-29T21:51:27.446Z" },
{ url = "https://files.pythonhosted.org/packages/9b/53/a9443aa3ca9ba8724fdfa02dd1887c1bcd8e89556b715cfbacca6b63dbec/protobuf-6.33.5-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:cbf16ba3350fb7b889fca858fb215967792dc125b35c7976ca4818bee3521cf0", size = 323465, upload-time = "2026-01-29T21:51:28.925Z" },
{ url = "https://files.pythonhosted.org/packages/57/bf/2086963c69bdac3d7cff1cc7ff79b8ce5ea0bec6797a017e1be338a46248/protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02", size = 170687, upload-time = "2026-01-29T21:51:32.557Z" },
]
[[package]]
name = "pyannote-audio"
version = "3.1.0"
version = "3.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "asteroid-filterbanks" },
@@ -1711,9 +1765,9 @@ dependencies = [
{ name = "torchaudio" },
{ name = "torchmetrics" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ad/55/7253267c35e2aa9188b1d86cba121eb5bdd91ed12d3194488625a008cae7/pyannote.audio-3.1.0.tar.gz", hash = "sha256:da04705443d3b74607e034d3ca88f8b572c7e9672dd9a4199cab65a0dbc33fad", size = 14812058, upload-time = "2023-11-16T12:26:38.939Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ec/1e/efe9619c38f1281ddf21640654d8ea9e3f67c459b76f78657b26d8557bbe/pyannote_audio-3.4.0.tar.gz", hash = "sha256:d523d883cb8d37cb6daf99f3ba83f9138bb193646ad71e6eae7deb89d8ddd642", size = 804850, upload-time = "2025-09-09T07:04:51.17Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a1/37/158859ce4c45b5ba2dca40b53b0c10d36f935b7f6d4e737298397167c8b1/pyannote.audio-3.1.0-py2.py3-none-any.whl", hash = "sha256:66ab485728c6e141760e80555cb7a083e7be824cd528cc79b9e6f7d6421a91ae", size = 208592, upload-time = "2023-11-16T12:26:36.726Z" },
{ url = "https://files.pythonhosted.org/packages/79/13/620c6f711b723653092fd063bfee82a6af5ea3a4d3c42efc53ce623a7f4d/pyannote_audio-3.4.0-py2.py3-none-any.whl", hash = "sha256:36e38f058059f46da3478dda581cda53d9d85a21173a3e70bbdbc3ba93b5e1b7", size = 897789, upload-time = "2025-09-09T07:04:49.464Z" },
]
[[package]]
@@ -1857,6 +1911,33 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
]
[[package]]
name = "pydantic-extra-types"
version = "2.11.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pydantic" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fd/35/2fee58b1316a73e025728583d3b1447218a97e621933fc776fb8c0f2ebdd/pydantic_extra_types-2.11.0.tar.gz", hash = "sha256:4e9991959d045b75feb775683437a97991d02c138e00b59176571db9ce634f0e", size = 157226, upload-time = "2025-12-31T16:18:27.944Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/17/fabd56da47096d240dd45ba627bead0333b0cf0ee8ada9bec579287dadf3/pydantic_extra_types-2.11.0-py3-none-any.whl", hash = "sha256:84b864d250a0fc62535b7ec591e36f2c5b4d1325fa0017eb8cda9aeb63b374a6", size = 74296, upload-time = "2025-12-31T16:18:26.38Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic-settings"
|
||||
version = "2.13.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pydantic" },
|
||||
{ name = "python-dotenv" },
|
||||
{ name = "typing-inspection" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/52/6d/fffca34caecc4a3f97bda81b2098da5e8ab7efc9a66e819074a11955d87e/pydantic_settings-2.13.1.tar.gz", hash = "sha256:b4c11847b15237fb0171e1462bf540e294affb9b86db4d9aa5c01730bdbe4025", size = 223826, upload-time = "2026-02-19T13:45:08.055Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/00/4b/ccc026168948fec4f7555b9164c724cf4125eac006e176541483d2c959be/pydantic_settings-2.13.1-py3-none-any.whl", hash = "sha256:d56fd801823dbeae7f0975e1f8c8e25c258eb75d278ea7abb5d9cebb01b56237", size = 58929, upload-time = "2026-02-19T13:45:06.034Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pygments"
|
||||
version = "2.19.2"
|
||||
@@ -1907,11 +1988,11 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "python-multipart"
|
||||
version = "0.0.20"
|
||||
version = "0.0.22"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/94/01/979e98d542a70714b0cb2b6728ed0b7c46792b695e3eaec3e20711271ca3/python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58", size = 37612, upload-time = "2026-01-25T10:15:56.219Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/d0/397f9626e711ff749a95d96b7af99b9c566a9bb5129b8e4c10fc4d100304/python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155", size = 24579, upload-time = "2026-01-25T10:15:54.811Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1988,11 +2069,13 @@ name = "reflector-gpu"
|
||||
version = "0.1.0"
|
||||
source = { virtual = "." }
|
||||
dependencies = [
|
||||
{ name = "av" },
|
||||
{ name = "fastapi", extra = ["standard"] },
|
||||
{ name = "faster-whisper" },
|
||||
{ name = "librosa" },
|
||||
{ name = "numpy" },
|
||||
{ name = "pyannote-audio" },
|
||||
{ name = "pytorch-lightning" },
|
||||
{ name = "sentencepiece" },
|
||||
{ name = "silero-vad" },
|
||||
{ name = "torch" },
|
||||
@@ -2003,13 +2086,15 @@ dependencies = [
|
||||
|
||||
[package.metadata]
|
||||
requires-dist = [
|
||||
{ name = "av", specifier = ">=13.1.0" },
|
||||
{ name = "fastapi", extras = ["standard"], specifier = ">=0.116.1" },
|
||||
{ name = "faster-whisper", specifier = ">=1.1.0" },
|
||||
{ name = "librosa", specifier = "==0.10.1" },
|
||||
{ name = "numpy", specifier = "<2" },
|
||||
{ name = "pyannote-audio", specifier = "==3.1.0" },
|
||||
{ name = "pyannote-audio", specifier = "==3.4.0" },
|
||||
{ name = "pytorch-lightning", specifier = "<2.6" },
|
||||
{ name = "sentencepiece" },
|
||||
{ name = "silero-vad", specifier = "==5.1.0" },
|
||||
{ name = "silero-vad", specifier = "==5.1.2" },
|
||||
{ name = "torch", specifier = ">=2.3.0" },
|
||||
{ name = "torchaudio", specifier = ">=2.3.0" },
|
||||
{ name = "transformers", specifier = ">=4.35.0" },
|
||||
@@ -2389,16 +2474,16 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "silero-vad"
|
||||
version = "5.1"
|
||||
version = "5.1.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "onnxruntime" },
|
||||
{ name = "torch" },
|
||||
{ name = "torchaudio" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7c/5d/b912e45d21b8b61859a552554893222d2cdebfd0f9afa7e8ba69c7a3441a/silero_vad-5.1.tar.gz", hash = "sha256:c644275ba5df06cee596cc050ba0bd1e0f5237d1abfa44d58dd4618f6e77434d", size = 3996829, upload-time = "2024-07-09T13:19:24.181Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b1/b4/d0311b2e6220a11f8f4699f4a278cb088131573286cdfe804c87c7eb5123/silero_vad-5.1.2.tar.gz", hash = "sha256:c442971160026d2d7aa0ad83f0c7ee86c89797a65289fe625c8ea59fc6fb828d", size = 5098526, upload-time = "2024-10-09T09:50:47.019Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/be/0fdbc72030b93d6f55107490d5d2185ddf0dbabdc921f589649d3e92ccd5/silero_vad-5.1-py3-none-any.whl", hash = "sha256:ecb50b484f538f7a962ce5cd3c07120d9db7b9d5a0c5861ccafe459856f22c8f", size = 3939986, upload-time = "2024-07-09T13:19:21.383Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/f7/5ae11d13fbb733cd3bfd7ff1c3a3902e6f55437df4b72307c1f168146268/silero_vad-5.1.2-py3-none-any.whl", hash = "sha256:93b41953d7774b165407fda6b533c119c5803864e367d5034dc626c82cfdf661", size = 5026737, upload-time = "2024-10-09T09:50:44.355Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2515,15 +2600,15 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "starlette"
|
||||
version = "0.47.3"
|
||||
version = "0.49.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/15/b9/cc3017f9a9c9b6e27c5106cc10cc7904653c3eec0729793aec10479dd669/starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9", size = 2584144, upload-time = "2025-08-24T13:36:42.122Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1b/3f/507c21db33b66fb027a332f2cb3abbbe924cc3a79ced12f01ed8645955c9/starlette-0.49.1.tar.gz", hash = "sha256:481a43b71e24ed8c43b11ea02f5353d77840e01480881b8cb5a26b8cae64a8cb", size = 2654703, upload-time = "2025-10-28T17:34:10.928Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991, upload-time = "2025-08-24T13:36:40.887Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/da/545b75d420bb23b5d494b0517757b351963e974e79933f01e05c929f20a6/starlette-0.49.1-py3-none-any.whl", hash = "sha256:d92ce9f07e4a3caa3ac13a79523bd18e3bc0042bb8ff2d759a8e7dd0e1859875", size = 74175, upload-time = "2025-10-28T17:34:09.13Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2777,14 +2862,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "typing-inspection"
|
||||
version = "0.4.1"
|
||||
version = "0.4.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2798,11 +2883,11 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.5.0"
|
||||
version = "2.6.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
@@ -4,11 +4,12 @@
# Single script to configure and launch everything on one server.
#
# Usage:
#   ./scripts/setup-selfhosted.sh <--gpu|--cpu> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASSWORD] [--build]
#   ./scripts/setup-selfhosted.sh <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASSWORD] [--build]
#
# Specialized models (pick ONE — required):
#   --gpu   NVIDIA GPU for transcription/diarization/translation
#   --cpu   CPU-only for transcription/diarization/translation (slower)
# ML processing modes (pick ONE — required):
#   --gpu     NVIDIA GPU container for transcription/diarization/translation
#   --cpu     In-process CPU processing (no ML container, slower)
#   --hosted  Remote GPU service URL (no ML container)
#
# Local LLM (optional — for summarization & topic detection):
#   --ollama-gpu   Local Ollama with NVIDIA GPU acceleration
@@ -29,11 +30,16 @@
#   ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
#   ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --domain reflector.example.com
#   ./scripts/setup-selfhosted.sh --cpu --ollama-cpu --garage --caddy
#   ./scripts/setup-selfhosted.sh --hosted --garage --caddy
#   ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --llm-model mistral --garage --caddy
#   ./scripts/setup-selfhosted.sh --gpu --garage --caddy --password mysecretpass
#   ./scripts/setup-selfhosted.sh --gpu --garage --caddy
#   ./scripts/setup-selfhosted.sh --cpu
#
# The script auto-detects Daily.co (DAILY_API_KEY) and Whereby (WHEREBY_API_KEY)
# from server/.env. If Daily.co is configured, Hatchet workflow services are
# started automatically for multitrack recording processing.
#
# Idempotent — safe to re-run at any time.
#
set -euo pipefail
@@ -179,11 +185,14 @@ for i in "${!ARGS[@]}"; do
  arg="${ARGS[$i]}"
  case "$arg" in
    --gpu)
      [[ -n "$MODEL_MODE" ]] && { err "Cannot combine --gpu and --cpu. Pick one."; exit 1; }
      [[ -n "$MODEL_MODE" ]] && { err "Cannot combine --gpu, --cpu, and --hosted. Pick one."; exit 1; }
      MODEL_MODE="gpu" ;;
    --cpu)
      [[ -n "$MODEL_MODE" ]] && { err "Cannot combine --gpu and --cpu. Pick one."; exit 1; }
      [[ -n "$MODEL_MODE" ]] && { err "Cannot combine --gpu, --cpu, and --hosted. Pick one."; exit 1; }
      MODEL_MODE="cpu" ;;
    --hosted)
      [[ -n "$MODEL_MODE" ]] && { err "Cannot combine --gpu, --cpu, and --hosted. Pick one."; exit 1; }
      MODEL_MODE="hosted" ;;
    --ollama-gpu)
      [[ -n "$OLLAMA_MODE" ]] && { err "Cannot combine --ollama-gpu and --ollama-cpu. Pick one."; exit 1; }
      OLLAMA_MODE="ollama-gpu" ;;
@@ -220,20 +229,21 @@ for i in "${!ARGS[@]}"; do
      SKIP_NEXT=true ;;
    *)
      err "Unknown argument: $arg"
      err "Usage: $0 <--gpu|--cpu> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASS] [--build]"
      err "Usage: $0 <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASS] [--build]"
      exit 1
      ;;
  esac
done

if [[ -z "$MODEL_MODE" ]]; then
  err "No model mode specified. You must choose --gpu or --cpu."
  err "No model mode specified. You must choose --gpu, --cpu, or --hosted."
  err ""
  err "Usage: $0 <--gpu|--cpu> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASS] [--build]"
  err "Usage: $0 <--gpu|--cpu|--hosted> [--ollama-gpu|--ollama-cpu] [--llm-model MODEL] [--garage] [--caddy] [--domain DOMAIN] [--password PASS] [--build]"
  err ""
  err "Specialized models (required):"
  err "  --gpu    NVIDIA GPU for transcription/diarization/translation"
  err "  --cpu    CPU-only (slower but works without GPU)"
  err "ML processing modes (required):"
  err "  --gpu     NVIDIA GPU container for transcription/diarization/translation"
  err "  --cpu     In-process CPU processing (no ML container, slower)"
  err "  --hosted  Remote GPU service URL (no ML container)"
  err ""
  err "Local LLM (optional):"
  err "  --ollama-gpu   Local Ollama with GPU (for summarization/topics)"
@@ -251,7 +261,9 @@ if [[ -z "$MODEL_MODE" ]]; then
fi

# Build profiles list — one profile per feature
COMPOSE_PROFILES=("$MODEL_MODE")
# Only --gpu needs a compose profile; --cpu and --hosted use in-process/remote backends
COMPOSE_PROFILES=()
[[ "$MODEL_MODE" == "gpu" ]] && COMPOSE_PROFILES+=("gpu")
[[ -n "$OLLAMA_MODE" ]] && COMPOSE_PROFILES+=("$OLLAMA_MODE")
[[ "$USE_GARAGE" == "true" ]] && COMPOSE_PROFILES+=("garage")
[[ "$USE_CADDY" == "true" ]] && COMPOSE_PROFILES+=("caddy")
@@ -418,39 +430,102 @@ step_server_env() {
    env_set "$SERVER_ENV" "WEBRTC_HOST" "$PRIMARY_IP"
  fi

  # Specialized models (always via gpu/cpu container aliased as "transcription")
  env_set "$SERVER_ENV" "TRANSCRIPT_BACKEND" "modal"
  env_set "$SERVER_ENV" "TRANSCRIPT_URL" "http://transcription:8000"
  env_set "$SERVER_ENV" "TRANSCRIPT_MODAL_API_KEY" "selfhosted"
  # Specialized models — backend configuration per mode
  env_set "$SERVER_ENV" "DIARIZATION_ENABLED" "true"
  env_set "$SERVER_ENV" "DIARIZATION_BACKEND" "modal"
  env_set "$SERVER_ENV" "DIARIZATION_URL" "http://transcription:8000"
  env_set "$SERVER_ENV" "TRANSLATION_BACKEND" "modal"
  env_set "$SERVER_ENV" "TRANSLATE_URL" "http://transcription:8000"
  case "$MODEL_MODE" in
    gpu)
      # GPU container aliased as "transcription" on docker network
      env_set "$SERVER_ENV" "TRANSCRIPT_BACKEND" "modal"
      env_set "$SERVER_ENV" "TRANSCRIPT_URL" "http://transcription:8000"
      env_set "$SERVER_ENV" "TRANSCRIPT_MODAL_API_KEY" "selfhosted"
      env_set "$SERVER_ENV" "DIARIZATION_BACKEND" "modal"
      env_set "$SERVER_ENV" "DIARIZATION_URL" "http://transcription:8000"
      env_set "$SERVER_ENV" "TRANSLATION_BACKEND" "modal"
      env_set "$SERVER_ENV" "TRANSLATE_URL" "http://transcription:8000"
      env_set "$SERVER_ENV" "PADDING_BACKEND" "modal"
      env_set "$SERVER_ENV" "PADDING_URL" "http://transcription:8000"
      ok "ML backends: GPU container (modal)"
      ;;
    cpu)
      # In-process backends — no ML service container needed
      env_set "$SERVER_ENV" "TRANSCRIPT_BACKEND" "whisper"
      env_set "$SERVER_ENV" "DIARIZATION_BACKEND" "pyannote"
      env_set "$SERVER_ENV" "TRANSLATION_BACKEND" "marian"
      env_set "$SERVER_ENV" "PADDING_BACKEND" "pyav"
      ok "ML backends: in-process CPU (whisper/pyannote/marian/pyav)"
      ;;
    hosted)
      # Remote GPU service — user provides URL
      local gpu_url=""
      if env_has_key "$SERVER_ENV" "TRANSCRIPT_URL"; then
        gpu_url=$(env_get "$SERVER_ENV" "TRANSCRIPT_URL")
      fi
      if [[ -z "$gpu_url" ]] && [[ -t 0 ]]; then
        echo ""
        info "Enter the URL of your remote GPU service (e.g. https://gpu.example.com)"
        read -rp "  GPU service URL: " gpu_url
      fi
      if [[ -z "$gpu_url" ]]; then
        err "GPU service URL required for --hosted mode."
        err "Set TRANSCRIPT_URL in server/.env or provide it interactively."
        exit 1
      fi
      env_set "$SERVER_ENV" "TRANSCRIPT_BACKEND" "modal"
      env_set "$SERVER_ENV" "TRANSCRIPT_URL" "$gpu_url"
      env_set "$SERVER_ENV" "DIARIZATION_BACKEND" "modal"
      env_set "$SERVER_ENV" "DIARIZATION_URL" "$gpu_url"
      env_set "$SERVER_ENV" "TRANSLATION_BACKEND" "modal"
      env_set "$SERVER_ENV" "TRANSLATE_URL" "$gpu_url"
      env_set "$SERVER_ENV" "PADDING_BACKEND" "modal"
      env_set "$SERVER_ENV" "PADDING_URL" "$gpu_url"
      # API key for remote service
      local gpu_api_key=""
      if env_has_key "$SERVER_ENV" "TRANSCRIPT_MODAL_API_KEY"; then
        gpu_api_key=$(env_get "$SERVER_ENV" "TRANSCRIPT_MODAL_API_KEY")
      fi
      if [[ -z "$gpu_api_key" ]] && [[ -t 0 ]]; then
        read -rp "  GPU service API key (or Enter to skip): " gpu_api_key
      fi
      if [[ -n "$gpu_api_key" ]]; then
        env_set "$SERVER_ENV" "TRANSCRIPT_MODAL_API_KEY" "$gpu_api_key"
      fi
      ok "ML backends: remote hosted ($gpu_url)"
      ;;
  esac

  # HuggingFace token for gated models (pyannote diarization)
  # Written to root .env so docker compose picks it up for gpu/cpu containers
  local root_env="$ROOT_DIR/.env"
  local current_hf_token="${HF_TOKEN:-}"
  if [[ -f "$root_env" ]] && env_has_key "$root_env" "HF_TOKEN"; then
    current_hf_token=$(env_get "$root_env" "HF_TOKEN")
  fi
  if [[ -z "$current_hf_token" ]]; then
    echo ""
    warn "HF_TOKEN not set. Diarization will use a public model fallback."
    warn "For best results, get a token at https://huggingface.co/settings/tokens"
    warn "and accept pyannote licenses at https://huggingface.co/pyannote/speaker-diarization-3.1"
    read -rp "  HuggingFace token (or press Enter to skip): " current_hf_token
  fi
  if [[ -n "$current_hf_token" ]]; then
    touch "$root_env"
    env_set "$root_env" "HF_TOKEN" "$current_hf_token"
    export HF_TOKEN="$current_hf_token"
    ok "HF_TOKEN configured"
  else
    touch "$root_env"
    env_set "$root_env" "HF_TOKEN" ""
    ok "HF_TOKEN skipped (using public model fallback)"
  # --gpu: written to root .env (docker compose passes to GPU container)
  # --cpu: written to both root .env and server/.env (in-process pyannote needs it)
  # --hosted: not needed (remote service handles its own auth)
  if [[ "$MODEL_MODE" != "hosted" ]]; then
    local root_env="$ROOT_DIR/.env"
    local current_hf_token="${HF_TOKEN:-}"
    if [[ -f "$root_env" ]] && env_has_key "$root_env" "HF_TOKEN"; then
      current_hf_token=$(env_get "$root_env" "HF_TOKEN")
    fi
    if [[ -z "$current_hf_token" ]]; then
      echo ""
      warn "HF_TOKEN not set. Diarization will use a public model fallback."
      warn "For best results, get a token at https://huggingface.co/settings/tokens"
      warn "and accept pyannote licenses at https://huggingface.co/pyannote/speaker-diarization-3.1"
      if [[ -t 0 ]]; then
        read -rp "  HuggingFace token (or press Enter to skip): " current_hf_token
      fi
    fi
    if [[ -n "$current_hf_token" ]]; then
      touch "$root_env"
      env_set "$root_env" "HF_TOKEN" "$current_hf_token"
      export HF_TOKEN="$current_hf_token"
      # In CPU mode, server process needs HF_TOKEN directly
      if [[ "$MODEL_MODE" == "cpu" ]]; then
        env_set "$SERVER_ENV" "HF_TOKEN" "$current_hf_token"
      fi
      ok "HF_TOKEN configured"
    else
      touch "$root_env"
      env_set "$root_env" "HF_TOKEN" ""
      ok "HF_TOKEN skipped (using public model fallback)"
    fi
  fi

  # LLM configuration
@@ -466,7 +541,7 @@ step_server_env() {
  if env_has_key "$SERVER_ENV" "LLM_URL"; then
    current_llm_url=$(env_get "$SERVER_ENV" "LLM_URL")
  fi
  if [[ -z "$current_llm_url" ]] || [[ "$current_llm_url" == "http://host.docker.internal"* ]]; then
  if [[ -z "$current_llm_url" ]]; then
    warn "LLM not configured. Summarization and topic detection will NOT work."
    warn "Edit server/.env and set LLM_URL, LLM_API_KEY, LLM_MODEL"
    warn "Example: LLM_URL=https://api.openai.com/v1 LLM_MODEL=gpt-4o-mini"
@@ -475,6 +550,20 @@ step_server_env() {
    fi
  fi

  # CPU mode: increase file processing timeouts (default 600s is too short for long audio on CPU)
  if [[ "$MODEL_MODE" == "cpu" ]]; then
    env_set "$SERVER_ENV" "TRANSCRIPT_FILE_TIMEOUT" "3600"
    env_set "$SERVER_ENV" "DIARIZATION_FILE_TIMEOUT" "3600"
    ok "CPU mode — file processing timeouts set to 3600s (1 hour)"
  fi

  # If Daily.co is manually configured, ensure Hatchet connectivity vars are set
  if env_has_key "$SERVER_ENV" "DAILY_API_KEY" && [[ -n "$(env_get "$SERVER_ENV" "DAILY_API_KEY")" ]]; then
    env_set "$SERVER_ENV" "HATCHET_CLIENT_SERVER_URL" "http://hatchet:8888"
    env_set "$SERVER_ENV" "HATCHET_CLIENT_HOST_PORT" "hatchet:7077"
    ok "Daily.co detected — Hatchet connectivity configured"
  fi

  ok "server/.env ready"
}

@@ -535,6 +624,19 @@ step_www_env() {
    fi
  fi

  # Enable rooms if any video platform is configured in server/.env
  local _daily_key="" _whereby_key=""
  if env_has_key "$SERVER_ENV" "DAILY_API_KEY"; then
    _daily_key=$(env_get "$SERVER_ENV" "DAILY_API_KEY")
  fi
  if env_has_key "$SERVER_ENV" "WHEREBY_API_KEY"; then
    _whereby_key=$(env_get "$SERVER_ENV" "WHEREBY_API_KEY")
  fi
  if [[ -n "$_daily_key" ]] || [[ -n "$_whereby_key" ]]; then
    env_set "$WWW_ENV" "FEATURE_ROOMS" "true"
    ok "Rooms feature enabled (video platform configured)"
  fi

  ok "www/.env ready (URL=$base_url)"
}

@@ -739,6 +841,23 @@ CADDYEOF
  else
    ok "Caddyfile already exists"
  fi

  # Add Hatchet dashboard route if Daily.co is detected
  if [[ "$DAILY_DETECTED" == "true" ]]; then
    if ! grep -q "hatchet" "$caddyfile" 2>/dev/null; then
      cat >> "$caddyfile" << CADDYEOF

# Hatchet workflow dashboard (Daily.co multitrack processing)
:8888 {
  tls internal
  reverse_proxy hatchet:8888
}
CADDYEOF
      ok "Added Hatchet dashboard route to Caddyfile (port 8888)"
    else
      ok "Hatchet dashboard route already in Caddyfile"
    fi
  fi
}

# =========================================================
@@ -747,11 +866,12 @@ CADDYEOF
step_services() {
  info "Step 6: Starting Docker services"

  # Build GPU/CPU image from source (always needed — no prebuilt image)
  local build_svc="$MODEL_MODE"
  info "Building $build_svc image (first build downloads ML models, may take a while)..."
  compose_cmd build "$build_svc"
  ok "$build_svc image built"
  # Build GPU image from source (only for --gpu mode)
  if [[ "$MODEL_MODE" == "gpu" ]]; then
    info "Building gpu image (first build downloads ML models, may take a while)..."
    compose_cmd build gpu
    ok "gpu image built"
  fi

  # Build or pull backend and frontend images
  if [[ "$BUILD_IMAGES" == "true" ]]; then
@@ -766,6 +886,37 @@ step_services() {
    compose_cmd pull server web || warn "Pull failed — using cached images"
  fi

  # Build hatchet workers if Daily.co is configured (same backend image)
  if [[ "$DAILY_DETECTED" == "true" ]] && [[ "$BUILD_IMAGES" == "true" ]]; then
    info "Building Hatchet worker images..."
    compose_cmd build hatchet-worker-cpu hatchet-worker-llm
    ok "Hatchet worker images built"
  fi

  # Ensure hatchet database exists before starting hatchet (init-hatchet-db.sql only runs on fresh postgres volumes)
  if [[ "$DAILY_DETECTED" == "true" ]]; then
    info "Ensuring postgres is running for Hatchet database setup..."
    compose_cmd up -d postgres
    local pg_ready=false
    for i in $(seq 1 30); do
      if compose_cmd exec -T postgres pg_isready -U reflector > /dev/null 2>&1; then
        pg_ready=true
        break
      fi
      sleep 2
    done
    if [[ "$pg_ready" == "true" ]]; then
      compose_cmd exec -T postgres psql -U reflector -tc \
        "SELECT 1 FROM pg_database WHERE datname = 'hatchet'" 2>/dev/null \
        | grep -q 1 \
        || compose_cmd exec -T postgres psql -U reflector -c "CREATE DATABASE hatchet" 2>/dev/null \
        || true
      ok "Hatchet database ready"
    else
      warn "Postgres not ready — hatchet database may need to be created manually"
    fi
  fi

  # Start all services
  compose_cmd up -d
  ok "Containers started"
@@ -788,25 +939,29 @@
step_health() {
  info "Step 7: Health checks"

  # Specialized model service (gpu or cpu)
  local model_svc="$MODEL_MODE"

  info "Waiting for $model_svc service (first start downloads ~1GB of models)..."
  local model_ok=false
  for i in $(seq 1 120); do
    if curl -sf http://localhost:8000/docs > /dev/null 2>&1; then
      model_ok=true
      break
  # Specialized model service (only for --gpu mode)
  if [[ "$MODEL_MODE" == "gpu" ]]; then
    info "Waiting for gpu service (first start downloads ~1GB of models)..."
    local model_ok=false
    for i in $(seq 1 120); do
      if curl -sf http://localhost:8000/docs > /dev/null 2>&1; then
        model_ok=true
        break
      fi
      echo -ne "\r  Waiting for gpu service... ($i/120)"
      sleep 5
    done
    echo ""
    if [[ "$model_ok" == "true" ]]; then
      ok "gpu service healthy (transcription + diarization)"
    else
      warn "gpu service not ready yet — it will keep loading in the background"
      warn "Check with: docker compose -f docker-compose.selfhosted.yml logs gpu"
    fi
    echo -ne "\r  Waiting for $model_svc service... ($i/120)"
    sleep 5
  done
  echo ""
  if [[ "$model_ok" == "true" ]]; then
    ok "$model_svc service healthy (transcription + diarization)"
  else
    warn "$model_svc service not ready yet — it will keep loading in the background"
    warn "Check with: docker compose -f docker-compose.selfhosted.yml logs $model_svc"
  elif [[ "$MODEL_MODE" == "cpu" ]]; then
    ok "CPU mode — ML processing runs in-process on server/worker (no separate service)"
  elif [[ "$MODEL_MODE" == "hosted" ]]; then
    ok "Hosted mode — ML processing via remote GPU service (no local health check)"
  fi

  # Ollama (if applicable)
@@ -894,6 +1049,26 @@ step_health() {
    fi
  fi

  # Hatchet (if Daily.co detected)
  if [[ "$DAILY_DETECTED" == "true" ]]; then
    info "Waiting for Hatchet workflow engine..."
    local hatchet_ok=false
    for i in $(seq 1 60); do
      if curl -sf http://localhost:8888/api/live > /dev/null 2>&1; then
        hatchet_ok=true
        break
      fi
      echo -ne "\r  Waiting for Hatchet... ($i/60)"
      sleep 3
    done
    echo ""
    if [[ "$hatchet_ok" == "true" ]]; then
      ok "Hatchet workflow engine healthy"
    else
      warn "Hatchet not ready yet. Check: docker compose logs hatchet"
    fi
  fi

  # LLM warning for non-Ollama modes
  if [[ "$USES_OLLAMA" == "false" ]]; then
    local llm_url=""
@@ -911,6 +1086,71 @@ step_health() {
  fi
}

# =========================================================
# Step 8: Hatchet token generation (Daily.co only)
# =========================================================
step_hatchet_token() {
  if [[ "$DAILY_DETECTED" != "true" ]]; then
    return
  fi

  # Skip if token already set
  if env_has_key "$SERVER_ENV" "HATCHET_CLIENT_TOKEN" && [[ -n "$(env_get "$SERVER_ENV" "HATCHET_CLIENT_TOKEN")" ]]; then
    ok "HATCHET_CLIENT_TOKEN already set — skipping generation"
    return
  fi

  info "Step 8: Generating Hatchet API token"

  # Wait for hatchet to be healthy
  local hatchet_ok=false
  for i in $(seq 1 60); do
    if curl -sf http://localhost:8888/api/live > /dev/null 2>&1; then
      hatchet_ok=true
      break
    fi
    echo -ne "\r  Waiting for Hatchet API... ($i/60)"
    sleep 3
  done
  echo ""

  if [[ "$hatchet_ok" != "true" ]]; then
    err "Hatchet not responding — cannot generate token"
    err "Check: docker compose logs hatchet"
    return
  fi

  # Get tenant ID from hatchet database
  local tenant_id
  tenant_id=$(compose_cmd exec -T postgres psql -U reflector -d hatchet -t -c \
    "SELECT id FROM \"Tenant\" WHERE slug = 'default';" 2>/dev/null | tr -d ' \n')

  if [[ -z "$tenant_id" ]]; then
    err "Could not find default tenant in Hatchet database"
    err "Hatchet may still be initializing. Try re-running the script."
    return
  fi

  # Generate token via hatchet-admin
  local token
  token=$(compose_cmd exec -T hatchet /hatchet-admin token create \
    --config /config --tenant-id "$tenant_id" 2>/dev/null | tr -d '\n')

  if [[ -z "$token" ]]; then
    err "Failed to generate Hatchet token"
    err "Try generating manually: see server/README.md"
    return
  fi

  env_set "$SERVER_ENV" "HATCHET_CLIENT_TOKEN" "$token"
  ok "HATCHET_CLIENT_TOKEN generated and saved to server/.env"

  # Restart services that need the token
  info "Restarting services with new Hatchet token..."
  compose_cmd restart server worker hatchet-worker-cpu hatchet-worker-llm
  ok "Services restarted with Hatchet token"
}

# =========================================================
# Main
# =========================================================
@@ -957,6 +1197,48 @@ main() {
  echo ""
  step_server_env
  echo ""

  # Auto-detect video platforms from server/.env (after step_server_env so file exists)
  DAILY_DETECTED=false
  WHEREBY_DETECTED=false
  if env_has_key "$SERVER_ENV" "DAILY_API_KEY" && [[ -n "$(env_get "$SERVER_ENV" "DAILY_API_KEY")" ]]; then
    DAILY_DETECTED=true
  fi
  if env_has_key "$SERVER_ENV" "WHEREBY_API_KEY" && [[ -n "$(env_get "$SERVER_ENV" "WHEREBY_API_KEY")" ]]; then
    WHEREBY_DETECTED=true
  fi
  ANY_PLATFORM_DETECTED=false
  [[ "$DAILY_DETECTED" == "true" || "$WHEREBY_DETECTED" == "true" ]] && ANY_PLATFORM_DETECTED=true

  # Conditional profile activation for Daily.co
  if [[ "$DAILY_DETECTED" == "true" ]]; then
    COMPOSE_PROFILES+=("dailyco")
    ok "Daily.co detected — enabling Hatchet workflow services"
  fi

  # Generate .env.hatchet for hatchet dashboard config
  if [[ "$DAILY_DETECTED" == "true" ]]; then
    local hatchet_server_url hatchet_cookie_domain
    if [[ -n "$CUSTOM_DOMAIN" ]]; then
      hatchet_server_url="https://${CUSTOM_DOMAIN}:8888"
      hatchet_cookie_domain="$CUSTOM_DOMAIN"
    elif [[ -n "$PRIMARY_IP" ]]; then
      hatchet_server_url="http://${PRIMARY_IP}:8888"
      hatchet_cookie_domain="$PRIMARY_IP"
    else
      hatchet_server_url="http://localhost:8888"
      hatchet_cookie_domain="localhost"
    fi
    cat > "$ROOT_DIR/.env.hatchet" << EOF
SERVER_URL=$hatchet_server_url
SERVER_AUTH_COOKIE_DOMAIN=$hatchet_cookie_domain
EOF
    ok "Generated .env.hatchet (dashboard URL=$hatchet_server_url)"
  else
    # Create empty .env.hatchet so compose doesn't fail if dailyco profile is ever activated manually
    touch "$ROOT_DIR/.env.hatchet"
  fi

  step_www_env
  echo ""
  step_storage
@@ -966,6 +1248,8 @@ main() {
  step_services
  echo ""
  step_health
  echo ""
  step_hatchet_token

  echo ""
  echo "=========================================="
@@ -995,6 +1279,9 @@ main() {
  [[ "$USE_GARAGE" != "true" ]] && echo "  Storage: External S3"
  [[ "$USES_OLLAMA" == "true" ]] && echo "  LLM: Ollama ($OLLAMA_MODEL) for summarization/topics"
  [[ "$USES_OLLAMA" != "true" ]] && echo "  LLM: External (configure in server/.env)"
  [[ "$DAILY_DETECTED" == "true" ]] && echo "  Video: Daily.co (live rooms + multitrack processing via Hatchet)"
  [[ "$WHEREBY_DETECTED" == "true" ]] && echo "  Video: Whereby (live rooms)"
  [[ "$ANY_PLATFORM_DETECTED" != "true" ]] && echo "  Video: None (rooms disabled)"
  echo ""
  echo "  To stop: docker compose -f docker-compose.selfhosted.yml down"
  echo "  To re-run: ./scripts/setup-selfhosted.sh $*"

@@ -86,11 +86,23 @@ LLM_API_KEY=not-needed
## Context size for summary generation (tokens)
LLM_CONTEXT_WINDOW=16000

## =======================================================
## Audio Padding
##
## backends: pyav (in-process PyAV), modal (HTTP API client)
## Default is "pyav" — no external service needed.
## Set to "modal" when using Modal.com or self-hosted gpu/self_hosted/ container.
## =======================================================
#PADDING_BACKEND=pyav
#PADDING_BACKEND=modal
#PADDING_URL=https://xxxxx--reflector-padding-web.modal.run
#PADDING_MODAL_API_KEY=xxxxx

## =======================================================
## Diarization
##
## Only available on modal
## To allow diarization, you need to expose expose the files to be dowloded by the pipeline
## backends: modal (HTTP API), pyannote (in-process pyannote.audio)
## To allow diarization, you need to expose the files to be downloaded by the pipeline
## =======================================================
DIARIZATION_ENABLED=false
DIARIZATION_BACKEND=modal
@@ -137,6 +149,10 @@ TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
#DAILYCO_STORAGE_AWS_ROLE_ARN=...  # IAM role ARN for Daily.co S3 access
#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
#DAILYCO_STORAGE_AWS_REGION=us-west-2
# Worker credentials for reading/deleting from Daily's recording bucket
# Required when transcript storage is separate from Daily's bucket (e.g., selfhosted with Garage)
#DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
#DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key

## Whereby (optional separate bucket)
#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby

@@ -32,23 +32,46 @@ AUTH_BACKEND=none

# =======================================================
# Specialized Models (Transcription, Diarization, Translation)
# These run in the gpu/cpu container — NOT an LLM.
# The "modal" backend means "HTTP API client" — it talks to
# the self-hosted container, not Modal.com cloud.
# These do NOT use an LLM. Configured per mode by the setup script:
#
#   --gpu mode:    modal backends → GPU container (http://transcription:8000)
#   --cpu mode:    whisper/pyannote/marian/pyav → in-process ML on server/worker
#   --hosted mode: modal backends → user-provided remote GPU service URL
# =======================================================

# --- --gpu mode (default) ---
TRANSCRIPT_BACKEND=modal
TRANSCRIPT_URL=http://transcription:8000
TRANSCRIPT_MODAL_API_KEY=selfhosted

DIARIZATION_ENABLED=true
DIARIZATION_BACKEND=modal
DIARIZATION_URL=http://transcription:8000

TRANSLATION_BACKEND=modal
TRANSLATE_URL=http://transcription:8000
PADDING_BACKEND=modal
PADDING_URL=http://transcription:8000

# HuggingFace token — optional, for gated models (e.g. pyannote).
# Falls back to public S3 model bundle if not set.
# --- --cpu mode (set by setup script) ---
# TRANSCRIPT_BACKEND=whisper
# DIARIZATION_BACKEND=pyannote
# TRANSLATION_BACKEND=marian
# PADDING_BACKEND=pyav

# --- --hosted mode (set by setup script) ---
# TRANSCRIPT_BACKEND=modal
# TRANSCRIPT_URL=https://your-gpu-service.example.com
# DIARIZATION_BACKEND=modal
# DIARIZATION_URL=https://your-gpu-service.example.com
# ... (all URLs point to one remote service)

# Whisper model sizes for local transcription (--cpu mode)
# Options: "tiny", "base", "small", "medium", "large-v2"
# WHISPER_CHUNK_MODEL=tiny
# WHISPER_FILE_MODEL=tiny

# HuggingFace token — for gated models (e.g. pyannote diarization).
# Required for --gpu and --cpu modes; falls back to public S3 bundle if not set.
# Not needed for --hosted mode (remote service handles its own auth).
# HF_TOKEN=hf_xxxxx

# =======================================================
@@ -93,15 +116,42 @@ TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
# =======================================================
# Daily.co Live Rooms (Optional)
# Enable real-time meeting rooms with Daily.co integration.
# Requires a Daily.co account: https://www.daily.co/
# Configure these BEFORE running setup-selfhosted.sh and the
# script will auto-detect and start Hatchet workflow services.
#
# Prerequisites:
#   1. Daily.co account: https://www.daily.co/
#   2. API key: Dashboard → Developers → API Keys
#   3. S3 bucket for recordings: https://docs.daily.co/guides/products/live-streaming-recording/storing-recordings-in-a-custom-s3-bucket
#   4. IAM role ARN for Daily.co to write recordings to your bucket
#
# After configuring, run: ./scripts/setup-selfhosted.sh <your-flags>
# The script will detect DAILY_API_KEY and automatically:
#   - Start Hatchet workflow engine + CPU/LLM workers
#   - Generate a Hatchet API token
#   - Enable FEATURE_ROOMS in the frontend
# =======================================================
# DEFAULT_VIDEO_PLATFORM=daily
# DAILY_API_KEY=your-daily-api-key
# DAILY_SUBDOMAIN=your-subdomain
# DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
# DEFAULT_VIDEO_PLATFORM=daily
# DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
# DAILYCO_STORAGE_AWS_REGION=us-east-1
# DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::role/DailyCoAccess
# Worker credentials for reading/deleting from Daily's recording bucket
# Required when transcript storage is separate from Daily's bucket (e.g., selfhosted with Garage)
# DAILYCO_STORAGE_AWS_ACCESS_KEY_ID=your-aws-access-key
# DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret-key
# DAILY_WEBHOOK_SECRET=your-daily-webhook-secret  # optional, for faster recording discovery

# =======================================================
# Hatchet Workflow Engine (Auto-configured for Daily.co)
# Required for Daily.co multitrack recording processing.
# The setup script generates HATCHET_CLIENT_TOKEN automatically.
# Do not set these manually unless you know what you're doing.
# =======================================================
# HATCHET_CLIENT_TOKEN=<auto-generated-by-script>
# HATCHET_CLIENT_SERVER_URL=http://hatchet:8888
# HATCHET_CLIENT_HOST_PORT=hatchet:7077

# =======================================================
# Feature Flags

@@ -6,7 +6,7 @@ ENV PYTHONUNBUFFERED=1 \

# builder install base dependencies
WORKDIR /tmp
RUN apt-get update && apt-get install -y curl && apt-get clean
RUN apt-get update && apt-get install -y curl ffmpeg && apt-get clean
ADD https://astral.sh/uv/install.sh /uv-installer.sh
RUN sh /uv-installer.sh && rm /uv-installer.sh
ENV PATH="/root/.local/bin/:$PATH"
@@ -17,9 +17,6 @@ WORKDIR /app
COPY pyproject.toml uv.lock README.md /app/
RUN uv sync --compile-bytecode --locked

# pre-download nltk packages
RUN uv run python -c "import nltk; nltk.download('punkt_tab'); nltk.download('averaged_perceptron_tagger_eng')"

# bootstrap
COPY alembic.ini runserver.sh /app/
COPY images /app/images

@@ -18,17 +18,16 @@ dependencies = [
    "fastapi[standard]>=0.100.1",
    "sentry-sdk[fastapi]>=1.29.2",
    "httpx>=0.24.1",
    "fastapi-pagination>=0.12.6",
    "fastapi-pagination>=0.14.2",
    "databases[aiosqlite, asyncpg]>=0.7.0",
    "sqlalchemy<1.5",
    "alembic>=1.11.3",
    "nltk>=3.8.1",
    "prometheus-fastapi-instrumentator>=6.1.0",
    "sentencepiece>=0.1.99",
    "protobuf>=4.24.3",
    "celery>=5.3.4",
    "redis>=5.0.1",
    "python-jose[cryptography]>=3.3.0",
    "pyjwt[crypto]>=2.8.0",
    "python-multipart>=0.0.6",
    "transformers>=4.36.2",
    "jsonschema>=4.23.0",
@@ -39,7 +38,8 @@ dependencies = [
    "pytest-env>=1.1.5",
    "webvtt-py>=0.5.0",
    "icalendar>=6.0.0",
    "hatchet-sdk>=0.47.0",
    "hatchet-sdk==1.22.16",
    "pydantic>=2.12.5",
]

[dependency-groups]
@@ -71,9 +71,12 @@ local = [
    "faster-whisper>=0.10.0",
]
silero-vad = [
    "silero-vad>=5.1.2",
    "silero-vad==5.1.2",
    "torch>=2.8.0",
    "torchaudio>=2.8.0",
    "pyannote.audio==3.4.0",
    "pytorch-lightning<2.6",
    "librosa==0.10.1",
]

[tool.uv]

13
server/reflector/_warnings_filter.py
Normal file
@@ -0,0 +1,13 @@
"""
Suppress known dependency warnings. Import this before any reflector/hatchet_sdk
imports that pull in pydantic (e.g. llama_index) to hide UnsupportedFieldAttributeWarning
about validate_default.
"""

import warnings

warnings.filterwarnings(
    "ignore",
    message=".*validate_default.*",
    category=UserWarning,
)
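
Since the filter only takes effect once the module is imported, import order is what makes it work. A minimal sketch of a worker entrypoint under that assumption (the second import mirrors the worker files later in this diff):

# Entrypoint sketch — the filter module must be the first import so the
# warnings filter is registered before any pydantic-importing module loads.
import reflector._warnings_filter  # noqa: F401 -- side effect only

from reflector.hatchet.client import HatchetClientManager  # safe: filter already active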
@@ -14,6 +14,7 @@ current_user = auth_module.current_user
current_user_optional = auth_module.current_user_optional
parse_ws_bearer_token = auth_module.parse_ws_bearer_token
current_user_ws_optional = auth_module.current_user_ws_optional
verify_raw_token = auth_module.verify_raw_token

# Optional router (e.g. for /auth/login in password backend)
router = getattr(auth_module, "router", None)

@@ -4,8 +4,8 @@ from fastapi import Depends, HTTPException

if TYPE_CHECKING:
    from fastapi import WebSocket
import jwt
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
from jose import JWTError, jwt
from pydantic import BaseModel

from reflector.db.user_api_keys import user_api_keys_controller
@@ -54,7 +54,7 @@ class JWTAuth:
                audience=jwt_audience,
            )
            return payload
        except JWTError as e:
        except jwt.PyJWTError as e:
            logger.error(f"JWT error: {e}")
            raise

@@ -94,7 +94,7 @@ async def _authenticate_user(
            )

            user_infos.append(UserInfo(sub=user.id, email=email))
        except JWTError as e:
        except jwt.PyJWTError as e:
            logger.error(f"JWT error: {e}")
            raise HTTPException(status_code=401, detail="Invalid authentication")

@@ -144,3 +144,8 @@ async def current_user_ws_optional(websocket: "WebSocket") -> Optional[UserInfo]
    if not token:
        return None
    return await _authenticate_user(token, None, JWTAuth())


async def verify_raw_token(token: str) -> Optional[UserInfo]:
    """Verify a raw JWT token string (used for query-param auth fallback)."""
    return await _authenticate_user(token, None, JWTAuth())

@@ -27,3 +27,8 @@ def parse_ws_bearer_token(websocket):

async def current_user_ws_optional(websocket):
    return None


async def verify_raw_token(token):
    """Verify a raw JWT token string (used for query-param auth fallback)."""
    return None

@@ -9,9 +9,9 @@ from collections import defaultdict
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING, Annotated, Optional

import jwt
from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
from jose import JWTError, jwt
from pydantic import BaseModel

from reflector.auth.password_utils import verify_password
@@ -110,7 +110,7 @@ async def _authenticate_user(
            user_id = payload["sub"]
            email = payload.get("email")
            user_infos.append(UserInfo(sub=user_id, email=email))
        except JWTError as e:
        except jwt.PyJWTError as e:
            logger.error(f"JWT error: {e}")
            raise HTTPException(status_code=401, detail="Invalid authentication")

@@ -168,6 +168,11 @@ async def current_user_ws_optional(websocket: "WebSocket") -> Optional[UserInfo]
    return await _authenticate_user(token, None)


async def verify_raw_token(token: str) -> Optional[UserInfo]:
    """Verify a raw JWT token string (used for query-param auth fallback)."""
    return await _authenticate_user(token, None)


# --- Login router ---
router = APIRouter(prefix="/auth", tags=["auth"])


@@ -39,5 +39,12 @@ TIMEOUT_MEDIUM = (
    300  # Single LLM calls, waveform generation (5m for slow LLM responses)
)
TIMEOUT_LONG = 180  # Action items (larger context LLM)
TIMEOUT_AUDIO = 720  # Audio processing: padding, mixdown
TIMEOUT_HEAVY = 600  # Transcription, fan-out LLM tasks
TIMEOUT_TITLE = 300  # generate_title (single LLM call; doc: reduce from 600s)
TIMEOUT_AUDIO = 720  # Audio processing: padding, mixdown (Hatchet execution_timeout)
TIMEOUT_AUDIO_HTTP = (
    660  # httpx timeout for pad_track — below 720 so Hatchet doesn't race
)
TIMEOUT_HEAVY = 600  # Transcription, fan-out LLM tasks (Hatchet execution_timeout)
TIMEOUT_HEAVY_HTTP = (
    540  # httpx timeout for transcribe_track — below 600 so Hatchet doesn't race
)
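
The paired *_HTTP constants only matter if the HTTP client actually uses them. A sketch of the intended pattern, assuming a call shape like the one below (the function name, URL, and payload are illustrative, not from this diff): the httpx deadline fires inside Hatchet's execution window, so a stuck call surfaces as a retryable client timeout rather than an engine-level kill.

import httpx

from reflector.hatchet.constants import TIMEOUT_AUDIO, TIMEOUT_AUDIO_HTTP

assert TIMEOUT_AUDIO_HTTP < TIMEOUT_AUDIO  # client gives up before Hatchet does


async def pad_track_request(url: str, payload: dict) -> dict:
    # Illustrative call shape: a 660s client deadline inside the 720s task window,
    # so the failure mode is an httpx timeout the error classifier can inspect.
    async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO_HTTP) as client:
        response = await client.post(url, json=payload)
        response.raise_for_status()
        return response.json()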
74
server/reflector/hatchet/error_classification.py
Normal file
@@ -0,0 +1,74 @@
"""Classify exceptions as non-retryable for Hatchet workflows.

When a task raises NonRetryableException (or an exception classified as
non-retryable and re-raised as such), Hatchet stops immediately — no further
retries. Used by with_error_handling to avoid wasting retries on config errors,
auth failures, corrupt data, etc.
"""

# Optional dependencies: only classify if the exception type is available.
# This avoids hard dependency on openai/av/botocore for code paths that don't use them.
try:
    import openai
except ImportError:
    openai = None  # type: ignore[assignment]

try:
    import av
except ImportError:
    av = None  # type: ignore[assignment]

try:
    from botocore.exceptions import ClientError as BotoClientError
except ImportError:
    BotoClientError = None  # type: ignore[misc, assignment]

from hatchet_sdk import NonRetryableException
from httpx import HTTPStatusError

from reflector.llm import LLMParseError

# HTTP status codes that won't change on retry (auth, not found, payment, payload)
NON_RETRYABLE_HTTP_STATUSES = {401, 402, 403, 404, 413}
NON_RETRYABLE_S3_CODES = {"AccessDenied", "NoSuchBucket", "NoSuchKey"}


def is_non_retryable(e: BaseException) -> bool:
    """Return True if the exception should stop Hatchet retries immediately.

    Hard failures (config, auth, missing resource, corrupt data) return True.
    Transient errors (timeouts, 5xx, 429, connection) return False.
    """
    if isinstance(e, NonRetryableException):
        return True

    # Config/input errors
    if isinstance(e, (ValueError, TypeError)):
        return True

    # HTTP status codes that won't change on retry
    if isinstance(e, HTTPStatusError):
        return e.response.status_code in NON_RETRYABLE_HTTP_STATUSES

    # OpenAI auth errors
    if openai is not None and isinstance(e, openai.AuthenticationError):
        return True

    # LLM parse failures (already retried internally)
    if isinstance(e, LLMParseError):
        return True

    # S3 permission/existence errors
    if BotoClientError is not None and isinstance(e, BotoClientError):
        code = e.response.get("Error", {}).get("Code", "")
        return code in NON_RETRYABLE_S3_CODES

    # Corrupt audio (PyAV) — AVError in some versions; fallback to InvalidDataError
    if av is not None:
        av_error = getattr(av, "AVError", None) or getattr(
            getattr(av, "error", None), "InvalidDataError", None
        )
        if av_error is not None and isinstance(e, av_error):
            return True

    return False
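
To make the boundary concrete, a small illustration with constructed exceptions (not taken from the codebase): a 404 is classified as permanent while a 503 stays retryable.

import httpx

from reflector.hatchet.error_classification import is_non_retryable


def _http_error(status: int) -> httpx.HTTPStatusError:
    # Build a minimal HTTPStatusError carrying only a status code.
    request = httpx.Request("GET", "https://example.com")
    response = httpx.Response(status, request=request)
    return httpx.HTTPStatusError("boom", request=request, response=response)


assert is_non_retryable(_http_error(404))          # permanent: retrying won't help
assert not is_non_retryable(_http_error(503))      # transient: let Hatchet retry
assert is_non_retryable(ValueError("bad config"))  # config/input error: hard fail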
@@ -7,6 +7,7 @@ Configuration:
- Worker affinity: pool=cpu-heavy
"""

import reflector._warnings_filter  # noqa: F401 -- side effect: suppress pydantic validate_default warning
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
    daily_multitrack_pipeline,

@@ -5,6 +5,7 @@ Handles: all tasks except mixdown_tracks (transcription, LLM inference, orchestr

import asyncio

import reflector._warnings_filter  # noqa: F401 -- side effect: suppress pydantic validate_default warning
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
    daily_multitrack_pipeline,

@@ -27,6 +27,7 @@ from hatchet_sdk import (
    ConcurrencyExpression,
    ConcurrencyLimitStrategy,
    Context,
    NonRetryableException,
)
from hatchet_sdk.labels import DesiredWorkerLabel
from pydantic import BaseModel
@@ -43,8 +44,10 @@ from reflector.hatchet.constants import (
    TIMEOUT_LONG,
    TIMEOUT_MEDIUM,
    TIMEOUT_SHORT,
    TIMEOUT_TITLE,
    TaskName,
)
from reflector.hatchet.error_classification import is_non_retryable
from reflector.hatchet.workflows.models import (
    ActionItemsResult,
    ConsentResult,
@@ -90,7 +93,6 @@ from reflector.processors.summary.summary_builder import SummaryBuilder
from reflector.processors.types import TitleSummary, Word
from reflector.processors.types import Transcript as TranscriptType
from reflector.settings import settings
from reflector.storage.storage_aws import AwsStorage
from reflector.utils.audio_constants import (
    PRESIGNED_URL_EXPIRATION_SECONDS,
    WAVEFORM_SEGMENTS,
@@ -117,6 +119,7 @@ class PipelineInput(BaseModel):
    bucket_name: NonEmptyString
    transcript_id: NonEmptyString
    room_id: NonEmptyString | None = None
    source_platform: str = "daily"


hatchet = HatchetClientManager.get_client()
@@ -170,15 +173,10 @@ async def set_workflow_error_status(transcript_id: NonEmptyString) -> bool:


def _spawn_storage():
    """Create fresh storage instance."""
    # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
    return AwsStorage(
        aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
        aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
        aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
    )
    """Create fresh storage instance for writing to our transcript bucket."""
    from reflector.storage import get_transcripts_storage  # noqa: PLC0415

    return get_transcripts_storage()


class Loggable(Protocol):
@@ -221,6 +219,13 @@ def make_audio_progress_logger(
R = TypeVar("R")


def _successful_run_results(
    results: list[dict[str, Any] | BaseException],
) -> list[dict[str, Any]]:
    """Return only successful (non-exception) results from aio_run_many(return_exceptions=True)."""
    return [r for r in results if not isinstance(r, BaseException)]
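
For illustration, the helper's contract on a hypothetical fan-out result list (the dict shapes are made up): exceptions returned in-band by aio_run_many(..., return_exceptions=True) are filtered out rather than raised.

results: list[dict | BaseException] = [
    {"transcribe_track": {"words": []}},  # successful run output
    RuntimeError("track 1 failed"),       # failure returned in-band
    {"transcribe_track": {"words": []}},
]
assert len(_successful_run_results(results)) == 2  # the RuntimeError is dropped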


def with_error_handling(
    step_name: TaskName, set_error_status: bool = True
) -> Callable[
@@ -248,8 +253,12 @@ def with_error_handling(
                error=str(e),
                exc_info=True,
            )
            if set_error_status:
                await set_workflow_error_status(input.transcript_id)
            if is_non_retryable(e):
                # Hard fail: stop retries, set error status, fail workflow
                if set_error_status:
                    await set_workflow_error_status(input.transcript_id)
                raise NonRetryableException(str(e)) from e
            # Transient: do not set error status — Hatchet will retry
            raise

    return wrapper  # type: ignore[return-value]
@@ -258,7 +267,10 @@ def with_error_handling(


@daily_multitrack_pipeline.task(
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT), retries=3
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.GET_RECORDING)
async def get_recording(input: PipelineInput, ctx: Context) -> RecordingResult:
@@ -314,6 +326,8 @@ async def get_recording(input: PipelineInput, ctx: Context) -> RecordingResult:
    parents=[get_recording],
    execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.GET_PARTICIPANTS)
async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsResult:
@@ -417,6 +431,8 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsRe
    parents=[get_participants],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.PROCESS_TRACKS)
async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksResult:
@@ -434,12 +450,13 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksRes
                bucket_name=input.bucket_name,
                transcript_id=input.transcript_id,
                language=source_language,
                source_platform=input.source_platform,
            )
        )
        for i, track in enumerate(input.tracks)
    ]

    results = await track_workflow.aio_run_many(bulk_runs)
    results = await track_workflow.aio_run_many(bulk_runs, return_exceptions=True)

    target_language = participants_result.target_language

@@ -447,7 +464,18 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksRes
    padded_tracks = []
    created_padded_files = set()

    for result in results:
    for i, result in enumerate(results):
        if isinstance(result, BaseException):
            logger.error(
                "[Hatchet] process_tracks: track workflow failed, failing step",
                transcript_id=input.transcript_id,
                track_index=i,
                error=str(result),
            )
            ctx.log(f"process_tracks: track {i} failed ({result}), failing step")
            raise ValueError(
                f"Track {i} workflow failed after retries: {result!s}"
            ) from result
        transcribe_result = TranscribeTrackResult(**result[TaskName.TRANSCRIBE_TRACK])
        track_words.append(transcribe_result.words)

@@ -485,7 +513,9 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksRes
@daily_multitrack_pipeline.task(
    parents=[process_tracks],
    execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
    retries=3,
    retries=2,
    backoff_factor=2.0,
    backoff_max_seconds=15,
    desired_worker_labels={
        "pool": DesiredWorkerLabel(
            value="cpu-heavy",
@@ -597,6 +627,8 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
    parents=[mixdown_tracks],
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=10,
)
@with_error_handling(TaskName.GENERATE_WAVEFORM)
async def generate_waveform(input: PipelineInput, ctx: Context) -> WaveformResult:
@@ -665,6 +697,8 @@ async def generate_waveform(input: PipelineInput, ctx: Context) -> WaveformResul
    parents=[process_tracks],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
@with_error_handling(TaskName.DETECT_TOPICS)
async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
@@ -726,11 +760,22 @@ async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
        for chunk in chunks
    ]

    results = await topic_chunk_workflow.aio_run_many(bulk_runs)
    results = await topic_chunk_workflow.aio_run_many(bulk_runs, return_exceptions=True)

    topic_chunks = [
        TopicChunkResult(**result[TaskName.DETECT_CHUNK_TOPIC]) for result in results
    ]
    topic_chunks: list[TopicChunkResult] = []
    for i, result in enumerate(results):
        if isinstance(result, BaseException):
            logger.error(
                "[Hatchet] detect_topics: chunk workflow failed, failing step",
                transcript_id=input.transcript_id,
|
||||
chunk_index=i,
|
||||
error=str(result),
|
||||
)
|
||||
ctx.log(f"detect_topics: chunk {i} failed ({result}), failing step")
|
||||
raise ValueError(
|
||||
f"Topic chunk {i} workflow failed after retries: {result!s}"
|
||||
) from result
|
||||
topic_chunks.append(TopicChunkResult(**result[TaskName.DETECT_CHUNK_TOPIC]))
|
||||
|
||||
async with fresh_db_connection():
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
@@ -768,8 +813,10 @@ async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
|
||||
|
||||
@daily_multitrack_pipeline.task(
|
||||
parents=[detect_topics],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_TITLE),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.GENERATE_TITLE)
|
||||
async def generate_title(input: PipelineInput, ctx: Context) -> TitleResult:
|
||||
@@ -834,7 +881,9 @@ async def generate_title(input: PipelineInput, ctx: Context) -> TitleResult:
|
||||
@daily_multitrack_pipeline.task(
|
||||
parents=[detect_topics],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
|
||||
retries=3,
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=30,
|
||||
)
|
||||
@with_error_handling(TaskName.EXTRACT_SUBJECTS)
|
||||
async def extract_subjects(input: PipelineInput, ctx: Context) -> SubjectsResult:
|
||||
@@ -913,6 +962,8 @@ async def extract_subjects(input: PipelineInput, ctx: Context) -> SubjectsResult
|
||||
parents=[extract_subjects],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=30,
|
||||
)
|
||||
@with_error_handling(TaskName.PROCESS_SUBJECTS)
|
||||
async def process_subjects(input: PipelineInput, ctx: Context) -> ProcessSubjectsResult:
|
||||
@@ -939,12 +990,24 @@ async def process_subjects(input: PipelineInput, ctx: Context) -> ProcessSubject
|
||||
for i, subject in enumerate(subjects)
|
||||
]
|
||||
|
||||
results = await subject_workflow.aio_run_many(bulk_runs)
|
||||
results = await subject_workflow.aio_run_many(bulk_runs, return_exceptions=True)
|
||||
|
||||
subject_summaries = [
|
||||
SubjectSummaryResult(**result[TaskName.GENERATE_DETAILED_SUMMARY])
|
||||
for result in results
|
||||
]
|
||||
subject_summaries: list[SubjectSummaryResult] = []
|
||||
for i, result in enumerate(results):
|
||||
if isinstance(result, BaseException):
|
||||
logger.error(
|
||||
"[Hatchet] process_subjects: subject workflow failed, failing step",
|
||||
transcript_id=input.transcript_id,
|
||||
subject_index=i,
|
||||
error=str(result),
|
||||
)
|
||||
ctx.log(f"process_subjects: subject {i} failed ({result}), failing step")
|
||||
raise ValueError(
|
||||
f"Subject {i} workflow failed after retries: {result!s}"
|
||||
) from result
|
||||
subject_summaries.append(
|
||||
SubjectSummaryResult(**result[TaskName.GENERATE_DETAILED_SUMMARY])
|
||||
)
|
||||
|
||||
ctx.log(f"process_subjects complete: {len(subject_summaries)} summaries")
|
||||
|
||||
@@ -955,6 +1018,8 @@ async def process_subjects(input: PipelineInput, ctx: Context) -> ProcessSubject
|
||||
parents=[process_subjects],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.GENERATE_RECAP)
|
||||
async def generate_recap(input: PipelineInput, ctx: Context) -> RecapResult:
|
||||
@@ -1044,6 +1109,8 @@ async def generate_recap(input: PipelineInput, ctx: Context) -> RecapResult:
|
||||
parents=[extract_subjects],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_LONG),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.IDENTIFY_ACTION_ITEMS)
|
||||
async def identify_action_items(
|
||||
@@ -1112,6 +1179,8 @@ async def identify_action_items(
|
||||
parents=[process_tracks, generate_title, generate_recap, identify_action_items],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=5,
|
||||
)
|
||||
@with_error_handling(TaskName.FINALIZE)
|
||||
async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
|
||||
@@ -1181,7 +1250,11 @@ async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
|
||||
|
||||
|
||||
@daily_multitrack_pipeline.task(
|
||||
parents=[finalize], execution_timeout=timedelta(seconds=TIMEOUT_SHORT), retries=3
|
||||
parents=[finalize],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=3,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=10,
|
||||
)
|
||||
@with_error_handling(TaskName.CLEANUP_CONSENT, set_error_status=False)
|
||||
async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
@@ -1195,7 +1268,10 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
)
|
||||
from reflector.db.recordings import recordings_controller # noqa: PLC0415
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
|
||||
from reflector.storage import get_transcripts_storage # noqa: PLC0415
|
||||
from reflector.storage import ( # noqa: PLC0415
|
||||
get_source_storage,
|
||||
get_transcripts_storage,
|
||||
)
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if not transcript:
|
||||
@@ -1245,7 +1321,7 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
deletion_errors = []
|
||||
|
||||
if input_track_keys and input.bucket_name:
|
||||
master_storage = get_transcripts_storage()
|
||||
master_storage = get_source_storage(input.source_platform)
|
||||
for key in input_track_keys:
|
||||
try:
|
||||
await master_storage.delete_file(key, bucket=input.bucket_name)
|
||||
@@ -1284,6 +1360,8 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
|
||||
parents=[cleanup_consent],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.POST_ZULIP, set_error_status=False)
|
||||
async def post_zulip(input: PipelineInput, ctx: Context) -> ZulipResult:
|
||||
@@ -1311,6 +1389,8 @@ async def post_zulip(input: PipelineInput, ctx: Context) -> ZulipResult:
|
||||
parents=[cleanup_consent],
|
||||
execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
|
||||
retries=5,
|
||||
backoff_factor=2.0,
|
||||
backoff_max_seconds=15,
|
||||
)
|
||||
@with_error_handling(TaskName.SEND_WEBHOOK, set_error_status=False)
|
||||
async def send_webhook(input: PipelineInput, ctx: Context) -> WebhookResult:
|
||||
@@ -1379,3 +1459,32 @@ async def send_webhook(input: PipelineInput, ctx: Context) -> WebhookResult:
|
||||
except Exception as e:
|
||||
ctx.log(f"send_webhook unexpected error, continuing anyway: {e}")
|
||||
return WebhookResult(webhook_sent=False)
|
||||
|
||||
|
||||
async def on_workflow_failure(input: PipelineInput, ctx: Context) -> None:
|
||||
"""Run when the workflow is truly dead (all retries exhausted).
|
||||
|
||||
Sets transcript status to 'error' only if it is not already 'ended'.
|
||||
Post-finalize tasks (cleanup_consent, post_zulip, send_webhook) use
|
||||
set_error_status=False; if one of them fails, we must not overwrite
|
||||
the 'ended' status that finalize already set.
|
||||
"""
|
||||
async with fresh_db_connection():
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(input.transcript_id)
|
||||
if transcript and transcript.status == "ended":
|
||||
logger.info(
|
||||
"[Hatchet] on_workflow_failure: transcript already ended, skipping error status (failure was post-finalize)",
|
||||
transcript_id=input.transcript_id,
|
||||
)
|
||||
ctx.log(
|
||||
"on_workflow_failure: transcript already ended, skipping error status"
|
||||
)
|
||||
return
|
||||
await set_workflow_error_status(input.transcript_id)
|
||||
|
||||
|
||||
@daily_multitrack_pipeline.on_failure_task()
|
||||
async def _register_on_workflow_failure(input: PipelineInput, ctx: Context) -> None:
|
||||
await on_workflow_failure(input, ctx)
|
||||
|
||||
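Every task above now pairs retries with backoff_factor and backoff_max_seconds. A minimal sketch of the delay schedule these knobs imply, assuming Hatchet computes each retry delay as backoff_factor ** attempt capped at backoff_max_seconds (the exact formula, including any jitter, is Hatchet's and not confirmed here):

# Sketch only, not part of the diff. Assumption: delay = min(factor ** attempt, max_seconds).
def backoff_schedule(retries: int, factor: float, max_seconds: float) -> list[float]:
    """Approximate per-retry delays for the task settings used above."""
    return [min(factor**attempt, max_seconds) for attempt in range(1, retries + 1)]

print(backoff_schedule(3, 2.0, 10.0))  # [2.0, 4.0, 8.0]
print(backoff_schedule(5, 2.0, 15.0))  # [2.0, 4.0, 8.0, 15.0, 15.0]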
@@ -24,6 +24,7 @@ class PaddingInput(BaseModel):
    s3_key: str
    bucket_name: str
    transcript_id: str
    source_platform: str = "daily"


hatchet = HatchetClientManager.get_client()
@@ -33,7 +34,12 @@ padding_workflow = hatchet.workflow(
)


@padding_workflow.task(execution_timeout=timedelta(seconds=TIMEOUT_AUDIO), retries=3)
@padding_workflow.task(
    execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
    """Pad audio track with silence based on WebM container start_time."""
    ctx.log(f"pad_track: track {input.track_index}, s3_key={input.s3_key}")
@@ -45,20 +51,14 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
    )

    try:
        # Create fresh storage instance to avoid aioboto3 fork issues
        from reflector.settings import settings  # noqa: PLC0415
        from reflector.storage.storage_aws import AwsStorage  # noqa: PLC0415

        # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
        storage = AwsStorage(
            aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
            aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
            aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
            aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
        from reflector.storage import (  # noqa: PLC0415
            get_source_storage,
            get_transcripts_storage,
        )

        source_url = await storage.get_file_url(
        # Source reads: use platform-specific credentials
        source_storage = get_source_storage(input.source_platform)
        source_url = await source_storage.get_file_url(
            input.s3_key,
            operation="get_object",
            expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
@@ -96,52 +96,28 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:

        storage_path = f"file_pipeline_hatchet/{input.transcript_id}/tracks/padded_{input.track_index}.webm"

        # Presign PUT URL for output (Modal will upload directly)
        output_url = await storage.get_file_url(
        # Output writes: use transcript storage (our own bucket)
        output_storage = get_transcripts_storage()
        output_url = await output_storage.get_file_url(
            storage_path,
            operation="put_object",
            expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
        )

        import httpx  # noqa: PLC0415

        from reflector.processors.audio_padding_modal import (  # noqa: PLC0415
            AudioPaddingModalProcessor,
        from reflector.processors.audio_padding_auto import (  # noqa: PLC0415
            AudioPaddingAutoProcessor,
        )

        try:
            processor = AudioPaddingModalProcessor()
            result = await processor.pad_track(
                track_url=source_url,
                output_url=output_url,
                start_time_seconds=start_time_seconds,
                track_index=input.track_index,
            )
            file_size = result.size
        processor = AudioPaddingAutoProcessor()
        result = await processor.pad_track(
            track_url=source_url,
            output_url=output_url,
            start_time_seconds=start_time_seconds,
            track_index=input.track_index,
        )
        file_size = result.size

            ctx.log(f"pad_track: Modal returned size={file_size}")
        except httpx.HTTPStatusError as e:
            error_detail = e.response.text if hasattr(e.response, "text") else str(e)
            logger.error(
                "[Hatchet] Modal padding HTTP error",
                transcript_id=input.transcript_id,
                track_index=input.track_index,
                status_code=e.response.status_code if hasattr(e, "response") else None,
                error=error_detail,
                exc_info=True,
            )
            raise Exception(
                f"Modal padding failed: HTTP {e.response.status_code}"
            ) from e
        except httpx.TimeoutException as e:
            logger.error(
                "[Hatchet] Modal padding timeout",
                transcript_id=input.transcript_id,
                track_index=input.track_index,
                error=str(e),
                exc_info=True,
            )
            raise Exception("Modal padding timeout") from e
        ctx.log(f"pad_track: padding returned size={file_size}")

        logger.info(
            "[Hatchet] pad_track complete",
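The read/write split introduced above (get_source_storage for platform reads, get_transcripts_storage for writes) keeps the recording platform's credentials separate from our own bucket. A minimal sketch of the same pattern, assuming both factories are importable as shown in this diff and return storages exposing the get_file_url signature used above:

# Sketch of the presigned-URL split used by pad_track above.
from reflector.storage import get_source_storage, get_transcripts_storage


async def presign_pair(
    source_platform: str, source_key: str, output_key: str, expires_in: int
) -> tuple[str, str]:
    # GET from the platform's bucket, with the platform's credentials...
    source_url = await get_source_storage(source_platform).get_file_url(
        source_key, operation="get_object", expires_in=expires_in
    )
    # ...PUT into our transcript bucket, with our credentials.
    output_url = await get_transcripts_storage().get_file_url(
        output_key, operation="put_object", expires_in=expires_in
    )
    return source_url, output_url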
@@ -13,7 +13,7 @@ from hatchet_sdk.rate_limit import RateLimit
from pydantic import BaseModel

from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, TIMEOUT_MEDIUM
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, TIMEOUT_HEAVY
from reflector.hatchet.workflows.models import SubjectSummaryResult
from reflector.logger import logger
from reflector.processors.summary.prompts import (
@@ -41,8 +41,10 @@ subject_workflow = hatchet.workflow(


@subject_workflow.task(
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=60,
    rate_limits=[RateLimit(static_key=LLM_RATE_LIMIT_KEY, units=2)],
)
async def generate_detailed_summary(

@@ -50,7 +50,9 @@ topic_chunk_workflow = hatchet.workflow(

@topic_chunk_workflow.task(
    execution_timeout=timedelta(seconds=TIMEOUT_MEDIUM),
    retries=3,
    retries=5,
    backoff_factor=2.0,
    backoff_max_seconds=60,
    rate_limits=[RateLimit(static_key=LLM_RATE_LIMIT_KEY, units=1)],
)
async def detect_chunk_topic(input: TopicChunkInput, ctx: Context) -> TopicChunkResult:

@@ -36,6 +36,7 @@ class TrackInput(BaseModel):
    bucket_name: str
    transcript_id: str
    language: str = "en"
    source_platform: str = "daily"


hatchet = HatchetClientManager.get_client()
@@ -43,7 +44,12 @@ hatchet = HatchetClientManager.get_client()
track_workflow = hatchet.workflow(name="TrackProcessing", input_validator=TrackInput)


@track_workflow.task(execution_timeout=timedelta(seconds=TIMEOUT_AUDIO), retries=3)
@track_workflow.task(
    execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
    """Pad single audio track with silence for alignment.

@@ -59,20 +65,14 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
    )

    try:
        # Create fresh storage instance to avoid aioboto3 fork issues
        # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
        from reflector.settings import settings  # noqa: PLC0415
        from reflector.storage.storage_aws import AwsStorage  # noqa: PLC0415

        storage = AwsStorage(
            aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
            aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
            aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
            aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
        from reflector.storage import (  # noqa: PLC0415
            get_source_storage,
            get_transcripts_storage,
        )

        source_url = await storage.get_file_url(
        # Source reads: use platform-specific credentials
        source_storage = get_source_storage(input.source_platform)
        source_url = await source_storage.get_file_url(
            input.s3_key,
            operation="get_object",
            expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
@@ -99,18 +99,19 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:

        storage_path = f"file_pipeline_hatchet/{input.transcript_id}/tracks/padded_{input.track_index}.webm"

        # Presign PUT URL for output (Modal uploads directly)
        output_url = await storage.get_file_url(
        # Output writes: use transcript storage (our own bucket)
        output_storage = get_transcripts_storage()
        output_url = await output_storage.get_file_url(
            storage_path,
            operation="put_object",
            expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
        )

        from reflector.processors.audio_padding_modal import (  # noqa: PLC0415
            AudioPaddingModalProcessor,
        from reflector.processors.audio_padding_auto import (  # noqa: PLC0415
            AudioPaddingAutoProcessor,
        )

        processor = AudioPaddingModalProcessor()
        processor = AudioPaddingAutoProcessor()
        result = await processor.pad_track(
            track_url=source_url,
            output_url=output_url,
@@ -141,7 +142,11 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:


@track_workflow.task(
    parents=[pad_track], execution_timeout=timedelta(seconds=TIMEOUT_HEAVY), retries=3
    parents=[pad_track],
    execution_timeout=timedelta(seconds=TIMEOUT_HEAVY),
    retries=3,
    backoff_factor=2.0,
    backoff_max_seconds=30,
)
async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackResult:
    """Transcribe audio track using GPU (Modal.com) or local Whisper."""
@@ -161,18 +166,18 @@ async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackResult:
        raise ValueError("Missing padded_key from pad_track")

    # Presign URL on demand (avoids stale URLs on workflow replay)
    # TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
    from reflector.settings import settings  # noqa: PLC0415
    from reflector.storage.storage_aws import AwsStorage  # noqa: PLC0415

    storage = AwsStorage(
        aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
        aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
        aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
    from reflector.storage import (  # noqa: PLC0415
        get_source_storage,
        get_transcripts_storage,
    )

    # If bucket_name is set, file is still in the platform's source bucket (no padding applied).
    # If bucket_name is None, padded file was written to our transcript storage.
    if bucket_name:
        storage = get_source_storage(input.source_platform)
    else:
        storage = get_transcripts_storage()

    audio_url = await storage.get_file_url(
        padded_key,
        operation="get_object",
@@ -1,42 +1,23 @@
import logging
from contextvars import ContextVar
from typing import Generic, Type, TypeVar
from typing import Type, TypeVar
from uuid import uuid4

from llama_index.core import Settings
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.prompts import PromptTemplate
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.workflow import (
    Context,
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from llama_index.llms.openai_like import OpenAILike
from pydantic import BaseModel, ValidationError
from workflows.errors import WorkflowTimeoutError

from reflector.utils.retry import retry

T = TypeVar("T", bound=BaseModel)
OutputT = TypeVar("OutputT", bound=BaseModel)

# Session ID for LiteLLM request grouping - set per processing run
llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)

logger = logging.getLogger(__name__)

STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
Based on the following analysis, provide the information in the requested JSON format:

Analysis:
{analysis}

{format_instructions}
"""


class LLMParseError(Exception):
    """Raised when LLM output cannot be parsed after retries."""
@@ -50,157 +31,6 @@ class LLMParseError(Exception):
    )


class ExtractionDone(Event):
    """Event emitted when LLM JSON formatting completes."""

    output: str


class ValidationErrorEvent(Event):
    """Event emitted when validation fails."""

    error: str
    wrong_output: str


class StructuredOutputWorkflow(Workflow, Generic[OutputT]):
    """Workflow for structured output extraction with validation retry.

    This workflow handles parse/validation retries only. Network error retries
    are handled internally by Settings.llm (OpenAILike max_retries=3).
    The caller should NOT wrap this workflow in additional retry logic.
    """

    def __init__(
        self,
        output_cls: Type[OutputT],
        max_retries: int = 3,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.output_cls: Type[OutputT] = output_cls
        self.max_retries = max_retries
        self.output_parser = PydanticOutputParser(output_cls)

    @step
    async def extract(
        self, ctx: Context, ev: StartEvent | ValidationErrorEvent
    ) -> StopEvent | ExtractionDone:
        """Extract structured data from text using two-step LLM process.

        Step 1 (first call only): TreeSummarize generates text analysis
        Step 2 (every call): Settings.llm.acomplete formats analysis as JSON
        """
        current_retries = await ctx.store.get("retries", default=0)
        await ctx.store.set("retries", current_retries + 1)

        if current_retries >= self.max_retries:
            last_error = await ctx.store.get("last_error", default=None)
            logger.error(
                f"Max retries ({self.max_retries}) reached for {self.output_cls.__name__}"
            )
            return StopEvent(result={"error": last_error, "attempts": current_retries})

        if isinstance(ev, StartEvent):
            # First call: run TreeSummarize to get analysis, store in context
            prompt = ev.get("prompt")
            texts = ev.get("texts")
            tone_name = ev.get("tone_name")
            if not prompt or not isinstance(texts, list):
                raise ValueError(
                    "StartEvent must contain 'prompt' (str) and 'texts' (list)"
                )

            summarizer = TreeSummarize(verbose=False)
            analysis = await summarizer.aget_response(
                prompt, texts, tone_name=tone_name
            )
            await ctx.store.set("analysis", str(analysis))
            reflection = ""
        else:
            # Retry: reuse analysis from context
            analysis = await ctx.store.get("analysis")
            if not analysis:
                raise RuntimeError("Internal error: analysis not found in context")

            wrong_output = ev.wrong_output
            if len(wrong_output) > 2000:
                wrong_output = wrong_output[:2000] + "... [truncated]"
            reflection = (
                f"\n\nYour previous response could not be parsed:\n{wrong_output}\n\n"
                f"Error:\n{ev.error}\n\n"
                "Please try again. Return ONLY valid JSON matching the schema above, "
                "with no markdown formatting or extra text."
            )

        # Step 2: Format analysis as JSON using LLM completion
        format_instructions = self.output_parser.format(
            "Please structure the above information in the following JSON format:"
        )

        json_prompt = STRUCTURED_RESPONSE_PROMPT_TEMPLATE.format(
            analysis=analysis,
            format_instructions=format_instructions + reflection,
        )

        # Network retries handled by OpenAILike (max_retries=3)
        # response_format enables grammar-based constrained decoding on backends
        # that support it (DMR/llama.cpp, vLLM, Ollama, OpenAI).
        response = await Settings.llm.acomplete(
            json_prompt,
            response_format={
                "type": "json_schema",
                "json_schema": {
                    "name": self.output_cls.__name__,
                    "schema": self.output_cls.model_json_schema(),
                },
            },
        )
        return ExtractionDone(output=response.text)

    @step
    async def validate(
        self, ctx: Context, ev: ExtractionDone
    ) -> StopEvent | ValidationErrorEvent:
        """Validate extracted output against Pydantic schema."""
        raw_output = ev.output
        retries = await ctx.store.get("retries", default=0)

        try:
            parsed = self.output_parser.parse(raw_output)
            if retries > 1:
                logger.info(
                    f"LLM parse succeeded on attempt {retries}/{self.max_retries} "
                    f"for {self.output_cls.__name__}"
                )
            return StopEvent(result={"success": parsed})

        except (ValidationError, ValueError) as e:
            error_msg = self._format_error(e, raw_output)
            await ctx.store.set("last_error", error_msg)

            logger.error(
                f"LLM parse error (attempt {retries}/{self.max_retries}): "
                f"{type(e).__name__}: {e}\nRaw response: {raw_output[:500]}"
            )

            return ValidationErrorEvent(
                error=error_msg,
                wrong_output=raw_output,
            )

    def _format_error(self, error: Exception, raw_output: str) -> str:
        """Format error for LLM feedback."""
        if isinstance(error, ValidationError):
            error_messages = []
            for err in error.errors():
                field = ".".join(str(loc) for loc in err["loc"])
                error_messages.append(f"- {err['msg']} in field '{field}'")
            return "Schema validation errors:\n" + "\n".join(error_messages)
        else:
            return f"Parse error: {str(error)}"


class LLM:
    def __init__(
        self, settings, temperature: float = 0.4, max_tokens: int | None = None
@@ -225,7 +55,7 @@ class LLM:
            api_key=self.api_key,
            context_window=self.context_window,
            is_chat_model=True,
            is_function_calling_model=False,
            is_function_calling_model=True,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            timeout=self.settings_obj.LLM_REQUEST_TIMEOUT,
@@ -235,10 +65,25 @@ class LLM:
    async def get_response(
        self, prompt: str, texts: list[str], tone_name: str | None = None
    ) -> str:
        """Get a text response using TreeSummarize for non-function-calling models"""
        summarizer = TreeSummarize(verbose=False)
        response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
        return str(response).strip()
        """Get a text response using TreeSummarize for non-function-calling models.

        Uses the same retry() wrapper as get_structured_response for transient
        network errors (connection, timeout, OSError) with exponential backoff.
        """

        async def _call():
            summarizer = TreeSummarize(verbose=False)
            response = await summarizer.aget_response(
                prompt, texts, tone_name=tone_name
            )
            return str(response).strip()

        return await retry(_call)(
            retry_attempts=3,
            retry_backoff_interval=1.0,
            retry_backoff_max=30.0,
            retry_ignore_exc_types=(ConnectionError, TimeoutError, OSError),
        )

    async def get_structured_response(
        self,
@@ -248,36 +93,91 @@ class LLM:
        tone_name: str | None = None,
        timeout: int | None = None,
    ) -> T:
        """Get structured output from LLM with validation retry via Workflow."""
        if timeout is None:
            timeout = self.settings_obj.LLM_STRUCTURED_RESPONSE_TIMEOUT
        """Get structured output from LLM using tool-call with reflection retry.

        async def run_workflow():
            workflow = StructuredOutputWorkflow(
        Uses astructured_predict (function-calling / tool-call mode) for the
        first attempt. On ValidationError or parse failure the wrong output
        and error are fed back as a reflection prompt and the call is retried
        up to LLM_PARSE_MAX_RETRIES times.

        The outer retry() wrapper handles transient network errors with
        exponential back-off.
        """
        max_retries = self.settings_obj.LLM_PARSE_MAX_RETRIES

        async def _call_with_reflection():
            # Build full prompt: instruction + source texts
            if texts:
                texts_block = "\n\n".join(texts)
                full_prompt = f"{prompt}\n\n{texts_block}"
            else:
                full_prompt = prompt

            prompt_tmpl = PromptTemplate("{user_prompt}")
            last_error: str | None = None

            for attempt in range(1, max_retries + 2):  # +2: first try + retries
                try:
                    if attempt == 1:
                        result = await Settings.llm.astructured_predict(
                            output_cls, prompt_tmpl, user_prompt=full_prompt
                        )
                    else:
                        reflection_tmpl = PromptTemplate(
                            "{user_prompt}\n\n{reflection}"
                        )
                        result = await Settings.llm.astructured_predict(
                            output_cls,
                            reflection_tmpl,
                            user_prompt=full_prompt,
                            reflection=reflection,
                        )

                    if attempt > 1:
                        logger.info(
                            f"LLM structured_predict succeeded on attempt "
                            f"{attempt}/{max_retries + 1} for {output_cls.__name__}"
                        )
                    return result

                except (ValidationError, ValueError) as e:
                    wrong_output = str(e)
                    if len(wrong_output) > 2000:
                        wrong_output = wrong_output[:2000] + "... [truncated]"

                    last_error = self._format_validation_error(e)
                    reflection = (
                        f"Your previous response could not be parsed.\n\n"
                        f"Error:\n{last_error}\n\n"
                        "Please try again and return valid data matching the schema."
                    )

                    logger.error(
                        f"LLM parse error (attempt {attempt}/{max_retries + 1}): "
                        f"{type(e).__name__}: {e}\n"
                        f"Raw response: {wrong_output[:500]}"
                    )

            raise LLMParseError(
                output_cls=output_cls,
                max_retries=self.settings_obj.LLM_PARSE_MAX_RETRIES + 1,
                timeout=timeout,
                error_msg=last_error or "Max retries exceeded",
                attempts=max_retries + 1,
            )

            result = await workflow.run(
                prompt=prompt,
                texts=texts,
                tone_name=tone_name,
            )

            if "error" in result:
                error_msg = result["error"] or "Max retries exceeded"
                raise LLMParseError(
                    output_cls=output_cls,
                    error_msg=error_msg,
                    attempts=result.get("attempts", 0),
                )

            return result["success"]

        return await retry(run_workflow)(
        return await retry(_call_with_reflection)(
            retry_attempts=3,
            retry_backoff_interval=1.0,
            retry_backoff_max=30.0,
            retry_ignore_exc_types=(WorkflowTimeoutError,),
            retry_ignore_exc_types=(ConnectionError, TimeoutError, OSError),
        )

    @staticmethod
    def _format_validation_error(error: Exception) -> str:
        """Format a validation/parse error for LLM reflection feedback."""
        if isinstance(error, ValidationError):
            error_messages = []
            for err in error.errors():
                field = ".".join(str(loc) for loc in err["loc"])
                error_messages.append(f"- {err['msg']} in field '{field}'")
            return "Schema validation errors:\n" + "\n".join(error_messages)
        return f"Parse error: {str(error)}"
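The reflection loop above feeds the validation error back into the next prompt so the model can self-correct. A self-contained sketch of the same technique, with a generic async predict_fn standing in for Settings.llm.astructured_predict (predict_fn is hypothetical, not part of this diff):

from pydantic import ValidationError


async def structured_with_reflection(predict_fn, prompt: str, max_retries: int):
    # predict_fn: async callable that returns a validated Pydantic model
    # or raises ValidationError/ValueError when the output cannot be parsed.
    reflection = ""
    last_error = None
    for attempt in range(1, max_retries + 2):  # first try + retries
        try:
            return await predict_fn(f"{prompt}{reflection}")
        except (ValidationError, ValueError) as e:
            last_error = str(e)
            # Feed the failure back so the next attempt can self-correct
            reflection = (
                f"\n\nYour previous response failed:\n{last_error}\nTry again."
            )
    raise RuntimeError(f"Max retries exceeded: {last_error}")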
@@ -4,6 +4,8 @@ from .audio_diarization_auto import AudioDiarizationAutoProcessor  # noqa: F401
from .audio_downscale import AudioDownscaleProcessor  # noqa: F401
from .audio_file_writer import AudioFileWriterProcessor  # noqa: F401
from .audio_merge import AudioMergeProcessor  # noqa: F401
from .audio_padding import AudioPaddingProcessor  # noqa: F401
from .audio_padding_auto import AudioPaddingAutoProcessor  # noqa: F401
from .audio_transcript import AudioTranscriptProcessor  # noqa: F401
from .audio_transcript_auto import AudioTranscriptAutoProcessor  # noqa: F401
from .base import (  # noqa: F401
86
server/reflector/processors/_audio_download.py
Normal file
@@ -0,0 +1,86 @@
"""
|
||||
Shared audio download utility for local processors.
|
||||
|
||||
Downloads audio from a URL to a temporary file for in-process ML inference.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
|
||||
from reflector.logger import logger
|
||||
|
||||
S3_TIMEOUT = 60
|
||||
|
||||
|
||||
async def download_audio_to_temp(url: str) -> Path:
|
||||
"""Download audio from URL to a temporary file.
|
||||
|
||||
The caller is responsible for deleting the temp file after use.
|
||||
|
||||
Args:
|
||||
url: Presigned URL or public URL to download audio from.
|
||||
|
||||
Returns:
|
||||
Path to the downloaded temporary file.
|
||||
"""
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(None, _download_blocking, url)
|
||||
|
||||
|
||||
def _download_blocking(url: str) -> Path:
|
||||
"""Blocking download implementation."""
|
||||
log = logger.bind(url=url[:80])
|
||||
log.info("Downloading audio to temp file")
|
||||
|
||||
response = requests.get(url, stream=True, timeout=S3_TIMEOUT)
|
||||
response.raise_for_status()
|
||||
|
||||
# Determine extension from content-type or URL
|
||||
ext = _detect_extension(url, response.headers.get("content-type", ""))
|
||||
|
||||
fd, tmp_path = tempfile.mkstemp(suffix=ext)
|
||||
try:
|
||||
total_bytes = 0
|
||||
with os.fdopen(fd, "wb") as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if chunk:
|
||||
f.write(chunk)
|
||||
total_bytes += len(chunk)
|
||||
log.info("Audio downloaded", bytes=total_bytes, path=tmp_path)
|
||||
return Path(tmp_path)
|
||||
except Exception:
|
||||
# Clean up on failure
|
||||
try:
|
||||
os.unlink(tmp_path)
|
||||
except OSError:
|
||||
pass
|
||||
raise
|
||||
|
||||
|
||||
def _detect_extension(url: str, content_type: str) -> str:
|
||||
"""Detect audio file extension from URL or content-type."""
|
||||
# Try URL path first
|
||||
path = url.split("?")[0] # Strip query params
|
||||
for ext in (".wav", ".mp3", ".mp4", ".m4a", ".webm", ".ogg", ".flac"):
|
||||
if path.lower().endswith(ext):
|
||||
return ext
|
||||
|
||||
# Try content-type
|
||||
ct_map = {
|
||||
"audio/wav": ".wav",
|
||||
"audio/x-wav": ".wav",
|
||||
"audio/mpeg": ".mp3",
|
||||
"audio/mp4": ".m4a",
|
||||
"audio/webm": ".webm",
|
||||
"audio/ogg": ".ogg",
|
||||
"audio/flac": ".flac",
|
||||
}
|
||||
for ct, ext in ct_map.items():
|
||||
if ct in content_type.lower():
|
||||
return ext
|
||||
|
||||
return ".audio"
|
||||
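Typical call pattern for the helper above: download, process, always unlink, since the docstring puts cleanup on the caller. The URL below is a placeholder:

import asyncio
import os

from reflector.processors._audio_download import download_audio_to_temp


async def main():
    # Placeholder URL; any presigned or public audio URL works
    tmp_path = await download_audio_to_temp("https://example.com/audio.wav")
    try:
        print(tmp_path, os.path.getsize(tmp_path))
    finally:
        os.unlink(tmp_path)  # caller owns cleanup, per the docstring


asyncio.run(main())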
76
server/reflector/processors/_marian_translator_service.py
Normal file
@@ -0,0 +1,76 @@
"""
|
||||
MarianMT translation service.
|
||||
|
||||
Singleton service that loads HuggingFace MarianMT translation models
|
||||
and reuses them across all MarianMT translator processor instances.
|
||||
|
||||
Ported from gpu/self_hosted/app/services/translator.py for in-process use.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
from transformers import MarianMTModel, MarianTokenizer, pipeline
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MarianTranslatorService:
|
||||
"""MarianMT text translation service for in-process use."""
|
||||
|
||||
def __init__(self):
|
||||
self._pipeline = None
|
||||
self._current_pair = None
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def load(self, source_language: str = "en", target_language: str = "fr"):
|
||||
"""Load the translation model for a specific language pair."""
|
||||
model_name = self._resolve_model_name(source_language, target_language)
|
||||
logger.info(
|
||||
"Loading MarianMT model: %s (%s -> %s)",
|
||||
model_name,
|
||||
source_language,
|
||||
target_language,
|
||||
)
|
||||
tokenizer = MarianTokenizer.from_pretrained(model_name)
|
||||
model = MarianMTModel.from_pretrained(model_name)
|
||||
self._pipeline = pipeline("translation", model=model, tokenizer=tokenizer)
|
||||
self._current_pair = (source_language.lower(), target_language.lower())
|
||||
|
||||
def _resolve_model_name(self, src: str, tgt: str) -> str:
|
||||
"""Resolve language pair to MarianMT model name."""
|
||||
pair = (src.lower(), tgt.lower())
|
||||
mapping = {
|
||||
("en", "fr"): "Helsinki-NLP/opus-mt-en-fr",
|
||||
("fr", "en"): "Helsinki-NLP/opus-mt-fr-en",
|
||||
("en", "es"): "Helsinki-NLP/opus-mt-en-es",
|
||||
("es", "en"): "Helsinki-NLP/opus-mt-es-en",
|
||||
("en", "de"): "Helsinki-NLP/opus-mt-en-de",
|
||||
("de", "en"): "Helsinki-NLP/opus-mt-de-en",
|
||||
}
|
||||
return mapping.get(pair, "Helsinki-NLP/opus-mt-en-fr")
|
||||
|
||||
def translate(self, text: str, source_language: str, target_language: str) -> dict:
|
||||
"""Translate text between languages.
|
||||
|
||||
Args:
|
||||
text: Text to translate.
|
||||
source_language: Source language code (e.g. "en").
|
||||
target_language: Target language code (e.g. "fr").
|
||||
|
||||
Returns:
|
||||
dict with "text" key containing {source_language: original, target_language: translated}.
|
||||
"""
|
||||
pair = (source_language.lower(), target_language.lower())
|
||||
if self._pipeline is None or self._current_pair != pair:
|
||||
self.load(source_language, target_language)
|
||||
with self._lock:
|
||||
results = self._pipeline(
|
||||
text, src_lang=source_language, tgt_lang=target_language
|
||||
)
|
||||
translated = results[0]["translation_text"] if results else ""
|
||||
return {"text": {source_language: text, target_language: translated}}
|
||||
|
||||
|
||||
# Module-level singleton — shared across all MarianMT translator processors
|
||||
translator_service = MarianTranslatorService()
|
||||
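Usage sketch for the singleton above: the first call for a language pair triggers the model load (a network download on first use), and later calls with the same pair reuse the cached pipeline:

from reflector.processors._marian_translator_service import translator_service

# First en->fr call loads Helsinki-NLP/opus-mt-en-fr; subsequent calls reuse it.
result = translator_service.translate("Hello world", "en", "fr")
print(result["text"]["fr"])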
133
server/reflector/processors/_pyannote_diarization_service.py
Normal file
@@ -0,0 +1,133 @@
"""
|
||||
Pyannote diarization service using pyannote.audio.
|
||||
|
||||
Singleton service that loads the pyannote speaker diarization model once
|
||||
and reuses it across all pyannote diarization processor instances.
|
||||
|
||||
Ported from gpu/self_hosted/app/services/diarizer.py for in-process use.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import tarfile
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from urllib.request import urlopen
|
||||
|
||||
import torch
|
||||
import torchaudio
|
||||
import yaml
|
||||
from pyannote.audio import Pipeline
|
||||
|
||||
from reflector.settings import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
S3_BUNDLE_URL = "https://reflector-public.s3.us-east-1.amazonaws.com/pyannote-speaker-diarization-3.1.tar.gz"
|
||||
BUNDLE_CACHE_DIR = Path.home() / ".cache" / "pyannote-bundle"
|
||||
|
||||
|
||||
def _ensure_model(cache_dir: Path) -> str:
|
||||
"""Download and extract S3 model bundle if not cached."""
|
||||
model_dir = cache_dir / "pyannote-speaker-diarization-3.1"
|
||||
config_path = model_dir / "config.yaml"
|
||||
|
||||
if config_path.exists():
|
||||
logger.info("Using cached model bundle at %s", model_dir)
|
||||
return str(model_dir)
|
||||
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
tarball_path = cache_dir / "model.tar.gz"
|
||||
|
||||
logger.info("Downloading model bundle from %s", S3_BUNDLE_URL)
|
||||
with urlopen(S3_BUNDLE_URL) as response, open(tarball_path, "wb") as f:
|
||||
while chunk := response.read(8192):
|
||||
f.write(chunk)
|
||||
|
||||
logger.info("Extracting model bundle")
|
||||
with tarfile.open(tarball_path, "r:gz") as tar:
|
||||
tar.extractall(path=cache_dir, filter="data")
|
||||
tarball_path.unlink()
|
||||
|
||||
_patch_config(model_dir, cache_dir)
|
||||
return str(model_dir)
|
||||
|
||||
|
||||
def _patch_config(model_dir: Path, cache_dir: Path) -> None:
|
||||
"""Rewrite config.yaml to reference local pytorch_model.bin paths."""
|
||||
config_path = model_dir / "config.yaml"
|
||||
with open(config_path) as f:
|
||||
config = yaml.safe_load(f)
|
||||
|
||||
config["pipeline"]["params"]["segmentation"] = str(
|
||||
cache_dir / "pyannote-segmentation-3.0" / "pytorch_model.bin"
|
||||
)
|
||||
config["pipeline"]["params"]["embedding"] = str(
|
||||
cache_dir / "pyannote-wespeaker-voxceleb-resnet34-LM" / "pytorch_model.bin"
|
||||
)
|
||||
|
||||
with open(config_path, "w") as f:
|
||||
yaml.dump(config, f)
|
||||
|
||||
logger.info("Patched config.yaml with local model paths")
|
||||
|
||||
|
||||
class PyannoteDiarizationService:
|
||||
"""Pyannote speaker diarization service for in-process use."""
|
||||
|
||||
def __init__(self):
|
||||
self._pipeline = None
|
||||
self._device = "cpu"
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def load(self):
|
||||
self._device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
hf_token = settings.HF_TOKEN
|
||||
|
||||
if hf_token:
|
||||
logger.info("Loading pyannote model from HuggingFace (HF_TOKEN set)")
|
||||
self._pipeline = Pipeline.from_pretrained(
|
||||
"pyannote/speaker-diarization-3.1",
|
||||
use_auth_token=hf_token,
|
||||
)
|
||||
else:
|
||||
logger.info("HF_TOKEN not set — loading model from S3 bundle")
|
||||
model_path = _ensure_model(BUNDLE_CACHE_DIR)
|
||||
config_path = Path(model_path) / "config.yaml"
|
||||
self._pipeline = Pipeline.from_pretrained(str(config_path))
|
||||
|
||||
self._pipeline.to(torch.device(self._device))
|
||||
|
||||
def diarize_file(self, file_path: str, timestamp: float = 0.0) -> dict:
|
||||
"""Run speaker diarization on an audio file.
|
||||
|
||||
Args:
|
||||
file_path: Path to the audio file.
|
||||
timestamp: Offset to add to all segment timestamps.
|
||||
|
||||
Returns:
|
||||
dict with "diarization" key containing list of
|
||||
{"start": float, "end": float, "speaker": int} segments.
|
||||
"""
|
||||
if self._pipeline is None:
|
||||
self.load()
|
||||
waveform, sample_rate = torchaudio.load(file_path)
|
||||
with self._lock:
|
||||
diarization = self._pipeline(
|
||||
{"waveform": waveform, "sample_rate": sample_rate}
|
||||
)
|
||||
segments = []
|
||||
for diarization_segment, _, speaker in diarization.itertracks(yield_label=True):
|
||||
segments.append(
|
||||
{
|
||||
"start": round(timestamp + diarization_segment.start, 3),
|
||||
"end": round(timestamp + diarization_segment.end, 3),
|
||||
"speaker": int(speaker[-2:])
|
||||
if speaker and speaker[-2:].isdigit()
|
||||
else 0,
|
||||
}
|
||||
)
|
||||
return {"diarization": segments}
|
||||
|
||||
|
||||
# Module-level singleton — shared across all pyannote diarization processors
|
||||
diarization_service = PyannoteDiarizationService()
|
||||
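Usage sketch for the diarization singleton above: the first diarize_file call lazily loads the pipeline (via HF_TOKEN or the S3 bundle), and later calls reuse it. The file path is a placeholder:

from reflector.processors._pyannote_diarization_service import diarization_service

# Placeholder path; first call triggers model load, later calls reuse it.
segments = diarization_service.diarize_file("/tmp/meeting.wav")["diarization"]
for seg in segments[:3]:
    print(seg["start"], seg["end"], seg["speaker"])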
37
server/reflector/processors/audio_diarization_pyannote.py
Normal file
@@ -0,0 +1,37 @@
"""
|
||||
Pyannote audio diarization processor using pyannote.audio in-process.
|
||||
|
||||
Downloads audio from URL, runs pyannote diarization locally,
|
||||
and returns speaker segments. No HTTP backend needed.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
|
||||
from reflector.processors._audio_download import download_audio_to_temp
|
||||
from reflector.processors._pyannote_diarization_service import diarization_service
|
||||
from reflector.processors.audio_diarization import AudioDiarizationProcessor
|
||||
from reflector.processors.audio_diarization_auto import AudioDiarizationAutoProcessor
|
||||
from reflector.processors.types import AudioDiarizationInput
|
||||
|
||||
|
||||
class AudioDiarizationPyannoteProcessor(AudioDiarizationProcessor):
|
||||
INPUT_TYPE = AudioDiarizationInput
|
||||
|
||||
async def _diarize(self, data: AudioDiarizationInput):
|
||||
"""Run pyannote diarization on audio from URL."""
|
||||
tmp_path = await download_audio_to_temp(data.audio_url)
|
||||
try:
|
||||
loop = asyncio.get_event_loop()
|
||||
result = await loop.run_in_executor(
|
||||
None, diarization_service.diarize_file, str(tmp_path)
|
||||
)
|
||||
return result["diarization"]
|
||||
finally:
|
||||
try:
|
||||
os.unlink(tmp_path)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
AudioDiarizationAutoProcessor.register("pyannote", AudioDiarizationPyannoteProcessor)
|
||||
23
server/reflector/processors/audio_padding.py
Normal file
@@ -0,0 +1,23 @@
"""
|
||||
Base class for audio padding processors.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class PaddingResponse(BaseModel):
|
||||
size: int
|
||||
cancelled: bool = False
|
||||
|
||||
|
||||
class AudioPaddingProcessor:
|
||||
"""Base class for audio padding processors."""
|
||||
|
||||
async def pad_track(
|
||||
self,
|
||||
track_url: str,
|
||||
output_url: str,
|
||||
start_time_seconds: float,
|
||||
track_index: int,
|
||||
) -> PaddingResponse:
|
||||
raise NotImplementedError
|
||||
32
server/reflector/processors/audio_padding_auto.py
Normal file
@@ -0,0 +1,32 @@
import importlib

from reflector.processors.audio_padding import AudioPaddingProcessor
from reflector.settings import settings


class AudioPaddingAutoProcessor(AudioPaddingProcessor):
    _registry = {}

    @classmethod
    def register(cls, name, kclass):
        cls._registry[name] = kclass

    def __new__(cls, name: str | None = None, **kwargs):
        if name is None:
            name = settings.PADDING_BACKEND
        if name not in cls._registry:
            module_name = f"reflector.processors.audio_padding_{name}"
            importlib.import_module(module_name)

        # gather specific configuration for the processor
        # search `PADDING_XXX_YYY`, push to constructor as `xxx_yyy`
        config = {}
        name_upper = name.upper()
        settings_prefix = "PADDING_"
        config_prefix = f"{settings_prefix}{name_upper}_"
        for key, value in settings:
            if key.startswith(config_prefix):
                config_name = key[len(settings_prefix) :].lower()
                config[config_name] = value

        return cls._registry[name](**config | kwargs)
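The registry above maps settings of the form PADDING_<BACKEND>_<KEY> to constructor kwargs named <backend>_<key>; note that only the "PADDING_" prefix is stripped, so the backend name stays in the kwarg. A sketch of that convention (the setting names below are hypothetical examples, not real reflector settings):

# Hypothetical settings illustrating the PADDING_* convention used above:
#   PADDING_BACKEND = "modal"         -> selects the "modal" processor
#   PADDING_MODAL_API_KEY = "sk-..."  -> passed as modal_api_key=...
settings_items = {"PADDING_BACKEND": "modal", "PADDING_MODAL_API_KEY": "sk-..."}

config = {}
config_prefix = "PADDING_MODAL_"
for key, value in settings_items.items():
    if key.startswith(config_prefix):
        # Strip only "PADDING_", keeping the backend name in the kwarg
        config[key[len("PADDING_"):].lower()] = value

print(config)  # {'modal_api_key': 'sk-...'}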
@@ -6,18 +6,14 @@ import asyncio
import os

import httpx
from pydantic import BaseModel

from reflector.hatchet.constants import TIMEOUT_AUDIO
from reflector.hatchet.constants import TIMEOUT_AUDIO_HTTP
from reflector.logger import logger
from reflector.processors.audio_padding import AudioPaddingProcessor, PaddingResponse
from reflector.processors.audio_padding_auto import AudioPaddingAutoProcessor


class PaddingResponse(BaseModel):
    size: int
    cancelled: bool = False


class AudioPaddingModalProcessor:
class AudioPaddingModalProcessor(AudioPaddingProcessor):
    """Audio padding processor using Modal.com CPU backend via HTTP."""

    def __init__(
@@ -64,7 +60,7 @@ class AudioPaddingModalProcessor:
            headers["Authorization"] = f"Bearer {self.modal_api_key}"

        try:
            async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO) as client:
            async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO_HTTP) as client:
                response = await client.post(
                    url,
                    headers=headers,
@@ -111,3 +107,6 @@ class AudioPaddingModalProcessor:
        except Exception as e:
            log.error("Modal padding unexpected error", error=str(e), exc_info=True)
            raise


AudioPaddingAutoProcessor.register("modal", AudioPaddingModalProcessor)
133
server/reflector/processors/audio_padding_pyav.py
Normal file
@@ -0,0 +1,133 @@
"""
|
||||
PyAV audio padding processor.
|
||||
|
||||
Pads audio tracks with silence directly in-process (no HTTP).
|
||||
Reuses the shared PyAV utilities from reflector.utils.audio_padding.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import av
|
||||
|
||||
from reflector.logger import logger
|
||||
from reflector.processors.audio_padding import AudioPaddingProcessor, PaddingResponse
|
||||
from reflector.processors.audio_padding_auto import AudioPaddingAutoProcessor
|
||||
from reflector.utils.audio_padding import apply_audio_padding_to_file
|
||||
|
||||
S3_TIMEOUT = 60
|
||||
|
||||
|
||||
class AudioPaddingPyavProcessor(AudioPaddingProcessor):
|
||||
"""Audio padding processor using PyAV (no HTTP backend)."""
|
||||
|
||||
    async def pad_track(
        self,
        track_url: str,
        output_url: str,
        start_time_seconds: float,
        track_index: int,
    ) -> PaddingResponse:
        """Pad audio track with silence via PyAV.

        Args:
            track_url: Presigned GET URL for source audio track
            output_url: Presigned PUT URL for output WebM
            start_time_seconds: Amount of silence to prepend
            track_index: Track index for logging
        """
        if not track_url:
            raise ValueError("track_url cannot be empty")
        if start_time_seconds <= 0:
            raise ValueError(
                f"start_time_seconds must be positive, got {start_time_seconds}"
            )

        log = logger.bind(track_index=track_index, padding_seconds=start_time_seconds)
        log.info("Starting local PyAV padding")

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            None,
            self._pad_track_blocking,
            track_url,
            output_url,
            start_time_seconds,
            track_index,
        )

    def _pad_track_blocking(
        self,
        track_url: str,
        output_url: str,
        start_time_seconds: float,
        track_index: int,
    ) -> PaddingResponse:
        """Blocking padding work: download, pad with PyAV, upload."""
        import requests

        log = logger.bind(track_index=track_index, padding_seconds=start_time_seconds)
        temp_dir = tempfile.mkdtemp()
        input_path = None
        output_path = None

        try:
            # Download source audio
            log.info("Downloading track for local padding")
            response = requests.get(track_url, stream=True, timeout=S3_TIMEOUT)
            response.raise_for_status()

            input_path = os.path.join(temp_dir, "track.webm")
            total_bytes = 0
            with open(input_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
                        total_bytes += len(chunk)
            log.info("Track downloaded", bytes=total_bytes)

            # Apply padding using shared PyAV utility
            output_path = os.path.join(temp_dir, "padded.webm")
            with av.open(input_path) as in_container:
                apply_audio_padding_to_file(
                    in_container,
                    output_path,
                    start_time_seconds,
                    track_index,
                    logger=logger,
                )

            file_size = os.path.getsize(output_path)
            log.info("Local padding complete", size=file_size)

            # Upload padded track
            log.info("Uploading padded track to S3")
            with open(output_path, "rb") as f:
                upload_response = requests.put(output_url, data=f, timeout=S3_TIMEOUT)
            upload_response.raise_for_status()
            log.info("Upload complete", size=file_size)

            return PaddingResponse(size=file_size)

        except Exception as e:
            log.error("Local padding failed", error=str(e), exc_info=True)
            raise
        finally:
            if input_path and os.path.exists(input_path):
                try:
                    os.unlink(input_path)
                except Exception as e:
                    log.warning("Failed to cleanup input file", error=str(e))
            if output_path and os.path.exists(output_path):
                try:
                    os.unlink(output_path)
                except Exception as e:
                    log.warning("Failed to cleanup output file", error=str(e))
            try:
                os.rmdir(temp_dir)
            except Exception as e:
                log.warning("Failed to cleanup temp directory", error=str(e))


AudioPaddingAutoProcessor.register("pyav", AudioPaddingPyavProcessor)
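The pad_track/_pad_track_blocking split keeps the event loop free while requests and PyAV do blocking I/O in a worker thread. A minimal driver sketch; the presigned URLs are placeholders and the direct instantiation is illustrative (the diff only shows .register(), not the registry lookup API):

import asyncio

async def main():
    processor = AudioPaddingPyavProcessor()  # normally resolved via PADDING_BACKEND="pyav"
    result = await processor.pad_track(
        track_url="https://s3.example.com/raw/track-0.webm?X-Amz-Signature=abc",  # placeholder
        output_url="https://s3.example.com/padded/track-0.webm?X-Amz-Signature=def",  # placeholder
        start_time_seconds=1.25,  # silence prepended so tracks align
        track_index=0,
    )
    print(result.size)  # bytes uploaded

asyncio.run(main())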
@@ -3,13 +3,17 @@ from faster_whisper import WhisperModel
 from reflector.processors.audio_transcript import AudioTranscriptProcessor
 from reflector.processors.audio_transcript_auto import AudioTranscriptAutoProcessor
 from reflector.processors.types import AudioFile, Transcript, Word
 from reflector.settings import settings


 class AudioTranscriptWhisperProcessor(AudioTranscriptProcessor):
     def __init__(self):
         super().__init__()
         self.model = WhisperModel(
-            "tiny", device="cpu", compute_type="float32", num_workers=12
+            settings.WHISPER_CHUNK_MODEL,
+            device="cpu",
+            compute_type="float32",
+            num_workers=12,
         )

     async def _transcript(self, data: AudioFile):
39 server/reflector/processors/file_diarization_pyannote.py Normal file
@@ -0,0 +1,39 @@
"""
Pyannote file diarization processor using pyannote.audio in-process.

Downloads audio from URL, runs pyannote diarization locally,
and returns speaker segments. No HTTP backend needed.
"""

import asyncio
import os

from reflector.processors._audio_download import download_audio_to_temp
from reflector.processors._pyannote_diarization_service import diarization_service
from reflector.processors.file_diarization import (
    FileDiarizationInput,
    FileDiarizationOutput,
    FileDiarizationProcessor,
)
from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor


class FileDiarizationPyannoteProcessor(FileDiarizationProcessor):
    async def _diarize(self, data: FileDiarizationInput):
        """Run pyannote diarization on file from URL."""
        self.logger.info(f"Starting pyannote diarization from {data.audio_url}")
        tmp_path = await download_audio_to_temp(data.audio_url)
        try:
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(
                None, diarization_service.diarize_file, str(tmp_path)
            )
            return FileDiarizationOutput(diarization=result["diarization"])
        finally:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass


FileDiarizationAutoProcessor.register("pyannote", FileDiarizationPyannoteProcessor)
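Backend selection stays declarative: DIARIZATION_BACKEND names a registered class. A sketch of the registry mechanics this implies; only .register() appears in the diff, so the lookup helper below is an assumption:

class FileDiarizationAutoProcessor:
    _backends: dict[str, type] = {}

    @classmethod
    def register(cls, name: str, processor_cls: type) -> None:
        cls._backends[name] = processor_cls

    @classmethod
    def for_backend(cls, name: str):  # hypothetical lookup helper, not in the diff
        return cls._backends[name]()

# e.g. DIARIZATION_BACKEND=pyannote -> FileDiarizationAutoProcessor.for_backend("pyannote")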
275 server/reflector/processors/file_transcript_whisper.py Normal file
@@ -0,0 +1,275 @@
"""
Local file transcription processor using faster-whisper with Silero VAD pipeline.

Downloads audio from URL, segments it using Silero VAD, transcribes each
segment with faster-whisper, and merges results. No HTTP backend needed.

VAD pipeline ported from gpu/self_hosted/app/services/transcriber.py.
"""

import asyncio
import os
import shutil
import subprocess
import threading
from typing import Generator

import numpy as np
from silero_vad import VADIterator, load_silero_vad

from reflector.processors._audio_download import download_audio_to_temp
from reflector.processors.file_transcript import (
    FileTranscriptInput,
    FileTranscriptProcessor,
)
from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor
from reflector.processors.types import Transcript, Word
from reflector.settings import settings

SAMPLE_RATE = 16000

VAD_CONFIG = {
    "batch_max_duration": 30.0,
    "silence_padding": 0.5,
    "window_size": 512,
}


class FileTranscriptWhisperProcessor(FileTranscriptProcessor):
    """Transcribe complete audio files using local faster-whisper with VAD."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._model = None
        self._lock = threading.Lock()

    def _ensure_model(self):
        """Lazy-load the whisper model on first use."""
        if self._model is not None:
            return

        import faster_whisper
        import torch

        device = "cuda" if torch.cuda.is_available() else "cpu"
        compute_type = "float16" if device == "cuda" else "int8"
        model_name = settings.WHISPER_FILE_MODEL

        self.logger.info(
            "Loading whisper model",
            model=model_name,
            device=device,
            compute_type=compute_type,
        )
        self._model = faster_whisper.WhisperModel(
            model_name,
            device=device,
            compute_type=compute_type,
            num_workers=1,
        )

    async def _transcript(self, data: FileTranscriptInput):
        """Download file, run VAD segmentation, transcribe each segment."""
        tmp_path = await download_audio_to_temp(data.audio_url)
        try:
            loop = asyncio.get_event_loop()
            result = await loop.run_in_executor(
                None,
                self._transcribe_file_blocking,
                str(tmp_path),
                data.language,
            )
            return result
        finally:
            try:
                os.unlink(tmp_path)
            except OSError:
                pass

    def _transcribe_file_blocking(self, file_path: str, language: str) -> Transcript:
        """Blocking transcription with VAD pipeline."""
        self._ensure_model()

        audio_array = _load_audio_via_ffmpeg(file_path, SAMPLE_RATE)

        # VAD segmentation → batch merging
        merged_batches: list[tuple[float, float]] = []
        batch_start = None
        batch_end = None
        max_duration = VAD_CONFIG["batch_max_duration"]

        for seg_start, seg_end in _vad_segments(audio_array):
            if batch_start is None:
                batch_start, batch_end = seg_start, seg_end
                continue
            if seg_end - batch_start <= max_duration:
                batch_end = seg_end
            else:
                merged_batches.append((batch_start, batch_end))
                batch_start, batch_end = seg_start, seg_end

        if batch_start is not None and batch_end is not None:
            merged_batches.append((batch_start, batch_end))

        # If no speech detected, try transcribing the whole file
        if not merged_batches:
            return self._transcribe_whole_file(file_path, language)

        # Transcribe each batch
        all_words = []
        for start_time, end_time in merged_batches:
            s_idx = int(start_time * SAMPLE_RATE)
            e_idx = int(end_time * SAMPLE_RATE)
            segment = audio_array[s_idx:e_idx]
            segment = _pad_audio(segment, SAMPLE_RATE)

            with self._lock:
                segments, _ = self._model.transcribe(
                    segment,
                    language=language,
                    beam_size=5,
                    word_timestamps=True,
                    vad_filter=True,
                    vad_parameters={"min_silence_duration_ms": 500},
                )
                segments = list(segments)

            for seg in segments:
                for w in seg.words:
                    all_words.append(
                        {
                            "word": w.word,
                            "start": round(float(w.start) + start_time, 2),
                            "end": round(float(w.end) + start_time, 2),
                        }
                    )

        all_words = _enforce_word_timing_constraints(all_words)

        words = [
            Word(text=w["word"], start=w["start"], end=w["end"]) for w in all_words
        ]
        words.sort(key=lambda w: w.start)
        return Transcript(words=words)

    def _transcribe_whole_file(self, file_path: str, language: str) -> Transcript:
        """Fallback: transcribe entire file without VAD segmentation."""
        with self._lock:
            segments, _ = self._model.transcribe(
                file_path,
                language=language,
                beam_size=5,
                word_timestamps=True,
                vad_filter=True,
                vad_parameters={"min_silence_duration_ms": 500},
            )
            segments = list(segments)

        words = []
        for seg in segments:
            for w in seg.words:
                words.append(
                    Word(
                        text=w.word,
                        start=round(float(w.start), 2),
                        end=round(float(w.end), 2),
                    )
                )
        return Transcript(words=words)


# --- VAD helpers (ported from gpu/self_hosted/app/services/transcriber.py) ---
# IMPORTANT: This VAD segment logic is duplicated for deployment isolation.
# If you modify this, consider updating the GPU service copy as well:
# - gpu/self_hosted/app/services/transcriber.py
# - gpu/modal_deployments/reflector_transcriber.py
# - gpu/modal_deployments/reflector_transcriber_parakeet.py


def _load_audio_via_ffmpeg(
    input_path: str, sample_rate: int = SAMPLE_RATE
) -> np.ndarray:
    """Load audio file via ffmpeg, converting to mono float32 at target sample rate."""
    ffmpeg_bin = shutil.which("ffmpeg") or "ffmpeg"
    cmd = [
        ffmpeg_bin,
        "-nostdin",
        "-threads",
        "1",
        "-i",
        input_path,
        "-f",
        "f32le",
        "-acodec",
        "pcm_f32le",
        "-ac",
        "1",
        "-ar",
        str(sample_rate),
        "pipe:1",
    ]
    proc = subprocess.run(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
    )
    return np.frombuffer(proc.stdout, dtype=np.float32)


def _vad_segments(
    audio_array: np.ndarray,
    sample_rate: int = SAMPLE_RATE,
    window_size: int = VAD_CONFIG["window_size"],
) -> Generator[tuple[float, float], None, None]:
    """Detect speech segments using Silero VAD."""
    vad_model = load_silero_vad(onnx=False)
    iterator = VADIterator(vad_model, sampling_rate=sample_rate)
    start = None

    for i in range(0, len(audio_array), window_size):
        chunk = audio_array[i : i + window_size]
        if len(chunk) < window_size:
            chunk = np.pad(chunk, (0, window_size - len(chunk)), mode="constant")
        speech = iterator(chunk)
        if not speech:
            continue
        if "start" in speech:
            start = speech["start"]
            continue
        if "end" in speech and start is not None:
            end = speech["end"]
            yield (start / float(SAMPLE_RATE), end / float(SAMPLE_RATE))
            start = None

    # Handle case where audio ends while speech is still active
    if start is not None:
        audio_duration = len(audio_array) / float(sample_rate)
        yield (start / float(SAMPLE_RATE), audio_duration)

    iterator.reset_states()


def _pad_audio(audio_array: np.ndarray, sample_rate: int = SAMPLE_RATE) -> np.ndarray:
    """Pad short audio with silence for VAD compatibility."""
    audio_duration = len(audio_array) / sample_rate
    if audio_duration < VAD_CONFIG["silence_padding"]:
        silence_samples = int(sample_rate * VAD_CONFIG["silence_padding"])
        silence = np.zeros(silence_samples, dtype=np.float32)
        return np.concatenate([audio_array, silence])
    return audio_array


def _enforce_word_timing_constraints(words: list[dict]) -> list[dict]:
    """Ensure no word end time exceeds the next word's start time."""
    if len(words) <= 1:
        return words
    enforced: list[dict] = []
    for i, word in enumerate(words):
        current = dict(word)
        if i < len(words) - 1:
            next_start = words[i + 1]["start"]
            if current["end"] > next_start:
                current["end"] = next_start
        enforced.append(current)
    return enforced


FileTranscriptAutoProcessor.register("whisper", FileTranscriptWhisperProcessor)
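The batch-merging step in _transcribe_file_blocking is worth seeing in isolation: it greedily folds consecutive speech segments into batches no longer than 30 seconds measured from the first segment's start. A standalone extraction of that logic (illustrative, not part of the diff):

def merge_vad_segments(segments, max_duration=30.0):
    """Greedily merge consecutive (start, end) speech segments into
    batches whose span from the first segment's start stays <= max_duration."""
    batches = []
    batch_start = batch_end = None
    for seg_start, seg_end in segments:
        if batch_start is None:
            batch_start, batch_end = seg_start, seg_end
        elif seg_end - batch_start <= max_duration:
            batch_end = seg_end
        else:
            batches.append((batch_start, batch_end))
            batch_start, batch_end = seg_start, seg_end
    if batch_start is not None:
        batches.append((batch_start, batch_end))
    return batches

# merge_vad_segments([(0.0, 5.0), (6.0, 20.0), (25.0, 40.0)]) -> [(0.0, 20.0), (25.0, 40.0)]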
@@ -14,10 +14,12 @@ class TopicResponse(BaseModel):
     title: str = Field(
         description="A descriptive title for the topic being discussed",
         validation_alias=AliasChoices("title", "Title"),
+        min_length=8,
     )
     summary: str = Field(
         description="A concise 1-2 sentence summary of the discussion",
         validation_alias=AliasChoices("summary", "Summary"),
+        min_length=8,
     )
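With min_length=8, degenerate one-word LLM outputs now fail validation instead of slipping through, which feeds the reflection-retry loop tested later in this changeset. A quick sketch of the behavior using pydantic v2 directly (field shapes copied from the hunk above):

from pydantic import AliasChoices, BaseModel, Field, ValidationError

class TopicResponse(BaseModel):
    title: str = Field(validation_alias=AliasChoices("title", "Title"), min_length=8)
    summary: str = Field(validation_alias=AliasChoices("summary", "Summary"), min_length=8)

# Capitalized keys are accepted via AliasChoices
ok = TopicResponse.model_validate(
    {"Title": "Quarterly planning", "Summary": "Dates were agreed for Q3."}
)

try:
    TopicResponse.model_validate({"title": "Hi", "summary": "Too short"})
except ValidationError as e:
    print(e.error_count())  # 1: title fails the 8-character minimum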
50 server/reflector/processors/transcript_translator_marian.py Normal file
@@ -0,0 +1,50 @@
"""
MarianMT transcript translator processor using HuggingFace MarianMT in-process.

Translates transcript text using HuggingFace MarianMT models
locally. No HTTP backend needed.
"""

import asyncio

from reflector.processors._marian_translator_service import translator_service
from reflector.processors.transcript_translator import TranscriptTranslatorProcessor
from reflector.processors.transcript_translator_auto import (
    TranscriptTranslatorAutoProcessor,
)
from reflector.processors.types import TranslationLanguages


class TranscriptTranslatorMarianProcessor(TranscriptTranslatorProcessor):
    """Translate transcript text using MarianMT models."""

    async def _translate(self, text: str) -> str | None:
        source_language = self.get_pref("audio:source_language", "en")
        target_language = self.get_pref("audio:target_language", "en")

        languages = TranslationLanguages()
        assert languages.is_supported(target_language)

        self.logger.debug(f"MarianMT translate {text=}")

        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(
            None,
            translator_service.translate,
            text,
            source_language,
            target_language,
        )

        if target_language in result["text"]:
            translation = result["text"][target_language]
        else:
            translation = None

        self.logger.debug(f"Translation result: {text=}, {translation=}")
        return translation


TranscriptTranslatorAutoProcessor.register(
    "marian", TranscriptTranslatorMarianProcessor
)
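The _marian_translator_service wiring isn't shown in this diff, but the core call it presumably wraps is standard HuggingFace MarianMT. A sketch with an illustrative model name (one MarianMT model exists per language pair):

from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-en-fr"  # illustrative en->fr pair
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

batch = tokenizer(["The meeting starts at noon."], return_tensors="pt", padding=True)
generated = model.generate(**batch)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# e.g. ['La réunion commence à midi.']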
@@ -40,6 +40,7 @@ class MultitrackProcessingConfig:
     track_keys: list[str]
     recording_id: NonEmptyString | None = None
     room_id: NonEmptyString | None = None
+    source_platform: str = "daily"
     mode: Literal["multitrack"] = "multitrack"


@@ -256,6 +257,7 @@ async def dispatch_transcript_processing(
             "bucket_name": config.bucket_name,
             "transcript_id": config.transcript_id,
             "room_id": config.room_id,
+            "source_platform": config.source_platform,
         },
         additional_metadata={
             "transcript_id": config.transcript_id,
@@ -40,14 +40,24 @@ class Settings(BaseSettings):
     # backends: silero, frames
     AUDIO_CHUNKER_BACKEND: str = "frames"

     # HuggingFace token for gated models (pyannote diarization in --cpu mode)
     HF_TOKEN: str | None = None

     # Audio Transcription
     # backends:
     #  - whisper: in-process model loading (no HTTP, runs in same process)
     #  - modal: HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
     TRANSCRIPT_BACKEND: str = "whisper"

     # Whisper model sizes for local transcription
     # Options: "tiny", "base", "small", "medium", "large-v2"
     WHISPER_CHUNK_MODEL: str = "tiny"
     WHISPER_FILE_MODEL: str = "tiny"
     TRANSCRIPT_URL: str | None = None
     TRANSCRIPT_TIMEOUT: int = 90
-    TRANSCRIPT_FILE_TIMEOUT: int = 600
+    TRANSCRIPT_FILE_TIMEOUT: int = (
+        540  # Below Hatchet TIMEOUT_HEAVY (600) to avoid timeout race
+    )

     # Audio Transcription: modal backend
     TRANSCRIPT_MODAL_API_KEY: str | None = None


@@ -73,6 +83,9 @@ class Settings(BaseSettings):
     DAILYCO_STORAGE_AWS_BUCKET_NAME: str | None = None
     DAILYCO_STORAGE_AWS_REGION: str | None = None
     DAILYCO_STORAGE_AWS_ROLE_ARN: str | None = None
     # Worker credentials for reading/deleting from Daily's recording bucket
     DAILYCO_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
     DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None

     # Translate into the target language
     TRANSLATION_BACKEND: str = "passthrough"


@@ -97,7 +110,7 @@ class Settings(BaseSettings):
     )

     # Diarization
-    # backend: modal — HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
+    # backends: modal — HTTP API client, pyannote — in-process pyannote.audio
     DIARIZATION_ENABLED: bool = True
     DIARIZATION_BACKEND: str = "modal"
     DIARIZATION_URL: str | None = None


@@ -106,7 +119,11 @@ class Settings(BaseSettings):
     # Diarization: modal backend
     DIARIZATION_MODAL_API_KEY: str | None = None

-    # Audio Padding (Modal.com backend)
+    # Audio Padding
+    # backends:
+    #  - pyav: in-process PyAV padding (no HTTP, runs in same process)
+    #  - modal: HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
     PADDING_BACKEND: str = "pyav"
     PADDING_URL: str | None = None
     PADDING_MODAL_API_KEY: str | None = None
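A sketch of how these settings steer backend selection. This assumes Settings follows pydantic-settings conventions (constructor overrides, .env loading) and that no other required fields block bare instantiation:

from reflector.settings import Settings

# Defaults now favour fully local processing:
local = Settings()
assert local.TRANSCRIPT_BACKEND == "whisper"
assert local.PADDING_BACKEND == "pyav"

# A Modal deployment overrides via environment variables or the constructor:
modal = Settings(
    TRANSCRIPT_BACKEND="modal",
    TRANSCRIPT_URL="https://example.modal.run/transcribe",  # placeholder endpoint
    TRANSCRIPT_MODAL_API_KEY="sk-placeholder",
)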
@@ -17,6 +17,49 @@ def get_transcripts_storage() -> Storage:
     )


+def get_source_storage(platform: str) -> Storage:
+    """Get storage for reading/deleting source recording files from the platform's bucket.
+
+    Returns an AwsStorage configured with the platform's worker credentials
+    (access keys), or falls back to get_transcripts_storage() when platform-specific
+    credentials aren't configured (e.g., single-bucket setups).
+
+    Args:
+        platform: Recording platform name ("daily", "whereby", or other).
+    """
+    if platform == "daily":
+        if (
+            settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID
+            and settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY
+            and settings.DAILYCO_STORAGE_AWS_BUCKET_NAME
+        ):
+            from reflector.storage.storage_aws import AwsStorage
+
+            return AwsStorage(
+                aws_bucket_name=settings.DAILYCO_STORAGE_AWS_BUCKET_NAME,
+                aws_region=settings.DAILYCO_STORAGE_AWS_REGION or "us-east-1",
+                aws_access_key_id=settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID,
+                aws_secret_access_key=settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY,
+            )
+    elif platform == "whereby":
+        if (
+            settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID
+            and settings.WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY
+            and settings.WHEREBY_STORAGE_AWS_BUCKET_NAME
+        ):
+            from reflector.storage.storage_aws import AwsStorage
+
+            return AwsStorage(
+                aws_bucket_name=settings.WHEREBY_STORAGE_AWS_BUCKET_NAME,
+                aws_region=settings.WHEREBY_STORAGE_AWS_REGION or "us-east-1",
+                aws_access_key_id=settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID,
+                aws_secret_access_key=settings.WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY,
+            )
+
+    return get_transcripts_storage()
+
+
 def get_whereby_storage() -> Storage:
     """
     Get storage config for Whereby (for passing to Whereby API).


@@ -47,6 +90,9 @@ def get_dailyco_storage() -> Storage:
     """
     Get storage config for Daily.co (for passing to Daily API).

+    Uses role_arn only — access keys are excluded because they're for
+    worker reads (get_source_storage), not for the Daily API.
+
     Usage:
         daily_storage = get_dailyco_storage()
         daily_api.create_meeting(


@@ -57,13 +103,15 @@ def get_dailyco_storage() -> Storage:

     Do NOT use for our file operations - use get_transcripts_storage() instead.
     """
     # Fail fast if platform-specific config missing
     if not settings.DAILYCO_STORAGE_AWS_BUCKET_NAME:
         raise ValueError(
             "DAILYCO_STORAGE_AWS_BUCKET_NAME required for Daily.co with AWS storage"
         )

-    return Storage.get_instance(
-        name="aws",
-        settings_prefix="DAILYCO_STORAGE_",
+    from reflector.storage.storage_aws import AwsStorage
+
+    return AwsStorage(
+        aws_bucket_name=settings.DAILYCO_STORAGE_AWS_BUCKET_NAME,
+        aws_region=settings.DAILYCO_STORAGE_AWS_REGION or "us-east-1",
+        aws_role_arn=settings.DAILYCO_STORAGE_AWS_ROLE_ARN,
     )
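The fallback is the important contract in get_source_storage. In sketch form (the import path is assumed from context, and the commented file operations use hypothetical Storage method names not shown in this diff):

from reflector.storage import get_source_storage

storage = get_source_storage("daily")
# With DAILYCO_STORAGE_AWS_* worker credentials set -> AwsStorage on Daily's bucket.
# Without them (single-bucket setups) -> the same storage as get_transcripts_storage().

# Hypothetical worker flow: read a raw track, then delete the platform's copy.
# data = await storage.get("recordings/rec-1/track-0.webm")
# await storage.delete("recordings/rec-1/track-0.webm")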
@@ -17,6 +17,7 @@ from typing import Callable
 from celery.result import AsyncResult
 from hatchet_sdk.clients.rest.models import V1TaskStatus

 import reflector._warnings_filter  # noqa: F401 -- side effect: suppress pydantic validate_default warning
 from reflector.db import get_database
 from reflector.db.transcripts import Transcript, transcripts_controller
 from reflector.hatchet.client import HatchetClientManager
@@ -30,6 +30,7 @@ def retry(fn):
     "retry_httpx_status_stop",
     (
         401,  # auth issue
+        402,  # payment required / no credits — needs human action
         404,  # not found
         413,  # payload too large
         418,  # teapot


@@ -58,8 +59,9 @@ def retry(fn):
             result = await fn(*args, **kwargs)
             if isinstance(result, Response):
                 result.raise_for_status()
-            if result:
-                return result
+            # Return any result including falsy (e.g. "" from get_response);
+            # only retry on exception, not on empty string.
+            return result
         except HTTPStatusError as e:
             retry_logger.exception(e)
             status_code = e.response.status_code
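The second hunk matters because the old `if result:` check treated a legitimate falsy value, such as `""` from get_response, as a failure and kept retrying it. A stripped-down sketch of the before/after behavior (the real decorator also classifies HTTP status codes; this only shows the falsy-result fix):

import asyncio

async def get_response() -> str:
    return ""  # legitimate empty response

async def retry_old(fn, attempts=3):
    result = None
    for _ in range(attempts):
        result = await fn()
        if result:            # "" is falsy -> spurious retries
            return result
    return result

async def retry_new(fn, attempts=3):
    for attempt in range(attempts):
        try:
            return await fn()  # any result, including "", is final
        except Exception:
            if attempt == attempts - 1:
                raise

async def main():
    print(repr(await retry_old(get_response)))  # '' after 3 wasted calls
    print(repr(await retry_new(get_response)))  # '' after exactly one call

asyncio.run(main())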
@@ -1,10 +1,10 @@
 from datetime import datetime, timedelta, timezone
 from typing import Annotated, Literal, Optional, assert_never

+import jwt
 from fastapi import APIRouter, Depends, HTTPException, Query
 from fastapi_pagination import Page
 from fastapi_pagination.ext.databases import apaginate
-from jose import jwt
 from pydantic import (
     AwareDatetime,
     BaseModel,
@@ -7,13 +7,12 @@ Transcripts audio related endpoints
 from typing import Annotated, Optional

 import httpx
+import jwt
 from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
-from jose import jwt

 import reflector.auth as auth
 from reflector.db.transcripts import AudioWaveform, transcripts_controller
 from reflector.settings import settings
-from reflector.views.transcripts import ALGORITHM

 from ._range_requests_response import range_requests_response


@@ -36,16 +35,23 @@ async def transcript_get_audio_mp3(
 ):
     user_id = user["sub"] if user else None
     if not user_id and token:
-        unauthorized_exception = HTTPException(
-            status_code=status.HTTP_401_UNAUTHORIZED,
-            detail="Invalid or expired token",
-            headers={"WWW-Authenticate": "Bearer"},
-        )
         try:
-            payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[ALGORITHM])
-            user_id: str = payload.get("sub")
-        except jwt.JWTError:
-            raise unauthorized_exception
+            token_user = await auth.verify_raw_token(token)
+        except Exception:
+            token_user = None
+        # Fallback: try as internal HS256 token (created by _generate_local_audio_link)
+        if not token_user:
+            try:
+                payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
+                user_id = payload.get("sub")
+            except jwt.PyJWTError:
+                raise HTTPException(
+                    status_code=status.HTTP_401_UNAUTHORIZED,
+                    detail="Invalid or expired token",
+                    headers={"WWW-Authenticate": "Bearer"},
+                )
+        else:
+            user_id = token_user["sub"]

     transcript = await transcripts_controller.get_by_id_for_http(
         transcript_id, user_id=user_id
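The fallback path implies a mint side: somewhere a short-lived HS256 token is created for local audio links. That helper, referenced as _generate_local_audio_link, is not shown in this diff; a plausible sketch with PyJWT, where the claim set and one-hour lifetime are assumptions:

from datetime import datetime, timedelta, timezone

import jwt  # PyJWT

def generate_local_audio_link_token(user_id: str, secret: str) -> str:
    # "sub" matches what the decode side reads; the 1-hour expiry is an assumption
    payload = {"sub": user_id, "exp": datetime.now(timezone.utc) + timedelta(hours=1)}
    return jwt.encode(payload, secret, algorithm="HS256")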
@@ -50,5 +50,8 @@ async def transcript_process(
     if isinstance(config, ProcessError):
         raise HTTPException(status_code=500, detail=config.detail)
     else:
-        await dispatch_transcript_processing(config)
+        # When transcript is in error state, force a new workflow instead of replaying
+        # (replay would re-run from failure point with same conditions and likely fail again)
+        force = transcript.status == "error"
+        await dispatch_transcript_processing(config, force=force)
     return ProcessStatus(status="ok")
@@ -24,6 +24,118 @@ RECONCILIATION_INTERVAL = _override or 30.0
 ICS_SYNC_INTERVAL = _override or 60.0
 UPCOMING_MEETINGS_INTERVAL = _override or 30.0


def build_beat_schedule(
    *,
    whereby_api_key=None,
    aws_process_recording_queue_url=None,
    daily_api_key=None,
    public_mode=False,
    public_data_retention_days=None,
    healthcheck_url=None,
):
    """Build the Celery beat schedule based on configured services.

    Only registers tasks for services that are actually configured,
    avoiding unnecessary worker wake-ups in selfhosted deployments.
    """
    beat_schedule = {}

    _whereby_enabled = bool(whereby_api_key) or bool(aws_process_recording_queue_url)
    if _whereby_enabled:
        beat_schedule["process_messages"] = {
            "task": "reflector.worker.process.process_messages",
            "schedule": SQS_POLL_INTERVAL,
        }
        beat_schedule["reprocess_failed_recordings"] = {
            "task": "reflector.worker.process.reprocess_failed_recordings",
            "schedule": crontab(hour=5, minute=0),  # Midnight EST
        }
        logger.info(
            "Whereby beat tasks enabled",
            tasks=["process_messages", "reprocess_failed_recordings"],
        )
    else:
        logger.info("Whereby beat tasks disabled (no WHEREBY_API_KEY or SQS URL)")

    _daily_enabled = bool(daily_api_key)
    if _daily_enabled:
        beat_schedule["poll_daily_recordings"] = {
            "task": "reflector.worker.process.poll_daily_recordings",
            "schedule": POLL_DAILY_RECORDINGS_INTERVAL_SEC,
        }
        beat_schedule["trigger_daily_reconciliation"] = {
            "task": "reflector.worker.process.trigger_daily_reconciliation",
            "schedule": RECONCILIATION_INTERVAL,
        }
        beat_schedule["reprocess_failed_daily_recordings"] = {
            "task": "reflector.worker.process.reprocess_failed_daily_recordings",
            "schedule": crontab(hour=5, minute=0),  # Midnight EST
        }
        logger.info(
            "Daily.co beat tasks enabled",
            tasks=[
                "poll_daily_recordings",
                "trigger_daily_reconciliation",
                "reprocess_failed_daily_recordings",
            ],
        )
    else:
        logger.info("Daily.co beat tasks disabled (no DAILY_API_KEY)")

    _any_platform = _whereby_enabled or _daily_enabled
    if _any_platform:
        beat_schedule["process_meetings"] = {
            "task": "reflector.worker.process.process_meetings",
            "schedule": SQS_POLL_INTERVAL,
        }
        beat_schedule["sync_all_ics_calendars"] = {
            "task": "reflector.worker.ics_sync.sync_all_ics_calendars",
            "schedule": ICS_SYNC_INTERVAL,
        }
        beat_schedule["create_upcoming_meetings"] = {
            "task": "reflector.worker.ics_sync.create_upcoming_meetings",
            "schedule": UPCOMING_MEETINGS_INTERVAL,
        }
        logger.info(
            "Platform tasks enabled",
            tasks=[
                "process_meetings",
                "sync_all_ics_calendars",
                "create_upcoming_meetings",
            ],
        )
    else:
        logger.info("Platform tasks disabled (no video platform configured)")

    if public_mode:
        beat_schedule["cleanup_old_public_data"] = {
            "task": "reflector.worker.cleanup.cleanup_old_public_data_task",
            "schedule": crontab(hour=3, minute=0),
        }
        logger.info(
            "Public mode cleanup enabled",
            retention_days=public_data_retention_days,
        )

    if healthcheck_url:
        beat_schedule["healthcheck_ping"] = {
            "task": "reflector.worker.healthcheck.healthcheck_ping",
            "schedule": 60.0 * 10,
        }
        logger.info("Healthcheck enabled", url=healthcheck_url)
    else:
        logger.warning("Healthcheck disabled, no url configured")

    logger.info(
        "Beat schedule configured",
        total_tasks=len(beat_schedule),
        task_names=sorted(beat_schedule.keys()),
    )

    return beat_schedule


if celery.current_app.main != "default":
    logger.info(f"Celery already configured ({celery.current_app})")
    app = celery.current_app


@@ -42,57 +154,11 @@ else:
         ]
     )

-    # crontab
-    app.conf.beat_schedule = {
-        "process_messages": {
-            "task": "reflector.worker.process.process_messages",
-            "schedule": SQS_POLL_INTERVAL,
-        },
-        "process_meetings": {
-            "task": "reflector.worker.process.process_meetings",
-            "schedule": SQS_POLL_INTERVAL,
-        },
-        "reprocess_failed_recordings": {
-            "task": "reflector.worker.process.reprocess_failed_recordings",
-            "schedule": crontab(hour=5, minute=0),  # Midnight EST
-        },
-        "reprocess_failed_daily_recordings": {
-            "task": "reflector.worker.process.reprocess_failed_daily_recordings",
-            "schedule": crontab(hour=5, minute=0),  # Midnight EST
-        },
-        "poll_daily_recordings": {
-            "task": "reflector.worker.process.poll_daily_recordings",
-            "schedule": POLL_DAILY_RECORDINGS_INTERVAL_SEC,
-        },
-        "trigger_daily_reconciliation": {
-            "task": "reflector.worker.process.trigger_daily_reconciliation",
-            "schedule": RECONCILIATION_INTERVAL,
-        },
-        "sync_all_ics_calendars": {
-            "task": "reflector.worker.ics_sync.sync_all_ics_calendars",
-            "schedule": ICS_SYNC_INTERVAL,
-        },
-        "create_upcoming_meetings": {
-            "task": "reflector.worker.ics_sync.create_upcoming_meetings",
-            "schedule": UPCOMING_MEETINGS_INTERVAL,
-        },
-    }
-
-    if settings.PUBLIC_MODE:
-        app.conf.beat_schedule["cleanup_old_public_data"] = {
-            "task": "reflector.worker.cleanup.cleanup_old_public_data_task",
-            "schedule": crontab(hour=3, minute=0),
-        }
-        logger.info(
-            "Public mode cleanup enabled",
-            retention_days=settings.PUBLIC_DATA_RETENTION_DAYS,
-        )
-
-    if settings.HEALTHCHECK_URL:
-        app.conf.beat_schedule["healthcheck_ping"] = {
-            "task": "reflector.worker.healthcheck.healthcheck_ping",
-            "schedule": 60.0 * 10,
-        }
-        logger.info("Healthcheck enabled", url=settings.HEALTHCHECK_URL)
-    else:
-        logger.warning("Healthcheck disabled, no url configured")
+    app.conf.beat_schedule = build_beat_schedule(
+        whereby_api_key=settings.WHEREBY_API_KEY,
+        aws_process_recording_queue_url=settings.AWS_PROCESS_RECORDING_QUEUE_URL,
+        daily_api_key=settings.DAILY_API_KEY,
+        public_mode=settings.PUBLIC_MODE,
+        public_data_retention_days=settings.PUBLIC_DATA_RETENTION_DAYS,
+        healthcheck_url=settings.HEALTHCHECK_URL,
+    )
@@ -357,6 +357,7 @@ async def _process_multitrack_recording_inner(
             "bucket_name": bucket_name,
             "transcript_id": transcript.id,
             "room_id": room.id,
+            "source_platform": "daily",
         },
         additional_metadata={
             "transcript_id": transcript.id,


@@ -1068,6 +1069,7 @@ async def reprocess_failed_daily_recordings():
             "bucket_name": bucket_name,
             "transcript_id": transcript.id,
             "room_id": room.id if room else None,
+            "source_platform": "daily",
         },
         additional_metadata={
             "transcript_id": transcript.id,
@@ -1,8 +1,8 @@
 """Tests for the password auth backend."""

+import jwt
 import pytest
 from httpx import AsyncClient
-from jose import jwt

 from reflector.auth.password_utils import hash_password
 from reflector.settings import settings
247 server/tests/test_beat_schedule.py Normal file
@@ -0,0 +1,247 @@
"""Tests for conditional Celery beat schedule registration.

Verifies that beat tasks are only registered when their corresponding
services are configured (WHEREBY_API_KEY, DAILY_API_KEY, etc.).
"""

import pytest

from reflector.worker.app import build_beat_schedule


# Override autouse fixtures from conftest — these tests don't need database or websockets
@pytest.fixture(autouse=True)
def setup_database():
    yield


@pytest.fixture(autouse=True)
def ws_manager_in_memory():
    yield


@pytest.fixture(autouse=True)
def reset_hatchet_client():
    yield


# Task name sets for each group
WHEREBY_TASKS = {"process_messages", "reprocess_failed_recordings"}
DAILY_TASKS = {
    "poll_daily_recordings",
    "trigger_daily_reconciliation",
    "reprocess_failed_daily_recordings",
}
PLATFORM_TASKS = {
    "process_meetings",
    "sync_all_ics_calendars",
    "create_upcoming_meetings",
}


class TestNoPlatformConfigured:
    """When no video platform is configured, no platform tasks should be registered."""

    def test_no_platform_tasks(self):
        schedule = build_beat_schedule()
        task_names = set(schedule.keys())
        assert not task_names & WHEREBY_TASKS
        assert not task_names & DAILY_TASKS
        assert not task_names & PLATFORM_TASKS

    def test_only_healthcheck_disabled_warning(self):
        """With no config at all, schedule should be empty (healthcheck needs URL)."""
        schedule = build_beat_schedule()
        assert len(schedule) == 0

    def test_healthcheck_only(self):
        schedule = build_beat_schedule(healthcheck_url="https://hc.example.com/ping")
        assert set(schedule.keys()) == {"healthcheck_ping"}

    def test_public_mode_only(self):
        schedule = build_beat_schedule(public_mode=True)
        assert set(schedule.keys()) == {"cleanup_old_public_data"}


class TestWherebyOnly:
    """When only Whereby is configured."""

    def test_whereby_api_key(self):
        schedule = build_beat_schedule(whereby_api_key="test-key")
        task_names = set(schedule.keys())
        assert WHEREBY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names
        assert not task_names & DAILY_TASKS

    def test_whereby_sqs_url(self):
        schedule = build_beat_schedule(
            aws_process_recording_queue_url="https://sqs.us-east-1.amazonaws.com/123/queue"
        )
        task_names = set(schedule.keys())
        assert WHEREBY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names
        assert not task_names & DAILY_TASKS

    def test_whereby_task_count(self):
        schedule = build_beat_schedule(whereby_api_key="test-key")
        # Whereby (2) + Platform (3) = 5
        assert len(schedule) == 5


class TestDailyOnly:
    """When only Daily.co is configured."""

    def test_daily_api_key(self):
        schedule = build_beat_schedule(daily_api_key="test-daily-key")
        task_names = set(schedule.keys())
        assert DAILY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names
        assert not task_names & WHEREBY_TASKS

    def test_daily_task_count(self):
        schedule = build_beat_schedule(daily_api_key="test-daily-key")
        # Daily (3) + Platform (3) = 6
        assert len(schedule) == 6


class TestBothPlatforms:
    """When both Whereby and Daily.co are configured."""

    def test_all_tasks_registered(self):
        schedule = build_beat_schedule(
            whereby_api_key="test-key",
            daily_api_key="test-daily-key",
        )
        task_names = set(schedule.keys())
        assert WHEREBY_TASKS <= task_names
        assert DAILY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names

    def test_combined_task_count(self):
        schedule = build_beat_schedule(
            whereby_api_key="test-key",
            daily_api_key="test-daily-key",
        )
        # Whereby (2) + Daily (3) + Platform (3) = 8
        assert len(schedule) == 8


class TestConditionalFlags:
    """Test PUBLIC_MODE and HEALTHCHECK_URL interact correctly with platform tasks."""

    def test_all_flags_enabled(self):
        schedule = build_beat_schedule(
            whereby_api_key="test-key",
            daily_api_key="test-daily-key",
            public_mode=True,
            healthcheck_url="https://hc.example.com/ping",
        )
        task_names = set(schedule.keys())
        assert "cleanup_old_public_data" in task_names
        assert "healthcheck_ping" in task_names
        assert WHEREBY_TASKS <= task_names
        assert DAILY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names
        # Whereby (2) + Daily (3) + Platform (3) + cleanup (1) + healthcheck (1) = 10
        assert len(schedule) == 10

    def test_public_mode_with_whereby(self):
        schedule = build_beat_schedule(
            whereby_api_key="test-key",
            public_mode=True,
        )
        task_names = set(schedule.keys())
        assert "cleanup_old_public_data" in task_names
        assert WHEREBY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names

    def test_healthcheck_with_daily(self):
        schedule = build_beat_schedule(
            daily_api_key="test-daily-key",
            healthcheck_url="https://hc.example.com/ping",
        )
        task_names = set(schedule.keys())
        assert "healthcheck_ping" in task_names
        assert DAILY_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names


class TestTaskDefinitions:
    """Verify task definitions have correct structure."""

    def test_whereby_task_paths(self):
        schedule = build_beat_schedule(whereby_api_key="test-key")
        assert (
            schedule["process_messages"]["task"]
            == "reflector.worker.process.process_messages"
        )
        assert (
            schedule["reprocess_failed_recordings"]["task"]
            == "reflector.worker.process.reprocess_failed_recordings"
        )

    def test_daily_task_paths(self):
        schedule = build_beat_schedule(daily_api_key="test-daily-key")
        assert (
            schedule["poll_daily_recordings"]["task"]
            == "reflector.worker.process.poll_daily_recordings"
        )
        assert (
            schedule["trigger_daily_reconciliation"]["task"]
            == "reflector.worker.process.trigger_daily_reconciliation"
        )
        assert (
            schedule["reprocess_failed_daily_recordings"]["task"]
            == "reflector.worker.process.reprocess_failed_daily_recordings"
        )

    def test_platform_task_paths(self):
        schedule = build_beat_schedule(daily_api_key="test-daily-key")
        assert (
            schedule["process_meetings"]["task"]
            == "reflector.worker.process.process_meetings"
        )
        assert (
            schedule["sync_all_ics_calendars"]["task"]
            == "reflector.worker.ics_sync.sync_all_ics_calendars"
        )
        assert (
            schedule["create_upcoming_meetings"]["task"]
            == "reflector.worker.ics_sync.create_upcoming_meetings"
        )

    def test_all_tasks_have_schedule(self):
        """Every registered task must have a 'schedule' key."""
        schedule = build_beat_schedule(
            whereby_api_key="test-key",
            daily_api_key="test-daily-key",
            public_mode=True,
            healthcheck_url="https://hc.example.com/ping",
        )
        for name, config in schedule.items():
            assert "schedule" in config, f"Task '{name}' missing 'schedule' key"
            assert "task" in config, f"Task '{name}' missing 'task' key"


class TestEmptyStringValues:
    """Empty strings should be treated as not configured (falsy)."""

    def test_empty_whereby_key(self):
        schedule = build_beat_schedule(whereby_api_key="")
        assert not set(schedule.keys()) & WHEREBY_TASKS

    def test_empty_daily_key(self):
        schedule = build_beat_schedule(daily_api_key="")
        assert not set(schedule.keys()) & DAILY_TASKS

    def test_empty_sqs_url(self):
        schedule = build_beat_schedule(aws_process_recording_queue_url="")
        assert not set(schedule.keys()) & WHEREBY_TASKS

    def test_none_values(self):
        schedule = build_beat_schedule(
            whereby_api_key=None,
            daily_api_key=None,
            aws_process_recording_queue_url=None,
        )
        assert len(schedule) == 0
303 server/tests/test_hatchet_error_handling.py Normal file
@@ -0,0 +1,303 @@
"""
Tests for Hatchet error handling: NonRetryable classification and error status.

These tests encode the desired behavior from the Hatchet Workflow Analysis doc:
- Transient exceptions: do NOT set error status (let Hatchet retry; user stays on "processing").
- Hard-fail exceptions: set error status and re-raise as NonRetryableException (stop retries).
- on_failure_task: sets error status when workflow is truly dead.

Run before the fix: some tests fail (reproducing the issues).
Run after the fix: all tests pass.
"""

from contextlib import asynccontextmanager
from unittest.mock import AsyncMock, MagicMock, patch

import httpx
import pytest
from hatchet_sdk import NonRetryableException

from reflector.hatchet.error_classification import is_non_retryable
from reflector.llm import LLMParseError

# --- Tests for is_non_retryable() (pass once error_classification exists) ---


def test_is_non_retryable_returns_true_for_value_error():
    """ValueError (e.g. missing config) should stop retries."""
    assert is_non_retryable(ValueError("DAILY_API_KEY must be set")) is True


def test_is_non_retryable_returns_true_for_type_error():
    """TypeError (bad input) should stop retries."""
    assert is_non_retryable(TypeError("expected str")) is True


def test_is_non_retryable_returns_true_for_http_401():
    """HTTP 401 auth error should stop retries."""
    resp = MagicMock()
    resp.status_code = 401
    err = httpx.HTTPStatusError("Unauthorized", request=MagicMock(), response=resp)
    assert is_non_retryable(err) is True


def test_is_non_retryable_returns_true_for_http_402():
    """HTTP 402 (no credits) should stop retries."""
    resp = MagicMock()
    resp.status_code = 402
    err = httpx.HTTPStatusError("Payment Required", request=MagicMock(), response=resp)
    assert is_non_retryable(err) is True


def test_is_non_retryable_returns_true_for_http_404():
    """HTTP 404 should stop retries."""
    resp = MagicMock()
    resp.status_code = 404
    err = httpx.HTTPStatusError("Not Found", request=MagicMock(), response=resp)
    assert is_non_retryable(err) is True


def test_is_non_retryable_returns_false_for_http_503():
    """HTTP 503 is transient; retries are useful."""
    resp = MagicMock()
    resp.status_code = 503
    err = httpx.HTTPStatusError(
        "Service Unavailable", request=MagicMock(), response=resp
    )
    assert is_non_retryable(err) is False


def test_is_non_retryable_returns_false_for_timeout():
    """Timeout is transient."""
    assert is_non_retryable(httpx.TimeoutException("timed out")) is False


def test_is_non_retryable_returns_true_for_llm_parse_error():
    """LLMParseError after internal retries should stop."""
    from pydantic import BaseModel

    class _Dummy(BaseModel):
        pass

    assert is_non_retryable(LLMParseError(_Dummy, "Failed to parse", 3)) is True


def test_is_non_retryable_returns_true_for_non_retryable_exception():
    """Already-wrapped NonRetryableException should stay non-retryable."""
    assert is_non_retryable(NonRetryableException("custom")) is True


# --- Tests for with_error_handling (need pipeline module with patch) ---


@pytest.fixture(scope="module")
def pipeline_module():
    """Import daily_multitrack_pipeline with Hatchet client mocked."""
    with patch("reflector.hatchet.client.settings") as s:
        s.HATCHET_CLIENT_TOKEN = "test-token"
        s.HATCHET_DEBUG = False
        mock_client = MagicMock()
        mock_client.workflow.return_value = MagicMock()
        with patch(
            "reflector.hatchet.client.HatchetClientManager.get_client",
            return_value=mock_client,
        ):
            from reflector.hatchet.workflows import daily_multitrack_pipeline

            return daily_multitrack_pipeline


@pytest.fixture
def mock_input():
    """Minimal PipelineInput for decorator tests."""
    from reflector.hatchet.workflows.daily_multitrack_pipeline import PipelineInput

    return PipelineInput(
        recording_id="rec-1",
        tracks=[],
        bucket_name="bucket",
        transcript_id="ts-123",
        room_id=None,
    )


@pytest.fixture
def mock_ctx():
    """Minimal Context-like object."""
    ctx = MagicMock()
    ctx.log = MagicMock()
    return ctx


@pytest.mark.asyncio
async def test_with_error_handling_transient_does_not_set_error_status(
    pipeline_module, mock_input, mock_ctx
):
    """Transient exception must NOT set error status (so user stays on 'processing' during retries).

    Before fix: set_workflow_error_status is called on every exception → FAIL.
    After fix: not called for transient → PASS.
    """
    from reflector.hatchet.workflows.daily_multitrack_pipeline import (
        TaskName,
        with_error_handling,
    )

    async def failing_task(input, ctx):
        raise httpx.TimeoutException("timed out")

    wrapped = with_error_handling(TaskName.GET_RECORDING)(failing_task)

    with patch(
        "reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
        new_callable=AsyncMock,
    ) as mock_set_error:
        with pytest.raises(httpx.TimeoutException):
            await wrapped(mock_input, mock_ctx)

        # Desired: do NOT set error status for transient (Hatchet will retry)
        mock_set_error.assert_not_called()


@pytest.mark.asyncio
async def test_with_error_handling_hard_fail_raises_non_retryable_and_sets_status(
    pipeline_module, mock_input, mock_ctx
):
    """Hard-fail (e.g. ValueError) must set error status and re-raise NonRetryableException.

    Before fix: raises ValueError, set_workflow_error_status called → test would need to expect ValueError.
    After fix: raises NonRetryableException, set_workflow_error_status called → PASS.
    """
    from reflector.hatchet.workflows.daily_multitrack_pipeline import (
        TaskName,
        with_error_handling,
    )

    async def failing_task(input, ctx):
        raise ValueError("PADDING_URL must be set")

    wrapped = with_error_handling(TaskName.GET_RECORDING)(failing_task)

    with patch(
        "reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
        new_callable=AsyncMock,
    ) as mock_set_error:
        with pytest.raises(NonRetryableException) as exc_info:
            await wrapped(mock_input, mock_ctx)

        assert "PADDING_URL" in str(exc_info.value)
        mock_set_error.assert_called_once_with("ts-123")


@pytest.mark.asyncio
async def test_with_error_handling_set_error_status_false_never_sets_status(
    pipeline_module, mock_input, mock_ctx
):
    """When set_error_status=False, we must never set error status (e.g. cleanup_consent)."""
    from reflector.hatchet.workflows.daily_multitrack_pipeline import (
        TaskName,
        with_error_handling,
    )

    async def failing_task(input, ctx):
        raise ValueError("something went wrong")

    wrapped = with_error_handling(TaskName.CLEANUP_CONSENT, set_error_status=False)(
        failing_task
    )

    with patch(
        "reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
        new_callable=AsyncMock,
    ) as mock_set_error:
        with pytest.raises((ValueError, NonRetryableException)):
            await wrapped(mock_input, mock_ctx)

        mock_set_error.assert_not_called()


@asynccontextmanager
async def _noop_db_context():
    """Async context manager that yields without touching the DB (for unit tests)."""
    yield None


@pytest.mark.asyncio
async def test_on_failure_task_sets_error_status(pipeline_module, mock_input, mock_ctx):
    """When workflow fails and transcript is not yet 'ended', on_failure sets status to 'error'."""
    from reflector.hatchet.workflows.daily_multitrack_pipeline import (
        on_workflow_failure,
    )

    transcript_processing = MagicMock()
    transcript_processing.status = "processing"

    with patch(
        "reflector.hatchet.workflows.daily_multitrack_pipeline.fresh_db_connection",
        _noop_db_context,
    ):
        with patch(
            "reflector.db.transcripts.transcripts_controller.get_by_id",
            new_callable=AsyncMock,
            return_value=transcript_processing,
        ):
            with patch(
                "reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
                new_callable=AsyncMock,
            ) as mock_set_error:
                await on_workflow_failure(mock_input, mock_ctx)
                mock_set_error.assert_called_once_with(mock_input.transcript_id)


@pytest.mark.asyncio
async def test_on_failure_task_does_not_overwrite_ended(
    pipeline_module, mock_input, mock_ctx
):
    """When workflow fails after finalize (e.g. post_zulip), do not overwrite 'ended' with 'error'.

    cleanup_consent, post_zulip, send_webhook use set_error_status=False; if one fails,
    on_workflow_failure must not set status to 'error' when transcript is already 'ended'.
    """
    from reflector.hatchet.workflows.daily_multitrack_pipeline import (
        on_workflow_failure,
    )

    transcript_ended = MagicMock()
    transcript_ended.status = "ended"

    with patch(
        "reflector.hatchet.workflows.daily_multitrack_pipeline.fresh_db_connection",
        _noop_db_context,
    ):
        with patch(
            "reflector.db.transcripts.transcripts_controller.get_by_id",
            new_callable=AsyncMock,
            return_value=transcript_ended,
        ):
            with patch(
                "reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
                new_callable=AsyncMock,
            ) as mock_set_error:
                await on_workflow_failure(mock_input, mock_ctx)
                mock_set_error.assert_not_called()


# --- Tests for fan-out helper (_successful_run_results) ---


def test_successful_run_results_filters_exceptions():
    """_successful_run_results returns only non-exception items from aio_run_many(return_exceptions=True)."""
    from reflector.hatchet.workflows.daily_multitrack_pipeline import (
        _successful_run_results,
    )

    results = [
        {"key": "ok1"},
        ValueError("child failed"),
        {"key": "ok2"},
        RuntimeError("another"),
    ]
    successful = _successful_run_results(results)
    assert len(successful) == 2
    assert successful[0] == {"key": "ok1"}
    assert successful[1] == {"key": "ok2"}
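The tests above pin down the classification table precisely, so an implementation satisfying them can be small. A sketch; the real reflector.hatchet.error_classification module may differ in structure:

import httpx
from hatchet_sdk import NonRetryableException

from reflector.llm import LLMParseError

# Status codes where retrying cannot help (mirrors the retry_httpx_status_stop list)
_STOP_STATUSES = {401, 402, 404, 413, 418}

def is_non_retryable(exc: Exception) -> bool:
    if isinstance(exc, (ValueError, TypeError, LLMParseError, NonRetryableException)):
        return True
    if isinstance(exc, httpx.HTTPStatusError):
        return exc.response.status_code in _STOP_STATUSES
    return False  # timeouts, 5xx, connection errors: let Hatchet retry

def _successful_run_results(results: list) -> list:
    """Keep only non-exception items from aio_run_many(return_exceptions=True)."""
    return [r for r in results if not isinstance(r, BaseException)]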
@@ -1,13 +1,11 @@
-"""Tests for LLM parse error recovery using llama-index Workflow"""
+"""Tests for LLM structured output with astructured_predict + reflection retry"""

 from time import monotonic
 from unittest.mock import AsyncMock, MagicMock, patch

 import pytest
-from pydantic import BaseModel, Field
-from workflows.errors import WorkflowRuntimeError, WorkflowTimeoutError
+from pydantic import BaseModel, Field, ValidationError

-from reflector.llm import LLM, LLMParseError, StructuredOutputWorkflow
+from reflector.llm import LLM, LLMParseError
 from reflector.utils.retry import RetryException


@@ -19,51 +17,43 @@ class TestResponse(BaseModel):
     confidence: float = Field(description="Confidence score", ge=0, le=1)


-def make_completion_response(text: str):
-    """Create a mock CompletionResponse with .text attribute"""
-    response = MagicMock()
-    response.text = text
-    return response
-
-
 class TestLLMParseErrorRecovery:
-    """Test parse error recovery with Workflow feedback loop"""
+    """Test parse error recovery with astructured_predict reflection loop"""

     @pytest.mark.asyncio
     async def test_parse_error_recovery_with_feedback(self, test_settings):
-        """Test that parse errors trigger retry with error feedback"""
+        """Test that parse errors trigger retry with reflection prompt"""
         llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

-        with (
-            patch("reflector.llm.TreeSummarize") as mock_summarize,
-            patch("reflector.llm.Settings") as mock_settings,
-        ):
-            mock_summarizer = MagicMock()
-            mock_summarize.return_value = mock_summarizer
-            # TreeSummarize returns plain text analysis (step 1)
-            mock_summarizer.aget_response = AsyncMock(
-                return_value="The analysis shows a test with summary and high confidence."
-            )
+        call_count = {"count": 0}

+        async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
+            call_count["count"] += 1
+            if call_count["count"] == 1:
+                # First call: raise ValidationError (missing fields)
+                raise ValidationError.from_exception_data(
+                    title="TestResponse",
+                    line_errors=[
+                        {
+                            "type": "missing",
+                            "loc": ("summary",),
+                            "msg": "Field required",
+                            "input": {"title": "Test"},
+                        }
+                    ],
+                )
+            else:
+                # Second call: should have reflection in the prompt
+                assert "reflection" in kwargs
+                assert "could not be parsed" in kwargs["reflection"]
+                assert "Error:" in kwargs["reflection"]
+                return TestResponse(title="Test", summary="Summary", confidence=0.95)

+        with patch("reflector.llm.Settings") as mock_settings:
+            mock_settings.llm.astructured_predict = AsyncMock(
+                side_effect=astructured_predict_handler
+            )

-            call_count = {"count": 0}
-
-            async def acomplete_handler(prompt, *args, **kwargs):
-                call_count["count"] += 1
-                if call_count["count"] == 1:
-                    # First JSON formatting call returns invalid JSON
-                    return make_completion_response('{"title": "Test"}')
-                else:
-                    # Second call should have error feedback in prompt
-                    assert "Your previous response could not be parsed:" in prompt
-                    assert '{"title": "Test"}' in prompt
-                    assert "Error:" in prompt
-                    assert "Please try again" in prompt
-                    return make_completion_response(
-                        '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
-                    )
-
-            mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
-
             result = await llm.get_structured_response(
                 prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
             )


@@ -71,8 +61,6 @@ class TestLLMParseErrorRecovery:
         assert result.title == "Test"
         assert result.summary == "Summary"
         assert result.confidence == 0.95
-        # TreeSummarize called once, Settings.llm.acomplete called twice
-        assert mock_summarizer.aget_response.call_count == 1
         assert call_count["count"] == 2

     @pytest.mark.asyncio


@@ -80,56 +68,61 @@ class TestLLMParseErrorRecovery:
         """Test that parse error retry stops after max attempts"""
         llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

-        with (
-            patch("reflector.llm.TreeSummarize") as mock_summarize,
-            patch("reflector.llm.Settings") as mock_settings,
-        ):
-            mock_summarizer = MagicMock()
-            mock_summarize.return_value = mock_summarizer
-            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
-
-            # Always return invalid JSON from acomplete
-            mock_settings.llm.acomplete = AsyncMock(
-                return_value=make_completion_response(
-                    '{"invalid": "missing required fields"}'
-                )
+        # Always raise ValidationError
+        async def always_fail(output_cls, prompt_tmpl, **kwargs):
+            raise ValidationError.from_exception_data(
+                title="TestResponse",
+                line_errors=[
+                    {
+                        "type": "missing",
+                        "loc": ("summary",),
+                        "msg": "Field required",
+                        "input": {},
+                    }
+                ],
             )

+        with patch("reflector.llm.Settings") as mock_settings:
+            mock_settings.llm.astructured_predict = AsyncMock(side_effect=always_fail)
+
             with pytest.raises(LLMParseError, match="Failed to parse"):
                 await llm.get_structured_response(
                     prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
                 )

             expected_attempts = test_settings.LLM_PARSE_MAX_RETRIES + 1
-            # TreeSummarize called once, acomplete called max_retries times
-            assert mock_summarizer.aget_response.call_count == 1
-            assert mock_settings.llm.acomplete.call_count == expected_attempts
+            assert mock_settings.llm.astructured_predict.call_count == expected_attempts

     @pytest.mark.asyncio
     async def test_raw_response_logging_on_parse_error(self, test_settings, caplog):
         """Test that raw response is logged when parse error occurs"""
         llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

+        call_count = {"count": 0}
+
+        async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
+            call_count["count"] += 1
+            if call_count["count"] == 1:
+                raise ValidationError.from_exception_data(
+                    title="TestResponse",
+                    line_errors=[
+                        {
+                            "type": "missing",
+                            "loc": ("summary",),
+                            "msg": "Field required",
+                            "input": {"title": "Test"},
+                        }
+                    ],
+                )
+            return TestResponse(title="Test", summary="Summary", confidence=0.95)
+
         with (
-            patch("reflector.llm.TreeSummarize") as mock_summarize,
             patch("reflector.llm.Settings") as mock_settings,
             caplog.at_level("ERROR"),
         ):
-            mock_summarizer = MagicMock()
-            mock_summarize.return_value = mock_summarizer
-            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
-
-            call_count = {"count": 0}
-
-            async def acomplete_handler(*args, **kwargs):
-                call_count["count"] += 1
-                if call_count["count"] == 1:
-                    return make_completion_response('{"title": "Test"}')  # Invalid
-                return make_completion_response(
-                    '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
-                )
-
-            mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
+            mock_settings.llm.astructured_predict = AsyncMock(
+                side_effect=astructured_predict_handler
+            )

             result = await llm.get_structured_response(
                 prompt="Test prompt", texts=["Test text"], output_cls=TestResponse


@@ -143,35 +136,45 @@ class TestLLMParseErrorRecovery:

     @pytest.mark.asyncio
     async def test_multiple_validation_errors_in_feedback(self, test_settings):
-        """Test that validation errors are included in feedback"""
+        """Test that validation errors are included in reflection feedback"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
call_count = {"count": 0}
|
||||
|
||||
call_count = {"count": 0}
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
# Missing title and summary
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("title",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
},
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
},
|
||||
],
|
||||
)
|
||||
else:
|
||||
# Should have schema validation errors in reflection
|
||||
assert "reflection" in kwargs
|
||||
assert (
|
||||
"Schema validation errors" in kwargs["reflection"]
|
||||
or "error" in kwargs["reflection"].lower()
|
||||
)
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
async def acomplete_handler(prompt, *args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
# Missing title and summary
|
||||
return make_completion_response('{"confidence": 0.5}')
|
||||
else:
|
||||
# Should have schema validation errors in prompt
|
||||
assert (
|
||||
"Schema validation errors" in prompt
|
||||
or "error" in prompt.lower()
|
||||
)
|
||||
return make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
@@ -185,17 +188,10 @@ class TestLLMParseErrorRecovery:
|
||||
"""Test that no retry happens when first attempt succeeds"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
return_value=TestResponse(
|
||||
title="Test", summary="Summary", confidence=0.95
|
||||
)
|
||||
)
|
||||
|
||||
@@ -206,195 +202,28 @@ class TestLLMParseErrorRecovery:
|
||||
assert result.title == "Test"
|
||||
assert result.summary == "Summary"
|
||||
assert result.confidence == 0.95
|
||||
assert mock_summarizer.aget_response.call_count == 1
|
||||
assert mock_settings.llm.acomplete.call_count == 1
|
||||
|
||||
|
||||
class TestStructuredOutputWorkflow:
|
||||
"""Direct tests for the StructuredOutputWorkflow"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_workflow_retries_on_validation_error(self):
|
||||
"""Test workflow retries when validation fails"""
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=TestResponse,
|
||||
max_retries=3,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def acomplete_handler(*args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] < 2:
|
||||
return make_completion_response('{"title": "Only title"}')
|
||||
return make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.9}'
|
||||
)
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
|
||||
|
||||
result = await workflow.run(
|
||||
prompt="Extract data",
|
||||
texts=["Some text"],
|
||||
tone_name=None,
|
||||
)
|
||||
|
||||
assert "success" in result
|
||||
assert result["success"].title == "Test"
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_workflow_returns_error_after_max_retries(self):
|
||||
"""Test workflow returns error after exhausting retries"""
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=TestResponse,
|
||||
max_retries=2,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
# Always return invalid JSON
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response('{"invalid": true}')
|
||||
)
|
||||
|
||||
result = await workflow.run(
|
||||
prompt="Extract data",
|
||||
texts=["Some text"],
|
||||
tone_name=None,
|
||||
)
|
||||
|
||||
assert "error" in result
|
||||
# TreeSummarize called once, acomplete called max_retries times
|
||||
assert mock_summarizer.aget_response.call_count == 1
|
||||
assert mock_settings.llm.acomplete.call_count == 2
|
||||
assert mock_settings.llm.astructured_predict.call_count == 1
|
||||
|
||||
|
||||
class TestNetworkErrorRetries:
|
||||
"""Test that network error retries are handled by OpenAILike, not Workflow"""
|
||||
"""Test that network errors are retried by the outer retry() wrapper"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_propagates_after_openai_retries(self, test_settings):
|
||||
"""Test that network errors are retried by OpenAILike and then propagate.
|
||||
|
||||
Network retries are handled by OpenAILike (max_retries=3), not by our
|
||||
StructuredOutputWorkflow. This test verifies that network errors propagate
|
||||
up after OpenAILike exhausts its retries.
|
||||
"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
# Simulate network error from acomplete (after OpenAILike retries exhausted)
|
||||
network_error = ConnectionError("Connection refused")
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=network_error)
|
||||
|
||||
# Network error wrapped in WorkflowRuntimeError
|
||||
with pytest.raises(WorkflowRuntimeError, match="Connection refused"):
|
||||
await llm.get_structured_response(
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
)
|
||||
|
||||
# acomplete called only once - network error propagates, not retried by Workflow
|
||||
assert mock_settings.llm.acomplete.call_count == 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_not_retried_by_workflow(self, test_settings):
|
||||
"""Test that Workflow does NOT retry network errors (OpenAILike handles those).
|
||||
|
||||
This verifies the separation of concerns:
|
||||
- StructuredOutputWorkflow: retries parse/validation errors
|
||||
- OpenAILike: retries network errors (internally, max_retries=3)
|
||||
"""
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=TestResponse,
|
||||
max_retries=3,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
# Network error should propagate immediately, not trigger Workflow retry
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
side_effect=TimeoutError("Request timed out")
|
||||
)
|
||||
|
||||
# Network error wrapped in WorkflowRuntimeError
|
||||
with pytest.raises(WorkflowRuntimeError, match="Request timed out"):
|
||||
await workflow.run(
|
||||
prompt="Extract data",
|
||||
texts=["Some text"],
|
||||
tone_name=None,
|
||||
)
|
||||
|
||||
# Only called once - Workflow doesn't retry network errors
|
||||
assert mock_settings.llm.acomplete.call_count == 1
|
||||
|
||||
|
||||
class TestWorkflowTimeoutRetry:
|
||||
"""Test timeout retry mechanism in get_structured_response"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_timeout_retry_succeeds_on_retry(self, test_settings):
|
||||
"""Test that WorkflowTimeoutError triggers retry and succeeds"""
|
||||
async def test_network_error_retried_by_outer_wrapper(self, test_settings):
|
||||
"""Test that network errors trigger the outer retry wrapper"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def workflow_run_side_effect(*args, **kwargs):
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
raise WorkflowTimeoutError("Operation timed out after 120 seconds")
|
||||
return {
|
||||
"success": TestResponse(
|
||||
title="Test", summary="Summary", confidence=0.95
|
||||
)
|
||||
}
|
||||
raise ConnectionError("Connection refused")
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.StructuredOutputWorkflow") as mock_workflow_class,
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_workflow = MagicMock()
|
||||
mock_workflow.run = AsyncMock(side_effect=workflow_run_side_effect)
|
||||
mock_workflow_class.return_value = mock_workflow
|
||||
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
@@ -402,36 +231,16 @@ class TestWorkflowTimeoutRetry:
|
||||
)
|
||||
|
||||
assert result.title == "Test"
|
||||
assert result.summary == "Summary"
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_timeout_retry_exhausts_after_max_attempts(self, test_settings):
|
||||
"""Test that timeout retry stops after max attempts"""
|
||||
async def test_network_error_exhausts_retries(self, test_settings):
|
||||
"""Test that persistent network errors exhaust retry attempts"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def workflow_run_side_effect(*args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
raise WorkflowTimeoutError("Operation timed out after 120 seconds")
|
||||
|
||||
with (
|
||||
patch("reflector.llm.StructuredOutputWorkflow") as mock_workflow_class,
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_workflow = MagicMock()
|
||||
mock_workflow.run = AsyncMock(side_effect=workflow_run_side_effect)
|
||||
mock_workflow_class.return_value = mock_workflow
|
||||
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=ConnectionError("Connection refused")
|
||||
)
|
||||
|
||||
with pytest.raises(RetryException, match="Retry attempts exceeded"):
|
||||
@@ -439,41 +248,186 @@ class TestWorkflowTimeoutRetry:
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
)
|
||||
|
||||
assert call_count["count"] == 3
|
||||
# 3 retry attempts
|
||||
assert mock_settings.llm.astructured_predict.call_count == 3
|
||||
|
||||
|
||||
class TestGetResponseRetries:
|
||||
"""Test that get_response() uses the same retry() wrapper for transient errors."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_timeout_retry_with_backoff(self, test_settings):
|
||||
"""Test that exponential backoff is applied between retries"""
|
||||
async def test_get_response_retries_on_connection_error(self, test_settings):
|
||||
"""Test that get_response retries on ConnectionError and returns on success."""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_times = []
|
||||
mock_instance = MagicMock()
|
||||
mock_instance.aget_response = AsyncMock(
|
||||
side_effect=[
|
||||
ConnectionError("Connection refused"),
|
||||
" Summary text ",
|
||||
]
|
||||
)
|
||||
|
||||
async def workflow_run_side_effect(*args, **kwargs):
|
||||
call_times.append(monotonic())
|
||||
if len(call_times) < 3:
|
||||
raise WorkflowTimeoutError("Operation timed out after 120 seconds")
|
||||
return {
|
||||
"success": TestResponse(
|
||||
title="Test", summary="Summary", confidence=0.95
|
||||
with patch("reflector.llm.TreeSummarize", return_value=mock_instance):
|
||||
result = await llm.get_response("Prompt", ["text"])
|
||||
|
||||
assert result == "Summary text"
|
||||
assert mock_instance.aget_response.call_count == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_response_exhausts_retries(self, test_settings):
|
||||
"""Test that get_response raises RetryException after retry attempts exceeded."""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
mock_instance = MagicMock()
|
||||
mock_instance.aget_response = AsyncMock(
|
||||
side_effect=ConnectionError("Connection refused")
|
||||
)
|
||||
|
||||
with patch("reflector.llm.TreeSummarize", return_value=mock_instance):
|
||||
with pytest.raises(RetryException, match="Retry attempts exceeded"):
|
||||
await llm.get_response("Prompt", ["text"])
|
||||
|
||||
assert mock_instance.aget_response.call_count == 3
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_response_returns_empty_string_without_retry(self, test_settings):
|
||||
"""Empty or whitespace-only LLM response must return '' and not raise RetryException.
|
||||
|
||||
retry() must return falsy results (e.g. '' from get_response) instead of
|
||||
treating them as 'no result' and retrying until RetryException.
|
||||
"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
mock_instance = MagicMock()
|
||||
mock_instance.aget_response = AsyncMock(return_value=" \n ") # strip() -> ""
|
||||
|
||||
with patch("reflector.llm.TreeSummarize", return_value=mock_instance):
|
||||
result = await llm.get_response("Prompt", ["text"])
|
||||
|
||||
assert result == ""
|
||||
assert mock_instance.aget_response.call_count == 1
|
||||
|
||||
|
||||
class TestTextsInclusion:
|
||||
"""Test that texts parameter is included in the prompt sent to astructured_predict"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_texts_included_in_prompt(self, test_settings):
|
||||
"""Test that texts content is appended to the prompt for astructured_predict"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
captured_prompts = []
|
||||
|
||||
async def capture_prompt(output_cls, prompt_tmpl, **kwargs):
|
||||
captured_prompts.append(kwargs.get("user_prompt", ""))
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=capture_prompt
|
||||
)
|
||||
|
||||
await llm.get_structured_response(
|
||||
prompt="Identify all participants",
|
||||
texts=["Alice: Hello everyone", "Bob: Hi Alice"],
|
||||
output_cls=TestResponse,
|
||||
)
|
||||
|
||||
assert len(captured_prompts) == 1
|
||||
prompt_sent = captured_prompts[0]
|
||||
assert "Identify all participants" in prompt_sent
|
||||
assert "Alice: Hello everyone" in prompt_sent
|
||||
assert "Bob: Hi Alice" in prompt_sent
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_empty_texts_uses_prompt_only(self, test_settings):
|
||||
"""Test that empty texts list sends only the prompt"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
captured_prompts = []
|
||||
|
||||
async def capture_prompt(output_cls, prompt_tmpl, **kwargs):
|
||||
captured_prompts.append(kwargs.get("user_prompt", ""))
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=capture_prompt
|
||||
)
|
||||
|
||||
await llm.get_structured_response(
|
||||
prompt="Identify all participants",
|
||||
texts=[],
|
||||
output_cls=TestResponse,
|
||||
)
|
||||
|
||||
assert len(captured_prompts) == 1
|
||||
assert captured_prompts[0] == "Identify all participants"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_texts_included_in_reflection_retry(self, test_settings):
|
||||
"""Test that texts are included in the prompt even during reflection retries"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
captured_prompts = []
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def capture_and_fail_first(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
captured_prompts.append(kwargs.get("user_prompt", ""))
|
||||
if call_count["count"] == 1:
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
}
|
||||
],
|
||||
)
|
||||
}
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.StructuredOutputWorkflow") as mock_workflow_class,
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_workflow = MagicMock()
|
||||
mock_workflow.run = AsyncMock(side_effect=workflow_run_side_effect)
|
||||
mock_workflow_class.return_value = mock_workflow
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=capture_and_fail_first
|
||||
)
|
||||
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
await llm.get_structured_response(
|
||||
prompt="Summarize this",
|
||||
texts=["The meeting covered project updates"],
|
||||
output_cls=TestResponse,
|
||||
)
|
||||
|
||||
# Both first attempt and reflection retry should include the texts
|
||||
assert len(captured_prompts) == 2
|
||||
for prompt_sent in captured_prompts:
|
||||
assert "Summarize this" in prompt_sent
|
||||
assert "The meeting covered project updates" in prompt_sent
|
||||
|
||||
|
||||
class TestReflectionRetryBackoff:
|
||||
"""Test the reflection retry timing behavior"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_value_error_triggers_reflection(self, test_settings):
|
||||
"""Test that ValueError (parse failure) also triggers reflection retry"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
raise ValueError("Could not parse output")
|
||||
assert "reflection" in kwargs
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
@@ -481,8 +435,20 @@ class TestWorkflowTimeoutRetry:
|
||||
)
|
||||
|
||||
assert result.title == "Test"
|
||||
if len(call_times) >= 2:
|
||||
time_between_calls = call_times[1] - call_times[0]
|
||||
assert (
|
||||
time_between_calls >= 1.5
|
||||
), f"Expected ~2s backoff, got {time_between_calls}s"
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_format_validation_error_method(self, test_settings):
|
||||
"""Test _format_validation_error produces correct feedback"""
|
||||
# ValidationError
|
||||
try:
|
||||
TestResponse(title="x", summary="y", confidence=5.0) # confidence > 1
|
||||
except ValidationError as e:
|
||||
result = LLM._format_validation_error(e)
|
||||
assert "Schema validation errors" in result
|
||||
assert "confidence" in result
|
||||
|
||||
# ValueError
|
||||
result = LLM._format_validation_error(ValueError("bad input"))
|
||||
assert "Parse error:" in result
|
||||
assert "bad input" in result
|
||||
|
||||
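The retry shape these tests pin down, as a minimal sketch — the prompt template, exception class, and retry budget here are assumptions drawn from the tests above, not the actual reflector.llm source:

from llama_index.core import Settings
from llama_index.core.prompts import PromptTemplate
from pydantic import ValidationError


class LLMParseError(Exception):
    """Raised once reflection retries are exhausted (assumed shape)."""


async def structured_with_reflection(prompt, texts, output_cls, max_retries=3):
    # texts are appended to the prompt, per TestTextsInclusion
    user_prompt = "\n\n".join([prompt, *texts]) if texts else prompt
    template = PromptTemplate("{user_prompt}\n{reflection}")
    reflection = ""
    for _ in range(max_retries + 1):
        try:
            return await Settings.llm.astructured_predict(
                output_cls, template, user_prompt=user_prompt, reflection=reflection
            )
        except (ValidationError, ValueError) as exc:
            # Feed the failure back so the next attempt can self-correct
            reflection = f"Your previous response could not be parsed. Error: {exc}"
    raise LLMParseError(f"Failed to parse {output_cls.__name__}")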
450
server/tests/test_processors_cpu.py
Normal file
@@ -0,0 +1,450 @@
"""
Tests for in-process processor backends (--cpu mode).

All ML model calls are mocked — no actual model loading needed.
Tests verify processor registration, wiring, error handling, and data flow.
"""

import os
import tempfile
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from reflector.processors.file_diarization import (
    FileDiarizationInput,
    FileDiarizationOutput,
)
from reflector.processors.types import (
    AudioDiarizationInput,
    TitleSummaryWithId,
    Transcript,
    Word,
)

# ── Registration Tests ──────────────────────────────────────────────────


def test_audio_diarization_pyannote_registers():
    """Verify AudioDiarizationPyannoteProcessor registers with 'pyannote' backend."""
    # Importing the module triggers registration
    import reflector.processors.audio_diarization_pyannote  # noqa: F401
    from reflector.processors.audio_diarization_auto import (
        AudioDiarizationAutoProcessor,
    )

    assert "pyannote" in AudioDiarizationAutoProcessor._registry


def test_file_diarization_pyannote_registers():
    """Verify FileDiarizationPyannoteProcessor registers with 'pyannote' backend."""
    import reflector.processors.file_diarization_pyannote  # noqa: F401
    from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor

    assert "pyannote" in FileDiarizationAutoProcessor._registry


def test_transcript_translator_marian_registers():
    """Verify TranscriptTranslatorMarianProcessor registers with 'marian' backend."""
    import reflector.processors.transcript_translator_marian  # noqa: F401
    from reflector.processors.transcript_translator_auto import (
        TranscriptTranslatorAutoProcessor,
    )

    assert "marian" in TranscriptTranslatorAutoProcessor._registry


def test_file_transcript_whisper_registers():
    """Verify FileTranscriptWhisperProcessor registers with 'whisper' backend."""
    import reflector.processors.file_transcript_whisper  # noqa: F401
    from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor

    assert "whisper" in FileTranscriptAutoProcessor._registry
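# The four registration tests above rely on import-time registration into a
# per-family `_registry` dict on each Auto processor. A minimal sketch of that
# pattern (an assumed shape, not the real reflector.processors code):
class AutoProcessorSketch:
    _registry: dict[str, type] = {}

    @classmethod
    def register(cls, name: str, processor_cls: type) -> None:
        # Called at module import time by each backend module
        cls._registry[name] = processor_cls

    @classmethod
    def get_instance(cls, name: str):
        try:
            return cls._registry[name]()
        except KeyError:
            raise ValueError(f"unknown backend: {name!r}") from None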
# ── Audio Download Utility Tests ────────────────────────────────────────


@pytest.mark.asyncio
async def test_download_audio_to_temp_success():
    """Verify download_audio_to_temp downloads to a temp file and returns path."""
    from reflector.processors._audio_download import download_audio_to_temp

    mock_response = MagicMock()
    mock_response.status_code = 200
    mock_response.headers = {"content-type": "audio/wav"}
    mock_response.iter_content.return_value = [b"fake audio data"]
    mock_response.raise_for_status = MagicMock()

    with patch("reflector.processors._audio_download.requests.get") as mock_get:
        mock_get.return_value = mock_response

        result = await download_audio_to_temp("https://example.com/test.wav")

    assert isinstance(result, Path)
    assert result.exists()
    assert result.read_bytes() == b"fake audio data"
    assert result.suffix == ".wav"

    # Cleanup
    os.unlink(result)


@pytest.mark.asyncio
async def test_download_audio_to_temp_cleanup_on_error():
    """Verify temp file is cleaned up when download fails mid-write."""
    from reflector.processors._audio_download import download_audio_to_temp

    mock_response = MagicMock()
    mock_response.headers = {"content-type": "audio/wav"}
    mock_response.raise_for_status = MagicMock()

    def fail_iter(*args, **kwargs):
        raise ConnectionError("Download interrupted")

    mock_response.iter_content = fail_iter

    with patch("reflector.processors._audio_download.requests.get") as mock_get:
        mock_get.return_value = mock_response

        with pytest.raises(ConnectionError, match="Download interrupted"):
            await download_audio_to_temp("https://example.com/test.wav")
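# The two tests above pin down download_audio_to_temp's contract: stream the
# response into a temp file named after the detected extension, and unlink the
# temp file if the download fails mid-write. A minimal sketch of that contract
# (assumed implementation — the chunk size and error handling details are
# guesses, not the real reflector source):
import requests


async def download_audio_to_temp_sketch(url: str) -> Path:
    response = requests.get(url, stream=True)
    response.raise_for_status()
    suffix = _detect_extension(url, response.headers.get("content-type", ""))
    fd, name = tempfile.mkstemp(suffix=suffix)
    try:
        with os.fdopen(fd, "wb") as fh:
            for chunk in response.iter_content(chunk_size=8192):
                fh.write(chunk)
    except BaseException:
        os.unlink(name)  # cleanup on mid-write failure, per the second test
        raise
    return Path(name)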
def test_detect_extension_from_url():
    """Verify extension detection from URL path."""
    from reflector.processors._audio_download import _detect_extension

    assert _detect_extension("https://example.com/test.wav", "") == ".wav"
    assert _detect_extension("https://example.com/test.mp3?signed=1", "") == ".mp3"
    assert _detect_extension("https://example.com/test.webm", "") == ".webm"


def test_detect_extension_from_content_type():
    """Verify extension detection from content-type header."""
    from reflector.processors._audio_download import _detect_extension

    assert _detect_extension("https://s3.aws/uuid", "audio/mpeg") == ".mp3"
    assert _detect_extension("https://s3.aws/uuid", "audio/wav") == ".wav"
    assert _detect_extension("https://s3.aws/uuid", "audio/webm") == ".webm"


def test_detect_extension_fallback():
    """Verify fallback extension when neither URL nor content-type is recognized."""
    from reflector.processors._audio_download import _detect_extension

    assert (
        _detect_extension("https://s3.aws/uuid", "application/octet-stream") == ".audio"
    )
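# A sketch of _detect_extension consistent with the three tests above (an
# assumed implementation; only the asserted mappings are pinned down):
from urllib.parse import urlparse

_CONTENT_TYPE_SUFFIXES = {"audio/mpeg": ".mp3", "audio/wav": ".wav", "audio/webm": ".webm"}


def _detect_extension_sketch(url: str, content_type: str) -> str:
    # Extension taken from the URL path first; the query string is ignored
    suffix = Path(urlparse(url).path).suffix.lower()
    if suffix in {".wav", ".mp3", ".webm"}:
        return suffix
    return _CONTENT_TYPE_SUFFIXES.get(content_type, ".audio")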
# ── Audio Diarization Pyannote Processor Tests ──────────────────────────


@pytest.mark.asyncio
async def test_audio_diarization_pyannote_diarize():
    """Verify pyannote audio diarization downloads, diarizes, and cleans up."""
    from reflector.processors.audio_diarization_pyannote import (
        AudioDiarizationPyannoteProcessor,
    )

    mock_diarization_result = {
        "diarization": [
            {"start": 0.0, "end": 2.5, "speaker": 0},
            {"start": 2.5, "end": 5.0, "speaker": 1},
        ]
    }

    # Create a temp file to simulate download
    tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
    tmp.write(b"fake audio")
    tmp.close()
    tmp_path = Path(tmp.name)

    processor = AudioDiarizationPyannoteProcessor()

    with (
        patch(
            "reflector.processors.audio_diarization_pyannote.download_audio_to_temp",
            new_callable=AsyncMock,
            return_value=tmp_path,
        ),
        patch(
            "reflector.processors.audio_diarization_pyannote.diarization_service"
        ) as mock_svc,
    ):
        mock_svc.diarize_file.return_value = mock_diarization_result

        data = AudioDiarizationInput(
            audio_url="https://example.com/test.wav",
            topics=[
                TitleSummaryWithId(
                    id="topic-1",
                    title="Test Topic",
                    summary="A test topic",
                    timestamp=0.0,
                    duration=5.0,
                    transcript=Transcript(
                        words=[Word(text="hello", start=0.0, end=1.0)]
                    ),
                )
            ],
        )
        result = await processor._diarize(data)

    assert result == mock_diarization_result["diarization"]
    mock_svc.diarize_file.assert_called_once()


# ── File Diarization Pyannote Processor Tests ───────────────────────────


@pytest.mark.asyncio
async def test_file_diarization_pyannote_diarize():
    """Verify pyannote file diarization returns FileDiarizationOutput."""
    from reflector.processors.file_diarization_pyannote import (
        FileDiarizationPyannoteProcessor,
    )

    mock_diarization_result = {
        "diarization": [
            {"start": 0.0, "end": 3.0, "speaker": 0},
            {"start": 3.0, "end": 6.0, "speaker": 1},
        ]
    }

    tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
    tmp.write(b"fake audio")
    tmp.close()
    tmp_path = Path(tmp.name)

    processor = FileDiarizationPyannoteProcessor()

    with (
        patch(
            "reflector.processors.file_diarization_pyannote.download_audio_to_temp",
            new_callable=AsyncMock,
            return_value=tmp_path,
        ),
        patch(
            "reflector.processors.file_diarization_pyannote.diarization_service"
        ) as mock_svc,
    ):
        mock_svc.diarize_file.return_value = mock_diarization_result

        data = FileDiarizationInput(audio_url="https://example.com/test.wav")
        result = await processor._diarize(data)

    assert isinstance(result, FileDiarizationOutput)
    assert len(result.diarization) == 2
    assert result.diarization[0]["start"] == 0.0
    assert result.diarization[1]["speaker"] == 1


# ── Transcript Translator Marian Processor Tests ───────────────────────


@pytest.mark.asyncio
async def test_transcript_translator_marian_translate():
    """Verify MarianMT translator calls service and extracts translation."""
    from reflector.processors.transcript_translator_marian import (
        TranscriptTranslatorMarianProcessor,
    )

    mock_result = {"text": {"en": "Hello world", "fr": "Bonjour le monde"}}

    processor = TranscriptTranslatorMarianProcessor()

    def fake_get_pref(key, default=None):
        prefs = {"audio:source_language": "en", "audio:target_language": "fr"}
        return prefs.get(key, default)

    with (
        patch.object(processor, "get_pref", side_effect=fake_get_pref),
        patch(
            "reflector.processors.transcript_translator_marian.translator_service"
        ) as mock_svc,
    ):
        mock_svc.translate.return_value = mock_result

        result = await processor._translate("Hello world")

    assert result == "Bonjour le monde"
    mock_svc.translate.assert_called_once_with("Hello world", "en", "fr")


@pytest.mark.asyncio
async def test_transcript_translator_marian_no_translation():
    """Verify translator returns None when target language not in result."""
    from reflector.processors.transcript_translator_marian import (
        TranscriptTranslatorMarianProcessor,
    )

    mock_result = {"text": {"en": "Hello world"}}

    processor = TranscriptTranslatorMarianProcessor()

    def fake_get_pref(key, default=None):
        prefs = {"audio:source_language": "en", "audio:target_language": "fr"}
        return prefs.get(key, default)

    with (
        patch.object(processor, "get_pref", side_effect=fake_get_pref),
        patch(
            "reflector.processors.transcript_translator_marian.translator_service"
        ) as mock_svc,
    ):
        mock_svc.translate.return_value = mock_result

        result = await processor._translate("Hello world")

    assert result is None


# ── File Transcript Whisper Processor Tests ─────────────────────────────


@pytest.mark.asyncio
async def test_file_transcript_whisper_transcript():
    """Verify whisper file processor downloads, transcribes, and returns Transcript."""
    from reflector.processors.file_transcript import FileTranscriptInput
    from reflector.processors.file_transcript_whisper import (
        FileTranscriptWhisperProcessor,
    )

    tmp = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
    tmp.write(b"fake audio")
    tmp.close()
    tmp_path = Path(tmp.name)

    processor = FileTranscriptWhisperProcessor()

    # Mock the blocking transcription method
    mock_transcript = Transcript(
        words=[
            Word(text="Hello", start=0.0, end=0.5),
            Word(text=" world", start=0.5, end=1.0),
        ]
    )

    with (
        patch(
            "reflector.processors.file_transcript_whisper.download_audio_to_temp",
            new_callable=AsyncMock,
            return_value=tmp_path,
        ),
        patch.object(
            processor,
            "_transcribe_file_blocking",
            return_value=mock_transcript,
        ),
    ):
        data = FileTranscriptInput(
            audio_url="https://example.com/test.wav", language="en"
        )
        result = await processor._transcript(data)

    assert isinstance(result, Transcript)
    assert len(result.words) == 2
    assert result.words[0].text == "Hello"


# ── VAD Helper Tests ────────────────────────────────────────────────────


def test_enforce_word_timing_constraints():
    """Verify word timing enforcement prevents overlapping times."""
    from reflector.processors.file_transcript_whisper import (
        _enforce_word_timing_constraints,
    )

    words = [
        {"word": "hello", "start": 0.0, "end": 1.5},
        {"word": "world", "start": 1.0, "end": 2.0},  # overlaps with previous
        {"word": "test", "start": 2.0, "end": 3.0},
    ]

    result = _enforce_word_timing_constraints(words)

    assert result[0]["end"] == 1.0  # Clamped to next word's start
    assert result[1]["end"] == 2.0  # Clamped to next word's start
    assert result[2]["end"] == 3.0  # Last word unchanged


def test_enforce_word_timing_constraints_empty():
    """Verify timing enforcement handles empty and single-word lists."""
    from reflector.processors.file_transcript_whisper import (
        _enforce_word_timing_constraints,
    )

    assert _enforce_word_timing_constraints([]) == []
    assert _enforce_word_timing_constraints([{"word": "a", "start": 0, "end": 1}]) == [
        {"word": "a", "start": 0, "end": 1}
    ]
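# The clamping rule the two tests above verify, as a sketch (assumed
# implementation; the real helper lives in file_transcript_whisper):
def enforce_word_timing_sketch(words: list[dict]) -> list[dict]:
    for current, nxt in zip(words, words[1:]):
        # A word may not run past the start of the word that follows it
        if current["end"] > nxt["start"]:
            current["end"] = nxt["start"]
    return words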
def test_pad_audio_short():
    """Verify short audio gets padded with silence."""
    import numpy as np

    from reflector.processors.file_transcript_whisper import _pad_audio

    short_audio = np.zeros(100, dtype=np.float32)  # Very short
    result = _pad_audio(short_audio, sample_rate=16000)

    # Should be padded to at least silence_padding duration
    assert len(result) > len(short_audio)


def test_pad_audio_long():
    """Verify long audio is not padded."""
    import numpy as np

    from reflector.processors.file_transcript_whisper import _pad_audio

    long_audio = np.zeros(32000, dtype=np.float32)  # 2 seconds
    result = _pad_audio(long_audio, sample_rate=16000)

    assert len(result) == len(long_audio)
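# Sketch of the padding rule (assumed implementation; the 1-second minimum is
# a guess — the tests only pin "100 samples grows, 2 seconds does not"):
import numpy as np


def pad_audio_sketch(audio: np.ndarray, sample_rate: int, min_seconds: float = 1.0) -> np.ndarray:
    min_samples = int(min_seconds * sample_rate)
    if len(audio) >= min_samples:
        return audio
    # Append silence until the clip reaches the minimum duration
    padding = np.zeros(min_samples - len(audio), dtype=audio.dtype)
    return np.concatenate([audio, padding])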
# ── Translator Service Tests ────────────────────────────────────────────


def test_translator_service_resolve_model():
    """Verify model resolution for known and unknown language pairs."""
    from reflector.processors._marian_translator_service import MarianTranslatorService

    svc = MarianTranslatorService()

    assert svc._resolve_model_name("en", "fr") == "Helsinki-NLP/opus-mt-en-fr"
    assert svc._resolve_model_name("es", "en") == "Helsinki-NLP/opus-mt-es-en"
    assert svc._resolve_model_name("en", "de") == "Helsinki-NLP/opus-mt-en-de"
    # Unknown pair falls back to en->fr
    assert svc._resolve_model_name("ja", "ko") == "Helsinki-NLP/opus-mt-en-fr"
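# Sketch matching the asserted mapping (assumed implementation; the fallback
# model is pinned by the test, the supported-pair table is illustrative):
_MARIAN_FALLBACK = "Helsinki-NLP/opus-mt-en-fr"
_MARIAN_PAIRS = {("en", "fr"), ("es", "en"), ("en", "de")}


def resolve_model_name_sketch(source: str, target: str) -> str:
    if (source, target) in _MARIAN_PAIRS:
        return f"Helsinki-NLP/opus-mt-{source}-{target}"
    return _MARIAN_FALLBACK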
# ── Diarization Service Tests ───────────────────────────────────────────


def test_diarization_service_singleton():
    """Verify diarization_service is a module-level singleton."""
    from reflector.processors._pyannote_diarization_service import (
        PyannoteDiarizationService,
        diarization_service,
    )

    assert isinstance(diarization_service, PyannoteDiarizationService)
    assert diarization_service._pipeline is None  # Not loaded until first use


def test_translator_service_singleton():
    """Verify translator_service is a module-level singleton."""
    from reflector.processors._marian_translator_service import (
        MarianTranslatorService,
        translator_service,
    )

    assert isinstance(translator_service, MarianTranslatorService)
    assert translator_service._pipeline is None  # Not loaded until first use
@@ -49,6 +49,15 @@ async def test_retry_httpx(httpx_mock):
    )


@pytest.mark.asyncio
async def test_retry_402_stops_by_default(httpx_mock):
    """402 (payment required / no credits) is in default retry_httpx_status_stop — do not retry."""
    httpx_mock.add_response(status_code=402, json={"error": "insufficient_credits"})
    async with httpx.AsyncClient() as client:
        with pytest.raises(RetryHTTPException):
            await retry(client.get)("https://test_url", retry_timeout=5)
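# The test above fixes the calling convention: retry() wraps an awaitable
# callable and is invoked as retry(fn)(*args, retry_timeout=...). A 402 hits
# the default retry_httpx_status_stop list, so it fails fast with
# RetryHTTPException instead of burning retry attempts. A usage sketch (the
# endpoint URL is illustrative; retry/RetryHTTPException are the same names
# imported by these tests):
async def fetch_with_fast_fail(client: httpx.AsyncClient) -> httpx.Response:
    try:
        return await retry(client.get)("https://api.example.com/credits", retry_timeout=5)
    except RetryHTTPException:
        raise  # non-retryable status (e.g. 402) surfaces immediately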
@pytest.mark.asyncio
async def test_retry_normal():
    left = 3

@@ -367,3 +367,390 @@ async def test_aws_storage_none_endpoint_url():
    assert storage.base_url == "https://reflector-bucket.s3.amazonaws.com/"
    # No s3 addressing_style override — boto_config should only have retries
    assert not hasattr(storage.boto_config, "s3") or storage.boto_config.s3 is None


# --- Tests for get_source_storage() ---


def test_get_source_storage_daily_with_credentials():
    """Daily platform with access keys returns AwsStorage with Daily credentials."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID = "daily-key"
        mock_settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY = "daily-secret"
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = "us-west-2"

        from reflector.storage import get_source_storage

        storage = get_source_storage("daily")

        assert isinstance(storage, AwsStorage)
        assert storage._bucket_name == "daily-bucket"
        assert storage._region == "us-west-2"
        assert storage._access_key_id == "daily-key"
        assert storage._secret_access_key == "daily-secret"
        assert storage._endpoint_url is None


def test_get_source_storage_daily_falls_back_without_credentials():
    """Daily platform without access keys falls back to transcript storage."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID = None
        mock_settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY = None
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.TRANSCRIPT_STORAGE_BACKEND = "aws"
        mock_settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME = "transcript-bucket"
        mock_settings.TRANSCRIPT_STORAGE_AWS_REGION = "us-east-1"
        mock_settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID = "transcript-key"
        mock_settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY = "transcript-secret"
        mock_settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL = None

        from reflector.storage import get_source_storage

        with patch("reflector.storage.get_transcripts_storage") as mock_get_transcripts:
            fallback = AwsStorage(
                aws_bucket_name="transcript-bucket",
                aws_region="us-east-1",
                aws_access_key_id="transcript-key",
                aws_secret_access_key="transcript-secret",
            )
            mock_get_transcripts.return_value = fallback

            storage = get_source_storage("daily")

            mock_get_transcripts.assert_called_once()
            assert storage is fallback


def test_get_source_storage_whereby_with_credentials():
    """Whereby platform with access keys returns AwsStorage with Whereby credentials."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID = "whereby-key"
        mock_settings.WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY = "whereby-secret"
        mock_settings.WHEREBY_STORAGE_AWS_BUCKET_NAME = "whereby-bucket"
        mock_settings.WHEREBY_STORAGE_AWS_REGION = "eu-west-1"

        from reflector.storage import get_source_storage

        storage = get_source_storage("whereby")

        assert isinstance(storage, AwsStorage)
        assert storage._bucket_name == "whereby-bucket"
        assert storage._region == "eu-west-1"
        assert storage._access_key_id == "whereby-key"
        assert storage._secret_access_key == "whereby-secret"


def test_get_source_storage_unknown_platform_falls_back():
    """Unknown platform falls back to transcript storage."""
    with patch("reflector.storage.settings"):
        from reflector.storage import get_source_storage

        with patch("reflector.storage.get_transcripts_storage") as mock_get_transcripts:
            fallback = MagicMock()
            mock_get_transcripts.return_value = fallback

            storage = get_source_storage("unknown-platform")

            mock_get_transcripts.assert_called_once()
            assert storage is fallback
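# The dispatch these tests pin down, as a sketch (assumed shape; the real
# factory is reflector.storage.get_source_storage):
def get_source_storage_sketch(platform: str):
    if platform == "daily" and settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID:
        return AwsStorage(
            aws_bucket_name=settings.DAILYCO_STORAGE_AWS_BUCKET_NAME,
            aws_region=settings.DAILYCO_STORAGE_AWS_REGION or "us-east-1",
            aws_access_key_id=settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY,
        )
    if platform == "whereby" and settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID:
        return AwsStorage(
            aws_bucket_name=settings.WHEREBY_STORAGE_AWS_BUCKET_NAME,
            aws_region=settings.WHEREBY_STORAGE_AWS_REGION,
            aws_access_key_id=settings.WHEREBY_STORAGE_AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY,
        )
    return get_transcripts_storage()  # fallback, as the tests assert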
@pytest.mark.asyncio
async def test_source_storage_presigns_for_correct_bucket():
    """Source storage presigns URLs using the platform's credentials and the override bucket."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID = "daily-key"
        mock_settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY = "daily-secret"
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = "us-west-2"

        from reflector.storage import get_source_storage

        storage = get_source_storage("daily")

    mock_client = AsyncMock()
    mock_client.generate_presigned_url = AsyncMock(
        return_value="https://daily-bucket.s3.amazonaws.com/track.webm?signed"
    )
    mock_client.__aenter__ = AsyncMock(return_value=mock_client)
    mock_client.__aexit__ = AsyncMock(return_value=None)

    with patch.object(storage.session, "client", return_value=mock_client):
        url = await storage.get_file_url(
            "track.webm",
            operation="get_object",
            expires_in=3600,
            bucket="override-bucket",
        )

    assert "track.webm" in url
    mock_client.generate_presigned_url.assert_called_once()
    call_kwargs = mock_client.generate_presigned_url.call_args
    params = call_kwargs[1].get("Params") or call_kwargs[0][1]
    assert params["Bucket"] == "override-bucket"
    assert params["Key"] == "track.webm"


def test_get_source_storage_daily_default_region():
    """Daily platform without region falls back to us-east-1."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID = "daily-key"
        mock_settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY = "daily-secret"
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = None

        from reflector.storage import get_source_storage

        storage = get_source_storage("daily")

        assert isinstance(storage, AwsStorage)
        assert storage._region == "us-east-1"


# --- Tests for get_dailyco_storage() ---


def test_get_dailyco_storage_with_role_arn():
    """get_dailyco_storage returns AwsStorage with role_arn for Daily API."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = "us-west-2"
        mock_settings.DAILYCO_STORAGE_AWS_ROLE_ARN = "arn:aws:iam::123:role/DailyAccess"

        from reflector.storage import get_dailyco_storage

        storage = get_dailyco_storage()

        assert isinstance(storage, AwsStorage)
        assert storage._bucket_name == "daily-bucket"
        assert storage._region == "us-west-2"
        assert storage._role_arn == "arn:aws:iam::123:role/DailyAccess"
        assert storage._access_key_id is None
        assert storage._secret_access_key is None


def test_get_dailyco_storage_no_conflict_when_access_keys_also_set():
    """get_dailyco_storage ignores access keys even when set (avoids mixed-auth error).

    This is the key regression test: DAILYCO_STORAGE_AWS_ACCESS_KEY_ID and
    SECRET_ACCESS_KEY are for get_source_storage(), not for get_dailyco_storage().
    """
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = "us-west-2"
        mock_settings.DAILYCO_STORAGE_AWS_ROLE_ARN = "arn:aws:iam::123:role/DailyAccess"
        # These are set for get_source_storage but must NOT leak into get_dailyco_storage
        mock_settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID = "AKIA-worker-key"
        mock_settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY = "worker-secret"

        from reflector.storage import get_dailyco_storage

        # Must NOT raise "cannot use both aws_role_arn and access keys"
        storage = get_dailyco_storage()

        assert isinstance(storage, AwsStorage)
        assert storage._role_arn == "arn:aws:iam::123:role/DailyAccess"
        assert storage._access_key_id is None
        assert storage._secret_access_key is None


def test_get_dailyco_storage_default_region():
    """get_dailyco_storage falls back to us-east-1 when region is None."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = None
        mock_settings.DAILYCO_STORAGE_AWS_ROLE_ARN = "arn:aws:iam::123:role/DailyAccess"

        from reflector.storage import get_dailyco_storage

        storage = get_dailyco_storage()

        assert storage._region == "us-east-1"


def test_get_dailyco_storage_raises_without_bucket():
    """get_dailyco_storage raises ValueError when bucket is not configured."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = None

        from reflector.storage import get_dailyco_storage

        with pytest.raises(
            ValueError, match="DAILYCO_STORAGE_AWS_BUCKET_NAME required"
        ):
            get_dailyco_storage()


def test_get_dailyco_storage_exposes_role_credential():
    """get_dailyco_storage().role_credential returns the role ARN."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-bucket"
        mock_settings.DAILYCO_STORAGE_AWS_REGION = "us-east-1"
        mock_settings.DAILYCO_STORAGE_AWS_ROLE_ARN = "arn:aws:iam::123:role/DailyAccess"

        from reflector.storage import get_dailyco_storage

        storage = get_dailyco_storage()

        assert storage.role_credential == "arn:aws:iam::123:role/DailyAccess"
        assert storage.bucket_name == "daily-bucket"
        assert storage.region == "us-east-1"


# --- Tests for get_whereby_storage() ---


def test_get_whereby_storage_with_access_keys():
    """get_whereby_storage returns AwsStorage with Whereby access keys."""
    whereby_settings = [
        ("WHEREBY_STORAGE_AWS_BUCKET_NAME", "whereby-bucket"),
        ("WHEREBY_STORAGE_AWS_REGION", "eu-west-1"),
        ("WHEREBY_STORAGE_AWS_ACCESS_KEY_ID", "whereby-key"),
        ("WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY", "whereby-secret"),
    ]
    mock_settings = MagicMock()
    mock_settings.WHEREBY_STORAGE_AWS_BUCKET_NAME = "whereby-bucket"
    mock_settings.__iter__ = MagicMock(return_value=iter(whereby_settings))

    # Patch both settings references: __init__.py and base.py
    with (
        patch("reflector.storage.settings", mock_settings),
        patch("reflector.storage.base.settings", mock_settings),
    ):
        from reflector.storage import get_whereby_storage

        storage = get_whereby_storage()

        assert isinstance(storage, AwsStorage)
        assert storage._bucket_name == "whereby-bucket"
        assert storage._region == "eu-west-1"
        assert storage._access_key_id == "whereby-key"
        assert storage._secret_access_key == "whereby-secret"


def test_get_whereby_storage_raises_without_bucket():
    """get_whereby_storage raises ValueError when bucket is not configured."""
    with patch("reflector.storage.settings") as mock_settings:
        mock_settings.WHEREBY_STORAGE_AWS_BUCKET_NAME = None

        from reflector.storage import get_whereby_storage

        with pytest.raises(
            ValueError, match="WHEREBY_STORAGE_AWS_BUCKET_NAME required"
        ):
            get_whereby_storage()


# --- Tests for get_transcripts_storage() ---


def test_get_transcripts_storage_with_garage():
    """get_transcripts_storage returns AwsStorage configured for Garage (custom endpoint)."""
    garage_settings = [
        ("TRANSCRIPT_STORAGE_AWS_BUCKET_NAME", "reflector-media"),
        ("TRANSCRIPT_STORAGE_AWS_REGION", "garage"),
        ("TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID", "GK-garage-key"),
        ("TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY", "garage-secret"),
        ("TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL", "http://garage:3900"),
    ]
    mock_settings = MagicMock()
    mock_settings.TRANSCRIPT_STORAGE_BACKEND = "aws"
    mock_settings.__iter__ = MagicMock(return_value=iter(garage_settings))

    with (
        patch("reflector.storage.settings", mock_settings),
        patch("reflector.storage.base.settings", mock_settings),
    ):
        from reflector.storage import get_transcripts_storage

        storage = get_transcripts_storage()

        assert isinstance(storage, AwsStorage)
        assert storage._bucket_name == "reflector-media"
        assert storage._endpoint_url == "http://garage:3900"
        assert storage._access_key_id == "GK-garage-key"
        assert storage.boto_config.s3["addressing_style"] == "path"


def test_get_transcripts_storage_with_vanilla_aws():
    """get_transcripts_storage returns AwsStorage configured for real AWS S3."""
    aws_settings = [
        ("TRANSCRIPT_STORAGE_AWS_BUCKET_NAME", "prod-transcripts"),
        ("TRANSCRIPT_STORAGE_AWS_REGION", "us-east-1"),
        ("TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID", "AKIA-prod-key"),
        ("TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY", "prod-secret"),
    ]
    mock_settings = MagicMock()
    mock_settings.TRANSCRIPT_STORAGE_BACKEND = "aws"
    mock_settings.__iter__ = MagicMock(return_value=iter(aws_settings))

    with (
        patch("reflector.storage.settings", mock_settings),
        patch("reflector.storage.base.settings", mock_settings),
    ):
        from reflector.storage import get_transcripts_storage

        storage = get_transcripts_storage()

        assert isinstance(storage, AwsStorage)
        assert storage._bucket_name == "prod-transcripts"
        assert storage._endpoint_url is None
        assert storage._access_key_id == "AKIA-prod-key"


# --- Tests for coexistence (selfhosted scenario) ---


def test_all_factories_coexist_selfhosted_scenario():
    """All storage factories work simultaneously with selfhosted config.

    Simulates the real selfhosted setup:
    - Transcript storage → Garage (http://garage:3900)
    - Daily API storage → role_arn (for Daily to write recordings)
    - Source storage → access keys (for workers to read Daily's S3 bucket)
    """
    transcript_settings = [
        ("TRANSCRIPT_STORAGE_AWS_BUCKET_NAME", "reflector-media"),
        ("TRANSCRIPT_STORAGE_AWS_REGION", "garage"),
        ("TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID", "GK-garage-key"),
        ("TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY", "garage-secret"),
        ("TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL", "http://garage:3900"),
    ]
    mock_settings = MagicMock()
    # Transcript storage: Garage
    mock_settings.TRANSCRIPT_STORAGE_BACKEND = "aws"
    mock_settings.__iter__ = MagicMock(return_value=iter(transcript_settings))

    # Daily.co: both role_arn AND access keys configured
    mock_settings.DAILYCO_STORAGE_AWS_BUCKET_NAME = "daily-recordings"
    mock_settings.DAILYCO_STORAGE_AWS_REGION = "us-west-2"
    mock_settings.DAILYCO_STORAGE_AWS_ROLE_ARN = "arn:aws:iam::123:role/DailyAccess"
    mock_settings.DAILYCO_STORAGE_AWS_ACCESS_KEY_ID = "AKIA-daily-worker"
    mock_settings.DAILYCO_STORAGE_AWS_SECRET_ACCESS_KEY = "daily-worker-secret"

    with (
        patch("reflector.storage.settings", mock_settings),
        patch("reflector.storage.base.settings", mock_settings),
    ):
        from reflector.storage import (
            get_dailyco_storage,
            get_source_storage,
            get_transcripts_storage,
        )

        # 1. Transcript storage → Garage
        transcript_storage = get_transcripts_storage()
        assert transcript_storage._endpoint_url == "http://garage:3900"
        assert transcript_storage._access_key_id == "GK-garage-key"

        # 2. Daily API storage → role_arn only (no access keys)
        daily_api_storage = get_dailyco_storage()
        assert daily_api_storage._role_arn == "arn:aws:iam::123:role/DailyAccess"
        assert daily_api_storage._access_key_id is None
|
||||
|
||||
# 3. Source storage → access keys only (no role_arn)
|
||||
source_storage = get_source_storage("daily")
|
||||
assert source_storage._access_key_id == "AKIA-daily-worker"
|
||||
assert source_storage._role_arn is None
|
||||
assert source_storage._endpoint_url is None
|
||||
|
||||
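Aside: the path-style assertion in the Garage test reflects how S3 clients are usually pointed at a self-hosted endpoint. A minimal boto3 sketch, outside Reflector's own code and reusing the test's made-up endpoint, bucket, and credentials:

```python
import boto3
from botocore.config import Config

# Garage-style endpoints generally need path-style addressing
# (http://garage:3900/<bucket>/<key>) because virtual-hosted style
# (http://<bucket>.garage:3900/<key>) would require wildcard DNS.
s3 = boto3.client(
    "s3",
    endpoint_url="http://garage:3900",        # test's made-up Garage endpoint
    region_name="garage",
    aws_access_key_id="GK-garage-key",        # throwaway credentials
    aws_secret_access_key="garage-secret",
    config=Config(s3={"addressing_style": "path"}),
)
s3.list_objects_v2(Bucket="reflector-media", MaxKeys=1)
```

The coexistence test above then splits credentials by purpose: in plain boto3 terms, the role-ARN path corresponds to obtaining temporary credentials via sts.assume_role(RoleArn=...), while the source-storage path passes static keys directly as in the sketch.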
327
server/tests/test_transcripts_audio_token_auth.py
Normal file
@@ -0,0 +1,327 @@
"""Tests for audio mp3 endpoint token query-param authentication.

Covers both password (HS256) and JWT/Authentik (RS256) auth backends,
verifying that private transcripts can be accessed via ?token= query param.
"""

import shutil
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import patch

import jwt
import pytest
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

OWNER_USER_ID = "test-owner-user-id"


def _create_hs256_token(user_id: str, secret: str, expired: bool = False) -> str:
    """Create an HS256 JWT like the password auth backend does."""
    delta = timedelta(minutes=-5) if expired else timedelta(hours=24)
    payload = {
        "sub": user_id,
        "email": "test@example.com",
        "exp": datetime.now(timezone.utc) + delta,
    }
    return jwt.encode(payload, secret, algorithm="HS256")


def _generate_rsa_keypair():
    """Generate a fresh RSA keypair for tests."""
    private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    public_pem = private_key.public_key().public_bytes(
        serialization.Encoding.PEM, serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return private_key, public_pem.decode()


def _create_rs256_token(
    authentik_uid: str,
    private_key,
    audience: str,
    expired: bool = False,
) -> str:
    """Create an RS256 JWT like Authentik would issue."""
    delta = timedelta(minutes=-5) if expired else timedelta(hours=1)
    payload = {
        "sub": authentik_uid,
        "email": "authentik-user@example.com",
        "aud": audience,
        "exp": datetime.now(timezone.utc) + delta,
    }
    return jwt.encode(payload, private_key, algorithm="RS256")
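Aside: the endpoint behavior these tests pin down (Authorization header preferred, ?token= as a fallback for media elements that cannot set headers, 401 for bad tokens, 403 for valid tokens of the wrong user) can be sketched generically. This is not Reflector's actual implementation; the route shape and the verify_raw_token stub are illustrative only:

```python
from fastapi import FastAPI, HTTPException, Query, Request

app = FastAPI()


def verify_raw_token(raw: str) -> dict | None:
    """Stand-in for the backend-specific verifier the tests patch.
    A real backend decodes HS256 or RS256 and returns claims; this
    sketch rejects everything."""
    return None


@app.get("/transcripts/{transcript_id}/audio/mp3")
async def audio_mp3(
    transcript_id: str,
    request: Request,
    token: str | None = Query(default=None),
):
    raw = None
    auth_header = request.headers.get("authorization")
    if auth_header and auth_header.lower().startswith("bearer "):
        raw = auth_header[7:]  # prefer the Authorization header
    elif token:
        raw = token  # fallback for <audio src=...> URLs
    user = None
    if raw is not None:
        try:
            user = verify_raw_token(raw)
        except Exception:
            raise HTTPException(401, "Invalid or expired token")
        if user is None:
            raise HTTPException(401, "Invalid token")
    owner_id = "test-owner-user-id"  # stand-in for the transcript owner lookup
    if user is None or user["sub"] != owner_id:
        raise HTTPException(403, "Not allowed")
    ...  # stream the mp3 with content-type audio/mpeg
```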
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------


@pytest.fixture
async def private_transcript(tmpdir):
    """Create a private transcript owned by OWNER_USER_ID with an mp3 file.

    Created directly via the controller (not HTTP) so no auth override
    leaks into the test scope.
    """
    from reflector.db.transcripts import SourceKind, transcripts_controller
    from reflector.settings import settings

    settings.DATA_DIR = Path(tmpdir)

    transcript = await transcripts_controller.add(
        "Private audio test",
        source_kind=SourceKind.FILE,
        user_id=OWNER_USER_ID,
        share_mode="private",
    )
    await transcripts_controller.update(transcript, {"status": "ended"})

    # Copy a real mp3 to the expected location
    audio_filename = transcript.audio_mp3_filename
    mp3_source = Path(__file__).parent / "records" / "test_mathieu_hello.mp3"
    audio_filename.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(mp3_source, audio_filename)

    yield transcript


# ---------------------------------------------------------------------------
# Core access control tests
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
async def test_audio_mp3_private_no_auth_returns_403(private_transcript, client):
    """Without auth, accessing a private transcript's audio returns 403."""
    response = await client.get(f"/transcripts/{private_transcript.id}/audio/mp3")
    assert response.status_code == 403


@pytest.mark.asyncio
async def test_audio_mp3_with_bearer_header(private_transcript, client):
    """Owner accessing audio via Authorization header works."""
    from reflector.app import app
    from reflector.auth import current_user_optional

    # Temporarily override to simulate the owner being authenticated
    app.dependency_overrides[current_user_optional] = lambda: {
        "sub": OWNER_USER_ID,
        "email": "test@example.com",
    }
    try:
        response = await client.get(f"/transcripts/{private_transcript.id}/audio/mp3")
    finally:
        del app.dependency_overrides[current_user_optional]

    assert response.status_code == 200
    assert response.headers["content-type"] == "audio/mpeg"


@pytest.mark.asyncio
async def test_audio_mp3_public_transcript_no_auth_ok(tmpdir, client):
    """Public transcripts are accessible without any auth."""
    from reflector.db.transcripts import SourceKind, transcripts_controller
    from reflector.settings import settings

    settings.DATA_DIR = Path(tmpdir)

    transcript = await transcripts_controller.add(
        "Public audio test",
        source_kind=SourceKind.FILE,
        user_id=OWNER_USER_ID,
        share_mode="public",
    )
    await transcripts_controller.update(transcript, {"status": "ended"})

    audio_filename = transcript.audio_mp3_filename
    mp3_source = Path(__file__).parent / "records" / "test_mathieu_hello.mp3"
    audio_filename.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(mp3_source, audio_filename)

    response = await client.get(f"/transcripts/{transcript.id}/audio/mp3")
    assert response.status_code == 200
    assert response.headers["content-type"] == "audio/mpeg"


# ---------------------------------------------------------------------------
# Password auth backend tests (?token= with HS256)
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
async def test_audio_mp3_password_token_query_param(private_transcript, client):
    """Password backend: valid HS256 ?token= grants access to private audio."""
    from reflector.auth.auth_password import UserInfo
    from reflector.settings import settings

    token = _create_hs256_token(OWNER_USER_ID, settings.SECRET_KEY)

    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.return_value = UserInfo(sub=OWNER_USER_ID, email="test@example.com")
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={token}"
        )

    assert response.status_code == 200
    assert response.headers["content-type"] == "audio/mpeg"


@pytest.mark.asyncio
async def test_audio_mp3_password_expired_token_returns_401(private_transcript, client):
    """Password backend: expired HS256 ?token= returns 401."""
    from reflector.settings import settings

    expired_token = _create_hs256_token(
        OWNER_USER_ID, settings.SECRET_KEY, expired=True
    )

    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.side_effect = jwt.ExpiredSignatureError("token expired")
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={expired_token}"
        )

    assert response.status_code == 401


@pytest.mark.asyncio
async def test_audio_mp3_password_wrong_user_returns_403(private_transcript, client):
    """Password backend: valid token for a different user returns 403."""
    from reflector.auth.auth_password import UserInfo
    from reflector.settings import settings

    token = _create_hs256_token("other-user-id", settings.SECRET_KEY)

    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.return_value = UserInfo(
            sub="other-user-id", email="other@example.com"
        )
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={token}"
        )

    assert response.status_code == 403


@pytest.mark.asyncio
async def test_audio_mp3_invalid_token_returns_401(private_transcript, client):
    """Garbage token string returns 401."""
    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.return_value = None
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token=not-a-real-token"
        )

    assert response.status_code == 401


# ---------------------------------------------------------------------------
# JWT/Authentik auth backend tests (?token= with RS256)
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
async def test_audio_mp3_authentik_token_query_param(private_transcript, client):
    """Authentik backend: valid RS256 ?token= grants access to private audio."""
    from reflector.auth.auth_password import UserInfo

    private_key, _ = _generate_rsa_keypair()
    token = _create_rs256_token("authentik-abc123", private_key, "test-audience")

    with patch("reflector.auth.verify_raw_token") as mock_verify:
        # Authentik flow maps authentik_uid -> internal user id
        mock_verify.return_value = UserInfo(
            sub=OWNER_USER_ID, email="authentik-user@example.com"
        )
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={token}"
        )

    assert response.status_code == 200
    assert response.headers["content-type"] == "audio/mpeg"


@pytest.mark.asyncio
async def test_audio_mp3_authentik_expired_token_returns_401(
    private_transcript, client
):
    """Authentik backend: expired RS256 ?token= returns 401."""
    private_key, _ = _generate_rsa_keypair()
    expired_token = _create_rs256_token(
        "authentik-abc123", private_key, "test-audience", expired=True
    )

    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.side_effect = jwt.ExpiredSignatureError("token expired")
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={expired_token}"
        )

    assert response.status_code == 401


@pytest.mark.asyncio
async def test_audio_mp3_authentik_wrong_user_returns_403(private_transcript, client):
    """Authentik backend: valid RS256 token for different user returns 403."""
    from reflector.auth.auth_password import UserInfo

    private_key, _ = _generate_rsa_keypair()
    token = _create_rs256_token("authentik-other", private_key, "test-audience")

    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.return_value = UserInfo(
            sub="different-user-id", email="other@example.com"
        )
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={token}"
        )

    assert response.status_code == 403


# ---------------------------------------------------------------------------
# _generate_local_audio_link produces HS256 tokens — must be verifiable
# by any auth backend
# ---------------------------------------------------------------------------


@pytest.mark.asyncio
async def test_local_audio_link_token_works_with_authentik_backend(
    private_transcript, client
):
    """_generate_local_audio_link creates an HS256 token via create_access_token.

    When the Authentik (RS256) auth backend is active, verify_raw_token uses
    JWTAuth which expects RS256 + public key. The HS256 token created by
    _generate_local_audio_link will fail verification, returning 401.

    This test documents the bug: the internal audio URL generated for the
    diarization pipeline is unusable under the JWT auth backend.
    """
    from urllib.parse import parse_qs, urlparse

    # Generate the internal audio link (uses create_access_token → HS256)
    url = private_transcript._generate_local_audio_link()
    parsed = urlparse(url)
    token = parse_qs(parsed.query)["token"][0]

    # Simulate what happens when the JWT/Authentik backend tries to verify
    # this HS256 token: JWTAuth.verify_token expects RS256, so it raises.
    with patch("reflector.auth.verify_raw_token") as mock_verify:
        mock_verify.side_effect = jwt.exceptions.InvalidAlgorithmError(
            "the specified alg value is not allowed"
        )
        response = await client.get(
            f"/transcripts/{private_transcript.id}/audio/mp3?token={token}"
        )

    # BUG: ideally this would be 200 (the token was created by our own
    # server), but the Authentik backend rejects it because it's HS256,
    # not RS256, so the documented behavior is 401.
    assert response.status_code == 401
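Aside: the algorithm mismatch that last test documents reproduces with PyJWT alone. A standalone sketch, independent of Reflector's code, with a throwaway secret and keypair:

```python
import jwt  # PyJWT
from cryptography.hazmat.primitives.asymmetric import rsa

# Token signed like the password backend (HS256, shared secret).
hs256_token = jwt.encode({"sub": "user-1"}, "shared-secret", algorithm="HS256")

# Verifier configured like an OIDC/Authentik backend (RS256, public key).
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
public_key = private_key.public_key()

try:
    jwt.decode(hs256_token, public_key, algorithms=["RS256"])
except jwt.exceptions.InvalidAlgorithmError as exc:
    # PyJWT refuses the token before even checking the signature,
    # because its header declares an algorithm outside the allow-list.
    print("rejected:", exc)
```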
@@ -231,3 +231,81 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
        {"s3_key": k} for k in track_keys
    ]
    mock_file_pipeline.delay.assert_not_called()


@pytest.mark.usefixtures("setup_database")
@pytest.mark.asyncio
async def test_reprocess_error_transcript_passes_force(client):
    """When transcript status is 'error', reprocess passes force=True to start fresh workflow."""
    from datetime import datetime, timezone

    from reflector.db.recordings import Recording, recordings_controller
    from reflector.db.rooms import rooms_controller
    from reflector.db.transcripts import transcripts_controller

    room = await rooms_controller.add(
        name="test-room",
        user_id="test-user",
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic-2nd-participant",
        is_shared=False,
    )

    transcript = await transcripts_controller.add(
        "",
        source_kind="room",
        source_language="en",
        target_language="en",
        user_id="test-user",
        share_mode="public",
        room_id=room.id,
    )

    track_keys = ["recordings/test-room/track1.webm"]
    recording = await recordings_controller.create(
        Recording(
            bucket_name="daily-bucket",
            object_key="recordings/test-room",
            meeting_id="test-meeting",
            track_keys=track_keys,
            recorded_at=datetime.now(timezone.utc),
        )
    )

    await transcripts_controller.update(
        transcript,
        {
            "recording_id": recording.id,
            "status": "error",
            "workflow_run_id": "old-failed-run",
        },
    )

    with (
        patch(
            "reflector.services.transcript_process.task_is_scheduled_or_active"
        ) as mock_celery,
        patch(
            "reflector.services.transcript_process.HatchetClientManager"
        ) as mock_hatchet,
        patch(
            "reflector.views.transcripts_process.dispatch_transcript_processing",
            new_callable=AsyncMock,
        ) as mock_dispatch,
    ):
        mock_celery.return_value = False
        from hatchet_sdk.clients.rest.models import V1TaskStatus

        mock_hatchet.get_workflow_run_status = AsyncMock(
            return_value=V1TaskStatus.FAILED
        )
        response = await client.post(f"/transcripts/{transcript.id}/process")

    assert response.status_code == 200
    mock_dispatch.assert_called_once()
    assert mock_dispatch.call_args.kwargs["force"] is True
@@ -67,7 +67,7 @@ def appserver_ws_user(setup_database):
@pytest.fixture(autouse=True)
def patch_jwt_verification(monkeypatch):
    """Patch JWT verification to accept HS256 tokens signed with SECRET_KEY for tests."""
    from jose import jwt
    import jwt

    from reflector.settings import settings

@@ -84,7 +84,7 @@ def _make_dummy_jwt(sub: str = "user123") -> str:
    # Create a short HS256 JWT using the app secret to pass verification in tests
    from datetime import datetime, timedelta, timezone

    from jose import jwt
    import jwt

    from reflector.settings import settings
3939
server/uv.lock
generated
File diff suppressed because it is too large
@@ -27,5 +27,5 @@ WEBSOCKET_URL=ws://127.0.0.1:1250
AUTH_CALLBACK_URL=http://localhost:3000/auth-callback

# Sentry
# SENTRY_DSN=https://your-dsn@sentry.io/project-id
# NEXT_PUBLIC_SENTRY_DSN=https://your-dsn@sentry.io/project-id
# SENTRY_IGNORE_API_RESOLUTION_ERROR=1

@@ -50,4 +50,4 @@ AUTHENTIK_REFRESH_TOKEN_URL=
# =======================================================
# Sentry (Optional)
# =======================================================
# SENTRY_DSN=
# NEXT_PUBLIC_SENTRY_DSN=

1
www/.gitignore
vendored
@@ -46,3 +46,4 @@ openapi-ts-error-*.log

# pnpm
.pnpm-store
/v10
@@ -1,5 +1,6 @@
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faClose } from "@fortawesome/free-solid-svg-icons";
import type { JSX } from "react";
import { MouseEventHandler } from "react";

type ModalProps = {

@@ -1,3 +1,5 @@
"use client";

import React from "react";
import { Box, Stack, Link, Heading } from "@chakra-ui/react";
import NextLink from "next/link";

@@ -1,3 +1,5 @@
"use client";

import React, { useState } from "react";
import {
  Box,

@@ -1,10 +1,8 @@
import { Container, Flex, Link } from "@chakra-ui/react";
import { featureEnabled } from "../lib/features";
import { Container, Flex } from "@chakra-ui/react";
import NextLink from "next/link";
import Image from "next/image";
import UserInfo from "../(auth)/userInfo";
import AuthWrapper from "./AuthWrapper";
import { RECORD_A_MEETING_URL } from "../api/urls";
import MainNav from "../components/MainNav";

export default async function AppLayout({
  children,
@@ -30,7 +28,7 @@ export default async function AppLayout({
        mt="1"
      >
        {/* Logo on the left */}
        <Link as={NextLink} href="/" className="flex">
        <NextLink href="/" className="flex">
          <Image
            src="/reach.svg"
            width={32}
@@ -46,53 +44,8 @@ export default async function AppLayout({
              Capture the signal, not the noise
            </p>
          </div>
        </Link>
        <div>
          {/* Text link on the right */}
          <Link
            as={NextLink}
            href={RECORD_A_MEETING_URL}
            className="font-light px-2"
          >
            Create
          </Link>
          {featureEnabled("browse") ? (
            <>
              ·
              <Link href="/browse" as={NextLink} className="font-light px-2">
                Browse
              </Link>
            </>
          ) : (
            <></>
          )}
          {featureEnabled("rooms") ? (
            <>
              ·
              <Link href="/rooms" as={NextLink} className="font-light px-2">
                Rooms
              </Link>
            </>
          ) : (
            <></>
          )}
          {featureEnabled("requireLogin") ? (
            <>
              ·
              <Link
                href="/settings/api-keys"
                as={NextLink}
                className="font-light px-2"
              >
                Settings
              </Link>
              ·
              <UserInfo />
            </>
          ) : (
            <></>
          )}
        </div>
        </NextLink>
        <MainNav />
      </Flex>

      <AuthWrapper>{children}</AuthWrapper>

@@ -1,13 +1,11 @@
"use client";
import React, { useEffect, useState } from "react";
import useAudioDevice from "../useAudioDevice";
import "react-select-search/style.css";
import "../../../styles/form.scss";
import About from "../../../(aboutAndPrivacy)/about";
import Privacy from "../../../(aboutAndPrivacy)/privacy";
import { useRouter } from "next/navigation";
import useCreateTranscript from "../createTranscript";
import SelectSearch from "react-select-search";
import { supportedLanguages } from "../../../supportedLanguages";
import {
  Flex,
@@ -21,6 +19,7 @@ import {
} from "@chakra-ui/react";
import { useAuth } from "../../../lib/AuthProvider";
import { featureEnabled } from "../../../lib/features";
import { SearchableLanguageSelect } from "../../../components/SearchableLanguageSelect";

const TranscriptCreate = () => {
  const router = useRouter();
@@ -147,31 +146,27 @@ const TranscriptCreate = () => {
        p={8}
        flexDir="column"
        my={4}
        className="form-on-primary"
      >
        <Heading size="xl" mb={4}>
          Try Reflector
        </Heading>
        <Box mb={4}>
          <Text>Recording name</Text>
          <div className="select-search-container">
            <input
              className="select-search-input"
              type="text"
              onChange={nameChange}
              placeholder="Optional"
            />
          </div>
          <Text mb={1}>Recording name</Text>
          <input
            className="form-field-input"
            type="text"
            onChange={nameChange}
            placeholder="Optional"
          />
        </Box>
        <Box mb={4}>
          <Text>Do you want to enable live translation?</Text>
          <SelectSearch
            search
          <Text mb={1}>Do you want to enable live translation?</Text>
          <SearchableLanguageSelect
            options={supportedLanguages}
            value={targetLanguage}
            onChange={onLanguageChange}
            onBlur={() => {}}
            onFocus={() => {}}
            placeholder="Choose your language"
            placeholder="No translation"
          />
        </Box>
        {!loading ? (

@@ -78,10 +78,10 @@ const useMp3 = (transcriptId: string, waiting?: boolean): Mp3Response => {

    // Audio is not deleted, proceed to load it
    audioElement = document.createElement("audio");
    const audioUrl = `${API_URL}/v1/transcripts/${transcriptId}/audio/mp3`;
    audioElement.src = accessTokenInfo
      ? `${audioUrl}?token=${encodeURIComponent(accessTokenInfo)}`
      : audioUrl;
    const audioUrl = accessTokenInfo
      ? `${API_URL}/v1/transcripts/${transcriptId}/audio/mp3?token=${encodeURIComponent(accessTokenInfo)}`
      : `${API_URL}/v1/transcripts/${transcriptId}/audio/mp3`;
    audioElement.src = audioUrl;
    audioElement.crossOrigin = "anonymous";
    audioElement.preload = "auto";

@@ -346,9 +346,10 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
    let intentionalClose = false;

    const connect = () => {
      const subprotocols = auth.accessToken
        ? ["bearer", auth.accessToken]
        : undefined;
      const subprotocols =
        auth.status === "authenticated" && auth.accessToken
          ? ["bearer", auth.accessToken]
          : undefined;
      ws = new WebSocket(url, subprotocols);

      ws.onopen = () => {

@@ -28,7 +28,7 @@ function WherebyConsentDialogButton({
  meetingId: MeetingId;
  recordingType: Meeting["recording_type"];
  skipConsent: boolean;
  wherebyRef: React.RefObject<HTMLElement>;
  wherebyRef: React.RefObject<HTMLElement | null>;
}) {
  const previousFocusRef = useRef<HTMLElement | null>(null);

@@ -49,8 +49,8 @@ export type RoomDetails = {

// stages: we focus on the consent, then whereby steals focus, then we focus on the consent again, then return focus to whoever stole it initially
const useConsentWherebyFocusManagement = (
  acceptButtonRef: RefObject<HTMLButtonElement>,
  wherebyRef: RefObject<HTMLElement>,
  acceptButtonRef: RefObject<HTMLButtonElement | null>,
  wherebyRef: RefObject<HTMLElement | null>,
) => {
  const currentFocusRef = useRef<HTMLElement | null>(null);
  useEffect(() => {
@@ -87,7 +87,7 @@ const useConsentWherebyFocusManagement = (

const useConsentDialog = (
  meetingId: MeetingId,
  wherebyRef: RefObject<HTMLElement> /*accessibility*/,
  wherebyRef: RefObject<HTMLElement | null> /*accessibility*/,
) => {
  const { state: consentState, touch, hasAnswered } = useRecordingConsent();
  // toast would open duplicates, even with using "id=" prop
@@ -220,7 +220,7 @@ function ConsentDialogButton({
  wherebyRef,
}: {
  meetingId: MeetingId;
  wherebyRef: React.RefObject<HTMLElement>;
  wherebyRef: React.RefObject<HTMLElement | null>;
}) {
  const { showConsentModal, consentState, hasAnswered, consentLoading } =
    useConsentDialog(meetingId, wherebyRef);

@@ -1,6 +1,14 @@
import NextAuth from "next-auth";
import { authOptions } from "../../../lib/authBackend";

const handler = NextAuth(authOptions());
export const dynamic = "force-dynamic";

export { handler as GET, handler as POST };
// authOptions() is deferred to request time to avoid calling getNextEnvVar
// during Turbopack's build-phase module evaluation (Next.js 16+)
export function GET(req: Request, ctx: any) {
  return NextAuth(authOptions())(req as any, ctx);
}

export function POST(req: Request, ctx: any) {
  return NextAuth(authOptions())(req as any, ctx);
}

@@ -1,5 +1,7 @@
import { NextResponse } from "next/server";

export const dynamic = "force-dynamic";

export async function GET() {
  const health = {
    status: "healthy",
46
www/app/components/MainNav.tsx
Normal file
@@ -0,0 +1,46 @@
import NextLink from "next/link";
import { featureEnabled } from "../lib/features";
import UserInfo from "../(auth)/userInfo";
import { RECORD_A_MEETING_URL } from "../api/urls";

function NavLink({
  href,
  children,
}: {
  href: string;
  children: React.ReactNode;
}) {
  return (
    <NextLink href={href} className="font-light px-10">
      {children}
    </NextLink>
  );
}

export default function MainNav() {
  return (
    <nav>
      <NavLink href={RECORD_A_MEETING_URL}>Create</NavLink>
      {featureEnabled("browse") && (
        <>
          ·
          <NavLink href="/browse">Browse</NavLink>
        </>
      )}
      {featureEnabled("rooms") && (
        <>
          ·
          <NavLink href="/rooms">Rooms</NavLink>
        </>
      )}
      {featureEnabled("requireLogin") && (
        <>
          ·
          <NavLink href="/settings/api-keys">Settings</NavLink>
          ·
          <UserInfo />
        </>
      )}
    </nav>
  );
}
98
www/app/components/SearchableLanguageSelect.tsx
Normal file
@@ -0,0 +1,98 @@
"use client";

import React, { useMemo } from "react";
import {
  Combobox,
  createListCollection,
  useComboboxContext,
} from "@chakra-ui/react";

export type LangOption = { value: string | undefined; name: string };

type Item = { label: string; value: string };

function FilteredComboboxItems({ items }: { items: Item[] }) {
  const ctx = useComboboxContext();
  const inputValue = (ctx as { inputValue?: string }).inputValue ?? "";
  const filtered = useMemo(() => {
    const q = inputValue.trim().toLowerCase();
    if (!q) return items;
    return items.filter((item) => item.label.toLowerCase().includes(q));
  }, [items, inputValue]);

  return (
    <>
      <Combobox.Empty>No matches</Combobox.Empty>
      {filtered.map((item) => (
        <Combobox.Item key={item.value} item={item}>
          {item.label}
        </Combobox.Item>
      ))}
    </>
  );
}

type Props = {
  options: LangOption[];
  value: string;
  onChange: (value: string) => void;
  placeholder: string;
};

export function SearchableLanguageSelect({
  options,
  value,
  onChange,
  placeholder,
}: Props) {
  const items = useMemo(() => {
    const result: Item[] = [];
    let addedNone = false;
    for (const opt of options) {
      const val = opt.value ?? "NOTRANSLATION";
      if (val === "NOTRANSLATION" || val === "") {
        if (addedNone) continue;
        addedNone = true;
        result.push({ label: "No translation", value: "NOTRANSLATION" });
      } else {
        result.push({ label: opt.name, value: val });
      }
    }
    return result.sort((a, b) => {
      if (a.value === "NOTRANSLATION") return -1;
      if (b.value === "NOTRANSLATION") return 1;
      return a.label.localeCompare(b.label);
    });
  }, [options]);

  const collection = useMemo(() => createListCollection({ items }), [items]);

  const selectedValues = value && value !== "NOTRANSLATION" ? [value] : [];

  return (
    <Combobox.Root
      collection={collection}
      value={selectedValues}
      onValueChange={(e) => onChange(e.value[0] ?? "NOTRANSLATION")}
      openOnClick
      closeOnSelect
      selectionBehavior="replace"
      placeholder={placeholder}
      className="form-combobox"
      size="md"
      positioning={{ strategy: "fixed", hideWhenDetached: true }}
    >
      <Combobox.Control>
        <Combobox.Input />
        <Combobox.IndicatorGroup>
          <Combobox.Trigger />
        </Combobox.IndicatorGroup>
      </Combobox.Control>
      <Combobox.Positioner>
        <Combobox.Content>
          <FilteredComboboxItems items={items} />
        </Combobox.Content>
      </Combobox.Positioner>
    </Combobox.Root>
  );
}
@@ -24,55 +24,63 @@ export const viewport: Viewport = {
  maximumScale: 1,
};

const SITE_URL = getNextEnvVar("SITE_URL");
const env = getClientEnv();

export const metadata: Metadata = {
  metadataBase: new URL(SITE_URL),
  title: {
    template: "%s – Reflector",
    default: "Reflector - AI-Powered Meeting Transcriptions by Monadical",
  },
  description:
    "Reflector is an AI-powered tool that transcribes your meetings with unparalleled accuracy, divides content by topics, and provides insightful summaries. Maximize your productivity with Reflector, brought to you by Monadical. Capture the signal, not the noise",
  applicationName: "Reflector",
  referrer: "origin-when-cross-origin",
  keywords: ["Reflector", "Monadical", "AI", "Meetings", "Transcription"],
  authors: [{ name: "Monadical Team", url: "https://monadical.com/team.html" }],
  formatDetection: {
    email: false,
    address: false,
    telephone: false,
  },

  openGraph: {
    title: "Reflector",
export function generateMetadata(): Metadata {
  const SITE_URL = getNextEnvVar("SITE_URL");
  return {
    metadataBase: new URL(SITE_URL),
    title: {
      template: "%s – Reflector",
      default: "Reflector - AI-Powered Meeting Transcriptions by Monadical",
    },
    description:
      "Reflector is an AI-powered tool that transcribes your meetings with unparalleled accuracy, divides content by topics, and provides insightful summaries. Maximize your productivity with Reflector, brought to you by Monadical. Capture the signal, not the noise.",
    type: "website",
  },
      "Reflector is an AI-powered tool that transcribes your meetings with unparalleled accuracy, divides content by topics, and provides insightful summaries. Maximize your productivity with Reflector, brought to you by Monadical. Capture the signal, not the noise",
    applicationName: "Reflector",
    referrer: "origin-when-cross-origin",
    keywords: ["Reflector", "Monadical", "AI", "Meetings", "Transcription"],
    authors: [
      { name: "Monadical Team", url: "https://monadical.com/team.html" },
    ],
    formatDetection: {
      email: false,
      address: false,
      telephone: false,
    },

  twitter: {
    card: "summary_large_image",
    title: "Reflector",
    description:
      "Reflector is an AI-powered tool that transcribes your meetings with unparalleled accuracy, divides content by topics, and provides insightful summaries. Maximize your productivity with Reflector, brought to you by Monadical. Capture the signal, not the noise.",
    images: ["/r-icon.png"],
  },
    openGraph: {
      title: "Reflector",
      description:
        "Reflector is an AI-powered tool that transcribes your meetings with unparalleled accuracy, divides content by topics, and provides insightful summaries. Maximize your productivity with Reflector, brought to you by Monadical. Capture the signal, not the noise.",
      type: "website",
    },

  icons: {
    icon: "/r-icon.png",
    shortcut: "/r-icon.png",
    apple: "/r-icon.png",
  },
  robots: { index: false, follow: false, noarchive: true, noimageindex: true },
};
    twitter: {
      card: "summary_large_image",
      title: "Reflector",
      description:
        "Reflector is an AI-powered tool that transcribes your meetings with unparalleled accuracy, divides content by topics, and provides insightful summaries. Maximize your productivity with Reflector, brought to you by Monadical. Capture the signal, not the noise.",
      images: ["/r-icon.png"],
    },

    icons: {
      icon: "/r-icon.png",
      shortcut: "/r-icon.png",
      apple: "/r-icon.png",
    },
    robots: {
      index: false,
      follow: false,
      noarchive: true,
      noimageindex: true,
    },
  };
}

export default async function RootLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  const env = getClientEnv();
  return (
    <html lang="en" className={poppins.className} suppressHydrationWarning>
      <body
88
www/app/lib/__tests__/authBackend.test.ts
Normal file
@@ -0,0 +1,88 @@
// env vars must be set before any module imports
process.env.AUTHENTIK_REFRESH_TOKEN_URL =
  "https://authentik.example.com/application/o/token/";
process.env.AUTHENTIK_ISSUER =
  "https://authentik.example.com/application/o/reflector/";
process.env.AUTHENTIK_CLIENT_ID = "test-client-id";
process.env.AUTHENTIK_CLIENT_SECRET = "test-client-secret";
process.env.SERVER_API_URL = "http://localhost:1250";
process.env.FEATURE_REQUIRE_LOGIN = "true";
// must NOT be "credentials" so authOptions() returns the Authentik path
delete process.env.AUTH_PROVIDER;

jest.mock("../next", () => ({ isBuildPhase: false }));

jest.mock("../features", () => ({
  featureEnabled: (name: string) => name === "requireLogin",
}));

jest.mock("../redisClient", () => ({
  tokenCacheRedis: {},
  redlock: {
    using: jest.fn((_keys: string[], _ttl: number, fn: () => unknown) => fn()),
  },
}));

jest.mock("../redisTokenCache", () => ({
  getTokenCache: jest.fn().mockResolvedValue(null),
  setTokenCache: jest.fn().mockResolvedValue(undefined),
  deleteTokenCache: jest.fn().mockResolvedValue(undefined),
}));

const mockFetch = jest.fn();
global.fetch = mockFetch;

import { authOptions } from "../authBackend";

describe("Authentik token refresh", () => {
  beforeEach(() => {
    mockFetch.mockReset();
  });

  test("refresh request preserves trailing slash in token URL", async () => {
    mockFetch.mockResolvedValue({
      ok: true,
      json: async () => ({
        access_token: "new-access-token",
        expires_in: 300,
        refresh_token: "new-refresh-token",
      }),
    });

    const options = authOptions();
    const jwtCallback = options.callbacks!.jwt!;

    // Simulate a returning user whose access token has expired (no account/user = not initial login)
    const expiredToken = {
      sub: "test-user-123",
      accessToken: "expired-access-token",
      accessTokenExpires: Date.now() - 60_000,
      refreshToken: "old-refresh-token",
    };

    await jwtCallback({
      token: expiredToken,
      user: undefined as any,
      account: null,
      profile: undefined,
      trigger: "update",
      isNewUser: false,
      session: undefined,
    });

    // The refresh POST must go to the exact URL from the env var (trailing slash included)
    expect(mockFetch).toHaveBeenCalledWith(
      "https://authentik.example.com/application/o/token/",
      expect.objectContaining({
        method: "POST",
        body: expect.any(String),
      }),
    );

    const body = new URLSearchParams(mockFetch.mock.calls[0][1].body);
    expect(body.get("grant_type")).toBe("refresh_token");
    expect(body.get("refresh_token")).toBe("old-refresh-token");
    expect(body.get("client_id")).toBe("test-client-id");
    expect(body.get("client_secret")).toBe("test-client-secret");
  });
});
@@ -53,7 +53,7 @@ const TOKEN_CACHE_TTL = REFRESH_ACCESS_TOKEN_BEFORE;
const getAuthentikClientId = () => getNextEnvVar("AUTHENTIK_CLIENT_ID");
const getAuthentikClientSecret = () => getNextEnvVar("AUTHENTIK_CLIENT_SECRET");
const getAuthentikRefreshTokenUrl = () =>
  getNextEnvVar("AUTHENTIK_REFRESH_TOKEN_URL").replace(/\/+$/, "");
  getNextEnvVar("AUTHENTIK_REFRESH_TOKEN_URL");

const getAuthentikIssuer = () => {
  const stringUrl = getNextEnvVar("AUTHENTIK_ISSUER");
@@ -62,7 +62,7 @@ const getAuthentikIssuer = () => {
  } catch (e) {
    throw new Error("AUTHENTIK_ISSUER is not a valid URL: " + stringUrl);
  }
  return stringUrl.replace(/\/+$/, "");
  return stringUrl;
};

export const authOptions = (): AuthOptions => {
Some files were not shown because too many files have changed in this diff