Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2026-03-22 07:06:47 +00:00)

Compare commits: v0.36.0 ... feat/dag-p (17 commits)
| Author | SHA1 | Date |
|---|---|---|
| | b1eeb651f6 | |
| | 499de45fdb | |
| | b4ccbe6928 | |
| | 38f100a83e | |
| | faec509a33 | |
| | 4d9f5fa4b4 | |
| | 455cb3d099 | |
| | 2410688559 | |
| | 6dd96bfa5e | |
| | 0acaa0de93 | |
| | c45d3182ee | |
| | 0c06cdd117 | |
| | ebae9124b6 | |
| | a6a5d35e44 | |
| | 025e6da539 | |
| | 4b79b0c989 | |
| | a359c845ff | |
.github/workflows/test_server.yml (vendored): 6 changes
@@ -34,7 +34,7 @@ jobs:
|
||||
uv run -m pytest -v tests
|
||||
|
||||
docker-amd64:
|
||||
runs-on: [linux-amd64]
|
||||
runs-on: linux-amd64
|
||||
concurrency:
|
||||
group: docker-amd64-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
@@ -52,14 +52,12 @@ jobs:
|
||||
github-token: ${{ secrets.GHA_CACHE_TOKEN }}
|
||||
|
||||
docker-arm64:
|
||||
runs-on: [linux-arm64]
|
||||
runs-on: linux-arm64
|
||||
concurrency:
|
||||
group: docker-arm64-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Wait for Docker daemon
|
||||
run: while ! docker version; do sleep 1; done
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Build ARM64
|
||||
|
||||
.gitignore (vendored): 2 changes
@@ -23,5 +23,3 @@ www/.env.production
|
||||
docs/pnpm-lock.yaml
|
||||
.secrets
|
||||
opencode.json
|
||||
|
||||
vibedocs/
|
||||
|
||||
@@ -6,7 +6,7 @@ repos:
|
||||
- id: format
|
||||
name: run format
|
||||
language: system
|
||||
entry: bash -c 'if [ -f "$HOME/.nvm/nvm.sh" ]; then source "$HOME/.nvm/nvm.sh"; fi; cd www && pnpm format'
|
||||
entry: bash -c 'cd www && pnpm format'
|
||||
pass_filenames: false
|
||||
files: ^www/
|
||||
|
||||
|
||||
CHANGELOG.md: 38 changes
@@ -1,43 +1,5 @@
|
||||
# Changelog
|
||||
|
||||
## [0.35.1](https://github.com/GreyhavenHQ/reflector/compare/v0.35.0...v0.35.1) (2026-02-25)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* enable sentry on frontend ([#876](https://github.com/GreyhavenHQ/reflector/issues/876)) ([bc6bb63](https://github.com/GreyhavenHQ/reflector/commit/bc6bb63c32dc84be5d3b00388618d53f04f64e35))
|
||||
* switch structured output to tool-call with reflection retry ([#879](https://github.com/GreyhavenHQ/reflector/issues/879)) ([5d54758](https://github.com/GreyhavenHQ/reflector/commit/5d547586ef0f54514d1d65aacca8e57869013a82))
|
||||
|
||||
## [0.35.0](https://github.com/Monadical-SAS/reflector/compare/v0.34.0...v0.35.0) (2026-02-23)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* Add Single User authentication to Selfhosted ([#870](https://github.com/Monadical-SAS/reflector/issues/870)) ([c8db373](https://github.com/Monadical-SAS/reflector/commit/c8db37362b6cfd8f772aee8857de2909f283c029))
|
||||
|
||||
## [0.34.0](https://github.com/Monadical-SAS/reflector/compare/v0.33.0...v0.34.0) (2026-02-20)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add Caddy reverse proxy with auto HTTPS for LAN access and auto-derive WebSocket URL ([#863](https://github.com/Monadical-SAS/reflector/issues/863)) ([7f2a401](https://github.com/Monadical-SAS/reflector/commit/7f2a4013cbb3d3ee3e76885f28d73331dcaf325c))
|
||||
* add change_seq to transcripts for ingestion support ([#868](https://github.com/Monadical-SAS/reflector/issues/868)) ([d4cc6be](https://github.com/Monadical-SAS/reflector/commit/d4cc6be1fed56ea7fba06acb8d50c9de43b26b07))
|
||||
* local llm support + standalone-script doc/draft ([#856](https://github.com/Monadical-SAS/reflector/issues/856)) ([b468427](https://github.com/Monadical-SAS/reflector/commit/b468427f1bb12634f5840990e9d64b2c145d7c1a))
|
||||
* remove network_mode host for standalone WebRTC ([#864](https://github.com/Monadical-SAS/reflector/issues/864)) ([9dbf155](https://github.com/Monadical-SAS/reflector/commit/9dbf155be4de7c059035a75f90c7bf0845344b74))
|
||||
* standalone frontend uses production build instead of dev server ([#862](https://github.com/Monadical-SAS/reflector/issues/862)) ([5bca925](https://github.com/Monadical-SAS/reflector/commit/5bca92510a5c33f8baeeaac2c346fb1978366ac8))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* auto-rebuild standalone images and blank Hatchet vars ([3d13e5d](https://github.com/Monadical-SAS/reflector/commit/3d13e5d42fc53ce3c005841265ed1e8735a61518))
|
||||
* check compose version output, not just exit code ([e57c618](https://github.com/Monadical-SAS/reflector/commit/e57c6186f92d66e4525786e56b018c08cf792d2f))
|
||||
* check for Docker BuildKit (buildx) before building images ([14a8b58](https://github.com/Monadical-SAS/reflector/commit/14a8b5808e5aed860e55aaed35a0fdf8b2f4afa3))
|
||||
* check for Docker Compose plugin before running standalone setup ([36a8dae](https://github.com/Monadical-SAS/reflector/commit/36a8daee61c2b7a0937fd0914d51fb4ea8212ae7))
|
||||
* live flow real-time updates during processing ([#861](https://github.com/Monadical-SAS/reflector/issues/861)) ([972a52d](https://github.com/Monadical-SAS/reflector/commit/972a52d22f989f9e2c6f52362b3f1a4e17773663))
|
||||
* remove max_tokens cap to support thinking models (Kimi-K2.5) ([#869](https://github.com/Monadical-SAS/reflector/issues/869)) ([527a069](https://github.com/Monadical-SAS/reflector/commit/527a069ba9eff6717ccd4bb1e839674edebffceb))
|
||||
* standalone on ubuntu ([#865](https://github.com/Monadical-SAS/reflector/issues/865)) ([a8ad237](https://github.com/Monadical-SAS/reflector/commit/a8ad237d8571d5ef5c78fb4427c538592d6a7b43))
|
||||
* standalone server networking and setup diagnostics ([695f3c4](https://github.com/Monadical-SAS/reflector/commit/695f3c49285254869f6a6cbd5f860d1169fa4daa))
|
||||
|
||||
## [0.33.0](https://github.com/Monadical-SAS/reflector/compare/v0.32.2...v0.33.0) (2026-02-05)
|
||||
|
||||
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
# Reflector self-hosted production — HTTPS via Caddy reverse proxy
|
||||
# Copy to Caddyfile: cp Caddyfile.selfhosted.example Caddyfile
|
||||
# Run: ./scripts/setup-selfhosted.sh --ollama-gpu --garage --caddy
|
||||
#
|
||||
# DOMAIN defaults to localhost (self-signed cert).
|
||||
# Set to your real domain for automatic Let's Encrypt:
|
||||
# export DOMAIN=reflector.example.com
|
||||
#
|
||||
# TLS_MODE defaults to "internal" (self-signed).
|
||||
# Set to "" for automatic Let's Encrypt (requires real domain + ports 80/443 open):
|
||||
# export TLS_MODE=""
|
||||
|
||||
{$DOMAIN:localhost} {
|
||||
tls {$TLS_MODE:internal}
|
||||
|
||||
handle /v1/* {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle /health {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle {
|
||||
reverse_proxy web:3000
|
||||
}
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
# Reflector standalone — HTTPS via Caddy (droplet / IP access)
|
||||
# Copy to Caddyfile: cp Caddyfile.standalone.example Caddyfile
|
||||
# Run: docker compose -f docker-compose.standalone.yml --profile ollama-cpu up -d
|
||||
#
|
||||
# :443 = catch-all inside container; Docker maps host port 3043 → container 443
|
||||
# on_demand = generate self-signed cert for IP/SNI on first request (required for bare IP access)
|
||||
# Browser will warn. Click Advanced → Proceed.
|
||||
# Access at https://localhost:3043 (or https://YOUR_IP:3043 on droplet)
|
||||
# Update www/.env.local with: API_URL=https://YOUR_IP:3043, WEBSOCKET_URL=wss://YOUR_IP:3043, SITE_URL=https://YOUR_IP:3043, NEXTAUTH_URL=https://YOUR_IP:3043
|
||||
|
||||
:443 {
|
||||
tls internal {
|
||||
on_demand
|
||||
}
|
||||
handle /v1/* {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle /health {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle {
|
||||
reverse_proxy web:3000
|
||||
}
|
||||
}
|
||||
|
||||
# Option B: localhost (comment Option A, uncomment this)
|
||||
# app.localhost {
|
||||
# tls internal
|
||||
# reverse_proxy web:3000
|
||||
# }
|
||||
# api.localhost {
|
||||
# tls internal
|
||||
# reverse_proxy server:1250
|
||||
# }
|
||||
|
||||
# Option C: Real domain (uncomment and replace example.com)
|
||||
# app.example.com {
|
||||
# reverse_proxy web:3000
|
||||
# }
|
||||
# api.example.com {
|
||||
# reverse_proxy server:1250
|
||||
# }
|
||||
README.md: 208 changes
@@ -34,8 +34,6 @@ Reflector is an AI-powered audio transcription and meeting analysis platform tha
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
<p align="center" style="font-size: 1.5em; font-weight: bold;">By <a href="https://greyhaven.co">Greyhaven</a></p>
|
||||
|
||||
## What is Reflector?
|
||||
|
||||
Reflector is a web application that utilizes local models to process audio content, providing:
|
||||
@@ -46,100 +44,22 @@ Reflector is a web application that utilizes local models to process audio conte
|
||||
- **Topic Detection & Summarization**: Extract key topics and generate concise summaries using LLMs
|
||||
- **Meeting Recording**: Create permanent records of meetings with searchable transcripts
|
||||
|
||||
## Architecture
|
||||
Currently we provide a [modal.com](https://modal.com/) GPU template for deployment.
|
||||
|
||||
The project consists of three primary components:
|
||||
## Background
|
||||
|
||||
- **Back-End**: Python FastAPI server with async database operations and background processing, found in `server/`.
|
||||
- **Front-End**: Next.js 14 React application with Chakra UI, located in `www/`.
|
||||
- **GPU Models**: Specialized ML models for transcription, diarization, translation, and summarization.
|
||||
The project architecture consists of three primary components:
|
||||
|
||||
Currently, Reflector supports two input methods:
|
||||
- **Screenshare capture**: Real-time audio capture from your browser via WebRTC
|
||||
- **Audio file upload**: Upload pre-recorded audio files for processing
|
||||
- **Back-End**: Python server that offers an API and data persistence, found in `server/`.
|
||||
- **Front-End**: NextJS React project hosted on Vercel, located in `www/`.
|
||||
- **GPU implementation**: Providing services such as speech-to-text transcription, topic generation, automated summaries, and translations.
|
||||
|
||||
## Installation
|
||||
It also uses Authentik for authentication, if activated.
|
||||
|
||||
For full deployment instructions, see the [Self-Hosted Production Guide](docsv2/selfhosted-production.md) and the [Architecture Reference](docsv2/selfhosted-architecture.md).
|
||||
## Contribution Guidelines
|
||||
|
||||
### Self-Hosted Deployment
|
||||
|
||||
The self-hosted setup script configures and launches everything on a single server:
|
||||
|
||||
```bash
|
||||
# GPU with local Ollama LLM, local S3 storage, and Caddy reverse proxy
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
|
||||
|
||||
# With a custom domain (enables Let's Encrypt auto-HTTPS)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --domain reflector.example.com
|
||||
|
||||
# CPU-only mode (slower, no NVIDIA GPU required)
|
||||
./scripts/setup-selfhosted.sh --cpu --ollama-cpu --garage --caddy
|
||||
|
||||
# With password authentication
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --password mysecretpass
|
||||
```
|
||||
|
||||
The script is idempotent and safe to re-run. See `./scripts/setup-selfhosted.sh --help` for all options.
|
||||
|
||||
### Authentication
|
||||
|
||||
Reflector supports three authentication modes:
|
||||
|
||||
- **Password authentication (recommended for self-hosted / single-user)**: Use the `--password` flag in the setup script. This creates an `admin@localhost` user with the provided password. Users must log in to create, edit, or delete transcripts.
|
||||
|
||||
```bash
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --password mysecretpass
|
||||
```
|
||||
|
||||
- **Authentik OIDC**: For multi-user or enterprise deployments, Reflector supports [Authentik](https://goauthentik.io/) as an OAuth/OIDC provider. This enables SSO, LDAP/AD integration, and centralized user management. Requires configuring `AUTH_BACKEND=jwt` on the backend and `AUTH_PROVIDER=authentik` on the frontend. See the [Self-Hosted Production Guide](docsv2/selfhosted-production.md) for details.
|
||||
|
||||
- **Public mode (default when no auth is configured)**: If neither password nor Authentik is set up, Reflector runs in public mode. In this mode, no login is required — anyone with access to the URL can use the application. Transcripts are created anonymously (not tied to any user account), which means they **cannot be edited or deleted** through the UI or API. Anonymous transcripts are automatically cleaned up after 7 days. This mode is suitable for demos or testing but not recommended for production use.
|
||||
|
||||
### Development Setup
|
||||
|
||||
```bash
|
||||
# Backend
|
||||
cd server
|
||||
uv sync
|
||||
docker compose up -d redis
|
||||
uv run alembic upgrade head
|
||||
uv run -m reflector.app --reload
|
||||
|
||||
# In a separate terminal — start the worker
|
||||
cd server
|
||||
uv run celery -A reflector.worker.app worker --loglevel=info
|
||||
|
||||
# Frontend
|
||||
cd www
|
||||
pnpm install
|
||||
cp .env_template .env
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
### Modal.com GPU (Optional)
|
||||
|
||||
Reflector also supports deploying specialized models (transcription, diarization) to [Modal.com](https://modal.com/) for serverless GPU processing. This is **not integrated into the self-hosted setup script** and must be configured manually.
|
||||
|
||||
See [Modal.com Setup Guide](docs/docs/installation/modal-setup.md) for deployment instructions.
|
||||
|
||||
## Audio Processing Commands
|
||||
|
||||
### Process a local audio file
|
||||
|
||||
```bash
|
||||
cd server
|
||||
uv run python -m reflector.tools.process path/to/audio.wav
|
||||
```
|
||||
|
||||
### Reprocess an existing transcription
|
||||
|
||||
Re-run the processing pipeline on a previously uploaded transcription by its UUID:
|
||||
|
||||
```bash
|
||||
cd server
|
||||
uv run -m reflector.tools.process_transcript <transcript-uuid> --sync
|
||||
```
|
||||
All new contributions should be made in a separate branch and go through a Pull Request.
|
||||
[Conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) must be used for the PR title and commits.
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -167,9 +87,96 @@ Note: We currently do not have instructions for Windows users.
|
||||
- Then go to `System Preferences -> Sound` and choose the devices created from the Output and Input tabs.
|
||||
- If everything is configured properly, the input from your local microphone and the browser-run meeting are aggregated into one virtual stream to listen to, and the output is fed back to your specified output devices.
|
||||
|
||||
## Installation
|
||||
|
||||
*Note: we're working toward a better installation process; these instructions are not accurate for now.*
|
||||
|
||||
### Frontend
|
||||
|
||||
Start with `cd www`.
|
||||
|
||||
**Installation**
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
Then, fill in the environment variables in `.env` as needed. If you are unsure how to proceed, ask in Zulip.
|
||||
|
||||
**Run in development mode**
|
||||
|
||||
```bash
|
||||
pnpm dev
|
||||
```
|
||||
|
||||
Then (after completing server setup and starting it) open [http://localhost:3000](http://localhost:3000) to view it in the browser.
|
||||
|
||||
**OpenAPI Code Generation**
|
||||
|
||||
To generate the TypeScript files from the `openapi.json` file, make sure the Python server is running, then run:
|
||||
|
||||
```bash
|
||||
pnpm openapi
|
||||
```
|
||||
|
||||
### Backend
|
||||
|
||||
Start with `cd server`.
|
||||
|
||||
**Run in development mode**
|
||||
|
||||
```bash
|
||||
docker compose up -d redis
|
||||
|
||||
# on the first run, or if the schemas changed
|
||||
uv run alembic upgrade head
|
||||
|
||||
# start the worker
|
||||
uv run celery -A reflector.worker.app worker --loglevel=info
|
||||
|
||||
# start the app
|
||||
uv run -m reflector.app --reload
|
||||
```
|
||||
|
||||
Then fill `.env` with the omitted values (ask in Zulip).
|
||||
|
||||
**Crontab (optional)**
|
||||
|
||||
For scheduled tasks (currently only the healthcheck), start Celery beat (you don't need it in your local dev environment):
|
||||
|
||||
```bash
|
||||
uv run celery -A reflector.worker.app beat
|
||||
```
|
||||
|
||||
### GPU models
|
||||
|
||||
Currently, Reflector heavily uses custom local models deployed on Modal. All the microservices are available in `server/gpu/`.
|
||||
|
||||
To deploy LLM changes to Modal, you need the following; a command sketch follows the list:
|
||||
- a Modal account
- the required secret set up in your Modal account (REFLECTOR_GPU_APIKEY)
- the Modal CLI installed
- the Modal CLI connected to your account, if not done previously
- then run `modal run path/to/required/llm`
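As a rough sketch, the full sequence might look like the following; the secret name passed to `modal secret create` and the exact file path are illustrative placeholders, not prescriptive:

```bash
# Sketch only: adjust paths and secret contents to your Modal account.
pip install modal                 # install the Modal CLI
modal setup                       # authenticate the CLI with your Modal account
modal secret create reflector-gpu REFLECTOR_GPU_APIKEY=<your-api-key>
modal run server/gpu/<service>.py # run/deploy the service you changed
```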
|
||||
|
||||
## Using local files
|
||||
|
||||
You can manually process an audio file by calling the process tool:
|
||||
|
||||
```bash
|
||||
uv run python -m reflector.tools.process path/to/audio.wav
|
||||
```
|
||||
|
||||
## Reprocessing any transcription
|
||||
|
||||
```bash
|
||||
uv run -m reflector.tools.process_transcript 81ec38d1-9dd7-43d2-b3f8-51f4d34a07cd --sync
|
||||
```
|
||||
|
||||
## Build-time env variables
|
||||
|
||||
Next.js projects are more used to NEXT_PUBLIC_ prefixed buildtime vars. We don't have those for the reason we need to serve a customizable prebuilt docker container.
|
||||
Next.js projects are more used to NEXT_PUBLIC_ prefixed buildtime vars. We don't have those for the reason we need to serve a ccustomizable prebuild docker container.
|
||||
|
||||
Instead, all the variables are runtime. Variables needed by the frontend are served to the app at initial render.
|
||||
|
||||
@@ -204,22 +211,3 @@ FEATURE_BROWSE=false
|
||||
# Enable Zulip integration
|
||||
FEATURE_SEND_TO_ZULIP=true
|
||||
```
|
||||
|
||||
## Contribution Guidelines
|
||||
|
||||
All new contributions should be made in a separate branch and go through a Pull Request.
|
||||
[Conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) must be used for the PR title and commits.
|
||||
|
||||
## Future Plans
|
||||
|
||||
- **Multi-language support enhancement**: Default language selection per room/user, automatic language detection improvements, multi-language diarization, and RTL language UI support
|
||||
- **Jitsi integration**: Self-hosted video conferencing rooms with no external API keys, full control over video infrastructure, and enhanced privacy
|
||||
- **Calendar integration**: Google Calendar and Microsoft Outlook synchronization, automatic meeting room creation, and post-meeting transcript delivery
|
||||
- **Enhanced analytics**: Meeting insights dashboard, speaker participation metrics, topic trends over time, and team collaboration patterns
|
||||
- **Advanced AI features**: Real-time sentiment analysis, emotion detection, meeting quality scores, and automated coaching suggestions
|
||||
- **Integration ecosystem**: Slack/Teams notifications, CRM integration (Salesforce, HubSpot), project management tools (Jira, Asana), and knowledge bases (Notion, Confluence)
|
||||
- **Performance improvements**: WebAssembly for client-side processing, edge computing support, and network optimization
|
||||
|
||||
## Legacy Documentation
|
||||
|
||||
The `docs/` folder contains an older Docusaurus-based documentation site. These docs are **no longer actively maintained** and may be outdated. For current installation and deployment instructions, refer to the [`docsv2/`](docsv2/) folder instead.
|
||||
|
||||
@@ -1,321 +0,0 @@
|
||||
# Self-hosted production Docker Compose — single file for everything.
|
||||
#
|
||||
# Usage: ./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
|
||||
# or: docker compose -f docker-compose.selfhosted.yml --profile gpu [--profile ollama-gpu] [--profile garage] [--profile caddy] up -d
|
||||
#
|
||||
# Specialized models (pick ONE — required):
|
||||
# --profile gpu NVIDIA GPU for transcription/diarization/translation
|
||||
# --profile cpu CPU-only for transcription/diarization/translation
|
||||
#
|
||||
# Local LLM (optional — for summarization/topics):
|
||||
# --profile ollama-gpu Local Ollama with NVIDIA GPU
|
||||
# --profile ollama-cpu Local Ollama on CPU only
|
||||
#
|
||||
# Other optional services:
|
||||
# --profile garage Local S3-compatible storage (Garage)
|
||||
# --profile caddy Reverse proxy with auto-SSL
|
||||
#
|
||||
# Prerequisites:
|
||||
# 1. Run ./scripts/setup-selfhosted.sh to generate env files and secrets
|
||||
# 2. Or manually create server/.env and www/.env from the .selfhosted.example templates
|
||||
|
||||
services:
|
||||
# ===========================================================
|
||||
# Always-on core services (no profile required)
|
||||
# ===========================================================
|
||||
|
||||
server:
|
||||
build:
|
||||
context: ./server
|
||||
dockerfile: Dockerfile
|
||||
image: monadicalsas/reflector-backend:latest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:1250:1250"
|
||||
- "50000-50100:50000-50100/udp"
|
||||
env_file:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: server
|
||||
# Docker-internal overrides (always correct inside compose network)
|
||||
DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
||||
REDIS_HOST: redis
|
||||
CELERY_BROKER_URL: redis://redis:6379/1
|
||||
CELERY_RESULT_BACKEND: redis://redis:6379/1
|
||||
HATCHET_CLIENT_SERVER_URL: ""
|
||||
HATCHET_CLIENT_HOST_PORT: ""
|
||||
# Specialized models via gpu/cpu container (aliased as "transcription")
|
||||
TRANSCRIPT_BACKEND: modal
|
||||
TRANSCRIPT_URL: http://transcription:8000
|
||||
TRANSCRIPT_MODAL_API_KEY: selfhosted
|
||||
DIARIZATION_BACKEND: modal
|
||||
DIARIZATION_URL: http://transcription:8000
|
||||
TRANSLATION_BACKEND: modal
|
||||
TRANSLATE_URL: http://transcription:8000
|
||||
# WebRTC: fixed UDP port range for ICE candidates (mapped above)
|
||||
WEBRTC_PORT_RANGE: "50000-50100"
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_started
|
||||
volumes:
|
||||
- server_data:/app/data
|
||||
|
||||
worker:
|
||||
build:
|
||||
context: ./server
|
||||
dockerfile: Dockerfile
|
||||
image: monadicalsas/reflector-backend:latest
|
||||
restart: unless-stopped
|
||||
env_file:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: worker
|
||||
DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
||||
REDIS_HOST: redis
|
||||
CELERY_BROKER_URL: redis://redis:6379/1
|
||||
CELERY_RESULT_BACKEND: redis://redis:6379/1
|
||||
HATCHET_CLIENT_SERVER_URL: ""
|
||||
HATCHET_CLIENT_HOST_PORT: ""
|
||||
TRANSCRIPT_BACKEND: modal
|
||||
TRANSCRIPT_URL: http://transcription:8000
|
||||
TRANSCRIPT_MODAL_API_KEY: selfhosted
|
||||
DIARIZATION_BACKEND: modal
|
||||
DIARIZATION_URL: http://transcription:8000
|
||||
TRANSLATION_BACKEND: modal
|
||||
TRANSLATE_URL: http://transcription:8000
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_started
|
||||
volumes:
|
||||
- server_data:/app/data
|
||||
|
||||
beat:
|
||||
build:
|
||||
context: ./server
|
||||
dockerfile: Dockerfile
|
||||
image: monadicalsas/reflector-backend:latest
|
||||
restart: unless-stopped
|
||||
env_file:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: beat
|
||||
DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
||||
REDIS_HOST: redis
|
||||
CELERY_BROKER_URL: redis://redis:6379/1
|
||||
CELERY_RESULT_BACKEND: redis://redis:6379/1
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_started
|
||||
|
||||
web:
|
||||
build:
|
||||
context: ./www
|
||||
dockerfile: Dockerfile
|
||||
image: monadicalsas/reflector-frontend:latest
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:3000:3000"
|
||||
env_file:
|
||||
- ./www/.env
|
||||
environment:
|
||||
NODE_ENV: production
|
||||
NODE_TLS_REJECT_UNAUTHORIZED: "0"
|
||||
SERVER_API_URL: http://server:1250
|
||||
KV_URL: redis://redis:6379
|
||||
KV_USE_TLS: "false"
|
||||
NEXTAUTH_URL_INTERNAL: http://localhost:3000
|
||||
depends_on:
|
||||
- redis
|
||||
|
||||
redis:
|
||||
image: redis:7.2-alpine
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
interval: 30s
|
||||
timeout: 3s
|
||||
retries: 3
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
|
||||
postgres:
|
||||
image: postgres:17-alpine
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_USER: reflector
|
||||
POSTGRES_PASSWORD: reflector
|
||||
POSTGRES_DB: reflector
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U reflector"]
|
||||
interval: 30s
|
||||
timeout: 3s
|
||||
retries: 3
|
||||
|
||||
# ===========================================================
|
||||
# Specialized model containers (transcription, diarization, translation)
|
||||
# Both gpu and cpu get alias "transcription" so server config never changes.
|
||||
# ===========================================================
|
||||
|
||||
gpu:
|
||||
build:
|
||||
context: ./gpu/self_hosted
|
||||
dockerfile: Dockerfile
|
||||
profiles: [gpu]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:8000:8000"
|
||||
environment:
|
||||
HF_TOKEN: ${HF_TOKEN:-}
|
||||
volumes:
|
||||
- gpu_cache:/root/.cache
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: all
|
||||
capabilities: [gpu]
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 120s
|
||||
networks:
|
||||
default:
|
||||
aliases:
|
||||
- transcription
|
||||
|
||||
cpu:
|
||||
build:
|
||||
context: ./gpu/self_hosted
|
||||
dockerfile: Dockerfile.cpu
|
||||
profiles: [cpu]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:8000:8000"
|
||||
environment:
|
||||
HF_TOKEN: ${HF_TOKEN:-}
|
||||
volumes:
|
||||
- gpu_cache:/root/.cache
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 120s
|
||||
networks:
|
||||
default:
|
||||
aliases:
|
||||
- transcription
|
||||
|
||||
# ===========================================================
|
||||
# Ollama — local LLM for summarization & topic detection
|
||||
# Only started with --ollama-gpu or --ollama-cpu modes.
|
||||
# ===========================================================
|
||||
|
||||
ollama:
|
||||
image: ollama/ollama:latest
|
||||
profiles: [ollama-gpu]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:11435:11435"
|
||||
volumes:
|
||||
- ollama_data:/root/.ollama
|
||||
environment:
|
||||
OLLAMA_HOST: "0.0.0.0:11435"
|
||||
OLLAMA_KEEP_ALIVE: "24h"
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: all
|
||||
capabilities: [gpu]
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11435/api/tags"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
ollama-cpu:
|
||||
image: ollama/ollama:latest
|
||||
profiles: [ollama-cpu]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "127.0.0.1:11435:11435"
|
||||
volumes:
|
||||
- ollama_data:/root/.ollama
|
||||
environment:
|
||||
OLLAMA_HOST: "0.0.0.0:11435"
|
||||
OLLAMA_KEEP_ALIVE: "24h" # keep model loaded to avoid reload delays
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11435/api/tags"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
# ===========================================================
|
||||
# Garage — local S3-compatible object storage (optional)
|
||||
# ===========================================================
|
||||
|
||||
garage:
|
||||
image: dxflrs/garage:v1.1.0
|
||||
profiles: [garage]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "3900:3900" # S3 API
|
||||
- "3903:3903" # Admin API
|
||||
volumes:
|
||||
- garage_data:/var/lib/garage/data
|
||||
- garage_meta:/var/lib/garage/meta
|
||||
- ./data/garage.toml:/etc/garage.toml:ro
|
||||
healthcheck:
|
||||
test: ["CMD", "/garage", "stats"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 5s
|
||||
|
||||
# ===========================================================
|
||||
# Caddy — reverse proxy with automatic SSL (optional)
|
||||
# Maps 80:80 and 443:443 — only exposed ports in the stack.
|
||||
# ===========================================================
|
||||
|
||||
caddy:
|
||||
image: caddy:2-alpine
|
||||
profiles: [caddy]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
||||
- caddy_data:/data
|
||||
- caddy_config:/config
|
||||
depends_on:
|
||||
- web
|
||||
- server
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
redis_data:
|
||||
server_data:
|
||||
gpu_cache:
|
||||
garage_data:
|
||||
garage_meta:
|
||||
ollama_data:
|
||||
caddy_data:
|
||||
caddy_config:
|
||||
|
||||
networks:
|
||||
default:
|
||||
attachable: true
|
||||
@@ -1,241 +0,0 @@
|
||||
# Self-contained standalone compose for fully local deployment (no external dependencies).
|
||||
# Usage: docker compose -f docker-compose.standalone.yml up -d
|
||||
#
|
||||
# On Linux with NVIDIA GPU, also pass: --profile ollama-gpu
|
||||
# On Linux without GPU: --profile ollama-cpu
|
||||
# On Mac: Ollama runs natively (Metal GPU) — no profile needed, services here unused.
|
||||
|
||||
services:
|
||||
caddy:
|
||||
image: caddy:2-alpine
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "3043:443"
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
volumes:
|
||||
- ./Caddyfile:/etc/caddy/Caddyfile:ro
|
||||
- caddy_data:/data
|
||||
- caddy_config:/config
|
||||
depends_on:
|
||||
- web
|
||||
- server
|
||||
|
||||
server:
|
||||
build:
|
||||
context: server
|
||||
ports:
|
||||
- "1250:1250"
|
||||
- "50000-50100:50000-50100/udp"
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
volumes:
|
||||
- ./server/:/app/
|
||||
- /app/.venv
|
||||
env_file:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: server
|
||||
# Docker DNS names instead of localhost
|
||||
DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
||||
REDIS_HOST: redis
|
||||
CELERY_BROKER_URL: redis://redis:6379/1
|
||||
CELERY_RESULT_BACKEND: redis://redis:6379/1
|
||||
# Standalone doesn't run Hatchet
|
||||
HATCHET_CLIENT_SERVER_URL: ""
|
||||
HATCHET_CLIENT_HOST_PORT: ""
|
||||
# Self-hosted transcription/diarization via CPU service
|
||||
TRANSCRIPT_BACKEND: modal
|
||||
TRANSCRIPT_URL: http://cpu:8000
|
||||
TRANSCRIPT_MODAL_API_KEY: local
|
||||
DIARIZATION_BACKEND: modal
|
||||
DIARIZATION_URL: http://cpu:8000
|
||||
# Caddy reverse proxy prefix
|
||||
ROOT_PATH: /server-api
|
||||
# WebRTC: fixed UDP port range for ICE candidates (mapped above).
|
||||
# WEBRTC_HOST is set by setup-standalone.sh in server/.env (LAN IP detection).
|
||||
WEBRTC_PORT_RANGE: "50000-50100"
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_started
|
||||
|
||||
worker:
|
||||
build:
|
||||
context: server
|
||||
volumes:
|
||||
- ./server/:/app/
|
||||
- /app/.venv
|
||||
env_file:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: worker
|
||||
HATCHET_CLIENT_SERVER_URL: ""
|
||||
HATCHET_CLIENT_HOST_PORT: ""
|
||||
TRANSCRIPT_BACKEND: modal
|
||||
TRANSCRIPT_URL: http://cpu:8000
|
||||
TRANSCRIPT_MODAL_API_KEY: local
|
||||
DIARIZATION_BACKEND: modal
|
||||
DIARIZATION_URL: http://cpu:8000
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_started
|
||||
|
||||
beat:
|
||||
build:
|
||||
context: server
|
||||
volumes:
|
||||
- ./server/:/app/
|
||||
- /app/.venv
|
||||
env_file:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: beat
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_started
|
||||
|
||||
redis:
|
||||
image: redis:7.2
|
||||
ports:
|
||||
- 6379:6379
|
||||
|
||||
postgres:
|
||||
image: postgres:17
|
||||
command: postgres -c 'max_connections=200'
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
POSTGRES_USER: reflector
|
||||
POSTGRES_PASSWORD: reflector
|
||||
POSTGRES_DB: reflector
|
||||
volumes:
|
||||
- ./data/postgres:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -d reflector -U reflector"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 15s
|
||||
|
||||
web:
|
||||
image: reflector-frontend-standalone
|
||||
build:
|
||||
context: ./www
|
||||
ports:
|
||||
- "3000:3000"
|
||||
command: ["node", "server.js"]
|
||||
env_file:
|
||||
- ./www/.env.local
|
||||
environment:
|
||||
NODE_ENV: production
|
||||
# API_URL, WEBSOCKET_URL, SITE_URL, NEXTAUTH_URL from www/.env.local (allows HTTPS)
|
||||
# Server-side URLs (docker-network internal)
|
||||
SERVER_API_URL: http://server:1250
|
||||
KV_URL: redis://redis:6379
|
||||
KV_USE_TLS: "false"
|
||||
# Standalone: no external auth provider
|
||||
FEATURE_REQUIRE_LOGIN: "false"
|
||||
FEATURE_ROOMS: "false"
|
||||
NEXTAUTH_SECRET: standalone-local-secret
|
||||
# Nullify partial auth vars inherited from base env_file
|
||||
AUTHENTIK_ISSUER: ""
|
||||
AUTHENTIK_REFRESH_TOKEN_URL: ""
|
||||
|
||||
garage:
|
||||
image: dxflrs/garage:v1.1.0
|
||||
ports:
|
||||
- "3900:3900" # S3 API
|
||||
- "3903:3903" # Admin API
|
||||
volumes:
|
||||
- garage_data:/var/lib/garage/data
|
||||
- garage_meta:/var/lib/garage/meta
|
||||
- ./data/garage.toml:/etc/garage.toml:ro
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "/garage", "stats"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 5s
|
||||
|
||||
cpu:
|
||||
build:
|
||||
context: ./gpu/self_hosted
|
||||
dockerfile: Dockerfile.cpu
|
||||
ports:
|
||||
- "8100:8000"
|
||||
volumes:
|
||||
- gpu_cache:/root/.cache
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 120s
|
||||
|
||||
gpu-nvidia:
|
||||
build:
|
||||
context: ./gpu/self_hosted
|
||||
profiles: ["gpu-nvidia"]
|
||||
volumes:
|
||||
- gpu_cache:/root/.cache
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: all
|
||||
capabilities: [gpu]
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8000/docs"]
|
||||
interval: 15s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 120s
|
||||
|
||||
ollama:
|
||||
image: ollama/ollama:latest
|
||||
profiles: ["ollama-gpu"]
|
||||
ports:
|
||||
- "11434:11434"
|
||||
volumes:
|
||||
- ollama_data:/root/.ollama
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: all
|
||||
capabilities: [gpu]
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
ollama-cpu:
|
||||
image: ollama/ollama:latest
|
||||
profiles: ["ollama-cpu"]
|
||||
ports:
|
||||
- "11434:11434"
|
||||
volumes:
|
||||
- ollama_data:/root/.ollama
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
volumes:
|
||||
garage_data:
|
||||
garage_meta:
|
||||
ollama_data:
|
||||
gpu_cache:
|
||||
caddy_data:
|
||||
caddy_config:
|
||||
@@ -2,7 +2,8 @@ services:
|
||||
server:
|
||||
build:
|
||||
context: server
|
||||
network_mode: host
|
||||
ports:
|
||||
- 1250:1250
|
||||
volumes:
|
||||
- ./server/:/app/
|
||||
- /app/.venv
|
||||
@@ -10,12 +11,6 @@ services:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: server
|
||||
DATABASE_URL: postgresql+asyncpg://reflector:reflector@localhost:5432/reflector
|
||||
REDIS_HOST: localhost
|
||||
CELERY_BROKER_URL: redis://localhost:6379/1
|
||||
CELERY_RESULT_BACKEND: redis://localhost:6379/1
|
||||
HATCHET_CLIENT_SERVER_URL: http://localhost:8889
|
||||
HATCHET_CLIENT_HOST_PORT: localhost:7078
|
||||
|
||||
worker:
|
||||
build:
|
||||
@@ -27,11 +22,6 @@ services:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: worker
|
||||
HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
|
||||
HATCHET_CLIENT_HOST_PORT: hatchet:7077
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_started
|
||||
|
||||
beat:
|
||||
build:
|
||||
@@ -43,9 +33,6 @@ services:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: beat
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_started
|
||||
|
||||
hatchet-worker-cpu:
|
||||
build:
|
||||
@@ -57,8 +44,6 @@ services:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: hatchet-worker-cpu
|
||||
HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
|
||||
HATCHET_CLIENT_HOST_PORT: hatchet:7077
|
||||
depends_on:
|
||||
hatchet:
|
||||
condition: service_healthy
|
||||
@@ -72,8 +57,6 @@ services:
|
||||
- ./server/.env
|
||||
environment:
|
||||
ENTRYPOINT: hatchet-worker-llm
|
||||
HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
|
||||
HATCHET_CLIENT_HOST_PORT: hatchet:7077
|
||||
depends_on:
|
||||
hatchet:
|
||||
condition: service_healthy
|
||||
@@ -83,21 +66,19 @@ services:
|
||||
ports:
|
||||
- 6379:6379
|
||||
web:
|
||||
build:
|
||||
context: ./www
|
||||
dockerfile: Dockerfile
|
||||
image: node:22-alpine
|
||||
ports:
|
||||
- "3000:3000"
|
||||
command: sh -c "corepack enable && pnpm install && pnpm dev"
|
||||
restart: unless-stopped
|
||||
working_dir: /app
|
||||
volumes:
|
||||
- ./www:/app/
|
||||
- /app/node_modules
|
||||
env_file:
|
||||
- ./www/.env.local
|
||||
environment:
|
||||
NODE_ENV: development
|
||||
SERVER_API_URL: http://host.docker.internal:1250
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
depends_on:
|
||||
redis:
|
||||
condition: service_started
|
||||
- NODE_ENV=development
|
||||
|
||||
postgres:
|
||||
image: postgres:17
|
||||
@@ -113,14 +94,13 @@ services:
|
||||
- ./server/docker/init-hatchet-db.sql:/docker-entrypoint-initdb.d/init-hatchet-db.sql:ro
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -d reflector -U reflector"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 15s
|
||||
interval: 10s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 10s
|
||||
|
||||
hatchet:
|
||||
image: ghcr.io/hatchet-dev/hatchet/hatchet-lite:latest
|
||||
restart: on-failure
|
||||
ports:
|
||||
- "8889:8888"
|
||||
- "7078:7077"
|
||||
@@ -128,7 +108,7 @@ services:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
DATABASE_URL: "postgresql://reflector:reflector@postgres:5432/hatchet?sslmode=disable&connect_timeout=30"
|
||||
DATABASE_URL: "postgresql://reflector:reflector@postgres:5432/hatchet?sslmode=disable"
|
||||
SERVER_AUTH_COOKIE_DOMAIN: localhost
|
||||
SERVER_AUTH_COOKIE_INSECURE: "t"
|
||||
SERVER_GRPC_BIND_ADDRESS: "0.0.0.0"
|
||||
@@ -148,5 +128,6 @@ services:
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
|
||||
volumes:
|
||||
next_cache:
|
||||
networks:
|
||||
default:
|
||||
attachable: true
|
||||
|
||||
@@ -1,310 +0,0 @@
|
||||
---
|
||||
sidebar_position: 2
|
||||
title: Standalone Local Setup
|
||||
---
|
||||
|
||||
# Standalone Local Setup
|
||||
|
||||
**The goal**: a clueless user clones the repo, runs one script, and has a working Reflector instance locally. No cloud accounts, no API keys, no manual env file editing.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/monadical-sas/reflector.git
|
||||
cd reflector
|
||||
./scripts/setup-standalone.sh
|
||||
```
|
||||
|
||||
On Ubuntu, the setup script installs Docker automatically if missing.
|
||||
|
||||
The script is idempotent — safe to re-run at any time. It detects what's already set up and skips completed steps.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Docker with Compose V2 plugin (Docker Desktop, OrbStack, or Docker Engine + compose plugin)
|
||||
- Mac (Apple Silicon) or Linux
|
||||
- 16GB+ RAM (32GB recommended for 14B LLM models)
|
||||
- **Mac only**: [Ollama](https://ollama.com/download) installed (`brew install ollama`)
|
||||
|
||||
### Installing Docker (if not already installed)
|
||||
|
||||
**Ubuntu**: The setup script runs `install-docker-ubuntu.sh` automatically when Docker is missing. Or run it manually:
|
||||
|
||||
```bash
|
||||
./scripts/install-docker-ubuntu.sh
|
||||
```
|
||||
|
||||
**Mac**: Install [Docker Desktop](https://www.docker.com/products/docker-desktop/) or [OrbStack](https://orbstack.dev/).
|
||||
|
||||
## What the script does
|
||||
|
||||
### 1. LLM inference via Ollama
|
||||
|
||||
**Mac**: starts Ollama natively (Metal GPU acceleration). Pulls the LLM model. Docker containers reach it via `host.docker.internal:11435`.
|
||||
|
||||
**Linux**: starts containerized Ollama via `docker-compose.standalone.yml` profile (`ollama-gpu` with NVIDIA, `ollama-cpu` without). Pulls model inside the container.
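The model pull can also be reproduced by hand. This is only a sketch; the actual model name is chosen by the setup script and is shown here as a placeholder:

```bash
# Linux: containerized Ollama (use the ollama-gpu profile/service on NVIDIA hosts)
docker compose -f docker-compose.standalone.yml --profile ollama-cpu up -d ollama-cpu
docker compose -f docker-compose.standalone.yml --profile ollama-cpu exec ollama-cpu ollama pull <model-name>

# Mac: native Ollama (Metal acceleration); containers reach it via host.docker.internal:11435
ollama pull <model-name>
```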
|
||||
|
||||
### 2. Environment files
|
||||
|
||||
Generates `server/.env` and `www/.env.local` with standalone defaults:
|
||||
|
||||
**`server/.env`** — key settings:
|
||||
|
||||
| Variable | Value | Why |
|
||||
| --------------------- | -------------------------------------------------- | ----------------------------------- |
|
||||
| `DATABASE_URL` | `postgresql+asyncpg://...@postgres:5432/reflector` | Docker-internal hostname |
|
||||
| `REDIS_HOST` | `redis` | Docker-internal hostname |
|
||||
| `CELERY_BROKER_URL` | `redis://redis:6379/1` | Docker-internal hostname |
|
||||
| `AUTH_BACKEND` | `none` | No Authentik in standalone |
|
||||
| `TRANSCRIPT_BACKEND` | `modal` | HTTP API to self-hosted CPU service |
|
||||
| `TRANSCRIPT_URL` | `http://cpu:8000` | Docker-internal CPU service |
|
||||
| `DIARIZATION_BACKEND` | `modal` | HTTP API to self-hosted CPU service |
|
||||
| `DIARIZATION_URL` | `http://cpu:8000` | Docker-internal CPU service |
|
||||
| `TRANSLATION_BACKEND` | `passthrough` | No Modal |
|
||||
| `LLM_URL` | `http://host.docker.internal:11435/v1` (Mac) | Ollama endpoint |
|
||||
|
||||
**`www/.env.local`** — key settings:
|
||||
|
||||
| Variable | Value |
|
||||
| ----------------------- | ------------------------------------------ |
|
||||
| `API_URL` | `https://localhost:3043` or `https://YOUR_IP:3043` (Linux) |
|
||||
| `SERVER_API_URL` | `http://server:1250` |
|
||||
| `WEBSOCKET_URL` | `auto` |
|
||||
| `FEATURE_REQUIRE_LOGIN` | `false` |
|
||||
| `NEXTAUTH_SECRET` | `standalone-dev-secret-not-for-production` |
|
||||
|
||||
If env files already exist (including symlinks from worktree setup), the script resolves symlinks and ensures all standalone-critical vars are set. Existing vars not related to standalone are preserved.
|
||||
|
||||
### 3. Object storage (Garage)
|
||||
|
||||
Standalone uses [Garage](https://garagehq.deuxfleurs.fr/) — a lightweight S3-compatible object store running in Docker. The setup script starts Garage, initializes the layout, creates a bucket and access key, and writes the credentials to `server/.env`.
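If you ever need to redo this by hand, the script's work boils down to commands along these lines (a sketch; the exact flags, capacity, and key name may differ from what the script actually runs):

```bash
COMPOSE="docker compose -f docker-compose.standalone.yml"
$COMPOSE exec garage /garage status                        # note the printed node ID
$COMPOSE exec garage /garage layout assign -z dc1 -c 10G <node-id>
$COMPOSE exec garage /garage layout apply --version 1
$COMPOSE exec garage /garage bucket create reflector-media
$COMPOSE exec garage /garage key create reflector-key      # prints the access key ID and secret
$COMPOSE exec garage /garage bucket allow --read --write reflector-media --key reflector-key
```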
|
||||
|
||||
**`server/.env`** — storage settings added by the script:
|
||||
|
||||
| Variable | Value | Why |
|
||||
| ------------------------------------------ | -------------------- | ------------------------------------- |
|
||||
| `TRANSCRIPT_STORAGE_BACKEND` | `aws` | Uses the S3-compatible storage driver |
|
||||
| `TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL` | `http://garage:3900` | Docker-internal Garage S3 API |
|
||||
| `TRANSCRIPT_STORAGE_AWS_BUCKET_NAME` | `reflector-media` | Created by the script |
|
||||
| `TRANSCRIPT_STORAGE_AWS_REGION` | `garage` | Must match Garage config |
|
||||
| `TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID` | _(auto-generated)_ | Created by `garage key create` |
|
||||
| `TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY` | _(auto-generated)_ | Created by `garage key create` |
|
||||
|
||||
The `TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL` setting enables S3-compatible backends. When set, the storage driver uses path-style addressing and routes all requests to the custom endpoint. When unset (production AWS), behavior is unchanged.
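One way to sanity-check the storage setup from the host is the AWS CLI pointed at the published S3 port (3900), using the credentials the script wrote to `server/.env`. This is an optional verification step, not something the script runs:

```bash
export AWS_ACCESS_KEY_ID=<TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID from server/.env>
export AWS_SECRET_ACCESS_KEY=<TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY from server/.env>
aws --endpoint-url http://localhost:3900 --region garage s3 ls s3://reflector-media
```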
|
||||
|
||||
Garage config template lives at `scripts/garage.toml`. The setup script generates `data/garage.toml` (gitignored) with a random RPC secret and mounts it read-only into the container. Single-node, `replication_factor=1`.
|
||||
|
||||
> **Note**: Presigned URLs embed the Garage Docker hostname (`http://garage:3900`). This is fine — the server proxies S3 responses to the browser. Modal GPU workers cannot reach internal Garage, but standalone doesn't use Modal.
|
||||
|
||||
### 4. Transcription and diarization
|
||||
|
||||
Standalone runs the self-hosted ML service (`gpu/self_hosted/`) in a CPU-only Docker container named `cpu`. This is the same FastAPI service used for Modal.com GPU deployments, but built with `Dockerfile.cpu` (no NVIDIA CUDA dependencies). The compose service is named `cpu` (not `gpu`) to make clear it runs without GPU acceleration; the source code lives in `gpu/self_hosted/` because it's shared with the GPU deployment.
|
||||
|
||||
The `modal` backend name is reused — it just means "HTTP API client". Setting `TRANSCRIPT_URL` / `DIARIZATION_URL` to `http://cpu:8000` routes requests to the local container instead of Modal.com.
|
||||
|
||||
On first start, the service downloads pyannote speaker diarization models (~1GB) from a public S3 bundle. Models are cached in a Docker volume (`gpu_cache`) so subsequent starts are fast. No HuggingFace token or API key needed.
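To confirm the service is up (or to watch the initial model download), you can hit the same endpoint the compose healthcheck uses; host port 8100 maps to the container's port 8000:

```bash
curl -f http://localhost:8100/docs >/dev/null && echo "cpu service is up"
docker compose -f docker-compose.standalone.yml logs -f cpu   # watch the ~1GB model download
```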
|
||||
|
||||
> **Performance**: CPU-only transcription and diarization work but are slow (~15 min for a 3 min file). For faster processing on Linux with NVIDIA GPU, use `--profile gpu-nvidia` instead (see `docker-compose.standalone.yml`).
|
||||
|
||||
### 5. Docker services
|
||||
|
||||
```bash
|
||||
docker compose up -d postgres redis garage cpu server worker beat web
|
||||
```
|
||||
|
||||
All services start in a single command. Garage and `cpu` are already started by earlier steps but included for idempotency. No Hatchet in standalone mode — LLM processing (summaries, topics, titles) runs via Celery tasks.
|
||||
|
||||
### 6. Database migrations
|
||||
|
||||
Run automatically by the `server` container on startup (`runserver.sh` calls `alembic upgrade head`). No manual step needed.
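If you ever need to run migrations by hand, something along these lines should work (depending on how the image is built, you may need to drop the `uv run` prefix):

```bash
docker compose -f docker-compose.standalone.yml exec server uv run alembic upgrade head
```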
|
||||
|
||||
### 7. Health check
|
||||
|
||||
Verifies the following (each check can also be run by hand; see the sketch after this list):
|
||||
|
||||
- CPU service responds (transcription + diarization ready)
|
||||
- Server responds at `http://localhost:1250/health`
|
||||
- Frontend serves at `http://localhost:3000` (or via Caddy at `https://localhost:3043`)
|
||||
- LLM endpoint reachable from inside containers
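From the host, the equivalent manual probes look like this (the Ollama port depends on platform):

```bash
curl -sf http://localhost:1250/health && echo "server OK"
curl -sf http://localhost:3000 >/dev/null && echo "frontend OK"
curl -sf http://localhost:8100/docs >/dev/null && echo "cpu (transcription/diarization) OK"
# Ollama: 11435 for native Mac Ollama, 11434 for the Linux container profiles
curl -sf http://localhost:11435/api/tags || curl -sf http://localhost:11434/api/tags
```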
|
||||
|
||||
## Services
|
||||
|
||||
| Service | Port | Purpose |
|
||||
| ---------- | ---------- | -------------------------------------------------- |
|
||||
| `caddy` | 3043 | Reverse proxy (HTTPS, self-signed cert) |
|
||||
| `server` | 1250 | FastAPI backend (runs migrations on start) |
|
||||
| `web` | 3000 | Next.js frontend |
|
||||
| `postgres` | 5432 | PostgreSQL database |
|
||||
| `redis` | 6379 | Cache + Celery broker |
|
||||
| `garage` | 3900, 3903 | S3-compatible object storage (S3 API + admin API) |
|
||||
| `cpu` | — | Self-hosted transcription + diarization (CPU-only) |
|
||||
| `worker` | — | Celery worker (live pipeline post-processing) |
|
||||
| `beat` | — | Celery beat (scheduled tasks) |
|
||||
|
||||
## Testing programmatically
|
||||
|
||||
After the setup script completes, verify the full pipeline (upload, transcription, diarization, LLM summary) via the API:
|
||||
|
||||
```bash
|
||||
# 1. Create a transcript
|
||||
TRANSCRIPT_ID=$(curl -s -X POST 'http://localhost:1250/v1/transcripts' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"name":"test-upload"}' | python3 -c "import sys,json; print(json.load(sys.stdin)['id'])")
|
||||
echo "Created: $TRANSCRIPT_ID"
|
||||
|
||||
# 2. Upload an audio file (single-chunk upload)
|
||||
curl -s "http://localhost:1250/v1/transcripts/${TRANSCRIPT_ID}/record/upload?chunk_number=0&total_chunks=1" \
|
||||
-X POST -F "chunk=@/path/to/audio.mp3"
|
||||
|
||||
# 3. Poll until processing completes (status: ended or error)
|
||||
while true; do
|
||||
STATUS=$(curl -s "http://localhost:1250/v1/transcripts/${TRANSCRIPT_ID}" \
|
||||
| python3 -c "import sys,json; print(json.load(sys.stdin)['status'])")
|
||||
echo "Status: $STATUS"
|
||||
case "$STATUS" in ended|error) break;; esac
|
||||
sleep 10
|
||||
done
|
||||
|
||||
# 4. Check the result
|
||||
curl -s "http://localhost:1250/v1/transcripts/${TRANSCRIPT_ID}" | python3 -m json.tool
|
||||
```
|
||||
|
||||
Expected result: status `ended`, auto-generated `title`, `short_summary`, `long_summary`, and `transcript` text with `Speaker 0` / `Speaker 1` labels.
|
||||
|
||||
CPU-only processing is slow (~15 min for a 3 min audio file). Diarization finishes in ~3 min, transcription takes the rest.
|
||||
|
||||
## Enabling HTTPS (droplet via IP)
|
||||
|
||||
To serve Reflector over HTTPS on a droplet accessed by IP (self-signed certificate):
|
||||
|
||||
1. **Copy the Caddyfile** (no edits needed — `:443` catches all HTTPS inside container, mapped to host port 3043):
|
||||
```bash
|
||||
cp Caddyfile.standalone.example Caddyfile
|
||||
```
|
||||
|
||||
2. **Update `www/.env.local`** with HTTPS URLs (port 3043):
|
||||
```env
|
||||
API_URL=https://YOUR_IP:3043
|
||||
WEBSOCKET_URL=wss://YOUR_IP:3043
|
||||
SITE_URL=https://YOUR_IP:3043
|
||||
NEXTAUTH_URL=https://YOUR_IP:3043
|
||||
```
|
||||
|
||||
3. **Restart services**:
|
||||
```bash
|
||||
docker compose -f docker-compose.standalone.yml --profile ollama-cpu up -d
|
||||
```
|
||||
(Use `ollama-gpu` instead of `ollama-cpu` if you have an NVIDIA GPU.)
|
||||
|
||||
4. **Access** at `https://YOUR_IP:3043`. The browser will warn about the self-signed cert — click **Advanced** → **Proceed to YOUR_IP (unsafe)**. All traffic (page, API, WebSocket) uses the same origin, so accepting once is enough.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### ERR_SSL_PROTOCOL_ERROR when accessing https://YOUR_IP
|
||||
|
||||
You do **not** need a domain — the setup works with an IP address. This error usually means Caddy isn't serving TLS on port 3043. Check in order:
|
||||
|
||||
1. **Caddyfile** — must use the `:443` catch-all (container-internal; Docker maps host 3043 → container 443):
|
||||
```bash
|
||||
cp Caddyfile.standalone.example Caddyfile
|
||||
```
|
||||
|
||||
2. **Firewall** — allow port 3043 (common on DigitalOcean):
|
||||
```bash
|
||||
sudo ufw allow 3043
|
||||
sudo ufw status
|
||||
```
|
||||
|
||||
3. **Caddy running** — verify and restart:
|
||||
```bash
|
||||
docker compose -f docker-compose.standalone.yml ps
|
||||
docker compose -f docker-compose.standalone.yml logs caddy --tail 20
|
||||
docker compose -f docker-compose.standalone.yml --profile ollama-cpu up -d
|
||||
```
|
||||
|
||||
4. **Test from the droplet** — if this works, the issue is external (firewall, network):
|
||||
```bash
|
||||
curl -vk https://localhost:3043
|
||||
```
|
||||
|
||||
5. **localhost works but external IP fails** — Re-run the setup script; it generates a Caddyfile with your droplet IP explicitly, so Caddy provisions the cert at startup:
|
||||
```bash
|
||||
./scripts/setup-standalone.sh
|
||||
```
|
||||
Or manually create `Caddyfile` with your IP (replace 138.197.162.116):
|
||||
```
|
||||
https://138.197.162.116, localhost {
|
||||
tls internal
|
||||
handle /v1/* { reverse_proxy server:1250 }
|
||||
handle /health { reverse_proxy server:1250 }
|
||||
handle { reverse_proxy web:3000 }
|
||||
}
|
||||
```
|
||||
Then restart: `docker compose -f docker-compose.standalone.yml --profile ollama-cpu up -d`
|
||||
|
||||
6. **Still failing?** Try HTTP (no TLS) — create `Caddyfile`:
|
||||
```
|
||||
:80 {
|
||||
handle /v1/* { reverse_proxy server:1250 }
|
||||
handle /health { reverse_proxy server:1250 }
|
||||
handle { reverse_proxy web:3000 }
|
||||
}
|
||||
```
|
||||
Update `www/.env.local`: `API_URL=http://YOUR_IP:3043`, `WEBSOCKET_URL=ws://YOUR_IP:3043`, `SITE_URL=http://YOUR_IP:3043`, `NEXTAUTH_URL=http://YOUR_IP:3043`. Restart, then access `http://YOUR_IP:3043`.
|
||||
|
||||
### Docker not ready
|
||||
|
||||
If setup fails with "Docker not ready", on Ubuntu run `./scripts/install-docker-ubuntu.sh`. If Docker is installed but you're not root, run `newgrp docker` then run the setup script again.
|
||||
|
||||
### Port conflicts (most common issue)
|
||||
|
||||
If the frontend or backend behaves unexpectedly (e.g., env vars seem ignored, changes don't take effect), **check for port conflicts first**:
|
||||
|
||||
```bash
|
||||
# Check what's listening on key ports
|
||||
lsof -i :3000 # frontend
|
||||
lsof -i :1250 # backend
|
||||
lsof -i :5432 # postgres
|
||||
lsof -i :3900 # Garage S3 API
|
||||
lsof -i :6379 # Redis
|
||||
|
||||
# Kill stale processes on a port
|
||||
lsof -ti :3000 | xargs kill
|
||||
```
|
||||
|
||||
Common causes:
|
||||
|
||||
- A stale `next dev` or `pnpm dev` process from another terminal/worktree
|
||||
- Another Docker Compose project (different worktree) with containers on the same ports — the setup script only manages its own project; containers from other projects must be stopped manually (`docker ps` to find them, `docker stop` to kill them)
|
||||
|
||||
The setup script checks ports 3000, 1250, 5432, 6379, 3900, 3903 for conflicts before starting services. It ignores OrbStack/Docker Desktop port forwarding processes (which always bind these ports but are not real conflicts).
|
||||
|
||||
### OrbStack false port-conflict warnings (Mac)
|
||||
|
||||
If you use OrbStack as your Docker runtime, `lsof` will show OrbStack binding ports like 3000, 1250, etc. even when no containers are running. This is OrbStack's port forwarding mechanism — not a real conflict. The setup script filters these out automatically.
|
||||
|
||||
### Re-enabling authentication
|
||||
|
||||
Standalone runs without authentication (`FEATURE_REQUIRE_LOGIN=false`, `AUTH_BACKEND=none`). To re-enable:
|
||||
|
||||
1. In `www/.env.local`: set `FEATURE_REQUIRE_LOGIN=true`, uncomment `AUTHENTIK_ISSUER` and `AUTHENTIK_REFRESH_TOKEN_URL`
|
||||
2. In `server/.env`: set `AUTH_BACKEND=authentik` (or your backend), configure `AUTH_JWT_AUDIENCE`
|
||||
3. Restart: `docker compose -f docker-compose.standalone.yml up -d --force-recreate web server`
|
||||
|
||||
## What's NOT covered
|
||||
|
||||
These require external accounts and infrastructure that can't be scripted:
|
||||
|
||||
- **Live meeting rooms** — requires Daily.co account, S3 bucket, IAM roles
|
||||
- **Authentication** — requires Authentik deployment and OAuth configuration
|
||||
- **Hatchet workflows** — requires separate Hatchet setup for multitrack processing
|
||||
- **Production deployment** — see [Deployment Guide](./overview)
|
||||
|
||||
## Current status
|
||||
|
||||
All steps implemented. The setup script handles everything end-to-end:
|
||||
|
||||
- Step 1 (Ollama/LLM) — implemented
|
||||
- Step 2 (environment files) — implemented
|
||||
- Step 3 (object storage / Garage) — implemented
|
||||
- Step 4 (transcription/diarization) — implemented (self-hosted GPU service)
|
||||
- Steps 5-7 (Docker, migrations, health) — implemented
|
||||
- **Unified script**: `scripts/setup-standalone.sh`
|
||||
@@ -1,472 +0,0 @@
|
||||
# How the Self-Hosted Setup Works
|
||||
|
||||
This document explains the internals of the self-hosted deployment: how the setup script orchestrates everything, how the Docker Compose profiles work, how services communicate, and how configuration flows from flags to running containers.
|
||||
|
||||
> For quick-start instructions and flag reference, see [Self-Hosted Production Deployment](selfhosted-production.md).
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Overview](#overview)
|
||||
- [The Setup Script Step by Step](#the-setup-script-step-by-step)
|
||||
- [Docker Compose Profile System](#docker-compose-profile-system)
|
||||
- [Service Architecture](#service-architecture)
|
||||
- [Configuration Flow](#configuration-flow)
|
||||
- [Storage Architecture](#storage-architecture)
|
||||
- [SSL/TLS and Reverse Proxy](#ssltls-and-reverse-proxy)
|
||||
- [Build vs Pull Workflow](#build-vs-pull-workflow)
|
||||
- [Background Task Processing](#background-task-processing)
|
||||
- [Network and Port Layout](#network-and-port-layout)
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The self-hosted deployment runs the entire Reflector platform on a single server using Docker Compose. A single bash script (`scripts/setup-selfhosted.sh`) handles all configuration and orchestration. The key design principles are:
|
||||
|
||||
- **One command to deploy** — flags select which features to enable
|
||||
- **Idempotent** — safe to re-run without losing existing configuration
|
||||
- **Profile-based composition** — Docker Compose profiles activate optional services
|
||||
- **No external dependencies required** — with `--garage` and `--ollama-*`, everything runs locally
|
||||
|
||||
## The Setup Script Step by Step
|
||||
|
||||
The script (`scripts/setup-selfhosted.sh`) runs 7 sequential steps. Here's what each one does and why.
|
||||
|
||||
### Step 0: Prerequisites
|
||||
|
||||
Validates the environment before doing anything:
|
||||
|
||||
- **Docker Compose V2** — checks `docker compose version` output (not the legacy `docker-compose`)
|
||||
- **Docker daemon** — verifies `docker info` succeeds
|
||||
- **NVIDIA GPU** — only checked when `--gpu` or `--ollama-gpu` is used; runs `nvidia-smi` to confirm drivers are installed
|
||||
- **Compose file** — verifies `docker-compose.selfhosted.yml` exists at the expected path
|
||||
|
||||
If any check fails, the script exits with a clear error message and remediation steps.
|
||||
|
||||
### Step 1: Generate Secrets
|
||||
|
||||
Creates cryptographic secrets needed by the backend and frontend:
|
||||
|
||||
- **`SECRET_KEY`** — used by the FastAPI server for session signing (64 hex chars via `openssl rand -hex 32`)
|
||||
- **`NEXTAUTH_SECRET`** — used by Next.js NextAuth for JWT signing
|
||||
|
||||
Secrets are only generated if they don't already exist or are still set to the placeholder value `changeme`. This is what makes the script idempotent for secrets.
|
||||
|
||||
If `--password` is passed, this step also generates a PBKDF2-SHA256 password hash from the provided password. The hash is computed using Python's stdlib (`hashlib.pbkdf2_hmac`) with 100,000 iterations and a random 16-byte salt, producing a hash in the format `pbkdf2:sha256:100000$<salt_hex>$<hash_hex>`.
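
For reference, a hash in that format can be produced with a short stdlib snippet like the one below. This is an illustrative sketch, not the setup script's exact code:

```bash
# Sketch: produce a pbkdf2:sha256:100000$<salt_hex>$<hash_hex> string.
read -rs -p "Password: " PW; echo
python3 -c 'import hashlib, os, sys
pw = sys.argv[1].encode()
salt = os.urandom(16)
digest = hashlib.pbkdf2_hmac("sha256", pw, salt, 100000)
print(f"pbkdf2:sha256:100000${salt.hex()}${digest.hex()}")' "$PW"
```
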
|
||||
|
||||
### Step 2: Generate `server/.env`
|
||||
|
||||
Creates or updates the backend environment file from `server/.env.selfhosted.example`. Sets:
|
||||
|
||||
- **Infrastructure** — PostgreSQL URL, Redis host, Celery broker (all pointing to Docker-internal hostnames)
|
||||
- **Public URLs** — `BASE_URL` and `CORS_ORIGIN` computed from the domain (if `--domain`), IP (if detected on Linux), or `localhost`
|
||||
- **WebRTC** — `WEBRTC_HOST` set to the server's LAN IP so browsers can reach UDP ICE candidates
|
||||
- **Specialized models** — always points to `http://transcription:8000` (the Docker network alias shared by GPU and CPU containers)
|
||||
- **HuggingFace token** — prompts interactively for pyannote model access; writes to root `.env` so Docker Compose can inject it into GPU/CPU containers
|
||||
- **LLM** — if `--ollama-*` is used, configures `LLM_URL` pointing to the Ollama container. Otherwise, warns that the user needs to configure an external LLM
|
||||
- **Public mode** — sets `PUBLIC_MODE=true` so the app is accessible without authentication by default
|
||||
- **Password auth** — if `--password` is passed, sets `AUTH_BACKEND=password`, `PUBLIC_MODE=false`, `ADMIN_EMAIL=admin@localhost`, and `ADMIN_PASSWORD_HASH` (the hash generated in Step 1). The admin user is provisioned in the database on container startup via `runserver.sh`
|
||||
|
||||
The script uses `env_set` for each variable, which either updates an existing line or appends a new one. This means re-running the script updates values in-place without duplicating keys.
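
A minimal sketch of such a helper is shown below (illustrative; the real script's implementation may differ):

```bash
# Update an existing KEY=... line in place, or append it if missing.
env_set() {
  local file="$1" key="$2" value="$3"
  if grep -q "^${key}=" "$file"; then
    sed -i.bak "s|^${key}=.*|${key}=${value}|" "$file" && rm -f "${file}.bak"
  else
    echo "${key}=${value}" >> "$file"
  fi
}

env_set server/.env PUBLIC_MODE true
```
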
|
||||
|
||||
### Step 3: Generate `www/.env`
|
||||
|
||||
Creates or updates the frontend environment file from `www/.env.selfhosted.example`. Sets:
|
||||
|
||||
- **`SITE_URL` / `NEXTAUTH_URL` / `API_URL`** — all set to the same public-facing URL (with `https://` if Caddy is enabled)
|
||||
- **`WEBSOCKET_URL`** — set to `auto`, which tells the frontend to derive the WebSocket URL from the page URL automatically
|
||||
- **`SERVER_API_URL`** — always `http://server:1250` (Docker-internal, used for server-side rendering)
|
||||
- **`KV_URL`** — Redis URL for Next.js caching
|
||||
- **`FEATURE_REQUIRE_LOGIN`** — `false` by default (matches `PUBLIC_MODE=true` on the backend)
|
||||
- **Password auth** — if `--password` is passed, sets `FEATURE_REQUIRE_LOGIN=true` and `AUTH_PROVIDER=credentials`, which tells the frontend to use a local email/password login form instead of Authentik OAuth
|
||||
|
||||
### Step 4: Storage Setup
|
||||
|
||||
Branches based on whether `--garage` was passed:
|
||||
|
||||
**With `--garage` (local S3):**
|
||||
|
||||
1. Generates `data/garage.toml` from a template, injecting a random RPC secret
|
||||
2. Starts only the Garage container (`docker compose --profile garage up -d garage`)
|
||||
3. Waits for the Garage admin API to respond on port 3903
|
||||
4. Assigns the node to a storage layout (1GB capacity, zone `dc1`)
|
||||
5. Creates the `reflector-media` bucket
|
||||
6. Creates an access key named `reflector` and grants it read/write on the bucket
|
||||
7. Writes all S3 credentials (`ENDPOINT_URL`, `BUCKET_NAME`, `REGION`, `ACCESS_KEY_ID`, `SECRET_ACCESS_KEY`) to `server/.env`
|
||||
|
||||
The Garage endpoint is `http://garage:3900` (Docker-internal), and the region is set to `garage` (arbitrary, Garage ignores it). The boto3 client uses path-style addressing when an endpoint URL is configured, which is required for S3-compatible services like Garage.
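
If you want to verify the bucket from the host after setup, something like the following works, assuming the aws CLI is installed (the credentials are the ones the script wrote to `server/.env`, and the port is the published Garage S3 API port):

```bash
export AWS_ACCESS_KEY_ID=$(grep '^TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=' server/.env | cut -d= -f2-)
export AWS_SECRET_ACCESS_KEY=$(grep '^TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=' server/.env | cut -d= -f2-)
aws configure set default.s3.addressing_style path   # Garage needs path-style requests
aws --endpoint-url http://localhost:3900 --region garage s3 ls s3://reflector-media
```
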
|
||||
|
||||
**Without `--garage` (external S3):**
|
||||
|
||||
1. Checks `server/.env` for the four required S3 variables
|
||||
2. If any are missing, prompts interactively for each one
|
||||
3. Optionally prompts for an endpoint URL (for MinIO, Backblaze B2, etc.)
|
||||
|
||||
### Step 5: Caddyfile
|
||||
|
||||
Only runs when `--caddy` or `--domain` is used. Generates a Caddy configuration file:
|
||||
|
||||
**With `--domain`:** Creates a named site block (`reflector.example.com { ... }`). Caddy automatically provisions a Let's Encrypt certificate for this domain. Requires DNS pointing to the server and ports 80/443 open.
|
||||
|
||||
**Without `--domain` (IP access):** Creates a catch-all `:443 { tls internal ... }` block. Caddy generates a self-signed certificate. Browsers will show a security warning.
|
||||
|
||||
Both configurations route:
|
||||
- `/v1/*` and `/health` to the backend (`server:1250`)
|
||||
- Everything else to the frontend (`web:3000`)
|
||||
|
||||
### Step 6: Start Services
|
||||
|
||||
1. **Always builds the GPU/CPU model image** — these are never prebuilt because they contain ML model download logic specific to the host's hardware
|
||||
2. **With `--build`:** Also builds backend (server, worker, beat) and frontend (web) images from source
|
||||
3. **Without `--build`:** Pulls prebuilt images from the Docker registry (`monadicalsas/reflector-backend:latest`, `monadicalsas/reflector-frontend:latest`)
|
||||
4. **Starts all services** — `docker compose up -d` with the active profiles
|
||||
5. **Quick sanity check** — after 3 seconds, checks for any containers that exited immediately
|
||||
|
||||
### Step 7: Health Checks
|
||||
|
||||
Waits for each service in order, with generous timeouts:
|
||||
|
||||
| Service | Check | Timeout | Notes |
|
||||
|---------|-------|---------|-------|
|
||||
| GPU/CPU models | `curl http://localhost:8000/docs` | 10 min (120 x 5s) | First start downloads ~1GB of models |
|
||||
| Ollama | `curl http://localhost:11435/api/tags` | 3 min (60 x 3s) | Then pulls the selected model |
|
||||
| Server API | `curl http://localhost:1250/health` | 7.5 min (90 x 5s) | First start runs database migrations |
|
||||
| Frontend | `curl http://localhost:3000` | 1.5 min (30 x 3s) | Next.js build on first start |
|
||||
| Caddy | `curl -k https://localhost` | Quick check | After other services are up |
|
||||
|
||||
If the server container exits during the health check, the script dumps diagnostics (container statuses + logs) before exiting.
|
||||
|
||||
After the Ollama health check passes, the script checks if the selected model is already pulled. If not, it runs `ollama pull <model>` inside the container.
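
Each of these checks follows the same curl-in-a-loop pattern; a simplified version is sketched below (illustrative, not the script's exact helper):

```bash
# Poll a URL until it responds or the attempt budget runs out.
wait_for() {
  local name="$1" url="$2" attempts="$3" delay="$4"
  for _ in $(seq 1 "$attempts"); do
    if curl -fsS -o /dev/null "$url"; then
      echo "$name is up"
      return 0
    fi
    sleep "$delay"
  done
  echo "$name did not become healthy in time" >&2
  return 1
}

wait_for "server API" http://localhost:1250/health 90 5
wait_for "frontend"   http://localhost:3000 30 3
```
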
|
||||
|
||||
---
|
||||
|
||||
## Docker Compose Profile System
|
||||
|
||||
The compose file (`docker-compose.selfhosted.yml`) uses Docker Compose profiles to make services optional. Only services whose profiles match the active `--profile` flags are started.
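
For example, the flag combination `--gpu --ollama-gpu --garage --caddy` roughly corresponds to the following invocation (the setup script assembles these flags for you):

```bash
docker compose -f docker-compose.selfhosted.yml \
  --profile gpu --profile ollama-gpu --profile garage --profile caddy \
  up -d
```
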
|
||||
|
||||
### Always-on Services (no profile)
|
||||
|
||||
These start regardless of which flags you pass:
|
||||
|
||||
| Service | Role | Image |
|
||||
|---------|------|-------|
|
||||
| `server` | FastAPI backend, API endpoints, WebRTC | `monadicalsas/reflector-backend:latest` |
|
||||
| `worker` | Celery worker for background processing | Same image, `ENTRYPOINT=worker` |
|
||||
| `beat` | Celery beat scheduler for periodic tasks | Same image, `ENTRYPOINT=beat` |
|
||||
| `web` | Next.js frontend | `monadicalsas/reflector-frontend:latest` |
|
||||
| `redis` | Message broker + caching | `redis:7.2-alpine` |
|
||||
| `postgres` | Primary database | `postgres:17-alpine` |
|
||||
|
||||
### Profile-Based Services
|
||||
|
||||
| Profile | Service | Role |
|
||||
|---------|---------|------|
|
||||
| `gpu` | `gpu` | NVIDIA GPU-accelerated transcription/diarization/translation |
|
||||
| `cpu` | `cpu` | CPU-only transcription/diarization/translation |
|
||||
| `ollama-gpu` | `ollama` | Local Ollama LLM with GPU |
|
||||
| `ollama-cpu` | `ollama-cpu` | Local Ollama LLM on CPU |
|
||||
| `garage` | `garage` | Local S3-compatible object storage |
|
||||
| `caddy` | `caddy` | Reverse proxy with SSL |
|
||||
|
||||
### The "transcription" Alias
|
||||
|
||||
Both the `gpu` and `cpu` services define a Docker network alias of `transcription`. This means the backend always connects to `http://transcription:8000` regardless of which profile is active. The alias is defined in the compose file's `networks.default.aliases` section.
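
To confirm the alias resolves to whichever model container is active, you can probe it from the backend container (curl is available in that image):

```bash
docker compose -f docker-compose.selfhosted.yml exec server \
  curl -s -o /dev/null -w '%{http_code}\n' http://transcription:8000/docs
```
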
|
||||
|
||||
---
|
||||
|
||||
## Service Architecture
|
||||
|
||||
```
|
||||
┌─────────────┐
|
||||
Internet ────────>│ Caddy │ :80/:443 (profile: caddy)
|
||||
└──────┬──────┘
|
||||
│
|
||||
┌────────────┼────────────┐
|
||||
│ │ │
|
||||
v v │
|
||||
┌─────────┐ ┌─────────┐ │
|
||||
│ web │ │ server │ │
|
||||
│ :3000 │ │ :1250 │ │
|
||||
└─────────┘ └────┬────┘ │
|
||||
│ │
|
||||
┌────┴────┐ │
|
||||
│ worker │ │
|
||||
│ beat │ │
|
||||
└────┬────┘ │
|
||||
│ │
|
||||
┌──────────────┼────────────┤
|
||||
│ │ │
|
||||
v v v
|
||||
┌───────────┐ ┌─────────┐ ┌─────────┐
|
||||
│transcription│ │postgres │ │ redis │
|
||||
│ (gpu/cpu) │ │ :5432 │ │ :6379 │
|
||||
│ :8000 │ └─────────┘ └─────────┘
|
||||
└───────────┘
|
||||
│
|
||||
┌─────┴─────┐ ┌─────────┐
|
||||
│ ollama │ │ garage │
|
||||
│(optional) │ │(optional│
|
||||
│ :11435 │ │ S3) │
|
||||
└───────────┘ └─────────┘
|
||||
```
|
||||
|
||||
### How Services Interact
|
||||
|
||||
1. **User request** hits Caddy (if enabled), which routes to `web` (pages) or `server` (API)
|
||||
2. **`web`** renders pages server-side using `SERVER_API_URL=http://server:1250` and client-side using the public `API_URL`
|
||||
3. **`server`** handles API requests, file uploads, WebRTC streaming. Dispatches background work to Celery via Redis
|
||||
4. **`worker`** picks up Celery tasks (transcription pipelines, audio processing). Calls `transcription:8000` for ML inference and uploads results to S3 storage
|
||||
5. **`beat`** schedules periodic tasks (cleanup, webhook retries) by pushing them onto the Celery queue
|
||||
6. **`transcription` (gpu/cpu)** runs Whisper/Parakeet (transcription), Pyannote (diarization), and translation models. Stateless HTTP API
|
||||
7. **`ollama`** provides an OpenAI-compatible API for summarization and topic detection. Called by the worker during post-processing
|
||||
8. **`garage`** provides S3-compatible storage for audio files and processed results. Accessed by the worker via boto3
|
||||
|
||||
---
|
||||
|
||||
## Configuration Flow
|
||||
|
||||
Environment variables flow through multiple layers. Understanding this prevents confusion when debugging:
|
||||
|
||||
```
|
||||
Flags (--gpu, --garage, etc.)
|
||||
│
|
||||
├── setup-selfhosted.sh interprets flags
|
||||
│ │
|
||||
│ ├── Writes server/.env (backend config)
|
||||
│ ├── Writes www/.env (frontend config)
|
||||
│ ├── Writes .env (HF_TOKEN for compose interpolation)
|
||||
│ └── Writes Caddyfile (proxy routes)
|
||||
│
|
||||
└── docker-compose.selfhosted.yml reads:
|
||||
├── env_file: ./server/.env (loaded into server, worker, beat)
|
||||
├── env_file: ./www/.env (loaded into web)
|
||||
├── .env (compose variable interpolation, e.g. ${HF_TOKEN})
|
||||
└── environment: {...} (hardcoded overrides, always win over env_file)
|
||||
```
|
||||
|
||||
### Precedence Rules
|
||||
|
||||
Docker Compose `environment:` keys **always override** `env_file:` values. This is by design — the compose file hardcodes infrastructure values that must be correct inside the Docker network (like `DATABASE_URL=postgresql+asyncpg://...@postgres:5432/...`) regardless of what's in `server/.env`.
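
When debugging, compare what the running container actually sees with what the file says; for hardcoded infrastructure values they can legitimately differ:

```bash
# Effective value inside the container (compose environment: wins)
docker compose -f docker-compose.selfhosted.yml exec server env | grep '^DATABASE_URL='
# Value in the env file (may be overridden)
grep '^DATABASE_URL=' server/.env
```
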
|
||||
|
||||
The `server/.env` file is still useful for:
|
||||
- Values not overridden in the compose file (LLM config, storage credentials, auth settings)
|
||||
- Running the server outside Docker during development
|
||||
|
||||
### The Three `.env` Files
|
||||
|
||||
| File | Used By | Contains |
|
||||
|------|---------|----------|
|
||||
| `server/.env` | server, worker, beat | Backend config: database, Redis, S3, LLM, auth, public URLs |
|
||||
| `www/.env` | web | Frontend config: site URL, auth, feature flags |
|
||||
| `.env` (root) | Docker Compose interpolation | Only `HF_TOKEN` — injected into GPU/CPU container env |
|
||||
|
||||
---
|
||||
|
||||
## Storage Architecture
|
||||
|
||||
All audio files and processing results are stored in S3-compatible object storage. The backend uses boto3 (via aioboto3) with automatic path-style addressing when a custom endpoint URL is configured.
|
||||
|
||||
### How Garage Works
|
||||
|
||||
Garage is a lightweight, self-hosted S3-compatible storage engine. In this deployment:
|
||||
|
||||
- Runs as a single-node cluster with 1GB capacity allocation
|
||||
- Listens on port 3900 (S3 API) and 3903 (admin API)
|
||||
- Data persists in Docker volumes (`garage_data`, `garage_meta`)
|
||||
- Accessed by the worker at `http://garage:3900` (Docker-internal)
|
||||
|
||||
The setup script creates:
|
||||
- A bucket called `reflector-media`
|
||||
- An access key called `reflector` with read/write permissions on that bucket
|
||||
|
||||
### Path-Style vs Virtual-Hosted Addressing
|
||||
|
||||
AWS S3 uses virtual-hosted addressing by default (`bucket.s3.amazonaws.com`). S3-compatible services like Garage require path-style addressing (`endpoint/bucket`). The `AwsStorage` class detects this automatically: when `TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL` is set, it configures boto3 with `addressing_style: "path"`.
|
||||
|
||||
---
|
||||
|
||||
## SSL/TLS and Reverse Proxy
|
||||
|
||||
### With `--domain` (Production)
|
||||
|
||||
Caddy automatically obtains and renews a Let's Encrypt certificate. Requirements:
|
||||
- DNS A record pointing to the server
|
||||
- Ports 80 (HTTP challenge) and 443 (HTTPS) open to the internet
|
||||
|
||||
The generated Caddyfile uses the domain as the site address, which triggers Caddy's automatic HTTPS.
|
||||
|
||||
### Without `--domain` (Development/LAN)
|
||||
|
||||
Caddy generates a self-signed certificate and listens on `:443` as a catch-all. Browsers will show a security warning that must be accepted manually.
|
||||
|
||||
### Without `--caddy` (BYO Proxy)
|
||||
|
||||
No ports are exposed to the internet. The services listen on `127.0.0.1` only:
|
||||
- Frontend: `localhost:3000`
|
||||
- Backend API: `localhost:1250`
|
||||
|
||||
You can point your own reverse proxy (nginx, Traefik, etc.) at these ports.
|
||||
|
||||
### WebRTC and UDP
|
||||
|
||||
The server exposes UDP ports 50000-50100 for WebRTC ICE candidates. The `WEBRTC_HOST` variable tells the server which IP to advertise in ICE candidates — this must be the server's actual IP address (not a domain), because WebRTC uses UDP which doesn't go through the HTTP reverse proxy.
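
If the host runs a firewall, the UDP range has to be opened explicitly; the exact command depends on your firewall, but with ufw it would look like this:

```bash
sudo ufw allow 50000:50100/udp
# Check which IP the backend advertises in ICE candidates:
grep '^WEBRTC_HOST=' server/.env
```
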
|
||||
|
||||
---
|
||||
|
||||
## Build vs Pull Workflow
|
||||
|
||||
### Default (no `--build` flag)
|
||||
|
||||
```
|
||||
GPU/CPU model image: Always built from source (./gpu/self_hosted/)
|
||||
Backend image: Pulled from monadicalsas/reflector-backend:latest
|
||||
Frontend image: Pulled from monadicalsas/reflector-frontend:latest
|
||||
```
|
||||
|
||||
The GPU/CPU image is always built because it contains hardware-specific build steps and ML model download logic.
|
||||
|
||||
### With `--build`
|
||||
|
||||
```
|
||||
GPU/CPU model image: Built from source (./gpu/self_hosted/)
|
||||
Backend image: Built from source (./server/)
|
||||
Frontend image: Built from source (./www/)
|
||||
```
|
||||
|
||||
Use `--build` when:
|
||||
- You've made local code changes
|
||||
- The prebuilt registry images are outdated
|
||||
- You want to verify the build works on your hardware
|
||||
|
||||
### Rebuilding Individual Services
|
||||
|
||||
```bash
|
||||
# Rebuild just the backend
|
||||
docker compose -f docker-compose.selfhosted.yml build server worker beat
|
||||
|
||||
# Rebuild just the frontend
|
||||
docker compose -f docker-compose.selfhosted.yml build web
|
||||
|
||||
# Rebuild the GPU model container
|
||||
docker compose -f docker-compose.selfhosted.yml build gpu
|
||||
|
||||
# Force a clean rebuild (no cache)
|
||||
docker compose -f docker-compose.selfhosted.yml build --no-cache server
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Background Task Processing
|
||||
|
||||
### Celery Architecture
|
||||
|
||||
The backend uses Celery for all background work, with Redis as the message broker:
|
||||
|
||||
- **`worker`** — picks up tasks from the Redis queue and executes them
|
||||
- **`beat`** — schedules periodic tasks (cron-like) by pushing them onto the queue
|
||||
- **`Redis`** — acts as both message broker and result backend
|
||||
|
||||
### The Audio Processing Pipeline
|
||||
|
||||
When a file is uploaded, the worker runs a multi-step pipeline:
|
||||
|
||||
```
|
||||
Upload → Extract Audio → Upload to S3
|
||||
│
|
||||
┌──────┼──────┐
|
||||
│ │ │
|
||||
v v v
|
||||
Transcribe Diarize Waveform
|
||||
│ │ │
|
||||
└──────┼──────┘
|
||||
│
|
||||
Assemble
|
||||
│
|
||||
┌──────┼──────┐
|
||||
v v v
|
||||
Topics Title Summaries
|
||||
│
|
||||
Done
|
||||
```
|
||||
|
||||
Transcription, diarization, and waveform generation run in parallel. After assembly, topic detection, title generation, and summarization also run in parallel. Each step calls the appropriate service (transcription container for ML, Ollama/external LLM for text generation, S3 for storage).
|
||||
|
||||
### Event Loop Management
|
||||
|
||||
Each Celery task runs in its own `asyncio.run()` call, which creates a fresh event loop. The `asynctask` decorator in `server/reflector/asynctask.py` handles:
|
||||
|
||||
1. **Database connections** — resets the connection pool before each task (connections from a previous event loop would cause "Future attached to a different loop" errors)
|
||||
2. **Redis connections** — resets the WebSocket manager singleton so Redis pub/sub reconnects on the current loop
|
||||
3. **Cleanup** — disconnects the database and clears the context variable in the `finally` block
|
||||
|
||||
---
|
||||
|
||||
## Network and Port Layout
|
||||
|
||||
All services communicate over Docker's default bridge network. Only specific ports are exposed to the host:
|
||||
|
||||
| Port | Service | Binding | Purpose |
|
||||
|------|---------|---------|---------|
|
||||
| 80 | Caddy | `0.0.0.0:80` | HTTP (redirect to HTTPS / Let's Encrypt challenge) |
|
||||
| 443 | Caddy | `0.0.0.0:443` | HTTPS (main entry point) |
|
||||
| 1250 | Server | `127.0.0.1:1250` | Backend API (localhost only) |
|
||||
| 3000 | Web | `127.0.0.1:3000` | Frontend (localhost only) |
|
||||
| 3900 | Garage | `0.0.0.0:3900` | S3 API (for admin/debug access) |
|
||||
| 3903 | Garage | `0.0.0.0:3903` | Garage admin API |
|
||||
| 8000 | GPU/CPU | `127.0.0.1:8000` | ML model API (localhost only) |
|
||||
| 11435 | Ollama | `127.0.0.1:11435` | Ollama API (localhost only) |
|
||||
| 50000-50100/udp | Server | `0.0.0.0:50000-50100` | WebRTC ICE candidates |
|
||||
|
||||
Services bound to `127.0.0.1` are only accessible from the host itself (not from the network). Caddy is the only service exposed to the internet on standard HTTP/HTTPS ports.
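
You can confirm the bindings from the host with `ss` (Linux):

```bash
ss -ltnp | grep -E ':(3000|1250|8000|11435) '   # expect 127.0.0.1 addresses
ss -ltnp | grep -E ':(80|443) '                 # expect 0.0.0.0 when Caddy is enabled
```
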
|
||||
|
||||
### Docker-Internal Hostnames
|
||||
|
||||
Inside the Docker network, services reach each other by their compose service name:
|
||||
|
||||
| Hostname | Resolves To |
|
||||
|----------|-------------|
|
||||
| `server` | Backend API container |
|
||||
| `web` | Frontend container |
|
||||
| `postgres` | PostgreSQL container |
|
||||
| `redis` | Redis container |
|
||||
| `transcription` | GPU or CPU container (network alias) |
|
||||
| `ollama` / `ollama-cpu` | Ollama container |
|
||||
| `garage` | Garage S3 container |
|
||||
|
||||
---
|
||||
|
||||
## Diagnostics and Error Handling
|
||||
|
||||
The setup script includes an `ERR` trap that automatically dumps diagnostics when any command fails:
|
||||
|
||||
1. Lists all container statuses
|
||||
2. Shows the last 30 lines of logs for any stopped/exited containers
|
||||
3. Shows the last 40 lines of the specific failing service
|
||||
|
||||
This means if something goes wrong during setup, you'll see the relevant logs immediately without having to run manual debug commands.
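
A minimal sketch of that pattern (illustrative, not the script's exact code):

```bash
set -E   # let the ERR trap fire inside functions too

dump_diagnostics() {
  echo "Setup failed; container status:"
  docker compose -f docker-compose.selfhosted.yml ps
  # Show recent logs for anything that already exited
  for c in $(docker compose -f docker-compose.selfhosted.yml ps --status exited -q); do
    docker logs --tail 30 "$c"
  done
}
trap dump_diagnostics ERR
```
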
|
||||
|
||||
### Common Debug Commands
|
||||
|
||||
```bash
|
||||
# Overall status
|
||||
docker compose -f docker-compose.selfhosted.yml ps
|
||||
|
||||
# Logs for a specific service
|
||||
docker compose -f docker-compose.selfhosted.yml logs server --tail 50
|
||||
docker compose -f docker-compose.selfhosted.yml logs worker --tail 50
|
||||
|
||||
# Check environment inside a container
|
||||
docker compose -f docker-compose.selfhosted.yml exec server env | grep TRANSCRIPT
|
||||
|
||||
# Health check from inside the network
|
||||
docker compose -f docker-compose.selfhosted.yml exec server curl http://localhost:1250/health
|
||||
|
||||
# Check S3 storage connectivity
|
||||
docker compose -f docker-compose.selfhosted.yml exec server curl http://garage:3900
|
||||
|
||||
# Database access
|
||||
docker compose -f docker-compose.selfhosted.yml exec postgres psql -U reflector -c "SELECT id, status FROM transcript ORDER BY created_at DESC LIMIT 5;"
|
||||
|
||||
# List files in server data directory
|
||||
docker compose -f docker-compose.selfhosted.yml exec server ls -la /app/data/
|
||||
```
|
||||
@@ -1,522 +0,0 @@
|
||||
# Self-Hosted Production Deployment
|
||||
|
||||
Deploy Reflector on a single server with everything running in Docker. Transcription, diarization, and translation use specialized ML models (Whisper/Parakeet, Pyannote); only summarization and topic detection require an LLM.
|
||||
|
||||
> For a detailed walkthrough of how the setup script and infrastructure work under the hood, see [How the Self-Hosted Setup Works](selfhosted-architecture.md).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### Hardware
|
||||
- **With GPU**: Linux server with NVIDIA GPU (8GB+ VRAM recommended), 16GB+ RAM, 50GB+ disk
|
||||
- **CPU-only**: 8+ cores, 32GB+ RAM (transcription is slower but works)
|
||||
- Disk space for ML models (~2GB on first run) + audio storage
|
||||
|
||||
### Software
|
||||
- Docker Engine 24+ with Compose V2
|
||||
- NVIDIA drivers + `nvidia-container-toolkit` (GPU modes only)
|
||||
- `curl`, `openssl` (usually pre-installed)
|
||||
|
||||
### Accounts & Credentials (depending on options)
|
||||
|
||||
**Always recommended:**
|
||||
- **HuggingFace token** — For downloading pyannote speaker diarization models. Get one at https://huggingface.co/settings/tokens and accept the model licenses:
|
||||
- https://huggingface.co/pyannote/speaker-diarization-3.1
|
||||
- https://huggingface.co/pyannote/segmentation-3.0
|
||||
- The setup script will prompt for this. If skipped, diarization falls back to a public model bundle (may be less reliable).
|
||||
|
||||
**LLM for summarization & topic detection (pick one):**
|
||||
- **With `--ollama-gpu` or `--ollama-cpu`**: Nothing extra — Ollama runs locally and pulls the model automatically
|
||||
- **Without `--ollama-*`**: An OpenAI-compatible LLM API key and endpoint. Examples:
|
||||
- OpenAI: `LLM_URL=https://api.openai.com/v1`, `LLM_API_KEY=sk-...`, `LLM_MODEL=gpt-4o-mini`
|
||||
- Anthropic, Together, Groq, or any OpenAI-compatible API
|
||||
- A self-managed vLLM or Ollama instance elsewhere on the network
|
||||
|
||||
**Object storage (pick one):**
|
||||
- **With `--garage`**: Nothing extra — Garage (local S3-compatible storage) is auto-configured by the script
|
||||
- **Without `--garage`**: S3-compatible storage credentials. The script will prompt for these, or you can pre-fill `server/.env`. Options include:
|
||||
- **AWS S3**: Access Key ID, Secret Access Key, bucket name, region
|
||||
- **MinIO**: Same credentials + `TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL=http://your-minio:9000`
|
||||
- **Any S3-compatible provider** (Backblaze B2, Cloudflare R2, DigitalOcean Spaces, etc.): same fields + custom endpoint URL
|
||||
|
||||
**Optional add-ons (configure after initial setup):**
|
||||
- **Authentik** (user authentication): Requires an Authentik instance with an OAuth2/OIDC application configured for Reflector. See [Enabling Authentication](#enabling-authentication-authentik) below.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Monadical-SAS/reflector.git
|
||||
cd reflector
|
||||
|
||||
# GPU + local Ollama LLM + local Garage storage + Caddy SSL (with domain):
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --domain reflector.example.com
|
||||
|
||||
# Same but without a domain (self-signed cert, access via IP):
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
|
||||
|
||||
# CPU-only (same, but slower):
|
||||
./scripts/setup-selfhosted.sh --cpu --ollama-cpu --garage --caddy
|
||||
|
||||
# With password authentication (single admin user):
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --password mysecretpass
|
||||
|
||||
# Build from source instead of pulling prebuilt images:
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --build
|
||||
```
|
||||
|
||||
That's it. The script generates env files, secrets, starts all containers, waits for health checks, and prints the URL.
|
||||
|
||||
## Specialized Models (Required)
|
||||
|
||||
Pick `--gpu` or `--cpu`. This determines how **transcription, diarization, and translation** run:
|
||||
|
||||
| Flag | What it does | Requires |
|
||||
|------|-------------|----------|
|
||||
| `--gpu` | NVIDIA GPU acceleration for ML models | NVIDIA GPU + drivers + `nvidia-container-toolkit` |
|
||||
| `--cpu` | CPU-only (slower but works without GPU) | 8+ cores, 32GB+ RAM recommended |
|
||||
|
||||
## Local LLM (Optional)
|
||||
|
||||
Optionally add `--ollama-gpu` or `--ollama-cpu` for a **local Ollama instance** that handles summarization and topic detection. If omitted, configure an external OpenAI-compatible LLM in `server/.env`.
|
||||
|
||||
| Flag | What it does | Requires |
|
||||
|------|-------------|----------|
|
||||
| `--ollama-gpu` | Local Ollama with NVIDIA GPU acceleration | NVIDIA GPU |
|
||||
| `--ollama-cpu` | Local Ollama on CPU only | Nothing extra |
|
||||
| `--llm-model MODEL` | Choose which Ollama model to download (default: `qwen2.5:14b`) | `--ollama-gpu` or `--ollama-cpu` |
|
||||
| *(omitted)* | User configures external LLM (OpenAI, Anthropic, etc.) | LLM API key |
|
||||
|
||||
### macOS / Apple Silicon
|
||||
|
||||
`--ollama-gpu` requires an NVIDIA GPU and **does not work on macOS**. Docker on macOS cannot access Apple GPU acceleration, so the containerized Ollama will run on CPU only regardless of the flag used.
|
||||
|
||||
For the best performance on Mac, we recommend running Ollama **natively outside Docker** (install from https://ollama.com) — this gives Ollama direct access to Apple Metal GPU acceleration. Then omit `--ollama-gpu`/`--ollama-cpu` from the setup script and point the backend to your local Ollama instance:
|
||||
|
||||
```env
|
||||
# In server/.env
|
||||
LLM_URL=http://host.docker.internal:11434/v1
|
||||
LLM_MODEL=qwen2.5:14b
|
||||
LLM_API_KEY=not-needed
|
||||
```
|
||||
|
||||
`--ollama-cpu` does work on macOS but will be significantly slower than a native Ollama install with Metal acceleration.
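
Before restarting the backend, it's worth checking that the native Ollama install is reachable and has the model pulled (the native default port is 11434):

```bash
curl -s http://localhost:11434/api/tags
ollama pull qwen2.5:14b   # only needed if the model isn't listed above
```
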
|
||||
|
||||
### Choosing an Ollama model
|
||||
|
||||
The default model is `qwen2.5:14b` (~9GB download, good multilingual support and summary quality). Override with `--llm-model`:
|
||||
|
||||
```bash
|
||||
# Default (qwen2.5:14b)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy
|
||||
|
||||
# Mistral — good balance of speed and quality (~4.1GB)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --llm-model mistral --garage --caddy
|
||||
|
||||
# Phi-4 — smaller and faster (~9.1GB)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --llm-model phi4 --garage --caddy
|
||||
|
||||
# Llama 3.3 70B — best quality, needs 48GB+ RAM or GPU VRAM (~43GB)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --llm-model llama3.3:70b --garage --caddy
|
||||
|
||||
# Gemma 2 9B (~5.4GB)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --llm-model gemma2 --garage --caddy
|
||||
|
||||
# DeepSeek R1 8B — reasoning model, verbose but thorough summaries (~4.9GB)
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --llm-model deepseek-r1:8b --garage --caddy
|
||||
```
|
||||
|
||||
Browse all available models at https://ollama.com/library.
|
||||
|
||||
### Recommended combinations
|
||||
|
||||
- **`--gpu --ollama-gpu`**: Best for servers with NVIDIA GPU. Fully self-contained, no external API keys needed.
|
||||
- **`--cpu --ollama-cpu`**: No GPU available but want everything self-contained. Slower but works.
|
||||
- **`--gpu --ollama-cpu`**: GPU for transcription, CPU for LLM. Saves GPU VRAM for ML models.
|
||||
- **`--gpu`**: Have NVIDIA GPU but prefer a cloud LLM (faster/better summaries with GPT-4, Claude, etc.).
|
||||
- **`--cpu`**: No GPU, prefer cloud LLM. Slowest transcription but best summary quality.
|
||||
|
||||
## Other Optional Flags
|
||||
|
||||
| Flag | What it does |
|
||||
|------|-------------|
|
||||
| `--garage` | Starts Garage (local S3-compatible storage). Auto-configures bucket, keys, and env vars. |
|
||||
| `--caddy` | Starts Caddy reverse proxy on ports 80/443 with self-signed cert. |
|
||||
| `--domain DOMAIN` | Use a real domain with Let's Encrypt auto-HTTPS (implies `--caddy`). Requires DNS A record pointing to this server and ports 80/443 open. |
|
||||
| `--password PASS` | Enable password authentication with an `admin@localhost` user. Sets `AUTH_BACKEND=password`, `PUBLIC_MODE=false`. See [Enabling Password Authentication](#enabling-password-authentication). |
|
||||
| `--build` | Build backend (server, worker, beat) and frontend (web) Docker images from source instead of pulling prebuilt images from the registry. Useful for development or when running a version with local changes. |
|
||||
|
||||
Without `--garage`, you **must** provide S3-compatible credentials (the script will prompt interactively or you can pre-fill `server/.env`).
|
||||
|
||||
Without `--caddy` or `--domain`, no ports are exposed. Point your own reverse proxy at `web:3000` (frontend) and `server:1250` (API).
|
||||
|
||||
**Using a domain (recommended for production):** Point a DNS A record at your server's IP, then pass `--domain your.domain.com`. Caddy will automatically obtain and renew a Let's Encrypt certificate. Ports 80 and 443 must be open.
|
||||
|
||||
**Without a domain:** `--caddy` alone uses a self-signed certificate. Browsers will show a security warning that must be accepted.
|
||||
|
||||
## What the Script Does
|
||||
|
||||
1. **Prerequisites check** — Docker, NVIDIA GPU (if needed), compose file exists
|
||||
2. **Generate secrets** — `SECRET_KEY`, `NEXTAUTH_SECRET` via `openssl rand`
|
||||
3. **Generate `server/.env`** — From template, sets infrastructure defaults, configures LLM based on mode, enables `PUBLIC_MODE`
|
||||
4. **Generate `www/.env`** — Auto-detects server IP, sets URLs
|
||||
5. **Storage setup** — Either initializes Garage (bucket, keys, permissions) or prompts for external S3 credentials
|
||||
6. **Caddyfile** — Generates domain-specific (Let's Encrypt) or IP-specific (self-signed) configuration
|
||||
7. **Build & start** — Always builds GPU/CPU model image from source. With `--build`, also builds backend and frontend from source; otherwise pulls prebuilt images from the registry
|
||||
8. **Health checks** — Waits for each service, pulls Ollama model if needed, warns about missing LLM config
|
||||
|
||||
> For a deeper dive into each step, see [How the Self-Hosted Setup Works](selfhosted-architecture.md).
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
### Server Environment (`server/.env`)
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `DATABASE_URL` | PostgreSQL connection | Auto-set (Docker internal) |
|
||||
| `REDIS_HOST` | Redis hostname | Auto-set (`redis`) |
|
||||
| `SECRET_KEY` | App secret | Auto-generated |
|
||||
| `AUTH_BACKEND` | Authentication method (`none`, `password`, `jwt`) | `none` |
|
||||
| `PUBLIC_MODE` | Allow unauthenticated access | `true` |
|
||||
| `ADMIN_EMAIL` | Admin email for password auth | *(unset)* |
|
||||
| `ADMIN_PASSWORD_HASH` | PBKDF2 hash for password auth | *(unset)* |
|
||||
| `WEBRTC_HOST` | IP advertised in WebRTC ICE candidates | Auto-detected (server IP) |
|
||||
| `TRANSCRIPT_URL` | Specialized model endpoint | `http://transcription:8000` |
|
||||
| `LLM_URL` | OpenAI-compatible LLM endpoint | Auto-set for Ollama modes |
|
||||
| `LLM_API_KEY` | LLM API key | `not-needed` for Ollama |
|
||||
| `LLM_MODEL` | LLM model name | `qwen2.5:14b` for Ollama (override with `--llm-model`) |
|
||||
| `CELERY_BEAT_POLL_INTERVAL` | Override all worker polling intervals (seconds). `0` = use individual defaults | `300` (selfhosted), `0` (other) |
|
||||
| `TRANSCRIPT_STORAGE_BACKEND` | Storage backend | `aws` |
|
||||
| `TRANSCRIPT_STORAGE_AWS_*` | S3 credentials | Auto-set for Garage |
|
||||
|
||||
### Frontend Environment (`www/.env`)
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `SITE_URL` | Public-facing URL | Auto-detected |
|
||||
| `API_URL` | API URL (browser-side) | Same as SITE_URL |
|
||||
| `SERVER_API_URL` | API URL (server-side) | `http://server:1250` |
|
||||
| `NEXTAUTH_SECRET` | Auth secret | Auto-generated |
|
||||
| `FEATURE_REQUIRE_LOGIN` | Require authentication | `false` |
|
||||
| `AUTH_PROVIDER` | Auth provider (`authentik` or `credentials`) | *(unset)* |
|
||||
|
||||
## Storage Options
|
||||
|
||||
### Garage (Recommended for Self-Hosted)
|
||||
|
||||
Use `--garage` flag. The script automatically:
|
||||
- Generates `data/garage.toml` with a random RPC secret
|
||||
- Starts the Garage container
|
||||
- Creates the `reflector-media` bucket
|
||||
- Creates an access key with read/write permissions
|
||||
- Writes all S3 credentials to `server/.env`
|
||||
|
||||
### External S3 (AWS, MinIO, etc.)
|
||||
|
||||
Don't use `--garage`. The script will prompt for:
|
||||
- Access Key ID
|
||||
- Secret Access Key
|
||||
- Bucket Name
|
||||
- Region
|
||||
- Endpoint URL (for non-AWS like MinIO)
|
||||
|
||||
Or pre-fill in `server/.env`:
|
||||
```env
|
||||
TRANSCRIPT_STORAGE_BACKEND=aws
|
||||
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=your-key
|
||||
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=your-secret
|
||||
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
|
||||
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
|
||||
# For non-AWS S3 (MinIO, etc.):
|
||||
TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL=http://minio:9000
|
||||
```
|
||||
|
||||
## What Authentication Enables
|
||||
|
||||
By default, Reflector runs in **public mode** (`AUTH_BACKEND=none`, `PUBLIC_MODE=true`) — anyone can create and view transcripts without logging in. Transcripts are anonymous (not linked to any user) and cannot be edited or deleted after creation.
|
||||
|
||||
Enabling authentication (either password or Authentik) unlocks:
|
||||
|
||||
| Feature | Public mode (no auth) | With authentication |
|
||||
|---------|----------------------|---------------------|
|
||||
| Create transcripts (record/upload) | Yes (anonymous, unowned) | Yes (owned by user) |
|
||||
| View transcripts | All transcripts visible | Own transcripts + shared rooms |
|
||||
| Edit/delete transcripts | No | Yes (owner only) |
|
||||
| Privacy controls (private/semi-private/public) | No (everything public) | Yes (owner can set share mode) |
|
||||
| Speaker reassignment and merging | No | Yes (owner only) |
|
||||
| Participant management (add/edit/delete) | Read-only | Full CRUD (owner only) |
|
||||
| Create rooms | No | Yes |
|
||||
| Edit/delete rooms | No | Yes (owner only) |
|
||||
| Room calendar (ICS) sync | No | Yes (owner only) |
|
||||
| API key management | No | Yes |
|
||||
| Post to Zulip | No | Yes (owner only) |
|
||||
| Real-time WebSocket notifications | No (connection closed) | Yes (transcript create/delete events) |
|
||||
| Meeting host access (Daily.co token) | No | Yes (room owner) |
|
||||
|
||||
In short: public mode is "demo-friendly" — great for trying Reflector out. Authentication adds **ownership, privacy, and management** of your data.
|
||||
|
||||
## Authentication Options
|
||||
|
||||
Reflector supports three authentication backends:
|
||||
|
||||
| Backend | `AUTH_BACKEND` | Use case |
|
||||
|---------|---------------|----------|
|
||||
| `none` | `none` | Public/demo mode, no login required |
|
||||
| `password` | `password` | Single-user self-hosted, simple email/password login |
|
||||
| `jwt` | `jwt` | Multi-user via Authentik (OAuth2/OIDC) |
|
||||
|
||||
## Enabling Password Authentication
|
||||
|
||||
The simplest way to add authentication. Creates a single admin user with email/password login — no external identity provider needed.
|
||||
|
||||
### Quick setup (recommended)
|
||||
|
||||
Pass `--password` to the setup script:
|
||||
|
||||
```bash
|
||||
./scripts/setup-selfhosted.sh --gpu --ollama-gpu --garage --caddy --password mysecretpass
|
||||
```
|
||||
|
||||
This automatically:
|
||||
- Sets `AUTH_BACKEND=password` and `PUBLIC_MODE=false` in `server/.env`
|
||||
- Creates an `admin@localhost` user with the given password
|
||||
- Sets `FEATURE_REQUIRE_LOGIN=true` and `AUTH_PROVIDER=credentials` in `www/.env`
|
||||
- Provisions the admin user in the database on container startup
|
||||
|
||||
### Manual setup
|
||||
|
||||
If you prefer to configure manually or want to change the admin email:
|
||||
|
||||
1. Generate a password hash:
|
||||
```bash
|
||||
cd server
|
||||
uv run python -m reflector.tools.create_admin --hash-only --password yourpassword
|
||||
```
|
||||
|
||||
2. Update `server/.env`:
|
||||
```env
|
||||
AUTH_BACKEND=password
|
||||
PUBLIC_MODE=false
|
||||
ADMIN_EMAIL=admin@yourdomain.com
|
||||
ADMIN_PASSWORD_HASH=pbkdf2:sha256:100000$<salt>$<hash>
|
||||
```
|
||||
|
||||
3. Update `www/.env`:
|
||||
```env
|
||||
FEATURE_REQUIRE_LOGIN=true
|
||||
AUTH_PROVIDER=credentials
|
||||
```
|
||||
|
||||
4. Restart:
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml down
|
||||
./scripts/setup-selfhosted.sh <same-flags>
|
||||
```
|
||||
|
||||
### How it works
|
||||
|
||||
- The backend issues HS256 JWTs (signed with `SECRET_KEY`) on successful login via `POST /v1/auth/login` (see the example request after this list)
|
||||
- Tokens expire after 24 hours; the user must log in again after expiry
|
||||
- The frontend shows a login page at `/login` with email and password fields
|
||||
- A rate limiter blocks IPs after 10 failed login attempts within 5 minutes
|
||||
- The admin user is provisioned automatically on container startup from `ADMIN_EMAIL` and `ADMIN_PASSWORD_HASH` environment variables
|
||||
- Passwords are hashed with PBKDF2-SHA256 (100,000 iterations) — no additional dependencies required
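
A minimal example of logging in against that endpoint is sketched below. The JSON field names are an assumption; confirm them against the server's OpenAPI docs at `/docs`:

```bash
# Illustrative login request; field names are assumed, not confirmed.
curl -s -X POST https://reflector.example.com/v1/auth/login \
  -H 'Content-Type: application/json' \
  -d '{"email": "admin@localhost", "password": "mysecretpass"}'
# On success the response includes a JWT; send it on subsequent requests as
#   Authorization: Bearer <token>
```
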
|
||||
|
||||
### Changing the admin password
|
||||
|
||||
```bash
|
||||
cd server
|
||||
uv run python -m reflector.tools.create_admin --email admin@localhost --password newpassword
|
||||
```
|
||||
|
||||
Or update `ADMIN_PASSWORD_HASH` in `server/.env` and restart the containers.
|
||||
|
||||
## Enabling Authentication (Authentik)
|
||||
|
||||
For multi-user deployments with SSO. Requires an external Authentik instance.
|
||||
|
||||
By default, authentication is disabled (`AUTH_BACKEND=none`, `FEATURE_REQUIRE_LOGIN=false`). To enable:
|
||||
|
||||
1. Deploy an Authentik instance (see [Authentik docs](https://goauthentik.io/docs/installation))
|
||||
2. Create an OAuth2/OIDC application for Reflector
|
||||
3. Update `server/.env`:
|
||||
```env
|
||||
AUTH_BACKEND=jwt
|
||||
AUTH_JWT_AUDIENCE=your-client-id
|
||||
```
|
||||
4. Update `www/.env`:
|
||||
```env
|
||||
FEATURE_REQUIRE_LOGIN=true
|
||||
AUTH_PROVIDER=authentik
|
||||
AUTHENTIK_ISSUER=https://authentik.example.com/application/o/reflector
|
||||
AUTHENTIK_REFRESH_TOKEN_URL=https://authentik.example.com/application/o/token/
|
||||
AUTHENTIK_CLIENT_ID=your-client-id
|
||||
AUTHENTIK_CLIENT_SECRET=your-client-secret
|
||||
```
|
||||
5. Restart: `docker compose -f docker-compose.selfhosted.yml down && ./scripts/setup-selfhosted.sh <same-flags>`
|
||||
|
||||
## Enabling Real Domain with Let's Encrypt
|
||||
|
||||
By default, Caddy uses self-signed certificates. For a real domain:
|
||||
|
||||
1. Point your domain's DNS to your server's IP
|
||||
2. Ensure ports 80 and 443 are open
|
||||
3. Edit `Caddyfile`:
|
||||
```
|
||||
reflector.example.com {
|
||||
handle /v1/* {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle /health {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle {
|
||||
reverse_proxy web:3000
|
||||
}
|
||||
}
|
||||
```
|
||||
4. Update `www/.env`:
|
||||
```env
|
||||
SITE_URL=https://reflector.example.com
|
||||
NEXTAUTH_URL=https://reflector.example.com
|
||||
API_URL=https://reflector.example.com
|
||||
```
|
||||
5. Restart Caddy: `docker compose -f docker-compose.selfhosted.yml restart caddy web`
|
||||
|
||||
## Worker Polling Frequency
|
||||
|
||||
The selfhosted setup defaults all background worker polling intervals to **300 seconds (5 minutes)** to reduce CPU and memory usage. This controls how often the beat scheduler triggers tasks like recording discovery, meeting reconciliation, and calendar sync.
|
||||
|
||||
To change the interval, edit `server/.env`:
|
||||
|
||||
```env
|
||||
# Poll every 60 seconds (more responsive, uses more resources)
|
||||
CELERY_BEAT_POLL_INTERVAL=60
|
||||
|
||||
# Poll every 5 minutes (default for selfhosted)
|
||||
CELERY_BEAT_POLL_INTERVAL=300
|
||||
|
||||
# Use individual per-task defaults (production SaaS behavior)
|
||||
CELERY_BEAT_POLL_INTERVAL=0
|
||||
```
|
||||
|
||||
After changing, restart the beat and worker containers:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml restart beat worker
|
||||
```
|
||||
|
||||
**Affected tasks when `CELERY_BEAT_POLL_INTERVAL` is set:**
|
||||
|
||||
| Task | Default (no override) | With override |
|
||||
|------|-----------------------|---------------|
|
||||
| SQS message polling | 60s | Override value |
|
||||
| Daily.co recording discovery | 15s (no webhook) / 180s (webhook) | Override value |
|
||||
| Meeting reconciliation | 30s | Override value |
|
||||
| ICS calendar sync | 60s | Override value |
|
||||
| Upcoming meeting creation | 30s | Override value |
|
||||
|
||||
> **Note:** Daily crontab tasks (failed recording reprocessing at 05:00 UTC, public data cleanup at 03:00 UTC) and healthcheck pings (10 min) are **not** affected by this setting.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Check service status
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml ps
|
||||
```
|
||||
|
||||
### View logs for a specific service
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml logs server --tail 50
|
||||
docker compose -f docker-compose.selfhosted.yml logs gpu --tail 50
|
||||
docker compose -f docker-compose.selfhosted.yml logs web --tail 50
|
||||
```
|
||||
|
||||
### GPU service taking too long
|
||||
First start downloads ~1-2GB of ML models. Check progress:
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml logs gpu -f
|
||||
```
|
||||
|
||||
### Server exits immediately
|
||||
Usually a database migration issue. Check:
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml logs server --tail 50
|
||||
```
|
||||
|
||||
### Caddy certificate issues
|
||||
For self-signed certs, your browser will warn. Click Advanced > Proceed.
|
||||
For Let's Encrypt, ensure ports 80/443 are open and DNS is pointed correctly.
|
||||
|
||||
### Summaries/topics not generating
|
||||
Check LLM configuration:
|
||||
```bash
|
||||
grep LLM_ server/.env
|
||||
```
|
||||
If you didn't use `--ollama-gpu` or `--ollama-cpu`, you must set `LLM_URL`, `LLM_API_KEY`, and `LLM_MODEL`.
|
||||
|
||||
### Health check from inside containers
|
||||
```bash
|
||||
docker compose -f docker-compose.selfhosted.yml exec server curl http://localhost:1250/health
|
||||
docker compose -f docker-compose.selfhosted.yml exec gpu curl http://localhost:8000/docs
|
||||
```
|
||||
|
||||
## Updating
|
||||
|
||||
```bash
|
||||
# Option A: Pull latest prebuilt images and restart
|
||||
docker compose -f docker-compose.selfhosted.yml down
|
||||
./scripts/setup-selfhosted.sh <same-flags-as-before>
|
||||
|
||||
# Option B: Build from source (after git pull) and restart
|
||||
git pull
|
||||
docker compose -f docker-compose.selfhosted.yml down
|
||||
./scripts/setup-selfhosted.sh <same-flags-as-before> --build
|
||||
|
||||
# Rebuild only the GPU/CPU model image (picks up model updates)
|
||||
docker compose -f docker-compose.selfhosted.yml build gpu # or cpu
|
||||
```
|
||||
|
||||
The setup script is idempotent — it won't overwrite existing secrets or env vars that are already set.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────┐
|
||||
Internet ────────>│ Caddy │ :80/:443
|
||||
└────┬────┘
|
||||
│
|
||||
┌────────────┼────────────┐
|
||||
│ │ │
|
||||
v v │
|
||||
┌─────────┐ ┌─────────┐ │
|
||||
│ web │ │ server │ │
|
||||
│ :3000 │ │ :1250 │ │
|
||||
└─────────┘ └────┬────┘ │
|
||||
│ │
|
||||
┌────┴────┐ │
|
||||
│ worker │ │
|
||||
│ beat │ │
|
||||
└────┬────┘ │
|
||||
│ │
|
||||
┌──────────────┼────────────┤
|
||||
│ │ │
|
||||
v v v
|
||||
┌───────────┐ ┌─────────┐ ┌─────────┐
|
||||
│transcription│ │postgres │ │ redis │
|
||||
│(gpu/cpu) │ │ :5432 │ │ :6379 │
|
||||
│ :8000 │ └─────────┘ └─────────┘
|
||||
└───────────┘
|
||||
│
|
||||
┌─────┴─────┐ ┌─────────┐
|
||||
│ ollama │ │ garage │
|
||||
│ (optional)│ │(optional│
|
||||
│ :11435 │ │ S3) │
|
||||
└───────────┘ └─────────┘
|
||||
```
|
||||
|
||||
All services communicate over Docker's internal network. Only Caddy (if enabled) exposes ports to the internet.
|
||||
|
||||
## Future Plans for the Self-Hosted Script
|
||||
|
||||
The following features are supported by Reflector but are **not yet integrated into the self-hosted setup script** and require manual configuration:
|
||||
|
||||
- **Daily.co live rooms with multitrack processing**: Daily.co enables real-time meeting rooms with automatic recording and per-participant audio tracks for improved diarization. Requires a Daily.co account, API key, and an AWS S3 bucket for recording storage. Currently not automated in the script because the worker orchestration (hatchet) is not yet supported in the selfhosted compose setup.
|
||||
@@ -1,39 +0,0 @@
|
||||
FROM python:3.12-slim
|
||||
|
||||
ENV PYTHONUNBUFFERED=1 \
|
||||
UV_LINK_MODE=copy \
|
||||
UV_NO_CACHE=1
|
||||
|
||||
WORKDIR /tmp
|
||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||
apt-get update \
|
||||
&& apt-get install -y \
|
||||
ffmpeg \
|
||||
curl \
|
||||
ca-certificates \
|
||||
gnupg \
|
||||
wget
|
||||
ADD https://astral.sh/uv/install.sh /uv-installer.sh
|
||||
RUN sh /uv-installer.sh && rm /uv-installer.sh
|
||||
ENV PATH="/root/.local/bin/:$PATH"
|
||||
|
||||
RUN mkdir -p /app
|
||||
WORKDIR /app
|
||||
COPY pyproject.toml uv.lock /app/
|
||||
|
||||
|
||||
COPY ./app /app/app
|
||||
COPY ./main.py /app/
|
||||
COPY ./runserver.sh /app/
|
||||
|
||||
# prevent uv failing with too many open files on big cpus
|
||||
ENV UV_CONCURRENT_INSTALLS=16
|
||||
|
||||
# first install
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
uv sync --compile-bytecode --locked
|
||||
|
||||
EXPOSE 8000
|
||||
|
||||
CMD ["sh", "/app/runserver.sh"]
|
||||
@@ -3,14 +3,14 @@ import os
|
||||
from fastapi import Depends, HTTPException, status
|
||||
from fastapi.security import OAuth2PasswordBearer
|
||||
|
||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)
|
||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
|
||||
|
||||
|
||||
def apikey_auth(apikey: str | None = Depends(oauth2_scheme)):
|
||||
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
|
||||
required_key = os.environ.get("REFLECTOR_GPU_APIKEY")
|
||||
if not required_key:
|
||||
return
|
||||
if apikey and apikey == required_key:
|
||||
if apikey == required_key:
|
||||
return
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
|
||||
@@ -1,65 +1,10 @@
|
||||
import logging
|
||||
import os
|
||||
import tarfile
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from urllib.request import urlopen
|
||||
|
||||
import torch
|
||||
import torchaudio
|
||||
import yaml
|
||||
from pyannote.audio import Pipeline
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
S3_BUNDLE_URL = "https://reflector-public.s3.us-east-1.amazonaws.com/pyannote-speaker-diarization-3.1.tar.gz"
|
||||
BUNDLE_CACHE_DIR = Path("/root/.cache/pyannote-bundle")
|
||||
|
||||
|
||||
def _ensure_model(cache_dir: Path) -> str:
|
||||
"""Download and extract S3 model bundle if not cached."""
|
||||
model_dir = cache_dir / "pyannote-speaker-diarization-3.1"
|
||||
config_path = model_dir / "config.yaml"
|
||||
|
||||
if config_path.exists():
|
||||
logger.info("Using cached model bundle at %s", model_dir)
|
||||
return str(model_dir)
|
||||
|
||||
cache_dir.mkdir(parents=True, exist_ok=True)
|
||||
tarball_path = cache_dir / "model.tar.gz"
|
||||
|
||||
logger.info("Downloading model bundle from %s", S3_BUNDLE_URL)
|
||||
with urlopen(S3_BUNDLE_URL) as response, open(tarball_path, "wb") as f:
|
||||
while chunk := response.read(8192):
|
||||
f.write(chunk)
|
||||
|
||||
logger.info("Extracting model bundle")
|
||||
with tarfile.open(tarball_path, "r:gz") as tar:
|
||||
tar.extractall(path=cache_dir, filter="data")
|
||||
tarball_path.unlink()
|
||||
|
||||
_patch_config(model_dir, cache_dir)
|
||||
return str(model_dir)
|
||||
|
||||
|
||||
def _patch_config(model_dir: Path, cache_dir: Path) -> None:
|
||||
"""Rewrite config.yaml to reference local pytorch_model.bin paths."""
|
||||
config_path = model_dir / "config.yaml"
|
||||
with open(config_path) as f:
|
||||
config = yaml.safe_load(f)
|
||||
|
||||
config["pipeline"]["params"]["segmentation"] = str(
|
||||
cache_dir / "pyannote-segmentation-3.0" / "pytorch_model.bin"
|
||||
)
|
||||
config["pipeline"]["params"]["embedding"] = str(
|
||||
cache_dir / "pyannote-wespeaker-voxceleb-resnet34-LM" / "pytorch_model.bin"
|
||||
)
|
||||
|
||||
with open(config_path, "w") as f:
|
||||
yaml.dump(config, f)
|
||||
|
||||
logger.info("Patched config.yaml with local model paths")
|
||||
|
||||
|
||||
class PyannoteDiarizationService:
|
||||
def __init__(self):
|
||||
@@ -69,20 +14,10 @@ class PyannoteDiarizationService:
|
||||
|
||||
def load(self):
|
||||
self._device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
hf_token = os.environ.get("HF_TOKEN")
|
||||
|
||||
if hf_token:
|
||||
logger.info("Loading pyannote model from HuggingFace (HF_TOKEN set)")
|
||||
self._pipeline = Pipeline.from_pretrained(
|
||||
"pyannote/speaker-diarization-3.1",
|
||||
use_auth_token=hf_token,
|
||||
)
|
||||
else:
|
||||
logger.info("HF_TOKEN not set — loading model from S3 bundle")
|
||||
model_path = _ensure_model(BUNDLE_CACHE_DIR)
|
||||
config_path = Path(model_path) / "config.yaml"
|
||||
self._pipeline = Pipeline.from_pretrained(str(config_path))
|
||||
|
||||
self._pipeline = Pipeline.from_pretrained(
|
||||
"pyannote/speaker-diarization-3.1",
|
||||
use_auth_token=os.environ.get("HF_TOKEN"),
|
||||
)
|
||||
self._pipeline.to(torch.device(self._device))
|
||||
|
||||
def diarize_file(self, file_path: str, timestamp: float = 0.0) -> dict:
|
||||
|
||||
400	gpu/self_hosted/uv.lock (generated)
@@ -1,5 +1,5 @@
|
||||
version = 1
|
||||
revision = 3
|
||||
revision = 2
|
||||
requires-python = ">=3.12"
|
||||
|
||||
[[package]]
|
||||
@@ -13,7 +13,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "aiohttp"
|
||||
version = "3.13.3"
|
||||
version = "3.12.15"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiohappyeyeballs" },
|
||||
@@ -24,76 +24,42 @@ dependencies = [
|
||||
{ name = "propcache" },
|
||||
{ name = "yarl" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" }
|
||||
[[package]]
@@ -123,15 +89,6 @@ wheels = [
    { url = "https://files.pythonhosted.org/packages/39/4a/4c61d4c84cfd9befb6fa08a702535b27b21fff08c946bc2f6139decbf7f7/alembic-1.16.5-py3-none-any.whl", hash = "sha256:e845dfe090c5ffa7b92593ae6687c5cb1a101e91fa53868497dbd79847f9dbe3", size = 247355, upload-time = "2025-08-27T18:02:07.37Z" },
]

[[package]]
name = "annotated-doc"
version = "0.0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
]

[[package]]
name = "annotated-types"
version = "0.7.0"
@@ -503,18 +460,16 @@ wheels = [

[[package]]
name = "fastapi"
version = "0.133.1"
version = "0.116.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "annotated-doc" },
    { name = "pydantic" },
    { name = "starlette" },
    { name = "typing-extensions" },
    { name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/22/6f/0eafed8349eea1fa462238b54a624c8b408cd1ba2795c8e64aa6c34f8ab7/fastapi-0.133.1.tar.gz", hash = "sha256:ed152a45912f102592976fde6cbce7dae1a8a1053da94202e51dd35d184fadd6", size = 378741, upload-time = "2026-02-25T18:18:17.398Z" }
sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/d2/c9/a175a7779f3599dfa4adfc97a6ce0e157237b3d7941538604aadaf97bfb6/fastapi-0.133.1-py3-none-any.whl", hash = "sha256:658f34ba334605b1617a65adf2ea6461901bdb9af3a3080d63ff791ecf7dc2e2", size = 109029, upload-time = "2026-02-25T18:18:18.578Z" },
    { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" },
]

[package.optional-dependencies]
@@ -523,8 +478,6 @@ standard = [
    { name = "fastapi-cli", extra = ["standard"] },
    { name = "httpx" },
    { name = "jinja2" },
    { name = "pydantic-extra-types" },
    { name = "pydantic-settings" },
    { name = "python-multipart" },
    { name = "uvicorn", extra = ["standard"] },
]
@@ -586,11 +539,11 @@ wheels = [

[[package]]
name = "filelock"
version = "3.20.3"
version = "3.19.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" }
sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload-time = "2025-08-14T16:56:03.016Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" },
    { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload-time = "2025-08-14T16:56:01.633Z" },
]

[[package]]
@@ -604,43 +557,43 @@ wheels = [

[[package]]
name = "fonttools"
version = "4.60.2"
version = "4.59.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/3e/c4/db6a7b5eb0656534c3aa2596c2c5e18830d74f1b9aa5aa8a7dff63a0b11d/fonttools-4.60.2.tar.gz", hash = "sha256:d29552e6b155ebfc685b0aecf8d429cb76c14ab734c22ef5d3dea6fdf800c92c", size = 3562254, upload-time = "2025-12-09T13:38:11.835Z" }
sdist = { url = "https://files.pythonhosted.org/packages/0d/a5/fba25f9fbdab96e26dedcaeeba125e5f05a09043bf888e0305326e55685b/fonttools-4.59.2.tar.gz", hash = "sha256:e72c0749b06113f50bcb80332364c6be83a9582d6e3db3fe0b280f996dc2ef22", size = 3540889, upload-time = "2025-08-27T16:40:30.97Z" }
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/30/530c9eddcd1c39219dc0aaede2b5a4c8ab80e0bb88d1b3ffc12944c4aac3/fonttools-4.60.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e0164b7609d2b5c5dd4e044b8085b7bd7ca7363ef8c269a4ab5b5d4885a426b2", size = 2847196, upload-time = "2025-12-09T13:36:33.262Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/2f/4077a482836d5bbe3bc9dac1c004d02ee227cf04ed62b0a2dfc41d4f0dfd/fonttools-4.60.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1dd3d9574fc595c1e97faccae0f264dc88784ddf7fbf54c939528378bacc0033", size = 2395842, upload-time = "2025-12-09T13:36:35.47Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dd/05/aae5bb99c5398f8ed4a8b784f023fd9dd3568f0bd5d5b21e35b282550f11/fonttools-4.60.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:98d0719f1b11c2817307d2da2e94296a3b2a3503f8d6252a101dca3ee663b917", size = 4949713, upload-time = "2025-12-09T13:36:37.874Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/37/49067349fc78ff0efbf09fadefe80ddf41473ca8f8a25400e3770da38328/fonttools-4.60.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9d3ea26957dd07209f207b4fff64c702efe5496de153a54d3b91007ec28904dd", size = 4999907, upload-time = "2025-12-09T13:36:39.853Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/16/31/d0f11c758bd0db36b664c92a0f9dfdcc2d7313749aa7d6629805c6946f21/fonttools-4.60.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ee301273b0850f3a515299f212898f37421f42ff9adfc341702582ca5073c13", size = 4939717, upload-time = "2025-12-09T13:36:43.075Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/bc/1cff0d69522e561bf1b99bee7c3911c08c25e919584827c3454a64651ce9/fonttools-4.60.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c6eb4694cc3b9c03b7c01d65a9cf35b577f21aa6abdbeeb08d3114b842a58153", size = 5089205, upload-time = "2025-12-09T13:36:45.468Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/e6/fb174f0069b7122e19828c551298bfd34fdf9480535d2a6ac2ed37afacd3/fonttools-4.60.2-cp312-cp312-win32.whl", hash = "sha256:57f07b616c69c244cc1a5a51072eeef07dddda5ebef9ca5c6e9cf6d59ae65b70", size = 2264674, upload-time = "2025-12-09T13:36:49.238Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/75/57/6552ffd6b582d3e6a9f01780c5275e6dfff1e70ca146101733aa1c12a129/fonttools-4.60.2-cp312-cp312-win_amd64.whl", hash = "sha256:310035802392f1fe5a7cf43d76f6ff4a24c919e4c72c0352e7b8176e2584b8a0", size = 2314701, upload-time = "2025-12-09T13:36:51.09Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/e4/8381d0ca6b6c6c484660b03517ec5b5b81feeefca3808726dece36c652a9/fonttools-4.60.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2bb5fd231e56ccd7403212636dcccffc96c5ae0d6f9e4721fa0a32cb2e3ca432", size = 2842063, upload-time = "2025-12-09T13:36:53.468Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/2c/4367117ee8ff4f4374787a1222da0bd413d80cf3522111f727a7b8f80d1d/fonttools-4.60.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:536b5fab7b6fec78ccf59b5c59489189d9d0a8b0d3a77ed1858be59afb096696", size = 2393792, upload-time = "2025-12-09T13:36:55.742Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/b7/a76b6dffa193869e54e32ca2f9abb0d0e66784bc8a24e6f86eb093015481/fonttools-4.60.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6b9288fc38252ac86a9570f19313ecbc9ff678982e0f27c757a85f1f284d3400", size = 4924020, upload-time = "2025-12-09T13:36:58.229Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/4e/0078200e2259f0061c86a74075f507d64c43dd2ab38971956a5c0012d344/fonttools-4.60.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:93fcb420791d839ef592eada2b69997c445d0ce9c969b5190f2e16828ec10607", size = 4980070, upload-time = "2025-12-09T13:37:00.311Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/85/1f/d87c85a11cb84852c975251581862681e4a0c1c3bd456c648792203f311b/fonttools-4.60.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7916a381b094db4052ac284255186aebf74c5440248b78860cb41e300036f598", size = 4921411, upload-time = "2025-12-09T13:37:02.345Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/75/c0/7efad650f5ed8e317c2633133ef3c64917e7adf2e4e2940c798f5d57ec6e/fonttools-4.60.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58c8c393d5e16b15662cfc2d988491940458aa87894c662154f50c7b49440bef", size = 5063465, upload-time = "2025-12-09T13:37:04.836Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/a8/750518c4f8cdd79393b386bc81226047ade80239e58c6c9f5dbe1fdd8ea1/fonttools-4.60.2-cp313-cp313-win32.whl", hash = "sha256:19c6e0afd8b02008caa0aa08ab896dfce5d0bcb510c49b2c499541d5cb95a963", size = 2263443, upload-time = "2025-12-09T13:37:06.762Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/22/026c60376f165981f80a0e90bd98a79ae3334e9d89a3d046c4d2e265c724/fonttools-4.60.2-cp313-cp313-win_amd64.whl", hash = "sha256:6a500dc59e11b2338c2dba1f8cf11a4ae8be35ec24af8b2628b8759a61457b76", size = 2313800, upload-time = "2025-12-09T13:37:08.713Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/ab/7cf1f5204e1366ddf9dc5cdc2789b571feb9eebcee0e3463c3f457df5f52/fonttools-4.60.2-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9387c532acbe323bbf2a920f132bce3c408a609d5f9dcfc6532fbc7e37f8ccbb", size = 2841690, upload-time = "2025-12-09T13:37:10.696Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/3c/0bf83c6f863cc8b934952567fa2bf737cfcec8fc4ffb59b3f93820095f89/fonttools-4.60.2-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6f1c824185b5b8fb681297f315f26ae55abb0d560c2579242feea8236b1cfef", size = 2392191, upload-time = "2025-12-09T13:37:12.954Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/f0/40090d148b8907fbea12e9bdf1ff149f30cdf1769e3b2c3e0dbf5106b88d/fonttools-4.60.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:55a3129d1e4030b1a30260f1b32fe76781b585fb2111d04a988e141c09eb6403", size = 4873503, upload-time = "2025-12-09T13:37:15.142Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/e0/d8b13f99e58b8c293781288ba62fe634f1f0697c9c4c0ae104d3215f3a10/fonttools-4.60.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b196e63753abc33b3b97a6fd6de4b7c4fef5552c0a5ba5e562be214d1e9668e0", size = 4968493, upload-time = "2025-12-09T13:37:18.272Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/46/c5/960764d12c92bc225f02401d3067048cb7b282293d9e48e39fe2b0ec38a9/fonttools-4.60.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:de76c8d740fb55745f3b154f0470c56db92ae3be27af8ad6c2e88f1458260c9a", size = 4920015, upload-time = "2025-12-09T13:37:20.334Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/ab/839d8caf253d1eef3653ef4d34427d0326d17a53efaec9eb04056b670fff/fonttools-4.60.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6ba6303225c95998c9fda2d410aa792c3d2c1390a09df58d194b03e17583fa25", size = 5031165, upload-time = "2025-12-09T13:37:23.57Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/bf/3bc862796a6841cbe0725bb5512d272239b809dba631a4b0301df885e62d/fonttools-4.60.2-cp314-cp314-win32.whl", hash = "sha256:0a89728ce10d7c816fedaa5380c06d2793e7a8a634d7ce16810e536c22047384", size = 2267526, upload-time = "2025-12-09T13:37:25.821Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/a1/c1909cacf00c76dc37b4743451561fbaaf7db4172c22a6d9394081d114c3/fonttools-4.60.2-cp314-cp314-win_amd64.whl", hash = "sha256:fa8446e6ab8bd778b82cb1077058a2addba86f30de27ab9cc18ed32b34bc8667", size = 2319096, upload-time = "2025-12-09T13:37:28.058Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/b3/f66e71433f08e3a931b2b31a665aeed17fcc5e6911fc73529c70a232e421/fonttools-4.60.2-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:4063bc81ac5a4137642865cb63dd270e37b3cd1f55a07c0d6e41d072699ccca2", size = 2925167, upload-time = "2025-12-09T13:37:30.348Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/2e/13/eeb491ff743594bbd0bee6e49422c03a59fe9c49002d3cc60eeb77414285/fonttools-4.60.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:ebfdb66fa69732ed604ab8e2a0431e6deff35e933a11d73418cbc7823d03b8e1", size = 2430923, upload-time = "2025-12-09T13:37:32.817Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/e5/db609f785e460796e53c4dbc3874a5f4948477f27beceb5e2d24b2537666/fonttools-4.60.2-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50b10b3b1a72d1d54c61b0e59239e1a94c0958f4a06a1febf97ce75388dd91a4", size = 4877729, upload-time = "2025-12-09T13:37:35.858Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/d6/85e4484dd4bfb03fee7bd370d65888cccbd3dee2681ee48c869dd5ccb23f/fonttools-4.60.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:beae16891a13b4a2ddec9b39b4de76092a3025e4d1c82362e3042b62295d5e4d", size = 5096003, upload-time = "2025-12-09T13:37:37.862Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/30/49/1a98e44b71030b83d2046f981373b80571868259d98e6dae7bc20099dac6/fonttools-4.60.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:522f017fdb3766fd5d2d321774ef351cc6ce88ad4e6ac9efe643e4a2b9d528db", size = 4974410, upload-time = "2025-12-09T13:37:40.166Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/07/d6f775d950ee8a841012472c7303f8819423d8cc3b4530915de7265ebfa2/fonttools-4.60.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:82cceceaf9c09a965a75b84a4b240dd3768e596ffb65ef53852681606fe7c9ba", size = 5002036, upload-time = "2025-12-09T13:37:42.639Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/f6/ba6458f83ce1a9f8c3b17bd8f7b8a2205a126aac1055796b7e7cfebbd38f/fonttools-4.60.2-cp314-cp314t-win32.whl", hash = "sha256:bbfbc918a75437fe7e6d64d1b1e1f713237df1cf00f3a36dedae910b2ba01cee", size = 2330985, upload-time = "2025-12-09T13:37:45.157Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/24/fea0ba4d3a32d4ed1103a1098bfd99dc78b5fe3bb97202920744a37b73dc/fonttools-4.60.2-cp314-cp314t-win_amd64.whl", hash = "sha256:0e5cd9b0830f6550d58c84f3ab151a9892b50c4f9d538c5603c0ce6fff2eb3f1", size = 2396226, upload-time = "2025-12-09T13:37:47.355Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/6c/10280af05b44fafd1dff69422805061fa1af29270bc52dce031ac69540bf/fonttools-4.60.2-py3-none-any.whl", hash = "sha256:73cf92eeda67cf6ff10c8af56fc8f4f07c1647d989a979be9e388a49be26552a", size = 1144610, upload-time = "2025-12-09T13:38:09.5Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ba/3d/1f45db2df51e7bfa55492e8f23f383d372200be3a0ded4bf56a92753dd1f/fonttools-4.59.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82906d002c349cad647a7634b004825a7335f8159d0d035ae89253b4abf6f3ea", size = 2769711, upload-time = "2025-08-27T16:39:04.423Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/df/cd236ab32a8abfd11558f296e064424258db5edefd1279ffdbcfd4fd8b76/fonttools-4.59.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a10c1bd7644dc58f8862d8ba0cf9fb7fef0af01ea184ba6ce3f50ab7dfe74d5a", size = 2340225, upload-time = "2025-08-27T16:39:06.143Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/12/b6f9f964fe6d4b4dd4406bcbd3328821c3de1f909ffc3ffa558fe72af48c/fonttools-4.59.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:738f31f23e0339785fd67652a94bc69ea49e413dfdb14dcb8c8ff383d249464e", size = 4912766, upload-time = "2025-08-27T16:39:08.138Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/78/82bde2f2d2c306ef3909b927363170b83df96171f74e0ccb47ad344563cd/fonttools-4.59.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ec99f9bdfee9cdb4a9172f9e8fd578cce5feb231f598909e0aecf5418da4f25", size = 4955178, upload-time = "2025-08-27T16:39:10.094Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/92/77/7de766afe2d31dda8ee46d7e479f35c7d48747e558961489a2d6e3a02bd4/fonttools-4.59.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0476ea74161322e08c7a982f83558a2b81b491509984523a1a540baf8611cc31", size = 4897898, upload-time = "2025-08-27T16:39:12.087Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c5/77/ce0e0b905d62a06415fda9f2b2e109a24a5db54a59502b769e9e297d2242/fonttools-4.59.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:95922a922daa1f77cc72611747c156cfb38030ead72436a2c551d30ecef519b9", size = 5049144, upload-time = "2025-08-27T16:39:13.84Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/ea/870d93aefd23fff2e07cbeebdc332527868422a433c64062c09d4d5e7fe6/fonttools-4.59.2-cp312-cp312-win32.whl", hash = "sha256:39ad9612c6a622726a6a130e8ab15794558591f999673f1ee7d2f3d30f6a3e1c", size = 2206473, upload-time = "2025-08-27T16:39:15.854Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/61/c4/e44bad000c4a4bb2e9ca11491d266e857df98ab6d7428441b173f0fe2517/fonttools-4.59.2-cp312-cp312-win_amd64.whl", hash = "sha256:980fd7388e461b19a881d35013fec32c713ffea1fc37aef2f77d11f332dfd7da", size = 2254706, upload-time = "2025-08-27T16:39:17.893Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/7b/d0d3b9431642947b5805201fbbbe938a47b70c76685ef1f0cb5f5d7140d6/fonttools-4.59.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:381bde13216ba09489864467f6bc0c57997bd729abfbb1ce6f807ba42c06cceb", size = 2761563, upload-time = "2025-08-27T16:39:20.286Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/76/be/fc5fe58dd76af7127b769b68071dbc32d4b95adc8b58d1d28d42d93c90f2/fonttools-4.59.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f33839aa091f7eef4e9078f5b7ab1b8ea4b1d8a50aeaef9fdb3611bba80869ec", size = 2335671, upload-time = "2025-08-27T16:39:22.027Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/9f/bf231c2a3fac99d1d7f1d89c76594f158693f981a4aa02be406e9f036832/fonttools-4.59.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6235fc06bcbdb40186f483ba9d5d68f888ea68aa3c8dac347e05a7c54346fbc8", size = 4893967, upload-time = "2025-08-27T16:39:23.664Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/26/a9/d46d2ad4fcb915198504d6727f83aa07f46764c64f425a861aa38756c9fd/fonttools-4.59.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83ad6e5d06ef3a2884c4fa6384a20d6367b5cfe560e3b53b07c9dc65a7020e73", size = 4951986, upload-time = "2025-08-27T16:39:25.379Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/90/1cc8d7dd8f707dfeeca472b82b898d3add0ebe85b1f645690dcd128ee63f/fonttools-4.59.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d029804c70fddf90be46ed5305c136cae15800a2300cb0f6bba96d48e770dde0", size = 4891630, upload-time = "2025-08-27T16:39:27.494Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/04/f0345b0d9fe67d65aa8d3f2d4cbf91d06f111bc7b8d802e65914eb06194d/fonttools-4.59.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:95807a3b5e78f2714acaa26a33bc2143005cc05c0217b322361a772e59f32b89", size = 5035116, upload-time = "2025-08-27T16:39:29.406Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/7d/5ba5eefffd243182fbd067cdbfeb12addd4e5aec45011b724c98a344ea33/fonttools-4.59.2-cp313-cp313-win32.whl", hash = "sha256:b3ebda00c3bb8f32a740b72ec38537d54c7c09f383a4cfefb0b315860f825b08", size = 2204907, upload-time = "2025-08-27T16:39:31.42Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ea/a9/be7219fc64a6026cc0aded17fa3720f9277001c185434230bd351bf678e6/fonttools-4.59.2-cp313-cp313-win_amd64.whl", hash = "sha256:a72155928d7053bbde499d32a9c77d3f0f3d29ae72b5a121752481bcbd71e50f", size = 2253742, upload-time = "2025-08-27T16:39:33.079Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/c7/486580d00be6fa5d45e41682e5ffa5c809f3d25773c6f39628d60f333521/fonttools-4.59.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d09e487d6bfbe21195801323ba95c91cb3523f0fcc34016454d4d9ae9eaa57fe", size = 2762444, upload-time = "2025-08-27T16:39:34.759Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d3/9b/950ea9b7b764ceb8d18645c62191e14ce62124d8e05cb32a4dc5e65fde0b/fonttools-4.59.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:dec2f22486d7781087b173799567cffdcc75e9fb2f1c045f05f8317ccce76a3e", size = 2333256, upload-time = "2025-08-27T16:39:40.777Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9b/4d/8ee9d563126de9002eede950cde0051be86cc4e8c07c63eca0c9fc95734a/fonttools-4.59.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1647201af10993090120da2e66e9526c4e20e88859f3e34aa05b8c24ded2a564", size = 4834846, upload-time = "2025-08-27T16:39:42.885Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/26/f26d947b0712dce3d118e92ce30ca88f98938b066498f60d0ee000a892ae/fonttools-4.59.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47742c33fe65f41eabed36eec2d7313a8082704b7b808752406452f766c573fc", size = 4930871, upload-time = "2025-08-27T16:39:44.818Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/7f/ebe878061a5a5e6b6502f0548489e01100f7e6c0049846e6546ba19a3ab4/fonttools-4.59.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:92ac2d45794f95d1ad4cb43fa07e7e3776d86c83dc4b9918cf82831518165b4b", size = 4876971, upload-time = "2025-08-27T16:39:47.027Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/0d/0d22e3a20ac566836098d30718092351935487e3271fd57385db1adb2fde/fonttools-4.59.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:fa9ecaf2dcef8941fb5719e16322345d730f4c40599bbf47c9753de40eb03882", size = 4987478, upload-time = "2025-08-27T16:39:48.774Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/a3/960cc83182a408ffacc795e61b5f698c6f7b0cfccf23da4451c39973f3c8/fonttools-4.59.2-cp314-cp314-win32.whl", hash = "sha256:a8d40594982ed858780e18a7e4c80415af65af0f22efa7de26bdd30bf24e1e14", size = 2208640, upload-time = "2025-08-27T16:39:50.592Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/74/55e5c57c414fa3965fee5fc036ed23f26a5c4e9e10f7f078a54ff9c7dfb7/fonttools-4.59.2-cp314-cp314-win_amd64.whl", hash = "sha256:9cde8b6a6b05f68516573523f2013a3574cb2c75299d7d500f44de82ba947b80", size = 2258457, upload-time = "2025-08-27T16:39:52.611Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/dc/8e4261dc591c5cfee68fecff3ffee2a9b29e1edc4c4d9cbafdc5aefe74ee/fonttools-4.59.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:036cd87a2dbd7ef72f7b68df8314ced00b8d9973aee296f2464d06a836aeb9a9", size = 2829901, upload-time = "2025-08-27T16:39:55.014Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/05/331538dcf21fd6331579cd628268150e85210d0d2bdae20f7598c2b36c05/fonttools-4.59.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:14870930181493b1d740b6f25483e20185e5aea58aec7d266d16da7be822b4bb", size = 2362717, upload-time = "2025-08-27T16:39:56.843Z" },
{ url = "https://files.pythonhosted.org/packages/60/ae/d26428ca9ede809c0a93f0af91f44c87433dc0251e2aec333da5ed00d38f/fonttools-4.59.2-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7ff58ea1eb8fc7e05e9a949419f031890023f8785c925b44d6da17a6a7d6e85d", size = 4835120, upload-time = "2025-08-27T16:39:59.06Z" },
{ url = "https://files.pythonhosted.org/packages/07/c4/0f6ac15895de509e07688cb1d45f1ae583adbaa0fa5a5699d73f3bd58ca0/fonttools-4.59.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6dee142b8b3096514c96ad9e2106bf039e2fe34a704c587585b569a36df08c3c", size = 5071115, upload-time = "2025-08-27T16:40:01.009Z" },
{ url = "https://files.pythonhosted.org/packages/b2/b6/147a711b7ecf7ea39f9da9422a55866f6dd5747c2f36b3b0a7a7e0c6820b/fonttools-4.59.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8991bdbae39cf78bcc9cd3d81f6528df1f83f2e7c23ccf6f990fa1f0b6e19708", size = 4943905, upload-time = "2025-08-27T16:40:03.179Z" },
{ url = "https://files.pythonhosted.org/packages/5b/4e/2ab19006646b753855e2b02200fa1cabb75faa4eeca4ef289f269a936974/fonttools-4.59.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:53c1a411b7690042535a4f0edf2120096a39a506adeb6c51484a232e59f2aa0c", size = 4960313, upload-time = "2025-08-27T16:40:05.45Z" },
{ url = "https://files.pythonhosted.org/packages/98/3d/df77907e5be88adcca93cc2cee00646d039da220164be12bee028401e1cf/fonttools-4.59.2-cp314-cp314t-win32.whl", hash = "sha256:59d85088e29fa7a8f87d19e97a1beae2a35821ee48d8ef6d2c4f965f26cb9f8a", size = 2269719, upload-time = "2025-08-27T16:40:07.553Z" },
{ url = "https://files.pythonhosted.org/packages/2d/a0/d4c4bc5b50275449a9a908283b567caa032a94505fe1976e17f994faa6be/fonttools-4.59.2-cp314-cp314t-win_amd64.whl", hash = "sha256:7ad5d8d8cc9e43cb438b3eb4a0094dd6d4088daa767b0a24d52529361fd4c199", size = 2333169, upload-time = "2025-08-27T16:40:09.656Z" },
{ url = "https://files.pythonhosted.org/packages/65/a4/d2f7be3c86708912c02571db0b550121caab8cd88a3c0aacb9cfa15ea66e/fonttools-4.59.2-py3-none-any.whl", hash = "sha256:8bd0f759020e87bb5d323e6283914d9bf4ae35a7307dafb2cbd1e379e720ad37", size = 1132315, upload-time = "2025-08-27T16:40:28.984Z" },
]
[[package]]
@@ -731,8 +684,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" },
{ url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" },
{ url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" },
{ url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" },
{ url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, upload-time = "2025-11-04T12:42:17.175Z" },
{ url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" },
{ url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" },
{ url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" },
@@ -742,8 +693,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" },
{ url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" },
{ url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" },
{ url = "https://files.pythonhosted.org/packages/1c/53/f9c440463b3057485b8594d7a638bed53ba531165ef0ca0e6c364b5cc807/greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b", size = 1564759, upload-time = "2025-11-04T12:42:19.395Z" },
{ url = "https://files.pythonhosted.org/packages/47/e4/3bb4240abdd0a8d23f4f88adec746a3099f0d86bfedb623f063b2e3b4df0/greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929", size = 1634288, upload-time = "2025-11-04T12:42:21.174Z" },
{ url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" },
{ url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" },
{ url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" },
@@ -751,8 +700,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" },
{ url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" },
{ url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" },
{ url = "https://files.pythonhosted.org/packages/23/6e/74407aed965a4ab6ddd93a7ded3180b730d281c77b765788419484cdfeef/greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269", size = 1612508, upload-time = "2025-11-04T12:42:23.427Z" },
{ url = "https://files.pythonhosted.org/packages/0d/da/343cd760ab2f92bac1845ca07ee3faea9fe52bee65f7bcb19f16ad7de08b/greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681", size = 1680760, upload-time = "2025-11-04T12:42:25.341Z" },
{ url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" },
]
@@ -1572,71 +1519,68 @@ wheels = [
[[package]]
name = "pillow"
version = "12.1.1"
version = "11.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" }
sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" },
{ url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" },
{ url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" },
{ url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" },
{ url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" },
{ url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" },
{ url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" },
{ url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" },
{ url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" },
{ url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" },
{ url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" },
{ url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" },
{ url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" },
{ url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" },
{ url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" },
{ url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" },
{ url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" },
{ url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" },
{ url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" },
{ url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" },
{ url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" },
{ url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" },
{ url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" },
{ url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" },
{ url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" },
{ url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" },
{ url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" },
{ url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" },
{ url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" },
{ url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" },
{ url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" },
{ url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" },
{ url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" },
{ url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" },
{ url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" },
{ url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" },
{ url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" },
{ url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" },
{ url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" },
{ url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" },
{ url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" },
{ url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" },
{ url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" },
{ url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" },
{ url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" },
{ url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" },
{ url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" },
{ url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" },
{ url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" },
{ url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" },
{ url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" },
{ url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" },
{ url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" },
{ url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" },
{ url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" },
{ url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" },
{ url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" },
{ url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" },
{ url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" },
{ url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" },
{ url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" },
{ url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" },
{ url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" },
{ url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" },
{ url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" },
{ url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" },
{ url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" },
{ url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" },
{ url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" },
{ url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" },
{ url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" },
{ url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" },
{ url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
{ url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
{ url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
{ url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
{ url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
{ url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
{ url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
{ url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
{ url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
{ url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
{ url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
{ url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
{ url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
{ url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
{ url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
{ url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
{ url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
{ url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
{ url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
{ url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
{ url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
{ url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
{ url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
{ url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
{ url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
{ url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" },
{ url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" },
{ url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" },
{ url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" },
{ url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" },
{ url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" },
{ url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" },
{ url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" },
{ url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" },
{ url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" },
{ url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" },
{ url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" },
{ url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" },
{ url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" },
{ url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" },
{ url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" },
{ url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" },
{ url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" },
{ url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" },
{ url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" },
{ url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" },
{ url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
]
[[package]]
@@ -1730,17 +1674,16 @@ wheels = [
[[package]]
name = "protobuf"
version = "6.33.5"
version = "6.32.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ba/25/7c72c307aafc96fa87062aa6291d9f7c94836e43214d43722e86037aac02/protobuf-6.33.5.tar.gz", hash = "sha256:6ddcac2a081f8b7b9642c09406bc6a4290128fce5f471cddd165960bb9119e5c", size = 444465, upload-time = "2026-01-29T21:51:33.494Z" }
sdist = { url = "https://files.pythonhosted.org/packages/c0/df/fb4a8eeea482eca989b51cffd274aac2ee24e825f0bf3cbce5281fa1567b/protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2", size = 440614, upload-time = "2025-08-14T21:21:25.015Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b1/79/af92d0a8369732b027e6d6084251dd8e782c685c72da161bd4a2e00fbabb/protobuf-6.33.5-cp310-abi3-win32.whl", hash = "sha256:d71b040839446bac0f4d162e758bea99c8251161dae9d0983a3b88dee345153b", size = 425769, upload-time = "2026-01-29T21:51:21.751Z" },
{ url = "https://files.pythonhosted.org/packages/55/75/bb9bc917d10e9ee13dee8607eb9ab963b7cf8be607c46e7862c748aa2af7/protobuf-6.33.5-cp310-abi3-win_amd64.whl", hash = "sha256:3093804752167bcab3998bec9f1048baae6e29505adaf1afd14a37bddede533c", size = 437118, upload-time = "2026-01-29T21:51:24.022Z" },
{ url = "https://files.pythonhosted.org/packages/a2/6b/e48dfc1191bc5b52950246275bf4089773e91cb5ba3592621723cdddca62/protobuf-6.33.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a5cb85982d95d906df1e2210e58f8e4f1e3cdc088e52c921a041f9c9a0386de5", size = 427766, upload-time = "2026-01-29T21:51:25.413Z" },
{ url = "https://files.pythonhosted.org/packages/4e/b1/c79468184310de09d75095ed1314b839eb2f72df71097db9d1404a1b2717/protobuf-6.33.5-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:9b71e0281f36f179d00cbcb119cb19dec4d14a81393e5ea220f64b286173e190", size = 324638, upload-time = "2026-01-29T21:51:26.423Z" },
{ url = "https://files.pythonhosted.org/packages/c5/f5/65d838092fd01c44d16037953fd4c2cc851e783de9b8f02b27ec4ffd906f/protobuf-6.33.5-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8afa18e1d6d20af15b417e728e9f60f3aa108ee76f23c3b2c07a2c3b546d3afd", size = 339411, upload-time = "2026-01-29T21:51:27.446Z" },
{ url = "https://files.pythonhosted.org/packages/9b/53/a9443aa3ca9ba8724fdfa02dd1887c1bcd8e89556b715cfbacca6b63dbec/protobuf-6.33.5-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:cbf16ba3350fb7b889fca858fb215967792dc125b35c7976ca4818bee3521cf0", size = 323465, upload-time = "2026-01-29T21:51:28.925Z" },
{ url = "https://files.pythonhosted.org/packages/57/bf/2086963c69bdac3d7cff1cc7ff79b8ce5ea0bec6797a017e1be338a46248/protobuf-6.33.5-py3-none-any.whl", hash = "sha256:69915a973dd0f60f31a08b8318b73eab2bd6a392c79184b3612226b0a3f8ec02", size = 170687, upload-time = "2026-01-29T21:51:32.557Z" },
{ url = "https://files.pythonhosted.org/packages/33/18/df8c87da2e47f4f1dcc5153a81cd6bca4e429803f4069a299e236e4dd510/protobuf-6.32.0-cp310-abi3-win32.whl", hash = "sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741", size = 424409, upload-time = "2025-08-14T21:21:12.366Z" },
{ url = "https://files.pythonhosted.org/packages/e1/59/0a820b7310f8139bd8d5a9388e6a38e1786d179d6f33998448609296c229/protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e", size = 435735, upload-time = "2025-08-14T21:21:15.046Z" },
{ url = "https://files.pythonhosted.org/packages/cc/5b/0d421533c59c789e9c9894683efac582c06246bf24bb26b753b149bd88e4/protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0", size = 426449, upload-time = "2025-08-14T21:21:16.687Z" },
{ url = "https://files.pythonhosted.org/packages/ec/7b/607764ebe6c7a23dcee06e054fd1de3d5841b7648a90fd6def9a3bb58c5e/protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1", size = 322869, upload-time = "2025-08-14T21:21:18.282Z" },
{ url = "https://files.pythonhosted.org/packages/40/01/2e730bd1c25392fc32e3268e02446f0d77cb51a2c3a8486b1798e34d5805/protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c", size = 322009, upload-time = "2025-08-14T21:21:19.893Z" },
{ url = "https://files.pythonhosted.org/packages/9c/f2/80ffc4677aac1bc3519b26bc7f7f5de7fce0ee2f7e36e59e27d8beb32dd1/protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783", size = 169287, upload-time = "2025-08-14T21:21:23.515Z" },
]
[[package]]
@@ -1914,33 +1857,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
]
[[package]]
name = "pydantic-extra-types"
version = "2.11.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pydantic" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fd/35/2fee58b1316a73e025728583d3b1447218a97e621933fc776fb8c0f2ebdd/pydantic_extra_types-2.11.0.tar.gz", hash = "sha256:4e9991959d045b75feb775683437a97991d02c138e00b59176571db9ce634f0e", size = 157226, upload-time = "2025-12-31T16:18:27.944Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/17/fabd56da47096d240dd45ba627bead0333b0cf0ee8ada9bec579287dadf3/pydantic_extra_types-2.11.0-py3-none-any.whl", hash = "sha256:84b864d250a0fc62535b7ec591e36f2c5b4d1325fa0017eb8cda9aeb63b374a6", size = 74296, upload-time = "2025-12-31T16:18:26.38Z" },
]
[[package]]
name = "pydantic-settings"
version = "2.13.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pydantic" },
{ name = "python-dotenv" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/52/6d/fffca34caecc4a3f97bda81b2098da5e8ab7efc9a66e819074a11955d87e/pydantic_settings-2.13.1.tar.gz", hash = "sha256:b4c11847b15237fb0171e1462bf540e294affb9b86db4d9aa5c01730bdbe4025", size = 223826, upload-time = "2026-02-19T13:45:08.055Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/00/4b/ccc026168948fec4f7555b9164c724cf4125eac006e176541483d2c959be/pydantic_settings-2.13.1-py3-none-any.whl", hash = "sha256:d56fd801823dbeae7f0975e1f8c8e25c258eb75d278ea7abb5d9cebb01b56237", size = 58929, upload-time = "2026-02-19T13:45:06.034Z" },
]
[[package]]
name = "pygments"
version = "2.19.2"
@@ -1991,11 +1907,11 @@ wheels = [
[[package]]
name = "python-multipart"
version = "0.0.22"
version = "0.0.20"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/94/01/979e98d542a70714b0cb2b6728ed0b7c46792b695e3eaec3e20711271ca3/python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58", size = 37612, upload-time = "2026-01-25T10:15:56.219Z" }
sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/d0/397f9626e711ff749a95d96b7af99b9c566a9bb5129b8e4c10fc4d100304/python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155", size = 24579, upload-time = "2026-01-25T10:15:54.811Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2599,15 +2515,15 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "starlette"
|
||||
version = "0.49.1"
|
||||
version = "0.47.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/1b/3f/507c21db33b66fb027a332f2cb3abbbe924cc3a79ced12f01ed8645955c9/starlette-0.49.1.tar.gz", hash = "sha256:481a43b71e24ed8c43b11ea02f5353d77840e01480881b8cb5a26b8cae64a8cb", size = 2654703, upload-time = "2025-10-28T17:34:10.928Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/15/b9/cc3017f9a9c9b6e27c5106cc10cc7904653c3eec0729793aec10479dd669/starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9", size = 2584144, upload-time = "2025-08-24T13:36:42.122Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/51/da/545b75d420bb23b5d494b0517757b351963e974e79933f01e05c929f20a6/starlette-0.49.1-py3-none-any.whl", hash = "sha256:d92ce9f07e4a3caa3ac13a79523bd18e3bc0042bb8ff2d759a8e7dd0e1859875", size = 74175, upload-time = "2025-10-28T17:34:09.13Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991, upload-time = "2025-08-24T13:36:40.887Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2861,14 +2777,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "typing-inspection"
|
||||
version = "0.4.2"
|
||||
version = "0.4.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2882,11 +2798,11 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "urllib3"
|
||||
version = "2.6.3"
|
||||
version = "2.5.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
10
node_modules/.yarn-integrity
generated
vendored
@@ -1,10 +0,0 @@
{
  "systemParams": "darwin-x64-83",
  "modulesFolders": [],
  "flags": [],
  "linkedModules": [],
  "topLevelPatterns": [],
  "lockfileEntries": {},
  "files": [],
  "artifacts": {}
}
@@ -1,14 +0,0 @@
metadata_dir = "/var/lib/garage/meta"
data_dir = "/var/lib/garage/data"
replication_factor = 1

rpc_secret = "__GARAGE_RPC_SECRET__"
rpc_bind_addr = "[::]:3901"

[s3_api]
api_bind_addr = "[::]:3900"
s3_region = "garage"
root_domain = ".s3.garage.localhost"

[admin]
api_bind_addr = "[::]:3903"
@@ -1,87 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Install Docker Engine + Compose plugin on Ubuntu.
|
||||
# Ubuntu's default repos don't include docker-compose-plugin, so we add Docker's official repo.
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/install-docker-ubuntu.sh
|
||||
#
|
||||
# Requires: root or sudo
|
||||
#
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# --- Colors ---
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
CYAN='\033[0;36m'
|
||||
NC='\033[0m'
|
||||
|
||||
info() { echo -e "${CYAN}==>${NC} $*"; }
|
||||
ok() { echo -e "${GREEN} ✓${NC} $*"; }
|
||||
warn() { echo -e "${YELLOW} !${NC} $*"; }
|
||||
err() { echo -e "${RED} ✗${NC} $*" >&2; }
|
||||
|
||||
# Use sudo if available and not root; otherwise run directly
|
||||
if [[ $(id -u) -eq 0 ]]; then
|
||||
MAYBE_SUDO=""
|
||||
elif command -v sudo &>/dev/null; then
|
||||
MAYBE_SUDO="sudo "
|
||||
else
|
||||
err "Need root. Run as root or install sudo: apt install sudo"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check Ubuntu
|
||||
if [[ ! -f /etc/os-release ]]; then
|
||||
err "Cannot detect OS. This script is for Ubuntu."
|
||||
exit 1
|
||||
fi
|
||||
source /etc/os-release
|
||||
if [[ "${ID:-}" != "ubuntu" ]] && [[ "${ID_LIKE:-}" != *"ubuntu"* ]]; then
|
||||
err "This script is for Ubuntu. Detected: ${ID:-unknown}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
info "Adding Docker's official repository..."
|
||||
${MAYBE_SUDO}apt update
|
||||
${MAYBE_SUDO}apt install -y ca-certificates curl
|
||||
${MAYBE_SUDO}install -m 0755 -d /etc/apt/keyrings
|
||||
${MAYBE_SUDO}rm -f /etc/apt/sources.list.d/docker.list /etc/apt/sources.list.d/docker.sources
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | ${MAYBE_SUDO}tee /etc/apt/keyrings/docker.asc > /dev/null
|
||||
${MAYBE_SUDO}chmod a+r /etc/apt/keyrings/docker.asc
|
||||
CODENAME="$(. /etc/os-release && echo "${UBUNTU_CODENAME:-${VERSION_CODENAME:-}}")"
|
||||
[[ -z "$CODENAME" ]] && { err "Could not detect Ubuntu version codename."; exit 1; }
|
||||
${MAYBE_SUDO}tee /etc/apt/sources.list.d/docker.sources > /dev/null <<EOF
|
||||
Types: deb
|
||||
URIs: https://download.docker.com/linux/ubuntu
|
||||
Suites: ${CODENAME}
|
||||
Components: stable
|
||||
Signed-By: /etc/apt/keyrings/docker.asc
|
||||
EOF
|
||||
|
||||
info "Installing Docker Engine and Compose plugin..."
|
||||
${MAYBE_SUDO}apt update
|
||||
${MAYBE_SUDO}apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
|
||||
if [[ -d /run/systemd/system ]]; then
|
||||
info "Enabling and starting Docker..."
|
||||
${MAYBE_SUDO}systemctl enable --now docker
|
||||
else
|
||||
err "No systemd. This script requires Ubuntu with systemd (e.g. DigitalOcean droplet)."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DOCKER_USER="${SUDO_USER:-${USER:-root}}"
|
||||
if [[ "$DOCKER_USER" != "root" ]]; then
|
||||
info "Adding $DOCKER_USER to docker group..."
|
||||
${MAYBE_SUDO}usermod -aG docker "$DOCKER_USER"
|
||||
fi
|
||||
|
||||
ok "Docker installed successfully."
|
||||
echo ""
|
||||
echo " Log out and back in (or run: newgrp docker) so the group change takes effect."
|
||||
echo " Then verify with: docker compose version"
|
||||
echo ""
|
||||
File diff suppressed because it is too large
@@ -1,675 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Standalone local development setup for Reflector.
|
||||
# Takes a fresh clone to a working instance — no cloud accounts, no API keys.
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/setup-standalone.sh
|
||||
#
|
||||
# Idempotent — safe to re-run at any time.
|
||||
#
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
|
||||
SERVER_ENV="$ROOT_DIR/server/.env"
|
||||
WWW_ENV="$ROOT_DIR/www/.env.local"
|
||||
|
||||
MODEL="${LLM_MODEL:-qwen2.5:14b}"
|
||||
OLLAMA_PORT="${OLLAMA_PORT:-11435}"
|
||||
|
||||
OS="$(uname -s)"
|
||||
|
||||
# --- Colors ---
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
CYAN='\033[0;36m'
|
||||
NC='\033[0m'
|
||||
|
||||
info() { echo -e "${CYAN}==>${NC} $*"; }
|
||||
ok() { echo -e "${GREEN} ✓${NC} $*"; }
|
||||
warn() { echo -e "${YELLOW} !${NC} $*"; }
|
||||
err() { echo -e "${RED} ✗${NC} $*" >&2; }
|
||||
|
||||
# --- Helpers ---
|
||||
|
||||
dump_diagnostics() {
|
||||
local failed_svc="${1:-}"
|
||||
echo ""
|
||||
err "========== DIAGNOSTICS =========="
|
||||
|
||||
err "Container status:"
|
||||
compose_cmd ps -a --format "table {{.Name}}\t{{.Status}}" 2>/dev/null || true
|
||||
echo ""
|
||||
|
||||
# Show logs for any container that exited
|
||||
local stopped
|
||||
stopped=$(compose_cmd ps -a --format '{{.Name}}\t{{.Status}}' 2>/dev/null \
|
||||
| grep -iv 'up\|running' | awk -F'\t' '{print $1}' || true)
|
||||
for c in $stopped; do
|
||||
err "--- Logs for $c (exited/unhealthy) ---"
|
||||
docker logs --tail 30 "$c" 2>&1 || true
|
||||
echo ""
|
||||
done
|
||||
|
||||
# If a specific service failed, always show its logs
|
||||
if [[ -n "$failed_svc" ]]; then
|
||||
err "--- Logs for $failed_svc (last 40) ---"
|
||||
compose_cmd logs "$failed_svc" --tail 40 2>&1 || true
|
||||
echo ""
|
||||
# Try health check from inside the container as extra signal
|
||||
err "--- Internal health check ($failed_svc) ---"
|
||||
compose_cmd exec -T "$failed_svc" \
|
||||
curl -sf http://localhost:1250/health 2>&1 || echo "(not reachable internally either)"
|
||||
fi
|
||||
|
||||
err "================================="
|
||||
}
|
||||
|
||||
trap 'dump_diagnostics' ERR
|
||||
|
||||
# Get the image ID for a compose service (works even when containers are not running).
|
||||
svc_image_id() {
|
||||
local svc="$1"
|
||||
# Extract image name from compose config YAML, fall back to <project>-<service>
|
||||
local img_name
|
||||
img_name=$(compose_cmd config 2>/dev/null \
|
||||
| sed -n "/^ ${svc}:/,/^ [a-z]/p" | grep '^\s*image:' | awk '{print $2}')
|
||||
img_name="${img_name:-reflector-$svc}"
|
||||
docker images -q "$img_name" 2>/dev/null | head -1
|
||||
}
|
||||
|
||||
# Ensure images with build contexts are up-to-date.
|
||||
# Docker layer cache makes this fast (~seconds) when source hasn't changed.
|
||||
rebuild_images() {
|
||||
local svc
|
||||
for svc in web cpu; do
|
||||
local old_id
|
||||
old_id=$(svc_image_id "$svc")
|
||||
old_id="${old_id:-<none>}"
|
||||
|
||||
info "Building $svc..."
|
||||
compose_cmd build "$svc"
|
||||
|
||||
local new_id
|
||||
new_id=$(svc_image_id "$svc")
|
||||
|
||||
if [[ "$old_id" == "$new_id" ]]; then
|
||||
ok "$svc unchanged (${new_id:0:12})"
|
||||
else
|
||||
ok "$svc rebuilt (${old_id:0:12} -> ${new_id:0:12})"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
detect_lan_ip() {
|
||||
# Returns the host's LAN IP — used for WebRTC ICE candidate rewriting.
|
||||
case "$OS" in
|
||||
Darwin)
|
||||
# Try common interfaces: en0 (Wi-Fi), en1 (Ethernet)
|
||||
for iface in en0 en1 en2 en3; do
|
||||
local ip
|
||||
ip=$(ipconfig getifaddr "$iface" 2>/dev/null || true)
|
||||
if [[ -n "$ip" ]]; then
|
||||
echo "$ip"
|
||||
return
|
||||
fi
|
||||
done
|
||||
;;
|
||||
Linux)
|
||||
ip route get 1.1.1.1 2>/dev/null | sed -n 's/.*src \([^ ]*\).*/\1/p'
|
||||
return
|
||||
;;
|
||||
esac
|
||||
# Fallback — empty means "not detected"
|
||||
echo ""
|
||||
}
|
||||
|
||||
wait_for_url() {
|
||||
local url="$1" label="$2" retries="${3:-30}" interval="${4:-2}"
|
||||
for i in $(seq 1 "$retries"); do
|
||||
if curl -sf "$url" > /dev/null 2>&1; then
|
||||
return 0
|
||||
fi
|
||||
echo -ne "\r Waiting for $label... ($i/$retries)"
|
||||
sleep "$interval"
|
||||
done
|
||||
echo ""
|
||||
err "$label not responding at $url after $retries attempts"
|
||||
return 1
|
||||
}
|
||||
|
||||
env_has_key() {
|
||||
local file="$1" key="$2"
|
||||
grep -q "^${key}=" "$file" 2>/dev/null
|
||||
}
|
||||
|
||||
env_set() {
|
||||
local file="$1" key="$2" value="$3"
|
||||
if env_has_key "$file" "$key"; then
|
||||
# Replace existing value (portable sed)
|
||||
if [[ "$OS" == "Darwin" ]]; then
|
||||
sed -i '' "s|^${key}=.*|${key}=${value}|" "$file"
|
||||
else
|
||||
sed -i "s|^${key}=.*|${key}=${value}|" "$file"
|
||||
fi
|
||||
else
|
||||
echo "${key}=${value}" >> "$file"
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_symlink() {
|
||||
local file="$1"
|
||||
if [[ -L "$file" ]]; then
|
||||
warn "$(basename "$file") is a symlink — creating standalone copy"
|
||||
cp -L "$file" "$file.tmp"
|
||||
rm "$file"
|
||||
mv "$file.tmp" "$file"
|
||||
fi
|
||||
}
|
||||
|
||||
compose_cmd() {
|
||||
local compose_files="-f $ROOT_DIR/docker-compose.standalone.yml"
|
||||
if [[ "$OS" == "Linux" ]] && [[ -n "${OLLAMA_PROFILE:-}" ]]; then
|
||||
docker compose $compose_files --profile "$OLLAMA_PROFILE" "$@"
|
||||
else
|
||||
docker compose $compose_files "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Step 1: LLM / Ollama
|
||||
# =========================================================
|
||||
step_llm() {
|
||||
info "Step 1: LLM setup (Ollama + $MODEL)"
|
||||
|
||||
case "$OS" in
|
||||
Darwin)
|
||||
if ! command -v ollama &> /dev/null; then
|
||||
err "Ollama not found. Install it:"
|
||||
err " brew install ollama"
|
||||
err " # or https://ollama.com/download"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start if not running
|
||||
if ! curl -sf "http://localhost:$OLLAMA_PORT/api/tags" > /dev/null 2>&1; then
|
||||
info "Starting Ollama..."
|
||||
ollama serve &
|
||||
disown
|
||||
fi
|
||||
|
||||
wait_for_url "http://localhost:$OLLAMA_PORT/api/tags" "Ollama"
|
||||
echo ""
|
||||
|
||||
# Pull model if not already present
|
||||
if ollama list 2>/dev/null | awk '{print $1}' | grep -qxF "$MODEL"; then
|
||||
ok "Model $MODEL already pulled"
|
||||
else
|
||||
info "Pulling model $MODEL (this may take a while)..."
|
||||
ollama pull "$MODEL"
|
||||
fi
|
||||
|
||||
LLM_URL_VALUE="http://host.docker.internal:$OLLAMA_PORT/v1"
|
||||
;;
|
||||
|
||||
Linux)
|
||||
if command -v nvidia-smi &> /dev/null && nvidia-smi > /dev/null 2>&1; then
|
||||
ok "NVIDIA GPU detected — using ollama-gpu profile"
|
||||
OLLAMA_PROFILE="ollama-gpu"
|
||||
OLLAMA_SVC="ollama"
|
||||
LLM_URL_VALUE="http://ollama:$OLLAMA_PORT/v1"
|
||||
else
|
||||
warn "No NVIDIA GPU — using ollama-cpu profile"
|
||||
OLLAMA_PROFILE="ollama-cpu"
|
||||
OLLAMA_SVC="ollama-cpu"
|
||||
LLM_URL_VALUE="http://ollama-cpu:$OLLAMA_PORT/v1"
|
||||
fi
|
||||
|
||||
info "Starting Ollama container..."
|
||||
compose_cmd up -d
|
||||
|
||||
wait_for_url "http://localhost:$OLLAMA_PORT/api/tags" "Ollama"
|
||||
echo ""
|
||||
|
||||
# Pull model inside container
|
||||
if compose_cmd exec "$OLLAMA_SVC" ollama list 2>/dev/null | awk '{print $1}' | grep -qxF "$MODEL"; then
|
||||
ok "Model $MODEL already pulled"
|
||||
else
|
||||
info "Pulling model $MODEL inside container (this may take a while)..."
|
||||
compose_cmd exec "$OLLAMA_SVC" ollama pull "$MODEL"
|
||||
fi
|
||||
;;
|
||||
|
||||
*)
|
||||
err "Unsupported OS: $OS"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
ok "LLM ready ($MODEL via Ollama)"
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Step 2: Generate server/.env
|
||||
# =========================================================
|
||||
step_server_env() {
|
||||
info "Step 2: Generating server/.env"
|
||||
|
||||
resolve_symlink "$SERVER_ENV"
|
||||
|
||||
if [[ -f "$SERVER_ENV" ]]; then
|
||||
ok "server/.env already exists — ensuring standalone vars"
|
||||
else
|
||||
cat > "$SERVER_ENV" << 'ENVEOF'
|
||||
# Generated by setup-standalone.sh — standalone local development
|
||||
# Source of truth for settings: server/reflector/settings.py
|
||||
ENVEOF
|
||||
ok "Created server/.env"
|
||||
fi
|
||||
|
||||
# Ensure all standalone-critical vars (appends if missing, replaces if present)
|
||||
env_set "$SERVER_ENV" "DATABASE_URL" "postgresql+asyncpg://reflector:reflector@postgres:5432/reflector"
|
||||
env_set "$SERVER_ENV" "REDIS_HOST" "redis"
|
||||
env_set "$SERVER_ENV" "CELERY_BROKER_URL" "redis://redis:6379/1"
|
||||
env_set "$SERVER_ENV" "CELERY_RESULT_BACKEND" "redis://redis:6379/1"
|
||||
env_set "$SERVER_ENV" "AUTH_BACKEND" "none"
|
||||
env_set "$SERVER_ENV" "PUBLIC_MODE" "true"
|
||||
# TRANSCRIPT_BACKEND, TRANSCRIPT_URL, DIARIZATION_BACKEND, DIARIZATION_URL
|
||||
# are set via docker-compose.standalone.yml `environment:` overrides — not written here
|
||||
# so we don't clobber the user's server/.env for non-standalone use.
|
||||
env_set "$SERVER_ENV" "TRANSLATION_BACKEND" "passthrough"
|
||||
env_set "$SERVER_ENV" "LLM_URL" "$LLM_URL_VALUE"
|
||||
env_set "$SERVER_ENV" "LLM_MODEL" "$MODEL"
|
||||
env_set "$SERVER_ENV" "LLM_API_KEY" "not-needed"
|
||||
|
||||
# WebRTC: detect LAN IP for ICE candidate rewriting (bridge networking)
|
||||
local lan_ip
|
||||
lan_ip=$(detect_lan_ip)
|
||||
if [[ -n "$lan_ip" ]]; then
|
||||
env_set "$SERVER_ENV" "WEBRTC_HOST" "$lan_ip"
|
||||
ok "WebRTC host IP: $lan_ip"
|
||||
else
|
||||
warn "Could not detect LAN IP — WebRTC recording from other devices may not work"
|
||||
warn "Set WEBRTC_HOST=<your-lan-ip> in server/.env manually"
|
||||
fi
|
||||
|
||||
ok "Standalone vars set (LLM_URL=$LLM_URL_VALUE)"
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Step 3: Object storage (Garage)
|
||||
# =========================================================
|
||||
step_storage() {
|
||||
info "Step 3: Object storage (Garage)"
|
||||
|
||||
# Generate garage.toml from template (fill in RPC secret)
|
||||
GARAGE_TOML="$ROOT_DIR/scripts/garage.toml"
|
||||
GARAGE_TOML_RUNTIME="$ROOT_DIR/data/garage.toml"
|
||||
mkdir -p "$ROOT_DIR/data"
|
||||
if [[ -d "$GARAGE_TOML_RUNTIME" ]]; then
|
||||
rm -rf "$GARAGE_TOML_RUNTIME"
|
||||
fi
|
||||
if [[ ! -f "$GARAGE_TOML_RUNTIME" ]]; then
|
||||
RPC_SECRET=$(openssl rand -hex 32)
|
||||
sed "s|__GARAGE_RPC_SECRET__|${RPC_SECRET}|" "$GARAGE_TOML" > "$GARAGE_TOML_RUNTIME"
|
||||
fi
|
||||
|
||||
compose_cmd up -d garage
|
||||
|
||||
# Use /metrics for readiness — /health returns 503 until layout is applied
|
||||
if ! wait_for_url "http://localhost:3903/metrics" "Garage admin API"; then
|
||||
echo ""
|
||||
err "Garage container logs:"
|
||||
compose_cmd logs garage --tail 30 2>&1 || true
|
||||
exit 1
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Layout: get node ID, assign, apply (skip if already applied)
|
||||
NODE_ID=$(compose_cmd exec -T garage /garage node id -q 2>/dev/null | tr -d '[:space:]')
|
||||
LAYOUT_STATUS=$(compose_cmd exec -T garage /garage layout show 2>&1 || true)
|
||||
if echo "$LAYOUT_STATUS" | grep -q "No nodes"; then
|
||||
compose_cmd exec -T garage /garage layout assign "$NODE_ID" -c 1G -z dc1
|
||||
compose_cmd exec -T garage /garage layout apply --version 1
|
||||
fi
|
||||
|
||||
# Create bucket (idempotent — skip if exists)
|
||||
if ! compose_cmd exec -T garage /garage bucket info reflector-media &>/dev/null; then
|
||||
compose_cmd exec -T garage /garage bucket create reflector-media
|
||||
fi
|
||||
|
||||
# Create key (idempotent — skip if exists)
|
||||
CREATED_KEY=false
|
||||
if compose_cmd exec -T garage /garage key info reflector &>/dev/null; then
|
||||
ok "Key 'reflector' already exists"
|
||||
else
|
||||
KEY_OUTPUT=$(compose_cmd exec -T garage /garage key create reflector)
|
||||
CREATED_KEY=true
|
||||
fi
|
||||
|
||||
# Grant bucket permissions (idempotent)
|
||||
compose_cmd exec -T garage /garage bucket allow reflector-media --read --write --key reflector
|
||||
|
||||
# Set env vars (only parse key on first create — key info redacts the secret)
|
||||
env_set "$SERVER_ENV" "TRANSCRIPT_STORAGE_BACKEND" "aws"
|
||||
env_set "$SERVER_ENV" "TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL" "http://garage:3900"
|
||||
env_set "$SERVER_ENV" "TRANSCRIPT_STORAGE_AWS_BUCKET_NAME" "reflector-media"
|
||||
env_set "$SERVER_ENV" "TRANSCRIPT_STORAGE_AWS_REGION" "garage"
|
||||
if [[ "$CREATED_KEY" == "true" ]]; then
|
||||
KEY_ID=$(echo "$KEY_OUTPUT" | grep -i "key id" | awk '{print $NF}')
|
||||
KEY_SECRET=$(echo "$KEY_OUTPUT" | grep -i "secret key" | awk '{print $NF}')
|
||||
env_set "$SERVER_ENV" "TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID" "$KEY_ID"
|
||||
env_set "$SERVER_ENV" "TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY" "$KEY_SECRET"
|
||||
fi
|
||||
|
||||
ok "Object storage ready (Garage)"
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Step 4: Generate www/.env.local
|
||||
# =========================================================
|
||||
step_www_env() {
|
||||
info "Step 4: Generating www/.env.local"
|
||||
|
||||
resolve_symlink "$WWW_ENV"
|
||||
|
||||
if [[ -f "$WWW_ENV" ]]; then
|
||||
ok "www/.env.local already exists — ensuring standalone vars"
|
||||
else
|
||||
cat > "$WWW_ENV" << 'ENVEOF'
|
||||
# Generated by setup-standalone.sh — standalone local development
|
||||
ENVEOF
|
||||
ok "Created www/.env.local"
|
||||
fi
|
||||
|
||||
# Caddyfile.standalone.example serves API at /v1, /health — use base URL
|
||||
if [[ -n "${PRIMARY_IP:-}" ]]; then
|
||||
BASE_URL="https://$PRIMARY_IP:3043"
|
||||
else
|
||||
BASE_URL="https://localhost:3043"
|
||||
fi
|
||||
env_set "$WWW_ENV" "SITE_URL" "$BASE_URL"
|
||||
env_set "$WWW_ENV" "NEXTAUTH_URL" "$BASE_URL"
|
||||
env_set "$WWW_ENV" "NEXTAUTH_SECRET" "standalone-dev-secret-not-for-production"
|
||||
env_set "$WWW_ENV" "API_URL" "$BASE_URL"
|
||||
env_set "$WWW_ENV" "WEBSOCKET_URL" "auto"
|
||||
env_set "$WWW_ENV" "SERVER_API_URL" "http://server:1250"
|
||||
env_set "$WWW_ENV" "FEATURE_REQUIRE_LOGIN" "false"
|
||||
|
||||
ok "Standalone www vars set"
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Step 5: Start all services
|
||||
# =========================================================
|
||||
step_services() {
|
||||
info "Step 5: Starting Docker services"
|
||||
|
||||
# Check for port conflicts — stale processes silently shadow Docker port mappings.
|
||||
# OrbStack/Docker Desktop bind ports for forwarding; ignore those PIDs.
|
||||
local ports_ok=true
|
||||
for port in 3043 3000 1250 5432 6379 3900 3903; do
|
||||
local pids
|
||||
pids=$(lsof -ti :"$port" 2>/dev/null || true)
|
||||
for pid in $pids; do
|
||||
local pname
|
||||
pname=$(ps -p "$pid" -o comm= 2>/dev/null || true)
|
||||
# OrbStack and Docker Desktop own port forwarding — not real conflicts
|
||||
if [[ "$pname" == *"OrbStack"* ]] || [[ "$pname" == *"com.docker"* ]] || [[ "$pname" == *"vpnkit"* ]]; then
|
||||
continue
|
||||
fi
|
||||
warn "Port $port already in use by PID $pid ($pname)"
|
||||
warn "Kill it with: lsof -ti :$port | xargs kill"
|
||||
ports_ok=false
|
||||
done
|
||||
done
|
||||
if [[ "$ports_ok" == "false" ]]; then
|
||||
warn "Port conflicts detected — Docker containers may not be reachable"
|
||||
warn "Continuing anyway (services will start but may be shadowed)"
|
||||
fi
|
||||
|
||||
# Rebuild images if source has changed (Docker layer cache makes this fast when unchanged)
|
||||
rebuild_images
|
||||
|
||||
# server runs alembic migrations on startup automatically (see runserver.sh)
|
||||
compose_cmd up -d postgres redis garage cpu server worker beat web caddy
|
||||
ok "Containers started"
|
||||
|
||||
# Quick sanity check — catch containers that exit immediately (bad image, missing file, etc.)
|
||||
sleep 3
|
||||
local exited
|
||||
exited=$(compose_cmd ps -a --format '{{.Name}} {{.Status}}' 2>/dev/null \
|
||||
| grep -i 'exit' || true)
|
||||
if [[ -n "$exited" ]]; then
|
||||
warn "Some containers exited immediately:"
|
||||
echo "$exited" | while read -r line; do warn " $line"; done
|
||||
dump_diagnostics
|
||||
fi
|
||||
|
||||
info "Server is running migrations (alembic upgrade head)..."
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Step 6: Health checks
|
||||
# =========================================================
|
||||
step_health() {
|
||||
info "Step 6: Health checks"
|
||||
|
||||
# CPU service may take a while on first start (model download + load).
|
||||
# No host port exposed — check via docker exec.
|
||||
info "Waiting for CPU service (first start downloads ~1GB of models)..."
|
||||
local cpu_ok=false
|
||||
for i in $(seq 1 120); do
|
||||
if compose_cmd exec -T cpu curl -sf http://localhost:8000/docs > /dev/null 2>&1; then
|
||||
cpu_ok=true
|
||||
break
|
||||
fi
|
||||
echo -ne "\r Waiting for CPU service... ($i/120)"
|
||||
sleep 5
|
||||
done
|
||||
echo ""
|
||||
if [[ "$cpu_ok" == "true" ]]; then
|
||||
ok "CPU service healthy (transcription + diarization)"
|
||||
else
|
||||
warn "CPU service not ready yet — it will keep loading in the background"
|
||||
warn "Check with: docker compose logs cpu"
|
||||
fi
|
||||
|
||||
# Server may take a long time on first run — alembic migrations run before uvicorn starts.
|
||||
# Use docker exec so this works regardless of network_mode or port mapping.
|
||||
info "Waiting for Server API (first run includes database migrations)..."
|
||||
local server_ok=false
|
||||
for i in $(seq 1 90); do
|
||||
# Check if container is still running
|
||||
local svc_status
|
||||
svc_status=$(compose_cmd ps server --format '{{.Status}}' 2>/dev/null || true)
|
||||
if [[ -z "$svc_status" ]] || echo "$svc_status" | grep -qi 'exit'; then
|
||||
echo ""
|
||||
err "Server container exited unexpectedly"
|
||||
dump_diagnostics server
|
||||
exit 1
|
||||
fi
|
||||
# Health check from inside container (avoids host networking issues)
|
||||
if compose_cmd exec -T server curl -sf http://localhost:1250/health > /dev/null 2>&1; then
|
||||
server_ok=true
|
||||
break
|
||||
fi
|
||||
echo -ne "\r Waiting for Server API... ($i/90)"
|
||||
sleep 5
|
||||
done
|
||||
echo ""
|
||||
if [[ "$server_ok" == "true" ]]; then
|
||||
ok "Server API healthy"
|
||||
else
|
||||
err "Server API not ready after ~7 minutes"
|
||||
dump_diagnostics server
|
||||
exit 1
|
||||
fi
|
||||
|
||||
wait_for_url "http://localhost:3000" "Frontend" 90 3
|
||||
echo ""
|
||||
ok "Frontend responding"
|
||||
|
||||
# Caddy reverse proxy (self-signed TLS — curl needs -k)
|
||||
if curl -sfk "https://localhost:3043" > /dev/null 2>&1; then
|
||||
ok "Caddy proxy healthy (https://localhost:3043)"
|
||||
else
|
||||
warn "Caddy proxy not responding on https://localhost:3043"
|
||||
warn "Check with: docker compose logs caddy"
|
||||
fi
|
||||
|
||||
# Check LLM reachability from inside a container
|
||||
if compose_cmd exec -T server \
|
||||
curl -sf "$LLM_URL_VALUE/models" > /dev/null 2>&1; then
|
||||
ok "LLM reachable from containers"
|
||||
else
|
||||
warn "LLM not reachable from containers at $LLM_URL_VALUE"
|
||||
warn "Summaries/topics/titles won't work until LLM is accessible"
|
||||
fi
|
||||
}
|
||||
|
||||
# =========================================================
|
||||
# Main
|
||||
# =========================================================
|
||||
main() {
|
||||
echo ""
|
||||
echo "=========================================="
|
||||
echo " Reflector — Standalone Local Setup"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
# Ensure we're in the repo root
|
||||
if [[ ! -f "$ROOT_DIR/docker-compose.yml" ]]; then
|
||||
err "docker-compose.yml not found in $ROOT_DIR"
|
||||
err "Run this script from the repo root: ./scripts/setup-standalone.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Docker: Compose plugin, buildx, and daemon. On Ubuntu, auto-install if missing.
|
||||
docker_ready() {
|
||||
docker compose version 2>/dev/null | grep -qi compose \
|
||||
&& docker buildx version &>/dev/null \
|
||||
&& docker info &>/dev/null
|
||||
}
|
||||
|
||||
if ! docker_ready; then
|
||||
RAN_INSTALL=false
|
||||
if [[ "$OS" == "Linux" ]] && [[ -f /etc/os-release ]] && (source /etc/os-release 2>/dev/null; [[ "${ID:-}" == "ubuntu" || "${ID_LIKE:-}" == *"ubuntu"* ]]); then
|
||||
info "Docker not ready. Running install-docker-ubuntu.sh..."
|
||||
"$SCRIPT_DIR/install-docker-ubuntu.sh" || true
|
||||
RAN_INSTALL=true
|
||||
[[ -d /run/systemd/system ]] && command -v systemctl &>/dev/null && systemctl start docker 2>/dev/null || true
|
||||
sleep 2
|
||||
fi
|
||||
if ! docker_ready; then
|
||||
# Docker may be installed but current shell lacks docker group (needs newgrp)
|
||||
if [[ "$RAN_INSTALL" == "true" ]] && [[ $(id -u) -ne 0 ]] && command -v sg &>/dev/null && getent group docker &>/dev/null; then
|
||||
info "Re-running with docker group..."
|
||||
exec sg docker -c "$(printf '%q' "$0" && printf ' %q' "$@")"
|
||||
fi
|
||||
if [[ "$OS" == "Darwin" ]]; then
|
||||
err "Docker not ready. Install Docker Desktop or OrbStack."
|
||||
elif [[ "$OS" == "Linux" ]]; then
|
||||
err "Docker not ready. Run: ./scripts/install-docker-ubuntu.sh"
|
||||
err "Then run: newgrp docker (or log out and back in), then run this script again."
|
||||
else
|
||||
err "Docker not ready. Install Docker with Compose V2 and buildx."
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# LLM_URL_VALUE is set by step_llm, used by later steps
|
||||
LLM_URL_VALUE=""
|
||||
OLLAMA_PROFILE=""
|
||||
|
||||
# docker-compose.yml may reference env_files that don't exist yet;
|
||||
# touch them so compose_cmd works before the steps that populate them.
|
||||
touch "$SERVER_ENV" "$WWW_ENV"
|
||||
|
||||
# Ensure garage.toml exists before any compose up (step_llm starts all services including garage)
|
||||
GARAGE_TOML="$ROOT_DIR/scripts/garage.toml"
|
||||
GARAGE_TOML_RUNTIME="$ROOT_DIR/data/garage.toml"
|
||||
mkdir -p "$ROOT_DIR/data"
|
||||
if [[ -d "$GARAGE_TOML_RUNTIME" ]]; then
|
||||
rm -rf "$GARAGE_TOML_RUNTIME"
|
||||
fi
|
||||
if [[ ! -f "$GARAGE_TOML_RUNTIME" ]]; then
|
||||
RPC_SECRET=$(openssl rand -hex 32)
|
||||
sed "s|__GARAGE_RPC_SECRET__|${RPC_SECRET}|" "$GARAGE_TOML" > "$GARAGE_TOML_RUNTIME"
|
||||
fi
|
||||
|
||||
# Remove containers that may have bad mounts (was directory); force recreate
|
||||
compose_cmd rm -f -s garage caddy 2>/dev/null || true
|
||||
|
||||
# Detect primary IP for droplet (used for Caddyfile, step_www_env, success message)
|
||||
PRIMARY_IP=""
|
||||
if [[ "$OS" == "Linux" ]]; then
|
||||
PRIMARY_IP=$(hostname -I 2>/dev/null | awk '{print $1}' || true)
|
||||
if [[ "$PRIMARY_IP" == "127."* ]] || [[ -z "$PRIMARY_IP" ]]; then
|
||||
PRIMARY_IP=$(ip -4 route get 1 2>/dev/null | sed -n 's/.*src \([0-9.]*\).*/\1/p' || true)
|
||||
fi
|
||||
fi
|
||||
|
||||
# Ensure Caddyfile exists before any compose up (step_llm starts caddy)
|
||||
# On droplet: explicit IP + localhost so Caddy provisions cert at startup (avoids on_demand/SNI issues)
|
||||
CADDYFILE="$ROOT_DIR/Caddyfile"
|
||||
if [[ -d "$CADDYFILE" ]]; then
|
||||
rm -rf "$CADDYFILE"
|
||||
fi
|
||||
if [[ -n "$PRIMARY_IP" ]]; then
|
||||
cat > "$CADDYFILE" << CADDYEOF
|
||||
# Generated by setup-standalone.sh — explicit IP for droplet (provisions cert at startup)
|
||||
https://$PRIMARY_IP, localhost {
|
||||
tls internal
|
||||
handle /v1/* {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle /health {
|
||||
reverse_proxy server:1250
|
||||
}
|
||||
handle {
|
||||
reverse_proxy web:3000
|
||||
}
|
||||
}
|
||||
CADDYEOF
|
||||
ok "Created Caddyfile for $PRIMARY_IP and localhost"
|
||||
elif [[ ! -f "$CADDYFILE" ]]; then
|
||||
cp "$ROOT_DIR/Caddyfile.standalone.example" "$CADDYFILE"
|
||||
fi
|
||||
|
||||
step_llm
|
||||
echo ""
|
||||
step_server_env
|
||||
echo ""
|
||||
step_storage
|
||||
echo ""
|
||||
step_www_env
|
||||
echo ""
|
||||
step_services
|
||||
echo ""
|
||||
step_health
|
||||
|
||||
echo ""
|
||||
echo "=========================================="
|
||||
echo -e " ${GREEN}Reflector is running!${NC}"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
if [[ -n "$PRIMARY_IP" ]]; then
|
||||
echo " App: https://$PRIMARY_IP:3043 (accept self-signed cert in browser)"
|
||||
echo " API: https://$PRIMARY_IP:3043/v1/"
|
||||
echo " Local: https://localhost:3043"
|
||||
else
|
||||
echo " App: https://localhost:3043 (accept self-signed cert in browser)"
|
||||
echo " API: https://localhost:3043/v1/"
|
||||
fi
|
||||
echo ""
|
||||
echo " To stop: docker compose down"
|
||||
echo " To re-run: ./scripts/setup-standalone.sh"
|
||||
echo ""
|
||||
}
|
||||
|
||||
main "$@"
|
||||
@@ -66,22 +66,15 @@ TRANSLATE_URL=https://monadical-sas--reflector-translator-web.modal.run
## LLM backend (Required)
##
## Responsible for generating titles, summaries, and topic detection
## Supports any OpenAI-compatible endpoint.
## Requires OpenAI API key
## =======================================================

## --- Option A: Local LLM via Ollama (recommended for dev) ---
## Setup: ./scripts/setup-standalone.sh
## Mac: Ollama runs natively (Metal GPU). Containers reach it via host.docker.internal.
## Linux: docker compose --profile ollama-gpu up -d (or ollama-cpu for no GPU)
LLM_URL=http://host.docker.internal:11435/v1
LLM_MODEL=qwen2.5:14b
LLM_API_KEY=not-needed
## Linux with containerized Ollama: LLM_URL=http://ollama:11435/v1
## OpenAI API key - get from https://platform.openai.com/account/api-keys
LLM_API_KEY=sk-your-openai-api-key
LLM_MODEL=gpt-4o-mini

## --- Option B: Remote/cloud LLM ---
#LLM_API_KEY=sk-your-openai-api-key
#LLM_MODEL=gpt-4o-mini
## LLM_URL defaults to OpenAI when unset
## Optional: Custom endpoint (defaults to OpenAI)
# LLM_URL=https://api.openai.com/v1

## Context size for summary generation (tokens)
LLM_CONTEXT_WINDOW=16000
@@ -1,115 +0,0 @@
|
||||
# =======================================================
|
||||
# Reflector Self-Hosted Production — Backend Configuration
|
||||
# Generated by: ./scripts/setup-selfhosted.sh
|
||||
# Reference: server/reflector/settings.py
|
||||
# =======================================================
|
||||
|
||||
# =======================================================
|
||||
# Database & Infrastructure
|
||||
# Pre-filled for Docker internal networking (docker-compose.selfhosted.yml)
|
||||
# =======================================================
|
||||
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
CELERY_BROKER_URL=redis://redis:6379/1
|
||||
CELERY_RESULT_BACKEND=redis://redis:6379/1
|
||||
|
||||
# Secret key — auto-generated by setup script
|
||||
# Generate manually with: openssl rand -hex 32
|
||||
SECRET_KEY=changeme-generate-a-secure-random-string
|
||||
|
||||
# =======================================================
|
||||
# Authentication
|
||||
# Disabled by default. Enable Authentik for multi-user access.
|
||||
# See docsv2/selfhosted-production.md for setup instructions.
|
||||
# =======================================================
|
||||
AUTH_BACKEND=none
|
||||
# AUTH_BACKEND=jwt
|
||||
# AUTH_JWT_AUDIENCE=
|
||||
# AUTH_BACKEND=password
|
||||
# ADMIN_EMAIL=admin@localhost
|
||||
# ADMIN_PASSWORD_HASH=pbkdf2:sha256:100000$<salt>$<hash>
|
||||
|
||||
# =======================================================
|
||||
# Specialized Models (Transcription, Diarization, Translation)
|
||||
# These run in the gpu/cpu container — NOT an LLM.
|
||||
# The "modal" backend means "HTTP API client" — it talks to
|
||||
# the self-hosted container, not Modal.com cloud.
|
||||
# =======================================================
|
||||
TRANSCRIPT_BACKEND=modal
|
||||
TRANSCRIPT_URL=http://transcription:8000
|
||||
TRANSCRIPT_MODAL_API_KEY=selfhosted
|
||||
|
||||
DIARIZATION_ENABLED=true
|
||||
DIARIZATION_BACKEND=modal
|
||||
DIARIZATION_URL=http://transcription:8000
|
||||
|
||||
TRANSLATION_BACKEND=modal
|
||||
TRANSLATE_URL=http://transcription:8000
|
||||
|
||||
# HuggingFace token — optional, for gated models (e.g. pyannote).
|
||||
# Falls back to public S3 model bundle if not set.
|
||||
# HF_TOKEN=hf_xxxxx
|
||||
|
||||
# =======================================================
|
||||
# LLM for Summarization & Topic Detection
|
||||
# Only summaries and topics use an LLM. Everything else
|
||||
# (transcription, diarization, translation) uses specialized models above.
|
||||
#
|
||||
# Supports any OpenAI-compatible endpoint.
|
||||
# Auto-configured by setup script if using --ollama-gpu or --ollama-cpu.
|
||||
# For --gpu or --cpu modes, you MUST configure an external LLM.
|
||||
# =======================================================
|
||||
|
||||
# --- Option A: External OpenAI-compatible API ---
|
||||
# LLM_URL=https://api.openai.com/v1
|
||||
# LLM_API_KEY=sk-your-api-key
|
||||
# LLM_MODEL=gpt-4o-mini
|
||||
|
||||
# --- Option B: Local Ollama (auto-set by --ollama-gpu/--ollama-cpu) ---
|
||||
# LLM_URL=http://ollama:11435/v1
|
||||
# LLM_API_KEY=not-needed
|
||||
# LLM_MODEL=llama3.1
|
||||
|
||||
LLM_CONTEXT_WINDOW=16000
|
||||
|
||||
# =======================================================
|
||||
# S3 Storage (REQUIRED)
|
||||
# Where to store audio files and transcripts.
|
||||
#
|
||||
# Option A: Use --garage flag (auto-configured by setup script)
|
||||
# Option B: Any S3-compatible endpoint (AWS, MinIO, etc.)
|
||||
# Set TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL for non-AWS endpoints.
|
||||
# =======================================================
|
||||
TRANSCRIPT_STORAGE_BACKEND=aws
|
||||
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID=
|
||||
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY=
|
||||
TRANSCRIPT_STORAGE_AWS_BUCKET_NAME=reflector-media
|
||||
TRANSCRIPT_STORAGE_AWS_REGION=us-east-1
|
||||
|
||||
# For non-AWS S3-compatible endpoints (Garage, MinIO, etc.):
|
||||
# TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL=http://garage:3900
|
||||
|
||||
# =======================================================
|
||||
# Daily.co Live Rooms (Optional)
|
||||
# Enable real-time meeting rooms with Daily.co integration.
|
||||
# Requires a Daily.co account: https://www.daily.co/
|
||||
# =======================================================
|
||||
# DEFAULT_VIDEO_PLATFORM=daily
|
||||
# DAILY_API_KEY=your-daily-api-key
|
||||
# DAILY_SUBDOMAIN=your-subdomain
|
||||
# DAILY_WEBHOOK_SECRET=your-daily-webhook-secret
|
||||
# DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco
|
||||
# DAILYCO_STORAGE_AWS_REGION=us-east-1
|
||||
# DAILYCO_STORAGE_AWS_ROLE_ARN=arn:aws:iam::role/DailyCoAccess
|
||||
|
||||
# =======================================================
|
||||
# Feature Flags
|
||||
# =======================================================
|
||||
PUBLIC_MODE=true
|
||||
# FEATURE_ROOMS=true
|
||||
|
||||
# =======================================================
|
||||
# Sentry (Optional)
|
||||
# =======================================================
|
||||
# SENTRY_DSN=
|
||||
@@ -17,6 +17,9 @@ WORKDIR /app
COPY pyproject.toml uv.lock README.md /app/
RUN uv sync --compile-bytecode --locked

# pre-download nltk packages
RUN uv run python -c "import nltk; nltk.download('punkt_tab'); nltk.download('averaged_perceptron_tagger_eng')"

# bootstrap
COPY alembic.ini runserver.sh /app/
COPY images /app/images
@@ -1,74 +0,0 @@
"""add_change_seq_to_transcript

Revision ID: 623af934249a
Revises: 3aa20b96d963
Create Date: 2026-02-19 18:53:12.315440

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "623af934249a"
down_revision: Union[str, None] = "3aa20b96d963"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # Sequence
    op.execute("CREATE SEQUENCE IF NOT EXISTS transcript_change_seq;")

    # Column (nullable first for backfill)
    op.add_column("transcript", sa.Column("change_seq", sa.BigInteger(), nullable=True))

    # Backfill existing rows with sequential values (ordered by created_at for determinism)
    op.execute("""
        UPDATE transcript SET change_seq = sub.seq FROM (
            SELECT id, nextval('transcript_change_seq') AS seq
            FROM transcript ORDER BY created_at ASC
        ) sub WHERE transcript.id = sub.id;
    """)

    # Now make NOT NULL
    op.alter_column("transcript", "change_seq", nullable=False)

    # Default for any inserts between now and trigger creation
    op.alter_column(
        "transcript",
        "change_seq",
        server_default=sa.text("nextval('transcript_change_seq')"),
    )

    # Trigger function
    op.execute("""
        CREATE OR REPLACE FUNCTION set_transcript_change_seq()
        RETURNS TRIGGER AS $$
        BEGIN
            NEW.change_seq := nextval('transcript_change_seq');
            RETURN NEW;
        END;
        $$ LANGUAGE plpgsql;
    """)

    # Trigger (fires on every INSERT or UPDATE)
    op.execute("""
        CREATE TRIGGER trigger_transcript_change_seq
        BEFORE INSERT OR UPDATE ON transcript
        FOR EACH ROW
        EXECUTE FUNCTION set_transcript_change_seq();
    """)

    # Index for efficient polling
    op.create_index("idx_transcript_change_seq", "transcript", ["change_seq"])


def downgrade() -> None:
    op.execute("DROP TRIGGER IF EXISTS trigger_transcript_change_seq ON transcript;")
    op.execute("DROP FUNCTION IF EXISTS set_transcript_change_seq();")
    op.drop_index("idx_transcript_change_seq", table_name="transcript")
    op.drop_column("transcript", "change_seq")
    op.execute("DROP SEQUENCE IF EXISTS transcript_change_seq;")
@@ -1,25 +0,0 @@
"""add password_hash to user table

Revision ID: e1f093f7f124
Revises: 623af934249a
Create Date: 2026-02-19 00:00:00.000000

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

revision: str = "e1f093f7f124"
down_revision: Union[str, None] = "623af934249a"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    op.add_column("user", sa.Column("password_hash", sa.String(), nullable=True))


def downgrade() -> None:
    op.drop_column("user", "password_hash")
@@ -18,10 +18,11 @@ dependencies = [
    "fastapi[standard]>=0.100.1",
    "sentry-sdk[fastapi]>=1.29.2",
    "httpx>=0.24.1",
    "fastapi-pagination>=0.14.2",
    "fastapi-pagination>=0.12.6",
    "databases[aiosqlite, asyncpg]>=0.7.0",
    "sqlalchemy<1.5",
    "alembic>=1.11.3",
    "nltk>=3.8.1",
    "prometheus-fastapi-instrumentator>=6.1.0",
    "sentencepiece>=0.1.99",
    "protobuf>=4.24.3",
@@ -39,7 +40,6 @@ dependencies = [
    "webvtt-py>=0.5.0",
    "icalendar>=6.0.0",
    "hatchet-sdk>=0.47.0",
    "pydantic>=2.12.5",
]

[dependency-groups]
@@ -68,6 +68,7 @@ evaluation = [
    "pydantic>=2.1.1",
]
local = [
    "pyannote-audio>=3.3.2",
    "faster-whisper>=0.10.0",
]
silero-vad = [
@@ -8,7 +8,6 @@ from prometheus_fastapi_instrumentator import Instrumentator

import reflector.auth  # noqa
import reflector.db  # noqa
from reflector.auth import router as auth_router
from reflector.events import subscribers_shutdown, subscribers_startup
from reflector.logger import logger
from reflector.metrics import metrics_init
@@ -38,13 +37,6 @@ try:
except ImportError:
    sentry_sdk = None

# Patch aioice port range if configured (must happen before any RTCPeerConnection)
if settings.WEBRTC_PORT_RANGE:
    from reflector.webrtc_ports import parse_port_range, patch_aioice_port_range

    _min, _max = parse_port_range(settings.WEBRTC_PORT_RANGE)
    patch_aioice_port_range(_min, _max)


# lifespan events
@asynccontextmanager
@@ -67,7 +59,7 @@ else:
    logger.info("Sentry disabled")

# build app
app = FastAPI(lifespan=lifespan, root_path=settings.ROOT_PATH)
app = FastAPI(lifespan=lifespan)
app.add_middleware(
    CORSMiddleware,
    allow_credentials=settings.CORS_ALLOW_CREDENTIALS or False,
@@ -106,8 +98,6 @@ app.include_router(user_ws_router, prefix="/v1")
app.include_router(zulip_router, prefix="/v1")
app.include_router(whereby_router, prefix="/v1")
app.include_router(daily_router, prefix="/v1/daily")
if auth_router:
    app.include_router(auth_router, prefix="/v1")
add_pagination(app)

# prepare celery
@@ -4,9 +4,8 @@ from uuid import uuid4

from celery import current_task

from reflector.db import _database_context, get_database
from reflector.db import get_database
from reflector.llm import llm_session_id
from reflector.ws_manager import reset_ws_manager


def asynctask(f):
@@ -21,18 +20,8 @@ def asynctask(f):
                return await f(*args, **kwargs)
            finally:
                await database.disconnect()
                _database_context.set(None)

        if current_task:
            # Reset cached connections before each Celery task.
            # Each asyncio.run() creates a new event loop, making connections
            # from previous tasks stale ("Future attached to a different loop").
            _database_context.set(None)
            reset_ws_manager()

        coro = run_with_db()
        if current_task:
            return asyncio.run(coro)
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
@@ -12,8 +12,3 @@ AccessTokenInfo = auth_module.AccessTokenInfo
authenticated = auth_module.authenticated
current_user = auth_module.current_user
current_user_optional = auth_module.current_user_optional
parse_ws_bearer_token = auth_module.parse_ws_bearer_token
current_user_ws_optional = auth_module.current_user_ws_optional

# Optional router (e.g. for /auth/login in password backend)
router = getattr(auth_module, "router", None)
@@ -1,9 +1,6 @@
from typing import TYPE_CHECKING, Annotated, List, Optional
from typing import Annotated, List, Optional

from fastapi import Depends, HTTPException

if TYPE_CHECKING:
    from fastapi import WebSocket
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
from jose import JWTError, jwt
from pydantic import BaseModel
@@ -127,20 +124,3 @@ async def current_user_optional(
    jwtauth: JWTAuth = Depends(),
):
    return await _authenticate_user(jwt_token, api_key, jwtauth)


def parse_ws_bearer_token(
    websocket: "WebSocket",
) -> tuple[Optional[str], Optional[str]]:
    raw = websocket.headers.get("sec-websocket-protocol") or ""
    parts = [p.strip() for p in raw.split(",") if p.strip()]
    if len(parts) >= 2 and parts[0].lower() == "bearer":
        return parts[1], "bearer"
    return None, None


async def current_user_ws_optional(websocket: "WebSocket") -> Optional[UserInfo]:
    token, _ = parse_ws_bearer_token(websocket)
    if not token:
        return None
    return await _authenticate_user(token, None, JWTAuth())
@@ -1,5 +1,11 @@
from typing import Annotated

from fastapi import Depends
from fastapi.security import OAuth2PasswordBearer
from pydantic import BaseModel

oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token", auto_error=False)


class UserInfo(BaseModel):
    sub: str
@@ -9,21 +15,13 @@ class AccessTokenInfo(BaseModel):
    pass


def authenticated():
def authenticated(token: Annotated[str, Depends(oauth2_scheme)]):
    return None


def current_user():
def current_user(token: Annotated[str, Depends(oauth2_scheme)]):
    return None


def current_user_optional():
    return None


def parse_ws_bearer_token(websocket):
    return None, None


async def current_user_ws_optional(websocket):
def current_user_optional(token: Annotated[str, Depends(oauth2_scheme)]):
    return None
@@ -1,198 +0,0 @@
|
||||
"""Password-based authentication backend for selfhosted deployments.
|
||||
|
||||
Issues HS256 JWTs signed with settings.SECRET_KEY. Provides a POST /auth/login
|
||||
endpoint for email/password authentication.
|
||||
"""
|
||||
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import TYPE_CHECKING, Annotated, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
|
||||
from jose import JWTError, jwt
|
||||
from pydantic import BaseModel
|
||||
|
||||
from reflector.auth.password_utils import verify_password
|
||||
from reflector.db.user_api_keys import user_api_keys_controller
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.logger import logger
|
||||
from reflector.settings import settings
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastapi import WebSocket
|
||||
|
||||
# --- FastAPI security schemes (same pattern as auth_jwt.py) ---
|
||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/v1/auth/login", auto_error=False)
|
||||
api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
|
||||
|
||||
# --- JWT configuration ---
|
||||
JWT_ALGORITHM = "HS256"
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 # 24 hours
|
||||
|
||||
# --- Rate limiting (in-memory) ---
|
||||
_login_attempts: dict[str, list[float]] = defaultdict(list)
|
||||
RATE_LIMIT_WINDOW = 300 # 5 minutes
|
||||
RATE_LIMIT_MAX = 10 # max attempts per window
|
||||
|
||||
|
||||
def _check_rate_limit(key: str) -> bool:
|
||||
"""Return True if request is allowed, False if rate-limited."""
|
||||
now = time.monotonic()
|
||||
attempts = _login_attempts[key]
|
||||
_login_attempts[key] = [t for t in attempts if now - t < RATE_LIMIT_WINDOW]
|
||||
if len(_login_attempts[key]) >= RATE_LIMIT_MAX:
|
||||
return False
|
||||
_login_attempts[key].append(now)
|
||||
return True
|
||||
|
||||
|
||||
# --- Pydantic models ---
|
||||
class UserInfo(BaseModel):
|
||||
sub: str
|
||||
email: Optional[str] = None
|
||||
|
||||
def __getitem__(self, key):
|
||||
return getattr(self, key)
|
||||
|
||||
|
||||
class AccessTokenInfo(BaseModel):
|
||||
exp: Optional[int] = None
|
    sub: Optional[str] = None


class LoginRequest(BaseModel):
    email: str
    password: str


class LoginResponse(BaseModel):
    access_token: str
    token_type: str = "bearer"
    expires_in: int


# --- JWT token creation and verification ---
def _create_access_token(user_id: str, email: str) -> tuple[str, int]:
    """Create an HS256 JWT. Returns (token, expires_in_seconds)."""
    expires_delta = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    expire = datetime.now(timezone.utc) + expires_delta
    payload = {
        "sub": user_id,
        "email": email,
        "exp": expire,
    }
    token = jwt.encode(payload, settings.SECRET_KEY, algorithm=JWT_ALGORITHM)
    return token, int(expires_delta.total_seconds())


def _verify_token(token: str) -> dict:
    """Verify and decode an HS256 JWT."""
    return jwt.decode(token, settings.SECRET_KEY, algorithms=[JWT_ALGORITHM])


# --- Authentication logic (mirrors auth_jwt._authenticate_user) ---
async def _authenticate_user(
    jwt_token: Optional[str],
    api_key: Optional[str],
) -> UserInfo | None:
    user_infos: list[UserInfo] = []

    if api_key:
        user_api_key = await user_api_keys_controller.verify_key(api_key)
        if user_api_key:
            user_infos.append(UserInfo(sub=user_api_key.user_id, email=None))

    if jwt_token:
        try:
            payload = _verify_token(jwt_token)
            user_id = payload["sub"]
            email = payload.get("email")
            user_infos.append(UserInfo(sub=user_id, email=email))
        except JWTError as e:
            logger.error(f"JWT error: {e}")
            raise HTTPException(status_code=401, detail="Invalid authentication")

    if len(user_infos) == 0:
        return None

    if len(set(x.sub for x in user_infos)) > 1:
        raise HTTPException(
            status_code=401,
            detail="Invalid authentication: more than one user provided",
        )

    return user_infos[0]


# --- FastAPI dependencies (exported, required by auth/__init__.py) ---
def authenticated(token: Annotated[str, Depends(oauth2_scheme)]):
    if token is None:
        raise HTTPException(status_code=401, detail="Not authenticated")
    return None


async def current_user(
    jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
    api_key: Annotated[Optional[str], Depends(api_key_header)],
):
    user = await _authenticate_user(jwt_token, api_key)
    if user is None:
        raise HTTPException(status_code=401, detail="Not authenticated")
    return user


async def current_user_optional(
    jwt_token: Annotated[Optional[str], Depends(oauth2_scheme)],
    api_key: Annotated[Optional[str], Depends(api_key_header)],
):
    return await _authenticate_user(jwt_token, api_key)


# --- WebSocket auth (same pattern as auth_jwt.py) ---
def parse_ws_bearer_token(
    websocket: "WebSocket",
) -> tuple[Optional[str], Optional[str]]:
    raw = websocket.headers.get("sec-websocket-protocol") or ""
    parts = [p.strip() for p in raw.split(",") if p.strip()]
    if len(parts) >= 2 and parts[0].lower() == "bearer":
        return parts[1], "bearer"
    return None, None


async def current_user_ws_optional(websocket: "WebSocket") -> Optional[UserInfo]:
    token, _ = parse_ws_bearer_token(websocket)
    if not token:
        return None
    return await _authenticate_user(token, None)


# --- Login router ---
router = APIRouter(prefix="/auth", tags=["auth"])


@router.post("/login", response_model=LoginResponse)
async def login(request: Request, body: LoginRequest):
    client_ip = request.client.host if request.client else "unknown"
    if not _check_rate_limit(client_ip):
        raise HTTPException(
            status_code=429,
            detail="Too many login attempts. Try again later.",
        )

    user = await user_controller.get_by_email(body.email)
    if not user or not user.password_hash:
        logger.info("Login failed: unknown email or no local password set")
        raise HTTPException(status_code=401, detail="Invalid email or password")

    if not verify_password(body.password, user.password_hash):
        logger.info("Login failed: password mismatch")
        raise HTTPException(status_code=401, detail="Invalid email or password")

    access_token, expires_in = _create_access_token(user.id, user.email)
    return LoginResponse(
        access_token=access_token,
        token_type="bearer",
        expires_in=expires_in,
    )
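For reference, a minimal client-side sketch of the flow above (illustrative only; the base URL is an assumption, not part of this diff):

import httpx

def login_and_get_token(base_url: str, email: str, password: str) -> str:
    # POST /auth/login returns {"access_token": ..., "token_type": "bearer", "expires_in": ...}
    resp = httpx.post(f"{base_url}/auth/login", json={"email": email, "password": password})
    resp.raise_for_status()
    return resp.json()["access_token"]

# The token is then sent as a regular Bearer header, or for WebSockets via the
# Sec-WebSocket-Protocol header as "bearer, <token>" (see parse_ws_bearer_token above).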
|
||||
@@ -1,41 +0,0 @@
"""Password hashing utilities using PBKDF2-SHA256 (stdlib only)."""

import hashlib
import hmac
import os

PBKDF2_ITERATIONS = 100_000
SALT_LENGTH = 16  # bytes, hex-encoded to 32 chars


def hash_password(password: str) -> str:
    """Hash a password using PBKDF2-SHA256 with a random salt.

    Format: pbkdf2:sha256:<iterations>$<salt_hex>$<hash_hex>
    """
    salt = os.urandom(SALT_LENGTH).hex()
    dk = hashlib.pbkdf2_hmac(
        "sha256",
        password.encode("utf-8"),
        salt.encode("utf-8"),
        PBKDF2_ITERATIONS,
    )
    return f"pbkdf2:sha256:{PBKDF2_ITERATIONS}${salt}${dk.hex()}"


def verify_password(password: str, password_hash: str) -> bool:
    """Verify a password against its hash using constant-time comparison."""
    try:
        header, salt, stored_hash = password_hash.split("$", 2)
        _, algo, iterations_str = header.split(":")
        iterations = int(iterations_str)

        dk = hashlib.pbkdf2_hmac(
            algo,
            password.encode("utf-8"),
            salt.encode("utf-8"),
            iterations,
        )
        return hmac.compare_digest(dk.hex(), stored_hash)
    except (ValueError, AttributeError):
        return False
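A minimal round-trip of the two helpers above, for reference (illustrative, not part of the diff):

stored = hash_password("correct horse battery staple")
# stored looks like: pbkdf2:sha256:100000$<32-char hex salt>$<64-char hex digest>
assert verify_password("correct horse battery staple", stored)
assert not verify_password("wrong password", stored)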
|
||||
@@ -146,8 +146,6 @@ class DailyApiClient:
|
||||
)
|
||||
raise DailyApiError(operation, response)
|
||||
|
||||
if not response.content:
|
||||
return {}
|
||||
return response.json()
|
||||
|
||||
# ============================================================================
|
||||
|
||||
@@ -99,7 +99,7 @@ def extract_room_name(event: DailyWebhookEvent) -> str | None:
|
||||
>>> event = DailyWebhookEvent(**webhook_payload)
|
||||
>>> room_name = extract_room_name(event)
|
||||
"""
|
||||
room = event.payload.get("room_name") or event.payload.get("room")
|
||||
room = event.payload.get("room_name")
|
||||
# Ensure we return a string, not any falsy value that might be in payload
|
||||
return room if isinstance(room, str) else None
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
|
||||
from typing import Annotated, Any, Dict, Literal, Union
|
||||
|
||||
from pydantic import AliasChoices, BaseModel, ConfigDict, Field, field_validator
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
from reflector.utils.string import NonEmptyString
|
||||
|
||||
@@ -41,8 +41,6 @@ class DailyTrack(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/recordings
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
type: Literal["audio", "video"]
|
||||
s3Key: NonEmptyString = Field(description="S3 object key for the track file")
|
||||
size: int = Field(description="File size in bytes")
|
||||
@@ -56,8 +54,6 @@ class DailyWebhookEvent(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
version: NonEmptyString = Field(
|
||||
description="Represents the version of the event. This uses semantic versioning to inform a consumer if the payload has introduced any breaking changes"
|
||||
)
|
||||
@@ -86,13 +82,7 @@ class ParticipantJoinedPayload(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-joined
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
room_name: NonEmptyString | None = Field(
|
||||
None,
|
||||
description="Daily.co room name",
|
||||
validation_alias=AliasChoices("room_name", "room"),
|
||||
)
|
||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
||||
session_id: NonEmptyString = Field(description="Daily.co session identifier")
|
||||
user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
|
||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
||||
@@ -110,13 +100,7 @@ class ParticipantLeftPayload(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-left
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
room_name: NonEmptyString | None = Field(
|
||||
None,
|
||||
description="Daily.co room name",
|
||||
validation_alias=AliasChoices("room_name", "room"),
|
||||
)
|
||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
||||
session_id: NonEmptyString = Field(description="Daily.co session identifier")
|
||||
user_id: NonEmptyString = Field(description="User identifier (may be encoded)")
|
||||
user_name: NonEmptyString | None = Field(None, description="User display name")
|
||||
@@ -128,9 +112,6 @@ class ParticipantLeftPayload(BaseModel):
|
||||
_normalize_joined_at = field_validator("joined_at", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
_normalize_duration = field_validator("duration", mode="before")(
|
||||
normalize_timestamp_to_int
|
||||
)
|
||||
|
||||
|
||||
class RecordingStartedPayload(BaseModel):
|
||||
@@ -140,8 +121,6 @@ class RecordingStartedPayload(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-started
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
room_name: NonEmptyString | None = Field(None, description="Daily.co room name")
|
||||
recording_id: NonEmptyString = Field(description="Recording identifier")
|
||||
start_ts: int | None = Field(None, description="Recording start timestamp")
|
||||
@@ -159,9 +138,7 @@ class RecordingReadyToDownloadPayload(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-ready-to-download
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
type: Literal["cloud", "cloud-audio-only", "raw-tracks"] = Field(
|
||||
type: Literal["cloud", "raw-tracks"] = Field(
|
||||
description="The type of recording that was generated"
|
||||
)
|
||||
recording_id: NonEmptyString = Field(
|
||||
@@ -176,9 +153,8 @@ class RecordingReadyToDownloadPayload(BaseModel):
|
||||
status: Literal["finished"] = Field(
|
||||
description="The status of the given recording (always 'finished' in ready-to-download webhook, see RecordingStatus in responses.py for full API statuses)"
|
||||
)
|
||||
max_participants: int | None = Field(
|
||||
None,
|
||||
description="The number of participants on the call that were recorded (optional; Daily may omit it in some webhook versions)",
|
||||
max_participants: int = Field(
|
||||
description="The number of participants on the call that were recorded"
|
||||
)
|
||||
duration: int = Field(description="The duration in seconds of the call")
|
||||
s3_key: NonEmptyString = Field(
|
||||
@@ -204,8 +180,6 @@ class RecordingErrorPayload(BaseModel):
|
||||
Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-error
|
||||
"""
|
||||
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
action: Literal["clourd-recording-err", "cloud-recording-error"] = Field(
|
||||
description="A string describing the event that was emitted (both variants are documented)"
|
||||
)
|
||||
@@ -226,8 +200,6 @@ class RecordingErrorPayload(BaseModel):
|
||||
|
||||
|
||||
class ParticipantJoinedEvent(BaseModel):
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
version: NonEmptyString
|
||||
type: Literal["participant.joined"]
|
||||
id: NonEmptyString
|
||||
@@ -240,8 +212,6 @@ class ParticipantJoinedEvent(BaseModel):
|
||||
|
||||
|
||||
class ParticipantLeftEvent(BaseModel):
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
version: NonEmptyString
|
||||
type: Literal["participant.left"]
|
||||
id: NonEmptyString
|
||||
@@ -254,8 +224,6 @@ class ParticipantLeftEvent(BaseModel):
|
||||
|
||||
|
||||
class RecordingStartedEvent(BaseModel):
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
version: NonEmptyString
|
||||
type: Literal["recording.started"]
|
||||
id: NonEmptyString
|
||||
@@ -268,8 +236,6 @@ class RecordingStartedEvent(BaseModel):
|
||||
|
||||
|
||||
class RecordingReadyEvent(BaseModel):
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
version: NonEmptyString
|
||||
type: Literal["recording.ready-to-download"]
|
||||
id: NonEmptyString
|
||||
@@ -282,8 +248,6 @@ class RecordingReadyEvent(BaseModel):
|
||||
|
||||
|
||||
class RecordingErrorEvent(BaseModel):
|
||||
model_config = ConfigDict(extra="ignore")
|
||||
|
||||
version: NonEmptyString
|
||||
type: Literal["recording.error"]
|
||||
id: NonEmptyString
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
"""Search functionality for transcripts and other entities."""
|
||||
|
||||
import itertools
|
||||
import json
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from io import StringIO
|
||||
@@ -26,7 +27,6 @@ from reflector.db.rooms import rooms
|
||||
from reflector.db.transcripts import SourceKind, TranscriptStatus, transcripts
|
||||
from reflector.db.utils import is_postgresql
|
||||
from reflector.logger import logger
|
||||
from reflector.settings import settings
|
||||
from reflector.utils.string import NonEmptyString, try_parse_non_empty_string
|
||||
|
||||
DEFAULT_SEARCH_LIMIT = 20
|
||||
@@ -151,7 +151,6 @@ class SearchResultDB(BaseModel):
|
||||
title: str | None = None
|
||||
source_kind: SourceKind
|
||||
room_id: str | None = None
|
||||
change_seq: int | None = None
|
||||
rank: float = Field(..., ge=0, le=1)
|
||||
|
||||
|
||||
@@ -174,7 +173,9 @@ class SearchResult(BaseModel):
|
||||
total_match_count: NonNegativeInt = Field(
|
||||
default=0, description="Total number of matches found in the transcript"
|
||||
)
|
||||
change_seq: int | None = None
|
||||
dag_status: list[dict] | None = Field(
|
||||
default=None, description="Latest DAG task status for processing transcripts"
|
||||
)
|
||||
|
||||
@field_serializer("created_at", when_used="json")
|
||||
def serialize_datetime(self, dt: datetime) -> str:
|
||||
@@ -331,6 +332,42 @@ class SnippetGenerator:
|
||||
return summary_snippets + webvtt_snippets, total_matches
|
||||
|
||||
|
||||
async def _fetch_dag_statuses(transcript_ids: list[str]) -> dict[str, list[dict]]:
|
||||
"""Fetch latest DAG_STATUS event data for given transcript IDs.
|
||||
|
||||
Returns dict mapping transcript_id -> tasks list from the last DAG_STATUS event.
|
||||
"""
|
||||
if not transcript_ids:
|
||||
return {}
|
||||
|
||||
db = get_database()
|
||||
query = sqlalchemy.select(
|
||||
[
|
||||
transcripts.c.id,
|
||||
transcripts.c.events,
|
||||
]
|
||||
).where(transcripts.c.id.in_(transcript_ids))
|
||||
|
||||
rows = await db.fetch_all(query)
|
||||
result: dict[str, list[dict]] = {}
|
||||
|
||||
for row in rows:
|
||||
events_raw = row["events"]
|
||||
if not events_raw:
|
||||
continue
|
||||
# events is stored as JSON list
|
||||
events = events_raw if isinstance(events_raw, list) else json.loads(events_raw)
|
||||
# Find last DAG_STATUS event
|
||||
for ev in reversed(events):
|
||||
if isinstance(ev, dict) and ev.get("event") == "DAG_STATUS":
|
||||
tasks = ev.get("data", {}).get("tasks")
|
||||
if tasks:
|
||||
result[row["id"]] = tasks
|
||||
break
|
||||
|
||||
return result
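For context, each entry returned here is the tasks list from a DAG_STATUS event (see dag_progress.py further down); a sketch of one possible shape, with illustrative values:

# {
#     "<transcript_id>": [
#         {"name": "mixdown_tracks", "status": "completed", "duration_seconds": 12.4, "parents": []},
#         {"name": "transcribe_track", "status": "running", "children_total": 3, "children_completed": 1, "parents": ["mixdown_tracks"]},
#     ]
# }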
|
||||
|
||||
|
||||
class SearchController:
|
||||
"""Controller for search operations across different entities."""
|
||||
|
||||
@@ -358,7 +395,6 @@ class SearchController:
|
||||
transcripts.c.user_id,
|
||||
transcripts.c.room_id,
|
||||
transcripts.c.source_kind,
|
||||
transcripts.c.change_seq,
|
||||
transcripts.c.webvtt,
|
||||
transcripts.c.long_summary,
|
||||
sqlalchemy.case(
|
||||
@@ -400,7 +436,7 @@ class SearchController:
|
||||
transcripts.c.user_id == params.user_id, rooms.c.is_shared
|
||||
)
|
||||
)
|
||||
elif not settings.PUBLIC_MODE:
|
||||
else:
|
||||
base_query = base_query.where(rooms.c.is_shared)
|
||||
if params.room_id:
|
||||
base_query = base_query.where(transcripts.c.room_id == params.room_id)
|
||||
@@ -474,6 +510,14 @@ class SearchController:
|
||||
logger.error(f"Error processing search results: {e}", exc_info=True)
|
||||
raise
|
||||
|
||||
# Enrich processing transcripts with DAG status
|
||||
processing_ids = [r.id for r in results if r.status == "processing"]
|
||||
if processing_ids:
|
||||
dag_statuses = await _fetch_dag_statuses(processing_ids)
|
||||
for r in results:
|
||||
if r.id in dag_statuses:
|
||||
r.dag_status = dag_statuses[r.id]
|
||||
|
||||
return results, total
|
||||
|
||||
|
||||
|
||||
@@ -5,10 +5,7 @@ import shutil
|
||||
from contextlib import asynccontextmanager
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any, Literal, Sequence
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from reflector.ws_events import TranscriptEventName
|
||||
from typing import Any, Literal, Sequence
|
||||
|
||||
import sqlalchemy
|
||||
from fastapi import HTTPException
|
||||
@@ -35,8 +32,6 @@ class SourceKind(enum.StrEnum):
|
||||
FILE = enum.auto()
|
||||
|
||||
|
||||
transcript_change_seq = sqlalchemy.Sequence("transcript_change_seq", metadata=metadata)
|
||||
|
||||
transcripts = sqlalchemy.Table(
|
||||
"transcript",
|
||||
metadata,
|
||||
@@ -91,12 +86,6 @@ transcripts = sqlalchemy.Table(
|
||||
sqlalchemy.Column("webvtt", sqlalchemy.Text),
|
||||
# Hatchet workflow run ID for resumption of failed workflows
|
||||
sqlalchemy.Column("workflow_run_id", sqlalchemy.String),
|
||||
sqlalchemy.Column(
|
||||
"change_seq",
|
||||
sqlalchemy.BigInteger,
|
||||
transcript_change_seq,
|
||||
server_default=transcript_change_seq.next_value(),
|
||||
),
|
||||
sqlalchemy.Index("idx_transcript_recording_id", "recording_id"),
|
||||
sqlalchemy.Index("idx_transcript_user_id", "user_id"),
|
||||
sqlalchemy.Index("idx_transcript_created_at", "created_at"),
|
||||
@@ -195,7 +184,7 @@ class TranscriptWaveform(BaseModel):
|
||||
|
||||
|
||||
class TranscriptEvent(BaseModel):
|
||||
event: str # Typed at call sites via ws_events.TranscriptEventName; str here for DB compat
|
||||
event: str
|
||||
data: dict
|
||||
|
||||
|
||||
@@ -237,7 +226,6 @@ class Transcript(BaseModel):
|
||||
audio_deleted: bool | None = None
|
||||
webvtt: str | None = None
|
||||
workflow_run_id: str | None = None # Hatchet workflow run ID for resumption
|
||||
change_seq: int | None = None
|
||||
|
||||
@field_serializer("created_at", when_used="json")
|
||||
def serialize_datetime(self, dt: datetime) -> str:
|
||||
@@ -245,10 +233,8 @@ class Transcript(BaseModel):
|
||||
dt = dt.replace(tzinfo=timezone.utc)
|
||||
return dt.isoformat()
|
||||
|
||||
def add_event(
|
||||
self, event: "TranscriptEventName", data: BaseModel
|
||||
) -> TranscriptEvent:
|
||||
ev = TranscriptEvent(event=event, data=data.model_dump())
|
||||
def add_event(self, event: str, data: BaseModel) -> TranscriptEvent:
|
||||
ev = TranscriptEvent(event=event, data=data.model_dump(mode="json"))
|
||||
self.events.append(ev)
|
||||
return ev
|
||||
|
||||
@@ -390,7 +376,6 @@ class TranscriptController:
|
||||
source_kind: SourceKind | None = None,
|
||||
room_id: str | None = None,
|
||||
search_term: str | None = None,
|
||||
change_seq_from: int | None = None,
|
||||
return_query: bool = False,
|
||||
exclude_columns: list[str] = [
|
||||
"topics",
|
||||
@@ -411,7 +396,6 @@ class TranscriptController:
|
||||
- `filter_recording`: filter out transcripts that are currently recording
|
||||
- `room_id`: filter transcripts by room ID
|
||||
- `search_term`: filter transcripts by search term
|
||||
- `change_seq_from`: filter transcripts with change_seq > this value
|
||||
"""
|
||||
|
||||
query = transcripts.select().join(
|
||||
@@ -422,7 +406,7 @@ class TranscriptController:
|
||||
query = query.where(
|
||||
or_(transcripts.c.user_id == user_id, rooms.c.is_shared)
|
||||
)
|
||||
elif not settings.PUBLIC_MODE:
|
||||
else:
|
||||
query = query.where(rooms.c.is_shared)
|
||||
|
||||
if source_kind:
|
||||
@@ -434,9 +418,6 @@ class TranscriptController:
|
||||
if search_term:
|
||||
query = query.where(transcripts.c.title.ilike(f"%{search_term}%"))
|
||||
|
||||
if change_seq_from is not None:
|
||||
query = query.where(transcripts.c.change_seq > change_seq_from)
|
||||
|
||||
# Exclude heavy JSON columns from list queries
|
||||
transcript_columns = [
|
||||
col for col in transcripts.c if col.name not in exclude_columns
|
||||
@@ -450,10 +431,9 @@ class TranscriptController:
|
||||
)
|
||||
|
||||
if order_by is not None:
|
||||
field = getattr(transcripts.c, order_by[1:])
|
||||
if order_by.startswith("-"):
|
||||
field = getattr(transcripts.c, order_by[1:]).desc()
|
||||
else:
|
||||
field = getattr(transcripts.c, order_by)
|
||||
field = field.desc()
|
||||
query = query.order_by(field)
|
||||
|
||||
if filter_empty:
|
||||
@@ -708,7 +688,7 @@ class TranscriptController:
|
||||
async def append_event(
|
||||
self,
|
||||
transcript: Transcript,
|
||||
event: "TranscriptEventName",
|
||||
event: str,
|
||||
data: Any,
|
||||
) -> TranscriptEvent:
|
||||
"""
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
"""User table for storing user information."""
|
||||
"""User table for storing Authentik user information."""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
|
||||
@@ -15,7 +15,6 @@ users = sqlalchemy.Table(
|
||||
sqlalchemy.Column("id", sqlalchemy.String, primary_key=True),
|
||||
sqlalchemy.Column("email", sqlalchemy.String, nullable=False),
|
||||
sqlalchemy.Column("authentik_uid", sqlalchemy.String, nullable=False),
|
||||
sqlalchemy.Column("password_hash", sqlalchemy.String, nullable=True),
|
||||
sqlalchemy.Column("created_at", sqlalchemy.DateTime(timezone=True), nullable=False),
|
||||
sqlalchemy.Column("updated_at", sqlalchemy.DateTime(timezone=True), nullable=False),
|
||||
sqlalchemy.Index("idx_user_authentik_uid", "authentik_uid", unique=True),
|
||||
@@ -27,7 +26,6 @@ class User(BaseModel):
|
||||
id: NonEmptyString = Field(default_factory=generate_uuid4)
|
||||
email: NonEmptyString
|
||||
authentik_uid: NonEmptyString
|
||||
password_hash: str | None = None
|
||||
created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
|
||||
|
||||
@@ -53,29 +51,22 @@ class UserController:
|
||||
|
||||
@staticmethod
|
||||
async def create_or_update(
|
||||
id: NonEmptyString,
|
||||
authentik_uid: NonEmptyString,
|
||||
email: NonEmptyString,
|
||||
password_hash: str | None = None,
|
||||
id: NonEmptyString, authentik_uid: NonEmptyString, email: NonEmptyString
|
||||
) -> User:
|
||||
existing = await UserController.get_by_authentik_uid(authentik_uid)
|
||||
now = datetime.now(timezone.utc)
|
||||
|
||||
if existing:
|
||||
update_values: dict = {"email": email, "updated_at": now}
|
||||
if password_hash is not None:
|
||||
update_values["password_hash"] = password_hash
|
||||
query = (
|
||||
users.update()
|
||||
.where(users.c.authentik_uid == authentik_uid)
|
||||
.values(**update_values)
|
||||
.values(email=email, updated_at=now)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
return User(
|
||||
id=existing.id,
|
||||
authentik_uid=authentik_uid,
|
||||
email=email,
|
||||
password_hash=password_hash or existing.password_hash,
|
||||
created_at=existing.created_at,
|
||||
updated_at=now,
|
||||
)
|
||||
@@ -84,7 +75,6 @@ class UserController:
|
||||
id=id,
|
||||
authentik_uid=authentik_uid,
|
||||
email=email,
|
||||
password_hash=password_hash,
|
||||
created_at=now,
|
||||
updated_at=now,
|
||||
)
|
||||
@@ -92,16 +82,6 @@ class UserController:
|
||||
await get_database().execute(query)
|
||||
return user
|
||||
|
||||
@staticmethod
|
||||
async def set_password_hash(user_id: NonEmptyString, password_hash: str) -> None:
|
||||
now = datetime.now(timezone.utc)
|
||||
query = (
|
||||
users.update()
|
||||
.where(users.c.id == user_id)
|
||||
.values(password_hash=password_hash, updated_at=now)
|
||||
)
|
||||
await get_database().execute(query)
|
||||
|
||||
@staticmethod
|
||||
async def list_all() -> list[User]:
|
||||
query = users.select().order_by(users.c.created_at.desc())
|
||||
|
||||
@@ -12,11 +12,10 @@ import structlog
|
||||
|
||||
from reflector.db.transcripts import Transcript, TranscriptEvent, transcripts_controller
|
||||
from reflector.utils.string import NonEmptyString
|
||||
from reflector.ws_events import TranscriptEventName
|
||||
from reflector.ws_manager import get_ws_manager
|
||||
|
||||
# Events that should also be sent to user room (matches Celery behavior)
|
||||
USER_ROOM_EVENTS: set[TranscriptEventName] = {"STATUS", "FINAL_TITLE", "DURATION"}
|
||||
USER_ROOM_EVENTS = {"STATUS", "FINAL_TITLE", "DURATION", "DAG_STATUS"}
|
||||
|
||||
|
||||
async def broadcast_event(
|
||||
@@ -82,7 +81,8 @@ async def set_status_and_broadcast(
|
||||
async def append_event_and_broadcast(
|
||||
transcript_id: NonEmptyString,
|
||||
transcript: Transcript,
|
||||
event_name: TranscriptEventName,
|
||||
event_name: NonEmptyString,
|
||||
# TODO proper dictionary event => type
|
||||
data: Any,
|
||||
logger: structlog.BoundLogger,
|
||||
) -> TranscriptEvent:
|
||||
|
||||
@@ -12,9 +12,7 @@ import threading
|
||||
|
||||
from hatchet_sdk import ClientConfig, Hatchet
|
||||
from hatchet_sdk.clients.rest.models import V1TaskStatus
|
||||
from hatchet_sdk.rate_limit import RateLimitDuration
|
||||
|
||||
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, LLM_RATE_LIMIT_PER_SECOND
|
||||
from reflector.logger import logger
|
||||
from reflector.settings import settings
|
||||
|
||||
@@ -115,26 +113,3 @@ class HatchetClientManager:
|
||||
"""Reset the client instance (for testing)."""
|
||||
with cls._lock:
|
||||
cls._instance = None
|
||||
|
||||
@classmethod
|
||||
async def ensure_rate_limit(cls) -> None:
|
||||
"""Ensure the LLM rate limit exists in Hatchet.
|
||||
|
||||
Uses the Hatchet SDK rate_limits client (aio_put). See:
|
||||
https://docs.hatchet.run/sdks/python/feature-clients/rate_limits
|
||||
"""
|
||||
logger.info(
|
||||
"[Hatchet] Ensuring rate limit exists",
|
||||
rate_limit_key=LLM_RATE_LIMIT_KEY,
|
||||
limit=LLM_RATE_LIMIT_PER_SECOND,
|
||||
)
|
||||
client = cls.get_client()
|
||||
await client.rate_limits.aio_put(
|
||||
key=LLM_RATE_LIMIT_KEY,
|
||||
limit=LLM_RATE_LIMIT_PER_SECOND,
|
||||
duration=RateLimitDuration.SECOND,
|
||||
)
|
||||
logger.info(
|
||||
"[Hatchet] Rate limit put successfully",
|
||||
rate_limit_key=LLM_RATE_LIMIT_KEY,
|
||||
)
|
||||
|
||||
230
server/reflector/hatchet/dag_progress.py
Normal file
@@ -0,0 +1,230 @@
|
||||
"""
|
||||
DAG Progress Reporting — models and transform.
|
||||
|
||||
Converts Hatchet V1WorkflowRunDetails into structured DagTask list
|
||||
for frontend WebSocket/REST consumption.
|
||||
|
||||
Ported from render_hatchet_run.py (feat-dag-zulip) which renders markdown;
|
||||
this module produces structured Pydantic models instead.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import StrEnum
|
||||
|
||||
from hatchet_sdk.clients.rest.models import (
|
||||
V1TaskStatus,
|
||||
V1WorkflowRunDetails,
|
||||
WorkflowRunShapeItemForWorkflowRunDetails,
|
||||
)
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class DagTaskStatus(StrEnum):
|
||||
QUEUED = "queued"
|
||||
RUNNING = "running"
|
||||
COMPLETED = "completed"
|
||||
FAILED = "failed"
|
||||
CANCELLED = "cancelled"
|
||||
|
||||
|
||||
_HATCHET_TO_DAG_STATUS: dict[V1TaskStatus, DagTaskStatus] = {
|
||||
V1TaskStatus.QUEUED: DagTaskStatus.QUEUED,
|
||||
V1TaskStatus.RUNNING: DagTaskStatus.RUNNING,
|
||||
V1TaskStatus.COMPLETED: DagTaskStatus.COMPLETED,
|
||||
V1TaskStatus.FAILED: DagTaskStatus.FAILED,
|
||||
V1TaskStatus.CANCELLED: DagTaskStatus.CANCELLED,
|
||||
}
|
||||
|
||||
|
||||
class DagTask(BaseModel):
|
||||
name: str
|
||||
status: DagTaskStatus
|
||||
started_at: datetime | None
|
||||
finished_at: datetime | None
|
||||
duration_seconds: float | None
|
||||
parents: list[str]
|
||||
error: str | None
|
||||
children_total: int | None
|
||||
children_completed: int | None
|
||||
progress_pct: float | None
|
||||
|
||||
|
||||
class DagStatusData(BaseModel):
|
||||
workflow_run_id: str
|
||||
tasks: list[DagTask]
|
||||
|
||||
|
||||
def _topo_sort(
|
||||
shape: list[WorkflowRunShapeItemForWorkflowRunDetails],
|
||||
) -> list[str]:
|
||||
"""Topological sort of step_ids from shape DAG (Kahn's algorithm).
|
||||
|
||||
Ported from render_hatchet_run.py.
|
||||
"""
|
||||
step_ids = {s.step_id for s in shape}
|
||||
children_map: dict[str, list[str]] = {}
|
||||
in_degree: dict[str, int] = {sid: 0 for sid in step_ids}
|
||||
|
||||
for s in shape:
|
||||
children = [c for c in (s.children_step_ids or []) if c in step_ids]
|
||||
children_map[s.step_id] = children
|
||||
for c in children:
|
||||
in_degree[c] += 1
|
||||
|
||||
queue = sorted(sid for sid, deg in in_degree.items() if deg == 0)
|
||||
result: list[str] = []
|
||||
while queue:
|
||||
node = queue.pop(0)
|
||||
result.append(node)
|
||||
for c in children_map.get(node, []):
|
||||
in_degree[c] -= 1
|
||||
if in_degree[c] == 0:
|
||||
queue.append(c)
|
||||
queue.sort()
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def _extract_error_summary(error_message: str | None) -> str | None:
|
||||
"""Extract first meaningful line from error message, skipping traceback frames."""
|
||||
if not error_message or not error_message.strip():
|
||||
return None
|
||||
|
||||
err_lines = error_message.strip().split("\n")
|
||||
err_summary = err_lines[0]
|
||||
for line in err_lines:
|
||||
stripped = line.strip()
|
||||
if stripped and not stripped.startswith(("Traceback", "File ", "{", ")")):
|
||||
err_summary = stripped
|
||||
return err_summary
|
||||
|
||||
|
||||
def extract_dag_tasks(details: V1WorkflowRunDetails) -> list[DagTask]:
|
||||
"""Extract structured DagTask list from Hatchet workflow run details.
|
||||
|
||||
Returns tasks in topological order with status, timestamps, parents,
|
||||
error summaries, and fan-out children counts.
|
||||
"""
|
||||
shape = details.shape or []
|
||||
tasks = details.tasks or []
|
||||
|
||||
if not shape:
|
||||
return []
|
||||
|
||||
# Build lookups
|
||||
step_to_shape: dict[str, WorkflowRunShapeItemForWorkflowRunDetails] = {
|
||||
s.step_id: s for s in shape
|
||||
}
|
||||
step_to_name: dict[str, str] = {s.step_id: s.task_name for s in shape}
|
||||
|
||||
# Reverse edges: child -> parent names
|
||||
parents_by_step: dict[str, list[str]] = {s.step_id: [] for s in shape}
|
||||
for s in shape:
|
||||
for child_id in s.children_step_ids or []:
|
||||
if child_id in parents_by_step:
|
||||
parents_by_step[child_id].append(step_to_name[s.step_id])
|
||||
|
||||
# Join tasks by step_id
|
||||
from hatchet_sdk.clients.rest.models import V1TaskSummary # noqa: PLC0415
|
||||
|
||||
task_by_step: dict[str, V1TaskSummary] = {}
|
||||
for t in tasks:
|
||||
if t.step_id and t.step_id in step_to_name:
|
||||
task_by_step[t.step_id] = t
|
||||
|
||||
ordered = _topo_sort(shape)
|
||||
|
||||
result: list[DagTask] = []
|
||||
for step_id in ordered:
|
||||
name = step_to_name[step_id]
|
||||
t = task_by_step.get(step_id)
|
||||
|
||||
if not t:
|
||||
result.append(
|
||||
DagTask(
|
||||
name=name,
|
||||
status=DagTaskStatus.QUEUED,
|
||||
started_at=None,
|
||||
finished_at=None,
|
||||
duration_seconds=None,
|
||||
parents=parents_by_step.get(step_id, []),
|
||||
error=None,
|
||||
children_total=None,
|
||||
children_completed=None,
|
||||
progress_pct=None,
|
||||
)
|
||||
)
|
||||
continue
|
||||
|
||||
status = _HATCHET_TO_DAG_STATUS.get(t.status, DagTaskStatus.QUEUED)
|
||||
|
||||
duration_seconds: float | None = None
|
||||
if t.duration is not None:
|
||||
duration_seconds = t.duration / 1000.0
|
||||
|
||||
# Fan-out children
|
||||
children_total: int | None = None
|
||||
children_completed: int | None = None
|
||||
if t.num_spawned_children and t.num_spawned_children > 0:
|
||||
children_total = t.num_spawned_children
|
||||
children_completed = sum(
|
||||
1 for c in (t.children or []) if c.status == V1TaskStatus.COMPLETED
|
||||
)
|
||||
|
||||
result.append(
|
||||
DagTask(
|
||||
name=name,
|
||||
status=status,
|
||||
started_at=t.started_at,
|
||||
finished_at=t.finished_at,
|
||||
duration_seconds=duration_seconds,
|
||||
parents=parents_by_step.get(step_id, []),
|
||||
error=_extract_error_summary(t.error_message),
|
||||
children_total=children_total,
|
||||
children_completed=children_completed,
|
||||
progress_pct=None,
|
||||
)
|
||||
)
|
||||
|
||||
return result
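A consumer of this list could roll it up into a single progress figure; a minimal sketch under that assumption (the helper name is hypothetical, not part of the diff):

def overall_progress_pct(tasks: list[DagTask]) -> float:
    # Naive roll-up: fraction of DAG tasks that reached COMPLETED.
    if not tasks:
        return 0.0
    done = sum(1 for t in tasks if t.status == DagTaskStatus.COMPLETED)
    return 100.0 * done / len(tasks)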
|
||||
|
||||
|
||||
async def broadcast_dag_status(transcript_id: str, workflow_run_id: str) -> None:
|
||||
"""Fetch current DAG state from Hatchet and broadcast via WebSocket.
|
||||
|
||||
Fire-and-forget: exceptions are logged but never raised.
|
||||
All imports are deferred for fork-safety (Hatchet workers fork processes).
|
||||
"""
|
||||
try:
|
||||
from reflector.db.transcripts import transcripts_controller # noqa: I001, PLC0415
|
||||
from reflector.hatchet.broadcast import append_event_and_broadcast # noqa: PLC0415
|
||||
from reflector.hatchet.client import HatchetClientManager # noqa: PLC0415
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import ( # noqa: PLC0415
|
||||
fresh_db_connection,
|
||||
)
|
||||
from reflector.logger import logger # noqa: PLC0415
|
||||
|
||||
async with fresh_db_connection():
|
||||
client = HatchetClientManager.get_client()
|
||||
details = await client.runs.aio_get(workflow_run_id)
|
||||
dag_tasks = extract_dag_tasks(details)
|
||||
dag_status = DagStatusData(workflow_run_id=workflow_run_id, tasks=dag_tasks)
|
||||
|
||||
transcript = await transcripts_controller.get_by_id(transcript_id)
|
||||
if transcript:
|
||||
await append_event_and_broadcast(
|
||||
transcript_id,
|
||||
transcript,
|
||||
"DAG_STATUS",
|
||||
dag_status,
|
||||
logger,
|
||||
)
|
||||
except Exception:
|
||||
from reflector.logger import logger # noqa: PLC0415
|
||||
|
||||
logger.warning(
|
||||
"[DAG Progress] Failed to broadcast DAG status",
|
||||
transcript_id=transcript_id,
|
||||
workflow_run_id=workflow_run_id,
|
||||
exc_info=True,
|
||||
)
|
||||
@@ -3,8 +3,6 @@ LLM/I/O worker pool for all non-CPU tasks.
|
||||
Handles: all tasks except mixdown_tracks (transcription, LLM inference, orchestration)
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
daily_multitrack_pipeline,
|
||||
@@ -22,15 +20,6 @@ POOL = "llm-io"
|
||||
def main():
|
||||
hatchet = HatchetClientManager.get_client()
|
||||
|
||||
try:
|
||||
asyncio.run(HatchetClientManager.ensure_rate_limit())
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"[Hatchet] Rate limit initialization failed, but continuing. "
|
||||
"If workflows fail to register, rate limits may need to be created manually.",
|
||||
error=str(e),
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Starting Hatchet LLM worker pool (all tasks except mixdown)",
|
||||
worker_name=WORKER_NAME,
|
||||
|
||||
@@ -171,13 +171,11 @@ async def set_workflow_error_status(transcript_id: NonEmptyString) -> bool:
|
||||
|
||||
def _spawn_storage():
|
||||
"""Create fresh storage instance."""
|
||||
# TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
|
||||
return AwsStorage(
|
||||
aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
|
||||
)
|
||||
|
||||
|
||||
@@ -186,7 +184,10 @@ class Loggable(Protocol):
|
||||
|
||||
|
||||
def make_audio_progress_logger(
|
||||
ctx: Loggable, task_name: TaskName, interval: float = 5.0
|
||||
ctx: Loggable,
|
||||
task_name: TaskName,
|
||||
interval: float = 5.0,
|
||||
transcript_id: str | None = None,
|
||||
) -> Callable[[float | None, float], None]:
|
||||
"""Create a throttled progress logger callback for audio processing.
|
||||
|
||||
@@ -194,6 +195,7 @@ def make_audio_progress_logger(
|
||||
ctx: Object with .log() method (e.g., Hatchet Context).
|
||||
task_name: Name to prefix in log messages.
|
||||
interval: Minimum seconds between log messages.
|
||||
transcript_id: If provided, broadcasts transient DAG_TASK_PROGRESS events.
|
||||
|
||||
Returns:
|
||||
Callback(progress_pct, audio_position) that logs at most every `interval` seconds.
|
||||
@@ -215,6 +217,27 @@ def make_audio_progress_logger(
|
||||
)
|
||||
last_log_time[0] = now
|
||||
|
||||
if transcript_id and progress_pct is not None:
|
||||
try:
|
||||
import asyncio # noqa: PLC0415
|
||||
|
||||
from reflector.db.transcripts import TranscriptEvent # noqa: PLC0415
|
||||
from reflector.hatchet.broadcast import broadcast_event # noqa: PLC0415
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
loop.create_task(
|
||||
broadcast_event(
|
||||
transcript_id,
|
||||
TranscriptEvent(
|
||||
event="DAG_TASK_PROGRESS",
|
||||
data={"task_name": task_name, "progress_pct": progress_pct},
|
||||
),
|
||||
logger=logger,
|
||||
)
|
||||
)
|
||||
except Exception:
|
||||
pass # transient, never fail the callback
|
||||
|
||||
return callback
|
||||
|
||||
|
||||
@@ -239,8 +262,15 @@ def with_error_handling(
|
||||
) -> Callable[[PipelineInput, Context], Coroutine[Any, Any, R]]:
|
||||
@functools.wraps(func)
|
||||
async def wrapper(input: PipelineInput, ctx: Context) -> R:
|
||||
from reflector.hatchet.dag_progress import broadcast_dag_status # noqa: I001, PLC0415
|
||||
|
||||
try:
|
||||
return await func(input, ctx)
|
||||
result = await func(input, ctx)
|
||||
try:
|
||||
await broadcast_dag_status(input.transcript_id, ctx.workflow_run_id)
|
||||
except Exception:
|
||||
pass
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"[Hatchet] {step_name} failed",
|
||||
@@ -248,6 +278,10 @@ def with_error_handling(
|
||||
error=str(e),
|
||||
exc_info=True,
|
||||
)
|
||||
try:
|
||||
await broadcast_dag_status(input.transcript_id, ctx.workflow_run_id)
|
||||
except Exception:
|
||||
pass
|
||||
if set_error_status:
|
||||
await set_workflow_error_status(input.transcript_id)
|
||||
raise
|
||||
@@ -562,7 +596,9 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
|
||||
target_sample_rate,
|
||||
offsets_seconds=None,
|
||||
logger=logger,
|
||||
progress_callback=make_audio_progress_logger(ctx, TaskName.MIXDOWN_TRACKS),
|
||||
progress_callback=make_audio_progress_logger(
|
||||
ctx, TaskName.MIXDOWN_TRACKS, transcript_id=input.transcript_id
|
||||
),
|
||||
expected_duration_sec=recording_duration if recording_duration > 0 else None,
|
||||
)
|
||||
await writer.flush()
|
||||
|
||||
@@ -49,13 +49,11 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
|
||||
from reflector.settings import settings # noqa: PLC0415
|
||||
from reflector.storage.storage_aws import AwsStorage # noqa: PLC0415
|
||||
|
||||
# TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
|
||||
storage = AwsStorage(
|
||||
aws_bucket_name=settings.TRANSCRIPT_STORAGE_AWS_BUCKET_NAME,
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
|
||||
)
|
||||
|
||||
source_url = await storage.get_file_url(
|
||||
|
||||
@@ -71,7 +71,7 @@ async def detect_chunk_topic(input: TopicChunkInput, ctx: Context) -> TopicChunk
|
||||
from reflector.settings import settings # noqa: PLC0415
|
||||
from reflector.utils.text import clean_title # noqa: PLC0415
|
||||
|
||||
llm = LLM(settings=settings, temperature=0.9)
|
||||
llm = LLM(settings=settings, temperature=0.9, max_tokens=500)
|
||||
|
||||
prompt = TOPIC_PROMPT.format(text=input.chunk_text)
|
||||
response = await llm.get_structured_response(
|
||||
|
||||
@@ -60,7 +60,6 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
|
||||
|
||||
try:
|
||||
# Create fresh storage instance to avoid aioboto3 fork issues
|
||||
# TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
|
||||
from reflector.settings import settings # noqa: PLC0415
|
||||
from reflector.storage.storage_aws import AwsStorage # noqa: PLC0415
|
||||
|
||||
@@ -69,7 +68,6 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
|
||||
)
|
||||
|
||||
source_url = await storage.get_file_url(
|
||||
@@ -161,7 +159,6 @@ async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackRe
|
||||
raise ValueError("Missing padded_key from pad_track")
|
||||
|
||||
# Presign URL on demand (avoids stale URLs on workflow replay)
|
||||
# TODO: replace direct AwsStorage construction with get_transcripts_storage() factory
|
||||
from reflector.settings import settings # noqa: PLC0415
|
||||
from reflector.storage.storage_aws import AwsStorage # noqa: PLC0415
|
||||
|
||||
@@ -170,7 +167,6 @@ async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackRe
|
||||
aws_region=settings.TRANSCRIPT_STORAGE_AWS_REGION,
|
||||
aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
|
||||
aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
|
||||
aws_endpoint_url=settings.TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL,
|
||||
)
|
||||
|
||||
audio_url = await storage.get_file_url(
|
||||
|
||||
@@ -1,23 +1,42 @@
|
||||
import logging
|
||||
from contextvars import ContextVar
|
||||
from typing import Type, TypeVar
|
||||
from typing import Generic, Type, TypeVar
|
||||
from uuid import uuid4
|
||||
|
||||
from llama_index.core import Settings
|
||||
from llama_index.core.prompts import PromptTemplate
|
||||
from llama_index.core.output_parsers import PydanticOutputParser
|
||||
from llama_index.core.response_synthesizers import TreeSummarize
|
||||
from llama_index.core.workflow import (
|
||||
Context,
|
||||
Event,
|
||||
StartEvent,
|
||||
StopEvent,
|
||||
Workflow,
|
||||
step,
|
||||
)
|
||||
from llama_index.llms.openai_like import OpenAILike
|
||||
from pydantic import BaseModel, ValidationError
|
||||
from workflows.errors import WorkflowTimeoutError
|
||||
|
||||
from reflector.utils.retry import retry
|
||||
|
||||
T = TypeVar("T", bound=BaseModel)
|
||||
OutputT = TypeVar("OutputT", bound=BaseModel)
|
||||
|
||||
# Session ID for LiteLLM request grouping - set per processing run
|
||||
llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
|
||||
Based on the following analysis, provide the information in the requested JSON format:
|
||||
|
||||
Analysis:
|
||||
{analysis}
|
||||
|
||||
{format_instructions}
|
||||
"""
|
||||
|
||||
|
||||
class LLMParseError(Exception):
|
||||
"""Raised when LLM output cannot be parsed after retries."""
|
||||
@@ -31,10 +50,148 @@ class LLMParseError(Exception):
|
||||
)
|
||||
|
||||
|
||||
class LLM:
|
||||
class ExtractionDone(Event):
|
||||
"""Event emitted when LLM JSON formatting completes."""
|
||||
|
||||
output: str
|
||||
|
||||
|
||||
class ValidationErrorEvent(Event):
|
||||
"""Event emitted when validation fails."""
|
||||
|
||||
error: str
|
||||
wrong_output: str
|
||||
|
||||
|
||||
class StructuredOutputWorkflow(Workflow, Generic[OutputT]):
|
||||
"""Workflow for structured output extraction with validation retry.
|
||||
|
||||
This workflow handles parse/validation retries only. Network error retries
|
||||
are handled internally by Settings.llm (OpenAILike max_retries=3).
|
||||
The caller should NOT wrap this workflow in additional retry logic.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, settings, temperature: float = 0.4, max_tokens: int | None = None
|
||||
self,
|
||||
output_cls: Type[OutputT],
|
||||
max_retries: int = 3,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(**kwargs)
|
||||
self.output_cls: Type[OutputT] = output_cls
|
||||
self.max_retries = max_retries
|
||||
self.output_parser = PydanticOutputParser(output_cls)
|
||||
|
||||
@step
|
||||
async def extract(
|
||||
self, ctx: Context, ev: StartEvent | ValidationErrorEvent
|
||||
) -> StopEvent | ExtractionDone:
|
||||
"""Extract structured data from text using two-step LLM process.
|
||||
|
||||
Step 1 (first call only): TreeSummarize generates text analysis
|
||||
Step 2 (every call): Settings.llm.acomplete formats analysis as JSON
|
||||
"""
|
||||
current_retries = await ctx.store.get("retries", default=0)
|
||||
await ctx.store.set("retries", current_retries + 1)
|
||||
|
||||
if current_retries >= self.max_retries:
|
||||
last_error = await ctx.store.get("last_error", default=None)
|
||||
logger.error(
|
||||
f"Max retries ({self.max_retries}) reached for {self.output_cls.__name__}"
|
||||
)
|
||||
return StopEvent(result={"error": last_error, "attempts": current_retries})
|
||||
|
||||
if isinstance(ev, StartEvent):
|
||||
# First call: run TreeSummarize to get analysis, store in context
|
||||
prompt = ev.get("prompt")
|
||||
texts = ev.get("texts")
|
||||
tone_name = ev.get("tone_name")
|
||||
if not prompt or not isinstance(texts, list):
|
||||
raise ValueError(
|
||||
"StartEvent must contain 'prompt' (str) and 'texts' (list)"
|
||||
)
|
||||
|
||||
summarizer = TreeSummarize(verbose=False)
|
||||
analysis = await summarizer.aget_response(
|
||||
prompt, texts, tone_name=tone_name
|
||||
)
|
||||
await ctx.store.set("analysis", str(analysis))
|
||||
reflection = ""
|
||||
else:
|
||||
# Retry: reuse analysis from context
|
||||
analysis = await ctx.store.get("analysis")
|
||||
if not analysis:
|
||||
raise RuntimeError("Internal error: analysis not found in context")
|
||||
|
||||
wrong_output = ev.wrong_output
|
||||
if len(wrong_output) > 2000:
|
||||
wrong_output = wrong_output[:2000] + "... [truncated]"
|
||||
reflection = (
|
||||
f"\n\nYour previous response could not be parsed:\n{wrong_output}\n\n"
|
||||
f"Error:\n{ev.error}\n\n"
|
||||
"Please try again. Return ONLY valid JSON matching the schema above, "
|
||||
"with no markdown formatting or extra text."
|
||||
)
|
||||
|
||||
# Step 2: Format analysis as JSON using LLM completion
|
||||
format_instructions = self.output_parser.format(
|
||||
"Please structure the above information in the following JSON format:"
|
||||
)
|
||||
|
||||
json_prompt = STRUCTURED_RESPONSE_PROMPT_TEMPLATE.format(
|
||||
analysis=analysis,
|
||||
format_instructions=format_instructions + reflection,
|
||||
)
|
||||
|
||||
# Network retries handled by OpenAILike (max_retries=3)
|
||||
response = await Settings.llm.acomplete(json_prompt)
|
||||
return ExtractionDone(output=response.text)
|
||||
|
||||
@step
|
||||
async def validate(
|
||||
self, ctx: Context, ev: ExtractionDone
|
||||
) -> StopEvent | ValidationErrorEvent:
|
||||
"""Validate extracted output against Pydantic schema."""
|
||||
raw_output = ev.output
|
||||
retries = await ctx.store.get("retries", default=0)
|
||||
|
||||
try:
|
||||
parsed = self.output_parser.parse(raw_output)
|
||||
if retries > 1:
|
||||
logger.info(
|
||||
f"LLM parse succeeded on attempt {retries}/{self.max_retries} "
|
||||
f"for {self.output_cls.__name__}"
|
||||
)
|
||||
return StopEvent(result={"success": parsed})
|
||||
|
||||
except (ValidationError, ValueError) as e:
|
||||
error_msg = self._format_error(e, raw_output)
|
||||
await ctx.store.set("last_error", error_msg)
|
||||
|
||||
logger.error(
|
||||
f"LLM parse error (attempt {retries}/{self.max_retries}): "
|
||||
f"{type(e).__name__}: {e}\nRaw response: {raw_output[:500]}"
|
||||
)
|
||||
|
||||
return ValidationErrorEvent(
|
||||
error=error_msg,
|
||||
wrong_output=raw_output,
|
||||
)
|
||||
|
||||
def _format_error(self, error: Exception, raw_output: str) -> str:
|
||||
"""Format error for LLM feedback."""
|
||||
if isinstance(error, ValidationError):
|
||||
error_messages = []
|
||||
for err in error.errors():
|
||||
field = ".".join(str(loc) for loc in err["loc"])
|
||||
error_messages.append(f"- {err['msg']} in field '{field}'")
|
||||
return "Schema validation errors:\n" + "\n".join(error_messages)
|
||||
else:
|
||||
return f"Parse error: {str(error)}"
|
||||
|
||||
|
||||
class LLM:
|
||||
def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048):
|
||||
self.settings_obj = settings
|
||||
self.model_name = settings.LLM_MODEL
|
||||
self.url = settings.LLM_URL
|
||||
@@ -55,10 +212,9 @@ class LLM:
|
||||
api_key=self.api_key,
|
||||
context_window=self.context_window,
|
||||
is_chat_model=True,
|
||||
is_function_calling_model=True,
|
||||
is_function_calling_model=False,
|
||||
temperature=self.temperature,
|
||||
max_tokens=self.max_tokens,
|
||||
timeout=self.settings_obj.LLM_REQUEST_TIMEOUT,
|
||||
additional_kwargs={"extra_body": {"litellm_session_id": session_id}},
|
||||
)
|
||||
|
||||
@@ -78,91 +234,36 @@ class LLM:
|
||||
tone_name: str | None = None,
|
||||
timeout: int | None = None,
|
||||
) -> T:
|
||||
"""Get structured output from LLM using tool-call with reflection retry.
|
||||
"""Get structured output from LLM with validation retry via Workflow."""
|
||||
if timeout is None:
|
||||
timeout = self.settings_obj.LLM_STRUCTURED_RESPONSE_TIMEOUT
|
||||
|
||||
Uses astructured_predict (function-calling / tool-call mode) for the
|
||||
first attempt. On ValidationError or parse failure the wrong output
|
||||
and error are fed back as a reflection prompt and the call is retried
|
||||
up to LLM_PARSE_MAX_RETRIES times.
|
||||
|
||||
The outer retry() wrapper handles transient network errors with
|
||||
exponential back-off.
|
||||
"""
|
||||
max_retries = self.settings_obj.LLM_PARSE_MAX_RETRIES
|
||||
|
||||
async def _call_with_reflection():
|
||||
# Build full prompt: instruction + source texts
|
||||
if texts:
|
||||
texts_block = "\n\n".join(texts)
|
||||
full_prompt = f"{prompt}\n\n{texts_block}"
|
||||
else:
|
||||
full_prompt = prompt
|
||||
|
||||
prompt_tmpl = PromptTemplate("{user_prompt}")
|
||||
last_error: str | None = None
|
||||
|
||||
for attempt in range(1, max_retries + 2): # +2: first try + retries
|
||||
try:
|
||||
if attempt == 1:
|
||||
result = await Settings.llm.astructured_predict(
|
||||
output_cls, prompt_tmpl, user_prompt=full_prompt
|
||||
)
|
||||
else:
|
||||
reflection_tmpl = PromptTemplate(
|
||||
"{user_prompt}\n\n{reflection}"
|
||||
)
|
||||
result = await Settings.llm.astructured_predict(
|
||||
output_cls,
|
||||
reflection_tmpl,
|
||||
user_prompt=full_prompt,
|
||||
reflection=reflection,
|
||||
)
|
||||
|
||||
if attempt > 1:
|
||||
logger.info(
|
||||
f"LLM structured_predict succeeded on attempt "
|
||||
f"{attempt}/{max_retries + 1} for {output_cls.__name__}"
|
||||
)
|
||||
return result
|
||||
|
||||
except (ValidationError, ValueError) as e:
|
||||
wrong_output = str(e)
|
||||
if len(wrong_output) > 2000:
|
||||
wrong_output = wrong_output[:2000] + "... [truncated]"
|
||||
|
||||
last_error = self._format_validation_error(e)
|
||||
reflection = (
|
||||
f"Your previous response could not be parsed.\n\n"
|
||||
f"Error:\n{last_error}\n\n"
|
||||
"Please try again and return valid data matching the schema."
|
||||
)
|
||||
|
||||
logger.error(
|
||||
f"LLM parse error (attempt {attempt}/{max_retries + 1}): "
|
||||
f"{type(e).__name__}: {e}\n"
|
||||
f"Raw response: {wrong_output[:500]}"
|
||||
)
|
||||
|
||||
raise LLMParseError(
|
||||
async def run_workflow():
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=output_cls,
|
||||
error_msg=last_error or "Max retries exceeded",
|
||||
attempts=max_retries + 1,
|
||||
max_retries=self.settings_obj.LLM_PARSE_MAX_RETRIES + 1,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
return await retry(_call_with_reflection)(
|
||||
result = await workflow.run(
|
||||
prompt=prompt,
|
||||
texts=texts,
|
||||
tone_name=tone_name,
|
||||
)
|
||||
|
||||
if "error" in result:
|
||||
error_msg = result["error"] or "Max retries exceeded"
|
||||
raise LLMParseError(
|
||||
output_cls=output_cls,
|
||||
error_msg=error_msg,
|
||||
attempts=result.get("attempts", 0),
|
||||
)
|
||||
|
||||
return result["success"]
|
||||
|
||||
return await retry(run_workflow)(
|
||||
retry_attempts=3,
|
||||
retry_backoff_interval=1.0,
|
||||
retry_backoff_max=30.0,
|
||||
retry_ignore_exc_types=(ConnectionError, TimeoutError, OSError),
|
||||
retry_ignore_exc_types=(WorkflowTimeoutError,),
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _format_validation_error(error: Exception) -> str:
|
||||
"""Format a validation/parse error for LLM reflection feedback."""
|
||||
if isinstance(error, ValidationError):
|
||||
error_messages = []
|
||||
for err in error.errors():
|
||||
field = ".".join(str(loc) for loc in err["loc"])
|
||||
error_messages.append(f"- {err['msg']} in field '{field}'")
|
||||
return "Schema validation errors:\n" + "\n".join(error_messages)
|
||||
return f"Parse error: {str(error)}"
|
||||
|
||||
@@ -62,8 +62,6 @@ from reflector.processors.types import (
|
||||
from reflector.processors.types import Transcript as TranscriptProcessorType
|
||||
from reflector.settings import settings
|
||||
from reflector.storage import get_transcripts_storage
|
||||
from reflector.views.transcripts import GetTranscriptTopic
|
||||
from reflector.ws_events import TranscriptEventName
|
||||
from reflector.ws_manager import WebsocketManager, get_ws_manager
|
||||
from reflector.zulip import (
|
||||
get_zulip_message,
|
||||
@@ -91,11 +89,7 @@ def broadcast_to_sockets(func):
|
||||
if transcript and transcript.user_id:
|
||||
# Emit only relevant events to the user room to avoid noisy updates.
|
||||
# Allowed: STATUS, FINAL_TITLE, DURATION. All are prefixed with TRANSCRIPT_
|
||||
allowed_user_events: set[TranscriptEventName] = {
|
||||
"STATUS",
|
||||
"FINAL_TITLE",
|
||||
"DURATION",
|
||||
}
|
||||
allowed_user_events = {"STATUS", "FINAL_TITLE", "DURATION"}
|
||||
if resp.event in allowed_user_events:
|
||||
await self.ws_manager.send_json(
|
||||
room_id=f"user:{transcript.user_id}",
|
||||
@@ -250,14 +244,13 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage]
|
||||
)
|
||||
if isinstance(data, TitleSummaryWithIdProcessorType):
|
||||
topic.id = data.id
|
||||
get_topic = GetTranscriptTopic.from_transcript_topic(topic)
|
||||
async with self.transaction():
|
||||
transcript = await self.get_transcript()
|
||||
await transcripts_controller.upsert_topic(transcript, topic)
|
||||
return await transcripts_controller.append_event(
|
||||
transcript=transcript,
|
||||
event="TOPIC",
|
||||
data=get_topic,
|
||||
data=topic,
|
||||
)
|
||||
|
||||
@broadcast_to_sockets
|
||||
|
||||
74
server/reflector/processors/audio_diarization_pyannote.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import os
|
||||
|
||||
import torch
|
||||
import torchaudio
|
||||
from pyannote.audio import Pipeline
|
||||
|
||||
from reflector.processors.audio_diarization import AudioDiarizationProcessor
|
||||
from reflector.processors.audio_diarization_auto import AudioDiarizationAutoProcessor
|
||||
from reflector.processors.types import AudioDiarizationInput, DiarizationSegment
|
||||
|
||||
|
||||
class AudioDiarizationPyannoteProcessor(AudioDiarizationProcessor):
|
||||
"""Local diarization processor using pyannote.audio library"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model_name: str = "pyannote/speaker-diarization-3.1",
|
||||
pyannote_auth_token: str | None = None,
|
||||
device: str | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(**kwargs)
|
||||
self.model_name = model_name
|
||||
self.auth_token = pyannote_auth_token or os.environ.get("HF_TOKEN")
|
||||
self.device = device
|
||||
|
||||
if device is None:
|
||||
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
|
||||
self.logger.info(f"Loading pyannote diarization model: {self.model_name}")
|
||||
self.diarization_pipeline = Pipeline.from_pretrained(
|
||||
self.model_name, use_auth_token=self.auth_token
|
||||
)
|
||||
self.diarization_pipeline.to(torch.device(self.device))
|
||||
self.logger.info(f"Diarization model loaded on device: {self.device}")
|
||||
|
||||
async def _diarize(self, data: AudioDiarizationInput) -> list[DiarizationSegment]:
|
||||
try:
|
||||
# Load audio file (audio_url is assumed to be a local file path)
|
||||
self.logger.info(f"Loading local audio file: {data.audio_url}")
|
||||
waveform, sample_rate = torchaudio.load(data.audio_url)
|
||||
audio_input = {"waveform": waveform, "sample_rate": sample_rate}
|
||||
self.logger.info("Running speaker diarization")
|
||||
diarization = self.diarization_pipeline(audio_input)
|
||||
|
||||
# Convert pyannote diarization output to our format
|
||||
segments = []
|
||||
for segment, _, speaker in diarization.itertracks(yield_label=True):
|
||||
# Extract speaker number from label (e.g., "SPEAKER_00" -> 0)
|
||||
speaker_id = 0
|
||||
if speaker.startswith("SPEAKER_"):
|
||||
try:
|
||||
speaker_id = int(speaker.split("_")[-1])
|
||||
except (ValueError, IndexError):
|
||||
# Fallback to hash-based ID if parsing fails
|
||||
speaker_id = hash(speaker) % 1000
|
||||
|
||||
segments.append(
|
||||
{
|
||||
"start": round(segment.start, 3),
|
||||
"end": round(segment.end, 3),
|
||||
"speaker": speaker_id,
|
||||
}
|
||||
)
|
||||
|
||||
self.logger.info(f"Diarization completed with {len(segments)} segments")
|
||||
return segments
|
||||
|
||||
except Exception as e:
|
||||
self.logger.exception(f"Diarization failed: {e}")
|
||||
raise
|
||||
|
||||
|
||||
AudioDiarizationAutoProcessor.register("pyannote", AudioDiarizationPyannoteProcessor)
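Selecting this in-process backend is done through configuration; a sketch, assuming the DIARIZATION_* and HF_TOKEN settings shown elsewhere in this compare:

# .env (sketch)
# DIARIZATION_BACKEND=pyannote   # instead of the default "modal" HTTP backend
# HF_TOKEN=<token with access to pyannote/speaker-diarization-3.1>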
|
||||
@@ -39,7 +39,7 @@ class TranscriptFinalTitleProcessor(Processor):
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self.chunks: list[TitleSummary] = []
|
||||
self.llm = LLM(settings=settings, temperature=0.5)
|
||||
self.llm = LLM(settings=settings, temperature=0.5, max_tokens=200)
|
||||
|
||||
async def _push(self, data: TitleSummary):
|
||||
self.chunks.append(data)
|
||||
|
||||
@@ -14,12 +14,10 @@ class TopicResponse(BaseModel):
|
||||
title: str = Field(
|
||||
description="A descriptive title for the topic being discussed",
|
||||
validation_alias=AliasChoices("title", "Title"),
|
||||
min_length=8,
|
||||
)
|
||||
summary: str = Field(
|
||||
description="A concise 1-2 sentence summary of the discussion",
|
||||
validation_alias=AliasChoices("summary", "Summary"),
|
||||
min_length=8,
|
||||
)
|
||||
|
||||
|
||||
@@ -37,7 +35,7 @@ class TranscriptTopicDetectorProcessor(Processor):
|
||||
super().__init__(**kwargs)
|
||||
self.transcript = None
|
||||
self.min_transcript_length = min_transcript_length
|
||||
self.llm = LLM(settings=settings, temperature=0.9)
|
||||
self.llm = LLM(settings=settings, temperature=0.9, max_tokens=500)
|
||||
|
||||
async def _push(self, data: Transcript):
|
||||
if self.transcript is None:
|
||||
|
||||
@@ -97,11 +97,8 @@ async def validate_transcript_for_processing(
|
||||
if transcript.locked:
|
||||
return ValidationLocked(detail="Recording is locked")
|
||||
|
||||
if (
|
||||
transcript.status == "idle"
|
||||
and not transcript.workflow_run_id
|
||||
and not transcript.recording_id
|
||||
):
|
||||
# Check if recording is ready for processing
|
||||
if transcript.status == "idle" and not transcript.workflow_run_id:
|
||||
return ValidationNotReady(detail="Recording is not ready for processing")
|
||||
|
||||
# Check Celery tasks
|
||||
@@ -270,6 +267,19 @@ async def dispatch_transcript_processing(
|
||||
)
|
||||
|
||||
logger.info("Hatchet workflow dispatched", workflow_id=workflow_id)
|
||||
|
||||
try:
|
||||
from reflector.hatchet.dag_progress import broadcast_dag_status # noqa: I001, PLC0415
|
||||
|
||||
await broadcast_dag_status(config.transcript_id, workflow_id)
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"[DAG Progress] Failed initial broadcast",
|
||||
transcript_id=config.transcript_id,
|
||||
workflow_id=workflow_id,
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
elif isinstance(config, FileProcessingConfig):
|
||||
|
||||
@@ -12,17 +12,6 @@ class Settings(BaseSettings):
|
||||
extra="ignore",
|
||||
)
|
||||
|
||||
ROOT_PATH: str = "/"
|
||||
|
||||
# WebRTC port range for ICE candidates (e.g. "50000-50100").
|
||||
# When set, monkey-patches aioice to bind UDP sockets within this range,
|
||||
# allowing Docker port mapping instead of network_mode: host.
|
||||
WEBRTC_PORT_RANGE: str | None = None
|
||||
# Host IP or hostname to advertise in ICE candidates instead of the
|
||||
# container's internal IP. Use "host.docker.internal" in Docker with
|
||||
# extra_hosts, or a specific LAN IP. Resolved at connection time.
|
||||
WEBRTC_HOST: str | None = None
|
||||
|
||||
# CORS
|
||||
UI_BASE_URL: str = "http://localhost:3000"
|
||||
CORS_ORIGIN: str = "*"
|
||||
@@ -60,7 +49,6 @@ class Settings(BaseSettings):
|
||||
TRANSCRIPT_STORAGE_AWS_REGION: str = "us-east-1"
|
||||
TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID: str | None = None
|
||||
TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None
|
||||
TRANSCRIPT_STORAGE_AWS_ENDPOINT_URL: str | None = None
|
||||
|
||||
# Platform-specific recording storage (follows {PREFIX}_STORAGE_AWS_{CREDENTIAL} pattern)
|
||||
# Whereby storage configuration
|
||||
@@ -87,7 +75,6 @@ class Settings(BaseSettings):
|
||||
LLM_URL: str | None = None
|
||||
LLM_API_KEY: str | None = None
|
||||
LLM_CONTEXT_WINDOW: int = 16000
|
||||
LLM_REQUEST_TIMEOUT: float = 300.0 # HTTP request timeout for LLM calls (seconds)
|
||||
|
||||
LLM_PARSE_MAX_RETRIES: int = (
|
||||
3 # Max retries for JSON/validation errors (total attempts = retries + 1)
|
||||
@@ -97,7 +84,9 @@ class Settings(BaseSettings):
|
||||
)
|
||||
|
||||
# Diarization
|
||||
# backend: modal — HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
|
||||
# backends:
|
||||
# - pyannote: in-process model loading (no HTTP, runs in same process)
|
||||
# - modal: HTTP API client (works with Modal.com OR self-hosted gpu/self_hosted/)
|
||||
DIARIZATION_ENABLED: bool = True
|
||||
DIARIZATION_BACKEND: str = "modal"
|
||||
DIARIZATION_URL: str | None = None
|
||||
@@ -106,6 +95,9 @@ class Settings(BaseSettings):
|
||||
# Diarization: modal backend
|
||||
DIARIZATION_MODAL_API_KEY: str | None = None
|
||||
|
||||
# Diarization: local pyannote.audio
|
||||
DIARIZATION_PYANNOTE_AUTH_TOKEN: str | None = None
|
||||
|
||||
# Audio Padding (Modal.com backend)
|
||||
PADDING_URL: str | None = None
|
||||
PADDING_MODAL_API_KEY: str | None = None
|
||||
@@ -113,7 +105,7 @@ class Settings(BaseSettings):
|
||||
# Sentry
|
||||
SENTRY_DSN: str | None = None
|
||||
|
||||
# User authentication (none, jwt, password)
|
||||
# User authentication (none, jwt)
|
||||
AUTH_BACKEND: str = "none"
|
||||
|
||||
# User authentication using JWT
|
||||
@@ -121,10 +113,6 @@ class Settings(BaseSettings):
|
||||
AUTH_JWT_PUBLIC_KEY: str | None = "authentik.monadical.com_public.pem"
|
||||
AUTH_JWT_AUDIENCE: str | None = None
|
||||
|
||||
# User authentication using password (selfhosted)
|
||||
ADMIN_EMAIL: str | None = None
|
||||
ADMIN_PASSWORD_HASH: str | None = None
|
||||
|
||||
PUBLIC_MODE: bool = False
|
||||
PUBLIC_DATA_RETENTION_DAYS: PositiveInt = 7
|
||||
|
||||
@@ -158,9 +146,6 @@ class Settings(BaseSettings):
|
||||
WHEREBY_WEBHOOK_SECRET: str | None = None
|
||||
AWS_PROCESS_RECORDING_QUEUE_URL: str | None = None
|
||||
SQS_POLLING_TIMEOUT_SECONDS: int = 60
|
||||
CELERY_BEAT_POLL_INTERVAL: int = (
|
||||
0 # 0 = use individual defaults; set e.g. 300 for 5-min polling
|
||||
)
|
||||
|
||||
# Daily.co integration
|
||||
DAILY_API_KEY: str | None = None
|
||||
|
||||
@@ -53,7 +53,6 @@ class AwsStorage(Storage):
|
||||
aws_access_key_id: str | None = None,
|
||||
aws_secret_access_key: str | None = None,
|
||||
aws_role_arn: str | None = None,
|
||||
aws_endpoint_url: str | None = None,
|
||||
):
|
||||
if not aws_bucket_name:
|
||||
raise ValueError("Storage `aws_storage` require `aws_bucket_name`")
|
||||
@@ -74,26 +73,17 @@ class AwsStorage(Storage):
|
||||
self._access_key_id = aws_access_key_id
|
||||
self._secret_access_key = aws_secret_access_key
|
||||
self._role_arn = aws_role_arn
|
||||
self._endpoint_url = aws_endpoint_url
|
||||
|
||||
self.aws_folder = ""
|
||||
if "/" in aws_bucket_name:
|
||||
self._bucket_name, self.aws_folder = aws_bucket_name.split("/", 1)
|
||||
|
||||
config_kwargs: dict = {"retries": {"max_attempts": 3, "mode": "adaptive"}}
|
||||
if aws_endpoint_url:
|
||||
config_kwargs["s3"] = {"addressing_style": "path"}
|
||||
self.boto_config = Config(**config_kwargs)
|
||||
|
||||
self.boto_config = Config(retries={"max_attempts": 3, "mode": "adaptive"})
|
||||
self.session = aioboto3.Session(
|
||||
aws_access_key_id=aws_access_key_id,
|
||||
aws_secret_access_key=aws_secret_access_key,
|
||||
region_name=aws_region,
|
||||
)
|
||||
if aws_endpoint_url:
|
||||
self.base_url = f"{aws_endpoint_url}/{self._bucket_name}/"
|
||||
else:
|
||||
self.base_url = f"https://{self._bucket_name}.s3.amazonaws.com/"
|
||||
self.base_url = f"https://{self._bucket_name}.s3.amazonaws.com/"
|
||||
|
||||
# Implement credential properties
|
||||
@property
|
||||
@@ -149,9 +139,7 @@ class AwsStorage(Storage):
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
logger.info(f"Uploading {filename} to S3 {actual_bucket}/{folder}")
|
||||
|
||||
async with self.session.client(
|
||||
"s3", config=self.boto_config, endpoint_url=self._endpoint_url
|
||||
) as client:
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
if isinstance(data, bytes):
|
||||
await client.put_object(Bucket=actual_bucket, Key=s3filename, Body=data)
|
||||
else:
|
||||
@@ -174,9 +162,7 @@ class AwsStorage(Storage):
|
||||
actual_bucket = bucket or self._bucket_name
|
||||
folder = self.aws_folder
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client(
|
||||
"s3", config=self.boto_config, endpoint_url=self._endpoint_url
|
||||
) as client:
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
presigned_url = await client.generate_presigned_url(
|
||||
operation,
|
||||
Params={"Bucket": actual_bucket, "Key": s3filename},
|
||||
@@ -191,9 +177,7 @@ class AwsStorage(Storage):
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Deleting {filename} from S3 {actual_bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client(
|
||||
"s3", config=self.boto_config, endpoint_url=self._endpoint_url
|
||||
) as client:
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
await client.delete_object(Bucket=actual_bucket, Key=s3filename)
|
||||
|
||||
@handle_s3_client_errors("download")
|
||||
@@ -202,9 +186,7 @@ class AwsStorage(Storage):
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Downloading {filename} from S3 {actual_bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client(
|
||||
"s3", config=self.boto_config, endpoint_url=self._endpoint_url
|
||||
) as client:
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
response = await client.get_object(Bucket=actual_bucket, Key=s3filename)
|
||||
return await response["Body"].read()
|
||||
|
||||
@@ -219,9 +201,7 @@ class AwsStorage(Storage):
|
||||
logger.info(f"Listing objects from S3 {actual_bucket} with prefix '{s3prefix}'")
|
||||
|
||||
keys = []
|
||||
async with self.session.client(
|
||||
"s3", config=self.boto_config, endpoint_url=self._endpoint_url
|
||||
) as client:
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
paginator = client.get_paginator("list_objects_v2")
|
||||
async for page in paginator.paginate(Bucket=actual_bucket, Prefix=s3prefix):
|
||||
if "Contents" in page:
|
||||
@@ -247,9 +227,7 @@ class AwsStorage(Storage):
|
||||
folder = self.aws_folder
|
||||
logger.info(f"Streaming {filename} from S3 {actual_bucket}/{folder}")
|
||||
s3filename = f"{folder}/{filename}" if folder else filename
|
||||
async with self.session.client(
|
||||
"s3", config=self.boto_config, endpoint_url=self._endpoint_url
|
||||
) as client:
|
||||
async with self.session.client("s3", config=self.boto_config) as client:
|
||||
await client.download_fileobj(
|
||||
Bucket=actual_bucket, Key=s3filename, Fileobj=fileobj
|
||||
)
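A hedged usage sketch for the new aws_endpoint_url path (constructor keyword names are taken from the __init__ hunk above; the MinIO endpoint, credentials, and any omitted arguments are assumptions):

# Sketch: pointing AwsStorage at an S3-compatible endpoint such as MinIO (not part of this change).
storage = AwsStorage(
    aws_bucket_name="reflector-recordings/whereby",  # "bucket/folder" form is split internally
    aws_access_key_id="minioadmin",                  # placeholder credentials
    aws_secret_access_key="minioadmin",
    aws_endpoint_url="http://localhost:9000",        # enables path-style addressing per __init__ above
)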
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
"""Create or update an admin user with password authentication.
|
||||
|
||||
Usage:
|
||||
uv run python -m reflector.tools.create_admin --email admin@localhost --password <pass>
|
||||
uv run python -m reflector.tools.create_admin --email admin@localhost # prompts for password
|
||||
uv run python -m reflector.tools.create_admin --hash-only --password <pass> # print hash only
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import asyncio
|
||||
import getpass
|
||||
import sys
|
||||
|
||||
from reflector.auth.password_utils import hash_password
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
|
||||
async def create_admin(email: str, password: str) -> None:
|
||||
from reflector.db import get_database
|
||||
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
|
||||
try:
|
||||
password_hash = hash_password(password)
|
||||
|
||||
existing = await user_controller.get_by_email(email)
|
||||
if existing:
|
||||
await user_controller.set_password_hash(existing.id, password_hash)
|
||||
print(f"Updated password for existing user: {email} (id={existing.id})")
|
||||
else:
|
||||
user = await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid=f"local:{email}",
|
||||
email=email,
|
||||
password_hash=password_hash,
|
||||
)
|
||||
print(f"Created admin user: {email} (id={user.id})")
|
||||
finally:
|
||||
await database.disconnect()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Create or update an admin user")
|
||||
parser.add_argument(
|
||||
"--email", default="admin@localhost", help="Admin email address"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--password",
|
||||
help="Admin password (will prompt if not provided)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--hash-only",
|
||||
action="store_true",
|
||||
help="Print the password hash and exit (for ADMIN_PASSWORD_HASH env var)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
password = args.password
|
||||
if not password:
|
||||
password = getpass.getpass("Password: ")
|
||||
confirm = getpass.getpass("Confirm password: ")
|
||||
if password != confirm:
|
||||
print("Passwords do not match", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
if not password:
|
||||
print("Password cannot be empty", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
if args.hash_only:
|
||||
print(hash_password(password))
|
||||
sys.exit(0)
|
||||
|
||||
asyncio.run(create_admin(args.email, password))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -24,9 +24,6 @@ from reflector.pipelines.main_live_pipeline import (
|
||||
pipeline_process as live_pipeline_process,
|
||||
)
|
||||
from reflector.storage import Storage
|
||||
from reflector.worker.app import (
|
||||
app as celery_app, # noqa: F401 - ensure Celery uses Redis broker
|
||||
)
|
||||
|
||||
|
||||
def validate_s3_bucket_name(bucket: str) -> None:
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
"""Provision admin user on server startup using environment variables.
|
||||
|
||||
Reads ADMIN_EMAIL and ADMIN_PASSWORD_HASH from settings and creates or updates
|
||||
the admin user. Intended to be called from runserver.sh on container startup.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.settings import settings
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
|
||||
async def provision() -> None:
|
||||
if not settings.ADMIN_EMAIL or not settings.ADMIN_PASSWORD_HASH:
|
||||
return
|
||||
|
||||
from reflector.db import get_database
|
||||
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
|
||||
try:
|
||||
existing = await user_controller.get_by_email(settings.ADMIN_EMAIL)
|
||||
if existing:
|
||||
await user_controller.set_password_hash(
|
||||
existing.id, settings.ADMIN_PASSWORD_HASH
|
||||
)
|
||||
print(f"Updated admin user: {settings.ADMIN_EMAIL}")
|
||||
else:
|
||||
await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid=f"local:{settings.ADMIN_EMAIL}",
|
||||
email=settings.ADMIN_EMAIL,
|
||||
password_hash=settings.ADMIN_PASSWORD_HASH,
|
||||
)
|
||||
print(f"Created admin user: {settings.ADMIN_EMAIL}")
|
||||
finally:
|
||||
await database.disconnect()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(provision())
|
||||
@@ -80,14 +80,7 @@ async def webhook(request: Request):
|
||||
try:
|
||||
event = event_adapter.validate_python(body_json)
|
||||
except Exception as e:
|
||||
err_detail = str(e)
|
||||
if hasattr(e, "errors"):
|
||||
err_detail = f"{err_detail}; errors={e.errors()!r}"
|
||||
logger.error(
|
||||
"Failed to parse webhook event",
|
||||
error=err_detail,
|
||||
body=body.decode(),
|
||||
)
|
||||
logger.error("Failed to parse webhook event", error=str(e), body=body.decode())
|
||||
raise HTTPException(status_code=422, detail="Invalid event format")
|
||||
|
||||
match event:
|
||||
|
||||
@@ -10,7 +10,6 @@ from pydantic import BaseModel
|
||||
from reflector.events import subscribers_shutdown
|
||||
from reflector.logger import logger
|
||||
from reflector.pipelines.runner import PipelineRunner
|
||||
from reflector.settings import settings
|
||||
|
||||
sessions = []
|
||||
router = APIRouter()
|
||||
@@ -124,16 +123,7 @@ async def rtc_offer_base(
|
||||
# update metrics
|
||||
m_rtc_sessions.inc()
|
||||
|
||||
sdp = pc.localDescription.sdp
|
||||
|
||||
# Rewrite ICE candidate IPs when running behind Docker bridge networking
|
||||
if settings.WEBRTC_HOST:
|
||||
from reflector.webrtc_ports import resolve_webrtc_host, rewrite_sdp_host
|
||||
|
||||
host_ip = resolve_webrtc_host(settings.WEBRTC_HOST)
|
||||
sdp = rewrite_sdp_host(sdp, host_ip)
|
||||
|
||||
return RtcOffer(sdp=sdp, type=pc.localDescription.type)
|
||||
return RtcOffer(sdp=pc.localDescription.sdp, type=pc.localDescription.type)
|
||||
|
||||
|
||||
@subscribers_shutdown.append
|
||||
|
||||
@@ -111,7 +111,7 @@ class GetTranscriptMinimal(BaseModel):
|
||||
room_id: str | None = None
|
||||
room_name: str | None = None
|
||||
audio_deleted: bool | None = None
|
||||
change_seq: int | None = None
|
||||
dag_status: list[dict] | None = None
|
||||
|
||||
|
||||
class TranscriptParticipantWithEmail(TranscriptParticipant):
|
||||
@@ -267,22 +267,12 @@ async def transcripts_list(
|
||||
source_kind: SourceKind | None = None,
|
||||
room_id: str | None = None,
|
||||
search_term: str | None = None,
|
||||
change_seq_from: int | None = None,
|
||||
sort_by: Literal["created_at", "change_seq"] | None = None,
|
||||
):
|
||||
if not user and not settings.PUBLIC_MODE:
|
||||
raise HTTPException(status_code=401, detail="Not authenticated")
|
||||
|
||||
user_id = user["sub"] if user else None
|
||||
|
||||
# Default behavior preserved: sort_by=None → "-created_at"
|
||||
if sort_by == "change_seq":
|
||||
order_by = "change_seq" # ASC (ascending for checkpoint-based polling)
|
||||
elif sort_by == "created_at":
|
||||
order_by = "-created_at" # DESC (newest first, same as current default)
|
||||
else:
|
||||
order_by = "-created_at" # default, backward compatible
|
||||
|
||||
return await apaginate(
|
||||
get_database(),
|
||||
await transcripts_controller.get_all(
|
||||
@@ -290,8 +280,7 @@ async def transcripts_list(
|
||||
source_kind=SourceKind(source_kind) if source_kind else None,
|
||||
room_id=room_id,
|
||||
search_term=search_term,
|
||||
order_by=order_by,
|
||||
change_seq_from=change_seq_from,
|
||||
order_by="-created_at",
|
||||
return_query=True,
|
||||
),
|
||||
)
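A sketch of the checkpoint-based polling that change_seq_from and sort_by enable (base URL, endpoint path, and the fastapi-pagination "items" key are assumptions, not taken from this diff):

# Sketch: ingestion poller using sort_by=change_seq + change_seq_from (not part of this change).
import time

import httpx

checkpoint = 0
while True:
    resp = httpx.get(
        "https://reflector.example.com/v1/transcripts",  # hypothetical deployment URL
        params={"sort_by": "change_seq", "change_seq_from": checkpoint},
    )
    for item in resp.json().get("items", []):  # page shape assumed from fastapi-pagination
        checkpoint = max(checkpoint, item.get("change_seq") or 0)
        # ...hand the transcript to the ingestion pipeline here...
    time.sleep(30)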
|
||||
@@ -503,6 +492,13 @@ async def transcript_get(
|
||||
)
|
||||
)
|
||||
|
||||
dag_status = None
|
||||
if transcript.status == "processing" and transcript.events:
|
||||
for ev in reversed(transcript.events):
|
||||
if ev.event == "DAG_STATUS":
|
||||
dag_status = ev.data.get("tasks") if isinstance(ev.data, dict) else None
|
||||
break
|
||||
|
||||
base_data = {
|
||||
"id": transcript.id,
|
||||
"user_id": transcript.user_id,
|
||||
@@ -524,7 +520,7 @@ async def transcript_get(
|
||||
"room_id": transcript.room_id,
|
||||
"room_name": room_name,
|
||||
"audio_deleted": transcript.audio_deleted,
|
||||
"change_seq": transcript.change_seq,
|
||||
"dag_status": dag_status,
|
||||
"participants": participants,
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ from fastapi import APIRouter, Depends, HTTPException, UploadFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
import reflector.auth as auth
|
||||
from reflector.db.transcripts import SourceKind, transcripts_controller
|
||||
from reflector.db.transcripts import transcripts_controller
|
||||
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
|
||||
|
||||
router = APIRouter()
|
||||
@@ -88,10 +88,8 @@ async def transcript_record_upload(
|
||||
finally:
|
||||
container.close()
|
||||
|
||||
# set the status to "uploaded" and mark as file source
|
||||
await transcripts_controller.update(
|
||||
transcript, {"status": "uploaded", "source_kind": SourceKind.FILE}
|
||||
)
|
||||
# set the status to "uploaded"
|
||||
await transcripts_controller.update(transcript, {"status": "uploaded"})
|
||||
|
||||
# launch a background task to process the file
|
||||
task_pipeline_file_process.delay(transcript_id=transcript_id)
|
||||
|
||||
@@ -4,22 +4,18 @@ Transcripts websocket API
|
||||
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, WebSocket, WebSocketDisconnect
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, WebSocket, WebSocketDisconnect
|
||||
|
||||
import reflector.auth as auth
|
||||
from reflector.db.transcripts import transcripts_controller
|
||||
from reflector.ws_events import TranscriptWsEvent
|
||||
from reflector.ws_manager import get_ws_manager
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/transcripts/{transcript_id}/events",
|
||||
response_model=TranscriptWsEvent,
|
||||
summary="Transcript WebSocket event schema",
|
||||
description="Stub exposing the discriminated union of all transcript-level WS events for OpenAPI type generation. Real events are delivered over the WebSocket at the same path.",
|
||||
)
|
||||
@router.get("/transcripts/{transcript_id}/events")
|
||||
async def transcript_get_websocket_events(transcript_id: str):
|
||||
pass
|
||||
|
||||
@@ -28,9 +24,8 @@ async def transcript_get_websocket_events(transcript_id: str):
|
||||
async def transcript_events_websocket(
|
||||
transcript_id: str,
|
||||
websocket: WebSocket,
|
||||
user: Optional[auth.UserInfo] = Depends(auth.current_user_optional),
|
||||
):
|
||||
_, negotiated_subprotocol = auth.parse_ws_bearer_token(websocket)
|
||||
user = await auth.current_user_ws_optional(websocket)
|
||||
user_id = user["sub"] if user else None
|
||||
transcript = await transcripts_controller.get_by_id_for_http(
|
||||
transcript_id, user_id=user_id
|
||||
@@ -42,19 +37,23 @@ async def transcript_events_websocket(
|
||||
# use ts:transcript_id as room id
|
||||
room_id = f"ts:{transcript_id}"
|
||||
ws_manager = get_ws_manager()
|
||||
await ws_manager.add_user_to_room(
|
||||
room_id, websocket, subprotocol=negotiated_subprotocol
|
||||
)
|
||||
await ws_manager.add_user_to_room(room_id, websocket)
|
||||
|
||||
try:
|
||||
# on first connection, send all events only to the current user
|
||||
# Find the last DAG_STATUS to send after other historical events
|
||||
last_dag_status = None
|
||||
for event in transcript.events:
|
||||
# for now, do not send TRANSCRIPT or STATUS events - these are live events
# not necessary to send to the client; but keep the rest
|
||||
name = event.event
|
||||
if name in ("TRANSCRIPT", "STATUS"):
|
||||
continue
|
||||
if name == "DAG_STATUS":
|
||||
last_dag_status = event
|
||||
continue
|
||||
await websocket.send_json(event.model_dump(mode="json"))
|
||||
# Send only the most recent DAG_STATUS so reconnecting clients get current state
|
||||
if last_dag_status is not None:
|
||||
await websocket.send_json(last_dag_status.model_dump(mode="json"))
|
||||
|
||||
# XXX if transcript is final (locked=True and status=ended)
|
||||
# XXX send a final event to the client and close the connection
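A small consumer sketch for the replayed DAG_STATUS event (payload shape inferred from transcript_get above; the COMPLETED status string matches the test fixtures later in this change):

# Sketch: websocket message handler for DAG_STATUS (not part of this change).
import json

def on_message(raw: str) -> None:
    event = json.loads(raw)
    if event.get("event") != "DAG_STATUS":
        return
    tasks = (event.get("data") or {}).get("tasks") or []
    done = sum(1 for t in tasks if t.get("status") == "COMPLETED")
    print(f"pipeline progress: {done}/{len(tasks)} tasks completed")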
|
||||
|
||||
@@ -1,48 +1,55 @@
|
||||
from typing import Optional
|
||||
|
||||
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
|
||||
from fastapi import APIRouter, WebSocket
|
||||
|
||||
import reflector.auth as auth
|
||||
from reflector.ws_events import UserWsEvent
|
||||
from reflector.auth.auth_jwt import JWTAuth # type: ignore
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.ws_manager import get_ws_manager
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get(
|
||||
"/events",
|
||||
response_model=UserWsEvent,
|
||||
summary="User WebSocket event schema",
|
||||
description="Stub exposing the discriminated union of all user-level WS events for OpenAPI type generation. Real events are delivered over the WebSocket at the same path.",
|
||||
)
|
||||
async def user_get_websocket_events():
|
||||
pass
|
||||
|
||||
|
||||
# Close code for unauthorized WebSocket connections
|
||||
UNAUTHORISED = 4401
|
||||
|
||||
|
||||
@router.websocket("/events")
|
||||
async def user_events_websocket(websocket: WebSocket):
|
||||
token, negotiated_subprotocol = auth.parse_ws_bearer_token(websocket)
|
||||
# Browser can't send Authorization header for WS; use subprotocol: ["bearer", token]
|
||||
raw_subprotocol = websocket.headers.get("sec-websocket-protocol") or ""
|
||||
parts = [p.strip() for p in raw_subprotocol.split(",") if p.strip()]
|
||||
token: Optional[str] = None
|
||||
negotiated_subprotocol: Optional[str] = None
|
||||
if len(parts) >= 2 and parts[0].lower() == "bearer":
|
||||
negotiated_subprotocol = "bearer"
|
||||
token = parts[1]
|
||||
|
||||
user_id: Optional[str] = None
|
||||
if not token:
|
||||
await websocket.close(code=UNAUTHORISED)
|
||||
return
|
||||
|
||||
try:
|
||||
user = await auth.current_user_ws_optional(websocket)
|
||||
payload = JWTAuth().verify_token(token)
|
||||
authentik_uid = payload.get("sub")
|
||||
|
||||
if authentik_uid:
|
||||
user = await user_controller.get_by_authentik_uid(authentik_uid)
|
||||
if user:
|
||||
user_id = user.id
|
||||
else:
|
||||
await websocket.close(code=UNAUTHORISED)
|
||||
return
|
||||
else:
|
||||
await websocket.close(code=UNAUTHORISED)
|
||||
return
|
||||
except Exception:
|
||||
await websocket.close(code=UNAUTHORISED)
|
||||
return
|
||||
|
||||
if not user:
|
||||
if not user_id:
|
||||
await websocket.close(code=UNAUTHORISED)
|
||||
return
|
||||
|
||||
user_id: Optional[str] = user.sub if hasattr(user, "sub") else user["sub"]
|
||||
|
||||
room_id = f"user:{user_id}"
|
||||
ws_manager = get_ws_manager()
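Because browsers cannot set an Authorization header on a WebSocket upgrade, the token travels in the subprotocol list; a minimal client sketch (the third-party websockets package and the URL are assumptions):

# Sketch: client side of the ["bearer", <token>] subprotocol handshake (not part of this change).
import asyncio

import websockets  # third-party "websockets" package

async def listen(token: str) -> None:
    uri = "wss://reflector.example.com/v1/events"  # hypothetical deployment URL
    async with websockets.connect(uri, subprotocols=["bearer", token]) as ws:
        async for message in ws:
            print(message)

# asyncio.run(listen(jwt_token))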
|
||||
|
||||
@@ -53,8 +60,6 @@ async def user_events_websocket(websocket: WebSocket):
|
||||
try:
|
||||
while True:
|
||||
await websocket.receive()
|
||||
except (RuntimeError, WebSocketDisconnect):
|
||||
pass
|
||||
finally:
|
||||
if room_id:
|
||||
await ws_manager.remove_user_from_room(room_id, websocket)
|
||||
|
||||
@@ -1,111 +0,0 @@
|
||||
"""
|
||||
Monkey-patch aioice to use a fixed UDP port range for ICE candidates,
|
||||
and optionally rewrite SDP to advertise a different host IP.
|
||||
|
||||
This allows running the server in Docker with bridge networking
|
||||
(no network_mode: host) by:
|
||||
1. Restricting ICE UDP ports to a known range that can be mapped in Docker
|
||||
2. Replacing container-internal IPs with the Docker host IP in SDP answers
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import socket
|
||||
|
||||
from reflector.logger import logger
|
||||
|
||||
|
||||
def parse_port_range(range_str: str) -> tuple[int, int]:
|
||||
"""Parse a 'min-max' string into (min_port, max_port)."""
|
||||
parts = range_str.split("-")
|
||||
if len(parts) != 2:
|
||||
raise ValueError(f"WEBRTC_PORT_RANGE must be 'min-max', got: {range_str!r}")
|
||||
min_port, max_port = int(parts[0]), int(parts[1])
|
||||
if not (1024 <= min_port <= max_port <= 65535):
|
||||
raise ValueError(
|
||||
f"Invalid port range: {min_port}-{max_port} "
|
||||
"(must be 1024-65535 with min <= max)"
|
||||
)
|
||||
return min_port, max_port
|
||||
|
||||
|
||||
def patch_aioice_port_range(min_port: int, max_port: int) -> None:
|
||||
"""
|
||||
Monkey-patch aioice so that ICE candidate UDP sockets bind to ports
|
||||
within [min_port, max_port] instead of OS-assigned ephemeral ports.
|
||||
|
||||
Works by temporarily wrapping loop.create_datagram_endpoint() during
|
||||
aioice's get_component_candidates() to intercept bind(addr, 0) calls.
|
||||
"""
|
||||
import aioice.ice as _ice
|
||||
|
||||
_original = _ice.Connection.get_component_candidates
|
||||
_state = {"next_port": min_port}
|
||||
|
||||
async def _patched_get_component_candidates(self, component, addresses, timeout=5):
|
||||
loop = asyncio.get_event_loop()
|
||||
_orig_create = loop.create_datagram_endpoint
|
||||
|
||||
async def _create_with_port_range(*args, **kwargs):
|
||||
local_addr = kwargs.get("local_addr")
|
||||
if local_addr and local_addr[1] == 0:
|
||||
addr = local_addr[0]
|
||||
# Try each port in the range (wrapping around)
|
||||
attempts = max_port - min_port + 1
|
||||
for _ in range(attempts):
|
||||
port = _state["next_port"]
|
||||
_state["next_port"] = (
|
||||
min_port
|
||||
if _state["next_port"] >= max_port
|
||||
else _state["next_port"] + 1
|
||||
)
|
||||
try:
|
||||
kwargs["local_addr"] = (addr, port)
|
||||
return await _orig_create(*args, **kwargs)
|
||||
except OSError:
|
||||
continue
|
||||
# All ports exhausted, fall back to OS assignment
|
||||
logger.warning(
|
||||
"All WebRTC ports in range exhausted, falling back to OS",
|
||||
min_port=min_port,
|
||||
max_port=max_port,
|
||||
)
|
||||
kwargs["local_addr"] = (addr, 0)
|
||||
return await _orig_create(*args, **kwargs)
|
||||
|
||||
loop.create_datagram_endpoint = _create_with_port_range
|
||||
try:
|
||||
return await _original(self, component, addresses, timeout)
|
||||
finally:
|
||||
loop.create_datagram_endpoint = _orig_create
|
||||
|
||||
_ice.Connection.get_component_candidates = _patched_get_component_candidates
|
||||
logger.info(
|
||||
"aioice patched for WebRTC port range",
|
||||
min_port=min_port,
|
||||
max_port=max_port,
|
||||
)
|
||||
|
||||
|
||||
def resolve_webrtc_host(host: str) -> str:
|
||||
"""Resolve a hostname or IP to an IP address for ICE candidate rewriting."""
|
||||
try:
|
||||
ip = socket.gethostbyname(host)
|
||||
logger.info("Resolved WEBRTC_HOST", host=host, ip=ip)
|
||||
return ip
|
||||
except socket.gaierror:
|
||||
logger.warning("Could not resolve WEBRTC_HOST, using as-is", host=host)
|
||||
return host
|
||||
|
||||
|
||||
def rewrite_sdp_host(sdp: str, target_ip: str) -> str:
|
||||
"""
|
||||
Replace container-internal IPs in SDP with target_ip so that
|
||||
ICE candidates advertise a routable address.
|
||||
"""
|
||||
import aioice.ice
|
||||
|
||||
container_ips = aioice.ice.get_host_addresses(use_ipv4=True, use_ipv6=False)
|
||||
for ip in container_ips:
|
||||
if ip != "127.0.0.1" and ip != target_ip:
|
||||
sdp = sdp.replace(ip, target_ip)
|
||||
return sdp
|
||||
@@ -8,21 +8,8 @@ from reflector.settings import settings
|
||||
logger = structlog.get_logger(__name__)
|
||||
|
||||
# Polling intervals (seconds)
|
||||
# CELERY_BEAT_POLL_INTERVAL overrides all sub-5-min intervals (e.g. 300 for selfhosted)
|
||||
_override = (
|
||||
float(settings.CELERY_BEAT_POLL_INTERVAL)
|
||||
if settings.CELERY_BEAT_POLL_INTERVAL > 0
|
||||
else 0
|
||||
)
|
||||
|
||||
# Webhook-aware: 180s when webhook configured (backup mode), 15s when no webhook (primary discovery)
|
||||
POLL_DAILY_RECORDINGS_INTERVAL_SEC = _override or (
|
||||
180.0 if settings.DAILY_WEBHOOK_SECRET else 15.0
|
||||
)
|
||||
SQS_POLL_INTERVAL = _override or float(settings.SQS_POLLING_TIMEOUT_SECONDS)
|
||||
RECONCILIATION_INTERVAL = _override or 30.0
|
||||
ICS_SYNC_INTERVAL = _override or 60.0
|
||||
UPCOMING_MEETINGS_INTERVAL = _override or 30.0
|
||||
POLL_DAILY_RECORDINGS_INTERVAL_SEC = 180.0 if settings.DAILY_WEBHOOK_SECRET else 15.0
|
||||
|
||||
if celery.current_app.main != "default":
|
||||
logger.info(f"Celery already configured ({celery.current_app})")
|
||||
@@ -46,11 +33,11 @@ else:
|
||||
app.conf.beat_schedule = {
|
||||
"process_messages": {
|
||||
"task": "reflector.worker.process.process_messages",
|
||||
"schedule": SQS_POLL_INTERVAL,
|
||||
"schedule": float(settings.SQS_POLLING_TIMEOUT_SECONDS),
|
||||
},
|
||||
"process_meetings": {
|
||||
"task": "reflector.worker.process.process_meetings",
|
||||
"schedule": SQS_POLL_INTERVAL,
|
||||
"schedule": float(settings.SQS_POLLING_TIMEOUT_SECONDS),
|
||||
},
|
||||
"reprocess_failed_recordings": {
|
||||
"task": "reflector.worker.process.reprocess_failed_recordings",
|
||||
@@ -66,15 +53,15 @@ else:
|
||||
},
|
||||
"trigger_daily_reconciliation": {
|
||||
"task": "reflector.worker.process.trigger_daily_reconciliation",
|
||||
"schedule": RECONCILIATION_INTERVAL,
|
||||
"schedule": 30.0, # Every 30 seconds (queues poll tasks for all active meetings)
|
||||
},
|
||||
"sync_all_ics_calendars": {
|
||||
"task": "reflector.worker.ics_sync.sync_all_ics_calendars",
|
||||
"schedule": ICS_SYNC_INTERVAL,
|
||||
"schedule": 60.0, # Run every minute to check which rooms need sync
|
||||
},
|
||||
"create_upcoming_meetings": {
|
||||
"task": "reflector.worker.ics_sync.create_upcoming_meetings",
|
||||
"schedule": UPCOMING_MEETINGS_INTERVAL,
|
||||
"schedule": 30.0, # Run every 30 seconds to create upcoming meetings
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -1,188 +0,0 @@
|
||||
"""Typed WebSocket event models.
|
||||
|
||||
Defines Pydantic models with Literal discriminators for all WS events.
|
||||
Exposed via stub GET endpoints so ``pnpm openapi`` generates TS discriminated unions.
|
||||
"""
|
||||
|
||||
from typing import Annotated, Literal, Union
|
||||
|
||||
from pydantic import BaseModel, Discriminator
|
||||
|
||||
from reflector.db.transcripts import (
|
||||
TranscriptActionItems,
|
||||
TranscriptDuration,
|
||||
TranscriptFinalLongSummary,
|
||||
TranscriptFinalShortSummary,
|
||||
TranscriptFinalTitle,
|
||||
TranscriptStatus,
|
||||
TranscriptText,
|
||||
TranscriptWaveform,
|
||||
)
|
||||
from reflector.utils.string import NonEmptyString
|
||||
from reflector.views.transcripts import GetTranscriptTopic
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Transcript-level event name literal
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
TranscriptEventName = Literal[
|
||||
"TRANSCRIPT",
|
||||
"TOPIC",
|
||||
"STATUS",
|
||||
"FINAL_TITLE",
|
||||
"FINAL_LONG_SUMMARY",
|
||||
"FINAL_SHORT_SUMMARY",
|
||||
"ACTION_ITEMS",
|
||||
"DURATION",
|
||||
"WAVEFORM",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Transcript-level WS event wrappers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TranscriptWsTranscript(BaseModel):
|
||||
event: Literal["TRANSCRIPT"] = "TRANSCRIPT"
|
||||
data: TranscriptText
|
||||
|
||||
|
||||
class TranscriptWsTopic(BaseModel):
|
||||
event: Literal["TOPIC"] = "TOPIC"
|
||||
data: GetTranscriptTopic
|
||||
|
||||
|
||||
class TranscriptWsStatusData(BaseModel):
|
||||
value: TranscriptStatus
|
||||
|
||||
|
||||
class TranscriptWsStatus(BaseModel):
|
||||
event: Literal["STATUS"] = "STATUS"
|
||||
data: TranscriptWsStatusData
|
||||
|
||||
|
||||
class TranscriptWsFinalTitle(BaseModel):
|
||||
event: Literal["FINAL_TITLE"] = "FINAL_TITLE"
|
||||
data: TranscriptFinalTitle
|
||||
|
||||
|
||||
class TranscriptWsFinalLongSummary(BaseModel):
|
||||
event: Literal["FINAL_LONG_SUMMARY"] = "FINAL_LONG_SUMMARY"
|
||||
data: TranscriptFinalLongSummary
|
||||
|
||||
|
||||
class TranscriptWsFinalShortSummary(BaseModel):
|
||||
event: Literal["FINAL_SHORT_SUMMARY"] = "FINAL_SHORT_SUMMARY"
|
||||
data: TranscriptFinalShortSummary
|
||||
|
||||
|
||||
class TranscriptWsActionItems(BaseModel):
|
||||
event: Literal["ACTION_ITEMS"] = "ACTION_ITEMS"
|
||||
data: TranscriptActionItems
|
||||
|
||||
|
||||
class TranscriptWsDuration(BaseModel):
|
||||
event: Literal["DURATION"] = "DURATION"
|
||||
data: TranscriptDuration
|
||||
|
||||
|
||||
class TranscriptWsWaveform(BaseModel):
|
||||
event: Literal["WAVEFORM"] = "WAVEFORM"
|
||||
data: TranscriptWaveform
|
||||
|
||||
|
||||
TranscriptWsEvent = Annotated[
|
||||
Union[
|
||||
TranscriptWsTranscript,
|
||||
TranscriptWsTopic,
|
||||
TranscriptWsStatus,
|
||||
TranscriptWsFinalTitle,
|
||||
TranscriptWsFinalLongSummary,
|
||||
TranscriptWsFinalShortSummary,
|
||||
TranscriptWsActionItems,
|
||||
TranscriptWsDuration,
|
||||
TranscriptWsWaveform,
|
||||
],
|
||||
Discriminator("event"),
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# User-level event name literal
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
UserEventName = Literal[
|
||||
"TRANSCRIPT_CREATED",
|
||||
"TRANSCRIPT_DELETED",
|
||||
"TRANSCRIPT_STATUS",
|
||||
"TRANSCRIPT_FINAL_TITLE",
|
||||
"TRANSCRIPT_DURATION",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# User-level WS event data models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class UserTranscriptCreatedData(BaseModel):
|
||||
id: NonEmptyString
|
||||
|
||||
|
||||
class UserTranscriptDeletedData(BaseModel):
|
||||
id: NonEmptyString
|
||||
|
||||
|
||||
class UserTranscriptStatusData(BaseModel):
|
||||
id: NonEmptyString
|
||||
value: TranscriptStatus
|
||||
|
||||
|
||||
class UserTranscriptFinalTitleData(BaseModel):
|
||||
id: NonEmptyString
|
||||
title: NonEmptyString
|
||||
|
||||
|
||||
class UserTranscriptDurationData(BaseModel):
|
||||
id: NonEmptyString
|
||||
duration: float
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# User-level WS event wrappers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class UserWsTranscriptCreated(BaseModel):
|
||||
event: Literal["TRANSCRIPT_CREATED"] = "TRANSCRIPT_CREATED"
|
||||
data: UserTranscriptCreatedData
|
||||
|
||||
|
||||
class UserWsTranscriptDeleted(BaseModel):
|
||||
event: Literal["TRANSCRIPT_DELETED"] = "TRANSCRIPT_DELETED"
|
||||
data: UserTranscriptDeletedData
|
||||
|
||||
|
||||
class UserWsTranscriptStatus(BaseModel):
|
||||
event: Literal["TRANSCRIPT_STATUS"] = "TRANSCRIPT_STATUS"
|
||||
data: UserTranscriptStatusData
|
||||
|
||||
|
||||
class UserWsTranscriptFinalTitle(BaseModel):
|
||||
event: Literal["TRANSCRIPT_FINAL_TITLE"] = "TRANSCRIPT_FINAL_TITLE"
|
||||
data: UserTranscriptFinalTitleData
|
||||
|
||||
|
||||
class UserWsTranscriptDuration(BaseModel):
|
||||
event: Literal["TRANSCRIPT_DURATION"] = "TRANSCRIPT_DURATION"
|
||||
data: UserTranscriptDurationData
|
||||
|
||||
|
||||
UserWsEvent = Annotated[
|
||||
Union[
|
||||
UserWsTranscriptCreated,
|
||||
UserWsTranscriptDeleted,
|
||||
UserWsTranscriptStatus,
|
||||
UserWsTranscriptFinalTitle,
|
||||
UserWsTranscriptDuration,
|
||||
],
|
||||
Discriminator("event"),
|
||||
]
|
||||
@@ -48,15 +48,7 @@ class RedisPubSubManager:
|
||||
if not self.redis_connection:
|
||||
await self.connect()
|
||||
message = json.dumps(message)
|
||||
try:
|
||||
await self.redis_connection.publish(room_id, message)
|
||||
except RuntimeError:
|
||||
# Celery workers run each task in a new event loop (asyncio.run),
|
||||
# which closes the previous loop. Cached Redis connection is dead.
|
||||
# Reconnect on the current loop and retry.
|
||||
self.redis_connection = None
|
||||
await self.connect()
|
||||
await self.redis_connection.publish(room_id, message)
|
||||
await self.redis_connection.publish(room_id, message)
|
||||
|
||||
async def subscribe(self, room_id: str) -> redis.Redis:
|
||||
await self.pubsub.subscribe(room_id)
|
||||
|
||||
@@ -2,10 +2,6 @@
|
||||
|
||||
if [ "${ENTRYPOINT}" = "server" ]; then
|
||||
uv run alembic upgrade head
|
||||
# Provision admin user if password auth is configured
|
||||
if [ -n "${ADMIN_EMAIL:-}" ] && [ -n "${ADMIN_PASSWORD_HASH:-}" ]; then
|
||||
uv run python -m reflector.tools.provision_admin
|
||||
fi
|
||||
uv run uvicorn reflector.app:app --host 0.0.0.0 --port 1250
|
||||
elif [ "${ENTRYPOINT}" = "worker" ]; then
|
||||
uv run celery -A reflector.worker.app worker --loglevel=info
|
||||
|
||||
@@ -15,7 +15,8 @@ from reflector.settings import settings
|
||||
|
||||
async def setup_webhook(webhook_url: str):
|
||||
"""
|
||||
Create Daily.co webhook. Deletes any existing webhooks first, then creates the new one.
|
||||
Create or update Daily.co webhook for this environment using dailyco_api module.
|
||||
Uses DAILY_WEBHOOK_UUID to identify existing webhook.
|
||||
"""
|
||||
if not settings.DAILY_API_KEY:
|
||||
print("Error: DAILY_API_KEY not set")
|
||||
@@ -34,37 +35,79 @@ async def setup_webhook(webhook_url: str):
|
||||
]
|
||||
|
||||
async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client:
|
||||
webhooks = await client.list_webhooks()
|
||||
for wh in webhooks:
|
||||
await client.delete_webhook(wh.uuid)
|
||||
print(f"Deleted webhook {wh.uuid}")
|
||||
webhook_uuid = settings.DAILY_WEBHOOK_UUID
|
||||
|
||||
request = CreateWebhookRequest(
|
||||
url=webhook_url,
|
||||
eventTypes=event_types,
|
||||
hmac=settings.DAILY_WEBHOOK_SECRET,
|
||||
)
|
||||
result = await client.create_webhook(request)
|
||||
webhook_uuid = result.uuid
|
||||
if webhook_uuid:
|
||||
print(f"Updating existing webhook {webhook_uuid}...")
|
||||
try:
|
||||
# Note: Daily.co doesn't support PATCH well, so we delete + recreate
|
||||
await client.delete_webhook(webhook_uuid)
|
||||
print(f"Deleted old webhook {webhook_uuid}")
|
||||
|
||||
print(f"✓ Created webhook {webhook_uuid} (state: {result.state})")
|
||||
print(f" URL: {result.url}")
|
||||
request = CreateWebhookRequest(
|
||||
url=webhook_url,
|
||||
eventTypes=event_types,
|
||||
hmac=settings.DAILY_WEBHOOK_SECRET,
|
||||
)
|
||||
result = await client.create_webhook(request)
|
||||
|
||||
env_file = Path(__file__).parent.parent / ".env"
|
||||
if env_file.exists():
|
||||
lines = env_file.read_text().splitlines()
|
||||
updated = False
|
||||
for i, line in enumerate(lines):
|
||||
if line.startswith("DAILY_WEBHOOK_UUID="):
|
||||
lines[i] = f"DAILY_WEBHOOK_UUID={webhook_uuid}"
|
||||
updated = True
|
||||
break
|
||||
if not updated:
|
||||
lines.append(f"DAILY_WEBHOOK_UUID={webhook_uuid}")
|
||||
env_file.write_text("\n".join(lines) + "\n")
|
||||
print("✓ Saved DAILY_WEBHOOK_UUID to .env")
|
||||
print(
|
||||
f"✓ Created replacement webhook {result.uuid} (state: {result.state})"
|
||||
)
|
||||
print(f" URL: {result.url}")
|
||||
|
||||
return 0
|
||||
webhook_uuid = result.uuid
|
||||
|
||||
except Exception as e:
|
||||
if hasattr(e, "response") and e.response.status_code == 404:
|
||||
print(f"Webhook {webhook_uuid} not found, creating new one...")
|
||||
webhook_uuid = None # Fall through to creation
|
||||
else:
|
||||
print(f"Error updating webhook: {e}")
|
||||
return 1
|
||||
|
||||
if not webhook_uuid:
|
||||
print("Creating new webhook...")
|
||||
request = CreateWebhookRequest(
|
||||
url=webhook_url,
|
||||
eventTypes=event_types,
|
||||
hmac=settings.DAILY_WEBHOOK_SECRET,
|
||||
)
|
||||
result = await client.create_webhook(request)
|
||||
webhook_uuid = result.uuid
|
||||
|
||||
print(f"✓ Created webhook {webhook_uuid} (state: {result.state})")
|
||||
print(f" URL: {result.url}")
|
||||
print()
|
||||
print("=" * 60)
|
||||
print("IMPORTANT: Add this to your environment variables:")
|
||||
print("=" * 60)
|
||||
print(f"DAILY_WEBHOOK_UUID: {webhook_uuid}")
|
||||
print("=" * 60)
|
||||
print()
|
||||
|
||||
# Try to write UUID to .env file
|
||||
env_file = Path(__file__).parent.parent / ".env"
|
||||
if env_file.exists():
|
||||
lines = env_file.read_text().splitlines()
|
||||
updated = False
|
||||
|
||||
# Update existing DAILY_WEBHOOK_UUID line or add it
|
||||
for i, line in enumerate(lines):
|
||||
if line.startswith("DAILY_WEBHOOK_UUID="):
|
||||
lines[i] = f"DAILY_WEBHOOK_UUID={webhook_uuid}"
|
||||
updated = True
|
||||
break
|
||||
|
||||
if not updated:
|
||||
lines.append(f"DAILY_WEBHOOK_UUID={webhook_uuid}")
|
||||
|
||||
env_file.write_text("\n".join(lines) + "\n")
|
||||
print(f"✓ Also saved to local .env file")
|
||||
else:
|
||||
print(f"⚠ Local .env file not found - please add manually")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
@@ -74,7 +117,11 @@ if __name__ == "__main__":
|
||||
"Example: python recreate_daily_webhook.py https://example.com/v1/daily/webhook"
|
||||
)
|
||||
print()
|
||||
print("Deletes all existing webhooks, then creates a new one.")
|
||||
print("Behavior:")
|
||||
print(" - If DAILY_WEBHOOK_UUID set: Deletes old webhook, creates new one")
|
||||
print(
|
||||
" - If DAILY_WEBHOOK_UUID empty: Creates new webhook, saves UUID to .env"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(asyncio.run(setup_webhook(sys.argv[1])))
|
||||
|
||||
@@ -1,201 +0,0 @@
|
||||
"""Tests for the password auth backend."""
|
||||
|
||||
import pytest
|
||||
from httpx import AsyncClient
|
||||
from jose import jwt
|
||||
|
||||
from reflector.auth.password_utils import hash_password
|
||||
from reflector.settings import settings
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
async def password_app():
|
||||
"""Create a minimal FastAPI app with the password auth router."""
|
||||
from fastapi import FastAPI
|
||||
|
||||
from reflector.auth import auth_password
|
||||
|
||||
app = FastAPI()
|
||||
app.include_router(auth_password.router, prefix="/v1")
|
||||
# Reset rate limiter between tests
|
||||
auth_password._login_attempts.clear()
|
||||
return app
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
async def password_client(password_app):
|
||||
"""Create a test client for the password auth app."""
|
||||
async with AsyncClient(app=password_app, base_url="http://test/v1") as client:
|
||||
yield client
|
||||
|
||||
|
||||
async def _create_user_with_password(email: str, password: str):
|
||||
"""Helper to create a user with a password hash in the DB."""
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
pw_hash = hash_password(password)
|
||||
return await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid=f"local:{email}",
|
||||
email=email,
|
||||
password_hash=pw_hash,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_login_success(password_client, setup_database):
|
||||
await _create_user_with_password("admin@test.com", "testpass123")
|
||||
|
||||
response = await password_client.post(
|
||||
"/auth/login",
|
||||
json={"email": "admin@test.com", "password": "testpass123"},
|
||||
)
|
||||
|
||||
assert response.status_code == 200
|
||||
data = response.json()
|
||||
assert "access_token" in data
|
||||
assert data["token_type"] == "bearer"
|
||||
assert data["expires_in"] > 0
|
||||
|
||||
# Verify the JWT is valid
|
||||
payload = jwt.decode(
|
||||
data["access_token"],
|
||||
settings.SECRET_KEY,
|
||||
algorithms=["HS256"],
|
||||
)
|
||||
assert payload["email"] == "admin@test.com"
|
||||
assert "sub" in payload
|
||||
assert "exp" in payload
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_login_wrong_password(password_client, setup_database):
|
||||
await _create_user_with_password("user@test.com", "correctpassword")
|
||||
|
||||
response = await password_client.post(
|
||||
"/auth/login",
|
||||
json={"email": "user@test.com", "password": "wrongpassword"},
|
||||
)
|
||||
|
||||
assert response.status_code == 401
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_login_nonexistent_user(password_client, setup_database):
|
||||
response = await password_client.post(
|
||||
"/auth/login",
|
||||
json={"email": "nobody@test.com", "password": "anything"},
|
||||
)
|
||||
|
||||
assert response.status_code == 401
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_login_user_without_password_hash(password_client, setup_database):
|
||||
"""User exists but has no password_hash (e.g. Authentik user)."""
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid="authentik:abc123",
|
||||
email="oidc@test.com",
|
||||
)
|
||||
|
||||
response = await password_client.post(
|
||||
"/auth/login",
|
||||
json={"email": "oidc@test.com", "password": "anything"},
|
||||
)
|
||||
|
||||
assert response.status_code == 401
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_login_rate_limiting(password_client, setup_database):
|
||||
from reflector.auth import auth_password
|
||||
|
||||
# Reset rate limiter
|
||||
auth_password._login_attempts.clear()
|
||||
|
||||
for _ in range(10):
|
||||
await password_client.post(
|
||||
"/auth/login",
|
||||
json={"email": "fake@test.com", "password": "wrong"},
|
||||
)
|
||||
|
||||
# 11th attempt should be rate-limited
|
||||
response = await password_client.post(
|
||||
"/auth/login",
|
||||
json={"email": "fake@test.com", "password": "wrong"},
|
||||
)
|
||||
|
||||
assert response.status_code == 429
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_jwt_create_and_verify():
|
||||
from reflector.auth.auth_password import _create_access_token, _verify_token
|
||||
|
||||
token, expires_in = _create_access_token("user-123", "test@example.com")
|
||||
assert expires_in > 0
|
||||
|
||||
payload = _verify_token(token)
|
||||
assert payload["sub"] == "user-123"
|
||||
assert payload["email"] == "test@example.com"
|
||||
assert "exp" in payload
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_authenticate_user_with_jwt():
|
||||
from reflector.auth.auth_password import (
|
||||
_authenticate_user,
|
||||
_create_access_token,
|
||||
)
|
||||
|
||||
token, _ = _create_access_token("user-abc", "abc@test.com")
|
||||
user = await _authenticate_user(token, None)
|
||||
|
||||
assert user is not None
|
||||
assert user.sub == "user-abc"
|
||||
assert user.email == "abc@test.com"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_authenticate_user_invalid_jwt():
|
||||
from fastapi import HTTPException
|
||||
|
||||
from reflector.auth.auth_password import _authenticate_user
|
||||
|
||||
with pytest.raises(HTTPException) as exc_info:
|
||||
await _authenticate_user("invalid.jwt.token", None)
|
||||
assert exc_info.value.status_code == 401
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_authenticate_user_no_credentials():
|
||||
from reflector.auth.auth_password import _authenticate_user
|
||||
|
||||
user = await _authenticate_user(None, None)
|
||||
assert user is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_current_user_raises_without_token():
|
||||
"""Verify that current_user dependency raises 401 without token."""
|
||||
from fastapi import Depends, FastAPI
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
from reflector.auth import auth_password
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
@app.get("/test")
|
||||
async def test_endpoint(user=Depends(auth_password.current_user)):
|
||||
return {"user": user.sub}
|
||||
|
||||
# Use sync TestClient for simplicity
|
||||
client = TestClient(app)
|
||||
response = client.get("/test")
|
||||
# OAuth2PasswordBearer with auto_error=False returns None, then current_user raises 401
|
||||
assert response.status_code == 401
|
||||
@@ -1,97 +0,0 @@
|
||||
"""Tests for admin user creation logic (used by create_admin CLI tool)."""
|
||||
|
||||
import pytest
|
||||
|
||||
from reflector.auth.password_utils import hash_password, verify_password
|
||||
from reflector.db.users import user_controller
|
||||
from reflector.utils import generate_uuid4
|
||||
|
||||
|
||||
async def _provision_admin(email: str, password: str):
|
||||
"""Mirrors the logic in create_admin.create_admin() without managing DB connections."""
|
||||
password_hash = hash_password(password)
|
||||
|
||||
existing = await user_controller.get_by_email(email)
|
||||
if existing:
|
||||
await user_controller.set_password_hash(existing.id, password_hash)
|
||||
else:
|
||||
await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid=f"local:{email}",
|
||||
email=email,
|
||||
password_hash=password_hash,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_admin_new_user(setup_database):
|
||||
await _provision_admin("newadmin@test.com", "password123")
|
||||
|
||||
user = await user_controller.get_by_email("newadmin@test.com")
|
||||
assert user is not None
|
||||
assert user.email == "newadmin@test.com"
|
||||
assert user.authentik_uid == "local:newadmin@test.com"
|
||||
assert user.password_hash is not None
|
||||
assert verify_password("password123", user.password_hash)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_admin_updates_existing(setup_database):
|
||||
# Create first
|
||||
await _provision_admin("admin@test.com", "oldpassword")
|
||||
user1 = await user_controller.get_by_email("admin@test.com")
|
||||
|
||||
# Update password
|
||||
await _provision_admin("admin@test.com", "newpassword")
|
||||
user2 = await user_controller.get_by_email("admin@test.com")
|
||||
|
||||
assert user1.id == user2.id # same user, not duplicated
|
||||
assert verify_password("newpassword", user2.password_hash)
|
||||
assert not verify_password("oldpassword", user2.password_hash)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_admin_idempotent(setup_database):
|
||||
await _provision_admin("admin@test.com", "samepassword")
|
||||
await _provision_admin("admin@test.com", "samepassword")
|
||||
|
||||
# Should only have one user
|
||||
users = await user_controller.list_all()
|
||||
admin_users = [u for u in users if u.email == "admin@test.com"]
|
||||
assert len(admin_users) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_or_update_with_password_hash(setup_database):
|
||||
"""Test the extended create_or_update method with password_hash parameter."""
|
||||
pw_hash = hash_password("test123")
|
||||
user = await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid="local:test@example.com",
|
||||
email="test@example.com",
|
||||
password_hash=pw_hash,
|
||||
)
|
||||
|
||||
assert user.password_hash == pw_hash
|
||||
|
||||
fetched = await user_controller.get_by_email("test@example.com")
|
||||
assert fetched is not None
|
||||
assert verify_password("test123", fetched.password_hash)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_set_password_hash(setup_database):
|
||||
"""Test the set_password_hash method."""
|
||||
user = await user_controller.create_or_update(
|
||||
id=generate_uuid4(),
|
||||
authentik_uid="local:pw@test.com",
|
||||
email="pw@test.com",
|
||||
)
|
||||
assert user.password_hash is None
|
||||
|
||||
pw_hash = hash_password("newpass")
|
||||
await user_controller.set_password_hash(user.id, pw_hash)
|
||||
|
||||
updated = await user_controller.get_by_email("pw@test.com")
|
||||
assert updated is not None
|
||||
assert verify_password("newpass", updated.password_hash)
|
||||
959
server/tests/test_dag_progress.py
Normal file
@@ -0,0 +1,959 @@
|
||||
"""Tests for DAG progress models and transform function.
|
||||
|
||||
Tests the extract_dag_tasks function that converts Hatchet V1WorkflowRunDetails
|
||||
into structured DagTask list for frontend consumption.
|
||||
"""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from reflector.hatchet.constants import TaskName
|
||||
from reflector.hatchet.dag_progress import (
|
||||
DagStatusData,
|
||||
DagTask,
|
||||
DagTaskStatus,
|
||||
extract_dag_tasks,
|
||||
)
|
||||
|
||||
|
||||
def _make_shape_item(
|
||||
step_id: str,
|
||||
task_name: str,
|
||||
children_step_ids: list[str] | None = None,
|
||||
) -> MagicMock:
|
||||
"""Create a mock WorkflowRunShapeItemForWorkflowRunDetails."""
|
||||
item = MagicMock()
|
||||
item.step_id = step_id
|
||||
item.task_name = task_name
|
||||
item.children_step_ids = children_step_ids or []
|
||||
return item
|
||||
|
||||
|
||||
def _make_task_summary(
|
||||
step_id: str,
|
||||
status: str = "QUEUED",
|
||||
started_at: datetime | None = None,
|
||||
finished_at: datetime | None = None,
|
||||
duration: int | None = None,
|
||||
error_message: str | None = None,
|
||||
task_external_id: str | None = None,
|
||||
num_spawned_children: int | None = None,
|
||||
children: list | None = None,
|
||||
) -> MagicMock:
|
||||
"""Create a mock V1TaskSummary."""
|
||||
from hatchet_sdk.clients.rest.models import V1TaskStatus
|
||||
|
||||
task = MagicMock()
|
||||
task.step_id = step_id
|
||||
task.status = V1TaskStatus(status)
|
||||
task.started_at = started_at
|
||||
task.finished_at = finished_at
|
||||
task.duration = duration
|
||||
task.error_message = error_message
|
||||
task.task_external_id = task_external_id or f"ext-{step_id}"
|
||||
task.num_spawned_children = num_spawned_children
|
||||
task.children = children or []
|
||||
return task
|
||||
|
||||
|
||||
def _make_details(
|
||||
shape: list,
|
||||
tasks: list,
|
||||
run_id: str = "test-run-id",
|
||||
) -> MagicMock:
|
||||
"""Create a mock V1WorkflowRunDetails."""
|
||||
details = MagicMock()
|
||||
details.shape = shape
|
||||
details.tasks = tasks
|
||||
details.task_events = []
|
||||
details.run = MagicMock()
|
||||
details.run.metadata = MagicMock()
|
||||
details.run.metadata.id = run_id
|
||||
return details
|
||||
|
||||
|
||||
class TestExtractDagTasksBasic:
|
||||
"""Test basic extraction of DAG tasks from workflow run details."""
|
||||
|
||||
def test_empty_shape_returns_empty_list(self):
|
||||
details = _make_details(shape=[], tasks=[])
|
||||
result = extract_dag_tasks(details)
|
||||
assert result == []
|
||||
|
||||
def test_single_task_queued(self):
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [_make_task_summary("s1", status="QUEUED")]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].name == "get_recording"
|
||||
assert result[0].status == DagTaskStatus.QUEUED
|
||||
assert result[0].parents == []
|
||||
assert result[0].started_at is None
|
||||
assert result[0].finished_at is None
|
||||
assert result[0].duration_seconds is None
|
||||
assert result[0].error is None
|
||||
assert result[0].children_total is None
|
||||
assert result[0].children_completed is None
|
||||
assert result[0].progress_pct is None
|
||||
|
||||
def test_completed_task_with_duration(self):
|
||||
now = datetime.now(timezone.utc)
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [
|
||||
_make_task_summary(
|
||||
"s1",
|
||||
status="COMPLETED",
|
||||
started_at=now,
|
||||
finished_at=now,
|
||||
duration=1500, # milliseconds
|
||||
)
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].status == DagTaskStatus.COMPLETED
|
||||
assert result[0].duration_seconds == 1.5
|
||||
assert result[0].started_at == now
|
||||
assert result[0].finished_at == now
|
||||
|
||||
def test_failed_task_with_error(self):
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [
|
||||
_make_task_summary(
|
||||
"s1",
|
||||
status="FAILED",
|
||||
error_message="Traceback (most recent call last):\n File something\nConnectionError: connection refused",
|
||||
)
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].status == DagTaskStatus.FAILED
|
||||
assert result[0].error == "ConnectionError: connection refused"
|
||||
|
||||
def test_running_task(self):
|
||||
now = datetime.now(timezone.utc)
|
||||
shape = [_make_shape_item("s1", "mixdown_tracks")]
|
||||
tasks = [
|
||||
_make_task_summary(
|
||||
"s1",
|
||||
status="RUNNING",
|
||||
started_at=now,
|
||||
duration=5000,
|
||||
)
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].status == DagTaskStatus.RUNNING
|
||||
assert result[0].started_at == now
|
||||
assert result[0].duration_seconds == 5.0
|
||||
|
||||
def test_cancelled_task(self):
|
||||
shape = [_make_shape_item("s1", "post_zulip")]
|
||||
tasks = [_make_task_summary("s1", status="CANCELLED")]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].status == DagTaskStatus.CANCELLED
|
||||
|
||||
|
||||
class TestExtractDagTasksTopology:
|
||||
"""Test topological ordering and parent extraction."""
|
||||
|
||||
def test_linear_chain_parents(self):
|
||||
"""A -> B -> C should produce correct parents."""
|
||||
shape = [
|
||||
_make_shape_item("s1", "get_recording", children_step_ids=["s2"]),
|
||||
_make_shape_item("s2", "get_participants", children_step_ids=["s3"]),
|
||||
_make_shape_item("s3", "process_tracks"),
|
||||
]
|
||||
tasks = [
|
||||
_make_task_summary("s1", status="COMPLETED"),
|
||||
_make_task_summary("s2", status="COMPLETED"),
|
||||
_make_task_summary("s3", status="QUEUED"),
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert [t.name for t in result] == [
|
||||
"get_recording",
|
||||
"get_participants",
|
||||
"process_tracks",
|
||||
]
|
||||
assert result[0].parents == []
|
||||
assert result[1].parents == ["get_recording"]
|
||||
assert result[2].parents == ["get_participants"]
|
||||
|
||||
def test_diamond_dag(self):
|
||||
"""
|
||||
A -> B, A -> C, B -> D, C -> D
|
||||
D should have parents [B, C] (or [C, B] depending on sort).
|
||||
"""
|
||||
shape = [
|
||||
_make_shape_item("s1", "get_recording", children_step_ids=["s2", "s3"]),
|
||||
_make_shape_item("s2", "mixdown_tracks", children_step_ids=["s4"]),
|
||||
_make_shape_item("s3", "detect_topics", children_step_ids=["s4"]),
|
||||
_make_shape_item("s4", "finalize"),
|
||||
]
|
||||
tasks = [
|
||||
_make_task_summary("s1", status="COMPLETED"),
|
||||
_make_task_summary("s2", status="RUNNING"),
|
||||
_make_task_summary("s3", status="RUNNING"),
|
||||
_make_task_summary("s4", status="QUEUED"),
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
# Topological: s1 first, s2/s3 in some order, s4 last
|
||||
assert result[0].name == "get_recording"
|
||||
assert result[-1].name == "finalize"
|
||||
finalize = result[-1]
|
||||
assert set(finalize.parents) == {"mixdown_tracks", "detect_topics"}
|
||||
|
||||
def test_topological_order_is_stable(self):
|
||||
"""Verify deterministic ordering (sorted queue in Kahn's)."""
|
||||
shape = [
|
||||
_make_shape_item("s_c", "task_c"),
|
||||
_make_shape_item("s_a", "task_a", children_step_ids=["s_c"]),
|
||||
_make_shape_item("s_b", "task_b", children_step_ids=["s_c"]),
|
||||
]
|
||||
tasks = [
|
||||
_make_task_summary("s_c", status="QUEUED"),
|
||||
_make_task_summary("s_a", status="COMPLETED"),
|
||||
_make_task_summary("s_b", status="COMPLETED"),
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
# s_a and s_b both roots with in-degree 0; sorted alphabetically by step_id
|
||||
names = [t.name for t in result]
|
||||
assert names[-1] == "task_c"
|
||||
# First two should be task_a, task_b (sorted by step_id: s_a < s_b)
|
||||
assert names[0] == "task_a"
|
||||
assert names[1] == "task_b"
|
||||
|
||||
def test_production_dag_shape(self):
|
||||
"""Test the real 15-task pipeline topology with mixed statuses.
|
||||
|
||||
Simulates a mid-pipeline state where early tasks completed,
|
||||
middle tasks running, and later tasks still queued.
|
||||
"""
|
||||
# Production DAG edges (parent -> children):
|
||||
# get_recording -> get_participants
|
||||
# get_participants -> process_tracks
|
||||
# process_tracks -> mixdown_tracks, detect_topics, finalize
|
||||
# mixdown_tracks -> generate_waveform
|
||||
# detect_topics -> generate_title, extract_subjects
|
||||
# extract_subjects -> process_subjects, identify_action_items
|
||||
# process_subjects -> generate_recap
|
||||
# generate_title -> finalize
|
||||
# generate_recap -> finalize
|
||||
# identify_action_items -> finalize
|
||||
# finalize -> cleanup_consent
|
||||
# cleanup_consent -> post_zulip, send_webhook
|
||||
shape = [
|
||||
_make_shape_item(
|
||||
"s_get_recording", TaskName.GET_RECORDING, ["s_get_participants"]
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_get_participants", TaskName.GET_PARTICIPANTS, ["s_process_tracks"]
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_process_tracks",
|
||||
TaskName.PROCESS_TRACKS,
|
||||
["s_mixdown_tracks", "s_detect_topics", "s_finalize"],
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_mixdown_tracks", TaskName.MIXDOWN_TRACKS, ["s_generate_waveform"]
|
||||
),
|
||||
_make_shape_item("s_generate_waveform", TaskName.GENERATE_WAVEFORM),
|
||||
_make_shape_item(
|
||||
"s_detect_topics",
|
||||
TaskName.DETECT_TOPICS,
|
||||
["s_generate_title", "s_extract_subjects"],
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_generate_title", TaskName.GENERATE_TITLE, ["s_finalize"]
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_extract_subjects",
|
||||
TaskName.EXTRACT_SUBJECTS,
|
||||
["s_process_subjects", "s_identify_action_items"],
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_process_subjects", TaskName.PROCESS_SUBJECTS, ["s_generate_recap"]
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_generate_recap", TaskName.GENERATE_RECAP, ["s_finalize"]
|
||||
),
|
||||
_make_shape_item(
|
||||
"s_identify_action_items",
|
||||
TaskName.IDENTIFY_ACTION_ITEMS,
|
||||
["s_finalize"],
|
||||
),
|
||||
_make_shape_item("s_finalize", TaskName.FINALIZE, ["s_cleanup_consent"]),
|
||||
_make_shape_item(
|
||||
"s_cleanup_consent",
|
||||
TaskName.CLEANUP_CONSENT,
|
||||
["s_post_zulip", "s_send_webhook"],
|
||||
),
|
||||
_make_shape_item("s_post_zulip", TaskName.POST_ZULIP),
|
||||
_make_shape_item("s_send_webhook", TaskName.SEND_WEBHOOK),
|
||||
]
|
||||
|
||||
# Mid-pipeline: early tasks done, middle running, later queued
|
||||
tasks = [
|
||||
_make_task_summary("s_get_recording", status="COMPLETED"),
|
||||
_make_task_summary("s_get_participants", status="COMPLETED"),
|
||||
_make_task_summary("s_process_tracks", status="COMPLETED"),
|
||||
_make_task_summary("s_mixdown_tracks", status="RUNNING"),
|
||||
_make_task_summary("s_generate_waveform", status="QUEUED"),
|
||||
_make_task_summary("s_detect_topics", status="RUNNING"),
|
||||
_make_task_summary("s_generate_title", status="QUEUED"),
|
||||
_make_task_summary("s_extract_subjects", status="QUEUED"),
|
||||
_make_task_summary("s_process_subjects", status="QUEUED"),
|
||||
_make_task_summary("s_generate_recap", status="QUEUED"),
|
||||
_make_task_summary("s_identify_action_items", status="QUEUED"),
|
||||
_make_task_summary("s_finalize", status="QUEUED"),
|
||||
_make_task_summary("s_cleanup_consent", status="QUEUED"),
|
||||
_make_task_summary("s_post_zulip", status="QUEUED"),
|
||||
_make_task_summary("s_send_webhook", status="QUEUED"),
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
# All 15 tasks present
|
||||
assert len(result) == 15
|
||||
result_names = [t.name for t in result]
|
||||
assert set(result_names) == {
|
||||
TaskName.GET_RECORDING,
|
||||
TaskName.GET_PARTICIPANTS,
|
||||
TaskName.PROCESS_TRACKS,
|
||||
TaskName.MIXDOWN_TRACKS,
|
||||
TaskName.GENERATE_WAVEFORM,
|
||||
TaskName.DETECT_TOPICS,
|
||||
TaskName.GENERATE_TITLE,
|
||||
TaskName.EXTRACT_SUBJECTS,
|
||||
TaskName.PROCESS_SUBJECTS,
|
||||
TaskName.GENERATE_RECAP,
|
||||
TaskName.IDENTIFY_ACTION_ITEMS,
|
||||
TaskName.FINALIZE,
|
||||
TaskName.CLEANUP_CONSENT,
|
||||
TaskName.POST_ZULIP,
|
||||
TaskName.SEND_WEBHOOK,
|
||||
}
|
||||
|
||||
# Topological order invariant: no task appears before its parents
|
||||
name_to_index = {t.name: i for i, t in enumerate(result)}
|
||||
for task in result:
|
||||
for parent_name in task.parents:
|
||||
assert name_to_index[parent_name] < name_to_index[task.name], (
|
||||
f"Parent {parent_name} (idx {name_to_index[parent_name]}) "
|
||||
f"must appear before {task.name} (idx {name_to_index[task.name]})"
|
||||
)
|
||||
|
||||
# finalize has exactly 4 parents
|
||||
finalize = next(t for t in result if t.name == TaskName.FINALIZE)
|
||||
assert set(finalize.parents) == {
|
||||
TaskName.PROCESS_TRACKS,
|
||||
TaskName.GENERATE_TITLE,
|
||||
TaskName.GENERATE_RECAP,
|
||||
TaskName.IDENTIFY_ACTION_ITEMS,
|
||||
}
|
||||
|
||||
# cleanup_consent has 1 parent (finalize)
|
||||
cleanup = next(t for t in result if t.name == TaskName.CLEANUP_CONSENT)
|
||||
assert cleanup.parents == [TaskName.FINALIZE]
|
||||
|
||||
# post_zulip and send_webhook both have cleanup_consent as parent
|
||||
post_zulip = next(t for t in result if t.name == TaskName.POST_ZULIP)
|
||||
send_webhook = next(t for t in result if t.name == TaskName.SEND_WEBHOOK)
|
||||
assert post_zulip.parents == [TaskName.CLEANUP_CONSENT]
|
||||
assert send_webhook.parents == [TaskName.CLEANUP_CONSENT]
|
||||
|
||||
# Verify statuses propagated correctly
|
||||
assert (
|
||||
next(t for t in result if t.name == TaskName.GET_RECORDING).status
|
||||
== DagTaskStatus.COMPLETED
|
||||
)
|
||||
assert (
|
||||
next(t for t in result if t.name == TaskName.MIXDOWN_TRACKS).status
|
||||
== DagTaskStatus.RUNNING
|
||||
)
|
||||
assert (
|
||||
next(t for t in result if t.name == TaskName.FINALIZE).status
|
||||
== DagTaskStatus.QUEUED
|
||||
)
|
||||
|
||||
def test_topological_sort_invariant_complex_dag(self):
|
||||
"""For a complex DAG, every task's parents appear earlier in the list.
|
||||
|
||||
Uses a wider branching/merging DAG than diamond to stress the invariant.
|
||||
"""
|
||||
# DAG: A -> B, A -> C, A -> D, B -> E, C -> E, C -> F, D -> F, E -> G, F -> G
|
||||
shape = [
|
||||
_make_shape_item("s_a", "task_a", ["s_b", "s_c", "s_d"]),
|
||||
_make_shape_item("s_b", "task_b", ["s_e"]),
|
||||
_make_shape_item("s_c", "task_c", ["s_e", "s_f"]),
|
||||
_make_shape_item("s_d", "task_d", ["s_f"]),
|
||||
_make_shape_item("s_e", "task_e", ["s_g"]),
|
||||
_make_shape_item("s_f", "task_f", ["s_g"]),
|
||||
_make_shape_item("s_g", "task_g"),
|
||||
]
|
||||
tasks = [
|
||||
_make_task_summary("s_a", status="COMPLETED"),
|
||||
_make_task_summary("s_b", status="COMPLETED"),
|
||||
_make_task_summary("s_c", status="RUNNING"),
|
||||
_make_task_summary("s_d", status="COMPLETED"),
|
||||
_make_task_summary("s_e", status="QUEUED"),
|
||||
_make_task_summary("s_f", status="QUEUED"),
|
||||
_make_task_summary("s_g", status="QUEUED"),
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert len(result) == 7
|
||||
name_to_index = {t.name: i for i, t in enumerate(result)}
|
||||
|
||||
# Verify invariant: every parent appears before its child
|
||||
for task in result:
|
||||
for parent_name in task.parents:
|
||||
assert name_to_index[parent_name] < name_to_index[task.name], (
|
||||
f"Parent {parent_name} (idx {name_to_index[parent_name]}) "
|
||||
f"must appear before {task.name} (idx {name_to_index[task.name]})"
|
||||
)
|
||||
|
||||
# task_g has 2 parents
|
||||
task_g = next(t for t in result if t.name == "task_g")
|
||||
assert set(task_g.parents) == {"task_e", "task_f"}
|
||||
|
||||
# task_e has 2 parents
|
||||
task_e = next(t for t in result if t.name == "task_e")
|
||||
assert set(task_e.parents) == {"task_b", "task_c"}
|
||||
|
||||
# task_a is root (first in topological order)
|
||||
assert result[0].name == "task_a"
|
||||
assert result[0].parents == []
|
||||
|
||||
|
||||
class TestExtractDagTasksFanOut:
|
||||
"""Test fan-out tasks with spawned children."""
|
||||
|
||||
def test_fan_out_children_counts(self):
|
||||
from hatchet_sdk.clients.rest.models import V1TaskStatus
|
||||
|
||||
child_mocks = []
|
||||
for status in ["COMPLETED", "COMPLETED", "RUNNING", "QUEUED"]:
|
||||
child = MagicMock()
|
||||
child.status = V1TaskStatus(status)
|
||||
child_mocks.append(child)
|
||||
|
||||
shape = [_make_shape_item("s1", "process_tracks")]
|
||||
tasks = [
|
||||
_make_task_summary(
|
||||
"s1",
|
||||
status="RUNNING",
|
||||
num_spawned_children=4,
|
||||
children=child_mocks,
|
||||
)
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].children_total == 4
|
||||
assert result[0].children_completed == 2
|
||||
|
||||
def test_no_children_when_no_spawn(self):
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [
|
||||
_make_task_summary("s1", status="COMPLETED", num_spawned_children=None)
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].children_total is None
|
||||
assert result[0].children_completed is None
|
||||
|
||||
def test_zero_spawned_children(self):
|
||||
shape = [_make_shape_item("s1", "process_tracks")]
|
||||
tasks = [_make_task_summary("s1", status="COMPLETED", num_spawned_children=0)]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert result[0].children_total is None
|
||||
assert result[0].children_completed is None
|
||||
|
||||
|
||||
class TestExtractDagTasksErrorExtraction:
|
||||
"""Test error message extraction logic."""
|
||||
|
||||
def test_simple_error(self):
|
||||
shape = [_make_shape_item("s1", "mixdown_tracks")]
|
||||
tasks = [
|
||||
_make_task_summary(
|
||||
"s1", status="FAILED", error_message="ValueError: no tracks"
|
||||
)
|
||||
]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
assert result[0].error == "ValueError: no tracks"
|
||||
|
||||
def test_traceback_extracts_meaningful_line(self):
|
||||
error = (
|
||||
"Traceback (most recent call last):\n"
|
||||
' File "/app/something.py", line 42\n'
|
||||
"RuntimeError: out of memory"
|
||||
)
|
||||
shape = [_make_shape_item("s1", "mixdown_tracks")]
|
||||
tasks = [_make_task_summary("s1", status="FAILED", error_message=error)]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
assert result[0].error == "RuntimeError: out of memory"
|
||||
|
||||
def test_no_error_when_none(self):
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [_make_task_summary("s1", status="COMPLETED", error_message=None)]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
assert result[0].error is None
|
||||
|
||||
def test_empty_error_message(self):
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [_make_task_summary("s1", status="FAILED", error_message="")]
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
assert result[0].error is None
|
||||
|
||||
|
||||
class TestExtractDagTasksMissingData:
|
||||
"""Test edge cases with missing task data."""
|
||||
|
||||
def test_shape_without_matching_task(self):
|
||||
"""Shape has a step but tasks list doesn't contain it."""
|
||||
shape = [_make_shape_item("s1", "get_recording")]
|
||||
tasks = [] # No matching task
|
||||
details = _make_details(shape, tasks)
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0].name == "get_recording"
|
||||
assert result[0].status == DagTaskStatus.QUEUED # default when no task data
|
||||
assert result[0].started_at is None
|
||||
|
||||
def test_none_shape_returns_empty(self):
|
||||
details = _make_details(shape=[], tasks=[])
|
||||
details.shape = None
|
||||
|
||||
result = extract_dag_tasks(details)
|
||||
assert result == []
|
||||
|
||||
|
||||
class TestDagStatusData:
|
||||
"""Test DagStatusData model serialization."""
|
||||
|
||||
def test_serialization(self):
|
||||
task = DagTask(
|
||||
name="get_recording",
|
||||
status=DagTaskStatus.COMPLETED,
|
||||
started_at=datetime(2025, 1, 1, tzinfo=timezone.utc),
|
||||
finished_at=datetime(2025, 1, 1, 0, 0, 1, tzinfo=timezone.utc),
|
||||
duration_seconds=1.0,
|
||||
parents=[],
|
||||
error=None,
|
||||
children_total=None,
|
||||
children_completed=None,
|
||||
progress_pct=None,
|
||||
)
|
||||
data = DagStatusData(workflow_run_id="test-123", tasks=[task])
|
||||
dumped = data.model_dump(mode="json")
|
||||
|
||||
assert dumped["workflow_run_id"] == "test-123"
|
||||
assert len(dumped["tasks"]) == 1
|
||||
assert dumped["tasks"][0]["name"] == "get_recording"
|
||||
assert dumped["tasks"][0]["status"] == "completed"
|
||||
assert dumped["tasks"][0]["duration_seconds"] == 1.0
|
||||
|
||||
|
||||
class AsyncContextManager:
|
||||
"""No-op async context manager for mocking fresh_db_connection."""
|
||||
|
||||
async def __aenter__(self):
|
||||
return None
|
||||
|
||||
async def __aexit__(self, *args):
|
||||
return None
|
||||
|
||||
|
||||
class TestBroadcastDagStatus:
|
||||
"""Test broadcast_dag_status function.
|
||||
|
||||
broadcast_dag_status uses deferred imports inside its function body.
|
||||
We mock the source modules/objects before calling the function.
|
||||
Importing daily_multitrack_pipeline triggers a cascade
|
||||
(subject_processing -> HatchetClientManager.get_client at module level),
|
||||
so we set _instance before the import to prevent real SDK init.
|
||||
"""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _setup_hatchet_mock(self):
|
||||
"""Set HatchetClientManager._instance to a mock to prevent real SDK init.
|
||||
|
||||
Module-level code in workflow files calls get_client() during import.
|
||||
Setting _instance before import avoids ClientConfig validation.
|
||||
"""
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
|
||||
original = HatchetClientManager._instance
|
||||
HatchetClientManager._instance = MagicMock()
|
||||
yield
|
||||
HatchetClientManager._instance = original
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_broadcasts_dag_status(self):
|
||||
"""broadcast_dag_status fetches run, transforms, and broadcasts."""
|
||||
mock_transcript = MagicMock()
|
||||
mock_transcript.id = "t-123"
|
||||
|
||||
mock_details = _make_details(
|
||||
shape=[_make_shape_item("s1", "get_recording")],
|
||||
tasks=[_make_task_summary("s1", status="COMPLETED")],
|
||||
run_id="wf-abc",
|
||||
)
|
||||
|
||||
mock_client = MagicMock()
|
||||
mock_client.runs.aio_get = AsyncMock(return_value=mock_details)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.client.HatchetClientManager.get_client",
|
||||
return_value=mock_client,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.broadcast.append_event_and_broadcast",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_broadcast,
|
||||
patch(
|
||||
"reflector.db.transcripts.transcripts_controller.get_by_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=mock_transcript,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.fresh_db_connection",
|
||||
return_value=AsyncContextManager(),
|
||||
),
|
||||
):
|
||||
from reflector.hatchet.dag_progress import broadcast_dag_status
|
||||
|
||||
await broadcast_dag_status("t-123", "wf-abc")
|
||||
|
||||
mock_client.runs.aio_get.assert_called_once_with("wf-abc")
|
||||
mock_broadcast.assert_called_once()
|
||||
call_args = mock_broadcast.call_args
|
||||
assert call_args[0][0] == "t-123" # transcript_id
|
||||
assert call_args[0][1] is mock_transcript # transcript
|
||||
assert call_args[0][2] == "DAG_STATUS" # event_name
|
||||
data = call_args[0][3]
|
||||
assert isinstance(data, DagStatusData)
|
||||
assert data.workflow_run_id == "wf-abc"
|
||||
assert len(data.tasks) == 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_swallows_exceptions(self):
|
||||
"""broadcast_dag_status never raises even when internals fail."""
|
||||
from reflector.hatchet.dag_progress import broadcast_dag_status
|
||||
|
||||
with patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.fresh_db_connection",
|
||||
side_effect=RuntimeError("db exploded"),
|
||||
):
|
||||
# Should not raise
|
||||
await broadcast_dag_status("t-123", "wf-abc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_no_broadcast_when_transcript_not_found(self):
|
||||
"""broadcast_dag_status does not broadcast if transcript is None."""
|
||||
mock_details = _make_details(
|
||||
shape=[_make_shape_item("s1", "get_recording")],
|
||||
tasks=[_make_task_summary("s1", status="COMPLETED")],
|
||||
)
|
||||
|
||||
mock_client = MagicMock()
|
||||
mock_client.runs.aio_get = AsyncMock(return_value=mock_details)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.client.HatchetClientManager.get_client",
|
||||
return_value=mock_client,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.fresh_db_connection",
|
||||
return_value=AsyncContextManager(),
|
||||
),
|
||||
patch(
|
||||
"reflector.db.transcripts.transcripts_controller.get_by_id",
|
||||
new_callable=AsyncMock,
|
||||
return_value=None,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.broadcast.append_event_and_broadcast",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_broadcast,
|
||||
):
|
||||
from reflector.hatchet.dag_progress import broadcast_dag_status
|
||||
|
||||
await broadcast_dag_status("t-123", "wf-abc")
|
||||
|
||||
mock_broadcast.assert_not_called()
|
||||
|
||||
|
||||
class TestMakeAudioProgressLoggerWithBroadcast:
|
||||
"""Test make_audio_progress_logger with transcript_id for transient broadcasts."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _setup_hatchet_mock(self):
|
||||
"""Set HatchetClientManager._instance to prevent real SDK init on import."""
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
|
||||
original = HatchetClientManager._instance
|
||||
if original is None:
|
||||
HatchetClientManager._instance = MagicMock()
|
||||
yield
|
||||
HatchetClientManager._instance = original
|
||||
|
||||
def test_broadcasts_transient_progress_event(self):
|
||||
"""When transcript_id provided and progress_pct not None, broadcasts event."""
|
||||
import asyncio
|
||||
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
make_audio_progress_logger,
|
||||
)
|
||||
|
||||
ctx = MagicMock()
|
||||
ctx.log = MagicMock()
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
mock_broadcast = AsyncMock()
|
||||
tasks_created = []
|
||||
|
||||
original_create_task = loop.create_task
|
||||
|
||||
def capture_create_task(coro):
|
||||
task = original_create_task(coro)
|
||||
tasks_created.append(task)
|
||||
return task
|
||||
|
||||
try:
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.broadcast.broadcast_event",
|
||||
mock_broadcast,
|
||||
),
|
||||
patch.object(loop, "create_task", side_effect=capture_create_task),
|
||||
):
|
||||
callback = make_audio_progress_logger(
|
||||
ctx, TaskName.MIXDOWN_TRACKS, interval=0.0, transcript_id="t-123"
|
||||
)
|
||||
callback(50.0, 100.0)
|
||||
|
||||
# Run pending tasks
|
||||
if tasks_created:
|
||||
loop.run_until_complete(asyncio.gather(*tasks_created))
|
||||
|
||||
mock_broadcast.assert_called_once()
|
||||
event_arg = mock_broadcast.call_args[0][1]
|
||||
assert event_arg.event == "DAG_TASK_PROGRESS"
|
||||
assert event_arg.data["task_name"] == TaskName.MIXDOWN_TRACKS
|
||||
assert event_arg.data["progress_pct"] == 50.0
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
def test_no_broadcast_without_transcript_id(self):
|
||||
"""When transcript_id is None, no broadcast happens."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
make_audio_progress_logger,
|
||||
)
|
||||
|
||||
ctx = MagicMock()
|
||||
|
||||
with patch(
|
||||
"reflector.hatchet.broadcast.broadcast_event",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_broadcast:
|
||||
callback = make_audio_progress_logger(
|
||||
ctx, TaskName.MIXDOWN_TRACKS, interval=0.0, transcript_id=None
|
||||
)
|
||||
callback(50.0, 100.0)
|
||||
mock_broadcast.assert_not_called()
|
||||
|
||||
def test_no_broadcast_when_progress_pct_is_none(self):
|
||||
"""When progress_pct is None, no broadcast happens even with transcript_id."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
make_audio_progress_logger,
|
||||
)
|
||||
|
||||
ctx = MagicMock()
|
||||
|
||||
with patch(
|
||||
"reflector.hatchet.broadcast.broadcast_event",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_broadcast:
|
||||
callback = make_audio_progress_logger(
|
||||
ctx, TaskName.MIXDOWN_TRACKS, interval=0.0, transcript_id="t-123"
|
||||
)
|
||||
callback(None, 100.0)
|
||||
mock_broadcast.assert_not_called()
|
||||
|
||||
def test_logging_throttled_by_interval(self):
|
||||
"""With interval=5.0, rapid calls only log once until interval elapses.
|
||||
|
||||
The throttle applies to ctx.log() calls. Broadcasts (fire-and-forget)
|
||||
are not throttled — they occur every call when transcript_id + progress_pct set.
|
||||
"""
|
||||
import asyncio
|
||||
import time as time_mod
|
||||
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
make_audio_progress_logger,
|
||||
)
|
||||
|
||||
ctx = MagicMock()
|
||||
ctx.log = MagicMock()
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
mock_broadcast = AsyncMock()
|
||||
tasks_created = []
|
||||
original_create_task = loop.create_task
|
||||
|
||||
def capture_create_task(coro):
|
||||
task = original_create_task(coro)
|
||||
tasks_created.append(task)
|
||||
return task
|
||||
|
||||
# Controlled monotonic values for the 4 calls from make_audio_progress_logger:
|
||||
# init (start_time, last_log_time), call1 (now), call2 (now), call3 (now)
|
||||
# After those, fall back to real time.monotonic() for asyncio internals.
|
||||
controlled_values = [100.0, 100.0, 101.0, 106.0]
|
||||
call_index = [0]
|
||||
real_monotonic = time_mod.monotonic
|
||||
|
||||
def mock_monotonic():
|
||||
if call_index[0] < len(controlled_values):
|
||||
val = controlled_values[call_index[0]]
|
||||
call_index[0] += 1
|
||||
return val
|
||||
return real_monotonic()
|
||||
|
||||
try:
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.time.monotonic",
|
||||
side_effect=mock_monotonic,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.broadcast.broadcast_event",
|
||||
mock_broadcast,
|
||||
),
|
||||
patch.object(loop, "create_task", side_effect=capture_create_task),
|
||||
):
|
||||
callback = make_audio_progress_logger(
|
||||
ctx, TaskName.MIXDOWN_TRACKS, interval=5.0, transcript_id="t-123"
|
||||
)
|
||||
|
||||
# Call 1 at t=100.0: 100.0 - 100.0 = 0.0 < 5.0 => no log
|
||||
callback(25.0, 50.0)
|
||||
assert ctx.log.call_count == 0
|
||||
|
||||
# Call 2 at t=101.0: 101.0 - 100.0 = 1.0 < 5.0 => no log
|
||||
callback(50.0, 100.0)
|
||||
assert ctx.log.call_count == 0
|
||||
|
||||
# Call 3 at t=106.0: 106.0 - 100.0 = 6.0 >= 5.0 => logs
|
||||
callback(75.0, 150.0)
|
||||
assert ctx.log.call_count == 1
|
||||
|
||||
# Run pending broadcast tasks
|
||||
if tasks_created:
|
||||
loop.run_until_complete(asyncio.gather(*tasks_created))
|
||||
|
||||
# Broadcasts happen on every call (not throttled) — 3 calls total
|
||||
assert mock_broadcast.call_count == 3
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
def test_uses_broadcast_event_not_append_event_and_broadcast(self):
|
||||
"""Progress events use broadcast_event (transient), not append_event_and_broadcast (persisted)."""
|
||||
import asyncio
|
||||
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
make_audio_progress_logger,
|
||||
)
|
||||
|
||||
ctx = MagicMock()
|
||||
ctx.log = MagicMock()
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
|
||||
mock_broadcast_event = AsyncMock()
|
||||
mock_append = AsyncMock()
|
||||
tasks_created = []
|
||||
original_create_task = loop.create_task
|
||||
|
||||
def capture_create_task(coro):
|
||||
task = original_create_task(coro)
|
||||
tasks_created.append(task)
|
||||
return task
|
||||
|
||||
try:
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.broadcast.broadcast_event",
|
||||
mock_broadcast_event,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.broadcast.append_event_and_broadcast",
|
||||
mock_append,
|
||||
),
|
||||
patch.object(loop, "create_task", side_effect=capture_create_task),
|
||||
):
|
||||
callback = make_audio_progress_logger(
|
||||
ctx, TaskName.MIXDOWN_TRACKS, interval=0.0, transcript_id="t-123"
|
||||
)
|
||||
callback(50.0, 100.0)
|
||||
|
||||
if tasks_created:
|
||||
loop.run_until_complete(asyncio.gather(*tasks_created))
|
||||
|
||||
# broadcast_event (transient) IS called
|
||||
mock_broadcast_event.assert_called_once()
|
||||
# append_event_and_broadcast (persisted) is NOT called
|
||||
mock_append.assert_not_called()
|
||||
finally:
|
||||
loop.close()
|
||||
181
server/tests/test_dag_progress_decorator.py
Normal file
@@ -0,0 +1,181 @@
|
||||
"""Tests for with_error_handling decorator integration with broadcast_dag_status.
|
||||
|
||||
The decorator wraps each pipeline task and calls broadcast_dag_status on both
|
||||
success and failure paths. These tests verify that integration rather than
|
||||
testing broadcast_dag_status in isolation (which test_dag_progress.py covers).
|
||||
"""
|
||||
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from reflector.hatchet.constants import TaskName
|
||||
|
||||
|
||||
class TestWithErrorHandlingBroadcast:
|
||||
"""Test with_error_handling decorator's integration with broadcast_dag_status."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def _setup_hatchet_mock(self):
|
||||
"""Set HatchetClientManager._instance to a mock to prevent real SDK init.
|
||||
|
||||
Module-level code in workflow files calls get_client() during import.
|
||||
Setting _instance before import avoids ClientConfig validation.
|
||||
"""
|
||||
from reflector.hatchet.client import HatchetClientManager
|
||||
|
||||
original = HatchetClientManager._instance
|
||||
HatchetClientManager._instance = MagicMock()
|
||||
yield
|
||||
HatchetClientManager._instance = original
|
||||
|
||||
def _make_input(self, transcript_id: str = "t-123") -> MagicMock:
|
||||
"""Create a mock PipelineInput with transcript_id."""
|
||||
inp = MagicMock()
|
||||
inp.transcript_id = transcript_id
|
||||
return inp
|
||||
|
||||
def _make_ctx(self, workflow_run_id: str = "wf-abc") -> MagicMock:
|
||||
"""Create a mock Context with workflow_run_id."""
|
||||
ctx = MagicMock()
|
||||
ctx.workflow_run_id = workflow_run_id
|
||||
return ctx
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_calls_broadcast_on_success(self):
|
||||
"""Decorator calls broadcast_dag_status once when task succeeds."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
with_error_handling,
|
||||
)
|
||||
|
||||
inner = AsyncMock(return_value="ok")
|
||||
wrapped = with_error_handling(TaskName.GET_RECORDING)(inner)
|
||||
|
||||
with patch(
|
||||
"reflector.hatchet.dag_progress.broadcast_dag_status",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_broadcast:
|
||||
result = await wrapped(self._make_input(), self._make_ctx())
|
||||
|
||||
assert result == "ok"
|
||||
mock_broadcast.assert_called_once_with("t-123", "wf-abc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_calls_broadcast_on_failure(self):
|
||||
"""Decorator calls broadcast_dag_status once when task raises."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
with_error_handling,
|
||||
)
|
||||
|
||||
inner = AsyncMock(side_effect=RuntimeError("boom"))
|
||||
wrapped = with_error_handling(TaskName.GET_RECORDING)(inner)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.dag_progress.broadcast_dag_status",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_broadcast,
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
|
||||
new_callable=AsyncMock,
|
||||
),
|
||||
):
|
||||
with pytest.raises(RuntimeError, match="boom"):
|
||||
await wrapped(self._make_input(), self._make_ctx())
|
||||
|
||||
mock_broadcast.assert_called_once_with("t-123", "wf-abc")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_swallows_broadcast_exception_on_success(self):
|
||||
"""Broadcast failure does not crash the task on the success path."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
with_error_handling,
|
||||
)
|
||||
|
||||
inner = AsyncMock(return_value="ok")
|
||||
wrapped = with_error_handling(TaskName.GET_RECORDING)(inner)
|
||||
|
||||
with patch(
|
||||
"reflector.hatchet.dag_progress.broadcast_dag_status",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=RuntimeError("broadcast exploded"),
|
||||
):
|
||||
result = await wrapped(self._make_input(), self._make_ctx())
|
||||
|
||||
assert result == "ok"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_swallows_broadcast_exception_on_failure(self):
|
||||
"""Original task exception propagates even when broadcast also fails."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
with_error_handling,
|
||||
)
|
||||
|
||||
inner = AsyncMock(side_effect=ValueError("original error"))
|
||||
wrapped = with_error_handling(TaskName.GET_RECORDING)(inner)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.dag_progress.broadcast_dag_status",
|
||||
new_callable=AsyncMock,
|
||||
side_effect=RuntimeError("broadcast exploded"),
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
|
||||
new_callable=AsyncMock,
|
||||
),
|
||||
):
|
||||
with pytest.raises(ValueError, match="original error"):
|
||||
await wrapped(self._make_input(), self._make_ctx())
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_calls_set_workflow_error_status_on_failure(self):
|
||||
"""On task failure with set_error_status=True (default), calls set_workflow_error_status."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
with_error_handling,
|
||||
)
|
||||
|
||||
inner = AsyncMock(side_effect=RuntimeError("boom"))
|
||||
wrapped = with_error_handling(TaskName.GET_RECORDING)(inner)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.dag_progress.broadcast_dag_status",
|
||||
new_callable=AsyncMock,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_set_error,
|
||||
):
|
||||
with pytest.raises(RuntimeError, match="boom"):
|
||||
await wrapped(self._make_input(), self._make_ctx())
|
||||
|
||||
mock_set_error.assert_called_once_with("t-123")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_no_set_workflow_error_status_when_disabled(self):
|
||||
"""With set_error_status=False, set_workflow_error_status is NOT called on failure."""
|
||||
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
|
||||
with_error_handling,
|
||||
)
|
||||
|
||||
inner = AsyncMock(side_effect=RuntimeError("boom"))
|
||||
wrapped = with_error_handling(TaskName.GET_RECORDING, set_error_status=False)(
|
||||
inner
|
||||
)
|
||||
|
||||
with (
|
||||
patch(
|
||||
"reflector.hatchet.dag_progress.broadcast_dag_status",
|
||||
new_callable=AsyncMock,
|
||||
),
|
||||
patch(
|
||||
"reflector.hatchet.workflows.daily_multitrack_pipeline.set_workflow_error_status",
|
||||
new_callable=AsyncMock,
|
||||
) as mock_set_error,
|
||||
):
|
||||
with pytest.raises(RuntimeError, match="boom"):
|
||||
await wrapped(self._make_input(), self._make_ctx())
|
||||
|
||||
mock_set_error.assert_not_called()
|
||||
421
server/tests/test_dag_progress_rest.py
Normal file
@@ -0,0 +1,421 @@
|
||||
"""Tests for DAG status REST enrichment on search and transcript GET endpoints."""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
import reflector.db.search as search_module
|
||||
from reflector.db.search import SearchResult, _fetch_dag_statuses
|
||||
from reflector.db.transcripts import TranscriptEvent
|
||||
|
||||
|
||||
class TestFetchDagStatuses:
|
||||
"""Test the _fetch_dag_statuses helper."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_empty_for_empty_ids(self):
|
||||
result = await _fetch_dag_statuses([])
|
||||
assert result == {}
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_extracts_last_dag_status(self):
|
||||
events = [
|
||||
{"event": "STATUS", "data": {"value": "processing"}},
|
||||
{
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "get_recording", "status": "completed"}],
|
||||
},
|
||||
},
|
||||
{
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [
|
||||
{"name": "get_recording", "status": "completed"},
|
||||
{"name": "process_tracks", "status": "running"},
|
||||
],
|
||||
},
|
||||
},
|
||||
]
|
||||
mock_row = {"id": "t1", "events": events}
|
||||
|
||||
with patch("reflector.db.search.get_database") as mock_db:
|
||||
mock_db.return_value.fetch_all = AsyncMock(return_value=[mock_row])
|
||||
result = await _fetch_dag_statuses(["t1"])
|
||||
|
||||
assert "t1" in result
|
||||
assert len(result["t1"]) == 2 # Last DAG_STATUS had 2 tasks
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_skips_transcripts_without_events(self):
|
||||
mock_row = {"id": "t1", "events": None}
|
||||
|
||||
with patch("reflector.db.search.get_database") as mock_db:
|
||||
mock_db.return_value.fetch_all = AsyncMock(return_value=[mock_row])
|
||||
result = await _fetch_dag_statuses(["t1"])
|
||||
|
||||
assert result == {}
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_skips_transcripts_without_dag_status(self):
|
||||
events = [
|
||||
{"event": "STATUS", "data": {"value": "processing"}},
|
||||
{"event": "DURATION", "data": {"duration": 1000}},
|
||||
]
|
||||
mock_row = {"id": "t1", "events": events}
|
||||
|
||||
with patch("reflector.db.search.get_database") as mock_db:
|
||||
mock_db.return_value.fetch_all = AsyncMock(return_value=[mock_row])
|
||||
result = await _fetch_dag_statuses(["t1"])
|
||||
|
||||
assert result == {}
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handles_json_string_events(self):
|
||||
"""Events stored as JSON string rather than already-parsed list."""
|
||||
import json
|
||||
|
||||
events = [
|
||||
{
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "transcribe", "status": "running"}],
|
||||
},
|
||||
},
|
||||
]
|
||||
mock_row = {"id": "t1", "events": json.dumps(events)}
|
||||
|
||||
with patch("reflector.db.search.get_database") as mock_db:
|
||||
mock_db.return_value.fetch_all = AsyncMock(return_value=[mock_row])
|
||||
result = await _fetch_dag_statuses(["t1"])
|
||||
|
||||
assert "t1" in result
|
||||
assert len(result["t1"]) == 1
|
||||
assert result["t1"][0]["name"] == "transcribe"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_transcripts(self):
|
||||
"""Handles multiple transcripts in one call."""
|
||||
events_t1 = [
|
||||
{
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "a", "status": "completed"}],
|
||||
},
|
||||
},
|
||||
]
|
||||
events_t2 = [
|
||||
{
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "r2",
|
||||
"tasks": [{"name": "b", "status": "running"}],
|
||||
},
|
||||
},
|
||||
]
|
||||
mock_rows = [
|
||||
{"id": "t1", "events": events_t1},
|
||||
{"id": "t2", "events": events_t2},
|
||||
]
|
||||
|
||||
with patch("reflector.db.search.get_database") as mock_db:
|
||||
mock_db.return_value.fetch_all = AsyncMock(return_value=mock_rows)
|
||||
result = await _fetch_dag_statuses(["t1", "t2"])
|
||||
|
||||
assert "t1" in result
|
||||
assert "t2" in result
|
||||
assert result["t1"][0]["name"] == "a"
|
||||
assert result["t2"][0]["name"] == "b"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dag_status_without_tasks_key_skipped(self):
|
||||
"""DAG_STATUS event with no tasks key in data should be skipped."""
|
||||
events = [
|
||||
{"event": "DAG_STATUS", "data": {"workflow_run_id": "r1"}},
|
||||
]
|
||||
mock_row = {"id": "t1", "events": events}
|
||||
|
||||
with patch("reflector.db.search.get_database") as mock_db:
|
||||
mock_db.return_value.fetch_all = AsyncMock(return_value=[mock_row])
|
||||
result = await _fetch_dag_statuses(["t1"])
|
||||
|
||||
assert result == {}
|
||||
|
||||
|
||||
def _extract_dag_status_from_transcript(transcript):
|
||||
"""Replicate the dag_status extraction logic from transcript_get view.
|
||||
|
||||
This mirrors the code in reflector/views/transcripts.py lines 495-500:
|
||||
dag_status = None
|
||||
if transcript.status == "processing" and transcript.events:
|
||||
for ev in reversed(transcript.events):
|
||||
if ev.event == "DAG_STATUS":
|
||||
dag_status = ev.data.get("tasks") if isinstance(ev.data, dict) else None
|
||||
break
|
||||
"""
|
||||
dag_status = None
|
||||
if transcript.status == "processing" and transcript.events:
|
||||
for ev in reversed(transcript.events):
|
||||
if ev.event == "DAG_STATUS":
|
||||
dag_status = ev.data.get("tasks") if isinstance(ev.data, dict) else None
|
||||
break
|
||||
return dag_status
|
||||
|
||||
|
||||
class TestTranscriptGetDagStatusExtraction:
|
||||
"""Test dag_status extraction logic from transcript_get endpoint.
|
||||
|
||||
The actual endpoint is complex to set up, so we test the extraction
|
||||
logic directly using the same code pattern from the view.
|
||||
"""
|
||||
|
||||
def test_processing_transcript_with_dag_status_events(self):
|
||||
"""Processing transcript with DAG_STATUS events returns tasks from last event."""
|
||||
transcript = SimpleNamespace(
|
||||
status="processing",
|
||||
events=[
|
||||
TranscriptEvent(event="STATUS", data={"value": "processing"}),
|
||||
TranscriptEvent(
|
||||
event="DAG_STATUS",
|
||||
data={
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "get_recording", "status": "completed"}],
|
||||
},
|
||||
),
|
||||
TranscriptEvent(
|
||||
event="DAG_STATUS",
|
||||
data={
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [
|
||||
{"name": "get_recording", "status": "completed"},
|
||||
{"name": "transcribe", "status": "running"},
|
||||
],
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
result = _extract_dag_status_from_transcript(transcript)
|
||||
|
||||
assert result is not None
|
||||
assert len(result) == 2
|
||||
assert result[0]["name"] == "get_recording"
|
||||
assert result[1]["name"] == "transcribe"
|
||||
assert result[1]["status"] == "running"
|
||||
|
||||
def test_processing_transcript_without_dag_status_events(self):
|
||||
"""Processing transcript with only non-DAG_STATUS events returns None."""
|
||||
transcript = SimpleNamespace(
|
||||
status="processing",
|
||||
events=[
|
||||
TranscriptEvent(event="STATUS", data={"value": "processing"}),
|
||||
TranscriptEvent(event="DURATION", data={"duration": 1000}),
|
||||
],
|
||||
)
|
||||
|
||||
result = _extract_dag_status_from_transcript(transcript)
|
||||
assert result is None
|
||||
|
||||
def test_ended_transcript_with_dag_status_events(self):
|
||||
"""Ended transcript with DAG_STATUS events returns None (status check)."""
|
||||
transcript = SimpleNamespace(
|
||||
status="ended",
|
||||
events=[
|
||||
TranscriptEvent(
|
||||
event="DAG_STATUS",
|
||||
data={
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "transcribe", "status": "completed"}],
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
result = _extract_dag_status_from_transcript(transcript)
|
||||
assert result is None
|
||||
|
||||
def test_processing_transcript_with_empty_events(self):
|
||||
"""Processing transcript with empty events list returns None."""
|
||||
transcript = SimpleNamespace(
|
||||
status="processing",
|
||||
events=[],
|
||||
)
|
||||
|
||||
result = _extract_dag_status_from_transcript(transcript)
|
||||
assert result is None
|
||||
|
||||
def test_processing_transcript_with_none_events(self):
|
||||
"""Processing transcript with None events returns None."""
|
||||
transcript = SimpleNamespace(
|
||||
status="processing",
|
||||
events=None,
|
||||
)
|
||||
|
||||
result = _extract_dag_status_from_transcript(transcript)
|
||||
assert result is None
|
||||
|
||||
def test_extracts_last_dag_status_not_first(self):
|
||||
"""Should pick the last DAG_STATUS event (most recent), not the first."""
|
||||
transcript = SimpleNamespace(
|
||||
status="processing",
|
||||
events=[
|
||||
TranscriptEvent(
|
||||
event="DAG_STATUS",
|
||||
data={
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "a", "status": "running"}],
|
||||
},
|
||||
),
|
||||
TranscriptEvent(event="STATUS", data={"value": "processing"}),
|
||||
TranscriptEvent(
|
||||
event="DAG_STATUS",
|
||||
data={
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [
|
||||
{"name": "a", "status": "completed"},
|
||||
{"name": "b", "status": "running"},
|
||||
],
|
||||
},
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
result = _extract_dag_status_from_transcript(transcript)
|
||||
assert len(result) == 2
|
||||
assert result[0]["status"] == "completed"
|
||||
assert result[1]["name"] == "b"
|
||||
|
||||
|
||||
class TestSearchEnrichmentIntegration:
|
||||
"""Test DAG status enrichment in search results.
|
||||
|
||||
The search function enriches processing transcripts with dag_status
|
||||
by calling _fetch_dag_statuses for processing IDs and assigning results.
|
||||
We test this enrichment logic by mocking _fetch_dag_statuses.
|
||||
"""
|
||||
|
||||
def _make_search_result(self, id: str, status: str) -> SearchResult:
|
||||
"""Create a minimal SearchResult for testing."""
|
||||
return SearchResult(
|
||||
id=id,
|
||||
title=f"Transcript {id}",
|
||||
user_id="u1",
|
||||
room_id=None,
|
||||
room_name=None,
|
||||
source_kind="live",
|
||||
created_at=datetime(2024, 1, 1, tzinfo=timezone.utc),
|
||||
status=status,
|
||||
rank=1.0,
|
||||
duration=60.0,
|
||||
search_snippets=[],
|
||||
total_match_count=0,
|
||||
dag_status=None,
|
||||
)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_processing_result_gets_dag_status(self):
|
||||
"""SearchResult with status='processing' and matching DAG_STATUS events
|
||||
gets dag_status populated."""
|
||||
results = [self._make_search_result("t1", "processing")]
|
||||
dag_tasks = [
|
||||
{"name": "get_recording", "status": "completed"},
|
||||
{"name": "transcribe", "status": "running"},
|
||||
]
|
||||
|
||||
with patch.object(
|
||||
search_module,
|
||||
"_fetch_dag_statuses",
|
||||
new_callable=AsyncMock,
|
||||
return_value={"t1": dag_tasks},
|
||||
) as mock_fetch:
|
||||
# Replicate the enrichment logic from SearchController.search_transcripts
|
||||
processing_ids = [r.id for r in results if r.status == "processing"]
|
||||
if processing_ids:
|
||||
dag_statuses = await search_module._fetch_dag_statuses(processing_ids)
|
||||
for r in results:
|
||||
if r.id in dag_statuses:
|
||||
r.dag_status = dag_statuses[r.id]
|
||||
|
||||
mock_fetch.assert_called_once_with(["t1"])
|
||||
|
||||
assert results[0].dag_status == dag_tasks
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ended_result_does_not_trigger_fetch(self):
|
||||
"""SearchResult with status='ended' does NOT trigger _fetch_dag_statuses."""
|
||||
results = [self._make_search_result("t1", "ended")]
|
||||
|
||||
with patch.object(
|
||||
search_module,
|
||||
"_fetch_dag_statuses",
|
||||
new_callable=AsyncMock,
|
||||
return_value={},
|
||||
) as mock_fetch:
|
||||
processing_ids = [r.id for r in results if r.status == "processing"]
|
||||
if processing_ids:
|
||||
dag_statuses = await search_module._fetch_dag_statuses(processing_ids)
|
||||
for r in results:
|
||||
if r.id in dag_statuses:
|
||||
r.dag_status = dag_statuses[r.id]
|
||||
|
||||
mock_fetch.assert_not_called()
|
||||
|
||||
assert results[0].dag_status is None
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_mixed_processing_and_ended_results(self):
|
||||
"""Only processing results get enriched; ended results stay None."""
|
||||
results = [
|
||||
self._make_search_result("t1", "processing"),
|
||||
self._make_search_result("t2", "ended"),
|
||||
self._make_search_result("t3", "processing"),
|
||||
]
|
||||
dag_tasks_t1 = [{"name": "transcribe", "status": "running"}]
|
||||
dag_tasks_t3 = [{"name": "diarize", "status": "completed"}]
|
||||
|
||||
with patch.object(
|
||||
search_module,
|
||||
"_fetch_dag_statuses",
|
||||
new_callable=AsyncMock,
|
||||
return_value={"t1": dag_tasks_t1, "t3": dag_tasks_t3},
|
||||
) as mock_fetch:
|
||||
processing_ids = [r.id for r in results if r.status == "processing"]
|
||||
if processing_ids:
|
||||
dag_statuses = await search_module._fetch_dag_statuses(processing_ids)
|
||||
for r in results:
|
||||
if r.id in dag_statuses:
|
||||
r.dag_status = dag_statuses[r.id]
|
||||
|
||||
mock_fetch.assert_called_once_with(["t1", "t3"])
|
||||
|
||||
assert results[0].dag_status == dag_tasks_t1
|
||||
assert results[1].dag_status is None
|
||||
assert results[2].dag_status == dag_tasks_t3
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_processing_result_without_dag_events_stays_none(self):
|
||||
"""Processing result with no DAG_STATUS events in DB stays dag_status=None."""
|
||||
results = [self._make_search_result("t1", "processing")]
|
||||
|
||||
with patch.object(
|
||||
search_module,
|
||||
"_fetch_dag_statuses",
|
||||
new_callable=AsyncMock,
|
||||
return_value={},
|
||||
) as mock_fetch:
|
||||
processing_ids = [r.id for r in results if r.status == "processing"]
|
||||
if processing_ids:
|
||||
dag_statuses = await search_module._fetch_dag_statuses(processing_ids)
|
||||
for r in results:
|
||||
if r.id in dag_statuses:
|
||||
r.dag_status = dag_statuses[r.id]
|
||||
|
||||
mock_fetch.assert_called_once_with(["t1"])
|
||||
|
||||
assert results[0].dag_status is None
|
||||
@@ -255,7 +255,7 @@ async def test_validation_locked_transcript():
|
||||
@pytest.mark.usefixtures("setup_database")
|
||||
@pytest.mark.asyncio
|
||||
async def test_validation_idle_transcript():
|
||||
"""Test that validation rejects idle transcripts without recording (file upload not ready)."""
|
||||
"""Test that validation rejects idle transcripts (not ready)."""
|
||||
from reflector.services.transcript_process import (
|
||||
ValidationNotReady,
|
||||
validate_transcript_for_processing,
|
||||
@@ -274,34 +274,6 @@ async def test_validation_idle_transcript():
|
||||
assert "not ready" in result.detail.lower()
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("setup_database")
|
||||
@pytest.mark.asyncio
|
||||
async def test_validation_idle_transcript_with_recording_allowed():
|
||||
"""Test that validation allows idle transcripts with recording_id (multitrack ready/retry)."""
|
||||
from reflector.services.transcript_process import (
|
||||
ValidationOk,
|
||||
validate_transcript_for_processing,
|
||||
)
|
||||
|
||||
mock_transcript = Transcript(
|
||||
id="test-transcript-id",
|
||||
name="Test",
|
||||
status="idle",
|
||||
source_kind="room",
|
||||
recording_id="test-recording-id",
|
||||
)
|
||||
|
||||
with patch(
|
||||
"reflector.services.transcript_process.task_is_scheduled_or_active"
|
||||
) as mock_celery_check:
|
||||
mock_celery_check.return_value = False
|
||||
|
||||
result = await validate_transcript_for_processing(mock_transcript)
|
||||
|
||||
assert isinstance(result, ValidationOk)
|
||||
assert result.recording_id == "test-recording-id"
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("setup_database")
|
||||
@pytest.mark.asyncio
|
||||
async def test_prepare_multitrack_config():
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
"""Tests for LLM structured output with astructured_predict + reflection retry"""
|
||||
"""Tests for LLM parse error recovery using llama-index Workflow"""
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
from time import monotonic
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel, Field, ValidationError
|
||||
from pydantic import BaseModel, Field
|
||||
from workflows.errors import WorkflowRuntimeError, WorkflowTimeoutError
|
||||
|
||||
from reflector.llm import LLM, LLMParseError
|
||||
from reflector.llm import LLM, LLMParseError, StructuredOutputWorkflow
|
||||
from reflector.utils.retry import RetryException
|
||||
|
||||
|
||||
@@ -17,43 +19,51 @@ class TestResponse(BaseModel):
|
||||
confidence: float = Field(description="Confidence score", ge=0, le=1)
|
||||
|
||||
|
||||
def make_completion_response(text: str):
|
||||
"""Create a mock CompletionResponse with .text attribute"""
|
||||
response = MagicMock()
|
||||
response.text = text
|
||||
return response
|
||||
|
||||
|
||||
class TestLLMParseErrorRecovery:
|
||||
"""Test parse error recovery with astructured_predict reflection loop"""
|
||||
"""Test parse error recovery with Workflow feedback loop"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_parse_error_recovery_with_feedback(self, test_settings):
|
||||
"""Test that parse errors trigger retry with reflection prompt"""
|
||||
"""Test that parse errors trigger retry with error feedback"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
# First call: raise ValidationError (missing fields)
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {"title": "Test"},
|
||||
}
|
||||
],
|
||||
)
|
||||
else:
|
||||
# Second call: should have reflection in the prompt
|
||||
assert "reflection" in kwargs
|
||||
assert "could not be parsed" in kwargs["reflection"]
|
||||
assert "Error:" in kwargs["reflection"]
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
# TreeSummarize returns plain text analysis (step 1)
|
||||
mock_summarizer.aget_response = AsyncMock(
|
||||
return_value="The analysis shows a test with summary and high confidence."
|
||||
)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def acomplete_handler(prompt, *args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
# First JSON formatting call returns invalid JSON
|
||||
return make_completion_response('{"title": "Test"}')
|
||||
else:
|
||||
# Second call should have error feedback in prompt
|
||||
assert "Your previous response could not be parsed:" in prompt
|
||||
assert '{"title": "Test"}' in prompt
|
||||
assert "Error:" in prompt
|
||||
assert "Please try again" in prompt
|
||||
return make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
)
|
||||
@@ -61,6 +71,8 @@ class TestLLMParseErrorRecovery:
|
||||
assert result.title == "Test"
|
||||
assert result.summary == "Summary"
|
||||
assert result.confidence == 0.95
|
||||
# TreeSummarize called once, Settings.llm.acomplete called twice
|
||||
assert mock_summarizer.aget_response.call_count == 1
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@@ -68,22 +80,20 @@ class TestLLMParseErrorRecovery:
|
||||
"""Test that parse error retry stops after max attempts"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
# Always raise ValidationError
|
||||
async def always_fail(output_cls, prompt_tmpl, **kwargs):
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
}
|
||||
],
|
||||
)
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(side_effect=always_fail)
|
||||
# Always return invalid JSON from acomplete
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"invalid": "missing required fields"}'
|
||||
)
|
||||
)
|
||||
|
||||
with pytest.raises(LLMParseError, match="Failed to parse"):
|
||||
await llm.get_structured_response(
|
||||
@@ -91,38 +101,35 @@ class TestLLMParseErrorRecovery:
|
||||
)
|
||||
|
||||
expected_attempts = test_settings.LLM_PARSE_MAX_RETRIES + 1
|
||||
assert mock_settings.llm.astructured_predict.call_count == expected_attempts
|
||||
# TreeSummarize called once, acomplete called max_retries times
|
||||
assert mock_summarizer.aget_response.call_count == 1
|
||||
assert mock_settings.llm.acomplete.call_count == expected_attempts
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_raw_response_logging_on_parse_error(self, test_settings, caplog):
|
||||
"""Test that raw response is logged when parse error occurs"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {"title": "Test"},
|
||||
}
|
||||
],
|
||||
)
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
caplog.at_level("ERROR"),
|
||||
):
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
)
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def acomplete_handler(*args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
return make_completion_response('{"title": "Test"}') # Invalid
|
||||
return make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
@@ -136,45 +143,35 @@ class TestLLMParseErrorRecovery:
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_validation_errors_in_feedback(self, test_settings):
|
||||
"""Test that validation errors are included in reflection feedback"""
|
||||
"""Test that validation errors are included in feedback"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
# Missing title and summary
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("title",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
},
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
},
|
||||
],
|
||||
)
|
||||
else:
|
||||
# Should have schema validation errors in reflection
|
||||
assert "reflection" in kwargs
|
||||
assert (
|
||||
"Schema validation errors" in kwargs["reflection"]
|
||||
or "error" in kwargs["reflection"].lower()
|
||||
)
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
call_count = {"count": 0}
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
)
|
||||
async def acomplete_handler(prompt, *args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
# Missing title and summary
|
||||
return make_completion_response('{"confidence": 0.5}')
|
||||
else:
|
||||
# Should have schema validation errors in prompt
|
||||
assert (
|
||||
"Schema validation errors" in prompt
|
||||
or "error" in prompt.lower()
|
||||
)
|
||||
return make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
@@ -188,10 +185,17 @@ class TestLLMParseErrorRecovery:
|
||||
"""Test that no retry happens when first attempt succeeds"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
return_value=TestResponse(
|
||||
title="Test", summary="Summary", confidence=0.95
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
)
|
||||
|
||||
@@ -202,28 +206,195 @@ class TestLLMParseErrorRecovery:
|
||||
assert result.title == "Test"
|
||||
assert result.summary == "Summary"
|
||||
assert result.confidence == 0.95
|
||||
assert mock_settings.llm.astructured_predict.call_count == 1
|
||||
assert mock_summarizer.aget_response.call_count == 1
|
||||
assert mock_settings.llm.acomplete.call_count == 1
|
||||
|
||||
|
||||
class TestStructuredOutputWorkflow:
|
||||
"""Direct tests for the StructuredOutputWorkflow"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_workflow_retries_on_validation_error(self):
|
||||
"""Test workflow retries when validation fails"""
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=TestResponse,
|
||||
max_retries=3,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def acomplete_handler(*args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] < 2:
|
||||
return make_completion_response('{"title": "Only title"}')
|
||||
return make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.9}'
|
||||
)
|
||||
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
|
||||
|
||||
result = await workflow.run(
|
||||
prompt="Extract data",
|
||||
texts=["Some text"],
|
||||
tone_name=None,
|
||||
)
|
||||
|
||||
assert "success" in result
|
||||
assert result["success"].title == "Test"
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_workflow_returns_error_after_max_retries(self):
|
||||
"""Test workflow returns error after exhausting retries"""
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=TestResponse,
|
||||
max_retries=2,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
# Always return invalid JSON
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response('{"invalid": true}')
|
||||
)
|
||||
|
||||
result = await workflow.run(
|
||||
prompt="Extract data",
|
||||
texts=["Some text"],
|
||||
tone_name=None,
|
||||
)
|
||||
|
||||
assert "error" in result
|
||||
# TreeSummarize called once, acomplete called max_retries times
|
||||
assert mock_summarizer.aget_response.call_count == 1
|
||||
assert mock_settings.llm.acomplete.call_count == 2
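As the two workflow tests above show, run() resolves to a dict keyed by either "success" or "error" rather than raising when retries are exhausted. A sketch of how a caller might branch on that result, reusing the TestResponse model and imports from this test module; the logging line is illustrative.

```python
async def extract(texts: list[str]) -> TestResponse | None:
    workflow = StructuredOutputWorkflow(output_cls=TestResponse, max_retries=3, timeout=30)
    result = await workflow.run(prompt="Extract data", texts=texts, tone_name=None)
    if "success" in result:
        return result["success"]
    # After max_retries failed parses the workflow reports the error instead of raising
    print(f"structured output failed: {result['error']}")
    return None
```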
|
||||
|
||||
|
||||
class TestNetworkErrorRetries:
|
||||
"""Test that network errors are retried by the outer retry() wrapper"""
|
||||
"""Test that network error retries are handled by OpenAILike, not Workflow"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_retried_by_outer_wrapper(self, test_settings):
|
||||
"""Test that network errors trigger the outer retry wrapper"""
|
||||
async def test_network_error_propagates_after_openai_retries(self, test_settings):
|
||||
"""Test that network errors are retried by OpenAILike and then propagate.
|
||||
|
||||
Network retries are handled by OpenAILike (max_retries=3), not by our
|
||||
StructuredOutputWorkflow. This test verifies that network errors propagate
|
||||
up after OpenAILike exhausts its retries.
|
||||
"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
# Simulate network error from acomplete (after OpenAILike retries exhausted)
|
||||
network_error = ConnectionError("Connection refused")
|
||||
mock_settings.llm.acomplete = AsyncMock(side_effect=network_error)
|
||||
|
||||
# Network error wrapped in WorkflowRuntimeError
|
||||
with pytest.raises(WorkflowRuntimeError, match="Connection refused"):
|
||||
await llm.get_structured_response(
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
)
|
||||
|
||||
# acomplete called only once - network error propagates, not retried by Workflow
|
||||
assert mock_settings.llm.acomplete.call_count == 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_not_retried_by_workflow(self, test_settings):
|
||||
"""Test that Workflow does NOT retry network errors (OpenAILike handles those).
|
||||
|
||||
This verifies the separation of concerns:
|
||||
- StructuredOutputWorkflow: retries parse/validation errors
|
||||
- OpenAILike: retries network errors (internally, max_retries=3)
|
||||
"""
|
||||
workflow = StructuredOutputWorkflow(
|
||||
output_cls=TestResponse,
|
||||
max_retries=3,
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
with (
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
|
||||
# Network error should propagate immediately, not trigger Workflow retry
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
side_effect=TimeoutError("Request timed out")
|
||||
)
|
||||
|
||||
# Network error wrapped in WorkflowRuntimeError
|
||||
with pytest.raises(WorkflowRuntimeError, match="Request timed out"):
|
||||
await workflow.run(
|
||||
prompt="Extract data",
|
||||
texts=["Some text"],
|
||||
tone_name=None,
|
||||
)
|
||||
|
||||
# Only called once - Workflow doesn't retry network errors
|
||||
assert mock_settings.llm.acomplete.call_count == 1
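The split of responsibilities asserted above (network retries inside the llama-index client, parse retries inside the workflow) would be configured roughly like this. The model name, endpoint, and key are placeholders, and the exact construction in reflector.llm may differ.

```python
from llama_index.llms.openai_like import OpenAILike

llm = OpenAILike(
    model="placeholder-model",
    api_base="http://llm.internal:8000/v1",  # placeholder endpoint
    api_key="placeholder-key",
    max_retries=3,  # transport-level retries live here, per the docstrings above
)
# StructuredOutputWorkflow then only retries parse/validation failures, so a
# ConnectionError or TimeoutError from acomplete propagates straight out of the workflow.
```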
|
||||
|
||||
|
||||
class TestWorkflowTimeoutRetry:
|
||||
"""Test timeout retry mechanism in get_structured_response"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_timeout_retry_succeeds_on_retry(self, test_settings):
|
||||
"""Test that WorkflowTimeoutError triggers retry and succeeds"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
async def workflow_run_side_effect(*args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
raise ConnectionError("Connection refused")
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
raise WorkflowTimeoutError("Operation timed out after 120 seconds")
|
||||
return {
|
||||
"success": TestResponse(
|
||||
title="Test", summary="Summary", confidence=0.95
|
||||
)
|
||||
}
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
with (
|
||||
patch("reflector.llm.StructuredOutputWorkflow") as mock_workflow_class,
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_workflow = MagicMock()
|
||||
mock_workflow.run = AsyncMock(side_effect=workflow_run_side_effect)
|
||||
mock_workflow_class.return_value = mock_workflow
|
||||
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
@@ -231,16 +402,36 @@ class TestNetworkErrorRetries:
|
||||
)
|
||||
|
||||
assert result.title == "Test"
|
||||
assert result.summary == "Summary"
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_exhausts_retries(self, test_settings):
|
||||
"""Test that persistent network errors exhaust retry attempts"""
|
||||
async def test_timeout_retry_exhausts_after_max_attempts(self, test_settings):
|
||||
"""Test that timeout retry stops after max attempts"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=ConnectionError("Connection refused")
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def workflow_run_side_effect(*args, **kwargs):
|
||||
call_count["count"] += 1
|
||||
raise WorkflowTimeoutError("Operation timed out after 120 seconds")
|
||||
|
||||
with (
|
||||
patch("reflector.llm.StructuredOutputWorkflow") as mock_workflow_class,
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_workflow = MagicMock()
|
||||
mock_workflow.run = AsyncMock(side_effect=workflow_run_side_effect)
|
||||
mock_workflow_class.return_value = mock_workflow
|
||||
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
)
|
||||
|
||||
with pytest.raises(RetryException, match="Retry attempts exceeded"):
|
||||
@@ -248,129 +439,41 @@ class TestNetworkErrorRetries:
|
||||
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
|
||||
)
|
||||
|
||||
# 3 retry attempts
|
||||
assert mock_settings.llm.astructured_predict.call_count == 3
|
||||
|
||||
|
||||
class TestTextsInclusion:
|
||||
"""Test that texts parameter is included in the prompt sent to astructured_predict"""
|
||||
assert call_count["count"] == 3
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_texts_included_in_prompt(self, test_settings):
|
||||
"""Test that texts content is appended to the prompt for astructured_predict"""
|
||||
async def test_timeout_retry_with_backoff(self, test_settings):
|
||||
"""Test that exponential backoff is applied between retries"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
captured_prompts = []
|
||||
call_times = []
|
||||
|
||||
async def capture_prompt(output_cls, prompt_tmpl, **kwargs):
|
||||
captured_prompts.append(kwargs.get("user_prompt", ""))
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=capture_prompt
|
||||
)
|
||||
|
||||
await llm.get_structured_response(
|
||||
prompt="Identify all participants",
|
||||
texts=["Alice: Hello everyone", "Bob: Hi Alice"],
|
||||
output_cls=TestResponse,
|
||||
)
|
||||
|
||||
assert len(captured_prompts) == 1
|
||||
prompt_sent = captured_prompts[0]
|
||||
assert "Identify all participants" in prompt_sent
|
||||
assert "Alice: Hello everyone" in prompt_sent
|
||||
assert "Bob: Hi Alice" in prompt_sent
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_empty_texts_uses_prompt_only(self, test_settings):
|
||||
"""Test that empty texts list sends only the prompt"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
captured_prompts = []
|
||||
|
||||
async def capture_prompt(output_cls, prompt_tmpl, **kwargs):
|
||||
captured_prompts.append(kwargs.get("user_prompt", ""))
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=capture_prompt
|
||||
)
|
||||
|
||||
await llm.get_structured_response(
|
||||
prompt="Identify all participants",
|
||||
texts=[],
|
||||
output_cls=TestResponse,
|
||||
)
|
||||
|
||||
assert len(captured_prompts) == 1
|
||||
assert captured_prompts[0] == "Identify all participants"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_texts_included_in_reflection_retry(self, test_settings):
|
||||
"""Test that texts are included in the prompt even during reflection retries"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
captured_prompts = []
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def capture_and_fail_first(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
captured_prompts.append(kwargs.get("user_prompt", ""))
|
||||
if call_count["count"] == 1:
|
||||
raise ValidationError.from_exception_data(
|
||||
title="TestResponse",
|
||||
line_errors=[
|
||||
{
|
||||
"type": "missing",
|
||||
"loc": ("summary",),
|
||||
"msg": "Field required",
|
||||
"input": {},
|
||||
}
|
||||
],
|
||||
async def workflow_run_side_effect(*args, **kwargs):
|
||||
call_times.append(monotonic())
|
||||
if len(call_times) < 3:
|
||||
raise WorkflowTimeoutError("Operation timed out after 120 seconds")
|
||||
return {
|
||||
"success": TestResponse(
|
||||
title="Test", summary="Summary", confidence=0.95
|
||||
)
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
}
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=capture_and_fail_first
|
||||
)
|
||||
with (
|
||||
patch("reflector.llm.StructuredOutputWorkflow") as mock_workflow_class,
|
||||
patch("reflector.llm.TreeSummarize") as mock_summarize,
|
||||
patch("reflector.llm.Settings") as mock_settings,
|
||||
):
|
||||
mock_workflow = MagicMock()
|
||||
mock_workflow.run = AsyncMock(side_effect=workflow_run_side_effect)
|
||||
mock_workflow_class.return_value = mock_workflow
|
||||
|
||||
await llm.get_structured_response(
|
||||
prompt="Summarize this",
|
||||
texts=["The meeting covered project updates"],
|
||||
output_cls=TestResponse,
|
||||
)
|
||||
|
||||
# Both first attempt and reflection retry should include the texts
|
||||
assert len(captured_prompts) == 2
|
||||
for prompt_sent in captured_prompts:
|
||||
assert "Summarize this" in prompt_sent
|
||||
assert "The meeting covered project updates" in prompt_sent
|
||||
|
||||
|
||||
class TestReflectionRetryBackoff:
|
||||
"""Test the reflection retry timing behavior"""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_value_error_triggers_reflection(self, test_settings):
|
||||
"""Test that ValueError (parse failure) also triggers reflection retry"""
|
||||
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
|
||||
|
||||
call_count = {"count": 0}
|
||||
|
||||
async def astructured_predict_handler(output_cls, prompt_tmpl, **kwargs):
|
||||
call_count["count"] += 1
|
||||
if call_count["count"] == 1:
|
||||
raise ValueError("Could not parse output")
|
||||
assert "reflection" in kwargs
|
||||
return TestResponse(title="Test", summary="Summary", confidence=0.95)
|
||||
|
||||
with patch("reflector.llm.Settings") as mock_settings:
|
||||
mock_settings.llm.astructured_predict = AsyncMock(
|
||||
side_effect=astructured_predict_handler
|
||||
mock_summarizer = MagicMock()
|
||||
mock_summarize.return_value = mock_summarizer
|
||||
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
|
||||
mock_settings.llm.acomplete = AsyncMock(
|
||||
return_value=make_completion_response(
|
||||
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
|
||||
)
|
||||
)
|
||||
|
||||
result = await llm.get_structured_response(
|
||||
@@ -378,20 +481,8 @@ class TestReflectionRetryBackoff:
|
||||
)
|
||||
|
||||
assert result.title == "Test"
|
||||
assert call_count["count"] == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_format_validation_error_method(self, test_settings):
|
||||
"""Test _format_validation_error produces correct feedback"""
|
||||
# ValidationError
|
||||
try:
|
||||
TestResponse(title="x", summary="y", confidence=5.0) # confidence > 1
|
||||
except ValidationError as e:
|
||||
result = LLM._format_validation_error(e)
|
||||
assert "Schema validation errors" in result
|
||||
assert "confidence" in result
|
||||
|
||||
# ValueError
|
||||
result = LLM._format_validation_error(ValueError("bad input"))
|
||||
assert "Parse error:" in result
|
||||
assert "bad input" in result
|
||||
if len(call_times) >= 2:
|
||||
time_between_calls = call_times[1] - call_times[0]
|
||||
assert (
|
||||
time_between_calls >= 1.5
|
||||
), f"Expected ~2s backoff, got {time_between_calls}s"
|
||||
|
||||
@@ -1,58 +0,0 @@
|
||||
"""Tests for password hashing utilities."""
|
||||
|
||||
from reflector.auth.password_utils import hash_password, verify_password
|
||||
|
||||
|
||||
def test_hash_and_verify():
|
||||
pw = "my-secret-password"
|
||||
h = hash_password(pw)
|
||||
assert verify_password(pw, h) is True
|
||||
|
||||
|
||||
def test_wrong_password():
|
||||
h = hash_password("correct")
|
||||
assert verify_password("wrong", h) is False
|
||||
|
||||
|
||||
def test_hash_format():
|
||||
h = hash_password("test")
|
||||
parts = h.split("$")
|
||||
assert len(parts) == 3
|
||||
assert parts[0] == "pbkdf2:sha256:100000"
|
||||
assert len(parts[1]) == 32 # 16 bytes hex = 32 chars
|
||||
assert len(parts[2]) == 64 # sha256 hex = 64 chars
|
||||
|
||||
|
||||
def test_different_salts():
|
||||
h1 = hash_password("same")
|
||||
h2 = hash_password("same")
|
||||
assert h1 != h2 # different salts produce different hashes
|
||||
assert verify_password("same", h1) is True
|
||||
assert verify_password("same", h2) is True
|
||||
|
||||
|
||||
def test_malformed_hash():
|
||||
assert verify_password("test", "garbage") is False
|
||||
assert verify_password("test", "") is False
|
||||
assert verify_password("test", "pbkdf2:sha256:100000$short") is False
|
||||
|
||||
|
||||
def test_empty_password():
|
||||
h = hash_password("")
|
||||
assert verify_password("", h) is True
|
||||
assert verify_password("notempty", h) is False
|
||||
|
||||
|
||||
def test_unicode_password():
|
||||
pw = "p\u00e4ssw\u00f6rd\U0001f512"
|
||||
h = hash_password(pw)
|
||||
assert verify_password(pw, h) is True
|
||||
assert verify_password("password", h) is False
|
||||
|
||||
|
||||
def test_constant_time_comparison():
|
||||
"""Verify that hmac.compare_digest is used (structural test)."""
|
||||
import inspect
|
||||
|
||||
source = inspect.getsource(verify_password)
|
||||
assert "hmac.compare_digest" in source
|
||||
@@ -319,51 +319,3 @@ def test_aws_storage_constructor_rejects_mixed_auth():
|
||||
aws_secret_access_key="test-secret",
|
||||
aws_role_arn="arn:aws:iam::123456789012:role/test-role",
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_aws_storage_custom_endpoint_url():
|
||||
"""Test that custom endpoint_url configures path-style addressing and passes endpoint to client."""
|
||||
storage = AwsStorage(
|
||||
aws_bucket_name="reflector-media",
|
||||
aws_region="garage",
|
||||
aws_access_key_id="GKtest",
|
||||
aws_secret_access_key="secret",
|
||||
aws_endpoint_url="http://garage:3900",
|
||||
)
|
||||
assert storage._endpoint_url == "http://garage:3900"
|
||||
assert storage.boto_config.s3["addressing_style"] == "path"
|
||||
assert storage.base_url == "http://garage:3900/reflector-media/"
|
||||
# retries config preserved (merge, not replace)
|
||||
assert storage.boto_config.retries["max_attempts"] == 3
|
||||
|
||||
mock_client = AsyncMock()
|
||||
mock_client.put_object = AsyncMock()
|
||||
mock_client.__aenter__ = AsyncMock(return_value=mock_client)
|
||||
mock_client.__aexit__ = AsyncMock(return_value=None)
|
||||
mock_client.generate_presigned_url = AsyncMock(
|
||||
return_value="http://garage:3900/reflector-media/test.txt"
|
||||
)
|
||||
|
||||
with patch.object(
|
||||
storage.session, "client", return_value=mock_client
|
||||
) as mock_session_client:
|
||||
await storage.put_file("test.txt", b"data")
|
||||
mock_session_client.assert_called_with(
|
||||
"s3", config=storage.boto_config, endpoint_url="http://garage:3900"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_aws_storage_none_endpoint_url():
|
||||
"""Test that None endpoint preserves current AWS behavior."""
|
||||
storage = AwsStorage(
|
||||
aws_bucket_name="reflector-bucket",
|
||||
aws_region="us-east-1",
|
||||
aws_access_key_id="AKIAtest",
|
||||
aws_secret_access_key="secret",
|
||||
)
|
||||
assert storage._endpoint_url is None
|
||||
assert storage.base_url == "https://reflector-bucket.s3.amazonaws.com/"
|
||||
# No s3 addressing_style override — boto_config should only have retries
|
||||
assert not hasattr(storage.boto_config, "s3") or storage.boto_config.s3 is None
|
||||
|
||||
331
server/tests/test_ws_dag_broadcast.py
Normal file
@@ -0,0 +1,331 @@
|
||||
"""WebSocket broadcast delivery tests for STATUS and DAG_STATUS events.
|
||||
|
||||
Tests the full chain identified in DEBUG.md:
|
||||
broadcast_event() → ws_manager.send_json() → Redis/in-memory pub/sub
|
||||
→ _pubsub_data_reader() → socket.send_json() → WebSocket client
|
||||
|
||||
Covers:
|
||||
1. STATUS event delivery to transcript room WS
|
||||
2. DAG_STATUS event delivery to transcript room WS
|
||||
3. Full broadcast_event() chain (requires broadcast.py patching)
|
||||
4. _pubsub_data_reader resilience when a client disconnects
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import threading
|
||||
import time
|
||||
|
||||
import pytest
|
||||
from httpx import AsyncClient
|
||||
from httpx_ws import aconnect_ws
|
||||
from uvicorn import Config, Server
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def appserver_ws_broadcast(setup_database, monkeypatch):
|
||||
"""Start real uvicorn server for WebSocket broadcast tests.
|
||||
|
||||
Also patches broadcast.py's get_ws_manager (missing from conftest autouse fixture).
|
||||
"""
|
||||
# Patch broadcast.py's get_ws_manager — conftest.py misses this module.
|
||||
# Without this, broadcast_event() creates a real Redis ws_manager.
|
||||
import reflector.ws_manager as ws_mod
|
||||
from reflector.app import app
|
||||
from reflector.db import get_database
|
||||
|
||||
monkeypatch.setattr(
|
||||
"reflector.hatchet.broadcast.get_ws_manager", ws_mod.get_ws_manager
|
||||
)
|
||||
|
||||
host = "127.0.0.1"
|
||||
port = 1259
|
||||
server_started = threading.Event()
|
||||
server_exception = None
|
||||
server_instance = None
|
||||
|
||||
def run_server():
|
||||
nonlocal server_exception, server_instance
|
||||
try:
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
config = Config(app=app, host=host, port=port, loop=loop)
|
||||
server_instance = Server(config)
|
||||
|
||||
async def start_server():
|
||||
database = get_database()
|
||||
await database.connect()
|
||||
try:
|
||||
await server_instance.serve()
|
||||
finally:
|
||||
await database.disconnect()
|
||||
|
||||
server_started.set()
|
||||
loop.run_until_complete(start_server())
|
||||
except Exception as e:
|
||||
server_exception = e
|
||||
server_started.set()
|
||||
finally:
|
||||
loop.close()
|
||||
|
||||
server_thread = threading.Thread(target=run_server, daemon=True)
|
||||
server_thread.start()
|
||||
|
||||
server_started.wait(timeout=30)
|
||||
if server_exception:
|
||||
raise server_exception
|
||||
|
||||
time.sleep(0.5)
|
||||
|
||||
yield host, port
|
||||
|
||||
if server_instance:
|
||||
server_instance.should_exit = True
|
||||
server_thread.join(timeout=2.0)
|
||||
|
||||
from reflector.ws_manager import reset_ws_manager
|
||||
|
||||
reset_ws_manager()
|
||||
|
||||
|
||||
async def _create_transcript(host: str, port: int, name: str) -> str:
|
||||
"""Create a transcript via ASGI transport and return its ID."""
|
||||
from reflector.app import app
|
||||
|
||||
async with AsyncClient(app=app, base_url=f"http://{host}:{port}/v1") as ac:
|
||||
resp = await ac.post("/transcripts", json={"name": name})
|
||||
assert resp.status_code == 200, f"Failed to create transcript: {resp.text}"
|
||||
return resp.json()["id"]
|
||||
|
||||
|
||||
async def _drain_historical_events(ws, timeout: float = 0.5) -> list[dict]:
|
||||
"""Read all historical events sent on WS connect (non-blocking drain)."""
|
||||
events = []
|
||||
deadline = asyncio.get_event_loop().time() + timeout
|
||||
while asyncio.get_event_loop().time() < deadline:
|
||||
try:
|
||||
msg = await asyncio.wait_for(ws.receive_json(), timeout=0.1)
|
||||
events.append(msg)
|
||||
except Exception:  # includes asyncio.TimeoutError; any receive failure ends the drain
|
||||
break
|
||||
return events
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test 1: STATUS event delivery via ws_manager.send_json
|
||||
# ---------------------------------------------------------------------------
|
||||
@pytest.mark.asyncio
|
||||
async def test_transcript_ws_receives_status_via_send_json(appserver_ws_broadcast):
|
||||
"""STATUS event published via ws_manager.send_json() arrives at transcript room WS."""
|
||||
host, port = appserver_ws_broadcast
|
||||
transcript_id = await _create_transcript(host, port, "Status send_json test")
|
||||
|
||||
ws_url = f"http://{host}:{port}/v1/transcripts/{transcript_id}/events"
|
||||
async with aconnect_ws(ws_url) as ws:
|
||||
await _drain_historical_events(ws)
|
||||
|
||||
import reflector.ws_manager as ws_mod
|
||||
|
||||
ws_manager = ws_mod.get_ws_manager()
|
||||
await ws_manager.send_json(
|
||||
room_id=f"ts:{transcript_id}",
|
||||
message={"event": "STATUS", "data": {"value": "processing"}},
|
||||
)
|
||||
|
||||
msg = await asyncio.wait_for(ws.receive_json(), timeout=5.0)
|
||||
assert msg["event"] == "STATUS"
|
||||
assert msg["data"]["value"] == "processing"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test 2: DAG_STATUS event delivery via ws_manager.send_json
|
||||
# ---------------------------------------------------------------------------
|
||||
@pytest.mark.asyncio
|
||||
async def test_transcript_ws_receives_dag_status_via_send_json(appserver_ws_broadcast):
|
||||
"""DAG_STATUS event published via ws_manager.send_json() arrives at transcript room WS."""
|
||||
host, port = appserver_ws_broadcast
|
||||
transcript_id = await _create_transcript(host, port, "DAG_STATUS send_json test")
|
||||
|
||||
dag_payload = {
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "test-run-123",
|
||||
"tasks": [
|
||||
{
|
||||
"name": "get_recording",
|
||||
"status": "completed",
|
||||
"started_at": "2025-01-01T00:00:00Z",
|
||||
"finished_at": "2025-01-01T00:00:05Z",
|
||||
"duration_seconds": 5.0,
|
||||
"parents": [],
|
||||
"error": None,
|
||||
"children_total": None,
|
||||
"children_completed": None,
|
||||
"progress_pct": None,
|
||||
},
|
||||
{
|
||||
"name": "process_tracks",
|
||||
"status": "running",
|
||||
"started_at": "2025-01-01T00:00:05Z",
|
||||
"finished_at": None,
|
||||
"duration_seconds": None,
|
||||
"parents": ["get_recording"],
|
||||
"error": None,
|
||||
"children_total": 3,
|
||||
"children_completed": 1,
|
||||
"progress_pct": 33.3,
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
ws_url = f"http://{host}:{port}/v1/transcripts/{transcript_id}/events"
|
||||
async with aconnect_ws(ws_url) as ws:
|
||||
await _drain_historical_events(ws)
|
||||
|
||||
import reflector.ws_manager as ws_mod
|
||||
|
||||
ws_manager = ws_mod.get_ws_manager()
|
||||
await ws_manager.send_json(
|
||||
room_id=f"ts:{transcript_id}",
|
||||
message=dag_payload,
|
||||
)
|
||||
|
||||
msg = await asyncio.wait_for(ws.receive_json(), timeout=5.0)
|
||||
assert msg["event"] == "DAG_STATUS"
|
||||
assert msg["data"]["workflow_run_id"] == "test-run-123"
|
||||
assert len(msg["data"]["tasks"]) == 2
|
||||
assert msg["data"]["tasks"][0]["name"] == "get_recording"
|
||||
assert msg["data"]["tasks"][0]["status"] == "completed"
|
||||
assert msg["data"]["tasks"][1]["name"] == "process_tracks"
|
||||
assert msg["data"]["tasks"][1]["children_completed"] == 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test 3: Full broadcast_event() chain for STATUS
|
||||
# ---------------------------------------------------------------------------
|
||||
@pytest.mark.asyncio
|
||||
async def test_broadcast_event_delivers_status_to_transcript_ws(appserver_ws_broadcast):
|
||||
"""broadcast_event() end-to-end: STATUS event reaches transcript room WS."""
|
||||
host, port = appserver_ws_broadcast
|
||||
transcript_id = await _create_transcript(host, port, "broadcast_event STATUS test")
|
||||
|
||||
ws_url = f"http://{host}:{port}/v1/transcripts/{transcript_id}/events"
|
||||
async with aconnect_ws(ws_url) as ws:
|
||||
await _drain_historical_events(ws)
|
||||
|
||||
from reflector.db.transcripts import TranscriptEvent
|
||||
from reflector.hatchet.broadcast import broadcast_event
|
||||
from reflector.logger import logger
|
||||
|
||||
log = logger.bind(transcript_id=transcript_id)
|
||||
event = TranscriptEvent(event="STATUS", data={"value": "processing"})
|
||||
await broadcast_event(transcript_id, event, logger=log)
|
||||
|
||||
msg = await asyncio.wait_for(ws.receive_json(), timeout=5.0)
|
||||
assert msg["event"] == "STATUS"
|
||||
assert msg["data"]["value"] == "processing"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test 4: Full broadcast_event() chain for DAG_STATUS
|
||||
# ---------------------------------------------------------------------------
|
||||
@pytest.mark.asyncio
|
||||
async def test_broadcast_event_delivers_dag_status_to_transcript_ws(
|
||||
appserver_ws_broadcast,
|
||||
):
|
||||
"""broadcast_event() end-to-end: DAG_STATUS event reaches transcript room WS."""
|
||||
host, port = appserver_ws_broadcast
|
||||
transcript_id = await _create_transcript(host, port, "broadcast_event DAG test")
|
||||
|
||||
ws_url = f"http://{host}:{port}/v1/transcripts/{transcript_id}/events"
|
||||
async with aconnect_ws(ws_url) as ws:
|
||||
await _drain_historical_events(ws)
|
||||
|
||||
from reflector.db.transcripts import TranscriptEvent
|
||||
from reflector.hatchet.broadcast import broadcast_event
|
||||
from reflector.logger import logger
|
||||
|
||||
log = logger.bind(transcript_id=transcript_id)
|
||||
event = TranscriptEvent(
|
||||
event="DAG_STATUS",
|
||||
data={
|
||||
"workflow_run_id": "test-run-456",
|
||||
"tasks": [
|
||||
{
|
||||
"name": "get_recording",
|
||||
"status": "running",
|
||||
"started_at": None,
|
||||
"finished_at": None,
|
||||
"duration_seconds": None,
|
||||
"parents": [],
|
||||
"error": None,
|
||||
"children_total": None,
|
||||
"children_completed": None,
|
||||
"progress_pct": None,
|
||||
}
|
||||
],
|
||||
},
|
||||
)
|
||||
await broadcast_event(transcript_id, event, logger=log)
|
||||
|
||||
msg = await asyncio.wait_for(ws.receive_json(), timeout=5.0)
|
||||
assert msg["event"] == "DAG_STATUS"
|
||||
assert msg["data"]["tasks"][0]["name"] == "get_recording"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test 5: Multiple rapid events arrive in order
|
||||
# ---------------------------------------------------------------------------
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_events_arrive_in_order(appserver_ws_broadcast):
|
||||
"""Multiple STATUS then DAG_STATUS events arrive in correct order."""
|
||||
host, port = appserver_ws_broadcast
|
||||
transcript_id = await _create_transcript(host, port, "ordering test")
|
||||
|
||||
ws_url = f"http://{host}:{port}/v1/transcripts/{transcript_id}/events"
|
||||
async with aconnect_ws(ws_url) as ws:
|
||||
await _drain_historical_events(ws)
|
||||
|
||||
import reflector.ws_manager as ws_mod
|
||||
|
||||
ws_manager = ws_mod.get_ws_manager()
|
||||
|
||||
await ws_manager.send_json(
|
||||
room_id=f"ts:{transcript_id}",
|
||||
message={"event": "STATUS", "data": {"value": "processing"}},
|
||||
)
|
||||
await ws_manager.send_json(
|
||||
room_id=f"ts:{transcript_id}",
|
||||
message={
|
||||
"event": "DAG_STATUS",
|
||||
"data": {"workflow_run_id": "r1", "tasks": []},
|
||||
},
|
||||
)
|
||||
await ws_manager.send_json(
|
||||
room_id=f"ts:{transcript_id}",
|
||||
message={
|
||||
"event": "DAG_STATUS",
|
||||
"data": {
|
||||
"workflow_run_id": "r1",
|
||||
"tasks": [{"name": "a", "status": "running"}],
|
||||
},
|
||||
},
|
||||
)
|
||||
await ws_manager.send_json(
|
||||
room_id=f"ts:{transcript_id}",
|
||||
message={"event": "STATUS", "data": {"value": "ended"}},
|
||||
)
|
||||
|
||||
msgs = []
|
||||
for _ in range(4):
|
||||
msg = await asyncio.wait_for(ws.receive_json(), timeout=5.0)
|
||||
msgs.append(msg)
|
||||
|
||||
assert msgs[0]["event"] == "STATUS"
|
||||
assert msgs[0]["data"]["value"] == "processing"
|
||||
assert msgs[1]["event"] == "DAG_STATUS"
|
||||
assert msgs[1]["data"]["tasks"] == []
|
||||
assert msgs[2]["event"] == "DAG_STATUS"
|
||||
assert len(msgs[2]["data"]["tasks"]) == 1
|
||||
assert msgs[3]["event"] == "STATUS"
|
||||
assert msgs[3]["data"]["value"] == "ended"
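The module docstring at the top of this new test file names the chain under test; stripped to its core, the publishing side these tests drive looks roughly like the sketch below. It is illustrative only, not the body of reflector.hatchet.broadcast.

```python
from reflector.db.transcripts import TranscriptEvent
from reflector.ws_manager import get_ws_manager


async def broadcast_event_sketch(transcript_id: str, event: TranscriptEvent) -> None:
    # Publish to the transcript room; _pubsub_data_reader on the server side
    # forwards the message to every connected WebSocket via socket.send_json()
    ws_manager = get_ws_manager()
    await ws_manager.send_json(
        room_id=f"ts:{transcript_id}",
        message={"event": event.event, "data": event.data},
    )
```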
|
||||
3755
server/uv.lock
generated
File diff suppressed because it is too large
@@ -27,5 +27,5 @@ WEBSOCKET_URL=ws://127.0.0.1:1250
|
||||
AUTH_CALLBACK_URL=http://localhost:3000/auth-callback
|
||||
|
||||
# Sentry
|
||||
# NEXT_PUBLIC_SENTRY_DSN=https://your-dsn@sentry.io/project-id
|
||||
# SENTRY_DSN=https://your-dsn@sentry.io/project-id
|
||||
# SENTRY_IGNORE_API_RESOLUTION_ERROR=1
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
# =======================================================
|
||||
# Reflector Self-Hosted Production — Frontend Configuration
|
||||
# Generated by: ./scripts/setup-selfhosted.sh
|
||||
# =======================================================
|
||||
|
||||
# Site URL — set to your domain or server IP
|
||||
# The setup script auto-detects this on Linux.
|
||||
SITE_URL=https://localhost
|
||||
NEXTAUTH_URL=https://localhost
|
||||
NEXTAUTH_SECRET=changeme-generate-a-secure-random-string
|
||||
|
||||
# API URLs
|
||||
# Public-facing (what the browser uses):
|
||||
API_URL=https://localhost
|
||||
WEBSOCKET_URL=auto
|
||||
|
||||
# Internal Docker network (server-side rendering):
|
||||
SERVER_API_URL=http://server:1250
|
||||
KV_URL=redis://redis:6379
|
||||
|
||||
# Authentication
|
||||
# Set to true when Authentik or password auth is configured
|
||||
FEATURE_REQUIRE_LOGIN=false
|
||||
|
||||
# Auth provider: "authentik" or "credentials"
|
||||
# Set to "credentials" when using password auth backend
|
||||
# AUTH_PROVIDER=credentials
|
||||
|
||||
# Nullify auth vars when not using Authentik
|
||||
AUTHENTIK_ISSUER=
|
||||
AUTHENTIK_REFRESH_TOKEN_URL=
|
||||
|
||||
# =======================================================
|
||||
# Authentik OAuth/OIDC (Optional)
|
||||
# Uncomment and configure when enabling authentication.
|
||||
# See docsv2/selfhosted-production.md for setup instructions.
|
||||
# =======================================================
|
||||
# FEATURE_REQUIRE_LOGIN=true
|
||||
# AUTHENTIK_ISSUER=https://authentik.example.com/application/o/reflector
|
||||
# AUTHENTIK_REFRESH_TOKEN_URL=https://authentik.example.com/application/o/token/
|
||||
# AUTHENTIK_CLIENT_ID=your-client-id
|
||||
# AUTHENTIK_CLIENT_SECRET=your-client-secret
|
||||
|
||||
# =======================================================
|
||||
# Feature Flags
|
||||
# =======================================================
|
||||
# FEATURE_ROOMS=true
|
||||
# FEATURE_BROWSE=true
|
||||
|
||||
# =======================================================
|
||||
# Sentry (Optional)
|
||||
# =======================================================
|
||||
# NEXT_PUBLIC_SENTRY_DSN=
|
||||
61
www/app/(app)/browse/_components/DagProgressDots.tsx
Normal file
@@ -0,0 +1,61 @@
|
||||
import React from "react";
|
||||
import { Box, Flex } from "@chakra-ui/react";
|
||||
import type { DagTask } from "../../../lib/UserEventsProvider";
|
||||
|
||||
const pulseKeyframes = `
|
||||
@keyframes dagDotPulse {
|
||||
0%, 100% { opacity: 1; }
|
||||
50% { opacity: 0.3; }
|
||||
}
|
||||
`;
|
||||
|
||||
function humanizeTaskName(name: string): string {
|
||||
return name
|
||||
.split("_")
|
||||
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
|
||||
.join(" ");
|
||||
}
|
||||
|
||||
function dotProps(status: DagTask["status"]): Record<string, unknown> {
|
||||
switch (status) {
|
||||
case "completed":
|
||||
return { bg: "green.500" };
|
||||
case "running":
|
||||
return {
|
||||
bg: "blue.500",
|
||||
style: { animation: "dagDotPulse 1.5s ease-in-out infinite" },
|
||||
};
|
||||
case "failed":
|
||||
return { bg: "red.500" };
|
||||
case "cancelled":
|
||||
return { bg: "gray.400" };
|
||||
case "queued":
|
||||
default:
|
||||
return {
|
||||
bg: "transparent",
|
||||
border: "1px solid",
|
||||
borderColor: "gray.400",
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export default function DagProgressDots({ tasks }: { tasks: DagTask[] }) {
|
||||
return (
|
||||
<>
|
||||
<style>{pulseKeyframes}</style>
|
||||
<Flex gap="2px" alignItems="center" flexWrap="wrap">
|
||||
{tasks.map((task) => (
|
||||
<Box
|
||||
key={task.name}
|
||||
w="4px"
|
||||
h="4px"
|
||||
borderRadius="full"
|
||||
flexShrink={0}
|
||||
title={humanizeTaskName(task.name)}
|
||||
{...dotProps(task.status)}
|
||||
/>
|
||||
))}
|
||||
</Flex>
|
||||
</>
|
||||
);
|
||||
}
|
||||
@@ -19,6 +19,7 @@ import {
|
||||
generateTextFragment,
|
||||
} from "../../../lib/textHighlight";
|
||||
import type { components } from "../../../reflector-api";
|
||||
import type { DagTask } from "../../../lib/UserEventsProvider";
|
||||
|
||||
type SearchResult = components["schemas"]["SearchResult"];
|
||||
type SourceKind = components["schemas"]["SourceKind"];
|
||||
@@ -29,6 +30,7 @@ interface TranscriptCardsProps {
|
||||
isLoading?: boolean;
|
||||
onDelete: (transcriptId: string) => void;
|
||||
onReprocess: (transcriptId: string) => void;
|
||||
dagStatusMap?: Map<string, DagTask[]>;
|
||||
}
|
||||
|
||||
function highlightText(text: string, query: string): React.ReactNode {
|
||||
@@ -102,11 +104,13 @@ function TranscriptCard({
|
||||
query,
|
||||
onDelete,
|
||||
onReprocess,
|
||||
dagStatusMap,
|
||||
}: {
|
||||
result: SearchResult;
|
||||
query: string;
|
||||
onDelete: (transcriptId: string) => void;
|
||||
onReprocess: (transcriptId: string) => void;
|
||||
dagStatusMap?: Map<string, DagTask[]>;
|
||||
}) {
|
||||
const [isExpanded, setIsExpanded] = useState(false);
|
||||
|
||||
@@ -137,7 +141,16 @@ function TranscriptCard({
|
||||
<Box borderWidth={1} p={4} borderRadius="md" fontSize="sm">
|
||||
<Flex justify="space-between" alignItems="flex-start" gap="2">
|
||||
<Box>
|
||||
<TranscriptStatusIcon status={result.status} />
|
||||
<TranscriptStatusIcon
|
||||
status={result.status}
|
||||
dagStatus={
|
||||
dagStatusMap?.get(result.id) ??
|
||||
((result as Record<string, unknown>).dag_status as
|
||||
| DagTask[]
|
||||
| null) ??
|
||||
null
|
||||
}
|
||||
/>
|
||||
</Box>
|
||||
<Box flex="1">
|
||||
{/* Title with highlighting and text fragment for deep linking */}
|
||||
@@ -284,6 +297,7 @@ export default function TranscriptCards({
|
||||
isLoading,
|
||||
onDelete,
|
||||
onReprocess,
|
||||
dagStatusMap,
|
||||
}: TranscriptCardsProps) {
|
||||
return (
|
||||
<Box position="relative">
|
||||
@@ -315,6 +329,7 @@ export default function TranscriptCards({
|
||||
query={query}
|
||||
onDelete={onDelete}
|
||||
onReprocess={onReprocess}
|
||||
dagStatusMap={dagStatusMap}
|
||||
/>
|
||||
))}
|
||||
</Stack>
|
||||
|
||||
@@ -8,13 +8,17 @@ import {
|
||||
FaGear,
|
||||
} from "react-icons/fa6";
|
||||
import { TranscriptStatus } from "../../../lib/transcript";
|
||||
import type { DagTask } from "../../../lib/UserEventsProvider";
|
||||
import DagProgressDots from "./DagProgressDots";
|
||||
|
||||
interface TranscriptStatusIconProps {
|
||||
status: TranscriptStatus;
|
||||
dagStatus?: DagTask[] | null;
|
||||
}
|
||||
|
||||
export default function TranscriptStatusIcon({
|
||||
status,
|
||||
dagStatus,
|
||||
}: TranscriptStatusIconProps) {
|
||||
switch (status) {
|
||||
case "ended":
|
||||
@@ -36,6 +40,9 @@ export default function TranscriptStatusIcon({
|
||||
</Box>
|
||||
);
|
||||
case "processing":
|
||||
if (dagStatus && dagStatus.length > 0) {
|
||||
return <DagProgressDots tasks={dagStatus} />;
|
||||
}
|
||||
return (
|
||||
<Box as="span" title="Processing in progress">
|
||||
<Icon color="gray.500" as={FaGear} />
|
||||
|
||||
@@ -43,6 +43,7 @@ import DeleteTranscriptDialog from "./_components/DeleteTranscriptDialog";
|
||||
import { formatLocalDate } from "../../lib/time";
|
||||
import { RECORD_A_MEETING_URL } from "../../api/urls";
|
||||
import { useUserName } from "../../lib/useUserName";
|
||||
import { useDagStatusMap } from "../../lib/UserEventsProvider";
|
||||
|
||||
const SEARCH_FORM_QUERY_INPUT_NAME = "query" as const;
|
||||
|
||||
@@ -273,6 +274,7 @@ export default function TranscriptBrowser() {
|
||||
}, [JSON.stringify(searchFilters)]);
|
||||
|
||||
const userName = useUserName();
|
||||
const dagStatusMap = useDagStatusMap();
|
||||
const [deletionLoading, setDeletionLoading] = useState(false);
|
||||
const cancelRef = React.useRef(null);
|
||||
const [transcriptToDeleteId, setTranscriptToDeleteId] =
|
||||
@@ -408,6 +410,7 @@ export default function TranscriptBrowser() {
|
||||
isLoading={searchLoading}
|
||||
onDelete={setTranscriptToDeleteId}
|
||||
onReprocess={handleProcessTranscript}
|
||||
dagStatusMap={dagStatusMap}
|
||||
/>
|
||||
|
||||
{!searchLoading && results.length === 0 && (
|
||||
|
||||
@@ -0,0 +1,190 @@
|
||||
"use client";
|
||||
|
||||
import { useEffect, useState } from "react";
|
||||
import { Table, Box, Icon, Spinner, Text, Badge } from "@chakra-ui/react";
|
||||
import { FaCheck, FaXmark, FaClock, FaMinus } from "react-icons/fa6";
|
||||
import type { DagTask, DagTaskStatus } from "../../useWebSockets";
|
||||
|
||||
function humanizeTaskName(name: string): string {
|
||||
return name
|
||||
.split("_")
|
||||
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
|
||||
.join(" ");
|
||||
}
|
||||
|
||||
function formatDuration(seconds: number): string {
|
||||
if (seconds < 60) {
|
||||
return `${Math.round(seconds)}s`;
|
||||
}
|
||||
const minutes = Math.floor(seconds / 60);
|
||||
const remainingSeconds = Math.round(seconds % 60);
|
||||
return `${minutes}m ${remainingSeconds}s`;
|
||||
}
|
||||
|
||||
function StatusIcon({ status }: { status: DagTaskStatus }) {
|
||||
switch (status) {
|
||||
case "completed":
|
||||
return (
|
||||
<Box as="span" title="Completed">
|
||||
<Icon color="green.500" as={FaCheck} />
|
||||
</Box>
|
||||
);
|
||||
case "running":
|
||||
return <Spinner size="sm" color="blue.500" />;
|
||||
case "failed":
|
||||
return (
|
||||
<Box as="span" title="Failed">
|
||||
<Icon color="red.500" as={FaXmark} />
|
||||
</Box>
|
||||
);
|
||||
case "queued":
|
||||
return (
|
||||
<Box as="span" title="Queued">
|
||||
<Icon color="gray.400" as={FaClock} />
|
||||
</Box>
|
||||
);
|
||||
case "cancelled":
|
||||
return (
|
||||
<Box as="span" title="Cancelled">
|
||||
<Icon color="gray.400" as={FaMinus} />
|
||||
</Box>
|
||||
);
|
||||
default:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function ElapsedTimer({ startedAt }: { startedAt: string }) {
|
||||
const [elapsed, setElapsed] = useState<number>(() => {
|
||||
return (Date.now() - new Date(startedAt).getTime()) / 1000;
|
||||
});
|
||||
|
||||
useEffect(() => {
|
||||
const interval = setInterval(() => {
|
||||
setElapsed((Date.now() - new Date(startedAt).getTime()) / 1000);
|
||||
}, 1000);
|
||||
return () => clearInterval(interval);
|
||||
}, [startedAt]);
|
||||
|
||||
return <Text fontSize="sm">{formatDuration(elapsed)}</Text>;
|
||||
}
|
||||
|
||||
function DurationCell({ task }: { task: DagTask }) {
|
||||
if (task.status === "completed" && task.duration_seconds !== null) {
|
||||
return <Text fontSize="sm">{formatDuration(task.duration_seconds)}</Text>;
|
||||
}
|
||||
if (task.status === "running" && task.started_at) {
|
||||
return <ElapsedTimer startedAt={task.started_at} />;
|
||||
}
|
||||
return (
|
||||
<Text fontSize="sm" color="gray.400">
|
||||
--
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
|
||||
function ProgressCell({ task }: { task: DagTask }) {
|
||||
if (task.progress_pct === null && task.children_total === null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<Box>
|
||||
{task.progress_pct !== null && (
|
||||
<Box
|
||||
w="100%"
|
||||
h="6px"
|
||||
bg="gray.200"
|
||||
borderRadius="full"
|
||||
overflow="hidden"
|
||||
>
|
||||
<Box
|
||||
h="100%"
|
||||
w={`${Math.min(100, Math.max(0, task.progress_pct))}%`}
|
||||
bg={task.status === "failed" ? "red.400" : "blue.400"}
|
||||
borderRadius="full"
|
||||
transition="width 0.3s ease"
|
||||
/>
|
||||
</Box>
|
||||
)}
|
||||
{task.children_total !== null && (
|
||||
<Badge
|
||||
size="sm"
|
||||
colorPalette="gray"
|
||||
mt={task.progress_pct !== null ? 1 : 0}
|
||||
>
|
||||
{task.children_completed ?? 0}/{task.children_total}
|
||||
</Badge>
|
||||
)}
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
|
||||
function TaskRow({ task }: { task: DagTask }) {
|
||||
const [expanded, setExpanded] = useState(false);
|
||||
const hasFailed = task.status === "failed" && task.error;
|
||||
|
||||
return (
|
||||
<>
|
||||
<Table.Row
|
||||
cursor={hasFailed ? "pointer" : "default"}
|
||||
onClick={hasFailed ? () => setExpanded((prev) => !prev) : undefined}
|
||||
_hover={hasFailed ? { bg: "gray.50" } : undefined}
|
||||
>
|
||||
<Table.Cell>
|
||||
<Text fontSize="sm" fontWeight="medium">
|
||||
{humanizeTaskName(task.name)}
|
||||
</Text>
|
||||
</Table.Cell>
|
||||
<Table.Cell>
|
||||
<StatusIcon status={task.status} />
|
||||
</Table.Cell>
|
||||
<Table.Cell>
|
||||
<DurationCell task={task} />
|
||||
</Table.Cell>
|
||||
<Table.Cell>
|
||||
<ProgressCell task={task} />
|
||||
</Table.Cell>
|
||||
</Table.Row>
|
||||
{hasFailed && expanded && (
|
||||
<Table.Row>
|
||||
<Table.Cell colSpan={4}>
|
||||
<Box bg="red.50" p={3} borderRadius="md">
|
||||
<Text fontSize="xs" color="red.700" whiteSpace="pre-wrap">
|
||||
{task.error}
|
||||
</Text>
|
||||
</Box>
|
||||
</Table.Cell>
|
||||
</Table.Row>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
export default function DagProgressTable({ tasks }: { tasks: DagTask[] }) {
|
||||
return (
|
||||
<Box w="100%" overflowX="auto">
|
||||
<Table.Root size="sm">
|
||||
<Table.Header>
|
||||
<Table.Row>
|
||||
<Table.ColumnHeader fontWeight="600">Task</Table.ColumnHeader>
|
||||
<Table.ColumnHeader fontWeight="600" width="80px">
|
||||
Status
|
||||
</Table.ColumnHeader>
|
||||
<Table.ColumnHeader fontWeight="600" width="100px">
|
||||
Duration
|
||||
</Table.ColumnHeader>
|
||||
<Table.ColumnHeader fontWeight="600" width="140px">
|
||||
Progress
|
||||
</Table.ColumnHeader>
|
||||
</Table.Row>
|
||||
</Table.Header>
|
||||
<Table.Body>
|
||||
{tasks.map((task) => (
|
||||
<TaskRow key={task.name} task={task} />
|
||||
))}
|
||||
</Table.Body>
|
||||
</Table.Root>
|
||||
</Box>
|
||||
);
|
||||
}
|
||||
@@ -12,6 +12,9 @@ import { useRouter } from "next/navigation";
|
||||
import { useTranscriptGet } from "../../../../lib/apiHooks";
|
||||
import { parseNonEmptyString } from "../../../../lib/utils";
|
||||
import { useWebSockets } from "../../useWebSockets";
|
||||
import type { DagTask } from "../../useWebSockets";
|
||||
import { useDagStatusMap } from "../../../../lib/UserEventsProvider";
|
||||
import DagProgressTable from "./DagProgressTable";
|
||||
|
||||
type TranscriptProcessing = {
|
||||
params: Promise<{
|
||||
@@ -25,10 +28,21 @@ export default function TranscriptProcessing(details: TranscriptProcessing) {
|
||||
const router = useRouter();
|
||||
|
||||
const transcript = useTranscriptGet(transcriptId);
|
||||
useWebSockets(transcriptId);
|
||||
const { status: wsStatus, dagStatus: wsDagStatus } =
|
||||
useWebSockets(transcriptId);
|
||||
const userDagStatusMap = useDagStatusMap();
|
||||
const userDagStatus = userDagStatusMap.get(transcriptId) ?? null;
|
||||
|
||||
const restDagStatus: DagTask[] | null =
|
||||
((transcript.data as Record<string, unknown>)?.dag_status as
|
||||
| DagTask[]
|
||||
| null) ?? null;
|
||||
|
||||
// Prefer transcript room WS (most granular), then user room WS, then REST
|
||||
const dagStatus = wsDagStatus ?? userDagStatus ?? restDagStatus;
|
||||
|
||||
useEffect(() => {
|
||||
const status = transcript.data?.status;
|
||||
const status = wsStatus?.value ?? transcript.data?.status;
|
||||
if (!status) return;
|
||||
|
||||
if (status === "ended" || status === "error") {
|
||||
@@ -43,6 +57,7 @@ export default function TranscriptProcessing(details: TranscriptProcessing) {
|
||||
router.replace(dest);
|
||||
}
|
||||
}, [
|
||||
wsStatus?.value,
|
||||
transcript.data?.status,
|
||||
transcript.data?.source_kind,
|
||||
router,
|
||||
@@ -76,11 +91,29 @@ export default function TranscriptProcessing(details: TranscriptProcessing) {
|
||||
w={{ base: "full", md: "container.xl" }}
|
||||
>
|
||||
<Center h={"full"} w="full">
|
||||
<VStack gap={10} bg="gray.100" p={10} borderRadius="md" maxW="500px">
|
||||
<Spinner size="xl" color="blue.500" />
|
||||
<Heading size={"md"} textAlign="center">
|
||||
Processing recording
|
||||
</Heading>
|
||||
<VStack
|
||||
gap={10}
|
||||
bg="gray.100"
|
||||
p={10}
|
||||
borderRadius="md"
|
||||
maxW="600px"
|
||||
w="full"
|
||||
>
|
||||
{dagStatus ? (
|
||||
<>
|
||||
<Heading size={"md"} textAlign="center">
|
||||
Processing recording
|
||||
</Heading>
|
||||
<DagProgressTable tasks={dagStatus} />
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<Spinner size="xl" color="blue.500" />
|
||||
<Heading size={"md"} textAlign="center">
|
||||
Processing recording
|
||||
</Heading>
|
||||
</>
|
||||
)}
|
||||
<Text color="gray.600" textAlign="center">
|
||||
You can safely return to the library while your recording is being
|
||||
processed.
|
||||
|
||||
@@ -78,10 +78,7 @@ const useMp3 = (transcriptId: string, waiting?: boolean): Mp3Response => {
|
||||
|
||||
// Audio is not deleted, proceed to load it
|
||||
audioElement = document.createElement("audio");
|
||||
const audioUrl = `${API_URL}/v1/transcripts/${transcriptId}/audio/mp3`;
|
||||
audioElement.src = accessTokenInfo
|
||||
? `${audioUrl}?token=${encodeURIComponent(accessTokenInfo)}`
|
||||
: audioUrl;
|
||||
audioElement.src = `${API_URL}/v1/transcripts/${transcriptId}/audio/mp3`;
|
||||
audioElement.crossOrigin = "anonymous";
|
||||
audioElement.preload = "auto";
|
||||
|
||||
|
||||
@@ -23,16 +23,7 @@ const useWebRTC = (
|
||||
let p: Peer;
|
||||
|
||||
try {
|
||||
p = new Peer({
|
||||
initiator: true,
|
||||
stream: stream,
|
||||
// Disable trickle ICE: single SDP exchange (offer + answer) with all candidates.
|
||||
// Required for HTTP-based signaling; trickle needs WebSocket for candidate exchange.
|
||||
trickle: false,
|
||||
config: {
|
||||
iceServers: [{ urls: "stun:stun.l.google.com:19302" }],
|
||||
},
|
||||
});
|
||||
p = new Peer({ initiator: true, stream: stream });
|
||||
} catch (error) {
|
||||
setError(error as Error, "Error creating WebRTC");
|
||||
return;
|
||||
|
||||
@@ -1,22 +1,21 @@
|
||||
import { useEffect, useState } from "react";
|
||||
import { Topic, FinalSummary, Status } from "./webSocketTypes";
|
||||
import { useError } from "../../(errors)/errorContext";
|
||||
import type { components, operations } from "../../reflector-api";
|
||||
import type { components } from "../../reflector-api";
|
||||
type AudioWaveform = components["schemas"]["AudioWaveform"];
|
||||
type GetTranscriptSegmentTopic =
|
||||
components["schemas"]["GetTranscriptSegmentTopic"];
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { WEBSOCKET_URL } from "../../lib/apiClient";
|
||||
import { $api, WEBSOCKET_URL } from "../../lib/apiClient";
|
||||
import {
|
||||
invalidateTranscript,
|
||||
invalidateTranscriptTopics,
|
||||
invalidateTranscriptWaveform,
|
||||
} from "../../lib/apiHooks";
|
||||
import { useAuth } from "../../lib/AuthProvider";
|
||||
import { parseNonEmptyString } from "../../lib/utils";
|
||||
import { NonEmptyString } from "../../lib/utils";
|
||||
|
||||
type TranscriptWsEvent =
|
||||
operations["v1_transcript_get_websocket_events"]["responses"][200]["content"]["application/json"];
|
||||
import type { DagTask } from "../../lib/dagTypes";
|
||||
export type { DagTask, DagTaskStatus } from "../../lib/dagTypes";
|
||||
|
||||
export type UseWebSockets = {
|
||||
transcriptTextLive: string;
|
||||
@@ -28,10 +27,10 @@ export type UseWebSockets = {
|
||||
status: Status | null;
|
||||
waveform: AudioWaveform | null;
|
||||
duration: number | null;
|
||||
dagStatus: DagTask[] | null;
|
||||
};
|
||||
|
||||
export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
|
||||
const auth = useAuth();
|
||||
const [transcriptTextLive, setTranscriptTextLive] = useState<string>("");
|
||||
const [translateText, setTranslateText] = useState<string>("");
|
||||
const [title, setTitle] = useState<string>("");
|
||||
@@ -45,6 +44,7 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
|
||||
summary: "",
|
||||
});
|
||||
const [status, setStatus] = useState<Status | null>(null);
|
||||
const [dagStatus, setDagStatus] = useState<DagTask[] | null>(null);
|
||||
const { setError } = useError();
|
||||
|
||||
const queryClient = useQueryClient();
|
||||
@@ -336,169 +336,175 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
|
||||
};
|
||||
|
||||
if (!transcriptId) return;
|
||||
const tsId = parseNonEmptyString(transcriptId);
|
||||
|
||||
const MAX_RETRIES = 10;
|
||||
const url = `${WEBSOCKET_URL}/v1/transcripts/${transcriptId}/events`;
|
||||
let ws: WebSocket | null = null;
|
||||
let retryCount = 0;
|
||||
let retryTimeout: ReturnType<typeof setTimeout> | null = null;
|
||||
let intentionalClose = false;
|
||||
let ws = new WebSocket(url);
|
||||
|
||||
const connect = () => {
|
||||
const subprotocols =
|
||||
auth.status === "authenticated" && auth.accessToken
|
||||
? ["bearer", auth.accessToken]
|
||||
: undefined;
|
||||
ws = new WebSocket(url, subprotocols);
|
||||
ws.onopen = () => {
|
||||
console.debug("WebSocket connection opened");
|
||||
};
|
||||
|
||||
ws.onopen = () => {
|
||||
console.debug("WebSocket connection opened");
|
||||
retryCount = 0;
|
||||
};
|
||||
ws.onmessage = (event) => {
|
||||
const message = JSON.parse(event.data);
|
||||
|
||||
ws.onmessage = (event) => {
|
||||
const message: TranscriptWsEvent = JSON.parse(event.data);
|
||||
try {
|
||||
switch (message.event) {
|
||||
case "TRANSCRIPT":
|
||||
const newText = (message.data.text ?? "").trim();
|
||||
const newTranslation = (message.data.translation ?? "").trim();
|
||||
|
||||
try {
|
||||
switch (message.event) {
|
||||
case "TRANSCRIPT": {
|
||||
const newText = (message.data.text ?? "").trim();
|
||||
const newTranslation = (message.data.translation ?? "").trim();
|
||||
if (!newText) break;
|
||||
|
||||
if (!newText) break;
|
||||
console.debug("TRANSCRIPT event:", newText);
|
||||
setTextQueue((prevQueue) => [...prevQueue, newText]);
|
||||
setTranslationQueue((prevQueue) => [...prevQueue, newTranslation]);
|
||||
|
||||
console.debug("TRANSCRIPT event:", newText);
|
||||
setTextQueue((prevQueue) => [...prevQueue, newText]);
|
||||
setTranslationQueue((prevQueue) => [
|
||||
...prevQueue,
|
||||
newTranslation,
|
||||
]);
|
||||
setAccumulatedText((prevText) => prevText + " " + newText);
|
||||
break;
|
||||
|
||||
setAccumulatedText((prevText) => prevText + " " + newText);
|
||||
break;
|
||||
}
|
||||
|
||||
case "TOPIC":
|
||||
setTopics((prevTopics) => {
|
||||
const topic = message.data;
|
||||
const index = prevTopics.findIndex(
|
||||
(prevTopic) => prevTopic.id === topic.id,
|
||||
);
|
||||
if (index >= 0) {
|
||||
prevTopics[index] = topic;
|
||||
return prevTopics;
|
||||
}
|
||||
setAccumulatedText((prevText) =>
|
||||
prevText.slice(topic.transcript?.length ?? 0),
|
||||
);
|
||||
return [...prevTopics, topic];
|
||||
});
|
||||
console.debug("TOPIC event:", message.data);
|
||||
invalidateTranscriptTopics(queryClient, tsId);
|
||||
break;
|
||||
|
||||
case "FINAL_SHORT_SUMMARY":
|
||||
console.debug("FINAL_SHORT_SUMMARY event:", message.data);
|
||||
break;
|
||||
|
||||
case "FINAL_LONG_SUMMARY":
|
||||
setFinalSummary({ summary: message.data.long_summary });
|
||||
invalidateTranscript(queryClient, tsId);
|
||||
break;
|
||||
|
||||
case "FINAL_TITLE":
|
||||
console.debug("FINAL_TITLE event:", message.data);
|
||||
setTitle(message.data.title);
|
||||
invalidateTranscript(queryClient, tsId);
|
||||
break;
|
||||
|
||||
case "WAVEFORM":
|
||||
console.debug(
|
||||
"WAVEFORM event length:",
|
||||
message.data.waveform.length,
|
||||
case "TOPIC":
|
||||
setTopics((prevTopics) => {
|
||||
const topic = message.data as Topic;
|
||||
const index = prevTopics.findIndex(
|
||||
(prevTopic) => prevTopic.id === topic.id,
|
||||
);
|
||||
setWaveForm({ data: message.data.waveform });
|
||||
invalidateTranscriptWaveform(queryClient, tsId);
|
||||
break;
|
||||
|
||||
case "DURATION":
|
||||
console.debug("DURATION event:", message.data);
|
||||
setDuration(message.data.duration);
|
||||
break;
|
||||
|
||||
case "STATUS":
|
||||
console.log("STATUS event:", message.data);
|
||||
if (message.data.value === "error") {
|
||||
setError(
|
||||
Error("Websocket error status"),
|
||||
"There was an error processing this meeting.",
|
||||
);
|
||||
if (index >= 0) {
|
||||
prevTopics[index] = topic;
|
||||
return prevTopics;
|
||||
}
|
||||
setStatus(message.data);
|
||||
invalidateTranscript(queryClient, tsId);
|
||||
if (message.data.value === "ended") {
|
||||
intentionalClose = true;
|
||||
ws?.close();
|
||||
}
|
||||
break;
|
||||
|
||||
case "ACTION_ITEMS":
|
||||
console.debug("ACTION_ITEMS event:", message.data);
|
||||
invalidateTranscript(queryClient, tsId);
|
||||
break;
|
||||
|
||||
default: {
|
||||
const _exhaustive: never = message;
|
||||
console.warn(
|
||||
`Received unknown WebSocket event: ${(_exhaustive as TranscriptWsEvent).event}`,
|
||||
setAccumulatedText((prevText) =>
|
||||
prevText.slice(topic.transcript.length),
|
||||
);
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
setError(error);
|
||||
}
|
||||
};
|
||||
|
||||
ws.onerror = (error) => {
|
||||
console.error("WebSocket error:", error);
|
||||
};
|
||||
|
||||
ws.onclose = (event) => {
|
||||
console.debug("WebSocket connection closed, code:", event.code);
|
||||
if (intentionalClose) return;
|
||||
|
||||
const normalCodes = [1000, 1001, 1005];
|
||||
if (normalCodes.includes(event.code)) return;
|
||||
|
||||
if (retryCount < MAX_RETRIES) {
|
||||
const delay = Math.min(1000 * Math.pow(2, retryCount), 30000);
|
||||
console.log(
|
||||
`WebSocket reconnecting in ${delay}ms (attempt ${retryCount + 1}/${MAX_RETRIES})`,
|
||||
);
|
||||
if (retryCount === 0) {
|
||||
setError(
|
||||
new Error("WebSocket connection lost"),
|
||||
"Connection lost. Reconnecting...",
|
||||
return [...prevTopics, topic];
|
||||
});
|
||||
console.debug("TOPIC event:", message.data);
|
||||
// Invalidate topics query to sync with WebSocket data
|
||||
invalidateTranscriptTopics(
|
||||
queryClient,
|
||||
transcriptId as NonEmptyString,
|
||||
);
|
||||
}
|
||||
retryCount++;
|
||||
retryTimeout = setTimeout(connect, delay);
|
||||
} else {
|
||||
break;
|
||||
|
||||
case "FINAL_SHORT_SUMMARY":
|
||||
console.debug("FINAL_SHORT_SUMMARY event:", message.data);
|
||||
break;
|
||||
|
||||
case "FINAL_LONG_SUMMARY":
|
||||
if (message.data) {
|
||||
setFinalSummary(message.data);
|
||||
// Invalidate transcript query to sync summary
|
||||
invalidateTranscript(queryClient, transcriptId as NonEmptyString);
|
||||
}
|
||||
break;
|
||||
|
||||
case "FINAL_TITLE":
|
||||
console.debug("FINAL_TITLE event:", message.data);
|
||||
if (message.data) {
|
||||
setTitle(message.data.title);
|
||||
// Invalidate transcript query to sync title
|
||||
invalidateTranscript(queryClient, transcriptId as NonEmptyString);
|
||||
}
|
||||
break;
|
||||
|
||||
case "WAVEFORM":
|
||||
console.debug(
|
||||
"WAVEFORM event length:",
|
||||
message.data.waveform.length,
|
||||
);
|
||||
if (message.data) {
|
||||
setWaveForm(message.data.waveform);
|
||||
invalidateTranscriptWaveform(
|
||||
queryClient,
|
||||
transcriptId as NonEmptyString,
|
||||
);
|
||||
}
|
||||
break;
|
||||
case "DURATION":
|
||||
console.debug("DURATION event:", message.data);
|
||||
if (message.data) {
|
||||
setDuration(message.data.duration);
|
||||
}
|
||||
break;
|
||||
|
||||
case "STATUS":
|
||||
console.log("STATUS event:", message.data);
|
||||
if (message.data.value === "error") {
|
||||
setError(
|
||||
Error("Websocket error status"),
|
||||
"There was an error processing this meeting.",
|
||||
);
|
||||
}
|
||||
setStatus(message.data);
|
||||
invalidateTranscript(queryClient, transcriptId as NonEmptyString);
|
||||
if (message.data.value === "ended") {
|
||||
ws.close();
|
||||
}
|
||||
break;
|
||||
|
||||
case "DAG_STATUS":
|
||||
if (message.data?.tasks) {
|
||||
setDagStatus(message.data.tasks);
|
||||
}
|
||||
break;
|
||||
|
||||
case "DAG_TASK_PROGRESS":
|
||||
if (message.data) {
|
||||
setDagStatus(
|
||||
(prev) =>
|
||||
prev?.map((t) =>
|
||||
t.name === message.data.task_name
|
||||
? { ...t, progress_pct: message.data.progress_pct }
|
||||
: t,
|
||||
) ?? null,
|
||||
);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
setError(
|
||||
new Error(`Received unknown WebSocket event: ${message.event}`),
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
setError(error);
|
||||
}
|
||||
};
|
||||
|
||||
ws.onerror = (error) => {
|
||||
console.error("WebSocket error:", error);
|
||||
setError(new Error("A WebSocket error occurred."));
|
||||
};
|
||||
|
||||
ws.onclose = (event) => {
|
||||
console.debug("WebSocket connection closed");
|
||||
switch (event.code) {
|
||||
case 1000: // Normal Closure:
|
||||
break;
|
||||
case 1005: // Closure by client FF
|
||||
break;
|
||||
case 1001: // Navigate away
|
||||
break;
|
||||
case 1006: // Closed by client Chrome
|
||||
console.warn(
|
||||
"WebSocket closed by client, likely duplicated connection in react dev mode",
|
||||
);
|
||||
break;
|
||||
default:
|
||||
setError(
|
||||
new Error(`WebSocket closed unexpectedly with code: ${event.code}`),
|
||||
"Disconnected from the server. Please refresh the page.",
|
||||
);
|
||||
}
|
||||
};
|
||||
console.log(
|
||||
"Socket is closed. Reconnect will be attempted in 1 second.",
|
||||
event.reason,
|
||||
);
|
||||
// todo handle reconnect with socket.io
|
||||
}
|
||||
};
|
||||
|
||||
connect();
|
||||
|
||||
return () => {
|
||||
intentionalClose = true;
|
||||
if (retryTimeout) clearTimeout(retryTimeout);
|
||||
ws?.close();
|
||||
ws.close();
|
||||
};
|
||||
}, [transcriptId]);
|
||||
|
||||
@@ -512,5 +518,6 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
|
||||
status,
|
||||
waveform,
|
||||
duration,
|
||||
dagStatus,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -23,7 +23,7 @@ export default function UserInfo() {
|
||||
className="font-light px-2"
|
||||
onClick={(e) => {
|
||||
e.preventDefault();
|
||||
auth.signIn();
|
||||
auth.signIn("authentik");
|
||||
}}
|
||||
>
|
||||
Log in
|
||||
|
||||
@@ -1,15 +1,31 @@
|
||||
"use client";
|
||||
|
||||
import React, { useEffect, useRef } from "react";
|
||||
import React, { useEffect, useRef, useState } from "react";
|
||||
import { useQueryClient } from "@tanstack/react-query";
|
||||
import { WEBSOCKET_URL } from "./apiClient";
|
||||
import { useAuth } from "./AuthProvider";
|
||||
import { invalidateTranscript, invalidateTranscriptLists } from "./apiHooks";
|
||||
import { parseNonEmptyString } from "./utils";
|
||||
import type { operations } from "../reflector-api";
|
||||
import { z } from "zod";
|
||||
import {
|
||||
invalidateTranscript,
|
||||
invalidateTranscriptLists,
|
||||
TRANSCRIPT_SEARCH_URL,
|
||||
} from "./apiHooks";
|
||||
import type { NonEmptyString } from "./utils";
|
||||
|
||||
type UserWsEvent =
|
||||
operations["v1_user_get_websocket_events"]["responses"][200]["content"]["application/json"];
|
||||
import type { DagTask } from "./dagTypes";
|
||||
export type { DagTask, DagTaskStatus } from "./dagTypes";
|
||||
|
||||
const DagStatusContext = React.createContext<Map<string, DagTask[]>>(new Map());
|
||||
|
||||
export function useDagStatusMap() {
|
||||
return React.useContext(DagStatusContext);
|
||||
}
|
||||
|
||||
const UserEvent = z.object({
|
||||
event: z.string(),
|
||||
});
|
||||
|
||||
type UserEvent = z.TypeOf<typeof UserEvent>;
|
||||
|
||||
class UserEventsStore {
|
||||
private socket: WebSocket | null = null;
|
||||
@@ -93,6 +109,9 @@ export function UserEventsProvider({
|
||||
const queryClient = useQueryClient();
|
||||
const tokenRef = useRef<string | null>(null);
|
||||
const detachRef = useRef<(() => void) | null>(null);
|
||||
const [dagStatusMap, setDagStatusMap] = useState<Map<string, DagTask[]>>(
|
||||
new Map(),
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
// Only tear down when the user is truly unauthenticated
|
||||
@@ -131,26 +150,55 @@ export function UserEventsProvider({
|
||||
if (!detachRef.current) {
|
||||
const onMessage = (event: MessageEvent) => {
|
||||
try {
|
||||
const msg: UserWsEvent = JSON.parse(event.data);
|
||||
const fullMsg = JSON.parse(event.data);
|
||||
const msg = UserEvent.parse(fullMsg);
|
||||
const eventName = msg.event;
|
||||
const invalidateList = () => invalidateTranscriptLists(queryClient);
|
||||
|
||||
switch (msg.event) {
|
||||
switch (eventName) {
|
||||
case "TRANSCRIPT_CREATED":
|
||||
case "TRANSCRIPT_DELETED":
|
||||
case "TRANSCRIPT_STATUS":
|
||||
case "TRANSCRIPT_FINAL_TITLE":
|
||||
case "TRANSCRIPT_DURATION":
|
||||
invalidateTranscriptLists(queryClient).then(() => {});
|
||||
invalidateTranscript(
|
||||
queryClient,
|
||||
parseNonEmptyString(msg.data.id),
|
||||
).then(() => {});
|
||||
invalidateList().then(() => {});
|
||||
break;
|
||||
|
||||
case "TRANSCRIPT_STATUS": {
|
||||
invalidateList().then(() => {});
|
||||
const transcriptId = fullMsg.data?.id as string | undefined;
|
||||
if (transcriptId) {
|
||||
invalidateTranscript(
|
||||
queryClient,
|
||||
transcriptId as NonEmptyString,
|
||||
).then(() => {});
|
||||
}
|
||||
const status = fullMsg.data?.value as string | undefined;
|
||||
if (transcriptId && status && status !== "processing") {
|
||||
setDagStatusMap((prev) => {
|
||||
const next = new Map(prev);
|
||||
next.delete(transcriptId);
|
||||
return next;
|
||||
});
|
||||
}
|
||||
break;
|
||||
default: {
|
||||
const _exhaustive: never = msg;
|
||||
console.warn(
|
||||
`Unknown user event: ${(_exhaustive as UserWsEvent).event}`,
|
||||
);
|
||||
}
|
||||
|
||||
case "TRANSCRIPT_DAG_STATUS": {
|
||||
const transcriptId = fullMsg.data?.id as string | undefined;
|
||||
const tasks = fullMsg.data?.tasks as DagTask[] | undefined;
|
||||
if (transcriptId && tasks) {
|
||||
setDagStatusMap((prev) => {
|
||||
const next = new Map(prev);
|
||||
next.set(transcriptId, tasks);
|
||||
return next;
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
// Ignore other content events for list updates
|
||||
break;
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn("Invalid user event message", event.data);
|
||||
@@ -177,5 +225,9 @@ export function UserEventsProvider({
|
||||
};
|
||||
}, []);
|
||||
|
||||
return <>{children}</>;
|
||||
return (
|
||||
<DagStatusContext.Provider value={dagStatusMap}>
|
||||
{children}
|
||||
</DagStatusContext.Provider>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
// env vars must be set before any module imports
|
||||
process.env.AUTHENTIK_REFRESH_TOKEN_URL =
|
||||
"https://authentik.example.com/application/o/token/";
|
||||
process.env.AUTHENTIK_ISSUER =
|
||||
"https://authentik.example.com/application/o/reflector/";
|
||||
process.env.AUTHENTIK_CLIENT_ID = "test-client-id";
|
||||
process.env.AUTHENTIK_CLIENT_SECRET = "test-client-secret";
|
||||
process.env.SERVER_API_URL = "http://localhost:1250";
|
||||
process.env.FEATURE_REQUIRE_LOGIN = "true";
|
||||
// must NOT be "credentials" so authOptions() returns the Authentik path
|
||||
delete process.env.AUTH_PROVIDER;
|
||||
|
||||
jest.mock("../next", () => ({ isBuildPhase: false }));
|
||||
|
||||
jest.mock("../features", () => ({
|
||||
featureEnabled: (name: string) => name === "requireLogin",
|
||||
}));
|
||||
|
||||
jest.mock("../redisClient", () => ({
|
||||
tokenCacheRedis: {},
|
||||
redlock: {
|
||||
using: jest.fn((_keys: string[], _ttl: number, fn: () => unknown) => fn()),
|
||||
},
|
||||
}));
|
||||
|
||||
jest.mock("../redisTokenCache", () => ({
|
||||
getTokenCache: jest.fn().mockResolvedValue(null),
|
||||
setTokenCache: jest.fn().mockResolvedValue(undefined),
|
||||
deleteTokenCache: jest.fn().mockResolvedValue(undefined),
|
||||
}));
|
||||
|
||||
const mockFetch = jest.fn();
|
||||
global.fetch = mockFetch;
|
||||
|
||||
import { authOptions } from "../authBackend";
|
||||
|
||||
describe("Authentik token refresh", () => {
|
||||
beforeEach(() => {
|
||||
mockFetch.mockReset();
|
||||
});
|
||||
|
||||
test("refresh request preserves trailing slash in token URL", async () => {
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => ({
|
||||
access_token: "new-access-token",
|
||||
expires_in: 300,
|
||||
refresh_token: "new-refresh-token",
|
||||
}),
|
||||
});
|
||||
|
||||
const options = authOptions();
|
||||
const jwtCallback = options.callbacks!.jwt!;
|
||||
|
||||
// Simulate a returning user whose access token has expired (no account/user = not initial login)
|
||||
const expiredToken = {
|
||||
sub: "test-user-123",
|
||||
accessToken: "expired-access-token",
|
||||
accessTokenExpires: Date.now() - 60_000,
|
||||
refreshToken: "old-refresh-token",
|
||||
};
|
||||
|
||||
await jwtCallback({
|
||||
token: expiredToken,
|
||||
user: undefined as any,
|
||||
account: null,
|
||||
profile: undefined,
|
||||
trigger: "update",
|
||||
isNewUser: false,
|
||||
session: undefined,
|
||||
});
|
||||
|
||||
// The refresh POST must go to the exact URL from the env var (trailing slash included)
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"https://authentik.example.com/application/o/token/",
|
||||
expect.objectContaining({
|
||||
method: "POST",
|
||||
body: expect.any(String),
|
||||
}),
|
||||
);
|
||||
|
||||
const body = new URLSearchParams(mockFetch.mock.calls[0][1].body);
|
||||
expect(body.get("grant_type")).toBe("refresh_token");
|
||||
expect(body.get("refresh_token")).toBe("old-refresh-token");
|
||||
expect(body.get("client_id")).toBe("test-client-id");
|
||||
expect(body.get("client_secret")).toBe("test-client-secret");
|
||||
});
|
||||
});
|
||||
@@ -13,33 +13,9 @@ export const API_URL = !isBuildPhase
|
||||
? getClientEnv().API_URL
|
||||
: "http://localhost";
|
||||
|
||||
/**
|
||||
* Derive a WebSocket URL from the API_URL.
|
||||
* Handles full URLs (http://host/api, https://host/api) and relative paths (/api).
|
||||
* For full URLs, ws/wss is derived from the URL's own protocol.
|
||||
* For relative URLs, ws/wss is derived from window.location.protocol.
|
||||
*/
|
||||
const deriveWebSocketUrl = (apiUrl: string): string => {
|
||||
if (typeof window === "undefined") {
|
||||
return "ws://localhost";
|
||||
}
|
||||
const parsed = new URL(apiUrl, window.location.origin);
|
||||
const wsProtocol = parsed.protocol === "https:" ? "wss:" : "ws:";
|
||||
// Normalize: remove trailing slash from pathname
|
||||
const pathname = parsed.pathname.replace(/\/+$/, "");
|
||||
return `${wsProtocol}//${parsed.host}${pathname}`;
|
||||
};
|
||||
|
||||
const resolveWebSocketUrl = (): string => {
|
||||
if (isBuildPhase) return "ws://localhost";
|
||||
const raw = getClientEnv().WEBSOCKET_URL;
|
||||
if (!raw || raw === "auto") {
|
||||
return deriveWebSocketUrl(API_URL);
|
||||
}
|
||||
return raw;
|
||||
};
|
||||
|
||||
export const WEBSOCKET_URL = resolveWebSocketUrl();
|
||||
export const WEBSOCKET_URL = !isBuildPhase
|
||||
? getClientEnv().WEBSOCKET_URL || "ws://127.0.0.1:1250"
|
||||
: "ws://localhost";
|
||||
|
||||
export const client = createClient<paths>({
|
||||
baseUrl: API_URL,
|
||||
|
||||
@@ -7,7 +7,6 @@ import type { components } from "../reflector-api";
|
||||
import { useAuth } from "./AuthProvider";
|
||||
import { MeetingId } from "./types";
|
||||
import { NonEmptyString } from "./utils";
|
||||
import type { TranscriptStatus } from "./transcript";
|
||||
|
||||
/*
|
||||
* XXX error types returned from the hooks are not always correct; declared types are ValidationError but real type could be string or any other
|
||||
@@ -105,12 +104,6 @@ export function useTranscriptProcess() {
|
||||
});
|
||||
}
|
||||
|
||||
const ACTIVE_TRANSCRIPT_STATUSES = new Set<TranscriptStatus>([
|
||||
"processing",
|
||||
"uploaded",
|
||||
"recording",
|
||||
]);
|
||||
|
||||
export function useTranscriptGet(transcriptId: NonEmptyString | null) {
|
||||
return $api.useQuery(
|
||||
"get",
|
||||
@@ -124,10 +117,6 @@ export function useTranscriptGet(transcriptId: NonEmptyString | null) {
|
||||
},
|
||||
{
|
||||
enabled: !!transcriptId,
|
||||
refetchInterval: (query) => {
|
||||
const status = query.state.data?.status;
|
||||
return status && ACTIVE_TRANSCRIPT_STATUSES.has(status) ? 5000 : false;
|
||||
},
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import { AuthOptions } from "next-auth";
|
||||
import AuthentikProvider from "next-auth/providers/authentik";
|
||||
import CredentialsProvider from "next-auth/providers/credentials";
|
||||
import type { JWT } from "next-auth/jwt";
|
||||
import { JWTWithAccessToken, CustomSession } from "./types";
|
||||
import {
|
||||
@@ -65,191 +64,110 @@ const getAuthentikIssuer = () => {
|
||||
return stringUrl;
|
||||
};
|
||||
|
||||
export const authOptions = (): AuthOptions => {
|
||||
if (!featureEnabled("requireLogin")) {
|
||||
return { providers: [] };
|
||||
}
|
||||
|
||||
const authProvider = process.env.AUTH_PROVIDER;
|
||||
|
||||
if (authProvider === "credentials") {
|
||||
return credentialsAuthOptions();
|
||||
}
|
||||
|
||||
return authentikAuthOptions();
|
||||
};
|
||||
|
||||
function credentialsAuthOptions(): AuthOptions {
|
||||
return {
|
||||
providers: [
|
||||
CredentialsProvider({
|
||||
name: "Password",
|
||||
credentials: {
|
||||
email: { label: "Email", type: "email" },
|
||||
password: { label: "Password", type: "password" },
|
||||
},
|
||||
async authorize(credentials) {
|
||||
if (!credentials?.email || !credentials?.password) return null;
|
||||
const apiUrl = getNextEnvVar("SERVER_API_URL");
|
||||
const response = await fetch(`${apiUrl}/v1/auth/login`, {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
email: credentials.email,
|
||||
password: credentials.password,
|
||||
}),
|
||||
});
|
||||
if (!response.ok) return null;
|
||||
const data = await response.json();
|
||||
return {
|
||||
id: "pending",
|
||||
email: credentials.email,
|
||||
accessToken: data.access_token,
|
||||
expiresIn: data.expires_in,
|
||||
};
|
||||
},
|
||||
}),
|
||||
],
|
||||
session: { strategy: "jwt" },
|
||||
pages: {
|
||||
signIn: "/login",
|
||||
},
|
||||
callbacks: {
|
||||
async jwt({ token, user }) {
|
||||
if (user) {
|
||||
// First login - user comes from authorize()
|
||||
const typedUser = user as any;
|
||||
token.accessToken = typedUser.accessToken;
|
||||
token.accessTokenExpires = Date.now() + typedUser.expiresIn * 1000;
|
||||
|
||||
// Resolve actual user ID from backend
|
||||
const userId = await getUserId(typedUser.accessToken);
|
||||
if (userId) {
|
||||
token.sub = userId;
|
||||
}
|
||||
token.email = typedUser.email;
|
||||
}
|
||||
return token;
|
||||
},
|
||||
async session({ session, token }) {
|
||||
const extendedToken = token as JWTWithAccessToken;
|
||||
return {
|
||||
...session,
|
||||
accessToken: extendedToken.accessToken,
|
||||
accessTokenExpires: extendedToken.accessTokenExpires,
|
||||
error: extendedToken.error,
|
||||
user: {
|
||||
id: assertExistsAndNonEmptyString(token.sub, "User ID required"),
|
||||
name: extendedToken.name,
|
||||
email: extendedToken.email,
|
||||
},
|
||||
} satisfies CustomSession;
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function authentikAuthOptions(): AuthOptions {
|
||||
return {
|
||||
providers: [
|
||||
AuthentikProvider({
|
||||
...(() => {
|
||||
const [clientId, clientSecret, issuer] = sequenceThrows(
|
||||
getAuthentikClientId,
|
||||
getAuthentikClientSecret,
|
||||
getAuthentikIssuer,
|
||||
);
|
||||
return {
|
||||
clientId,
|
||||
clientSecret,
|
||||
issuer,
|
||||
};
|
||||
})(),
|
||||
authorization: {
|
||||
params: {
|
||||
scope: "openid email profile offline_access",
|
||||
},
|
||||
},
|
||||
}),
|
||||
],
|
||||
session: {
|
||||
strategy: "jwt",
|
||||
},
|
||||
callbacks: {
|
||||
async jwt({ token, account, user }) {
|
||||
if (account && !account.access_token) {
|
||||
await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`);
|
||||
}
|
||||
|
||||
if (account && user) {
|
||||
// called only on first login
|
||||
// XXX account.expires_in used in example is not defined for authentik backend, but expires_at is
|
||||
if (account.access_token) {
|
||||
const expiresAtS = assertExists(account.expires_at);
|
||||
const expiresAtMs = expiresAtS * 1000;
|
||||
const jwtToken: JWTWithAccessToken = {
|
||||
...token,
|
||||
accessToken: account.access_token,
|
||||
accessTokenExpires: expiresAtMs,
|
||||
refreshToken: account.refresh_token,
|
||||
};
|
||||
if (jwtToken.error) {
|
||||
await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`);
|
||||
} else {
|
||||
assertNotExists(
|
||||
jwtToken.error,
|
||||
`panic! trying to cache token with error in jwt: ${jwtToken.error}`,
|
||||
export const authOptions = (): AuthOptions =>
|
||||
featureEnabled("requireLogin")
|
||||
? {
|
||||
providers: [
|
||||
AuthentikProvider({
|
||||
...(() => {
|
||||
const [clientId, clientSecret, issuer] = sequenceThrows(
|
||||
getAuthentikClientId,
|
||||
getAuthentikClientSecret,
|
||||
getAuthentikIssuer,
|
||||
);
|
||||
await setTokenCache(tokenCacheRedis, `token:${token.sub}`, {
|
||||
token: jwtToken,
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
return jwtToken;
|
||||
return {
|
||||
clientId,
|
||||
clientSecret,
|
||||
issuer,
|
||||
};
|
||||
})(),
|
||||
authorization: {
|
||||
params: {
|
||||
scope: "openid email profile offline_access",
|
||||
},
|
||||
},
|
||||
}),
|
||||
],
|
||||
session: {
|
||||
strategy: "jwt",
|
||||
},
|
||||
callbacks: {
|
||||
async jwt({ token, account, user }) {
|
||||
if (account && !account.access_token) {
|
||||
await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const currentToken = await getTokenCache(
|
||||
tokenCacheRedis,
|
||||
`token:${token.sub}`,
|
||||
);
|
||||
console.debug(
|
||||
"currentToken from cache",
|
||||
JSON.stringify(currentToken, null, 2),
|
||||
"will be returned?",
|
||||
currentToken &&
|
||||
!shouldRefreshToken(currentToken.token.accessTokenExpires),
|
||||
);
|
||||
if (
|
||||
currentToken &&
|
||||
!shouldRefreshToken(currentToken.token.accessTokenExpires)
|
||||
) {
|
||||
return currentToken.token;
|
||||
}
|
||||
if (account && user) {
|
||||
// called only on first login
|
||||
// XXX account.expires_in used in example is not defined for authentik backend, but expires_at is
|
||||
if (account.access_token) {
|
||||
const expiresAtS = assertExists(account.expires_at);
|
||||
const expiresAtMs = expiresAtS * 1000;
|
||||
const jwtToken: JWTWithAccessToken = {
|
||||
...token,
|
||||
accessToken: account.access_token,
|
||||
accessTokenExpires: expiresAtMs,
|
||||
refreshToken: account.refresh_token,
|
||||
};
|
||||
if (jwtToken.error) {
|
||||
await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`);
|
||||
} else {
|
||||
assertNotExists(
|
||||
jwtToken.error,
|
||||
`panic! trying to cache token with error in jwt: ${jwtToken.error}`,
|
||||
);
|
||||
await setTokenCache(tokenCacheRedis, `token:${token.sub}`, {
|
||||
token: jwtToken,
|
||||
timestamp: Date.now(),
|
||||
});
|
||||
return jwtToken;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// access token has expired, try to update it
|
||||
return await lockedRefreshAccessToken(token);
|
||||
},
|
||||
async session({ session, token }) {
|
||||
const extendedToken = token as JWTWithAccessToken;
|
||||
console.log("extendedToken", extendedToken);
|
||||
const userId = await getUserId(extendedToken.accessToken);
|
||||
const currentToken = await getTokenCache(
|
||||
tokenCacheRedis,
|
||||
`token:${token.sub}`,
|
||||
);
|
||||
console.debug(
|
||||
"currentToken from cache",
|
||||
JSON.stringify(currentToken, null, 2),
|
||||
"will be returned?",
|
||||
currentToken &&
|
||||
!shouldRefreshToken(currentToken.token.accessTokenExpires),
|
||||
);
|
||||
if (
|
||||
currentToken &&
|
||||
!shouldRefreshToken(currentToken.token.accessTokenExpires)
|
||||
) {
|
||||
return currentToken.token;
|
||||
}
|
||||
|
||||
return {
|
||||
...session,
|
||||
accessToken: extendedToken.accessToken,
|
||||
accessTokenExpires: extendedToken.accessTokenExpires,
|
||||
error: extendedToken.error,
|
||||
user: {
|
||||
id: assertExistsAndNonEmptyString(userId, "User ID required"),
|
||||
name: extendedToken.name,
|
||||
email: extendedToken.email,
|
||||
// access token has expired, try to update it
|
||||
return await lockedRefreshAccessToken(token);
|
||||
},
|
||||
} satisfies CustomSession;
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
async session({ session, token }) {
|
||||
const extendedToken = token as JWTWithAccessToken;
|
||||
console.log("extendedToken", extendedToken);
|
||||
const userId = await getUserId(extendedToken.accessToken);
|
||||
|
||||
return {
|
||||
...session,
|
||||
accessToken: extendedToken.accessToken,
|
||||
accessTokenExpires: extendedToken.accessTokenExpires,
|
||||
error: extendedToken.error,
|
||||
user: {
|
||||
id: assertExistsAndNonEmptyString(userId, "User ID required"),
|
||||
name: extendedToken.name,
|
||||
email: extendedToken.email,
|
||||
},
|
||||
} satisfies CustomSession;
|
||||
},
|
||||
},
|
||||
}
|
||||
: {
|
||||
providers: [],
|
||||
};
|
||||
|
||||
async function lockedRefreshAccessToken(
|
||||
token: JWT,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user