Compare commits

...

17 Commits

Author SHA1 Message Date
Igor Loskutov
cdc495499b fix: convert DB models to view models in bulk-status endpoint
model_validate(from_attributes=True) needed to convert DB Meeting and
CalendarEvent to their view-layer Pydantic counterparts.
2026-02-05 21:56:45 -05:00
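A minimal sketch of the Pydantic v2 pattern this commit relies on; `MeetingView` and `DbMeetingRow` are hypothetical stand-ins for the real view and DB models.

```python
from pydantic import BaseModel


class MeetingView(BaseModel):
    id: str
    room_id: str


class DbMeetingRow:
    """Stands in for a DB model instance that exposes plain attributes."""

    def __init__(self) -> None:
        self.id = "m1"
        self.room_id = "r1"


# from_attributes=True tells Pydantic to read fields off object attributes
# instead of expecting a mapping, which is what this fix relies on.
view = MeetingView.model_validate(DbMeetingRow(), from_attributes=True)
print(view)  # id='m1' room_id='r1'
```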
ae44f5227b Merge branch 'main' into fix-room-query-batching 2026-02-05 19:42:29 -05:00
Igor Loskutov
4339ffffcf fix: use $api.queryOptions for batcher query keys
Replace custom meetingStatusKeys with $api.queryOptions()-derived keys
so cache identity matches the original per-room GET endpoints.
2026-02-05 19:40:13 -05:00
Igor Loskutov
9dc6c20ef8 fix: address code review findings
- Add max_length=100 on BulkStatusRequest.room_names to prevent abuse (sketched after this entry)
- Filter bulk endpoint results to rooms user can see (owned or shared)
- Throw on bulk-status fetch error instead of silently returning empty data
- Fix room_by_id type annotation: dict[str, DbRoom] instead of Any
- Remove stale "200ms" comment in test
- Enable strict: true in jest tsconfig
- Remove working docs from tracked files
- Simplify redundant ternary in test helper
2026-02-05 19:36:17 -05:00
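A minimal sketch of the first finding, assuming FastAPI with Pydantic v2; the model mirrors the `BulkStatusRequest` in the router diff below, and the handler body is illustrative.

```python
from fastapi import FastAPI
from pydantic import BaseModel, Field

app = FastAPI()


class BulkStatusRequest(BaseModel):
    # max_length on a list field caps the item count: requests with more
    # than 100 names fail validation (422) before the handler runs.
    room_names: list[str] = Field(max_length=100)


@app.post("/v1/rooms/meetings/bulk-status")
async def bulk_status(request: BulkStatusRequest) -> dict:
    return {}  # illustrative body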
Igor Loskutov
931c344ddf feat: add frontend test infrastructure and fix CI workflow
- Fix pnpm version mismatch in test_next_server.yml (8 → auto-detect 10)
- Add concurrency group to cancel stale CI runs
- Remove redundant setup-node step
- Update jest.config.js for jsdom + tsx support
- Add meetingStatusBatcher integration test (3 tests)
- Extract createMeetingStatusBatcher factory for testability
2026-02-05 19:06:32 -05:00
15ab2e306e feat: Daily+hatchet default (#846)
* feat: set Daily as default video platform

Daily.co has been battle-tested and is ready to be the default.
Whereby remains available for rooms that explicitly set it.

* feat: enforce Hatchet for all multitrack processing

Remove use_celery option from rooms - multitrack (Daily) recordings
now always use Hatchet workflows. Celery remains for single-track
(Whereby) file processing only.

- Remove use_celery column from room table
- Simplify dispatch logic to always use Hatchet for multitracks (sketched after this entry)
- Update tests to mock Hatchet instead of Celery

* fix: update whereby test to patch Hatchet instead of removed Celery import

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-02-05 18:38:08 -05:00
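A sketch of the resulting dispatch rule, not a standalone program: the import paths and call shapes come from the `transcript_process` diffs further down, while the two config classes are assumed to be in scope from the same module.

```python
from reflector.hatchet.client import HatchetClientManager
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process


async def dispatch(config) -> None:
    if isinstance(config, MultitrackProcessingConfig):
        # Multitrack (Daily) recordings: always Hatchet, no Celery fallback.
        await HatchetClientManager.start_workflow(
            workflow_name="DiarizationPipeline",
            input_data={"transcript_id": config.transcript_id},
        )
    elif isinstance(config, FileProcessingConfig):
        # Single-track (Whereby) files: still Celery.
        task_pipeline_file_process.delay(transcript_id=config.transcript_id)
```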
Igor Loskutov
129290517e docs: add handoff report and frontend testing research
BATSHIT_REPORT.md: full context on bulk query batching — business goal,
approach, all changes, verification status, and how to run.
FRONTEND_TEST_RESEARCH.md: research on unit testing react-query hooks
with jest.mock, renderHook, and batcher testing patterns.
2026-02-05 17:49:23 -05:00
Igor Loskutov
7e072219bf feat: batch room meeting status queries into single bulk endpoint
Reduces rooms list page from 2N+2 HTTP requests to 1 POST request.
Backend: POST /v1/rooms/meetings/bulk-status with 3 DB queries total.
Frontend: @yornaath/batshit DataLoader-style batcher with 10ms window.
2026-02-05 17:47:58 -05:00
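The "3 DB queries total" shape, sketched with the controller methods these commits add (import paths match the router diff below); this assumes the Reflector server environment.

```python
import asyncio
from datetime import datetime, timezone

from reflector.db.calendar_events import calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import rooms_controller


async def bulk_status(room_names: list[str]):
    # Query 1: resolve all requested rooms in one SELECT ... WHERE name IN (...)
    rooms = await rooms_controller.get_by_names(room_names)
    room_ids = [r.id for r in rooms]
    now = datetime.now(timezone.utc)
    # Queries 2 and 3: active meetings and upcoming events, run concurrently
    return await asyncio.gather(
        meetings_controller.get_all_active_for_rooms(room_ids, now),
        calendar_events_controller.get_upcoming_for_rooms(room_ids),
    )
```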
1ce1c7a910 fix: websocket tests (#825)
* fix websocket tests

* fix: restore timeout and fix celery test infrastructure

- Re-add timeout=1.0 to ws_manager pubsub loop (prevents CPU spin; sketched after this entry)
- Use Redis for Celery tests (memory:// broker doesn't support chords)
- Add timeout param to in-memory subscriber mock
- Remove duplicate celery_includes fixture from rtc_ws tests

* fix: remove redundant inline imports in test files

* fix: update gitleaks ignore for moved s3_key line

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-02-05 14:23:31 -05:00
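Why the re-added timeout matters, as a self-contained sketch against redis-py's asyncio client (host, port, and channel name are illustrative):

```python
# With timeout=0 (the default), get_message returns immediately when no
# message is queued, so a bare `while True` loop busy-spins the CPU.
# timeout=1.0 blocks for up to a second per iteration instead.
import redis.asyncio as redis


async def pubsub_reader() -> None:
    client = redis.Redis(host="localhost", port=6379)
    pubsub = client.pubsub()
    await pubsub.subscribe("events")
    while True:
        message = await pubsub.get_message(
            ignore_subscribe_messages=True, timeout=1.0
        )
        if message is not None:
            print(message["data"])
```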
Rémi Pauchet
984795357e - fix nvidia repo blocked by apt (sha1) (#845)
- use build cache for apt and uv
- limit concurrency for uv to prevent crashes with too many cores
2026-02-05 13:59:34 -05:00
fa3cf5da0f chore(main): release 0.32.2 (#842) 2026-02-03 22:05:22 -05:00
8707c6694a fix: use Daily API recording.duration as master source for transcript duration (#844)
Set duration early in get_participants from Daily API (seconds -> ms),
ensuring post_zulip has the value before mixdown_tracks completes.

Removes redundant duration update from mixdown_tracks.

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-02-03 17:15:03 -05:00
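The conversion itself is small; a sketch with a hypothetical helper name, mirroring the guard in the pipeline diff below (`recording.duration` from the Daily API may be None or 0):

```python
def daily_duration_ms(duration_seconds: float | None) -> int:
    # Daily API reports seconds; the transcript stores milliseconds.
    return int(duration_seconds * 1000) if duration_seconds else 0


assert daily_duration_ms(61.5) == 61500
assert daily_duration_ms(None) == 0
```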
4acde4b7fd fix: increase TIMEOUT_MEDIUM from 2m to 5m for LLM tasks (#843)
Topic detection was timing out on longer transcripts when LLM
responses are slow. This affects detect_chunk_topic and other
LLM-calling tasks that use TIMEOUT_MEDIUM.

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-02-03 16:05:16 -05:00
a2ed7d60d5 fix: make caddy optional (#841) 2026-02-03 00:18:47 +01:00
a08f94a5bf chore(main): release 0.32.1 (#840) 2026-01-30 17:34:48 -05:00
Igor Loskutov
c05d1f03cd fix: match httpx pad with hatchet audio timeout 2026-01-30 15:56:18 -05:00
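A sketch of what "match" means here, assuming the `TIMEOUT_AUDIO = 720` value from the constants diff; the URL parameter is a placeholder.

```python
# Align the HTTP client timeout with the Hatchet task timeout so the
# request cannot outlive the task that issued it.
import httpx

TIMEOUT_AUDIO = 720  # seconds, from reflector.hatchet.constants


async def call_padding_service(url: str) -> httpx.Response:
    async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO) as client:
        return await client.post(url)
```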
Igor Loskutov
23eb1371cb fix: daily multitrack pipeline finalize dependency fix 2026-01-30 15:19:27 -05:00
32 changed files with 1674 additions and 386 deletions

View File

@@ -13,6 +13,9 @@ on:
jobs:
test-next-server:
runs-on: ubuntu-latest
concurrency:
group: test-next-server-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
@@ -21,17 +24,12 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 8
package_json_file: './www/package.json'
- name: Setup Node.js cache
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'

View File

@@ -4,3 +4,4 @@ docs/docs/installation/daily-setup.md:curl-auth-header:277
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:74
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:83
server/reflector/worker/process.py:generic-api-key:465
server/reflector/worker/process.py:generic-api-key:594

View File

@@ -1,5 +1,22 @@
# Changelog
## [0.32.2](https://github.com/Monadical-SAS/reflector/compare/v0.32.1...v0.32.2) (2026-02-03)
### Bug Fixes
* increase TIMEOUT_MEDIUM from 2m to 5m for LLM tasks ([#843](https://github.com/Monadical-SAS/reflector/issues/843)) ([4acde4b](https://github.com/Monadical-SAS/reflector/commit/4acde4b7fdef88cc02ca12cf38c9020b05ed96ac))
* make caddy optional ([#841](https://github.com/Monadical-SAS/reflector/issues/841)) ([a2ed7d6](https://github.com/Monadical-SAS/reflector/commit/a2ed7d60d557b551a5b64e4dfd909b63a791d9fc))
* use Daily API recording.duration as master source for transcript duration ([#844](https://github.com/Monadical-SAS/reflector/issues/844)) ([8707c66](https://github.com/Monadical-SAS/reflector/commit/8707c6694a80c939b6214bbc13331741f192e082))
## [0.32.1](https://github.com/Monadical-SAS/reflector/compare/v0.32.0...v0.32.1) (2026-01-30)
### Bug Fixes
* daily multitrack pipeline finalize dependency fix ([23eb137](https://github.com/Monadical-SAS/reflector/commit/23eb1371cb9348c4b81eb12ad506b582f8a4799e))
* match httpx pad with hatchet audio timeout ([c05d1f0](https://github.com/Monadical-SAS/reflector/commit/c05d1f03cd8369fc06efd455527e50246887efd0))
## [0.32.0](https://github.com/Monadical-SAS/reflector/compare/v0.31.0...v0.32.0) (2026-01-30)

View File

@@ -1,6 +1,8 @@
# Reflector Caddyfile
# Replace example.com with your actual domains
# CORS is handled by the backend - Caddy just proxies
# Reflector Caddyfile (optional reverse proxy)
# Use this only when you run Caddy via: docker compose -f docker-compose.prod.yml --profile caddy up -d
# If Coolify, Traefik, or nginx already use ports 80/443, do NOT start Caddy; point your proxy at web:3000 and server:1250.
#
# Replace example.com with your actual domains. CORS is handled by the backend - Caddy just proxies.
#
# For environment variable substitution, set:
# FRONTEND_DOMAIN=app.example.com

View File

@@ -1,9 +1,14 @@
# Production Docker Compose configuration
# Usage: docker compose -f docker-compose.prod.yml up -d
#
# Caddy (reverse proxy on ports 80/443) is OPTIONAL and behind the "caddy" profile:
# - With Caddy (self-hosted, you manage SSL): docker compose -f docker-compose.prod.yml --profile caddy up -d
# - Without Caddy (Coolify/Traefik/nginx already on 80/443): docker compose -f docker-compose.prod.yml up -d
# Then point your proxy at web:3000 (frontend) and server:1250 (API).
#
# Prerequisites:
# 1. Copy .env.example to .env and configure for both server/ and www/
# 2. Copy Caddyfile.example to Caddyfile and edit with your domains
# 2. If using Caddy: copy Caddyfile.example to Caddyfile and edit your domains
# 3. Deploy Modal GPU functions (see gpu/modal_deployments/deploy-all.sh)
services:
@@ -84,6 +89,8 @@ services:
retries: 3
caddy:
profiles:
- caddy
image: caddy:2-alpine
restart: unless-stopped
ports:

View File

@@ -12,14 +12,14 @@ This page documents the Docker Compose configuration for Reflector. For the comp
The `docker-compose.prod.yml` includes these services:
| Service | Image | Purpose |
|---------|-------|---------|
| ---------- | --------------------------------- | --------------------------------------------------------------------------- |
| `web` | `monadicalsas/reflector-frontend` | Next.js frontend |
| `server` | `monadicalsas/reflector-backend` | FastAPI backend |
| `worker` | `monadicalsas/reflector-backend` | Celery worker for background tasks |
| `beat` | `monadicalsas/reflector-backend` | Celery beat scheduler |
| `redis` | `redis:7.2-alpine` | Message broker and cache |
| `postgres` | `postgres:17-alpine` | Primary database |
| `caddy` | `caddy:2-alpine` | Reverse proxy with auto-SSL |
| `caddy` | `caddy:2-alpine` | Reverse proxy with auto-SSL (optional; see [Caddy profile](#caddy-profile)) |
## Environment Files
@@ -30,6 +30,7 @@ Reflector uses two separate environment files:
Used by: `server`, `worker`, `beat`
Key variables:
```env
# Database connection
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
@@ -54,6 +55,7 @@ TRANSCRIPT_MODAL_API_KEY=...
Used by: `web`
Key variables:
```env
# Domain configuration
SITE_URL=https://app.example.com
@@ -71,7 +73,7 @@ Note: `API_URL` is used client-side (browser), `SERVER_API_URL` is used server-s
## Volumes
| Volume | Purpose |
|--------|---------|
| --------------- | ----------------------------- |
| `redis_data` | Redis persistence |
| `postgres_data` | PostgreSQL data |
| `server_data` | Uploaded files, local storage |
@@ -82,14 +84,30 @@ Note: `API_URL` is used client-side (browser), `SERVER_API_URL` is used server-s
All services share the default network. The network is marked `attachable: true` to allow external containers (like Authentik) to join.
## Caddy profile
Caddy (ports 80 and 443) is **optional** and behind the `caddy` profile so it does not conflict with an existing reverse proxy (e.g. Coolify, Traefik, nginx).
- **With Caddy** (you want Reflector to handle SSL):
`docker compose -f docker-compose.prod.yml --profile caddy up -d`
- **Without Caddy** (Coolify or another proxy already on 80/443):
`docker compose -f docker-compose.prod.yml up -d`
Then configure your proxy to send traffic to `web:3000` (frontend) and `server:1250` (API).
## Common Commands
### Start all services
```bash
# Without Caddy (e.g. when using Coolify)
docker compose -f docker-compose.prod.yml up -d
# With Caddy as reverse proxy
docker compose -f docker-compose.prod.yml --profile caddy up -d
```
### View logs
```bash
# All services
docker compose -f docker-compose.prod.yml logs -f
@@ -99,6 +117,7 @@ docker compose -f docker-compose.prod.yml logs server --tail 50
```
### Restart a service
```bash
# Quick restart (doesn't reload .env changes)
docker compose -f docker-compose.prod.yml restart server
@@ -108,27 +127,32 @@ docker compose -f docker-compose.prod.yml up -d server
```
### Run database migrations
```bash
docker compose -f docker-compose.prod.yml exec server uv run alembic upgrade head
```
### Access database
```bash
docker compose -f docker-compose.prod.yml exec postgres psql -U reflector
```
### Pull latest images
```bash
docker compose -f docker-compose.prod.yml pull
docker compose -f docker-compose.prod.yml up -d
```
### Stop all services
```bash
docker compose -f docker-compose.prod.yml down
```
### Full reset (WARNING: deletes data)
```bash
docker compose -f docker-compose.prod.yml down -v
```
@@ -187,6 +211,7 @@ The Caddyfile supports environment variable substitution:
Set `FRONTEND_DOMAIN` and `API_DOMAIN` environment variables, or edit the file directly.
### Reload Caddy after changes
```bash
docker compose -f docker-compose.prod.yml exec caddy caddy reload --config /etc/caddy/Caddyfile
```

View File

@@ -61,7 +61,7 @@ Type: A Name: api Value: <your-server-ip>
Reflector requires GPU processing for transcription and speaker diarization. Choose one option:
| | **Modal.com (Cloud)** | **Self-Hosted GPU** |
|---|---|---|
| ------------ | --------------------------------- | ---------------------------- |
| **Best for** | No GPU hardware, zero maintenance | Own GPU server, full control |
| **Pricing** | Pay-per-use | Fixed infrastructure cost |
@@ -70,6 +70,7 @@ Reflector requires GPU processing for transcription and speaker diarization. Cho
#### Accept HuggingFace Licenses
Visit both pages and click "Accept":
- https://huggingface.co/pyannote/speaker-diarization-3.1
- https://huggingface.co/pyannote/segmentation-3.0
@@ -179,6 +180,7 @@ Save these credentials - you'll need them in the next step.
## Configure Environment
Reflector has two env files:
- `server/.env` - Backend configuration
- `www/.env` - Frontend configuration
@@ -190,6 +192,7 @@ nano server/.env
```
**Required settings:**
```env
# Database (defaults work with docker-compose.prod.yml)
DATABASE_URL=postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
@@ -249,6 +252,7 @@ nano www/.env
```
**Required settings:**
```env
# Your domains
SITE_URL=https://app.example.com
@@ -266,7 +270,11 @@ FEATURE_REQUIRE_LOGIN=false
---
## Configure Caddy
## Reverse proxy (Caddy or existing)
**If Coolify, Traefik, or nginx already use ports 80/443** (e.g. Coolify on your host): skip Caddy. Start the stack without the Caddy profile (see [Start Services](#start-services) below), then point your proxy at `web:3000` (frontend) and `server:1250` (API).
**If you want Reflector to provide the reverse proxy and SSL:**
```bash
cp Caddyfile.example Caddyfile
@@ -289,10 +297,18 @@ Replace `example.com` with your domains. The `{$VAR:default}` syntax uses Caddy'
## Start Services
**Without Caddy** (e.g. Coolify already on 80/443):
```bash
docker compose -f docker-compose.prod.yml up -d
```
**With Caddy** (Reflector handles SSL):
```bash
docker compose -f docker-compose.prod.yml --profile caddy up -d
```
Wait for containers to start (first run may take 1-2 minutes to pull images and initialize).
---
@@ -300,18 +316,21 @@ Wait for containers to start (first run may take 1-2 minutes to pull images and
## Verify Deployment
### Check services
```bash
docker compose -f docker-compose.prod.yml ps
# All should show "Up"
```
### Test API
```bash
curl https://api.example.com/health
# Should return: {"status":"healthy"}
```
### Test Frontend
- Visit https://app.example.com
- You should see the Reflector interface
- Try uploading an audio file to test transcription
@@ -327,6 +346,7 @@ By default, Reflector is open (no login required). **Authentication is required
See [Authentication Setup](./auth-setup) for full Authentik OAuth configuration.
Quick summary:
1. Deploy Authentik on your server
2. Create OAuth provider in Authentik
3. Extract public key for JWT verification
@@ -358,6 +378,7 @@ DAILYCO_STORAGE_AWS_ROLE_ARN=<arn:aws:iam::ACCOUNT:role/DailyCo>
```
Reload env and restart:
```bash
docker compose -f docker-compose.prod.yml up -d server worker
```
@@ -367,35 +388,43 @@ docker compose -f docker-compose.prod.yml up -d server worker
## Troubleshooting
### Check logs for errors
```bash
docker compose -f docker-compose.prod.yml logs server --tail 20
docker compose -f docker-compose.prod.yml logs worker --tail 20
```
### Services won't start
```bash
docker compose -f docker-compose.prod.yml logs
```
### CORS errors in browser
- Verify `CORS_ORIGIN` in `server/.env` matches your frontend domain exactly (including `https://`)
- Reload env: `docker compose -f docker-compose.prod.yml up -d server`
### SSL certificate errors
### SSL certificate errors (when using Caddy)
- Caddy auto-provisions Let's Encrypt certificates
- Ensure ports 80 and 443 are open
- Ensure ports 80 and 443 are open and not used by another proxy
- Check: `docker compose -f docker-compose.prod.yml logs caddy`
- If port 80 is already in use (e.g. by Coolify), run without Caddy: `docker compose -f docker-compose.prod.yml up -d` and use your existing proxy
### Transcription not working
- Check Modal dashboard: https://modal.com/apps
- Verify URLs in `server/.env` match deployed functions
- Check worker logs: `docker compose -f docker-compose.prod.yml logs worker`
### "Login required" but auth not configured
- Set `FEATURE_REQUIRE_LOGIN=false` in `www/.env`
- Rebuild frontend: `docker compose -f docker-compose.prod.yml up -d --force-recreate web`
### Database migrations or connectivity issues
Migrations run automatically on server startup. To check database connectivity or debug migration failures:
```bash
@@ -408,4 +437,3 @@ docker compose -f docker-compose.prod.yml exec server uv run python -c "from ref
# Manually run migrations (if needed)
docker compose -f docker-compose.prod.yml exec server uv run alembic upgrade head
```

View File

@@ -4,27 +4,31 @@ ENV PYTHONUNBUFFERED=1 \
UV_LINK_MODE=copy \
UV_NO_CACHE=1
# patch until nvidia updates the sha1 repo
ADD sequoia.config /etc/crypto-policies/back-ends/sequoia.config
WORKDIR /tmp
RUN apt-get update \
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
ffmpeg \
curl \
ca-certificates \
gnupg \
wget \
&& apt-get clean
wget
# Add NVIDIA CUDA repo for Debian 12 (bookworm) and install cuDNN 9 for CUDA 12
ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /cuda-keyring.deb
RUN dpkg -i /cuda-keyring.deb \
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
dpkg -i /cuda-keyring.deb \
&& rm /cuda-keyring.deb \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
cuda-cudart-12-6 \
libcublas-12-6 \
libcudnn9-cuda-12 \
libcudnn9-dev-cuda-12 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
libcudnn9-dev-cuda-12
ADD https://astral.sh/uv/install.sh /uv-installer.sh
RUN sh /uv-installer.sh && rm /uv-installer.sh
ENV PATH="/root/.local/bin/:$PATH"
@@ -39,6 +43,13 @@ COPY ./app /app/app
COPY ./main.py /app/
COPY ./runserver.sh /app/
# prevent uv failing with too many open files on big cpus
ENV UV_CONCURRENT_INSTALLS=16
# first install
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --compile-bytecode --locked
EXPOSE 8000
CMD ["sh", "/app/runserver.sh"]

View File

@@ -0,0 +1,2 @@
[hash_algorithms]
sha1 = "always"

View File

@@ -0,0 +1,35 @@
"""drop_use_celery_column
Revision ID: 3aa20b96d963
Revises: e69f08ead8ea
Create Date: 2026-02-05 10:12:44.065279
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "3aa20b96d963"
down_revision: Union[str, None] = "e69f08ead8ea"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.drop_column("use_celery")
def downgrade() -> None:
with op.batch_alter_table("room", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"use_celery",
sa.Boolean(),
server_default=sa.text("false"),
nullable=False,
)
)

View File

@@ -104,6 +104,26 @@ class CalendarEventController:
results = await get_database().fetch_all(query)
return [CalendarEvent(**result) for result in results]
async def get_upcoming_for_rooms(
self, room_ids: list[str], minutes_ahead: int = 120
) -> list[CalendarEvent]:
now = datetime.now(timezone.utc)
future_time = now + timedelta(minutes=minutes_ahead)
query = (
calendar_events.select()
.where(
sa.and_(
calendar_events.c.room_id.in_(room_ids),
calendar_events.c.is_deleted == False,
calendar_events.c.start_time <= future_time,
calendar_events.c.end_time >= now,
)
)
.order_by(calendar_events.c.start_time.asc())
)
results = await get_database().fetch_all(query)
return [CalendarEvent(**result) for result in results]
async def get_by_id(self, event_id: str) -> CalendarEvent | None:
query = calendar_events.select().where(calendar_events.c.id == event_id)
result = await get_database().fetch_one(query)

View File

@@ -301,6 +301,23 @@ class MeetingController:
results = await get_database().fetch_all(query)
return [Meeting(**result) for result in results]
async def get_all_active_for_rooms(
self, room_ids: list[str], current_time: datetime
) -> list[Meeting]:
query = (
meetings.select()
.where(
sa.and_(
meetings.c.room_id.in_(room_ids),
meetings.c.end_date > current_time,
meetings.c.is_active,
)
)
.order_by(meetings.c.end_date.desc())
)
results = await get_database().fetch_all(query)
return [Meeting(**result) for result in results]
async def get_active_by_calendar_event(
self, room: Room, calendar_event_id: str, current_time: datetime
) -> Meeting | None:

View File

@@ -57,12 +57,6 @@ rooms = sqlalchemy.Table(
sqlalchemy.String,
nullable=False,
),
sqlalchemy.Column(
"use_celery",
sqlalchemy.Boolean,
nullable=False,
server_default=false(),
),
sqlalchemy.Column(
"skip_consent",
sqlalchemy.Boolean,
@@ -97,7 +91,6 @@ class Room(BaseModel):
ics_last_sync: datetime | None = None
ics_last_etag: str | None = None
platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
use_celery: bool = False
skip_consent: bool = False
@@ -245,6 +238,11 @@ class RoomController:
return room
async def get_by_names(self, names: list[str]) -> list[Room]:
query = rooms.select().where(rooms.c.name.in_(names))
results = await get_database().fetch_all(query)
return [Room(**r) for r in results]
async def get_ics_enabled(self) -> list[Room]:
query = rooms.select().where(
rooms.c.ics_enabled == True, rooms.c.ics_url != None

View File

@@ -35,7 +35,9 @@ LLM_RATE_LIMIT_PER_SECOND = 10
# Task execution timeouts (seconds)
TIMEOUT_SHORT = 60 # Quick operations: API calls, DB updates
TIMEOUT_MEDIUM = 120 # Single LLM calls, waveform generation
TIMEOUT_MEDIUM = (
300 # Single LLM calls, waveform generation (5m for slow LLM responses)
)
TIMEOUT_LONG = 180 # Action items (larger context LLM)
TIMEOUT_AUDIO = 720 # Audio processing: padding, mixdown
TIMEOUT_HEAVY = 600 # Transcription, fan-out LLM tasks

View File

@@ -322,6 +322,7 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsRe
mtg_session_id = recording.mtg_session_id
async with fresh_db_connection():
from reflector.db.transcripts import ( # noqa: PLC0415
TranscriptDuration,
TranscriptParticipant,
transcripts_controller,
)
@@ -330,15 +331,26 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsRe
if not transcript:
raise ValueError(f"Transcript {input.transcript_id} not found")
# Note: title NOT cleared - preserves existing titles
# Duration from Daily API (seconds -> milliseconds) - master source
duration_ms = recording.duration * 1000 if recording.duration else 0
await transcripts_controller.update(
transcript,
{
"events": [],
"topics": [],
"participants": [],
"duration": duration_ms,
},
)
await append_event_and_broadcast(
input.transcript_id,
transcript,
"DURATION",
TranscriptDuration(duration=duration_ms),
logger=logger,
)
mtg_session_id = assert_non_none_and_non_empty(
mtg_session_id, "mtg_session_id is required"
)
@@ -1095,7 +1107,7 @@ async def identify_action_items(
@daily_multitrack_pipeline.task(
parents=[generate_title, generate_recap, identify_action_items],
parents=[process_tracks, generate_title, generate_recap, identify_action_items],
execution_timeout=timedelta(seconds=TIMEOUT_SHORT),
retries=3,
)
@@ -1108,12 +1120,8 @@ async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
"""
ctx.log("finalize: saving transcript and setting status to 'ended'")
mixdown_result = ctx.task_output(mixdown_tracks)
track_result = ctx.task_output(process_tracks)
duration = mixdown_result.duration
all_words = track_result.all_words
# Cleanup temporary padded S3 files (deferred until finalize for semantic parity with Celery)
created_padded_files = track_result.created_padded_files
if created_padded_files:
@@ -1133,7 +1141,6 @@ async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
async with fresh_db_connection():
from reflector.db.transcripts import ( # noqa: PLC0415
TranscriptDuration,
TranscriptText,
transcripts_controller,
)
@@ -1142,8 +1149,6 @@ async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
if transcript is None:
raise ValueError(f"Transcript {input.transcript_id} not found in database")
merged_transcript = TranscriptType(words=all_words, translation=None)
await append_event_and_broadcast(
input.transcript_id,
transcript,
@@ -1155,21 +1160,15 @@ async def finalize(input: PipelineInput, ctx: Context) -> FinalizeResult:
logger=logger,
)
# Save duration and clear workflow_run_id (workflow completed successfully)
# Note: title/long_summary/short_summary already saved by their callbacks
# Clear workflow_run_id (workflow completed successfully)
# Note: title/long_summary/short_summary/duration already saved by their callbacks
await transcripts_controller.update(
transcript,
{
"duration": duration,
"workflow_run_id": None, # Clear on success - no need to resume
},
)
duration_data = TranscriptDuration(duration=duration)
await append_event_and_broadcast(
input.transcript_id, transcript, "DURATION", duration_data, logger=logger
)
await set_status_and_broadcast(input.transcript_id, "ended", logger=logger)
ctx.log(

View File

@@ -8,6 +8,7 @@ import os
import httpx
from pydantic import BaseModel
from reflector.hatchet.constants import TIMEOUT_AUDIO
from reflector.logger import logger
@@ -63,7 +64,7 @@ class AudioPaddingModalProcessor:
headers["Authorization"] = f"Bearer {self.modal_api_key}"
try:
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(timeout=TIMEOUT_AUDIO) as client:
response = await client.post(
url,
headers=headers,

View File

@@ -15,14 +15,10 @@ from hatchet_sdk.clients.rest.exceptions import ApiException, NotFoundException
from hatchet_sdk.clients.rest.models import V1TaskStatus
from reflector.db.recordings import recordings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import Transcript, transcripts_controller
from reflector.hatchet.client import HatchetClientManager
from reflector.logger import logger
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_multitrack_pipeline import (
task_pipeline_multitrack_process,
)
from reflector.utils.string import NonEmptyString
@@ -181,21 +177,7 @@ async def dispatch_transcript_processing(
Returns AsyncResult for Celery tasks, None for Hatchet workflows.
"""
if isinstance(config, MultitrackProcessingConfig):
use_celery = False
if config.room_id:
room = await rooms_controller.get_by_id(config.room_id)
use_celery = room.use_celery if room else False
use_hatchet = not use_celery
if use_celery:
logger.info(
"Room uses legacy Celery processing",
room_id=config.room_id,
transcript_id=config.transcript_id,
)
if use_hatchet:
# Multitrack processing always uses Hatchet (no Celery fallback)
# First check if we can replay (outside transaction since it's read-only)
transcript = await transcripts_controller.get_by_id(config.transcript_id)
if transcript and transcript.workflow_run_id and not force:
@@ -203,9 +185,7 @@ async def dispatch_transcript_processing(
transcript.workflow_run_id
)
if can_replay:
await HatchetClientManager.replay_workflow(
transcript.workflow_run_id
)
await HatchetClientManager.replay_workflow(transcript.workflow_run_id)
logger.info(
"Replaying Hatchet workflow",
workflow_id=transcript.workflow_run_id,
@@ -233,9 +213,7 @@ async def dispatch_transcript_processing(
# Force: cancel old workflow if exists
if force and transcript and transcript.workflow_run_id:
try:
await HatchetClientManager.cancel_workflow(
transcript.workflow_run_id
)
await HatchetClientManager.cancel_workflow(transcript.workflow_run_id)
logger.info(
"Cancelled old workflow (--force)",
workflow_id=transcript.workflow_run_id,
@@ -245,9 +223,7 @@ async def dispatch_transcript_processing(
"Old workflow already deleted (--force)",
workflow_id=transcript.workflow_run_id,
)
await transcripts_controller.update(
transcript, {"workflow_run_id": None}
)
await transcripts_controller.update(transcript, {"workflow_run_id": None})
# Re-fetch and check for concurrent dispatch (optimistic approach).
# No database lock - worst case is duplicate dispatch, but Hatchet
@@ -293,12 +269,6 @@ async def dispatch_transcript_processing(
logger.info("Hatchet workflow dispatched", workflow_id=workflow_id)
return None
# Celery pipeline (durable workflows disabled)
return task_pipeline_multitrack_process.delay(
transcript_id=config.transcript_id,
bucket_name=config.bucket_name,
track_keys=config.track_keys,
)
elif isinstance(config, FileProcessingConfig):
return task_pipeline_file_process.delay(transcript_id=config.transcript_id)
else:

View File

@@ -1,7 +1,7 @@
from pydantic.types import PositiveInt
from pydantic_settings import BaseSettings, SettingsConfigDict
from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
from reflector.schemas.platform import DAILY_PLATFORM, Platform
from reflector.utils.string import NonEmptyString
@@ -155,7 +155,7 @@ class Settings(BaseSettings):
None # Webhook UUID for this environment. Not used by production code
)
# Platform Configuration
DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM
DEFAULT_VIDEO_PLATFORM: Platform = DAILY_PLATFORM
# Zulip integration
ZULIP_REALM: str | None = None

View File

@@ -1,4 +1,6 @@
import asyncio
import logging
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Annotated, Any, Literal, Optional
@@ -6,13 +8,14 @@ from typing import Annotated, Any, Literal, Optional
from fastapi import APIRouter, Depends, HTTPException
from fastapi_pagination import Page
from fastapi_pagination.ext.databases import apaginate
from pydantic import BaseModel
from pydantic import BaseModel, Field
from redis.exceptions import LockError
import reflector.auth as auth
from reflector.db import get_database
from reflector.db.calendar_events import calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import Room as DbRoom
from reflector.db.rooms import rooms_controller
from reflector.redis_cache import RedisAsyncLock
from reflector.schemas.platform import Platform
@@ -195,6 +198,73 @@ async def rooms_list(
return paginated
class BulkStatusRequest(BaseModel):
room_names: list[str] = Field(max_length=100)
class RoomMeetingStatus(BaseModel):
active_meetings: list[Meeting]
upcoming_events: list[CalendarEventResponse]
@router.post("/rooms/meetings/bulk-status", response_model=dict[str, RoomMeetingStatus])
async def rooms_bulk_meeting_status(
request: BulkStatusRequest,
user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
):
user_id = user["sub"] if user else None
all_rooms = await rooms_controller.get_by_names(request.room_names)
# Filter to rooms the user can see (owned or shared), matching rooms_list behavior
rooms = [
r
for r in all_rooms
if r.is_shared or (user_id is not None and r.user_id == user_id)
]
room_by_id: dict[str, DbRoom] = {r.id: r for r in rooms}
room_ids = list(room_by_id.keys())
current_time = datetime.now(timezone.utc)
active_meetings, upcoming_events = await asyncio.gather(
meetings_controller.get_all_active_for_rooms(room_ids, current_time),
calendar_events_controller.get_upcoming_for_rooms(room_ids),
)
# Group by room name
active_by_room: dict[str, list[Meeting]] = defaultdict(list)
for m in active_meetings:
room = room_by_id.get(m.room_id)
if not room:
continue
m.platform = room.platform
if user_id != room.user_id and m.platform == "whereby":
m.host_room_url = ""
active_by_room[room.name].append(
Meeting.model_validate(m, from_attributes=True)
)
upcoming_by_room: dict[str, list[CalendarEventResponse]] = defaultdict(list)
for e in upcoming_events:
room = room_by_id.get(e.room_id)
if not room:
continue
if user_id != room.user_id:
e.description = None
e.attendees = None
upcoming_by_room[room.name].append(
CalendarEventResponse.model_validate(e, from_attributes=True)
)
result: dict[str, RoomMeetingStatus] = {}
for name in request.room_names:
result[name] = RoomMeetingStatus(
active_meetings=active_by_room.get(name, []),
upcoming_events=upcoming_by_room.get(name, []),
)
return result
@router.get("/rooms/{room_id}", response_model=RoomDetails)
async def rooms_get(
room_id: str,

View File

@@ -27,9 +27,6 @@ from reflector.db.transcripts import (
from reflector.hatchet.client import HatchetClientManager
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_live_pipeline import asynctask
from reflector.pipelines.main_multitrack_pipeline import (
task_pipeline_multitrack_process,
)
from reflector.pipelines.topic_processing import EmptyPipeline
from reflector.processors import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
@@ -351,17 +348,7 @@ async def _process_multitrack_recording_inner(
room_id=room.id,
)
use_celery = room and room.use_celery
use_hatchet = not use_celery
if use_celery:
logger.info(
"Room uses legacy Celery processing",
room_id=room.id,
transcript_id=transcript.id,
)
if use_hatchet:
# Multitrack processing always uses Hatchet (no Celery fallback)
workflow_id = await HatchetClientManager.start_workflow(
workflow_name="DiarizationPipeline",
input_data={
@@ -383,17 +370,7 @@ async def _process_multitrack_recording_inner(
transcript_id=transcript.id,
)
await transcripts_controller.update(
transcript, {"workflow_run_id": workflow_id}
)
return
# Celery pipeline (runs when durable workflows disabled)
task_pipeline_multitrack_process.delay(
transcript_id=transcript.id,
bucket_name=bucket_name,
track_keys=filter_cam_audio_tracks(track_keys),
)
await transcripts_controller.update(transcript, {"workflow_run_id": workflow_id})
@shared_task
@@ -1072,10 +1049,7 @@ async def reprocess_failed_daily_recordings():
)
continue
use_celery = room and room.use_celery
use_hatchet = not use_celery
if use_hatchet:
# Multitrack reprocessing always uses Hatchet (no Celery fallback)
if not transcript:
logger.warning(
"No transcript for Hatchet reprocessing, skipping",
@@ -1112,26 +1086,6 @@ async def reprocess_failed_daily_recordings():
room_name=meeting.room_name,
track_count=len(recording.track_keys),
)
else:
logger.info(
"Queueing Daily recording for Celery reprocessing",
recording_id=recording.id,
room_name=meeting.room_name,
track_count=len(recording.track_keys),
transcript_status=transcript.status if transcript else None,
)
# For reprocessing, pass actual recording time (though it's ignored - see _process_multitrack_recording_inner)
# Reprocessing uses recording.meeting_id directly instead of time-based matching
recording_start_ts = int(recording.recorded_at.timestamp())
process_multitrack_recording.delay(
bucket_name=bucket_name,
daily_room_name=meeting.room_name,
recording_id=recording.id,
track_keys=recording.track_keys,
recording_start_ts=recording_start_ts,
)
reprocessed_count += 1

View File

@@ -11,7 +11,6 @@ broadcast messages to all connected websockets.
import asyncio
import json
import threading
import redis.asyncio as redis
from fastapi import WebSocket
@@ -98,6 +97,7 @@ class WebsocketManager:
async def _pubsub_data_reader(self, pubsub_subscriber):
while True:
# timeout=1.0 prevents tight CPU loop when no messages available
message = await pubsub_subscriber.get_message(
ignore_subscribe_messages=True
)
@@ -109,29 +109,38 @@ class WebsocketManager:
await socket.send_json(data)
# Process-global singleton to ensure only one WebsocketManager instance exists.
# Multiple instances would cause resource leaks and CPU issues.
_ws_manager: WebsocketManager | None = None
def get_ws_manager() -> WebsocketManager:
"""
Returns the WebsocketManager instance for managing websockets.
Returns the global WebsocketManager singleton.
This function initializes and returns the WebsocketManager instance,
which is responsible for managing websockets and handling websocket
connections.
Creates instance on first call, subsequent calls return cached instance.
Thread-safe via GIL. Concurrent initialization may create duplicate
instances but last write wins (acceptable for this use case).
Returns:
WebsocketManager: The initialized WebsocketManager instance.
Raises:
ImportError: If the 'reflector.settings' module cannot be imported.
RedisConnectionError: If there is an error connecting to the Redis server.
WebsocketManager: The global WebsocketManager instance.
"""
local = threading.local()
if hasattr(local, "ws_manager"):
return local.ws_manager
global _ws_manager
if _ws_manager is not None:
return _ws_manager
# No lock needed - GIL makes this safe enough
# Worst case: race creates two instances, last assignment wins
pubsub_client = RedisPubSubManager(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
)
ws_manager = WebsocketManager(pubsub_client=pubsub_client)
local.ws_manager = ws_manager
return ws_manager
_ws_manager = WebsocketManager(pubsub_client=pubsub_client)
return _ws_manager
def reset_ws_manager() -> None:
"""Reset singleton for testing. DO NOT use in production."""
global _ws_manager
_ws_manager = None

View File

@@ -1,11 +1,10 @@
import os
from contextlib import asynccontextmanager
from tempfile import NamedTemporaryFile
from unittest.mock import patch
import pytest
from reflector.schemas.platform import WHEREBY_PLATFORM
from reflector.schemas.platform import DAILY_PLATFORM, WHEREBY_PLATFORM
@pytest.fixture(scope="session", autouse=True)
@@ -15,6 +14,7 @@ def register_mock_platform():
from reflector.video_platforms.registry import register_platform
register_platform(WHEREBY_PLATFORM, MockPlatformClient)
register_platform(DAILY_PLATFORM, MockPlatformClient)
yield
@@ -333,10 +333,13 @@ def celery_enable_logging():
@pytest.fixture(scope="session")
def celery_config():
with NamedTemporaryFile() as f:
redis_host = os.environ.get("REDIS_HOST", "localhost")
redis_port = os.environ.get("REDIS_PORT", "6379")
# Use db 2 to avoid conflicts with main app
redis_url = f"redis://{redis_host}:{redis_port}/2"
yield {
"broker_url": "memory://",
"result_backend": f"db+sqlite:///{f.name}",
"broker_url": redis_url,
"result_backend": redis_url,
}
@@ -370,9 +373,12 @@ async def ws_manager_in_memory(monkeypatch):
def __init__(self, queue: asyncio.Queue):
self.queue = queue
async def get_message(self, ignore_subscribe_messages: bool = True):
async def get_message(
self, ignore_subscribe_messages: bool = True, timeout: float | None = None
):
wait_timeout = timeout if timeout is not None else 0.05
try:
return await asyncio.wait_for(self.queue.get(), timeout=0.05)
return await asyncio.wait_for(self.queue.get(), timeout=wait_timeout)
except Exception:
return None

View File

@@ -1,6 +1,6 @@
import asyncio
import time
from unittest.mock import patch
from unittest.mock import AsyncMock, patch
import pytest
from httpx import ASGITransport, AsyncClient
@@ -142,17 +142,17 @@ async def test_whereby_recording_uses_file_pipeline(client):
"reflector.services.transcript_process.task_pipeline_file_process"
) as mock_file_pipeline,
patch(
"reflector.services.transcript_process.task_pipeline_multitrack_process"
) as mock_multitrack_pipeline,
"reflector.services.transcript_process.HatchetClientManager"
) as mock_hatchet,
):
response = await client.post(f"/transcripts/{transcript.id}/process")
assert response.status_code == 200
assert response.json()["status"] == "ok"
# Whereby recordings should use file pipeline
# Whereby recordings should use file pipeline, not Hatchet
mock_file_pipeline.delay.assert_called_once_with(transcript_id=transcript.id)
mock_multitrack_pipeline.delay.assert_not_called()
mock_hatchet.start_workflow.assert_not_called()
@pytest.mark.usefixtures("setup_database")
@@ -177,8 +177,6 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
recording_trigger="automatic-2nd-participant",
is_shared=False,
)
# Force Celery backend for test
await rooms_controller.update(room, {"use_celery": True})
transcript = await transcripts_controller.add(
"",
@@ -213,18 +211,23 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
"reflector.services.transcript_process.task_pipeline_file_process"
) as mock_file_pipeline,
patch(
"reflector.services.transcript_process.task_pipeline_multitrack_process"
) as mock_multitrack_pipeline,
"reflector.services.transcript_process.HatchetClientManager"
) as mock_hatchet,
):
mock_hatchet.start_workflow = AsyncMock(return_value="test-workflow-id")
response = await client.post(f"/transcripts/{transcript.id}/process")
assert response.status_code == 200
assert response.json()["status"] == "ok"
# Daily.co multitrack recordings should use multitrack pipeline
mock_multitrack_pipeline.delay.assert_called_once_with(
transcript_id=transcript.id,
bucket_name="daily-bucket",
track_keys=track_keys,
)
# Daily.co multitrack recordings should use Hatchet workflow
mock_hatchet.start_workflow.assert_called_once()
call_kwargs = mock_hatchet.start_workflow.call_args.kwargs
assert call_kwargs["workflow_name"] == "DiarizationPipeline"
assert call_kwargs["input_data"]["transcript_id"] == transcript.id
assert call_kwargs["input_data"]["bucket_name"] == "daily-bucket"
assert call_kwargs["input_data"]["tracks"] == [
{"s3_key": k} for k in track_keys
]
mock_file_pipeline.delay.assert_not_called()

View File

@@ -115,9 +115,7 @@ def appserver(tmpdir, setup_database, celery_session_app, celery_session_worker)
settings.DATA_DIR = DATA_DIR
@pytest.fixture(scope="session")
def celery_includes():
return ["reflector.pipelines.main_live_pipeline"]
# Using celery_includes from conftest.py which includes both pipelines
@pytest.mark.usefixtures("setup_database")

View File

@@ -56,7 +56,12 @@ def appserver_ws_user(setup_database):
if server_instance:
server_instance.should_exit = True
server_thread.join(timeout=30)
server_thread.join(timeout=2.0)
# Reset global singleton for test isolation
from reflector.ws_manager import reset_ws_manager
reset_ws_manager()
@pytest.fixture(autouse=True)
@@ -133,6 +138,8 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
# Connect and then trigger an event via HTTP create
async with aconnect_ws(base_ws, subprotocols=subprotocols) as ws:
await asyncio.sleep(0.2)
# Emit an event to the user's room via a standard HTTP action
from httpx import AsyncClient
@@ -150,6 +157,7 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
"email": "user-abc@example.com",
}
# Use in-memory client (global singleton makes it share ws_manager)
async with AsyncClient(app=app, base_url=f"http://{host}:{port}/v1") as ac:
# Create a transcript as this user so that the server publishes TRANSCRIPT_CREATED to user room
resp = await ac.post("/transcripts", json={"name": "WS Test"})

View File

@@ -0,0 +1,217 @@
import "@testing-library/jest-dom";
// --- Module mocks (hoisted before imports) ---
jest.mock("../apiClient", () => ({
client: {
GET: jest.fn(),
POST: jest.fn(),
PUT: jest.fn(),
PATCH: jest.fn(),
DELETE: jest.fn(),
use: jest.fn(),
},
$api: {
useQuery: jest.fn(),
useMutation: jest.fn(),
queryOptions: (method: string, path: string, init?: unknown) =>
init === undefined
? { queryKey: [method, path] }
: { queryKey: [method, path, init] },
},
API_URL: "http://test",
WEBSOCKET_URL: "ws://test",
configureApiAuth: jest.fn(),
}));
jest.mock("../AuthProvider", () => ({
useAuth: () => ({
status: "authenticated" as const,
accessToken: "test-token",
accessTokenExpires: Date.now() + 3600000,
user: { id: "user1", name: "Test User" },
update: jest.fn(),
signIn: jest.fn(),
signOut: jest.fn(),
lastUserId: "user1",
}),
}));
// Recreate the batcher with a 0ms window. setTimeout(fn, 0) defers to the next
// macrotask boundary — after all synchronous React rendering completes. All
// useQuery queryFns fire within the same macrotask, so they all queue into one
// batch before the timer fires. This is deterministic and avoids fake timers.
jest.mock("../meetingStatusBatcher", () => {
const actual = jest.requireActual("../meetingStatusBatcher");
return {
...actual,
meetingStatusBatcher: actual.createMeetingStatusBatcher(0),
};
});
// --- Imports (after mocks) ---
import React from "react";
import { render, waitFor, screen } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { useRoomActiveMeetings, useRoomUpcomingMeetings } from "../apiHooks";
import { client } from "../apiClient";
import { ErrorProvider } from "../../(errors)/errorContext";
const mockClient = client as { POST: jest.Mock };
// --- Helpers ---
function mockBulkStatusEndpoint(
roomData?: Record<
string,
{ active_meetings: unknown[]; upcoming_events: unknown[] }
>,
) {
mockClient.POST.mockImplementation(
async (_path: string, options: { body: { room_names: string[] } }) => {
const roomNames: string[] = options.body.room_names;
const src = roomData ?? {};
const data = Object.fromEntries(
roomNames.map((name) => [
name,
src[name] ?? { active_meetings: [], upcoming_events: [] },
]),
);
return { data, error: undefined, response: {} };
},
);
}
// --- Test component: renders N room cards, each using both hooks ---
function RoomCard({ roomName }: { roomName: string }) {
const active = useRoomActiveMeetings(roomName);
const upcoming = useRoomUpcomingMeetings(roomName);
if (active.isLoading || upcoming.isLoading) {
return <div data-testid={`room-${roomName}`}>loading</div>;
}
return (
<div data-testid={`room-${roomName}`}>
{active.data?.length ?? 0} active, {upcoming.data?.length ?? 0} upcoming
</div>
);
}
function RoomList({ roomNames }: { roomNames: string[] }) {
return (
<>
{roomNames.map((name) => (
<RoomCard key={name} roomName={name} />
))}
</>
);
}
function createWrapper() {
const queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
},
});
return function Wrapper({ children }: { children: React.ReactNode }) {
return (
<QueryClientProvider client={queryClient}>
<ErrorProvider>{children}</ErrorProvider>
</QueryClientProvider>
);
};
}
// --- Tests ---
describe("meeting status batcher integration", () => {
afterEach(() => jest.clearAllMocks());
it("batches multiple room queries into a single POST request", async () => {
const rooms = Array.from({ length: 10 }, (_, i) => `room-${i}`);
mockBulkStatusEndpoint();
render(<RoomList roomNames={rooms} />, { wrapper: createWrapper() });
await waitFor(() => {
for (const name of rooms) {
expect(screen.getByTestId(`room-${name}`)).toHaveTextContent(
"0 active, 0 upcoming",
);
}
});
const postCalls = mockClient.POST.mock.calls.filter(
([path]: [string]) => path === "/v1/rooms/meetings/bulk-status",
);
// Without batching this would be 20 calls (2 hooks x 10 rooms).
expect(postCalls).toHaveLength(1);
// The single call should contain all 10 rooms (deduplicated)
const requestedRooms: string[] = postCalls[0][1].body.room_names;
for (const name of rooms) {
expect(requestedRooms).toContain(name);
}
});
it("batcher fetcher returns room-specific data", async () => {
const {
meetingStatusBatcher: batcher,
} = require("../meetingStatusBatcher");
mockBulkStatusEndpoint({
"room-a": {
active_meetings: [{ id: "m1", room_name: "room-a" }],
upcoming_events: [],
},
"room-b": {
active_meetings: [],
upcoming_events: [{ id: "e1", title: "Standup" }],
},
});
const [resultA, resultB] = await Promise.all([
batcher.fetch("room-a"),
batcher.fetch("room-b"),
]);
expect(mockClient.POST).toHaveBeenCalledTimes(1);
expect(resultA.active_meetings).toEqual([
{ id: "m1", room_name: "room-a" },
]);
expect(resultA.upcoming_events).toEqual([]);
expect(resultB.active_meetings).toEqual([]);
expect(resultB.upcoming_events).toEqual([{ id: "e1", title: "Standup" }]);
});
it("renders room-specific meeting data through hooks", async () => {
mockBulkStatusEndpoint({
"room-a": {
active_meetings: [{ id: "m1", room_name: "room-a" }],
upcoming_events: [],
},
"room-b": {
active_meetings: [],
upcoming_events: [{ id: "e1", title: "Standup" }],
},
});
render(<RoomList roomNames={["room-a", "room-b"]} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
expect(screen.getByTestId("room-room-a")).toHaveTextContent(
"1 active, 0 upcoming",
);
expect(screen.getByTestId("room-room-b")).toHaveTextContent(
"0 active, 1 upcoming",
);
});
});
});

View File

@@ -2,9 +2,10 @@
import { $api } from "./apiClient";
import { useError } from "../(errors)/errorContext";
import { QueryClient, useQueryClient } from "@tanstack/react-query";
import { QueryClient, useQuery, useQueryClient } from "@tanstack/react-query";
import type { components } from "../reflector-api";
import { useAuth } from "./AuthProvider";
import { meetingStatusBatcher } from "./meetingStatusBatcher";
import { MeetingId } from "./types";
import { NonEmptyString } from "./utils";
@@ -697,15 +698,7 @@ export function useRoomsCreateMeeting() {
queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
}),
queryClient.invalidateQueries({
queryKey: $api.queryOptions(
"get",
"/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
{
params: {
path: { room_name: roomName },
},
},
).queryKey,
queryKey: meetingStatusKeys.active(roomName),
}),
]);
},
@@ -734,42 +727,39 @@ export function useRoomGetByName(roomName: string | null) {
export function useRoomUpcomingMeetings(roomName: string | null) {
const { isAuthenticated } = useAuthReady();
return $api.useQuery(
"get",
"/v1/rooms/{room_name}/meetings/upcoming" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_UPCOMING_PATH_PARTIAL}`,
{
params: {
path: { room_name: roomName! },
return useQuery({
queryKey: meetingStatusKeys.upcoming(roomName!),
queryFn: async () => {
const result = await meetingStatusBatcher.fetch(roomName!);
return result.upcoming_events;
},
},
{
enabled: !!roomName && isAuthenticated,
},
);
});
}
const MEETINGS_PATH_PARTIAL = "meetings" as const;
const MEETINGS_ACTIVE_PATH_PARTIAL = `${MEETINGS_PATH_PARTIAL}/active` as const;
const MEETINGS_UPCOMING_PATH_PARTIAL =
`${MEETINGS_PATH_PARTIAL}/upcoming` as const;
const MEETING_LIST_PATH_PARTIALS = [
MEETINGS_ACTIVE_PATH_PARTIAL,
MEETINGS_UPCOMING_PATH_PARTIAL,
];
// Query keys reuse $api.queryOptions so cache identity matches the original
// per-room GET endpoints. The actual fetch goes through the batcher, but the
// keys stay consistent with the rest of the codebase.
const meetingStatusKeys = {
active: (roomName: string) =>
$api.queryOptions("get", "/v1/rooms/{room_name}/meetings/active", {
params: { path: { room_name: roomName } },
}).queryKey,
upcoming: (roomName: string) =>
$api.queryOptions("get", "/v1/rooms/{room_name}/meetings/upcoming", {
params: { path: { room_name: roomName } },
}).queryKey,
};
export function useRoomActiveMeetings(roomName: string | null) {
return $api.useQuery(
"get",
"/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
{
params: {
path: { room_name: roomName! },
return useQuery({
queryKey: meetingStatusKeys.active(roomName!),
queryFn: async () => {
const result = await meetingStatusBatcher.fetch(roomName!);
return result.active_meetings;
},
},
{
enabled: !!roomName,
},
);
});
}
export function useRoomGetMeeting(

View File

@@ -0,0 +1,37 @@
import { create, keyResolver, windowScheduler } from "@yornaath/batshit";
import { client } from "./apiClient";
import type { components } from "../reflector-api";
type MeetingStatusResult = {
roomName: string;
active_meetings: components["schemas"]["Meeting"][];
upcoming_events: components["schemas"]["CalendarEventResponse"][];
};
const BATCH_WINDOW_MS = 10;
export function createMeetingStatusBatcher(windowMs: number = BATCH_WINDOW_MS) {
return create({
fetcher: async (roomNames: string[]): Promise<MeetingStatusResult[]> => {
const unique = [...new Set(roomNames)];
const { data, error } = await client.POST(
"/v1/rooms/meetings/bulk-status",
{ body: { room_names: unique } },
);
if (error || !data) {
throw new Error(
`bulk-status fetch failed: ${JSON.stringify(error ?? "no data")}`,
);
}
return roomNames.map((name) => ({
roomName: name,
active_meetings: data[name]?.active_meetings ?? [],
upcoming_events: data[name]?.upcoming_events ?? [],
}));
},
resolver: keyResolver("roomName"),
scheduler: windowScheduler(windowMs),
});
}
export const meetingStatusBatcher = createMeetingStatusBatcher();

View File

@@ -118,6 +118,23 @@ export interface paths {
patch?: never;
trace?: never;
};
"/v1/rooms/meetings/bulk-status": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get?: never;
put?: never;
/** Rooms Bulk Meeting Status */
post: operations["v1_rooms_bulk_meeting_status"];
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/v1/rooms/{room_id}": {
parameters: {
query?: never;
@@ -799,6 +816,11 @@ export interface components {
*/
chunk: string;
};
/** BulkStatusRequest */
BulkStatusRequest: {
/** Room Names */
room_names: string[];
};
/** CalendarEventResponse */
CalendarEventResponse: {
/** Id */
@@ -1735,6 +1757,13 @@ export interface components {
/** Webhook Secret */
webhook_secret: string | null;
};
/** RoomMeetingStatus */
RoomMeetingStatus: {
/** Active Meetings */
active_meetings: components["schemas"]["Meeting"][];
/** Upcoming Events */
upcoming_events: components["schemas"]["CalendarEventResponse"][];
};
/** RtcOffer */
RtcOffer: {
/** Sdp */
@@ -2272,6 +2301,41 @@ export interface operations {
};
};
};
v1_rooms_bulk_meeting_status: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
requestBody: {
content: {
"application/json": components["schemas"]["BulkStatusRequest"];
};
};
responses: {
/** @description Successful Response */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": {
[key: string]: components["schemas"]["RoomMeetingStatus"];
};
};
};
/** @description Validation Error */
422: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
v1_rooms_get: {
parameters: {
query?: never;

View File

@@ -1,8 +1,22 @@
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
testEnvironment: "jest-environment-jsdom",
roots: ["<rootDir>/app"],
testMatch: ["**/__tests__/**/*.test.ts"],
collectCoverage: true,
collectCoverageFrom: ["app/**/*.ts", "!app/**/*.d.ts"],
testMatch: ["**/__tests__/**/*.test.ts", "**/__tests__/**/*.test.tsx"],
collectCoverage: false,
transform: {
"^.+\\.[jt]sx?$": [
"ts-jest",
{
tsconfig: {
jsx: "react-jsx",
module: "esnext",
moduleResolution: "bundler",
esModuleInterop: true,
strict: true,
downlevelIteration: true,
lib: ["dom", "dom.iterable", "esnext"],
},
},
],
},
};

View File

@@ -23,6 +23,7 @@
"@tanstack/react-query": "^5.85.9",
"@types/ioredis": "^5.0.0",
"@whereby.com/browser-sdk": "^3.3.4",
"@yornaath/batshit": "^0.14.0",
"autoprefixer": "10.4.20",
"axios": "^1.8.2",
"eslint": "^9.33.0",
@@ -61,9 +62,13 @@
"author": "Andreas <andreas@monadical.com>",
"license": "All Rights Reserved",
"devDependencies": {
"@testing-library/dom": "^10.4.1",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.2",
"@types/jest": "^30.0.0",
"@types/react": "18.2.20",
"jest": "^30.1.3",
"jest-environment-jsdom": "^30.2.0",
"openapi-typescript": "^7.9.1",
"prettier": "^3.0.0",
"ts-jest": "^29.4.1"

www/pnpm-lock.yaml (generated, 808 changes): diff suppressed because it is too large