Mirror of https://github.com/Monadical-SAS/reflector.git
Synced 2026-02-06 18:56:48 +00:00

Compare commits: fix-presen… to fix-room-q…

8 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | cdc495499b |  |
|  | ae44f5227b |  |
|  | 4339ffffcf |  |
|  | 9dc6c20ef8 |  |
|  | 931c344ddf |  |
|  | 15ab2e306e |  |
|  | 129290517e |  |
|  | 7e072219bf |  |
.github/workflows/test_next_server.yml (vendored) — 12 lines changed

@@ -13,6 +13,9 @@ on:
jobs:
  test-next-server:
    runs-on: ubuntu-latest
    concurrency:
      group: test-next-server-${{ github.ref }}
      cancel-in-progress: true

    defaults:
      run:
@@ -21,17 +24,12 @@ jobs:
    steps:
      - uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Install pnpm
        uses: pnpm/action-setup@v4
        with:
          version: 8
          package_json_file: './www/package.json'

      - name: Setup Node.js cache
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
@@ -0,0 +1,35 @@
"""drop_use_celery_column

Revision ID: 3aa20b96d963
Revises: e69f08ead8ea
Create Date: 2026-02-05 10:12:44.065279

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "3aa20b96d963"
down_revision: Union[str, None] = "e69f08ead8ea"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.drop_column("use_celery")


def downgrade() -> None:
    with op.batch_alter_table("room", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column(
                "use_celery",
                sa.Boolean(),
                server_default=sa.text("false"),
                nullable=False,
            )
        )
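For reference, a minimal sketch of applying this migration through Alembic's Python command API, equivalent to `alembic upgrade head` on the CLI. The `alembic.ini` path is an assumption about the project layout, not something shown in this compare.

```python
# Sketch: apply / roll back the use_celery drop via Alembic's command API.
# Assumes an alembic.ini at the repository root (hypothetical path).
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")

# Apply all pending migrations, including 3aa20b96d963.
command.upgrade(cfg, "head")

# Roll back just this migration if needed; downgrade restores the
# use_celery column with server_default false, so existing rows are
# backfilled with False rather than NULL.
command.downgrade(cfg, "e69f08ead8ea")
```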
@@ -104,6 +104,26 @@ class CalendarEventController:
        results = await get_database().fetch_all(query)
        return [CalendarEvent(**result) for result in results]

    async def get_upcoming_for_rooms(
        self, room_ids: list[str], minutes_ahead: int = 120
    ) -> list[CalendarEvent]:
        now = datetime.now(timezone.utc)
        future_time = now + timedelta(minutes=minutes_ahead)
        query = (
            calendar_events.select()
            .where(
                sa.and_(
                    calendar_events.c.room_id.in_(room_ids),
                    calendar_events.c.is_deleted == False,
                    calendar_events.c.start_time <= future_time,
                    calendar_events.c.end_time >= now,
                )
            )
            .order_by(calendar_events.c.start_time.asc())
        )
        results = await get_database().fetch_all(query)
        return [CalendarEvent(**result) for result in results]

    async def get_by_id(self, event_id: str) -> CalendarEvent | None:
        query = calendar_events.select().where(calendar_events.c.id == event_id)
        result = await get_database().fetch_one(query)
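Note the filter is an interval-overlap test, not a "starts in the future" test: an event counts as upcoming when its `[start_time, end_time]` range overlaps `[now, now + minutes_ahead]`, so a meeting already in progress is still returned. A self-contained restatement of the same predicate (names are illustrative):

```python
from datetime import datetime, timedelta, timezone

def overlaps_window(start_time: datetime, end_time: datetime,
                    minutes_ahead: int = 120) -> bool:
    """Same predicate as the SQL filter in get_upcoming_for_rooms:
    the event must start before the window closes and end after it opens."""
    now = datetime.now(timezone.utc)
    future_time = now + timedelta(minutes=minutes_ahead)
    return start_time <= future_time and end_time >= now

# An event that started 10 minutes ago and runs another 20 is "upcoming".
now = datetime.now(timezone.utc)
assert overlaps_window(now - timedelta(minutes=10), now + timedelta(minutes=20))
```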
@@ -301,6 +301,23 @@ class MeetingController:
        results = await get_database().fetch_all(query)
        return [Meeting(**result) for result in results]

    async def get_all_active_for_rooms(
        self, room_ids: list[str], current_time: datetime
    ) -> list[Meeting]:
        query = (
            meetings.select()
            .where(
                sa.and_(
                    meetings.c.room_id.in_(room_ids),
                    meetings.c.end_date > current_time,
                    meetings.c.is_active,
                )
            )
            .order_by(meetings.c.end_date.desc())
        )
        results = await get_database().fetch_all(query)
        return [Meeting(**result) for result in results]

    async def get_active_by_calendar_event(
        self, room: Room, calendar_event_id: str, current_time: datetime
    ) -> Meeting | None:
@@ -57,12 +57,6 @@ rooms = sqlalchemy.Table(
        sqlalchemy.String,
        nullable=False,
    ),
    sqlalchemy.Column(
        "use_celery",
        sqlalchemy.Boolean,
        nullable=False,
        server_default=false(),
    ),
    sqlalchemy.Column(
        "skip_consent",
        sqlalchemy.Boolean,
@@ -97,7 +91,6 @@ class Room(BaseModel):
    ics_last_sync: datetime | None = None
    ics_last_etag: str | None = None
    platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
    use_celery: bool = False
    skip_consent: bool = False
@@ -245,6 +238,11 @@ class RoomController:

        return room

    async def get_by_names(self, names: list[str]) -> list[Room]:
        query = rooms.select().where(rooms.c.name.in_(names))
        results = await get_database().fetch_all(query)
        return [Room(**r) for r in results]

    async def get_ics_enabled(self) -> list[Room]:
        query = rooms.select().where(
            rooms.c.ics_enabled == True, rooms.c.ics_url != None
@@ -1,17 +0,0 @@
"""Presence tracking for meetings."""

from reflector.presence.pending_joins import (
    PENDING_JOIN_PREFIX,
    PENDING_JOIN_TTL,
    create_pending_join,
    delete_pending_join,
    has_pending_joins,
)

__all__ = [
    "PENDING_JOIN_PREFIX",
    "PENDING_JOIN_TTL",
    "create_pending_join",
    "delete_pending_join",
    "has_pending_joins",
]
@@ -1,59 +0,0 @@
"""Track pending join intents in Redis.

When a user signals intent to join a meeting (before WebRTC handshake completes),
we store a pending join record. This prevents the meeting from being deactivated
while users are still connecting.
"""

import time

from redis.asyncio import Redis

from reflector.logger import logger

PENDING_JOIN_TTL = 30  # seconds
PENDING_JOIN_PREFIX = "pending_join"
# Max keys to scan per Redis SCAN iteration
SCAN_BATCH_SIZE = 100


async def create_pending_join(redis: Redis, meeting_id: str, user_id: str) -> None:
    """Create a pending join record. Called before WebRTC handshake."""
    key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
    log = logger.bind(meeting_id=meeting_id, user_id=user_id, key=key)
    await redis.setex(key, PENDING_JOIN_TTL, str(time.time()))
    log.debug("Created pending join")


async def delete_pending_join(redis: Redis, meeting_id: str, user_id: str) -> None:
    """Delete pending join. Called after WebRTC connection established."""
    key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
    log = logger.bind(meeting_id=meeting_id, user_id=user_id, key=key)
    await redis.delete(key)
    log.debug("Deleted pending join")


async def has_pending_joins(redis: Redis, meeting_id: str) -> bool:
    """Check if meeting has any pending joins.

    Uses Redis SCAN to iterate through all keys matching the pattern.
    Properly iterates until cursor returns 0 to ensure all keys are checked.
    """
    pattern = f"{PENDING_JOIN_PREFIX}:{meeting_id}:*"
    log = logger.bind(meeting_id=meeting_id, pattern=pattern)

    cursor = 0
    iterations = 0
    while True:
        cursor, keys = await redis.scan(
            cursor=cursor, match=pattern, count=SCAN_BATCH_SIZE
        )
        iterations += 1
        if keys:
            log.debug("Found pending joins", count=len(keys), iterations=iterations)
            return True
        if cursor == 0:
            break

    log.debug("No pending joins found", iterations=iterations)
    return False
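Taken together, the deleted helpers implemented a short-lived lease around the WebRTC handshake. A minimal sketch of the intended call order, assuming a `redis.asyncio` client; `do_webrtc_handshake` is a hypothetical stand-in for the real client-side handshake:

```python
import asyncio
from redis.asyncio import Redis

PENDING_JOIN_TTL = 30  # seconds, as in the deleted module

async def do_webrtc_handshake() -> None:
    await asyncio.sleep(0)  # placeholder; the real handshake happens client-side

async def join_with_lease(redis: Redis, meeting_id: str, user_id: str) -> None:
    key = f"pending_join:{meeting_id}:{user_id}"
    # 1. Before the handshake: take a 30s lease so the meeting reaper
    #    sees a pending join and skips deactivation.
    await redis.setex(key, PENDING_JOIN_TTL, "1")
    try:
        await do_webrtc_handshake()
    finally:
        # 2. After the connection is up (or failed): release the lease.
        #    If the process crashes instead, the TTL expires it for us.
        await redis.delete(key)
```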
@@ -15,14 +15,10 @@ from hatchet_sdk.clients.rest.exceptions import ApiException, NotFoundException
from hatchet_sdk.clients.rest.models import V1TaskStatus

from reflector.db.recordings import recordings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import Transcript, transcripts_controller
from reflector.hatchet.client import HatchetClientManager
from reflector.logger import logger
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_multitrack_pipeline import (
    task_pipeline_multitrack_process,
)
from reflector.utils.string import NonEmptyString
@@ -181,124 +177,98 @@ async def dispatch_transcript_processing(

    Returns AsyncResult for Celery tasks, None for Hatchet workflows.
    """
    if isinstance(config, MultitrackProcessingConfig):
        use_celery = False
        if config.room_id:
            room = await rooms_controller.get_by_id(config.room_id)
            use_celery = room.use_celery if room else False

        use_hatchet = not use_celery

        if use_celery:
            logger.info(
                "Room uses legacy Celery processing",
                room_id=config.room_id,
                transcript_id=config.transcript_id,
        # Multitrack processing always uses Hatchet (no Celery fallback)
        # First check if we can replay (outside transaction since it's read-only)
        transcript = await transcripts_controller.get_by_id(config.transcript_id)
        if transcript and transcript.workflow_run_id and not force:
            can_replay = await HatchetClientManager.can_replay(
                transcript.workflow_run_id
            )

        if use_hatchet:
            # First check if we can replay (outside transaction since it's read-only)
            transcript = await transcripts_controller.get_by_id(config.transcript_id)
            if transcript and transcript.workflow_run_id and not force:
                can_replay = await HatchetClientManager.can_replay(
                    transcript.workflow_run_id
            if can_replay:
                await HatchetClientManager.replay_workflow(transcript.workflow_run_id)
                logger.info(
                    "Replaying Hatchet workflow",
                    workflow_id=transcript.workflow_run_id,
                )
                if can_replay:
                    await HatchetClientManager.replay_workflow(
                        transcript.workflow_run_id
                    )
                    logger.info(
                        "Replaying Hatchet workflow",
                        workflow_id=transcript.workflow_run_id,
                    )
                    return None
                else:
                    # Workflow can't replay (CANCELLED, COMPLETED, or 404 deleted)
                    # Log and proceed to start new workflow
                    try:
                        status = await HatchetClientManager.get_workflow_run_status(
                            transcript.workflow_run_id
                        )
                        logger.info(
                            "Old workflow not replayable, starting new",
                            old_workflow_id=transcript.workflow_run_id,
                            old_status=status.value,
                        )
                    except NotFoundException:
                        # Workflow deleted from Hatchet but ID still in DB
                        logger.info(
                            "Old workflow not found in Hatchet, starting new",
                            old_workflow_id=transcript.workflow_run_id,
                        )

            # Force: cancel old workflow if exists
            if force and transcript and transcript.workflow_run_id:
                try:
                    await HatchetClientManager.cancel_workflow(
                        transcript.workflow_run_id
                    )
                    logger.info(
                        "Cancelled old workflow (--force)",
                        workflow_id=transcript.workflow_run_id,
                    )
                except NotFoundException:
                    logger.info(
                        "Old workflow already deleted (--force)",
                        workflow_id=transcript.workflow_run_id,
                    )
                await transcripts_controller.update(
                    transcript, {"workflow_run_id": None}
                )

            # Re-fetch and check for concurrent dispatch (optimistic approach).
            # No database lock - worst case is duplicate dispatch, but Hatchet
            # workflows are idempotent so this is acceptable.
            transcript = await transcripts_controller.get_by_id(config.transcript_id)
            if transcript and transcript.workflow_run_id:
                # Another process started a workflow between validation and now
                return None
            else:
                # Workflow can't replay (CANCELLED, COMPLETED, or 404 deleted)
                # Log and proceed to start new workflow
                try:
                    status = await HatchetClientManager.get_workflow_run_status(
                        transcript.workflow_run_id
                    )
                    if status in (V1TaskStatus.RUNNING, V1TaskStatus.QUEUED):
                        logger.info(
                            "Concurrent workflow detected, skipping dispatch",
                            workflow_id=transcript.workflow_run_id,
                        )
                        return None
                except ApiException:
                    # Workflow might be gone (404) or API issue - proceed with new workflow
                    pass
                logger.info(
                    "Old workflow not replayable, starting new",
                    old_workflow_id=transcript.workflow_run_id,
                    old_status=status.value,
                )
            except NotFoundException:
                # Workflow deleted from Hatchet but ID still in DB
                logger.info(
                    "Old workflow not found in Hatchet, starting new",
                    old_workflow_id=transcript.workflow_run_id,
                )

            workflow_id = await HatchetClientManager.start_workflow(
                workflow_name="DiarizationPipeline",
                input_data={
                    "recording_id": config.recording_id,
                    "tracks": [{"s3_key": k} for k in config.track_keys],
                    "bucket_name": config.bucket_name,
                    "transcript_id": config.transcript_id,
                    "room_id": config.room_id,
                },
                additional_metadata={
                    "transcript_id": config.transcript_id,
                    "recording_id": config.recording_id,
                    "daily_recording_id": config.recording_id,
                },
        # Force: cancel old workflow if exists
        if force and transcript and transcript.workflow_run_id:
            try:
                await HatchetClientManager.cancel_workflow(transcript.workflow_run_id)
                logger.info(
                    "Cancelled old workflow (--force)",
                    workflow_id=transcript.workflow_run_id,
                )
            except NotFoundException:
                logger.info(
                    "Old workflow already deleted (--force)",
                    workflow_id=transcript.workflow_run_id,
                )
            await transcripts_controller.update(transcript, {"workflow_run_id": None})

        # Re-fetch and check for concurrent dispatch (optimistic approach).
        # No database lock - worst case is duplicate dispatch, but Hatchet
        # workflows are idempotent so this is acceptable.
        transcript = await transcripts_controller.get_by_id(config.transcript_id)
        if transcript and transcript.workflow_run_id:
            # Another process started a workflow between validation and now
            try:
                status = await HatchetClientManager.get_workflow_run_status(
                    transcript.workflow_run_id
                )
                if status in (V1TaskStatus.RUNNING, V1TaskStatus.QUEUED):
                    logger.info(
                        "Concurrent workflow detected, skipping dispatch",
                        workflow_id=transcript.workflow_run_id,
                    )
                    return None
            except ApiException:
                # Workflow might be gone (404) or API issue - proceed with new workflow
                pass

        workflow_id = await HatchetClientManager.start_workflow(
            workflow_name="DiarizationPipeline",
            input_data={
                "recording_id": config.recording_id,
                "tracks": [{"s3_key": k} for k in config.track_keys],
                "bucket_name": config.bucket_name,
                "transcript_id": config.transcript_id,
                "room_id": config.room_id,
            },
            additional_metadata={
                "transcript_id": config.transcript_id,
                "recording_id": config.recording_id,
                "daily_recording_id": config.recording_id,
            },
        )

        if transcript:
            await transcripts_controller.update(
                transcript, {"workflow_run_id": workflow_id}
            )

        if transcript:
            await transcripts_controller.update(
                transcript, {"workflow_run_id": workflow_id}
            )
        logger.info("Hatchet workflow dispatched", workflow_id=workflow_id)
        return None

            logger.info("Hatchet workflow dispatched", workflow_id=workflow_id)
            return None

            # Celery pipeline (durable workflows disabled)
            return task_pipeline_multitrack_process.delay(
                transcript_id=config.transcript_id,
                bucket_name=config.bucket_name,
                track_keys=config.track_keys,
            )
    elif isinstance(config, FileProcessingConfig):
        return task_pipeline_file_process.delay(transcript_id=config.transcript_id)
    else:
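Because the hunk above interleaves the removed Celery branch with the new Hatchet-only flow, the resulting control flow is hard to read off the page. Stripped of logging and error handling, a condensed restatement of the new dispatch path; this is a hedged sketch, not the verbatim implementation (the real code also inspects the concurrent run's status before bailing out):

```python
# Names below (controllers, HatchetClientManager) come from this module's
# imports as shown in the diff; the payload is abridged.
from reflector.db.transcripts import transcripts_controller
from reflector.hatchet.client import HatchetClientManager

async def dispatch_sketch(config, force: bool = False) -> None:
    transcript = await transcripts_controller.get_by_id(config.transcript_id)

    # 1. Replay-first: resume an existing Hatchet run when possible.
    if transcript and transcript.workflow_run_id and not force:
        if await HatchetClientManager.can_replay(transcript.workflow_run_id):
            await HatchetClientManager.replay_workflow(transcript.workflow_run_id)
            return  # existing run resumed, nothing to start

    # 2. --force: cancel the old run and clear its ID.
    if force and transcript and transcript.workflow_run_id:
        await HatchetClientManager.cancel_workflow(transcript.workflow_run_id)
        await transcripts_controller.update(transcript, {"workflow_run_id": None})

    # 3. Optimistic concurrency: re-fetch and skip if another process
    #    attached a workflow in the meantime (no database lock taken).
    transcript = await transcripts_controller.get_by_id(config.transcript_id)
    if transcript and transcript.workflow_run_id:
        return

    # 4. Start a fresh workflow and record its ID.
    workflow_id = await HatchetClientManager.start_workflow(
        workflow_name="DiarizationPipeline",
        input_data={"transcript_id": config.transcript_id},  # abridged
    )
    if transcript:
        await transcripts_controller.update(
            transcript, {"workflow_run_id": workflow_id}
        )
```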
@@ -1,7 +1,7 @@
from pydantic.types import PositiveInt
from pydantic_settings import BaseSettings, SettingsConfigDict

from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
from reflector.schemas.platform import DAILY_PLATFORM, Platform
from reflector.utils.string import NonEmptyString
@@ -155,7 +155,7 @@ class Settings(BaseSettings):
        None  # Webhook UUID for this environment. Not used by production code
    )
    # Platform Configuration
    DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM
    DEFAULT_VIDEO_PLATFORM: Platform = DAILY_PLATFORM

    # Zulip integration
    ZULIP_REALM: str | None = None
@@ -1,3 +1,6 @@
import asyncio
import logging
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Annotated, Any, Literal, Optional
@@ -5,26 +8,25 @@ from typing import Annotated, Any, Literal, Optional
from fastapi import APIRouter, Depends, HTTPException
from fastapi_pagination import Page
from fastapi_pagination.ext.databases import apaginate
from pydantic import BaseModel
from pydantic import BaseModel, Field
from redis.exceptions import LockError

import reflector.auth as auth
from reflector.db import get_database
from reflector.db.calendar_events import calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import Room as DbRoom
from reflector.db.rooms import rooms_controller
from reflector.logger import logger
from reflector.presence.pending_joins import create_pending_join, delete_pending_join
from reflector.redis_cache import RedisAsyncLock, get_async_redis_client
from reflector.redis_cache import RedisAsyncLock
from reflector.schemas.platform import Platform
from reflector.services.ics_sync import ics_sync_service
from reflector.settings import settings
from reflector.utils.string import NonEmptyString
from reflector.utils.url import add_query_param
from reflector.video_platforms.factory import create_platform_client
from reflector.worker.process import poll_daily_room_presence_task
from reflector.worker.webhook import test_webhook

logger = logging.getLogger(__name__)


class Room(BaseModel):
    id: str
@@ -196,6 +198,73 @@ async def rooms_list(
    return paginated


class BulkStatusRequest(BaseModel):
    room_names: list[str] = Field(max_length=100)


class RoomMeetingStatus(BaseModel):
    active_meetings: list[Meeting]
    upcoming_events: list[CalendarEventResponse]


@router.post("/rooms/meetings/bulk-status", response_model=dict[str, RoomMeetingStatus])
async def rooms_bulk_meeting_status(
    request: BulkStatusRequest,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
):
    user_id = user["sub"] if user else None

    all_rooms = await rooms_controller.get_by_names(request.room_names)
    # Filter to rooms the user can see (owned or shared), matching rooms_list behavior
    rooms = [
        r
        for r in all_rooms
        if r.is_shared or (user_id is not None and r.user_id == user_id)
    ]
    room_by_id: dict[str, DbRoom] = {r.id: r for r in rooms}
    room_ids = list(room_by_id.keys())

    current_time = datetime.now(timezone.utc)
    active_meetings, upcoming_events = await asyncio.gather(
        meetings_controller.get_all_active_for_rooms(room_ids, current_time),
        calendar_events_controller.get_upcoming_for_rooms(room_ids),
    )

    # Group by room name
    active_by_room: dict[str, list[Meeting]] = defaultdict(list)
    for m in active_meetings:
        room = room_by_id.get(m.room_id)
        if not room:
            continue
        m.platform = room.platform
        if user_id != room.user_id and m.platform == "whereby":
            m.host_room_url = ""
        active_by_room[room.name].append(
            Meeting.model_validate(m, from_attributes=True)
        )

    upcoming_by_room: dict[str, list[CalendarEventResponse]] = defaultdict(list)
    for e in upcoming_events:
        room = room_by_id.get(e.room_id)
        if not room:
            continue
        if user_id != room.user_id:
            e.description = None
            e.attendees = None
        upcoming_by_room[room.name].append(
            CalendarEventResponse.model_validate(e, from_attributes=True)
        )

    result: dict[str, RoomMeetingStatus] = {}
    for name in request.room_names:
        result[name] = RoomMeetingStatus(
            active_meetings=active_by_room.get(name, []),
            upcoming_events=upcoming_by_room.get(name, []),
        )

    return result


@router.get("/rooms/{room_id}", response_model=RoomDetails)
async def rooms_get(
    room_id: str,
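A minimal client-side sketch of the new bulk-status endpoint using httpx; the base URL and room names are placeholders, and the response shape follows `RoomMeetingStatus` from the diff. Note that every requested name appears in the response, with empty lists for rooms the caller cannot see.

```python
import asyncio
import httpx

async def fetch_bulk_status() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["standup", "design-review"]},  # max 100 names
        )
        resp.raise_for_status()
        # Maps each requested room name to its status; invisible rooms
        # come back with empty active_meetings / upcoming_events.
        for name, status in resp.json().items():
            print(name,
                  len(status["active_meetings"]),
                  len(status["upcoming_events"]))

asyncio.run(fetch_bulk_status())
```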
@@ -598,158 +667,3 @@ async def rooms_join_meeting(
        meeting.room_url = add_query_param(meeting.room_url, "t", token)

    return meeting


class JoiningRequest(BaseModel):
    """Request body for /joining endpoint (before WebRTC handshake)."""

    connection_id: NonEmptyString
    """Unique identifier for this connection. Generated by client via crypto.randomUUID()."""


class JoiningResponse(BaseModel):
    status: Literal["ok"]


class JoinedRequest(BaseModel):
    """Request body for /joined endpoint (after WebRTC connection established)."""

    connection_id: NonEmptyString
    """Must match the connection_id sent to /joining."""


class JoinedResponse(BaseModel):
    status: Literal["ok"]


def _get_pending_join_key(
    user: Optional[auth.UserInfo], connection_id: NonEmptyString
) -> str:
    """Get a unique key for pending join tracking.

    Uses user ID for authenticated users, connection_id for anonymous users.
    This ensures each browser tab has its own unique pending join record.
    """
    if user:
        return f"{user['sub']}:{connection_id}"
    return f"anon:{connection_id}"


@router.post(
    "/rooms/{room_name}/meetings/{meeting_id}/joining", response_model=JoiningResponse
)
async def meeting_joining(
    room_name: str,
    meeting_id: str,
    body: JoiningRequest,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
) -> JoiningResponse:
    """Signal intent to join meeting. Called before WebRTC handshake starts.

    This creates a pending join record that prevents the meeting from being
    deactivated while the WebRTC handshake is in progress. The record expires
    automatically after 30 seconds if the connection is not established.
    """
    log = logger.bind(
        room_name=room_name, meeting_id=meeting_id, connection_id=body.connection_id
    )

    room = await rooms_controller.get_by_name(room_name)
    if not room:
        raise HTTPException(status_code=404, detail="Room not found")

    meeting = await meetings_controller.get_by_id(meeting_id, room=room)
    if not meeting:
        raise HTTPException(status_code=404, detail="Meeting not found")

    if not meeting.is_active:
        raise HTTPException(status_code=400, detail="Meeting is not active")

    join_key = _get_pending_join_key(user, body.connection_id)

    redis = await get_async_redis_client()
    try:
        await create_pending_join(redis, meeting_id, join_key)
        log.debug("Created pending join intent", join_key=join_key)
    finally:
        await redis.aclose()

    return JoiningResponse(status="ok")


@router.post(
    "/rooms/{room_name}/meetings/{meeting_id}/joined", response_model=JoinedResponse
)
async def meeting_joined(
    room_name: str,
    meeting_id: str,
    body: JoinedRequest,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
) -> JoinedResponse:
    """Signal that WebRTC connection is established.

    This clears the pending join record, confirming the user has successfully
    connected to the meeting. Safe to call even if meeting was deactivated
    during the handshake (idempotent cleanup).
    """
    log = logger.bind(
        room_name=room_name, meeting_id=meeting_id, connection_id=body.connection_id
    )

    room = await rooms_controller.get_by_name(room_name)
    if not room:
        raise HTTPException(status_code=404, detail="Room not found")

    meeting = await meetings_controller.get_by_id(meeting_id, room=room)
    if not meeting:
        raise HTTPException(status_code=404, detail="Meeting not found")

    # Note: We don't check is_active here - the /joined call is a cleanup operation
    # and should succeed even if the meeting was deactivated during the handshake

    join_key = _get_pending_join_key(user, body.connection_id)

    redis = await get_async_redis_client()
    try:
        await delete_pending_join(redis, meeting_id, join_key)
        log.debug("Cleared pending join intent", join_key=join_key)
    finally:
        await redis.aclose()

    # Trigger presence poll to detect the new participant faster than periodic poll
    if meeting.platform == "daily":
        poll_daily_room_presence_task.delay(meeting_id)

    return JoinedResponse(status="ok")


class LeaveResponse(BaseModel):
    status: Literal["ok"]


@router.post(
    "/rooms/{room_name}/meetings/{meeting_id}/leave", response_model=LeaveResponse
)
async def meeting_leave(
    room_name: str,
    meeting_id: str,
    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
) -> LeaveResponse:
    """Trigger presence recheck when user leaves meeting.

    Called on tab close/navigation via sendBeacon(). Immediately queues presence
    poll to detect dirty disconnects faster than 30s periodic poll.
    Daily.co webhooks handle clean disconnects, but tab close/crash need this.
    """
    room = await rooms_controller.get_by_name(room_name)
    if not room:
        raise HTTPException(status_code=404, detail="Room not found")

    meeting = await meetings_controller.get_by_id(meeting_id, room=room)
    if not meeting:
        raise HTTPException(status_code=404, detail="Meeting not found")

    if meeting.platform == "daily":
        poll_daily_room_presence_task.delay(meeting_id)

    return LeaveResponse(status="ok")
@@ -27,14 +27,10 @@ from reflector.db.transcripts import (
from reflector.hatchet.client import HatchetClientManager
from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
from reflector.pipelines.main_live_pipeline import asynctask
from reflector.pipelines.main_multitrack_pipeline import (
    task_pipeline_multitrack_process,
)
from reflector.pipelines.topic_processing import EmptyPipeline
from reflector.presence.pending_joins import has_pending_joins
from reflector.processors import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.redis_cache import RedisAsyncLock, get_async_redis_client
from reflector.redis_cache import RedisAsyncLock
from reflector.settings import settings
from reflector.storage import get_transcripts_storage
from reflector.utils.daily import (
@@ -352,49 +348,29 @@ async def _process_multitrack_recording_inner(
        room_id=room.id,
    )

    use_celery = room and room.use_celery
    use_hatchet = not use_celery

    if use_celery:
        logger.info(
            "Room uses legacy Celery processing",
            room_id=room.id,
            transcript_id=transcript.id,
        )

    if use_hatchet:
        workflow_id = await HatchetClientManager.start_workflow(
            workflow_name="DiarizationPipeline",
            input_data={
                "recording_id": recording_id,
                "tracks": [{"s3_key": k} for k in filter_cam_audio_tracks(track_keys)],
                "bucket_name": bucket_name,
                "transcript_id": transcript.id,
                "room_id": room.id,
            },
            additional_metadata={
                "transcript_id": transcript.id,
                "recording_id": recording_id,
                "daily_recording_id": recording_id,
            },
        )
        logger.info(
            "Started Hatchet workflow",
            workflow_id=workflow_id,
            transcript_id=transcript.id,
        )

        await transcripts_controller.update(
            transcript, {"workflow_run_id": workflow_id}
        )
        return

    # Celery pipeline (runs when durable workflows disabled)
    task_pipeline_multitrack_process.delay(
        transcript_id=transcript.id,
        bucket_name=bucket_name,
        track_keys=filter_cam_audio_tracks(track_keys),
    # Multitrack processing always uses Hatchet (no Celery fallback)
    workflow_id = await HatchetClientManager.start_workflow(
        workflow_name="DiarizationPipeline",
        input_data={
            "recording_id": recording_id,
            "tracks": [{"s3_key": k} for k in filter_cam_audio_tracks(track_keys)],
            "bucket_name": bucket_name,
            "transcript_id": transcript.id,
            "room_id": room.id,
        },
        additional_metadata={
            "transcript_id": transcript.id,
            "recording_id": recording_id,
            "daily_recording_id": recording_id,
        },
    )
    logger.info(
        "Started Hatchet workflow",
        workflow_id=workflow_id,
        transcript_id=transcript.id,
    )

    await transcripts_controller.update(transcript, {"workflow_run_id": workflow_id})


@shared_task
@@ -870,18 +846,6 @@ async def process_meetings():
                    logger_.debug("Meeting not yet started, keep it")

                if should_deactivate:
                    # Check for pending joins before deactivating
                    # Users might be in the process of connecting via WebRTC
                    redis = await get_async_redis_client()
                    try:
                        if await has_pending_joins(redis, meeting.id):
                            logger_.info(
                                "Meeting has pending joins, skipping deactivation"
                            )
                            continue
                    finally:
                        await redis.aclose()

                    await meetings_controller.update_meeting(
                        meeting.id, is_active=False
                    )
@@ -1085,66 +1049,43 @@ async def reprocess_failed_daily_recordings():
            )
            continue

        use_celery = room and room.use_celery
        use_hatchet = not use_celery

        if use_hatchet:
            if not transcript:
                logger.warning(
                    "No transcript for Hatchet reprocessing, skipping",
                    recording_id=recording.id,
                )
                continue

            workflow_id = await HatchetClientManager.start_workflow(
                workflow_name="DiarizationPipeline",
                input_data={
                    "recording_id": recording.id,
                    "tracks": [
                        {"s3_key": k}
                        for k in filter_cam_audio_tracks(recording.track_keys)
                    ],
                    "bucket_name": bucket_name,
                    "transcript_id": transcript.id,
                    "room_id": room.id if room else None,
                },
                additional_metadata={
                    "transcript_id": transcript.id,
                    "recording_id": recording.id,
                    "reprocess": True,
                },
            )
            await transcripts_controller.update(
                transcript, {"workflow_run_id": workflow_id}
            )

            logger.info(
                "Queued Daily recording for Hatchet reprocessing",
        # Multitrack reprocessing always uses Hatchet (no Celery fallback)
        if not transcript:
            logger.warning(
                "No transcript for Hatchet reprocessing, skipping",
                recording_id=recording.id,
                workflow_id=workflow_id,
                room_name=meeting.room_name,
                track_count=len(recording.track_keys),
            )
        else:
            logger.info(
                "Queueing Daily recording for Celery reprocessing",
                recording_id=recording.id,
                room_name=meeting.room_name,
                track_count=len(recording.track_keys),
                transcript_status=transcript.status if transcript else None,
            )
            continue

        # For reprocessing, pass actual recording time (though it's ignored - see _process_multitrack_recording_inner)
        # Reprocessing uses recording.meeting_id directly instead of time-based matching
        recording_start_ts = int(recording.recorded_at.timestamp())
        workflow_id = await HatchetClientManager.start_workflow(
            workflow_name="DiarizationPipeline",
            input_data={
                "recording_id": recording.id,
                "tracks": [
                    {"s3_key": k}
                    for k in filter_cam_audio_tracks(recording.track_keys)
                ],
                "bucket_name": bucket_name,
                "transcript_id": transcript.id,
                "room_id": room.id if room else None,
            },
            additional_metadata={
                "transcript_id": transcript.id,
                "recording_id": recording.id,
                "reprocess": True,
            },
        )
        await transcripts_controller.update(
            transcript, {"workflow_run_id": workflow_id}
        )

        process_multitrack_recording.delay(
            bucket_name=bucket_name,
            daily_room_name=meeting.room_name,
            recording_id=recording.id,
            track_keys=recording.track_keys,
            recording_start_ts=recording_start_ts,
        )
        logger.info(
            "Queued Daily recording for Hatchet reprocessing",
            recording_id=recording.id,
            workflow_id=workflow_id,
            room_name=meeting.room_name,
            track_count=len(recording.track_keys),
        )

        reprocessed_count += 1
@@ -4,7 +4,7 @@ from unittest.mock import patch

import pytest

from reflector.schemas.platform import WHEREBY_PLATFORM
from reflector.schemas.platform import DAILY_PLATFORM, WHEREBY_PLATFORM


@pytest.fixture(scope="session", autouse=True)
@@ -14,6 +14,7 @@ def register_mock_platform():
    from reflector.video_platforms.registry import register_platform

    register_platform(WHEREBY_PLATFORM, MockPlatformClient)
    register_platform(DAILY_PLATFORM, MockPlatformClient)
    yield
@@ -1,367 +0,0 @@
"""Integration tests for /joining and /joined endpoints.

Tests for the join intent tracking to prevent race conditions during
WebRTC handshake when users join meetings.
"""

from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, patch

import pytest

from reflector.db.meetings import Meeting
from reflector.presence.pending_joins import PENDING_JOIN_PREFIX

TEST_CONNECTION_ID = "test-connection-uuid-12345"


@pytest.fixture
def mock_room():
    """Mock room object."""
    from reflector.db.rooms import Room

    return Room(
        id="room-123",
        name="test-room",
        user_id="owner-user",
        created_at=datetime.now(timezone.utc),
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic",
        is_shared=True,
        platform="daily",
        skip_consent=False,
    )


@pytest.fixture
def mock_meeting():
    """Mock meeting object."""
    now = datetime.now(timezone.utc)
    return Meeting(
        id="meeting-456",
        room_id="room-123",
        room_name="test-room-20251118120000",
        room_url="https://daily.co/test-room-20251118120000",
        host_room_url="https://daily.co/test-room-20251118120000?t=host",
        platform="daily",
        num_clients=0,
        is_active=True,
        start_date=now,
        end_date=now + timedelta(hours=1),
    )


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
@patch("reflector.views.rooms.get_async_redis_client")
async def test_joining_endpoint_creates_pending_join(
    mock_get_redis,
    mock_get_meeting,
    mock_get_room,
    mock_room,
    mock_meeting,
    client,
    authenticated_client,
):
    """Test that /joining endpoint creates pending join in Redis."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = mock_meeting

    mock_redis = AsyncMock()
    mock_redis.setex = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    response = await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    assert response.status_code == 200
    assert response.json() == {"status": "ok"}

    # Verify Redis setex was called with correct key pattern
    mock_redis.setex.assert_called_once()
    call_args = mock_redis.setex.call_args[0]
    assert call_args[0].startswith(f"{PENDING_JOIN_PREFIX}:{mock_meeting.id}:")
    assert TEST_CONNECTION_ID in call_args[0]


@pytest.mark.asyncio
@patch("reflector.views.rooms.poll_daily_room_presence_task")
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
@patch("reflector.views.rooms.get_async_redis_client")
async def test_joined_endpoint_deletes_pending_join(
    mock_get_redis,
    mock_get_meeting,
    mock_get_room,
    mock_poll_task,
    mock_room,
    mock_meeting,
    client,
    authenticated_client,
):
    """Test that /joined endpoint deletes pending join from Redis."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = mock_meeting

    mock_redis = AsyncMock()
    mock_redis.delete = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    response = await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joined",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    assert response.status_code == 200
    assert response.json() == {"status": "ok"}

    # Verify Redis delete was called with correct key pattern
    mock_redis.delete.assert_called_once()
    call_args = mock_redis.delete.call_args[0]
    assert call_args[0].startswith(f"{PENDING_JOIN_PREFIX}:{mock_meeting.id}:")
    assert TEST_CONNECTION_ID in call_args[0]

    # Verify presence poll was triggered for Daily meetings
    mock_poll_task.delay.assert_called_once_with(mock_meeting.id)


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
async def test_joining_endpoint_room_not_found(
    mock_get_room,
    client,
    authenticated_client,
):
    """Test that /joining returns 404 when room not found."""
    mock_get_room.return_value = None

    response = await client.post(
        "/rooms/nonexistent-room/meetings/meeting-123/joining",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    assert response.status_code == 404
    assert response.json()["detail"] == "Room not found"


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
async def test_joining_endpoint_meeting_not_found(
    mock_get_meeting,
    mock_get_room,
    mock_room,
    client,
    authenticated_client,
):
    """Test that /joining returns 404 when meeting not found."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = None

    response = await client.post(
        f"/rooms/{mock_room.name}/meetings/nonexistent-meeting/joining",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    assert response.status_code == 404
    assert response.json()["detail"] == "Meeting not found"


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
async def test_joining_endpoint_meeting_not_active(
    mock_get_meeting,
    mock_get_room,
    mock_room,
    mock_meeting,
    client,
    authenticated_client,
):
    """Test that /joining returns 400 when meeting is not active."""
    mock_get_room.return_value = mock_room
    inactive_meeting = mock_meeting.model_copy(update={"is_active": False})
    mock_get_meeting.return_value = inactive_meeting

    response = await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    assert response.status_code == 400
    assert response.json()["detail"] == "Meeting is not active"


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
@patch("reflector.views.rooms.get_async_redis_client")
async def test_joining_endpoint_anonymous_user(
    mock_get_redis,
    mock_get_meeting,
    mock_get_room,
    mock_room,
    mock_meeting,
    client,
):
    """Test that /joining works for anonymous users with unique connection_id."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = mock_meeting

    mock_redis = AsyncMock()
    mock_redis.setex = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    response = await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    assert response.status_code == 200
    assert response.json() == {"status": "ok"}

    # Verify Redis setex was called with "anon:" prefix and connection_id
    call_args = mock_redis.setex.call_args[0]
    assert ":anon:" in call_args[0]
    assert TEST_CONNECTION_ID in call_args[0]


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
@patch("reflector.views.rooms.get_async_redis_client")
async def test_joining_endpoint_redis_closed_on_success(
    mock_get_redis,
    mock_get_meeting,
    mock_get_room,
    mock_room,
    mock_meeting,
    client,
    authenticated_client,
):
    """Test that Redis connection is closed after successful operation."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = mock_meeting

    mock_redis = AsyncMock()
    mock_redis.setex = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
        json={"connection_id": TEST_CONNECTION_ID},
    )

    mock_redis.aclose.assert_called_once()


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
@patch("reflector.views.rooms.get_async_redis_client")
async def test_joining_endpoint_redis_closed_on_error(
    mock_get_redis,
    mock_get_meeting,
    mock_get_room,
    mock_room,
    mock_meeting,
    client,
    authenticated_client,
):
    """Test that Redis connection is closed even when operation fails."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = mock_meeting

    mock_redis = AsyncMock()
    mock_redis.setex = AsyncMock(side_effect=Exception("Redis error"))
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    with pytest.raises(Exception):
        await client.post(
            f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
            json={"connection_id": TEST_CONNECTION_ID},
        )

    mock_redis.aclose.assert_called_once()


@pytest.mark.asyncio
async def test_joining_endpoint_requires_connection_id(
    client,
):
    """Test that /joining returns 422 when connection_id is missing."""
    response = await client.post(
        "/rooms/test-room/meetings/meeting-123/joining",
        json={},
    )

    assert response.status_code == 422  # Validation error


@pytest.mark.asyncio
async def test_joining_endpoint_rejects_empty_connection_id(
    client,
):
    """Test that /joining returns 422 when connection_id is empty string."""
    response = await client.post(
        "/rooms/test-room/meetings/meeting-123/joining",
        json={"connection_id": ""},
    )

    assert response.status_code == 422  # Validation error (NonEmptyString)


@pytest.mark.asyncio
@patch("reflector.views.rooms.rooms_controller.get_by_name")
@patch("reflector.views.rooms.meetings_controller.get_by_id")
@patch("reflector.views.rooms.get_async_redis_client")
async def test_different_connection_ids_create_different_keys(
    mock_get_redis,
    mock_get_meeting,
    mock_get_room,
    mock_room,
    mock_meeting,
    client,
):
    """Test that different connection_ids create different Redis keys."""
    mock_get_room.return_value = mock_room
    mock_get_meeting.return_value = mock_meeting

    mock_redis = AsyncMock()
    mock_redis.setex = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    # First connection
    await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
        json={"connection_id": "connection-1"},
    )
    key1 = mock_redis.setex.call_args[0][0]

    mock_redis.setex.reset_mock()

    # Second connection (different tab)
    await client.post(
        f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
        json={"connection_id": "connection-2"},
    )
    key2 = mock_redis.setex.call_args[0][0]

    # Keys should be different
    assert key1 != key2
    assert "connection-1" in key1
    assert "connection-2" in key2
@@ -1,153 +0,0 @@
"""Tests for pending joins Redis helper functions.

TDD tests for tracking join intent to prevent race conditions during
WebRTC handshake when users join meetings.
"""

from unittest.mock import AsyncMock

import pytest

from reflector.presence.pending_joins import (
    PENDING_JOIN_PREFIX,
    PENDING_JOIN_TTL,
    create_pending_join,
    delete_pending_join,
    has_pending_joins,
)


@pytest.fixture
def mock_redis():
    """Mock async Redis client."""
    redis = AsyncMock()
    redis.setex = AsyncMock()
    redis.delete = AsyncMock()
    redis.scan = AsyncMock(return_value=(0, []))
    return redis


@pytest.mark.asyncio
async def test_create_pending_join_sets_key_with_ttl(mock_redis):
    """Test that create_pending_join stores key with correct TTL."""
    meeting_id = "meeting-123"
    user_id = "user-456"

    await create_pending_join(mock_redis, meeting_id, user_id)

    expected_key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
    mock_redis.setex.assert_called_once()
    call_args = mock_redis.setex.call_args
    assert call_args[0][0] == expected_key
    assert call_args[0][1] == PENDING_JOIN_TTL
    # Value should be a timestamp string
    assert call_args[0][2] is not None


@pytest.mark.asyncio
async def test_delete_pending_join_removes_key(mock_redis):
    """Test that delete_pending_join removes the key."""
    meeting_id = "meeting-123"
    user_id = "user-456"

    await delete_pending_join(mock_redis, meeting_id, user_id)

    expected_key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
    mock_redis.delete.assert_called_once_with(expected_key)


@pytest.mark.asyncio
async def test_has_pending_joins_returns_false_when_no_keys(mock_redis):
    """Test has_pending_joins returns False when no matching keys."""
    mock_redis.scan.return_value = (0, [])

    result = await has_pending_joins(mock_redis, "meeting-123")

    assert result is False
    mock_redis.scan.assert_called_once()
    call_kwargs = mock_redis.scan.call_args.kwargs
    assert call_kwargs["match"] == f"{PENDING_JOIN_PREFIX}:meeting-123:*"


@pytest.mark.asyncio
async def test_has_pending_joins_returns_true_when_keys_exist(mock_redis):
    """Test has_pending_joins returns True when matching keys found."""
    mock_redis.scan.return_value = (0, [b"pending_join:meeting-123:user-1"])

    result = await has_pending_joins(mock_redis, "meeting-123")

    assert result is True


@pytest.mark.asyncio
async def test_has_pending_joins_scans_with_correct_pattern(mock_redis):
    """Test has_pending_joins uses correct scan pattern."""
    meeting_id = "meeting-abc-def"
    mock_redis.scan.return_value = (0, [])

    await has_pending_joins(mock_redis, meeting_id)

    expected_pattern = f"{PENDING_JOIN_PREFIX}:{meeting_id}:*"
    mock_redis.scan.assert_called_once()
    call_kwargs = mock_redis.scan.call_args.kwargs
    assert call_kwargs["match"] == expected_pattern
    assert call_kwargs["count"] == 100


@pytest.mark.asyncio
async def test_multiple_users_pending_joins(mock_redis):
    """Test that multiple users can have pending joins for same meeting."""
    meeting_id = "meeting-123"
    # Simulate two pending joins
    mock_redis.scan.return_value = (
        0,
        [b"pending_join:meeting-123:user-1", b"pending_join:meeting-123:user-2"],
    )

    result = await has_pending_joins(mock_redis, meeting_id)

    assert result is True


@pytest.mark.asyncio
async def test_pending_join_ttl_value():
    """Test that PENDING_JOIN_TTL has expected value."""
    # 30 seconds should be enough for WebRTC handshake but not too long
    assert PENDING_JOIN_TTL == 30


@pytest.mark.asyncio
async def test_pending_join_prefix_value():
    """Test that PENDING_JOIN_PREFIX has expected value."""
    assert PENDING_JOIN_PREFIX == "pending_join"


@pytest.mark.asyncio
async def test_has_pending_joins_multi_iteration_scan_no_keys(mock_redis):
    """Test has_pending_joins iterates until cursor returns 0."""
    # Simulate multi-iteration scan: cursor 100 -> cursor 50 -> cursor 0
    mock_redis.scan.side_effect = [
        (100, []),  # First iteration, no keys, continue
        (50, []),  # Second iteration, no keys, continue
        (0, []),  # Third iteration, cursor 0, done
    ]

    result = await has_pending_joins(mock_redis, "meeting-123")

    assert result is False
    assert mock_redis.scan.call_count == 3


@pytest.mark.asyncio
async def test_has_pending_joins_multi_iteration_finds_key_later(mock_redis):
    """Test has_pending_joins finds key on second iteration."""
    # Simulate finding key on second scan iteration
    mock_redis.scan.side_effect = [
        (100, []),  # First iteration, no keys
        (0, [b"pending_join:meeting-123:user-1"]),  # Second iteration, found key
    ]

    result = await has_pending_joins(mock_redis, "meeting-123")

    assert result is True
    assert mock_redis.scan.call_count == 2
@@ -1,241 +0,0 @@
"""Tests for process_meetings pending joins check.

Tests that process_meetings correctly skips deactivation when
pending joins exist for a meeting.
"""

from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, patch

import pytest

from reflector.db.meetings import Meeting


def _get_process_meetings_fn():
    """Get the underlying async function without Celery/asynctask decorators."""
    from reflector.worker import process

    fn = process.process_meetings
    # Get through both decorator layers (@shared_task and @asynctask)
    if hasattr(fn, "__wrapped__"):
        fn = fn.__wrapped__
    if hasattr(fn, "__wrapped__"):
        fn = fn.__wrapped__
    return fn


@pytest.fixture
def mock_active_meeting():
    """Mock an active meeting that should be considered for deactivation."""
    now = datetime.now(timezone.utc)
    return Meeting(
        id="meeting-123",
        room_id="room-456",
        room_name="test-room-20251118120000",
        room_url="https://daily.co/test-room-20251118120000",
        host_room_url="https://daily.co/test-room-20251118120000?t=host",
        platform="daily",
        num_clients=0,
        is_active=True,
        start_date=now - timedelta(hours=1),
        end_date=now - timedelta(minutes=30),  # Already ended
    )


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_all_active")
@patch("reflector.worker.process.RedisAsyncLock")
@patch("reflector.worker.process.create_platform_client")
@patch("reflector.worker.process.get_async_redis_client")
@patch("reflector.worker.process.has_pending_joins")
@patch("reflector.worker.process.meetings_controller.update_meeting")
async def test_process_meetings_skips_deactivation_with_pending_joins(
    mock_update_meeting,
    mock_has_pending_joins,
    mock_get_redis,
    mock_create_client,
    mock_redis_lock_class,
    mock_get_all_active,
    mock_active_meeting,
):
    """Test that process_meetings skips deactivation when pending joins exist."""
    process_meetings = _get_process_meetings_fn()

    mock_get_all_active.return_value = [mock_active_meeting]

    # Mock lock acquired
    mock_lock_instance = AsyncMock()
    mock_lock_instance.acquired = True
    mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
    mock_lock_instance.__aexit__ = AsyncMock()
    mock_redis_lock_class.return_value = mock_lock_instance

    # Mock platform client - no active sessions, but had sessions (triggers deactivation)
    mock_daily_client = AsyncMock()
    mock_session = AsyncMock()
    mock_session.ended_at = datetime.now(timezone.utc)  # Session ended
    mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
    mock_create_client.return_value = mock_daily_client

    # Mock Redis client
    mock_redis = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    # Mock pending joins exist
    mock_has_pending_joins.return_value = True

    await process_meetings()

    # Verify has_pending_joins was called
    mock_has_pending_joins.assert_called_once_with(mock_redis, mock_active_meeting.id)

    # Verify meeting was NOT deactivated
    mock_update_meeting.assert_not_called()

    # Verify Redis was closed
    mock_redis.aclose.assert_called_once()


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_all_active")
@patch("reflector.worker.process.RedisAsyncLock")
@patch("reflector.worker.process.create_platform_client")
@patch("reflector.worker.process.get_async_redis_client")
@patch("reflector.worker.process.has_pending_joins")
@patch("reflector.worker.process.meetings_controller.update_meeting")
async def test_process_meetings_deactivates_without_pending_joins(
    mock_update_meeting,
    mock_has_pending_joins,
    mock_get_redis,
    mock_create_client,
    mock_redis_lock_class,
    mock_get_all_active,
    mock_active_meeting,
):
    """Test that process_meetings deactivates when no pending joins."""
    process_meetings = _get_process_meetings_fn()

    mock_get_all_active.return_value = [mock_active_meeting]

    # Mock lock acquired
    mock_lock_instance = AsyncMock()
    mock_lock_instance.acquired = True
    mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
    mock_lock_instance.__aexit__ = AsyncMock()
    mock_redis_lock_class.return_value = mock_lock_instance

    # Mock platform client - no active sessions, but had sessions
    mock_daily_client = AsyncMock()
    mock_session = AsyncMock()
    mock_session.ended_at = datetime.now(timezone.utc)
    mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
    mock_create_client.return_value = mock_daily_client

    # Mock Redis client
    mock_redis = AsyncMock()
    mock_redis.aclose = AsyncMock()
    mock_get_redis.return_value = mock_redis

    # Mock no pending joins
    mock_has_pending_joins.return_value = False

    await process_meetings()

    # Verify meeting was deactivated
    mock_update_meeting.assert_called_once_with(mock_active_meeting.id, is_active=False)


@pytest.mark.asyncio
@patch("reflector.worker.process.meetings_controller.get_all_active")
@patch("reflector.worker.process.RedisAsyncLock")
@patch("reflector.worker.process.create_platform_client")
async def test_process_meetings_no_check_when_active_sessions(
    mock_create_client,
    mock_redis_lock_class,
    mock_get_all_active,
    mock_active_meeting,
):
    """Test that pending joins check is skipped when there are active sessions."""
    process_meetings = _get_process_meetings_fn()

    mock_get_all_active.return_value = [mock_active_meeting]

    # Mock lock acquired
    mock_lock_instance = AsyncMock()
    mock_lock_instance.acquired = True
    mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
    mock_lock_instance.__aexit__ = AsyncMock()
    mock_redis_lock_class.return_value = mock_lock_instance

    # Mock platform client - has active session
    mock_daily_client = AsyncMock()
|
||||
mock_session = AsyncMock()
|
||||
mock_session.ended_at = None # Still active
|
||||
mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
|
||||
mock_create_client.return_value = mock_daily_client
|
||||
|
||||
with (
|
||||
patch("reflector.worker.process.get_async_redis_client") as mock_get_redis,
|
||||
patch("reflector.worker.process.has_pending_joins") as mock_has_pending_joins,
|
||||
patch(
|
||||
"reflector.worker.process.meetings_controller.update_meeting"
|
||||
) as mock_update_meeting,
|
||||
):
|
||||
await process_meetings()
|
||||
|
||||
# Verify pending joins check was NOT called (no need - active sessions exist)
|
||||
mock_has_pending_joins.assert_not_called()
|
||||
|
||||
# Verify meeting was NOT deactivated
|
||||
mock_update_meeting.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
@patch("reflector.worker.process.meetings_controller.get_all_active")
|
||||
@patch("reflector.worker.process.RedisAsyncLock")
|
||||
@patch("reflector.worker.process.create_platform_client")
|
||||
@patch("reflector.worker.process.get_async_redis_client")
|
||||
@patch("reflector.worker.process.has_pending_joins")
|
||||
@patch("reflector.worker.process.meetings_controller.update_meeting")
|
||||
async def test_process_meetings_closes_redis_even_on_continue(
|
||||
mock_update_meeting,
|
||||
mock_has_pending_joins,
|
||||
mock_get_redis,
|
||||
mock_create_client,
|
||||
mock_redis_lock_class,
|
||||
mock_get_all_active,
|
||||
mock_active_meeting,
|
||||
):
|
||||
"""Test that Redis connection is always closed, even when skipping deactivation."""
|
||||
process_meetings = _get_process_meetings_fn()
|
||||
|
||||
mock_get_all_active.return_value = [mock_active_meeting]
|
||||
|
||||
# Mock lock acquired
|
||||
mock_lock_instance = AsyncMock()
|
||||
mock_lock_instance.acquired = True
|
||||
mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
|
||||
mock_lock_instance.__aexit__ = AsyncMock()
|
||||
mock_redis_lock_class.return_value = mock_lock_instance
|
||||
|
||||
# Mock platform client - no active sessions
|
||||
mock_daily_client = AsyncMock()
|
||||
mock_session = AsyncMock()
|
||||
mock_session.ended_at = datetime.now(timezone.utc)
|
||||
mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
|
||||
mock_create_client.return_value = mock_daily_client
|
||||
|
||||
# Mock Redis client
|
||||
mock_redis = AsyncMock()
|
||||
mock_redis.aclose = AsyncMock()
|
||||
mock_get_redis.return_value = mock_redis
|
||||
|
||||
# Mock pending joins exist (will trigger continue)
|
||||
mock_has_pending_joins.return_value = True
|
||||
|
||||
await process_meetings()
|
||||
|
||||
# Verify Redis was closed
|
||||
mock_redis.aclose.assert_called_once()
@@ -1,6 +1,6 @@
import asyncio
import time
from unittest.mock import patch
from unittest.mock import AsyncMock, patch

import pytest
from httpx import ASGITransport, AsyncClient
@@ -142,17 +142,17 @@ async def test_whereby_recording_uses_file_pipeline(client):
            "reflector.services.transcript_process.task_pipeline_file_process"
        ) as mock_file_pipeline,
        patch(
            "reflector.services.transcript_process.task_pipeline_multitrack_process"
        ) as mock_multitrack_pipeline,
            "reflector.services.transcript_process.HatchetClientManager"
        ) as mock_hatchet,
    ):
        response = await client.post(f"/transcripts/{transcript.id}/process")

        assert response.status_code == 200
        assert response.json()["status"] == "ok"

        # Whereby recordings should use file pipeline
        # Whereby recordings should use file pipeline, not Hatchet
        mock_file_pipeline.delay.assert_called_once_with(transcript_id=transcript.id)
        mock_multitrack_pipeline.delay.assert_not_called()
        mock_hatchet.start_workflow.assert_not_called()


@pytest.mark.usefixtures("setup_database")
@@ -177,8 +177,6 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
        recording_trigger="automatic-2nd-participant",
        is_shared=False,
    )
    # Force Celery backend for test
    await rooms_controller.update(room, {"use_celery": True})

    transcript = await transcripts_controller.add(
        "",
@@ -213,18 +211,23 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
            "reflector.services.transcript_process.task_pipeline_file_process"
        ) as mock_file_pipeline,
        patch(
            "reflector.services.transcript_process.task_pipeline_multitrack_process"
        ) as mock_multitrack_pipeline,
            "reflector.services.transcript_process.HatchetClientManager"
        ) as mock_hatchet,
    ):
        mock_hatchet.start_workflow = AsyncMock(return_value="test-workflow-id")

        response = await client.post(f"/transcripts/{transcript.id}/process")

        assert response.status_code == 200
        assert response.json()["status"] == "ok"

        # Daily.co multitrack recordings should use multitrack pipeline
        mock_multitrack_pipeline.delay.assert_called_once_with(
            transcript_id=transcript.id,
            bucket_name="daily-bucket",
            track_keys=track_keys,
        )
        # Daily.co multitrack recordings should use Hatchet workflow
        mock_hatchet.start_workflow.assert_called_once()
        call_kwargs = mock_hatchet.start_workflow.call_args.kwargs
        assert call_kwargs["workflow_name"] == "DiarizationPipeline"
        assert call_kwargs["input_data"]["transcript_id"] == transcript.id
        assert call_kwargs["input_data"]["bucket_name"] == "daily-bucket"
        assert call_kwargs["input_data"]["tracks"] == [
            {"s3_key": k} for k in track_keys
        ]
        mock_file_pipeline.delay.assert_not_called()
@@ -25,8 +25,6 @@ import { useConsentDialog } from "../../lib/consent";
import {
  useRoomJoinMeeting,
  useMeetingStartRecording,
  useMeetingJoining,
  useMeetingJoined,
} from "../../lib/apiHooks";
import { omit } from "remeda";
import {
@@ -189,14 +187,8 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
  const [container, setContainer] = useState<HTMLDivElement | null>(null);
  const joinMutation = useRoomJoinMeeting();
  const startRecordingMutation = useMeetingStartRecording();
  const joiningMutation = useMeetingJoining();
  const joinedMutation = useMeetingJoined();
  const [joinedMeeting, setJoinedMeeting] = useState<Meeting | null>(null);

  // Generate a stable connection ID for this component instance
  // Used to track pending joins per browser tab (prevents key collision for anonymous users)
  const connectionId = useMemo(() => crypto.randomUUID(), []);

  // Generate deterministic instanceIds so all participants use SAME IDs
  const cloudInstanceId = parseNonEmptyString(meeting.id);
  const rawTracksInstanceId = parseNonEmptyString(
@@ -245,20 +237,6 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
    router.push("/browse");
  }, [router]);

  // Trigger presence recheck on dirty disconnects (tab close, navigation away)
  useEffect(() => {
    if (!meeting?.id || !roomName) return;

    const handleBeforeUnload = () => {
      // sendBeacon guarantees delivery even if tab closes mid-request
      const url = `/v1/rooms/${roomName}/meetings/${meeting.id}/leave`;
      navigator.sendBeacon(url, JSON.stringify({}));
    };

    window.addEventListener("beforeunload", handleBeforeUnload);
    return () => window.removeEventListener("beforeunload", handleBeforeUnload);
  }, [meeting?.id, roomName]);

  const handleCustomButtonClick = useCallback(
    (ev: DailyEventObjectCustomButtonClick) => {
      if (ev.button_id === CONSENT_BUTTON_ID) {
@@ -271,28 +249,6 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
  );

  const handleFrameJoinMeeting = useCallback(() => {
    // Signal that WebRTC connection is established
    // This clears the pending join intent, confirming successful connection
    joinedMutation.mutate(
      {
        params: {
          path: {
            room_name: roomName,
            meeting_id: meeting.id,
          },
        },
        body: {
          connection_id: connectionId,
        },
      },
      {
        onError: (error: unknown) => {
          // Non-blocking: log but don't fail - this is cleanup, not critical
          console.warn("Failed to signal joined:", error);
        },
      },
    );

    if (meeting.recording_type === "cloud") {
      console.log("Starting dual recording via REST API", {
        cloudInstanceId,
@@ -354,9 +310,6 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
  }, [
    meeting.recording_type,
    meeting.id,
    roomName,
    connectionId,
    joinedMutation,
    startRecordingMutation,
    cloudInstanceId,
    rawTracksInstanceId,
@@ -375,28 +328,8 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {

  useEffect(() => {
    if (!frame || !roomUrl) return;

    const joinRoom = async () => {
      // Signal intent to join before WebRTC handshake starts
      // This prevents race condition where meeting is deactivated during handshake
      try {
        await joiningMutation.mutateAsync({
          params: {
            path: {
              room_name: roomName,
              meeting_id: meeting.id,
            },
          },
          body: {
            connection_id: connectionId,
          },
        });
      } catch (error) {
        // Non-blocking: log but continue with join
        console.warn("Failed to signal joining intent:", error);
      }

      await frame.join({
    frame
      .join({
        url: roomUrl,
        sendSettings: {
          video: {
@@ -408,13 +341,9 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
          },
          // Note: screenVideo intentionally not configured to preserve full quality for screen shares
        },
      });
    };

    joinRoom().catch(console.error.bind(console, "Failed to join daily room:"));
    // joiningMutation excluded from deps - it's a stable hook reference
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [frame, roomUrl, roomName, meeting.id, connectionId]);
      })
      .catch(console.error.bind(console, "Failed to join daily room:"));
  }, [frame, roomUrl]);

  useEffect(() => {
    setCustomTrayButton(
217 www/app/lib/__tests__/meetingStatusBatcher.test.tsx Normal file
@@ -0,0 +1,217 @@
import "@testing-library/jest-dom";

// --- Module mocks (hoisted before imports) ---

jest.mock("../apiClient", () => ({
  client: {
    GET: jest.fn(),
    POST: jest.fn(),
    PUT: jest.fn(),
    PATCH: jest.fn(),
    DELETE: jest.fn(),
    use: jest.fn(),
  },
  $api: {
    useQuery: jest.fn(),
    useMutation: jest.fn(),
    queryOptions: (method: string, path: string, init?: unknown) =>
      init === undefined
        ? { queryKey: [method, path] }
        : { queryKey: [method, path, init] },
  },
  API_URL: "http://test",
  WEBSOCKET_URL: "ws://test",
  configureApiAuth: jest.fn(),
}));

jest.mock("../AuthProvider", () => ({
  useAuth: () => ({
    status: "authenticated" as const,
    accessToken: "test-token",
    accessTokenExpires: Date.now() + 3600000,
    user: { id: "user1", name: "Test User" },
    update: jest.fn(),
    signIn: jest.fn(),
    signOut: jest.fn(),
    lastUserId: "user1",
  }),
}));

// Recreate the batcher with a 0ms window. setTimeout(fn, 0) defers to the next
// macrotask boundary — after all synchronous React rendering completes. All
// useQuery queryFns fire within the same macrotask, so they all queue into one
// batch before the timer fires. This is deterministic and avoids fake timers.
jest.mock("../meetingStatusBatcher", () => {
  const actual = jest.requireActual("../meetingStatusBatcher");
  return {
    ...actual,
    meetingStatusBatcher: actual.createMeetingStatusBatcher(0),
  };
});

// --- Imports (after mocks) ---

import React from "react";
import { render, waitFor, screen } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { useRoomActiveMeetings, useRoomUpcomingMeetings } from "../apiHooks";
import { client } from "../apiClient";
import { ErrorProvider } from "../../(errors)/errorContext";

const mockClient = client as { POST: jest.Mock };

// --- Helpers ---

function mockBulkStatusEndpoint(
  roomData?: Record<
    string,
    { active_meetings: unknown[]; upcoming_events: unknown[] }
  >,
) {
  mockClient.POST.mockImplementation(
    async (_path: string, options: { body: { room_names: string[] } }) => {
      const roomNames: string[] = options.body.room_names;
      const src = roomData ?? {};
      const data = Object.fromEntries(
        roomNames.map((name) => [
          name,
          src[name] ?? { active_meetings: [], upcoming_events: [] },
        ]),
      );
      return { data, error: undefined, response: {} };
    },
  );
}

// --- Test component: renders N room cards, each using both hooks ---

function RoomCard({ roomName }: { roomName: string }) {
  const active = useRoomActiveMeetings(roomName);
  const upcoming = useRoomUpcomingMeetings(roomName);

  if (active.isLoading || upcoming.isLoading) {
    return <div data-testid={`room-${roomName}`}>loading</div>;
  }

  return (
    <div data-testid={`room-${roomName}`}>
      {active.data?.length ?? 0} active, {upcoming.data?.length ?? 0} upcoming
    </div>
  );
}

function RoomList({ roomNames }: { roomNames: string[] }) {
  return (
    <>
      {roomNames.map((name) => (
        <RoomCard key={name} roomName={name} />
      ))}
    </>
  );
}

function createWrapper() {
  const queryClient = new QueryClient({
    defaultOptions: {
      queries: { retry: false },
    },
  });
  return function Wrapper({ children }: { children: React.ReactNode }) {
    return (
      <QueryClientProvider client={queryClient}>
        <ErrorProvider>{children}</ErrorProvider>
      </QueryClientProvider>
    );
  };
}

// --- Tests ---

describe("meeting status batcher integration", () => {
  afterEach(() => jest.clearAllMocks());

  it("batches multiple room queries into a single POST request", async () => {
    const rooms = Array.from({ length: 10 }, (_, i) => `room-${i}`);

    mockBulkStatusEndpoint();

    render(<RoomList roomNames={rooms} />, { wrapper: createWrapper() });

    await waitFor(() => {
      for (const name of rooms) {
        expect(screen.getByTestId(`room-${name}`)).toHaveTextContent(
          "0 active, 0 upcoming",
        );
      }
    });

    const postCalls = mockClient.POST.mock.calls.filter(
      ([path]: [string]) => path === "/v1/rooms/meetings/bulk-status",
    );

    // Without batching this would be 20 calls (2 hooks x 10 rooms).
    expect(postCalls).toHaveLength(1);

    // The single call should contain all 10 rooms (deduplicated)
    const requestedRooms: string[] = postCalls[0][1].body.room_names;
    for (const name of rooms) {
      expect(requestedRooms).toContain(name);
    }
  });

  it("batcher fetcher returns room-specific data", async () => {
    const {
      meetingStatusBatcher: batcher,
    } = require("../meetingStatusBatcher");

    mockBulkStatusEndpoint({
      "room-a": {
        active_meetings: [{ id: "m1", room_name: "room-a" }],
        upcoming_events: [],
      },
      "room-b": {
        active_meetings: [],
        upcoming_events: [{ id: "e1", title: "Standup" }],
      },
    });

    const [resultA, resultB] = await Promise.all([
      batcher.fetch("room-a"),
      batcher.fetch("room-b"),
    ]);

    expect(mockClient.POST).toHaveBeenCalledTimes(1);
    expect(resultA.active_meetings).toEqual([
      { id: "m1", room_name: "room-a" },
    ]);
    expect(resultA.upcoming_events).toEqual([]);
    expect(resultB.active_meetings).toEqual([]);
    expect(resultB.upcoming_events).toEqual([{ id: "e1", title: "Standup" }]);
  });

  it("renders room-specific meeting data through hooks", async () => {
    mockBulkStatusEndpoint({
      "room-a": {
        active_meetings: [{ id: "m1", room_name: "room-a" }],
        upcoming_events: [],
      },
      "room-b": {
        active_meetings: [],
        upcoming_events: [{ id: "e1", title: "Standup" }],
      },
    });

    render(<RoomList roomNames={["room-a", "room-b"]} />, {
      wrapper: createWrapper(),
    });

    await waitFor(() => {
      expect(screen.getByTestId("room-room-a")).toHaveTextContent(
        "1 active, 0 upcoming",
      );
      expect(screen.getByTestId("room-room-b")).toHaveTextContent(
        "0 active, 1 upcoming",
      );
    });
  });
});
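
The 0ms-window mock above hinges on macrotask ordering: windowScheduler(0) flushes with setTimeout(..., 0), which cannot run until the current macrotask — including all of React's synchronous mounting and every queryFn it triggers — has finished. A tiny sketch of that ordering, independent of React and assuming nothing beyond standard setTimeout semantics:

// Everything synchronous in the current macrotask runs before the 0ms timer.
const queued: string[] = [];
setTimeout(() => {
  // Fires only after the loop below has queued all ten names -> one "batch".
  console.log("flush:", queued);
}, 0);
for (let i = 0; i < 10; i++) {
  queued.push(`room-${i}`);
}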
@@ -2,9 +2,10 @@

import { $api } from "./apiClient";
import { useError } from "../(errors)/errorContext";
import { QueryClient, useQueryClient } from "@tanstack/react-query";
import { QueryClient, useQuery, useQueryClient } from "@tanstack/react-query";
import type { components } from "../reflector-api";
import { useAuth } from "./AuthProvider";
import { meetingStatusBatcher } from "./meetingStatusBatcher";
import { MeetingId } from "./types";
import { NonEmptyString } from "./utils";

@@ -697,15 +698,7 @@ export function useRoomsCreateMeeting() {
        queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
      }),
      queryClient.invalidateQueries({
        queryKey: $api.queryOptions(
          "get",
          "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
          {
            params: {
              path: { room_name: roomName },
            },
          },
        ).queryKey,
        queryKey: meetingStatusKeys.active(roomName),
      }),
    ]);
  },
@@ -734,42 +727,39 @@ export function useRoomGetByName(roomName: string | null) {
export function useRoomUpcomingMeetings(roomName: string | null) {
  const { isAuthenticated } = useAuthReady();

  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/meetings/upcoming" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_UPCOMING_PATH_PARTIAL}`,
    {
      params: {
        path: { room_name: roomName! },
      },
  return useQuery({
    queryKey: meetingStatusKeys.upcoming(roomName!),
    queryFn: async () => {
      const result = await meetingStatusBatcher.fetch(roomName!);
      return result.upcoming_events;
    },
    {
      enabled: !!roomName && isAuthenticated,
    },
  );
    enabled: !!roomName && isAuthenticated,
  });
}

const MEETINGS_PATH_PARTIAL = "meetings" as const;
const MEETINGS_ACTIVE_PATH_PARTIAL = `${MEETINGS_PATH_PARTIAL}/active` as const;
const MEETINGS_UPCOMING_PATH_PARTIAL =
  `${MEETINGS_PATH_PARTIAL}/upcoming` as const;
const MEETING_LIST_PATH_PARTIALS = [
  MEETINGS_ACTIVE_PATH_PARTIAL,
  MEETINGS_UPCOMING_PATH_PARTIAL,
];
// Query keys reuse $api.queryOptions so cache identity matches the original
// per-room GET endpoints. The actual fetch goes through the batcher, but the
// keys stay consistent with the rest of the codebase.
const meetingStatusKeys = {
  active: (roomName: string) =>
    $api.queryOptions("get", "/v1/rooms/{room_name}/meetings/active", {
      params: { path: { room_name: roomName } },
    }).queryKey,
  upcoming: (roomName: string) =>
    $api.queryOptions("get", "/v1/rooms/{room_name}/meetings/upcoming", {
      params: { path: { room_name: roomName } },
    }).queryKey,
};

export function useRoomActiveMeetings(roomName: string | null) {
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
    {
      params: {
        path: { room_name: roomName! },
      },
  return useQuery({
    queryKey: meetingStatusKeys.active(roomName!),
    queryFn: async () => {
      const result = await meetingStatusBatcher.fetch(roomName!);
      return result.active_meetings;
    },
    {
      enabled: !!roomName,
    },
  );
    enabled: !!roomName,
  });
}

export function useRoomGetMeeting(
@@ -807,35 +797,6 @@ export function useRoomJoinMeeting() {
  );
}

// Presence race fix endpoints (not yet in OpenAPI spec)
// These signal join intent to prevent race conditions during WebRTC handshake
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function useMeetingJoining(): any {
  return ($api as any).useMutation(
    "post",
    "/v1/rooms/{room_name}/meetings/{meeting_id}/joining",
    {},
  );
}

// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function useMeetingJoined(): any {
  return ($api as any).useMutation(
    "post",
    "/v1/rooms/{room_name}/meetings/{meeting_id}/joined",
    {},
  );
}

// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function useMeetingLeave(): any {
  return ($api as any).useMutation(
    "post",
    "/v1/rooms/{room_name}/meetings/{meeting_id}/leave",
    {},
  );
}

export function useRoomIcsSync() {
  const { setError } = useError();
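
One consequence of the key reuse in meetingStatusKeys above is worth spelling out: existing invalidations that target the legacy per-room GET keys (as useRoomsCreateMeeting now does via meetingStatusKeys.active) transparently refetch the batched queries, because both register under the identical queryKey. A hedged sketch — meetingStatusKeys is module-private above, so the export here is assumed purely for illustration:

import { QueryClient } from "@tanstack/react-query";
import { meetingStatusKeys } from "./apiHooks"; // assumption: exported for illustration

async function refreshRoomStatus(queryClient: QueryClient, roomName: string) {
  // Invalidates the same key useRoomActiveMeetings(roomName) registered,
  // so the batched queryFn re-runs (and re-coalesces) on the next render.
  await queryClient.invalidateQueries({
    queryKey: meetingStatusKeys.active(roomName),
  });
}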
37 www/app/lib/meetingStatusBatcher.ts Normal file
@@ -0,0 +1,37 @@
import { create, keyResolver, windowScheduler } from "@yornaath/batshit";
import { client } from "./apiClient";
import type { components } from "../reflector-api";

type MeetingStatusResult = {
  roomName: string;
  active_meetings: components["schemas"]["Meeting"][];
  upcoming_events: components["schemas"]["CalendarEventResponse"][];
};

const BATCH_WINDOW_MS = 10;

export function createMeetingStatusBatcher(windowMs: number = BATCH_WINDOW_MS) {
  return create({
    fetcher: async (roomNames: string[]): Promise<MeetingStatusResult[]> => {
      const unique = [...new Set(roomNames)];
      const { data, error } = await client.POST(
        "/v1/rooms/meetings/bulk-status",
        { body: { room_names: unique } },
      );
      if (error || !data) {
        throw new Error(
          `bulk-status fetch failed: ${JSON.stringify(error ?? "no data")}`,
        );
      }
      return roomNames.map((name) => ({
        roomName: name,
        active_meetings: data[name]?.active_meetings ?? [],
        upcoming_events: data[name]?.upcoming_events ?? [],
      }));
    },
    resolver: keyResolver("roomName"),
    scheduler: windowScheduler(windowMs),
  });
}

export const meetingStatusBatcher = createMeetingStatusBatcher();
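
A usage sketch for the batcher above (identifiers come from the file itself; only the surrounding async context is assumed): per-room fetches issued inside one 10ms window collapse into a single bulk-status POST, and keyResolver("roomName") hands each caller back only its own room's slice.

import { meetingStatusBatcher } from "./meetingStatusBatcher";

async function loadTwoRooms() {
  // Both calls land in the same scheduler window, so the network sees one
  // POST /v1/rooms/meetings/bulk-status with room_names: ["room-a", "room-b"].
  const [a, b] = await Promise.all([
    meetingStatusBatcher.fetch("room-a"),
    meetingStatusBatcher.fetch("room-b"),
  ]);
  return { activeInA: a.active_meetings, upcomingInB: b.upcoming_events };
}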
64 www/app/reflector-api.d.ts vendored
@@ -118,6 +118,23 @@ export interface paths {
      patch?: never;
      trace?: never;
    };
    "/v1/rooms/meetings/bulk-status": {
      parameters: {
        query?: never;
        header?: never;
        path?: never;
        cookie?: never;
      };
      get?: never;
      put?: never;
      /** Rooms Bulk Meeting Status */
      post: operations["v1_rooms_bulk_meeting_status"];
      delete?: never;
      options?: never;
      head?: never;
      patch?: never;
      trace?: never;
    };
    "/v1/rooms/{room_id}": {
      parameters: {
        query?: never;
@@ -799,6 +816,11 @@ export interface components {
       */
      chunk: string;
    };
    /** BulkStatusRequest */
    BulkStatusRequest: {
      /** Room Names */
      room_names: string[];
    };
    /** CalendarEventResponse */
    CalendarEventResponse: {
      /** Id */
@@ -1735,6 +1757,13 @@ export interface components {
      /** Webhook Secret */
      webhook_secret: string | null;
    };
    /** RoomMeetingStatus */
    RoomMeetingStatus: {
      /** Active Meetings */
      active_meetings: components["schemas"]["Meeting"][];
      /** Upcoming Events */
      upcoming_events: components["schemas"]["CalendarEventResponse"][];
    };
    /** RtcOffer */
    RtcOffer: {
      /** Sdp */
@@ -2272,6 +2301,41 @@ export interface operations {
      };
    };
  };
  v1_rooms_bulk_meeting_status: {
    parameters: {
      query?: never;
      header?: never;
      path?: never;
      cookie?: never;
    };
    requestBody: {
      content: {
        "application/json": components["schemas"]["BulkStatusRequest"];
      };
    };
    responses: {
      /** @description Successful Response */
      200: {
        headers: {
          [name: string]: unknown;
        };
        content: {
          "application/json": {
            [key: string]: components["schemas"]["RoomMeetingStatus"];
          };
        };
      };
      /** @description Validation Error */
      422: {
        headers: {
          [name: string]: unknown;
        };
        content: {
          "application/json": components["schemas"]["HTTPValidationError"];
        };
      };
    };
  };
  v1_rooms_get: {
    parameters: {
      query?: never;
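
A minimal typed-call sketch against the generated operation above, assuming the shared openapi-fetch client from ./apiClient: the 200 response is inferred as a map of room name to RoomMeetingStatus, so per-room indexing is type-checked.

import { client } from "./apiClient";

async function fetchBulkStatus(roomNames: string[]) {
  const { data, error } = await client.POST("/v1/rooms/meetings/bulk-status", {
    body: { room_names: roomNames },
  });
  if (error || !data) {
    throw new Error("bulk-status request failed");
  }
  // data[name] is a RoomMeetingStatus: { active_meetings, upcoming_events }.
  return roomNames.map((name) => data[name] ?? null);
}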
@@ -1,8 +1,22 @@
module.exports = {
  preset: "ts-jest",
  testEnvironment: "node",
  testEnvironment: "jest-environment-jsdom",
  roots: ["<rootDir>/app"],
  testMatch: ["**/__tests__/**/*.test.ts"],
  collectCoverage: true,
  collectCoverageFrom: ["app/**/*.ts", "!app/**/*.d.ts"],
  testMatch: ["**/__tests__/**/*.test.ts", "**/__tests__/**/*.test.tsx"],
  collectCoverage: false,
  transform: {
    "^.+\\.[jt]sx?$": [
      "ts-jest",
      {
        tsconfig: {
          jsx: "react-jsx",
          module: "esnext",
          moduleResolution: "bundler",
          esModuleInterop: true,
          strict: true,
          downlevelIteration: true,
          lib: ["dom", "dom.iterable", "esnext"],
        },
      },
    ],
  },
};
@@ -23,6 +23,7 @@
    "@tanstack/react-query": "^5.85.9",
    "@types/ioredis": "^5.0.0",
    "@whereby.com/browser-sdk": "^3.3.4",
    "@yornaath/batshit": "^0.14.0",
    "autoprefixer": "10.4.20",
    "axios": "^1.8.2",
    "eslint": "^9.33.0",
@@ -61,9 +62,13 @@
  "author": "Andreas <andreas@monadical.com>",
  "license": "All Rights Reserved",
  "devDependencies": {
    "@testing-library/dom": "^10.4.1",
    "@testing-library/jest-dom": "^6.9.1",
    "@testing-library/react": "^16.3.2",
    "@types/jest": "^30.0.0",
    "@types/react": "18.2.20",
    "jest": "^30.1.3",
    "jest-environment-jsdom": "^30.2.0",
    "openapi-typescript": "^7.9.1",
    "prettier": "^3.0.0",
    "ts-jest": "^29.4.1"
808 www/pnpm-lock.yaml generated
File diff suppressed because it is too large