Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2026-02-06 02:36:47 +00:00)

Compare commits: branch fix-room-q, 2 commits (df6916385b, 083a50cbcd)
Deleted file (Alembic migration 3aa20b96d963, "drop_use_celery_column"):
@@ -1,35 +0,0 @@
-"""drop_use_celery_column
-
-Revision ID: 3aa20b96d963
-Revises: e69f08ead8ea
-Create Date: 2026-02-05 10:12:44.065279
-
-"""
-
-from typing import Sequence, Union
-
-import sqlalchemy as sa
-from alembic import op
-
-# revision identifiers, used by Alembic.
-revision: str = "3aa20b96d963"
-down_revision: Union[str, None] = "e69f08ead8ea"
-branch_labels: Union[str, Sequence[str], None] = None
-depends_on: Union[str, Sequence[str], None] = None
-
-
-def upgrade() -> None:
-    with op.batch_alter_table("room", schema=None) as batch_op:
-        batch_op.drop_column("use_celery")
-
-
-def downgrade() -> None:
-    with op.batch_alter_table("room", schema=None) as batch_op:
-        batch_op.add_column(
-            sa.Column(
-                "use_celery",
-                sa.Boolean(),
-                server_default=sa.text("false"),
-                nullable=False,
-            )
-        )
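Removing this migration keeps the use_celery column on the room table, which is what the rest of the branch builds on. A minimal sketch of flipping a room back to the legacy pipeline, using the controller calls that appear elsewhere in this diff (assumed to run inside the server's async context):

    # Pin one room to the legacy Celery pipeline; its recordings will then
    # skip Hatchet dispatch (see the dispatch changes below).
    room = await rooms_controller.get_by_id(room_id)
    await rooms_controller.update(room, {"use_celery": True})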
@@ -57,6 +57,12 @@ rooms = sqlalchemy.Table(
         sqlalchemy.String,
         nullable=False,
     ),
+    sqlalchemy.Column(
+        "use_celery",
+        sqlalchemy.Boolean,
+        nullable=False,
+        server_default=false(),
+    ),
     sqlalchemy.Column(
         "skip_consent",
         sqlalchemy.Boolean,
@@ -91,6 +97,7 @@ class Room(BaseModel):
     ics_last_sync: datetime | None = None
     ics_last_etag: str | None = None
     platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
+    use_celery: bool = False
     skip_consent: bool = False
@@ -15,10 +15,14 @@ from hatchet_sdk.clients.rest.exceptions import ApiException, NotFoundException
 from hatchet_sdk.clients.rest.models import V1TaskStatus
 
 from reflector.db.recordings import recordings_controller
+from reflector.db.rooms import rooms_controller
 from reflector.db.transcripts import Transcript, transcripts_controller
 from reflector.hatchet.client import HatchetClientManager
 from reflector.logger import logger
 from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
+from reflector.pipelines.main_multitrack_pipeline import (
+    task_pipeline_multitrack_process,
+)
 from reflector.utils.string import NonEmptyString
@@ -177,98 +181,124 @@ async def dispatch_transcript_processing(

This hunk rewrites the multitrack branch. Before, it went straight to Hatchet (the removed comment read "# Multitrack processing always uses Hatchet (no Celery fallback)"); the replay, --force cancellation, and concurrent-dispatch checks already existed and only move one indent level deeper under if use_hatchet:. After the change:

    Returns AsyncResult for Celery tasks, None for Hatchet workflows.
    """
    if isinstance(config, MultitrackProcessingConfig):
        use_celery = False
        if config.room_id:
            room = await rooms_controller.get_by_id(config.room_id)
            use_celery = room.use_celery if room else False

        use_hatchet = not use_celery

        if use_celery:
            logger.info(
                "Room uses legacy Celery processing",
                room_id=config.room_id,
                transcript_id=config.transcript_id,
            )

        if use_hatchet:
            # First check if we can replay (outside transaction since it's read-only)
            transcript = await transcripts_controller.get_by_id(config.transcript_id)
            if transcript and transcript.workflow_run_id and not force:
                can_replay = await HatchetClientManager.can_replay(
                    transcript.workflow_run_id
                )
                if can_replay:
                    await HatchetClientManager.replay_workflow(
                        transcript.workflow_run_id
                    )
                    logger.info(
                        "Replaying Hatchet workflow",
                        workflow_id=transcript.workflow_run_id,
                    )
                    return None
                else:
                    # Workflow can't replay (CANCELLED, COMPLETED, or 404 deleted)
                    # Log and proceed to start new workflow
                    try:
                        status = await HatchetClientManager.get_workflow_run_status(
                            transcript.workflow_run_id
                        )
                        logger.info(
                            "Old workflow not replayable, starting new",
                            old_workflow_id=transcript.workflow_run_id,
                            old_status=status.value,
                        )
                    except NotFoundException:
                        # Workflow deleted from Hatchet but ID still in DB
                        logger.info(
                            "Old workflow not found in Hatchet, starting new",
                            old_workflow_id=transcript.workflow_run_id,
                        )

            # Force: cancel old workflow if exists
            if force and transcript and transcript.workflow_run_id:
                try:
                    await HatchetClientManager.cancel_workflow(
                        transcript.workflow_run_id
                    )
                    logger.info(
                        "Cancelled old workflow (--force)",
                        workflow_id=transcript.workflow_run_id,
                    )
                except NotFoundException:
                    logger.info(
                        "Old workflow already deleted (--force)",
                        workflow_id=transcript.workflow_run_id,
                    )
                await transcripts_controller.update(
                    transcript, {"workflow_run_id": None}
                )

            # Re-fetch and check for concurrent dispatch (optimistic approach).
            # No database lock - worst case is duplicate dispatch, but Hatchet
            # workflows are idempotent so this is acceptable.
            transcript = await transcripts_controller.get_by_id(config.transcript_id)
            if transcript and transcript.workflow_run_id:
                # Another process started a workflow between validation and now
                try:
                    status = await HatchetClientManager.get_workflow_run_status(
                        transcript.workflow_run_id
                    )
                    if status in (V1TaskStatus.RUNNING, V1TaskStatus.QUEUED):
                        logger.info(
                            "Concurrent workflow detected, skipping dispatch",
                            workflow_id=transcript.workflow_run_id,
                        )
                        return None
                except ApiException:
                    # Workflow might be gone (404) or API issue - proceed with new workflow
                    pass

            workflow_id = await HatchetClientManager.start_workflow(
                workflow_name="DiarizationPipeline",
                input_data={
                    "recording_id": config.recording_id,
                    "tracks": [{"s3_key": k} for k in config.track_keys],
                    "bucket_name": config.bucket_name,
                    "transcript_id": config.transcript_id,
                    "room_id": config.room_id,
                },
                additional_metadata={
                    "transcript_id": config.transcript_id,
                    "recording_id": config.recording_id,
                    "daily_recording_id": config.recording_id,
                },
            )

            if transcript:
                await transcripts_controller.update(
                    transcript, {"workflow_run_id": workflow_id}
                )

            logger.info("Hatchet workflow dispatched", workflow_id=workflow_id)
            return None

        # Celery pipeline (durable workflows disabled)
        return task_pipeline_multitrack_process.delay(
            transcript_id=config.transcript_id,
            bucket_name=config.bucket_name,
            track_keys=config.track_keys,
        )
    elif isinstance(config, FileProcessingConfig):
        return task_pipeline_file_process.delay(transcript_id=config.transcript_id)
    else:
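A hypothetical call into the dispatcher. The config fields are inferred from the attribute accesses above (config.transcript_id, config.track_keys, and so on) and are not confirmed constructor arguments:

    # Sketch only; assumes an async context and that MultitrackProcessingConfig
    # accepts these fields as keyword arguments.
    config = MultitrackProcessingConfig(
        transcript_id=transcript_id,
        recording_id=recording_id,
        bucket_name=bucket_name,
        track_keys=track_keys,
        room_id=room_id,
    )
    # Rooms flagged use_celery get a Celery AsyncResult back; everything else
    # is dispatched (or replayed) on Hatchet and returns None.
    result = await dispatch_transcript_processing(config)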
@@ -1,7 +1,7 @@
 from pydantic.types import PositiveInt
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
-from reflector.schemas.platform import DAILY_PLATFORM, Platform
+from reflector.schemas.platform import WHEREBY_PLATFORM, Platform
 from reflector.utils.string import NonEmptyString


@@ -155,7 +155,7 @@ class Settings(BaseSettings):
         None  # Webhook UUID for this environment. Not used by production code
     )
     # Platform Configuration
-    DEFAULT_VIDEO_PLATFORM: Platform = DAILY_PLATFORM
+    DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM
 
     # Zulip integration
     ZULIP_REALM: str | None = None
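Since Settings is a pydantic-settings BaseSettings, a deployment that still wants Daily as its default can presumably override `DEFAULT_VIDEO_PLATFORM` from the environment rather than in code; the exact string value of `DAILY_PLATFORM` is not shown in this diff, and any env prefix configured via `SettingsConfigDict` would apply.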
@@ -212,6 +212,9 @@ async def rooms_bulk_meeting_status(
     request: BulkStatusRequest,
     user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
 ):
+    if not user and not settings.PUBLIC_MODE:
+        raise HTTPException(status_code=401, detail="Not authenticated")
+
     user_id = user["sub"] if user else None
 
     all_rooms = await rooms_controller.get_by_names(request.room_names)
@@ -224,13 +227,19 @@ async def rooms_bulk_meeting_status(
     room_by_id: dict[str, DbRoom] = {r.id: r for r in rooms}
     room_ids = list(room_by_id.keys())
 
+    if not room_ids:
+        return {
+            name: RoomMeetingStatus(active_meetings=[], upcoming_events=[])
+            for name in request.room_names
+        }
+
     current_time = datetime.now(timezone.utc)
     active_meetings, upcoming_events = await asyncio.gather(
         meetings_controller.get_all_active_for_rooms(room_ids, current_time),
         calendar_events_controller.get_upcoming_for_rooms(room_ids),
     )
 
-    # Group by room name
+    # Group by room name, converting DB models to view models
     active_by_room: dict[str, list[Meeting]] = defaultdict(list)
     for m in active_meetings:
         room = room_by_id.get(m.room_id)
@@ -239,7 +248,9 @@ async def rooms_bulk_meeting_status(
         m.platform = room.platform
         if user_id != room.user_id and m.platform == "whereby":
             m.host_room_url = ""
-        active_by_room[room.name].append(m)
+        active_by_room[room.name].append(
+            Meeting.model_validate(m, from_attributes=True)
+        )
 
     upcoming_by_room: dict[str, list[CalendarEventResponse]] = defaultdict(list)
     for e in upcoming_events:
@@ -249,7 +260,9 @@ async def rooms_bulk_meeting_status(
         if user_id != room.user_id:
             e.description = None
             e.attendees = None
-        upcoming_by_room[room.name].append(e)
+        upcoming_by_room[room.name].append(
+            CalendarEventResponse.model_validate(e, from_attributes=True)
+        )
 
     result: dict[str, RoomMeetingStatus] = {}
     for name in request.room_names:
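A request sketch against the early-return and redaction behavior above. The path and payload shape are taken from the server tests below (the browser client calls the same endpoint under /v1); the host is a placeholder, and on a non-PUBLIC_MODE instance the request would also need authentication, as the 401 guard shows:

    import httpx

    resp = httpx.post(
        "http://localhost:8000/rooms/meetings/bulk-status",  # placeholder host
        json={"room_names": ["team-standup", "does-not-exist"]},
    )
    # Every requested name is present in the response; rooms the caller cannot
    # see (or that do not exist) come back as empty statuses, not omissions.
    assert resp.json()["does-not-exist"] == {
        "active_meetings": [],
        "upcoming_events": [],
    }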
@@ -27,6 +27,9 @@ from reflector.db.transcripts import (
 from reflector.hatchet.client import HatchetClientManager
 from reflector.pipelines.main_file_pipeline import task_pipeline_file_process
 from reflector.pipelines.main_live_pipeline import asynctask
+from reflector.pipelines.main_multitrack_pipeline import (
+    task_pipeline_multitrack_process,
+)
 from reflector.pipelines.topic_processing import EmptyPipeline
 from reflector.processors import AudioFileWriterProcessor
 from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
@@ -348,29 +351,49 @@ async def _process_multitrack_recording_inner(

Same pattern as the dispatcher: the removed comment "# Multitrack processing always uses Hatchet (no Celery fallback)" and the unconditional start_workflow/update are replaced by a use_celery gate, with the Hatchet block unchanged apart from indentation. After the change:

        room_id=room.id,
    )

    use_celery = room and room.use_celery
    use_hatchet = not use_celery

    if use_celery:
        logger.info(
            "Room uses legacy Celery processing",
            room_id=room.id,
            transcript_id=transcript.id,
        )

    if use_hatchet:
        workflow_id = await HatchetClientManager.start_workflow(
            workflow_name="DiarizationPipeline",
            input_data={
                "recording_id": recording_id,
                "tracks": [{"s3_key": k} for k in filter_cam_audio_tracks(track_keys)],
                "bucket_name": bucket_name,
                "transcript_id": transcript.id,
                "room_id": room.id,
            },
            additional_metadata={
                "transcript_id": transcript.id,
                "recording_id": recording_id,
                "daily_recording_id": recording_id,
            },
        )
        logger.info(
            "Started Hatchet workflow",
            workflow_id=workflow_id,
            transcript_id=transcript.id,
        )

        await transcripts_controller.update(
            transcript, {"workflow_run_id": workflow_id}
        )
        return

    # Celery pipeline (runs when durable workflows disabled)
    task_pipeline_multitrack_process.delay(
        transcript_id=transcript.id,
        bucket_name=bucket_name,
        track_keys=filter_cam_audio_tracks(track_keys),
    )


@shared_task
@@ -1049,43 +1072,66 @@ async def reprocess_failed_daily_recordings():

The reprocessing loop gets the same gate. Before, every failed recording was re-queued to Hatchet (the removed comment read "# Multitrack reprocessing always uses Hatchet (no Celery fallback)"); now rooms flagged use_celery are re-enqueued through process_multitrack_recording instead. After the change:

            )
            continue

        use_celery = room and room.use_celery
        use_hatchet = not use_celery

        if use_hatchet:
            if not transcript:
                logger.warning(
                    "No transcript for Hatchet reprocessing, skipping",
                    recording_id=recording.id,
                )
                continue

            workflow_id = await HatchetClientManager.start_workflow(
                workflow_name="DiarizationPipeline",
                input_data={
                    "recording_id": recording.id,
                    "tracks": [
                        {"s3_key": k}
                        for k in filter_cam_audio_tracks(recording.track_keys)
                    ],
                    "bucket_name": bucket_name,
                    "transcript_id": transcript.id,
                    "room_id": room.id if room else None,
                },
                additional_metadata={
                    "transcript_id": transcript.id,
                    "recording_id": recording.id,
                    "reprocess": True,
                },
            )
            await transcripts_controller.update(
                transcript, {"workflow_run_id": workflow_id}
            )
            logger.info(
                "Queued Daily recording for Hatchet reprocessing",
                recording_id=recording.id,
                workflow_id=workflow_id,
                room_name=meeting.room_name,
                track_count=len(recording.track_keys),
            )
        else:
            logger.info(
                "Queueing Daily recording for Celery reprocessing",
                recording_id=recording.id,
                room_name=meeting.room_name,
                track_count=len(recording.track_keys),
                transcript_status=transcript.status if transcript else None,
            )
            # For reprocessing, pass actual recording time (though it's ignored - see _process_multitrack_recording_inner)
            # Reprocessing uses recording.meeting_id directly instead of time-based matching
            recording_start_ts = int(recording.recorded_at.timestamp())

            process_multitrack_recording.delay(
                bucket_name=bucket_name,
                daily_room_name=meeting.room_name,
                recording_id=recording.id,
                track_keys=recording.track_keys,
                recording_start_ts=recording_start_ts,
            )

        reprocessed_count += 1
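One subtlety in the gate used here and in the worker hunk above, shown as a two-line sketch:

    # If room is None, `room and room.use_celery` evaluates to None (falsy),
    # so recordings without a resolvable room still default to Hatchet.
    use_celery = room and room.use_celery
    use_hatchet = not use_celery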
@@ -4,7 +4,7 @@ from unittest.mock import patch
 
 import pytest
 
-from reflector.schemas.platform import DAILY_PLATFORM, WHEREBY_PLATFORM
+from reflector.schemas.platform import WHEREBY_PLATFORM
 
 
 @pytest.fixture(scope="session", autouse=True)
@@ -14,7 +14,6 @@ def register_mock_platform():
     from reflector.video_platforms.registry import register_platform
 
     register_platform(WHEREBY_PLATFORM, MockPlatformClient)
-    register_platform(DAILY_PLATFORM, MockPlatformClient)
     yield
server/tests/test_rooms_bulk_status.py (new file, 184 lines)
@@ -0,0 +1,184 @@
from datetime import datetime, timedelta, timezone

import pytest
from conftest import authenticated_client_ctx

from reflector.db.calendar_events import CalendarEvent, calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import Room, rooms_controller
from reflector.settings import settings


async def _create_room(name: str, user_id: str, is_shared: bool = False) -> Room:
    return await rooms_controller.add(
        name=name,
        user_id=user_id,
        zulip_auto_post=False,
        zulip_stream="",
        zulip_topic="",
        is_locked=False,
        room_mode="normal",
        recording_type="cloud",
        recording_trigger="automatic-2nd-participant",
        is_shared=is_shared,
        webhook_url="",
        webhook_secret="",
    )


async def _create_meeting(room: Room, active: bool = True):
    now = datetime.now(timezone.utc)
    return await meetings_controller.create(
        id=f"meeting-{room.name}-{now.timestamp()}",
        room_name=room.name,
        room_url="room-url",
        host_room_url="host-url",
        start_date=now - timedelta(minutes=10),
        end_date=now + timedelta(minutes=50) if active else now - timedelta(minutes=1),
        room=room,
    )


async def _create_calendar_event(room: Room):
    now = datetime.now(timezone.utc)
    return await calendar_events_controller.upsert(
        CalendarEvent(
            room_id=room.id,
            ics_uid=f"event-{room.name}",
            title=f"Upcoming in {room.name}",
            description="secret description",
            start_time=now + timedelta(minutes=30),
            end_time=now + timedelta(minutes=90),
            attendees=[{"name": "Alice", "email": "alice@example.com"}],
        )
    )


@pytest.mark.asyncio
async def test_bulk_status_returns_empty_for_no_rooms(client):
    """Empty room_names returns empty dict."""
    async with authenticated_client_ctx():
        resp = await client.post("/rooms/meetings/bulk-status", json={"room_names": []})
        assert resp.status_code == 200
        assert resp.json() == {}


@pytest.mark.asyncio
async def test_bulk_status_returns_active_meetings_and_upcoming_events(client):
    """Owner sees active meetings and upcoming events for their rooms."""
    room = await _create_room("bulk-test-room", "randomuserid")
    await _create_meeting(room, active=True)
    await _create_calendar_event(room)

    async with authenticated_client_ctx():
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["bulk-test-room"]},
        )

    assert resp.status_code == 200
    data = resp.json()
    assert "bulk-test-room" in data
    status = data["bulk-test-room"]
    assert len(status["active_meetings"]) == 1
    assert len(status["upcoming_events"]) == 1
    # Owner sees description
    assert status["upcoming_events"][0]["description"] == "secret description"


@pytest.mark.asyncio
async def test_bulk_status_redacts_data_for_non_owner(client):
    """Non-owner of a shared room gets redacted calendar events and no whereby host_room_url."""
    room = await _create_room("shared-bulk", "other-user-id", is_shared=True)
    await _create_meeting(room, active=True)
    await _create_calendar_event(room)

    # authenticated as "randomuserid" but room owned by "other-user-id"
    async with authenticated_client_ctx():
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["shared-bulk"]},
        )

    assert resp.status_code == 200
    status = resp.json()["shared-bulk"]
    assert len(status["active_meetings"]) == 1
    assert len(status["upcoming_events"]) == 1
    # Non-owner: description and attendees redacted
    assert status["upcoming_events"][0]["description"] is None
    assert status["upcoming_events"][0]["attendees"] is None


@pytest.mark.asyncio
async def test_bulk_status_filters_private_rooms_of_other_users(client):
    """User cannot see private rooms owned by others."""
    await _create_room("private-other", "other-user-id", is_shared=False)

    async with authenticated_client_ctx():
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["private-other"]},
        )

    assert resp.status_code == 200
    status = resp.json()["private-other"]
    assert status["active_meetings"] == []
    assert status["upcoming_events"] == []


@pytest.mark.asyncio
async def test_bulk_status_redacts_whereby_host_room_url_for_non_owner(client):
    """Non-owner of a shared whereby room gets empty host_room_url."""
    room = await _create_room("shared-whereby", "other-user-id", is_shared=True)
    # Force platform to whereby
    from reflector.db import get_database
    from reflector.db.rooms import rooms as rooms_table

    await get_database().execute(
        rooms_table.update()
        .where(rooms_table.c.id == room.id)
        .values(platform="whereby")
    )

    await _create_meeting(room, active=True)

    async with authenticated_client_ctx():
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["shared-whereby"]},
        )

    assert resp.status_code == 200
    status = resp.json()["shared-whereby"]
    assert len(status["active_meetings"]) == 1
    assert status["active_meetings"][0]["host_room_url"] == ""


@pytest.mark.asyncio
async def test_bulk_status_unauthenticated_rejected_non_public(client):
    """Unauthenticated request on non-PUBLIC_MODE instance returns 401."""
    original = settings.PUBLIC_MODE
    try:
        settings.PUBLIC_MODE = False
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["any-room"]},
        )
        assert resp.status_code == 401
    finally:
        settings.PUBLIC_MODE = original


@pytest.mark.asyncio
async def test_bulk_status_nonexistent_room_returns_empty(client):
    """Requesting a room that doesn't exist returns empty lists."""
    async with authenticated_client_ctx():
        resp = await client.post(
            "/rooms/meetings/bulk-status",
            json={"room_names": ["does-not-exist"]},
        )

    assert resp.status_code == 200
    status = resp.json()["does-not-exist"]
    assert status["active_meetings"] == []
    assert status["upcoming_events"] == []
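These tests drive the endpoint end to end through the app's test client, covering owner visibility, non-owner redaction, private-room filtering, the 401 guard, and the empty-status fallback. Assuming a standard pytest setup for this repository, `pytest server/tests/test_rooms_bulk_status.py` should run just this module (the invocation is a guess, not part of the diff).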
@@ -1,6 +1,6 @@
 import asyncio
 import time
-from unittest.mock import AsyncMock, patch
+from unittest.mock import patch
 
 import pytest
 from httpx import ASGITransport, AsyncClient
@@ -142,17 +142,17 @@ async def test_whereby_recording_uses_file_pipeline(client):
             "reflector.services.transcript_process.task_pipeline_file_process"
         ) as mock_file_pipeline,
         patch(
-            "reflector.services.transcript_process.HatchetClientManager"
-        ) as mock_hatchet,
+            "reflector.services.transcript_process.task_pipeline_multitrack_process"
+        ) as mock_multitrack_pipeline,
     ):
         response = await client.post(f"/transcripts/{transcript.id}/process")
 
         assert response.status_code == 200
         assert response.json()["status"] == "ok"
 
-        # Whereby recordings should use file pipeline, not Hatchet
+        # Whereby recordings should use file pipeline
         mock_file_pipeline.delay.assert_called_once_with(transcript_id=transcript.id)
-        mock_hatchet.start_workflow.assert_not_called()
+        mock_multitrack_pipeline.delay.assert_not_called()
 
 
 @pytest.mark.usefixtures("setup_database")
@@ -177,6 +177,8 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
         recording_trigger="automatic-2nd-participant",
         is_shared=False,
     )
+    # Force Celery backend for test
+    await rooms_controller.update(room, {"use_celery": True})
 
     transcript = await transcripts_controller.add(
         "",
@@ -211,23 +213,18 @@ async def test_dailyco_recording_uses_multitrack_pipeline(client):
             "reflector.services.transcript_process.task_pipeline_file_process"
         ) as mock_file_pipeline,
         patch(
-            "reflector.services.transcript_process.HatchetClientManager"
-        ) as mock_hatchet,
+            "reflector.services.transcript_process.task_pipeline_multitrack_process"
+        ) as mock_multitrack_pipeline,
     ):
-        mock_hatchet.start_workflow = AsyncMock(return_value="test-workflow-id")
-
         response = await client.post(f"/transcripts/{transcript.id}/process")
 
         assert response.status_code == 200
         assert response.json()["status"] == "ok"
 
-        # Daily.co multitrack recordings should use Hatchet workflow
-        mock_hatchet.start_workflow.assert_called_once()
-        call_kwargs = mock_hatchet.start_workflow.call_args.kwargs
-        assert call_kwargs["workflow_name"] == "DiarizationPipeline"
-        assert call_kwargs["input_data"]["transcript_id"] == transcript.id
-        assert call_kwargs["input_data"]["bucket_name"] == "daily-bucket"
-        assert call_kwargs["input_data"]["tracks"] == [
-            {"s3_key": k} for k in track_keys
-        ]
+        # Daily.co multitrack recordings should use multitrack pipeline
+        mock_multitrack_pipeline.delay.assert_called_once_with(
+            transcript_id=transcript.id,
+            bucket_name="daily-bucket",
+            track_keys=track_keys,
+        )
         mock_file_pipeline.delay.assert_not_called()
@@ -1,5 +1,10 @@
+import { useMemo } from "react";
 import { Box, Heading, Text, VStack } from "@chakra-ui/react";
 import type { components } from "../../../reflector-api";
+import {
+  useRoomsBulkMeetingStatus,
+  BulkMeetingStatusMap,
+} from "../../../lib/apiHooks";
 
 type Room = components["schemas"]["Room"];
 import { RoomTable } from "./RoomTable";
@@ -31,6 +36,10 @@ export function RoomList({
   pt,
   loading,
 }: RoomListProps) {
+  const roomNames = useMemo(() => rooms.map((r) => r.name), [rooms]);
+  const bulkStatusQuery = useRoomsBulkMeetingStatus(roomNames);
+  const meetingStatusMap: BulkMeetingStatusMap = bulkStatusQuery.data ?? {};
+
   return (
     <VStack alignItems="start" gap={4} mb={mb} pt={pt}>
       <Heading size="md">{title}</Heading>
@@ -43,6 +52,8 @@ export function RoomList({
         onEdit={onEdit}
         onDelete={onDelete}
         loading={loading}
+        meetingStatusMap={meetingStatusMap}
+        meetingStatusLoading={bulkStatusQuery.isLoading}
       />
       <RoomCards
         rooms={rooms}
@@ -14,11 +14,7 @@ import {
 import { LuLink, LuRefreshCw } from "react-icons/lu";
 import { FaCalendarAlt } from "react-icons/fa";
 import type { components } from "../../../reflector-api";
-import {
-  useRoomActiveMeetings,
-  useRoomUpcomingMeetings,
-  useRoomIcsSync,
-} from "../../../lib/apiHooks";
+import { useRoomIcsSync, BulkMeetingStatusMap } from "../../../lib/apiHooks";
 
 type Room = components["schemas"]["Room"];
 type Meeting = components["schemas"]["Meeting"];
@@ -62,6 +58,8 @@ interface RoomTableProps {
   onEdit: (roomId: string, roomData: any) => void;
   onDelete: (roomId: string) => void;
   loading?: boolean;
+  meetingStatusMap: BulkMeetingStatusMap;
+  meetingStatusLoading: boolean;
 }
 
 const getRoomModeDisplay = (mode: string): string => {
@@ -104,14 +102,16 @@ const getZulipDisplay = (
   return "Enabled";
 };
 
-function MeetingStatus({ roomName }: { roomName: string }) {
-  const activeMeetingsQuery = useRoomActiveMeetings(roomName);
-  const upcomingMeetingsQuery = useRoomUpcomingMeetings(roomName);
-
-  const activeMeetings = activeMeetingsQuery.data || [];
-  const upcomingMeetings = upcomingMeetingsQuery.data || [];
-
-  if (activeMeetingsQuery.isLoading || upcomingMeetingsQuery.isLoading) {
+function MeetingStatus({
+  activeMeetings,
+  upcomingMeetings,
+  isLoading,
+}: {
+  activeMeetings: Meeting[];
+  upcomingMeetings: CalendarEventResponse[];
+  isLoading: boolean;
+}) {
+  if (isLoading) {
     return <Spinner size="sm" />;
   }
@@ -176,6 +176,8 @@ export function RoomTable({
   onEdit,
   onDelete,
   loading,
+  meetingStatusMap,
+  meetingStatusLoading,
 }: RoomTableProps) {
   const [syncingRooms, setSyncingRooms] = useState<Set<NonEmptyString>>(
     new Set(),
@@ -252,7 +254,15 @@ export function RoomTable({
                   <Link href={`/${room.name}`}>{room.name}</Link>
                 </Table.Cell>
                 <Table.Cell>
-                  <MeetingStatus roomName={room.name} />
+                  <MeetingStatus
+                    activeMeetings={
+                      meetingStatusMap[room.name]?.active_meetings ?? []
+                    }
+                    upcomingMeetings={
+                      meetingStatusMap[room.name]?.upcoming_events ?? []
+                    }
+                    isLoading={meetingStatusLoading}
+                  />
                 </Table.Cell>
                 <Table.Cell>
                   {getZulipDisplay(
@@ -37,24 +37,12 @@ jest.mock("../AuthProvider", () => ({
   }),
 }));
 
-// Recreate the batcher with a 0ms window. setTimeout(fn, 0) defers to the next
-// macrotask boundary — after all synchronous React rendering completes. All
-// useQuery queryFns fire within the same macrotask, so they all queue into one
-// batch before the timer fires. This is deterministic and avoids fake timers.
-jest.mock("../meetingStatusBatcher", () => {
-  const actual = jest.requireActual("../meetingStatusBatcher");
-  return {
-    ...actual,
-    meetingStatusBatcher: actual.createMeetingStatusBatcher(0),
-  };
-});
-
 // --- Imports (after mocks) ---
 
 import React from "react";
 import { render, waitFor, screen } from "@testing-library/react";
 import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
-import { useRoomActiveMeetings, useRoomUpcomingMeetings } from "../apiHooks";
+import { useRoomsBulkMeetingStatus, BulkMeetingStatusMap } from "../apiHooks";
 import { client } from "../apiClient";
 import { ErrorProvider } from "../../(errors)/errorContext";
@@ -83,33 +71,34 @@ function mockBulkStatusEndpoint(
   );
 }
 
-// --- Test component: renders N room cards, each using both hooks ---
+// --- Test component: uses the bulk hook and displays results ---
 
-function RoomCard({ roomName }: { roomName: string }) {
-  const active = useRoomActiveMeetings(roomName);
-  const upcoming = useRoomUpcomingMeetings(roomName);
+function BulkStatusDisplay({ roomNames }: { roomNames: string[] }) {
+  const { data, isLoading } = useRoomsBulkMeetingStatus(roomNames);
 
-  if (active.isLoading || upcoming.isLoading) {
-    return <div data-testid={`room-${roomName}`}>loading</div>;
+  if (isLoading) {
+    return <div data-testid="status">loading</div>;
+  }
+
+  if (!data) {
+    return <div data-testid="status">no data</div>;
   }
 
   return (
-    <div data-testid={`room-${roomName}`}>
-      {active.data?.length ?? 0} active, {upcoming.data?.length ?? 0} upcoming
+    <div data-testid="status">
+      {roomNames.map((name) => {
+        const status = data[name];
+        return (
+          <div key={name} data-testid={`room-${name}`}>
+            {status?.active_meetings?.length ?? 0} active,{" "}
+            {status?.upcoming_events?.length ?? 0} upcoming
+          </div>
+        );
+      })}
     </div>
   );
 }
 
-function RoomList({ roomNames }: { roomNames: string[] }) {
-  return (
-    <>
-      {roomNames.map((name) => (
-        <RoomCard key={name} roomName={name} />
-      ))}
-    </>
-  );
-}
-
 function createWrapper() {
   const queryClient = new QueryClient({
     defaultOptions: {
@@ -127,15 +116,17 @@ function createWrapper() {
 
 // --- Tests ---
 
-describe("meeting status batcher integration", () => {
+describe("bulk meeting status (prop-drilling)", () => {
   afterEach(() => jest.clearAllMocks());
 
-  it("batches multiple room queries into a single POST request", async () => {
+  it("fetches all room statuses in a single POST request", async () => {
     const rooms = Array.from({ length: 10 }, (_, i) => `room-${i}`);
 
     mockBulkStatusEndpoint();
 
-    render(<RoomList roomNames={rooms} />, { wrapper: createWrapper() });
+    render(<BulkStatusDisplay roomNames={rooms} />, {
+      wrapper: createWrapper(),
+    });
 
     await waitFor(() => {
       for (const name of rooms) {
@@ -149,21 +140,18 @@ describe("bulk meeting status (prop-drilling)", () => {
       ([path]: [string]) => path === "/v1/rooms/meetings/bulk-status",
     );
 
-    // Without batching this would be 20 calls (2 hooks x 10 rooms).
+    // Prop-drilling: exactly 1 POST for all rooms (no batcher needed)
    expect(postCalls).toHaveLength(1);
 
-    // The single call should contain all 10 rooms (deduplicated)
+    // The single call contains all room names
     const requestedRooms: string[] = postCalls[0][1].body.room_names;
     expect(requestedRooms).toHaveLength(10);
     for (const name of rooms) {
       expect(requestedRooms).toContain(name);
     }
   });
 
-  it("batcher fetcher returns room-specific data", async () => {
-    const {
-      meetingStatusBatcher: batcher,
-    } = require("../meetingStatusBatcher");
-
+  it("returns room-specific data correctly", async () => {
     mockBulkStatusEndpoint({
       "room-a": {
         active_meetings: [{ id: "m1", room_name: "room-a" }],
@@ -175,33 +163,7 @@ describe("bulk meeting status (prop-drilling)", () => {
       },
     });
 
-    const [resultA, resultB] = await Promise.all([
-      batcher.fetch("room-a"),
-      batcher.fetch("room-b"),
-    ]);
-
-    expect(mockClient.POST).toHaveBeenCalledTimes(1);
-    expect(resultA.active_meetings).toEqual([
-      { id: "m1", room_name: "room-a" },
-    ]);
-    expect(resultA.upcoming_events).toEqual([]);
-    expect(resultB.active_meetings).toEqual([]);
-    expect(resultB.upcoming_events).toEqual([{ id: "e1", title: "Standup" }]);
-  });
-
-  it("renders room-specific meeting data through hooks", async () => {
-    mockBulkStatusEndpoint({
-      "room-a": {
-        active_meetings: [{ id: "m1", room_name: "room-a" }],
-        upcoming_events: [],
-      },
-      "room-b": {
-        active_meetings: [],
-        upcoming_events: [{ id: "e1", title: "Standup" }],
-      },
-    });
-
-    render(<RoomList roomNames={["room-a", "room-b"]} />, {
+    render(<BulkStatusDisplay roomNames={["room-a", "room-b"]} />, {
       wrapper: createWrapper(),
     });
@@ -213,5 +175,72 @@ describe("bulk meeting status (prop-drilling)", () => {
       "0 active, 1 upcoming",
     );
   });
+
+    // Still just 1 POST
+    expect(mockClient.POST).toHaveBeenCalledTimes(1);
+  });
+
+  it("does not fetch when roomNames is empty", async () => {
+    mockBulkStatusEndpoint();
+
+    render(<BulkStatusDisplay roomNames={[]} />, {
+      wrapper: createWrapper(),
+    });
+
+    await waitFor(() => {
+      expect(screen.getByTestId("status")).toHaveTextContent("no data");
+    });
+
+    // No POST calls when no rooms
+    expect(mockClient.POST).not.toHaveBeenCalled();
+  });
+
+  it("surfaces error when POST fails", async () => {
+    mockClient.POST.mockResolvedValue({
+      data: undefined,
+      error: { detail: "server error" },
+      response: {},
+    });
+
+    function ErrorDisplay({ roomNames }: { roomNames: string[] }) {
+      const { error } = useRoomsBulkMeetingStatus(roomNames);
+      if (error) return <div data-testid="error">{error.message}</div>;
+      return <div data-testid="error">no error</div>;
+    }
+
+    render(<ErrorDisplay roomNames={["room-x"]} />, {
+      wrapper: createWrapper(),
+    });
+
+    await waitFor(() => {
+      expect(screen.getByTestId("error")).toHaveTextContent(
+        "bulk-status fetch failed",
+      );
+    });
+  });
+
+  it("does not fetch when unauthenticated", async () => {
+    // Override useAuth to return unauthenticated
+    const authModule = jest.requireMock("../AuthProvider");
+    const originalUseAuth = authModule.useAuth;
+    authModule.useAuth = () => ({
+      ...originalUseAuth(),
+      status: "unauthenticated",
+    });
+
+    mockBulkStatusEndpoint();
+
+    render(<BulkStatusDisplay roomNames={["room-1"]} />, {
+      wrapper: createWrapper(),
+    });
+
+    await waitFor(() => {
+      expect(screen.getByTestId("status")).toHaveTextContent("no data");
+    });
+
+    expect(mockClient.POST).not.toHaveBeenCalled();
+
+    // Restore
+    authModule.useAuth = originalUseAuth;
+  });
 });
@@ -1,11 +1,10 @@
 "use client";
 
-import { $api } from "./apiClient";
+import { $api, client } from "./apiClient";
 import { useError } from "../(errors)/errorContext";
 import { QueryClient, useQuery, useQueryClient } from "@tanstack/react-query";
 import type { components } from "../reflector-api";
 import { useAuth } from "./AuthProvider";
-import { meetingStatusBatcher } from "./meetingStatusBatcher";
 import { MeetingId } from "./types";
 import { NonEmptyString } from "./utils";
@@ -642,16 +641,21 @@ export function useMeetingDeactivate() {
       setError(error as Error, "Failed to end meeting");
     },
     onSuccess: () => {
-      return queryClient.invalidateQueries({
-        predicate: (query) => {
-          const key = query.queryKey;
-          return key.some(
-            (k) =>
-              typeof k === "string" &&
-              !!MEETING_LIST_PATH_PARTIALS.find((e) => k.includes(e)),
-          );
-        },
-      });
+      return Promise.all([
+        queryClient.invalidateQueries({
+          predicate: (query) => {
+            const key = query.queryKey;
+            return key.some(
+              (k) =>
+                typeof k === "string" &&
+                !!MEETING_LIST_PATH_PARTIALS.find((e) => k.includes(e)),
+            );
+          },
+        }),
+        queryClient.invalidateQueries({
+          queryKey: ["bulk-meeting-status"],
+        }),
+      ]);
     },
   });
 }
@@ -698,7 +702,18 @@ export function useRoomsCreateMeeting() {
           queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
         }),
         queryClient.invalidateQueries({
-          queryKey: meetingStatusKeys.active(roomName),
+          queryKey: $api.queryOptions(
+            "get",
+            "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
+            {
+              params: {
+                path: { room_name: roomName },
+              },
+            },
+          ).queryKey,
+        }),
+        queryClient.invalidateQueries({
+          queryKey: ["bulk-meeting-status"],
         }),
       ]);
     },
@@ -727,38 +742,67 @@ export function useRoomGetByName(roomName: string | null) {
 export function useRoomUpcomingMeetings(roomName: string | null) {
   const { isAuthenticated } = useAuthReady();
 
-  return useQuery({
-    queryKey: meetingStatusKeys.upcoming(roomName!),
-    queryFn: async () => {
-      const result = await meetingStatusBatcher.fetch(roomName!);
-      return result.upcoming_events;
-    },
-    enabled: !!roomName && isAuthenticated,
-  });
+  return $api.useQuery(
+    "get",
+    "/v1/rooms/{room_name}/meetings/upcoming" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_UPCOMING_PATH_PARTIAL}`,
+    {
+      params: {
+        path: { room_name: roomName! },
+      },
+    },
+    {
+      enabled: !!roomName && isAuthenticated,
+    },
+  );
 }
 
-// Query keys reuse $api.queryOptions so cache identity matches the original
-// per-room GET endpoints. The actual fetch goes through the batcher, but the
-// keys stay consistent with the rest of the codebase.
-const meetingStatusKeys = {
-  active: (roomName: string) =>
-    $api.queryOptions("get", "/v1/rooms/{room_name}/meetings/active", {
-      params: { path: { room_name: roomName } },
-    }).queryKey,
-  upcoming: (roomName: string) =>
-    $api.queryOptions("get", "/v1/rooms/{room_name}/meetings/upcoming", {
-      params: { path: { room_name: roomName } },
-    }).queryKey,
-};
-
 const MEETINGS_PATH_PARTIAL = "meetings" as const;
 const MEETINGS_ACTIVE_PATH_PARTIAL = `${MEETINGS_PATH_PARTIAL}/active` as const;
 const MEETINGS_UPCOMING_PATH_PARTIAL =
   `${MEETINGS_PATH_PARTIAL}/upcoming` as const;
 const MEETING_LIST_PATH_PARTIALS = [
   MEETINGS_ACTIVE_PATH_PARTIAL,
   MEETINGS_UPCOMING_PATH_PARTIAL,
 ];
 
 export function useRoomActiveMeetings(roomName: string | null) {
-  return useQuery({
-    queryKey: meetingStatusKeys.active(roomName!),
-    queryFn: async () => {
-      const result = await meetingStatusBatcher.fetch(roomName!);
-      return result.active_meetings;
-    },
-    enabled: !!roomName,
-  });
+  return $api.useQuery(
+    "get",
+    "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
+    {
+      params: {
+        path: { room_name: roomName! },
+      },
+    },
+    {
+      enabled: !!roomName,
+    },
+  );
 }
 
+type RoomMeetingStatus = components["schemas"]["RoomMeetingStatus"];
+
+export type BulkMeetingStatusMap = Partial<Record<string, RoomMeetingStatus>>;
+
+export function useRoomsBulkMeetingStatus(roomNames: string[]) {
+  const { isAuthenticated } = useAuthReady();
+  const sortedNames = [...roomNames].sort();
+
+  return useQuery({
+    queryKey: ["bulk-meeting-status", sortedNames],
+    queryFn: async (): Promise<BulkMeetingStatusMap> => {
+      const { data, error } = await client.POST(
+        "/v1/rooms/meetings/bulk-status",
+        { body: { room_names: roomNames } },
+      );
+      if (error || !data) {
+        throw new Error(
+          `bulk-status fetch failed: ${JSON.stringify(error ?? "no data")}`,
+        );
+      }
+      return data;
+    },
+    enabled: sortedNames.length > 0 && isAuthenticated,
+  });
+}
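Note the cache design in the new hook: the query key is `["bulk-meeting-status", sortedNames]`, while the mutation hooks above invalidate the bare `["bulk-meeting-status"]` prefix, which TanStack Query treats as matching every sorted-names variant at once. Sorting the names keeps the key stable when callers pass the same rooms in a different order.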
Deleted file (the meetingStatusBatcher module, replaced by the bulk hook above):
@@ -1,37 +0,0 @@
-import { create, keyResolver, windowScheduler } from "@yornaath/batshit";
-import { client } from "./apiClient";
-import type { components } from "../reflector-api";
-
-type MeetingStatusResult = {
-  roomName: string;
-  active_meetings: components["schemas"]["Meeting"][];
-  upcoming_events: components["schemas"]["CalendarEventResponse"][];
-};
-
-const BATCH_WINDOW_MS = 10;
-
-export function createMeetingStatusBatcher(windowMs: number = BATCH_WINDOW_MS) {
-  return create({
-    fetcher: async (roomNames: string[]): Promise<MeetingStatusResult[]> => {
-      const unique = [...new Set(roomNames)];
-      const { data, error } = await client.POST(
-        "/v1/rooms/meetings/bulk-status",
-        { body: { room_names: unique } },
-      );
-      if (error || !data) {
-        throw new Error(
-          `bulk-status fetch failed: ${JSON.stringify(error ?? "no data")}`,
-        );
-      }
-      return roomNames.map((name) => ({
-        roomName: name,
-        active_meetings: data[name]?.active_meetings ?? [],
-        upcoming_events: data[name]?.upcoming_events ?? [],
-      }));
-    },
-    resolver: keyResolver("roomName"),
-    scheduler: windowScheduler(windowMs),
-  });
-}
-
-export const meetingStatusBatcher = createMeetingStatusBatcher();
www/app/reflector-api.d.ts (vendored, 14 lines changed)
@@ -1697,6 +1697,13 @@ export interface components {
        */
       skip_consent: boolean;
     };
+    /** RoomMeetingStatus */
+    RoomMeetingStatus: {
+      /** Active Meetings */
+      active_meetings: components["schemas"]["Meeting"][];
+      /** Upcoming Events */
+      upcoming_events: components["schemas"]["CalendarEventResponse"][];
+    };
     /** RoomDetails */
     RoomDetails: {
       /** Id */
@@ -1757,13 +1764,6 @@ export interface components {
       /** Webhook Secret */
       webhook_secret: string | null;
     };
-    /** RoomMeetingStatus */
-    RoomMeetingStatus: {
-      /** Active Meetings */
-      active_meetings: components["schemas"]["Meeting"][];
-      /** Upcoming Events */
-      upcoming_events: components["schemas"]["CalendarEventResponse"][];
-    };
     /** RtcOffer */
     RtcOffer: {
       /** Sdp */
@@ -23,7 +23,6 @@
     "@tanstack/react-query": "^5.85.9",
     "@types/ioredis": "^5.0.0",
     "@whereby.com/browser-sdk": "^3.3.4",
-    "@yornaath/batshit": "^0.14.0",
     "autoprefixer": "10.4.20",
     "axios": "^1.8.2",
     "eslint": "^9.33.0",
www/pnpm-lock.yaml (generated, 21 lines changed)
@@ -37,9 +37,6 @@ importers:
       "@whereby.com/browser-sdk":
         specifier: ^3.3.4
         version: 3.13.1(@types/react@18.2.20)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
-      "@yornaath/batshit":
-        specifier: ^0.14.0
-        version: 0.14.0
       autoprefixer:
         specifier: 10.4.20
         version: 10.4.20(postcss@8.4.31)
@@ -3469,18 +3466,6 @@ packages:
       integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==,
     }
 
-  "@yornaath/batshit-devtools@1.7.1":
-    resolution:
-      {
-        integrity: sha512-AyttV1Njj5ug+XqEWY1smV45dTWMlWKtj1B8jcFYgBKUFyUlF/qEhD+iP1E5UaRYW6hQRYD9T2WNDwFTrOMWzQ==,
-      }
-
-  "@yornaath/batshit@0.14.0":
-    resolution:
-      {
-        integrity: sha512-0I+xMi5JoRs3+qVXXhk2AmsEl43MwrG+L+VW+nqw/qQqMFtgRPszLaxhJCfsBKnjfJ0gJzTI1Q9Q9+y903HyHQ==,
-      }
-
   "@zag-js/accordion@1.21.0":
     resolution:
       {
@@ -11927,12 +11912,6 @@ snapshots:
 
   "@xtuc/long@4.2.2": {}
 
-  "@yornaath/batshit-devtools@1.7.1": {}
-
-  "@yornaath/batshit@0.14.0":
-    dependencies:
-      "@yornaath/batshit-devtools": 1.7.1
-
   "@zag-js/accordion@1.21.0":
     dependencies:
       "@zag-js/anatomy": 1.21.0