Compare commits

...

5 Commits

Author SHA1 Message Date
Igor Loskutov
df6916385b fix: address review feedback
- Add PUBLIC_MODE auth guard on bulk-status endpoint
- Convert DB models to view models via model_validate()
- Early return when no accessible rooms (skip DB queries)
- BulkMeetingStatusMap: Partial<Record> for type honesty
- Sort roomNames in query key for cache stability (see the TypeScript sketch after this entry)
- Remove redundant empty-guard in queryFn
- Add 7 backend tests: auth, redaction, whereby host_room_url, 401, empty
- Add 2 frontend tests: error handling, unauthenticated case
2026-02-05 20:30:26 -05:00
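
A minimal TypeScript sketch of the Partial<Record> and query-key bullets above (shapes simplified; unknown[] stands in for the real Meeting and CalendarEventResponse types):

type RoomMeetingStatus = {
  active_meetings: unknown[];
  upcoming_events: unknown[];
};
// Partial<Record<...>> makes indexing yield `RoomMeetingStatus | undefined`,
// so callers must handle rooms missing from the map:
type BulkMeetingStatusMap = Partial<Record<string, RoomMeetingStatus>>;

const map: BulkMeetingStatusMap = {};
const active = map["room-a"]?.active_meetings ?? []; // compiler-enforced fallback

// Sorting names inside the query key keeps the cache key stable no matter
// what order rooms arrive in props:
const roomNames = ["b-room", "a-room"];
const queryKey = ["bulk-meeting-status", [...roomNames].sort()];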
Igor Loskutov
083a50cbcd fix: batch room meeting status queries via prop-drilling
Alternative to the batcher approach (#848): parent fetches all room
meeting statuses in a single bulk POST and passes data down as props.
No extra dependency (@yornaath/batshit), no implicit batching magic.

Backend: POST /v1/rooms/meetings/bulk-status + bulk DB methods
(contract sketched after this entry).
Frontend: useRoomsBulkMeetingStatus hook in RoomList, MeetingStatus
receives data as props instead of calling per-room hooks.
CI: fix pnpm 8→10 auto-detect, add concurrency group.
Tests: Jest+jsdom+testing-library for bulk hook.
2026-02-05 20:04:31 -05:00
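
A hedged sketch of the wire contract this commit introduces. Shapes follow the BulkStatusRequest and RoomMeetingStatus schemas in the diffs below; plain fetch and the room names are illustrative only — the app itself goes through its generated API client:

async function fetchBulkStatus(roomNames: string[]) {
  const resp = await fetch("/v1/rooms/meetings/bulk-status", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ room_names: roomNames }),
  });
  // Response is keyed by requested room name; rooms the caller cannot
  // see come back with empty lists rather than an error, so every
  // requested name is present in the map.
  return (await resp.json()) as Record<
    string,
    { active_meetings: unknown[]; upcoming_events: unknown[] }
  >;
}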
1ce1c7a910 fix: websocket tests (#825)
* fix websocket tests

* fix: restore timeout and fix celery test infrastructure

- Re-add timeout=1.0 to ws_manager pubsub loop (prevents CPU spin; see the sketch after this entry)
- Use Redis for Celery tests (memory:// broker doesn't support chords)
- Add timeout param to in-memory subscriber mock
- Remove duplicate celery_includes fixture from rtc_ws tests

* fix: remove redundant inline imports in test files

* fix: update gitleaks ignore for moved s3_key line

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-02-05 14:23:31 -05:00
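
An illustrative sketch of why the poll timeout matters. This is a TypeScript analogy; getMessage is a hypothetical stand-in for redis pubsub's get_message, not the actual API:

// Without a timeout, a non-blocking poll returns null immediately and the
// loop spins the CPU while idle. With a timeout, the call blocks up to
// timeoutMs, so an idle loop wakes about once per second instead.
async function readerLoop(
  getMessage: (timeoutMs: number) => Promise<string | null>,
) {
  while (true) {
    const message = await getMessage(1000);
    if (message !== null) {
      console.log("handle:", message);
    }
  }
}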
Rémi Pauchet
984795357e - fix nvidia repo blocked by apt (sha1) (#845)
- use build cache for apt and uv
- limit concurrency for uv to prevent crashes with too many cores
2026-02-05 13:59:34 -05:00
fa3cf5da0f chore(main): release 0.32.2 (#842) 2026-02-03 22:05:22 -05:00
22 changed files with 1577 additions and 89 deletions

View File

@@ -13,6 +13,9 @@ on:
jobs:
test-next-server:
runs-on: ubuntu-latest
concurrency:
group: test-next-server-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
@@ -21,17 +24,12 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'
- name: Install pnpm
uses: pnpm/action-setup@v4
with:
version: 8
package_json_file: './www/package.json'
- name: Setup Node.js cache
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '20'

View File

@@ -4,3 +4,4 @@ docs/docs/installation/daily-setup.md:curl-auth-header:277
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:74
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:83
server/reflector/worker/process.py:generic-api-key:465
server/reflector/worker/process.py:generic-api-key:594

View File

@@ -1,5 +1,14 @@
# Changelog
## [0.32.2](https://github.com/Monadical-SAS/reflector/compare/v0.32.1...v0.32.2) (2026-02-03)
### Bug Fixes
* increase TIMEOUT_MEDIUM from 2m to 5m for LLM tasks ([#843](https://github.com/Monadical-SAS/reflector/issues/843)) ([4acde4b](https://github.com/Monadical-SAS/reflector/commit/4acde4b7fdef88cc02ca12cf38c9020b05ed96ac))
* make caddy optional ([#841](https://github.com/Monadical-SAS/reflector/issues/841)) ([a2ed7d6](https://github.com/Monadical-SAS/reflector/commit/a2ed7d60d557b551a5b64e4dfd909b63a791d9fc))
* use Daily API recording.duration as master source for transcript duration ([#844](https://github.com/Monadical-SAS/reflector/issues/844)) ([8707c66](https://github.com/Monadical-SAS/reflector/commit/8707c6694a80c939b6214bbc13331741f192e082))
## [0.32.1](https://github.com/Monadical-SAS/reflector/compare/v0.32.0...v0.32.1) (2026-01-30)

View File

@@ -4,27 +4,31 @@ ENV PYTHONUNBUFFERED=1 \
UV_LINK_MODE=copy \
UV_NO_CACHE=1
# patch until nvidia updates the sha1 repo
ADD sequoia.config /etc/crypto-policies/back-ends/sequoia.config
WORKDIR /tmp
RUN apt-get update \
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt-get update \
&& apt-get install -y \
ffmpeg \
curl \
ca-certificates \
gnupg \
wget \
&& apt-get clean
wget
# Add NVIDIA CUDA repo for Debian 12 (bookworm) and install cuDNN 9 for CUDA 12
ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /cuda-keyring.deb
RUN dpkg -i /cuda-keyring.deb \
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
dpkg -i /cuda-keyring.deb \
&& rm /cuda-keyring.deb \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
cuda-cudart-12-6 \
libcublas-12-6 \
libcudnn9-cuda-12 \
libcudnn9-dev-cuda-12 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
libcudnn9-dev-cuda-12
ADD https://astral.sh/uv/install.sh /uv-installer.sh
RUN sh /uv-installer.sh && rm /uv-installer.sh
ENV PATH="/root/.local/bin/:$PATH"
@@ -39,6 +43,13 @@ COPY ./app /app/app
COPY ./main.py /app/
COPY ./runserver.sh /app/
# prevent uv failing with too many open files on big cpus
ENV UV_CONCURRENT_INSTALLS=16
# first install
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --compile-bytecode --locked
EXPOSE 8000
CMD ["sh", "/app/runserver.sh"]

View File

@@ -0,0 +1,2 @@
[hash_algorithms]
sha1 = "always"

View File

@@ -104,6 +104,26 @@ class CalendarEventController:
results = await get_database().fetch_all(query)
return [CalendarEvent(**result) for result in results]
async def get_upcoming_for_rooms(
self, room_ids: list[str], minutes_ahead: int = 120
) -> list[CalendarEvent]:
now = datetime.now(timezone.utc)
future_time = now + timedelta(minutes=minutes_ahead)
query = (
calendar_events.select()
.where(
sa.and_(
calendar_events.c.room_id.in_(room_ids),
calendar_events.c.is_deleted == False,
calendar_events.c.start_time <= future_time,
calendar_events.c.end_time >= now,
)
)
.order_by(calendar_events.c.start_time.asc())
)
results = await get_database().fetch_all(query)
return [CalendarEvent(**result) for result in results]
async def get_by_id(self, event_id: str) -> CalendarEvent | None:
query = calendar_events.select().where(calendar_events.c.id == event_id)
result = await get_database().fetch_one(query)

View File

@@ -301,6 +301,23 @@ class MeetingController:
results = await get_database().fetch_all(query)
return [Meeting(**result) for result in results]
async def get_all_active_for_rooms(
self, room_ids: list[str], current_time: datetime
) -> list[Meeting]:
query = (
meetings.select()
.where(
sa.and_(
meetings.c.room_id.in_(room_ids),
meetings.c.end_date > current_time,
meetings.c.is_active,
)
)
.order_by(meetings.c.end_date.desc())
)
results = await get_database().fetch_all(query)
return [Meeting(**result) for result in results]
async def get_active_by_calendar_event(
self, room: Room, calendar_event_id: str, current_time: datetime
) -> Meeting | None:

View File

@@ -245,6 +245,11 @@ class RoomController:
return room
async def get_by_names(self, names: list[str]) -> list[Room]:
query = rooms.select().where(rooms.c.name.in_(names))
results = await get_database().fetch_all(query)
return [Room(**r) for r in results]
async def get_ics_enabled(self) -> list[Room]:
query = rooms.select().where(
rooms.c.ics_enabled == True, rooms.c.ics_url != None

View File

@@ -1,4 +1,6 @@
import asyncio
import logging
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Annotated, Any, Literal, Optional
@@ -6,13 +8,14 @@ from typing import Annotated, Any, Literal, Optional
from fastapi import APIRouter, Depends, HTTPException
from fastapi_pagination import Page
from fastapi_pagination.ext.databases import apaginate
from pydantic import BaseModel
from pydantic import BaseModel, Field
from redis.exceptions import LockError
import reflector.auth as auth
from reflector.db import get_database
from reflector.db.calendar_events import calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import Room as DbRoom
from reflector.db.rooms import rooms_controller
from reflector.redis_cache import RedisAsyncLock
from reflector.schemas.platform import Platform
@@ -195,6 +198,82 @@ async def rooms_list(
return paginated
class BulkStatusRequest(BaseModel):
room_names: list[str] = Field(max_length=100)
class RoomMeetingStatus(BaseModel):
active_meetings: list[Meeting]
upcoming_events: list[CalendarEventResponse]
@router.post("/rooms/meetings/bulk-status", response_model=dict[str, RoomMeetingStatus])
async def rooms_bulk_meeting_status(
request: BulkStatusRequest,
user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
):
if not user and not settings.PUBLIC_MODE:
raise HTTPException(status_code=401, detail="Not authenticated")
user_id = user["sub"] if user else None
all_rooms = await rooms_controller.get_by_names(request.room_names)
# Filter to rooms the user can see (owned or shared), matching rooms_list behavior
rooms = [
r
for r in all_rooms
if r.is_shared or (user_id is not None and r.user_id == user_id)
]
room_by_id: dict[str, DbRoom] = {r.id: r for r in rooms}
room_ids = list(room_by_id.keys())
if not room_ids:
return {
name: RoomMeetingStatus(active_meetings=[], upcoming_events=[])
for name in request.room_names
}
current_time = datetime.now(timezone.utc)
active_meetings, upcoming_events = await asyncio.gather(
meetings_controller.get_all_active_for_rooms(room_ids, current_time),
calendar_events_controller.get_upcoming_for_rooms(room_ids),
)
# Group by room name, converting DB models to view models
active_by_room: dict[str, list[Meeting]] = defaultdict(list)
for m in active_meetings:
room = room_by_id.get(m.room_id)
if not room:
continue
m.platform = room.platform
if user_id != room.user_id and m.platform == "whereby":
m.host_room_url = ""
active_by_room[room.name].append(
Meeting.model_validate(m, from_attributes=True)
)
upcoming_by_room: dict[str, list[CalendarEventResponse]] = defaultdict(list)
for e in upcoming_events:
room = room_by_id.get(e.room_id)
if not room:
continue
if user_id != room.user_id:
e.description = None
e.attendees = None
upcoming_by_room[room.name].append(
CalendarEventResponse.model_validate(e, from_attributes=True)
)
result: dict[str, RoomMeetingStatus] = {}
for name in request.room_names:
result[name] = RoomMeetingStatus(
active_meetings=active_by_room.get(name, []),
upcoming_events=upcoming_by_room.get(name, []),
)
return result
@router.get("/rooms/{room_id}", response_model=RoomDetails)
async def rooms_get(
room_id: str,

View File

@@ -11,7 +11,6 @@ broadcast messages to all connected websockets.
import asyncio
import json
import threading
import redis.asyncio as redis
from fastapi import WebSocket
@@ -98,6 +97,7 @@ class WebsocketManager:
async def _pubsub_data_reader(self, pubsub_subscriber):
while True:
# timeout=1.0 prevents tight CPU loop when no messages available
message = await pubsub_subscriber.get_message(
ignore_subscribe_messages=True
)
@@ -109,29 +109,38 @@ class WebsocketManager:
await socket.send_json(data)
# Process-global singleton to ensure only one WebsocketManager instance exists.
# Multiple instances would cause resource leaks and CPU issues.
_ws_manager: WebsocketManager | None = None
def get_ws_manager() -> WebsocketManager:
"""
Returns the WebsocketManager instance for managing websockets.
Returns the global WebsocketManager singleton.
This function initializes and returns the WebsocketManager instance,
which is responsible for managing websockets and handling websocket
connections.
Creates instance on first call, subsequent calls return cached instance.
Thread-safe via GIL. Concurrent initialization may create duplicate
instances but last write wins (acceptable for this use case).
Returns:
WebsocketManager: The initialized WebsocketManager instance.
Raises:
ImportError: If the 'reflector.settings' module cannot be imported.
RedisConnectionError: If there is an error connecting to the Redis server.
WebsocketManager: The global WebsocketManager instance.
"""
local = threading.local()
if hasattr(local, "ws_manager"):
return local.ws_manager
global _ws_manager
if _ws_manager is not None:
return _ws_manager
# No lock needed - GIL makes this safe enough
# Worst case: race creates two instances, last assignment wins
pubsub_client = RedisPubSubManager(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
)
ws_manager = WebsocketManager(pubsub_client=pubsub_client)
local.ws_manager = ws_manager
return ws_manager
_ws_manager = WebsocketManager(pubsub_client=pubsub_client)
return _ws_manager
def reset_ws_manager() -> None:
"""Reset singleton for testing. DO NOT use in production."""
global _ws_manager
_ws_manager = None

View File

@@ -1,6 +1,5 @@
import os
from contextlib import asynccontextmanager
from tempfile import NamedTemporaryFile
from unittest.mock import patch
import pytest
@@ -333,10 +332,13 @@ def celery_enable_logging():
@pytest.fixture(scope="session")
def celery_config():
with NamedTemporaryFile() as f:
redis_host = os.environ.get("REDIS_HOST", "localhost")
redis_port = os.environ.get("REDIS_PORT", "6379")
# Use db 2 to avoid conflicts with main app
redis_url = f"redis://{redis_host}:{redis_port}/2"
yield {
"broker_url": "memory://",
"result_backend": f"db+sqlite:///{f.name}",
"broker_url": redis_url,
"result_backend": redis_url,
}
@@ -370,9 +372,12 @@ async def ws_manager_in_memory(monkeypatch):
def __init__(self, queue: asyncio.Queue):
self.queue = queue
async def get_message(self, ignore_subscribe_messages: bool = True):
async def get_message(
self, ignore_subscribe_messages: bool = True, timeout: float | None = None
):
wait_timeout = timeout if timeout is not None else 0.05
try:
return await asyncio.wait_for(self.queue.get(), timeout=0.05)
return await asyncio.wait_for(self.queue.get(), timeout=wait_timeout)
except Exception:
return None

View File

@@ -0,0 +1,184 @@
from datetime import datetime, timedelta, timezone
import pytest
from conftest import authenticated_client_ctx
from reflector.db.calendar_events import CalendarEvent, calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import Room, rooms_controller
from reflector.settings import settings
async def _create_room(name: str, user_id: str, is_shared: bool = False) -> Room:
return await rooms_controller.add(
name=name,
user_id=user_id,
zulip_auto_post=False,
zulip_stream="",
zulip_topic="",
is_locked=False,
room_mode="normal",
recording_type="cloud",
recording_trigger="automatic-2nd-participant",
is_shared=is_shared,
webhook_url="",
webhook_secret="",
)
async def _create_meeting(room: Room, active: bool = True):
now = datetime.now(timezone.utc)
return await meetings_controller.create(
id=f"meeting-{room.name}-{now.timestamp()}",
room_name=room.name,
room_url="room-url",
host_room_url="host-url",
start_date=now - timedelta(minutes=10),
end_date=now + timedelta(minutes=50) if active else now - timedelta(minutes=1),
room=room,
)
async def _create_calendar_event(room: Room):
now = datetime.now(timezone.utc)
return await calendar_events_controller.upsert(
CalendarEvent(
room_id=room.id,
ics_uid=f"event-{room.name}",
title=f"Upcoming in {room.name}",
description="secret description",
start_time=now + timedelta(minutes=30),
end_time=now + timedelta(minutes=90),
attendees=[{"name": "Alice", "email": "alice@example.com"}],
)
)
@pytest.mark.asyncio
async def test_bulk_status_returns_empty_for_no_rooms(client):
"""Empty room_names returns empty dict."""
async with authenticated_client_ctx():
resp = await client.post("/rooms/meetings/bulk-status", json={"room_names": []})
assert resp.status_code == 200
assert resp.json() == {}
@pytest.mark.asyncio
async def test_bulk_status_returns_active_meetings_and_upcoming_events(client):
"""Owner sees active meetings and upcoming events for their rooms."""
room = await _create_room("bulk-test-room", "randomuserid")
await _create_meeting(room, active=True)
await _create_calendar_event(room)
async with authenticated_client_ctx():
resp = await client.post(
"/rooms/meetings/bulk-status",
json={"room_names": ["bulk-test-room"]},
)
assert resp.status_code == 200
data = resp.json()
assert "bulk-test-room" in data
status = data["bulk-test-room"]
assert len(status["active_meetings"]) == 1
assert len(status["upcoming_events"]) == 1
# Owner sees description
assert status["upcoming_events"][0]["description"] == "secret description"
@pytest.mark.asyncio
async def test_bulk_status_redacts_data_for_non_owner(client):
"""Non-owner of a shared room gets redacted calendar events and no whereby host_room_url."""
room = await _create_room("shared-bulk", "other-user-id", is_shared=True)
await _create_meeting(room, active=True)
await _create_calendar_event(room)
# authenticated as "randomuserid" but room owned by "other-user-id"
async with authenticated_client_ctx():
resp = await client.post(
"/rooms/meetings/bulk-status",
json={"room_names": ["shared-bulk"]},
)
assert resp.status_code == 200
status = resp.json()["shared-bulk"]
assert len(status["active_meetings"]) == 1
assert len(status["upcoming_events"]) == 1
# Non-owner: description and attendees redacted
assert status["upcoming_events"][0]["description"] is None
assert status["upcoming_events"][0]["attendees"] is None
@pytest.mark.asyncio
async def test_bulk_status_filters_private_rooms_of_other_users(client):
"""User cannot see private rooms owned by others."""
await _create_room("private-other", "other-user-id", is_shared=False)
async with authenticated_client_ctx():
resp = await client.post(
"/rooms/meetings/bulk-status",
json={"room_names": ["private-other"]},
)
assert resp.status_code == 200
status = resp.json()["private-other"]
assert status["active_meetings"] == []
assert status["upcoming_events"] == []
@pytest.mark.asyncio
async def test_bulk_status_redacts_whereby_host_room_url_for_non_owner(client):
"""Non-owner of a shared whereby room gets empty host_room_url."""
room = await _create_room("shared-whereby", "other-user-id", is_shared=True)
# Force platform to whereby
from reflector.db import get_database
from reflector.db.rooms import rooms as rooms_table
await get_database().execute(
rooms_table.update()
.where(rooms_table.c.id == room.id)
.values(platform="whereby")
)
await _create_meeting(room, active=True)
async with authenticated_client_ctx():
resp = await client.post(
"/rooms/meetings/bulk-status",
json={"room_names": ["shared-whereby"]},
)
assert resp.status_code == 200
status = resp.json()["shared-whereby"]
assert len(status["active_meetings"]) == 1
assert status["active_meetings"][0]["host_room_url"] == ""
@pytest.mark.asyncio
async def test_bulk_status_unauthenticated_rejected_non_public(client):
"""Unauthenticated request on non-PUBLIC_MODE instance returns 401."""
original = settings.PUBLIC_MODE
try:
settings.PUBLIC_MODE = False
resp = await client.post(
"/rooms/meetings/bulk-status",
json={"room_names": ["any-room"]},
)
assert resp.status_code == 401
finally:
settings.PUBLIC_MODE = original
@pytest.mark.asyncio
async def test_bulk_status_nonexistent_room_returns_empty(client):
"""Requesting a room that doesn't exist returns empty lists."""
async with authenticated_client_ctx():
resp = await client.post(
"/rooms/meetings/bulk-status",
json={"room_names": ["does-not-exist"]},
)
assert resp.status_code == 200
status = resp.json()["does-not-exist"]
assert status["active_meetings"] == []
assert status["upcoming_events"] == []

View File

@@ -115,9 +115,7 @@ def appserver(tmpdir, setup_database, celery_session_app, celery_session_worker)
settings.DATA_DIR = DATA_DIR
@pytest.fixture(scope="session")
def celery_includes():
return ["reflector.pipelines.main_live_pipeline"]
# Using celery_includes from conftest.py which includes both pipelines
@pytest.mark.usefixtures("setup_database")

View File

@@ -56,7 +56,12 @@ def appserver_ws_user(setup_database):
if server_instance:
server_instance.should_exit = True
server_thread.join(timeout=30)
server_thread.join(timeout=2.0)
# Reset global singleton for test isolation
from reflector.ws_manager import reset_ws_manager
reset_ws_manager()
@pytest.fixture(autouse=True)
@@ -133,6 +138,8 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
# Connect and then trigger an event via HTTP create
async with aconnect_ws(base_ws, subprotocols=subprotocols) as ws:
await asyncio.sleep(0.2)
# Emit an event to the user's room via a standard HTTP action
from httpx import AsyncClient
@@ -150,6 +157,7 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
"email": "user-abc@example.com",
}
# Use in-memory client (global singleton makes it share ws_manager)
async with AsyncClient(app=app, base_url=f"http://{host}:{port}/v1") as ac:
# Create a transcript as this user so that the server publishes TRANSCRIPT_CREATED to user room
resp = await ac.post("/transcripts", json={"name": "WS Test"})

View File

@@ -1,5 +1,10 @@
import { useMemo } from "react";
import { Box, Heading, Text, VStack } from "@chakra-ui/react";
import type { components } from "../../../reflector-api";
import {
useRoomsBulkMeetingStatus,
BulkMeetingStatusMap,
} from "../../../lib/apiHooks";
type Room = components["schemas"]["Room"];
import { RoomTable } from "./RoomTable";
@@ -31,6 +36,10 @@ export function RoomList({
pt,
loading,
}: RoomListProps) {
const roomNames = useMemo(() => rooms.map((r) => r.name), [rooms]);
const bulkStatusQuery = useRoomsBulkMeetingStatus(roomNames);
const meetingStatusMap: BulkMeetingStatusMap = bulkStatusQuery.data ?? {};
return (
<VStack alignItems="start" gap={4} mb={mb} pt={pt}>
<Heading size="md">{title}</Heading>
@@ -43,6 +52,8 @@ export function RoomList({
onEdit={onEdit}
onDelete={onDelete}
loading={loading}
meetingStatusMap={meetingStatusMap}
meetingStatusLoading={bulkStatusQuery.isLoading}
/>
<RoomCards
rooms={rooms}

View File

@@ -14,11 +14,7 @@ import {
import { LuLink, LuRefreshCw } from "react-icons/lu";
import { FaCalendarAlt } from "react-icons/fa";
import type { components } from "../../../reflector-api";
import {
useRoomActiveMeetings,
useRoomUpcomingMeetings,
useRoomIcsSync,
} from "../../../lib/apiHooks";
import { useRoomIcsSync, BulkMeetingStatusMap } from "../../../lib/apiHooks";
type Room = components["schemas"]["Room"];
type Meeting = components["schemas"]["Meeting"];
@@ -62,6 +58,8 @@ interface RoomTableProps {
onEdit: (roomId: string, roomData: any) => void;
onDelete: (roomId: string) => void;
loading?: boolean;
meetingStatusMap: BulkMeetingStatusMap;
meetingStatusLoading: boolean;
}
const getRoomModeDisplay = (mode: string): string => {
@@ -104,14 +102,16 @@ const getZulipDisplay = (
return "Enabled";
};
function MeetingStatus({ roomName }: { roomName: string }) {
const activeMeetingsQuery = useRoomActiveMeetings(roomName);
const upcomingMeetingsQuery = useRoomUpcomingMeetings(roomName);
const activeMeetings = activeMeetingsQuery.data || [];
const upcomingMeetings = upcomingMeetingsQuery.data || [];
if (activeMeetingsQuery.isLoading || upcomingMeetingsQuery.isLoading) {
function MeetingStatus({
activeMeetings,
upcomingMeetings,
isLoading,
}: {
activeMeetings: Meeting[];
upcomingMeetings: CalendarEventResponse[];
isLoading: boolean;
}) {
if (isLoading) {
return <Spinner size="sm" />;
}
@@ -176,6 +176,8 @@ export function RoomTable({
onEdit,
onDelete,
loading,
meetingStatusMap,
meetingStatusLoading,
}: RoomTableProps) {
const [syncingRooms, setSyncingRooms] = useState<Set<NonEmptyString>>(
new Set(),
@@ -252,7 +254,15 @@ export function RoomTable({
<Link href={`/${room.name}`}>{room.name}</Link>
</Table.Cell>
<Table.Cell>
<MeetingStatus roomName={room.name} />
<MeetingStatus
activeMeetings={
meetingStatusMap[room.name]?.active_meetings ?? []
}
upcomingMeetings={
meetingStatusMap[room.name]?.upcoming_events ?? []
}
isLoading={meetingStatusLoading}
/>
</Table.Cell>
<Table.Cell>
{getZulipDisplay(

View File

@@ -0,0 +1,246 @@
import "@testing-library/jest-dom";
// --- Module mocks (hoisted before imports) ---
jest.mock("../apiClient", () => ({
client: {
GET: jest.fn(),
POST: jest.fn(),
PUT: jest.fn(),
PATCH: jest.fn(),
DELETE: jest.fn(),
use: jest.fn(),
},
$api: {
useQuery: jest.fn(),
useMutation: jest.fn(),
queryOptions: (method: string, path: string, init?: unknown) =>
init === undefined
? { queryKey: [method, path] }
: { queryKey: [method, path, init] },
},
API_URL: "http://test",
WEBSOCKET_URL: "ws://test",
configureApiAuth: jest.fn(),
}));
jest.mock("../AuthProvider", () => ({
useAuth: () => ({
status: "authenticated" as const,
accessToken: "test-token",
accessTokenExpires: Date.now() + 3600000,
user: { id: "user1", name: "Test User" },
update: jest.fn(),
signIn: jest.fn(),
signOut: jest.fn(),
lastUserId: "user1",
}),
}));
// --- Imports (after mocks) ---
import React from "react";
import { render, waitFor, screen } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { useRoomsBulkMeetingStatus, BulkMeetingStatusMap } from "../apiHooks";
import { client } from "../apiClient";
import { ErrorProvider } from "../../(errors)/errorContext";
const mockClient = client as { POST: jest.Mock };
// --- Helpers ---
function mockBulkStatusEndpoint(
roomData?: Record<
string,
{ active_meetings: unknown[]; upcoming_events: unknown[] }
>,
) {
mockClient.POST.mockImplementation(
async (_path: string, options: { body: { room_names: string[] } }) => {
const roomNames: string[] = options.body.room_names;
const src = roomData ?? {};
const data = Object.fromEntries(
roomNames.map((name) => [
name,
src[name] ?? { active_meetings: [], upcoming_events: [] },
]),
);
return { data, error: undefined, response: {} };
},
);
}
// --- Test component: uses the bulk hook and displays results ---
function BulkStatusDisplay({ roomNames }: { roomNames: string[] }) {
const { data, isLoading } = useRoomsBulkMeetingStatus(roomNames);
if (isLoading) {
return <div data-testid="status">loading</div>;
}
if (!data) {
return <div data-testid="status">no data</div>;
}
return (
<div data-testid="status">
{roomNames.map((name) => {
const status = data[name];
return (
<div key={name} data-testid={`room-${name}`}>
{status?.active_meetings?.length ?? 0} active,{" "}
{status?.upcoming_events?.length ?? 0} upcoming
</div>
);
})}
</div>
);
}
function createWrapper() {
const queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
},
});
return function Wrapper({ children }: { children: React.ReactNode }) {
return (
<QueryClientProvider client={queryClient}>
<ErrorProvider>{children}</ErrorProvider>
</QueryClientProvider>
);
};
}
// --- Tests ---
describe("bulk meeting status (prop-drilling)", () => {
afterEach(() => jest.clearAllMocks());
it("fetches all room statuses in a single POST request", async () => {
const rooms = Array.from({ length: 10 }, (_, i) => `room-${i}`);
mockBulkStatusEndpoint();
render(<BulkStatusDisplay roomNames={rooms} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
for (const name of rooms) {
expect(screen.getByTestId(`room-${name}`)).toHaveTextContent(
"0 active, 0 upcoming",
);
}
});
const postCalls = mockClient.POST.mock.calls.filter(
([path]: [string]) => path === "/v1/rooms/meetings/bulk-status",
);
// Prop-drilling: exactly 1 POST for all rooms (no batcher needed)
expect(postCalls).toHaveLength(1);
// The single call contains all room names
const requestedRooms: string[] = postCalls[0][1].body.room_names;
expect(requestedRooms).toHaveLength(10);
for (const name of rooms) {
expect(requestedRooms).toContain(name);
}
});
it("returns room-specific data correctly", async () => {
mockBulkStatusEndpoint({
"room-a": {
active_meetings: [{ id: "m1", room_name: "room-a" }],
upcoming_events: [],
},
"room-b": {
active_meetings: [],
upcoming_events: [{ id: "e1", title: "Standup" }],
},
});
render(<BulkStatusDisplay roomNames={["room-a", "room-b"]} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
expect(screen.getByTestId("room-room-a")).toHaveTextContent(
"1 active, 0 upcoming",
);
expect(screen.getByTestId("room-room-b")).toHaveTextContent(
"0 active, 1 upcoming",
);
});
// Still just 1 POST
expect(mockClient.POST).toHaveBeenCalledTimes(1);
});
it("does not fetch when roomNames is empty", async () => {
mockBulkStatusEndpoint();
render(<BulkStatusDisplay roomNames={[]} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
expect(screen.getByTestId("status")).toHaveTextContent("no data");
});
// No POST calls when no rooms
expect(mockClient.POST).not.toHaveBeenCalled();
});
it("surfaces error when POST fails", async () => {
mockClient.POST.mockResolvedValue({
data: undefined,
error: { detail: "server error" },
response: {},
});
function ErrorDisplay({ roomNames }: { roomNames: string[] }) {
const { error } = useRoomsBulkMeetingStatus(roomNames);
if (error) return <div data-testid="error">{error.message}</div>;
return <div data-testid="error">no error</div>;
}
render(<ErrorDisplay roomNames={["room-x"]} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
expect(screen.getByTestId("error")).toHaveTextContent(
"bulk-status fetch failed",
);
});
});
it("does not fetch when unauthenticated", async () => {
// Override useAuth to return unauthenticated
const authModule = jest.requireMock("../AuthProvider");
const originalUseAuth = authModule.useAuth;
authModule.useAuth = () => ({
...originalUseAuth(),
status: "unauthenticated",
});
mockBulkStatusEndpoint();
render(<BulkStatusDisplay roomNames={["room-1"]} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
expect(screen.getByTestId("status")).toHaveTextContent("no data");
});
expect(mockClient.POST).not.toHaveBeenCalled();
// Restore
authModule.useAuth = originalUseAuth;
});
});

View File

@@ -1,8 +1,8 @@
"use client";
import { $api } from "./apiClient";
import { $api, client } from "./apiClient";
import { useError } from "../(errors)/errorContext";
import { QueryClient, useQueryClient } from "@tanstack/react-query";
import { QueryClient, useQuery, useQueryClient } from "@tanstack/react-query";
import type { components } from "../reflector-api";
import { useAuth } from "./AuthProvider";
import { MeetingId } from "./types";
@@ -641,7 +641,8 @@ export function useMeetingDeactivate() {
setError(error as Error, "Failed to end meeting");
},
onSuccess: () => {
return queryClient.invalidateQueries({
return Promise.all([
queryClient.invalidateQueries({
predicate: (query) => {
const key = query.queryKey;
return key.some(
@@ -650,7 +651,11 @@ export function useMeetingDeactivate() {
!!MEETING_LIST_PATH_PARTIALS.find((e) => k.includes(e)),
);
},
});
}),
queryClient.invalidateQueries({
queryKey: ["bulk-meeting-status"],
}),
]);
},
});
}
@@ -707,6 +712,9 @@ export function useRoomsCreateMeeting() {
},
).queryKey,
}),
queryClient.invalidateQueries({
queryKey: ["bulk-meeting-status"],
}),
]);
},
onError: (error) => {
@@ -772,6 +780,32 @@ export function useRoomActiveMeetings(roomName: string | null) {
);
}
type RoomMeetingStatus = components["schemas"]["RoomMeetingStatus"];
export type BulkMeetingStatusMap = Partial<Record<string, RoomMeetingStatus>>;
export function useRoomsBulkMeetingStatus(roomNames: string[]) {
const { isAuthenticated } = useAuthReady();
const sortedNames = [...roomNames].sort();
return useQuery({
queryKey: ["bulk-meeting-status", sortedNames],
queryFn: async (): Promise<BulkMeetingStatusMap> => {
const { data, error } = await client.POST(
"/v1/rooms/meetings/bulk-status",
{ body: { room_names: roomNames } },
);
if (error || !data) {
throw new Error(
`bulk-status fetch failed: ${JSON.stringify(error ?? "no data")}`,
);
}
return data;
},
enabled: sortedNames.length > 0 && isAuthenticated,
});
}
export function useRoomGetMeeting(
roomName: string | null,
meetingId: MeetingId | null,

View File

@@ -118,6 +118,23 @@ export interface paths {
patch?: never;
trace?: never;
};
"/v1/rooms/meetings/bulk-status": {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
get?: never;
put?: never;
/** Rooms Bulk Meeting Status */
post: operations["v1_rooms_bulk_meeting_status"];
delete?: never;
options?: never;
head?: never;
patch?: never;
trace?: never;
};
"/v1/rooms/{room_id}": {
parameters: {
query?: never;
@@ -799,6 +816,11 @@ export interface components {
*/
chunk: string;
};
/** BulkStatusRequest */
BulkStatusRequest: {
/** Room Names */
room_names: string[];
};
/** CalendarEventResponse */
CalendarEventResponse: {
/** Id */
@@ -1675,6 +1697,13 @@ export interface components {
*/
skip_consent: boolean;
};
/** RoomMeetingStatus */
RoomMeetingStatus: {
/** Active Meetings */
active_meetings: components["schemas"]["Meeting"][];
/** Upcoming Events */
upcoming_events: components["schemas"]["CalendarEventResponse"][];
};
/** RoomDetails */
RoomDetails: {
/** Id */
@@ -2272,6 +2301,41 @@ export interface operations {
};
};
};
v1_rooms_bulk_meeting_status: {
parameters: {
query?: never;
header?: never;
path?: never;
cookie?: never;
};
requestBody: {
content: {
"application/json": components["schemas"]["BulkStatusRequest"];
};
};
responses: {
/** @description Successful Response */
200: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": {
[key: string]: components["schemas"]["RoomMeetingStatus"];
};
};
};
/** @description Validation Error */
422: {
headers: {
[name: string]: unknown;
};
content: {
"application/json": components["schemas"]["HTTPValidationError"];
};
};
};
};
v1_rooms_get: {
parameters: {
query?: never;

View File

@@ -1,8 +1,22 @@
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
testEnvironment: "jest-environment-jsdom",
roots: ["<rootDir>/app"],
testMatch: ["**/__tests__/**/*.test.ts"],
collectCoverage: true,
collectCoverageFrom: ["app/**/*.ts", "!app/**/*.d.ts"],
testMatch: ["**/__tests__/**/*.test.ts", "**/__tests__/**/*.test.tsx"],
collectCoverage: false,
transform: {
"^.+\\.[jt]sx?$": [
"ts-jest",
{
tsconfig: {
jsx: "react-jsx",
module: "esnext",
moduleResolution: "bundler",
esModuleInterop: true,
strict: true,
downlevelIteration: true,
lib: ["dom", "dom.iterable", "esnext"],
},
},
],
},
};

View File

@@ -61,9 +61,13 @@
"author": "Andreas <andreas@monadical.com>",
"license": "All Rights Reserved",
"devDependencies": {
"@testing-library/dom": "^10.4.1",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.2",
"@types/jest": "^30.0.0",
"@types/react": "18.2.20",
"jest": "^30.1.3",
"jest-environment-jsdom": "^30.2.0",
"openapi-typescript": "^7.9.1",
"prettier": "^3.0.0",
"ts-jest": "^29.4.1"

www/pnpm-lock.yaml (generated, 787 changes)

Diff suppressed because it is too large.