mirror of
https://github.com/Monadical-SAS/reflector.git
synced 2026-02-06 18:56:48 +00:00
Compare commits
4 Commits
brady-bunc
...
fix-presen
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
08462338de | ||
| 1ce1c7a910 | |||
|
|
984795357e | ||
| fa3cf5da0f |
@@ -4,3 +4,4 @@ docs/docs/installation/daily-setup.md:curl-auth-header:277
|
|||||||
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:74
|
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:74
|
||||||
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:83
|
gpu/self_hosted/DEV_SETUP.md:curl-auth-header:83
|
||||||
server/reflector/worker/process.py:generic-api-key:465
|
server/reflector/worker/process.py:generic-api-key:465
|
||||||
|
server/reflector/worker/process.py:generic-api-key:594
|
||||||
|
|||||||
@@ -1,5 +1,14 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## [0.32.2](https://github.com/Monadical-SAS/reflector/compare/v0.32.1...v0.32.2) (2026-02-03)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* increase TIMEOUT_MEDIUM from 2m to 5m for LLM tasks ([#843](https://github.com/Monadical-SAS/reflector/issues/843)) ([4acde4b](https://github.com/Monadical-SAS/reflector/commit/4acde4b7fdef88cc02ca12cf38c9020b05ed96ac))
|
||||||
|
* make caddy optional ([#841](https://github.com/Monadical-SAS/reflector/issues/841)) ([a2ed7d6](https://github.com/Monadical-SAS/reflector/commit/a2ed7d60d557b551a5b64e4dfd909b63a791d9fc))
|
||||||
|
* use Daily API recording.duration as master source for transcript duration ([#844](https://github.com/Monadical-SAS/reflector/issues/844)) ([8707c66](https://github.com/Monadical-SAS/reflector/commit/8707c6694a80c939b6214bbc13331741f192e082))
|
||||||
|
|
||||||
## [0.32.1](https://github.com/Monadical-SAS/reflector/compare/v0.32.0...v0.32.1) (2026-01-30)
|
## [0.32.1](https://github.com/Monadical-SAS/reflector/compare/v0.32.0...v0.32.1) (2026-01-30)
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -4,27 +4,31 @@ ENV PYTHONUNBUFFERED=1 \
|
|||||||
UV_LINK_MODE=copy \
|
UV_LINK_MODE=copy \
|
||||||
UV_NO_CACHE=1
|
UV_NO_CACHE=1
|
||||||
|
|
||||||
|
# patch until nvidia updates the sha1 repo
|
||||||
|
ADD sequoia.config /etc/crypto-policies/back-ends/sequoia.config
|
||||||
|
|
||||||
WORKDIR /tmp
|
WORKDIR /tmp
|
||||||
RUN apt-get update \
|
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
apt-get update \
|
||||||
&& apt-get install -y \
|
&& apt-get install -y \
|
||||||
ffmpeg \
|
ffmpeg \
|
||||||
curl \
|
curl \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
gnupg \
|
gnupg \
|
||||||
wget \
|
wget
|
||||||
&& apt-get clean
|
|
||||||
# Add NVIDIA CUDA repo for Debian 12 (bookworm) and install cuDNN 9 for CUDA 12
|
# Add NVIDIA CUDA repo for Debian 12 (bookworm) and install cuDNN 9 for CUDA 12
|
||||||
ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /cuda-keyring.deb
|
ADD https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_64/cuda-keyring_1.1-1_all.deb /cuda-keyring.deb
|
||||||
RUN dpkg -i /cuda-keyring.deb \
|
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
dpkg -i /cuda-keyring.deb \
|
||||||
&& rm /cuda-keyring.deb \
|
&& rm /cuda-keyring.deb \
|
||||||
&& apt-get update \
|
&& apt-get update \
|
||||||
&& apt-get install -y --no-install-recommends \
|
&& apt-get install -y --no-install-recommends \
|
||||||
cuda-cudart-12-6 \
|
cuda-cudart-12-6 \
|
||||||
libcublas-12-6 \
|
libcublas-12-6 \
|
||||||
libcudnn9-cuda-12 \
|
libcudnn9-cuda-12 \
|
||||||
libcudnn9-dev-cuda-12 \
|
libcudnn9-dev-cuda-12
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
ADD https://astral.sh/uv/install.sh /uv-installer.sh
|
ADD https://astral.sh/uv/install.sh /uv-installer.sh
|
||||||
RUN sh /uv-installer.sh && rm /uv-installer.sh
|
RUN sh /uv-installer.sh && rm /uv-installer.sh
|
||||||
ENV PATH="/root/.local/bin/:$PATH"
|
ENV PATH="/root/.local/bin/:$PATH"
|
||||||
@@ -39,6 +43,13 @@ COPY ./app /app/app
|
|||||||
COPY ./main.py /app/
|
COPY ./main.py /app/
|
||||||
COPY ./runserver.sh /app/
|
COPY ./runserver.sh /app/
|
||||||
|
|
||||||
|
# prevent uv failing with too many open files on big cpus
|
||||||
|
ENV UV_CONCURRENT_INSTALLS=16
|
||||||
|
|
||||||
|
# first install
|
||||||
|
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||||
|
uv sync --compile-bytecode --locked
|
||||||
|
|
||||||
EXPOSE 8000
|
EXPOSE 8000
|
||||||
|
|
||||||
CMD ["sh", "/app/runserver.sh"]
|
CMD ["sh", "/app/runserver.sh"]
|
||||||
|
|||||||
2
gpu/self_hosted/sequoia.config
Normal file
2
gpu/self_hosted/sequoia.config
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
[hash_algorithms]
|
||||||
|
sha1 = "always"
|
||||||
17
server/reflector/presence/__init__.py
Normal file
17
server/reflector/presence/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
"""Presence tracking for meetings."""
|
||||||
|
|
||||||
|
from reflector.presence.pending_joins import (
|
||||||
|
PENDING_JOIN_PREFIX,
|
||||||
|
PENDING_JOIN_TTL,
|
||||||
|
create_pending_join,
|
||||||
|
delete_pending_join,
|
||||||
|
has_pending_joins,
|
||||||
|
)
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"PENDING_JOIN_PREFIX",
|
||||||
|
"PENDING_JOIN_TTL",
|
||||||
|
"create_pending_join",
|
||||||
|
"delete_pending_join",
|
||||||
|
"has_pending_joins",
|
||||||
|
]
|
||||||
59
server/reflector/presence/pending_joins.py
Normal file
59
server/reflector/presence/pending_joins.py
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
"""Track pending join intents in Redis.
|
||||||
|
|
||||||
|
When a user signals intent to join a meeting (before WebRTC handshake completes),
|
||||||
|
we store a pending join record. This prevents the meeting from being deactivated
|
||||||
|
while users are still connecting.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import time
|
||||||
|
|
||||||
|
from redis.asyncio import Redis
|
||||||
|
|
||||||
|
from reflector.logger import logger
|
||||||
|
|
||||||
|
PENDING_JOIN_TTL = 30 # seconds
|
||||||
|
PENDING_JOIN_PREFIX = "pending_join"
|
||||||
|
# Max keys to scan per Redis SCAN iteration
|
||||||
|
SCAN_BATCH_SIZE = 100
|
||||||
|
|
||||||
|
|
||||||
|
async def create_pending_join(redis: Redis, meeting_id: str, user_id: str) -> None:
|
||||||
|
"""Create a pending join record. Called before WebRTC handshake."""
|
||||||
|
key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
|
||||||
|
log = logger.bind(meeting_id=meeting_id, user_id=user_id, key=key)
|
||||||
|
await redis.setex(key, PENDING_JOIN_TTL, str(time.time()))
|
||||||
|
log.debug("Created pending join")
|
||||||
|
|
||||||
|
|
||||||
|
async def delete_pending_join(redis: Redis, meeting_id: str, user_id: str) -> None:
|
||||||
|
"""Delete pending join. Called after WebRTC connection established."""
|
||||||
|
key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
|
||||||
|
log = logger.bind(meeting_id=meeting_id, user_id=user_id, key=key)
|
||||||
|
await redis.delete(key)
|
||||||
|
log.debug("Deleted pending join")
|
||||||
|
|
||||||
|
|
||||||
|
async def has_pending_joins(redis: Redis, meeting_id: str) -> bool:
|
||||||
|
"""Check if meeting has any pending joins.
|
||||||
|
|
||||||
|
Uses Redis SCAN to iterate through all keys matching the pattern.
|
||||||
|
Properly iterates until cursor returns 0 to ensure all keys are checked.
|
||||||
|
"""
|
||||||
|
pattern = f"{PENDING_JOIN_PREFIX}:{meeting_id}:*"
|
||||||
|
log = logger.bind(meeting_id=meeting_id, pattern=pattern)
|
||||||
|
|
||||||
|
cursor = 0
|
||||||
|
iterations = 0
|
||||||
|
while True:
|
||||||
|
cursor, keys = await redis.scan(
|
||||||
|
cursor=cursor, match=pattern, count=SCAN_BATCH_SIZE
|
||||||
|
)
|
||||||
|
iterations += 1
|
||||||
|
if keys:
|
||||||
|
log.debug("Found pending joins", count=len(keys), iterations=iterations)
|
||||||
|
return True
|
||||||
|
if cursor == 0:
|
||||||
|
break
|
||||||
|
|
||||||
|
log.debug("No pending joins found", iterations=iterations)
|
||||||
|
return False
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
import logging
|
|
||||||
from datetime import datetime, timedelta, timezone
|
from datetime import datetime, timedelta, timezone
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
from typing import Annotated, Any, Literal, Optional
|
from typing import Annotated, Any, Literal, Optional
|
||||||
@@ -14,16 +13,18 @@ from reflector.db import get_database
|
|||||||
from reflector.db.calendar_events import calendar_events_controller
|
from reflector.db.calendar_events import calendar_events_controller
|
||||||
from reflector.db.meetings import meetings_controller
|
from reflector.db.meetings import meetings_controller
|
||||||
from reflector.db.rooms import rooms_controller
|
from reflector.db.rooms import rooms_controller
|
||||||
from reflector.redis_cache import RedisAsyncLock
|
from reflector.logger import logger
|
||||||
|
from reflector.presence.pending_joins import create_pending_join, delete_pending_join
|
||||||
|
from reflector.redis_cache import RedisAsyncLock, get_async_redis_client
|
||||||
from reflector.schemas.platform import Platform
|
from reflector.schemas.platform import Platform
|
||||||
from reflector.services.ics_sync import ics_sync_service
|
from reflector.services.ics_sync import ics_sync_service
|
||||||
from reflector.settings import settings
|
from reflector.settings import settings
|
||||||
|
from reflector.utils.string import NonEmptyString
|
||||||
from reflector.utils.url import add_query_param
|
from reflector.utils.url import add_query_param
|
||||||
from reflector.video_platforms.factory import create_platform_client
|
from reflector.video_platforms.factory import create_platform_client
|
||||||
|
from reflector.worker.process import poll_daily_room_presence_task
|
||||||
from reflector.worker.webhook import test_webhook
|
from reflector.worker.webhook import test_webhook
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class Room(BaseModel):
|
class Room(BaseModel):
|
||||||
id: str
|
id: str
|
||||||
@@ -597,3 +598,158 @@ async def rooms_join_meeting(
|
|||||||
meeting.room_url = add_query_param(meeting.room_url, "t", token)
|
meeting.room_url = add_query_param(meeting.room_url, "t", token)
|
||||||
|
|
||||||
return meeting
|
return meeting
|
||||||
|
|
||||||
|
|
||||||
|
class JoiningRequest(BaseModel):
|
||||||
|
"""Request body for /joining endpoint (before WebRTC handshake)."""
|
||||||
|
|
||||||
|
connection_id: NonEmptyString
|
||||||
|
"""Unique identifier for this connection. Generated by client via crypto.randomUUID()."""
|
||||||
|
|
||||||
|
|
||||||
|
class JoiningResponse(BaseModel):
|
||||||
|
status: Literal["ok"]
|
||||||
|
|
||||||
|
|
||||||
|
class JoinedRequest(BaseModel):
|
||||||
|
"""Request body for /joined endpoint (after WebRTC connection established)."""
|
||||||
|
|
||||||
|
connection_id: NonEmptyString
|
||||||
|
"""Must match the connection_id sent to /joining."""
|
||||||
|
|
||||||
|
|
||||||
|
class JoinedResponse(BaseModel):
|
||||||
|
status: Literal["ok"]
|
||||||
|
|
||||||
|
|
||||||
|
def _get_pending_join_key(
|
||||||
|
user: Optional[auth.UserInfo], connection_id: NonEmptyString
|
||||||
|
) -> str:
|
||||||
|
"""Get a unique key for pending join tracking.
|
||||||
|
|
||||||
|
Uses user ID for authenticated users, connection_id for anonymous users.
|
||||||
|
This ensures each browser tab has its own unique pending join record.
|
||||||
|
"""
|
||||||
|
if user:
|
||||||
|
return f"{user['sub']}:{connection_id}"
|
||||||
|
return f"anon:{connection_id}"
|
||||||
|
|
||||||
|
|
||||||
|
@router.post(
|
||||||
|
"/rooms/{room_name}/meetings/{meeting_id}/joining", response_model=JoiningResponse
|
||||||
|
)
|
||||||
|
async def meeting_joining(
|
||||||
|
room_name: str,
|
||||||
|
meeting_id: str,
|
||||||
|
body: JoiningRequest,
|
||||||
|
user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
|
||||||
|
) -> JoiningResponse:
|
||||||
|
"""Signal intent to join meeting. Called before WebRTC handshake starts.
|
||||||
|
|
||||||
|
This creates a pending join record that prevents the meeting from being
|
||||||
|
deactivated while the WebRTC handshake is in progress. The record expires
|
||||||
|
automatically after 30 seconds if the connection is not established.
|
||||||
|
"""
|
||||||
|
log = logger.bind(
|
||||||
|
room_name=room_name, meeting_id=meeting_id, connection_id=body.connection_id
|
||||||
|
)
|
||||||
|
|
||||||
|
room = await rooms_controller.get_by_name(room_name)
|
||||||
|
if not room:
|
||||||
|
raise HTTPException(status_code=404, detail="Room not found")
|
||||||
|
|
||||||
|
meeting = await meetings_controller.get_by_id(meeting_id, room=room)
|
||||||
|
if not meeting:
|
||||||
|
raise HTTPException(status_code=404, detail="Meeting not found")
|
||||||
|
|
||||||
|
if not meeting.is_active:
|
||||||
|
raise HTTPException(status_code=400, detail="Meeting is not active")
|
||||||
|
|
||||||
|
join_key = _get_pending_join_key(user, body.connection_id)
|
||||||
|
|
||||||
|
redis = await get_async_redis_client()
|
||||||
|
try:
|
||||||
|
await create_pending_join(redis, meeting_id, join_key)
|
||||||
|
log.debug("Created pending join intent", join_key=join_key)
|
||||||
|
finally:
|
||||||
|
await redis.aclose()
|
||||||
|
|
||||||
|
return JoiningResponse(status="ok")
|
||||||
|
|
||||||
|
|
||||||
|
@router.post(
|
||||||
|
"/rooms/{room_name}/meetings/{meeting_id}/joined", response_model=JoinedResponse
|
||||||
|
)
|
||||||
|
async def meeting_joined(
|
||||||
|
room_name: str,
|
||||||
|
meeting_id: str,
|
||||||
|
body: JoinedRequest,
|
||||||
|
user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
|
||||||
|
) -> JoinedResponse:
|
||||||
|
"""Signal that WebRTC connection is established.
|
||||||
|
|
||||||
|
This clears the pending join record, confirming the user has successfully
|
||||||
|
connected to the meeting. Safe to call even if meeting was deactivated
|
||||||
|
during the handshake (idempotent cleanup).
|
||||||
|
"""
|
||||||
|
log = logger.bind(
|
||||||
|
room_name=room_name, meeting_id=meeting_id, connection_id=body.connection_id
|
||||||
|
)
|
||||||
|
|
||||||
|
room = await rooms_controller.get_by_name(room_name)
|
||||||
|
if not room:
|
||||||
|
raise HTTPException(status_code=404, detail="Room not found")
|
||||||
|
|
||||||
|
meeting = await meetings_controller.get_by_id(meeting_id, room=room)
|
||||||
|
if not meeting:
|
||||||
|
raise HTTPException(status_code=404, detail="Meeting not found")
|
||||||
|
|
||||||
|
# Note: We don't check is_active here - the /joined call is a cleanup operation
|
||||||
|
# and should succeed even if the meeting was deactivated during the handshake
|
||||||
|
|
||||||
|
join_key = _get_pending_join_key(user, body.connection_id)
|
||||||
|
|
||||||
|
redis = await get_async_redis_client()
|
||||||
|
try:
|
||||||
|
await delete_pending_join(redis, meeting_id, join_key)
|
||||||
|
log.debug("Cleared pending join intent", join_key=join_key)
|
||||||
|
finally:
|
||||||
|
await redis.aclose()
|
||||||
|
|
||||||
|
# Trigger presence poll to detect the new participant faster than periodic poll
|
||||||
|
if meeting.platform == "daily":
|
||||||
|
poll_daily_room_presence_task.delay(meeting_id)
|
||||||
|
|
||||||
|
return JoinedResponse(status="ok")
|
||||||
|
|
||||||
|
|
||||||
|
class LeaveResponse(BaseModel):
|
||||||
|
status: Literal["ok"]
|
||||||
|
|
||||||
|
|
||||||
|
@router.post(
|
||||||
|
"/rooms/{room_name}/meetings/{meeting_id}/leave", response_model=LeaveResponse
|
||||||
|
)
|
||||||
|
async def meeting_leave(
|
||||||
|
room_name: str,
|
||||||
|
meeting_id: str,
|
||||||
|
user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
|
||||||
|
) -> LeaveResponse:
|
||||||
|
"""Trigger presence recheck when user leaves meeting.
|
||||||
|
|
||||||
|
Called on tab close/navigation via sendBeacon(). Immediately queues presence
|
||||||
|
poll to detect dirty disconnects faster than 30s periodic poll.
|
||||||
|
Daily.co webhooks handle clean disconnects, but tab close/crash need this.
|
||||||
|
"""
|
||||||
|
room = await rooms_controller.get_by_name(room_name)
|
||||||
|
if not room:
|
||||||
|
raise HTTPException(status_code=404, detail="Room not found")
|
||||||
|
|
||||||
|
meeting = await meetings_controller.get_by_id(meeting_id, room=room)
|
||||||
|
if not meeting:
|
||||||
|
raise HTTPException(status_code=404, detail="Meeting not found")
|
||||||
|
|
||||||
|
if meeting.platform == "daily":
|
||||||
|
poll_daily_room_presence_task.delay(meeting_id)
|
||||||
|
|
||||||
|
return LeaveResponse(status="ok")
|
||||||
|
|||||||
@@ -31,9 +31,10 @@ from reflector.pipelines.main_multitrack_pipeline import (
|
|||||||
task_pipeline_multitrack_process,
|
task_pipeline_multitrack_process,
|
||||||
)
|
)
|
||||||
from reflector.pipelines.topic_processing import EmptyPipeline
|
from reflector.pipelines.topic_processing import EmptyPipeline
|
||||||
|
from reflector.presence.pending_joins import has_pending_joins
|
||||||
from reflector.processors import AudioFileWriterProcessor
|
from reflector.processors import AudioFileWriterProcessor
|
||||||
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
|
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
|
||||||
from reflector.redis_cache import RedisAsyncLock
|
from reflector.redis_cache import RedisAsyncLock, get_async_redis_client
|
||||||
from reflector.settings import settings
|
from reflector.settings import settings
|
||||||
from reflector.storage import get_transcripts_storage
|
from reflector.storage import get_transcripts_storage
|
||||||
from reflector.utils.daily import (
|
from reflector.utils.daily import (
|
||||||
@@ -869,6 +870,18 @@ async def process_meetings():
|
|||||||
logger_.debug("Meeting not yet started, keep it")
|
logger_.debug("Meeting not yet started, keep it")
|
||||||
|
|
||||||
if should_deactivate:
|
if should_deactivate:
|
||||||
|
# Check for pending joins before deactivating
|
||||||
|
# Users might be in the process of connecting via WebRTC
|
||||||
|
redis = await get_async_redis_client()
|
||||||
|
try:
|
||||||
|
if await has_pending_joins(redis, meeting.id):
|
||||||
|
logger_.info(
|
||||||
|
"Meeting has pending joins, skipping deactivation"
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
finally:
|
||||||
|
await redis.aclose()
|
||||||
|
|
||||||
await meetings_controller.update_meeting(
|
await meetings_controller.update_meeting(
|
||||||
meeting.id, is_active=False
|
meeting.id, is_active=False
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -11,7 +11,6 @@ broadcast messages to all connected websockets.
|
|||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
import json
|
import json
|
||||||
import threading
|
|
||||||
|
|
||||||
import redis.asyncio as redis
|
import redis.asyncio as redis
|
||||||
from fastapi import WebSocket
|
from fastapi import WebSocket
|
||||||
@@ -98,6 +97,7 @@ class WebsocketManager:
|
|||||||
|
|
||||||
async def _pubsub_data_reader(self, pubsub_subscriber):
|
async def _pubsub_data_reader(self, pubsub_subscriber):
|
||||||
while True:
|
while True:
|
||||||
|
# timeout=1.0 prevents tight CPU loop when no messages available
|
||||||
message = await pubsub_subscriber.get_message(
|
message = await pubsub_subscriber.get_message(
|
||||||
ignore_subscribe_messages=True
|
ignore_subscribe_messages=True
|
||||||
)
|
)
|
||||||
@@ -109,29 +109,38 @@ class WebsocketManager:
|
|||||||
await socket.send_json(data)
|
await socket.send_json(data)
|
||||||
|
|
||||||
|
|
||||||
|
# Process-global singleton to ensure only one WebsocketManager instance exists.
|
||||||
|
# Multiple instances would cause resource leaks and CPU issues.
|
||||||
|
_ws_manager: WebsocketManager | None = None
|
||||||
|
|
||||||
|
|
||||||
def get_ws_manager() -> WebsocketManager:
|
def get_ws_manager() -> WebsocketManager:
|
||||||
"""
|
"""
|
||||||
Returns the WebsocketManager instance for managing websockets.
|
Returns the global WebsocketManager singleton.
|
||||||
|
|
||||||
This function initializes and returns the WebsocketManager instance,
|
Creates instance on first call, subsequent calls return cached instance.
|
||||||
which is responsible for managing websockets and handling websocket
|
Thread-safe via GIL. Concurrent initialization may create duplicate
|
||||||
connections.
|
instances but last write wins (acceptable for this use case).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
WebsocketManager: The initialized WebsocketManager instance.
|
WebsocketManager: The global WebsocketManager instance.
|
||||||
|
|
||||||
Raises:
|
|
||||||
ImportError: If the 'reflector.settings' module cannot be imported.
|
|
||||||
RedisConnectionError: If there is an error connecting to the Redis server.
|
|
||||||
"""
|
"""
|
||||||
local = threading.local()
|
global _ws_manager
|
||||||
if hasattr(local, "ws_manager"):
|
|
||||||
return local.ws_manager
|
|
||||||
|
|
||||||
|
if _ws_manager is not None:
|
||||||
|
return _ws_manager
|
||||||
|
|
||||||
|
# No lock needed - GIL makes this safe enough
|
||||||
|
# Worst case: race creates two instances, last assignment wins
|
||||||
pubsub_client = RedisPubSubManager(
|
pubsub_client = RedisPubSubManager(
|
||||||
host=settings.REDIS_HOST,
|
host=settings.REDIS_HOST,
|
||||||
port=settings.REDIS_PORT,
|
port=settings.REDIS_PORT,
|
||||||
)
|
)
|
||||||
ws_manager = WebsocketManager(pubsub_client=pubsub_client)
|
_ws_manager = WebsocketManager(pubsub_client=pubsub_client)
|
||||||
local.ws_manager = ws_manager
|
return _ws_manager
|
||||||
return ws_manager
|
|
||||||
|
|
||||||
|
def reset_ws_manager() -> None:
|
||||||
|
"""Reset singleton for testing. DO NOT use in production."""
|
||||||
|
global _ws_manager
|
||||||
|
_ws_manager = None
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
import os
|
import os
|
||||||
from contextlib import asynccontextmanager
|
from contextlib import asynccontextmanager
|
||||||
from tempfile import NamedTemporaryFile
|
|
||||||
from unittest.mock import patch
|
from unittest.mock import patch
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
@@ -333,10 +332,13 @@ def celery_enable_logging():
|
|||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
def celery_config():
|
def celery_config():
|
||||||
with NamedTemporaryFile() as f:
|
redis_host = os.environ.get("REDIS_HOST", "localhost")
|
||||||
|
redis_port = os.environ.get("REDIS_PORT", "6379")
|
||||||
|
# Use db 2 to avoid conflicts with main app
|
||||||
|
redis_url = f"redis://{redis_host}:{redis_port}/2"
|
||||||
yield {
|
yield {
|
||||||
"broker_url": "memory://",
|
"broker_url": redis_url,
|
||||||
"result_backend": f"db+sqlite:///{f.name}",
|
"result_backend": redis_url,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -370,9 +372,12 @@ async def ws_manager_in_memory(monkeypatch):
|
|||||||
def __init__(self, queue: asyncio.Queue):
|
def __init__(self, queue: asyncio.Queue):
|
||||||
self.queue = queue
|
self.queue = queue
|
||||||
|
|
||||||
async def get_message(self, ignore_subscribe_messages: bool = True):
|
async def get_message(
|
||||||
|
self, ignore_subscribe_messages: bool = True, timeout: float | None = None
|
||||||
|
):
|
||||||
|
wait_timeout = timeout if timeout is not None else 0.05
|
||||||
try:
|
try:
|
||||||
return await asyncio.wait_for(self.queue.get(), timeout=0.05)
|
return await asyncio.wait_for(self.queue.get(), timeout=wait_timeout)
|
||||||
except Exception:
|
except Exception:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|||||||
367
server/tests/test_joining_endpoint.py
Normal file
367
server/tests/test_joining_endpoint.py
Normal file
@@ -0,0 +1,367 @@
|
|||||||
|
"""Integration tests for /joining and /joined endpoints.
|
||||||
|
|
||||||
|
Tests for the join intent tracking to prevent race conditions during
|
||||||
|
WebRTC handshake when users join meetings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime, timedelta, timezone
|
||||||
|
from unittest.mock import AsyncMock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from reflector.db.meetings import Meeting
|
||||||
|
from reflector.presence.pending_joins import PENDING_JOIN_PREFIX
|
||||||
|
|
||||||
|
TEST_CONNECTION_ID = "test-connection-uuid-12345"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_room():
|
||||||
|
"""Mock room object."""
|
||||||
|
from reflector.db.rooms import Room
|
||||||
|
|
||||||
|
return Room(
|
||||||
|
id="room-123",
|
||||||
|
name="test-room",
|
||||||
|
user_id="owner-user",
|
||||||
|
created_at=datetime.now(timezone.utc),
|
||||||
|
zulip_auto_post=False,
|
||||||
|
zulip_stream="",
|
||||||
|
zulip_topic="",
|
||||||
|
is_locked=False,
|
||||||
|
room_mode="normal",
|
||||||
|
recording_type="cloud",
|
||||||
|
recording_trigger="automatic",
|
||||||
|
is_shared=True,
|
||||||
|
platform="daily",
|
||||||
|
skip_consent=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_meeting():
|
||||||
|
"""Mock meeting object."""
|
||||||
|
now = datetime.now(timezone.utc)
|
||||||
|
return Meeting(
|
||||||
|
id="meeting-456",
|
||||||
|
room_id="room-123",
|
||||||
|
room_name="test-room-20251118120000",
|
||||||
|
room_url="https://daily.co/test-room-20251118120000",
|
||||||
|
host_room_url="https://daily.co/test-room-20251118120000?t=host",
|
||||||
|
platform="daily",
|
||||||
|
num_clients=0,
|
||||||
|
is_active=True,
|
||||||
|
start_date=now,
|
||||||
|
end_date=now + timedelta(hours=1),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
@patch("reflector.views.rooms.get_async_redis_client")
|
||||||
|
async def test_joining_endpoint_creates_pending_join(
|
||||||
|
mock_get_redis,
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that /joining endpoint creates pending join in Redis."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = mock_meeting
|
||||||
|
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.setex = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
response = await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
assert response.json() == {"status": "ok"}
|
||||||
|
|
||||||
|
# Verify Redis setex was called with correct key pattern
|
||||||
|
mock_redis.setex.assert_called_once()
|
||||||
|
call_args = mock_redis.setex.call_args[0]
|
||||||
|
assert call_args[0].startswith(f"{PENDING_JOIN_PREFIX}:{mock_meeting.id}:")
|
||||||
|
assert TEST_CONNECTION_ID in call_args[0]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.poll_daily_room_presence_task")
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
@patch("reflector.views.rooms.get_async_redis_client")
|
||||||
|
async def test_joined_endpoint_deletes_pending_join(
|
||||||
|
mock_get_redis,
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_poll_task,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that /joined endpoint deletes pending join from Redis."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = mock_meeting
|
||||||
|
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.delete = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
response = await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joined",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
assert response.json() == {"status": "ok"}
|
||||||
|
|
||||||
|
# Verify Redis delete was called with correct key pattern
|
||||||
|
mock_redis.delete.assert_called_once()
|
||||||
|
call_args = mock_redis.delete.call_args[0]
|
||||||
|
assert call_args[0].startswith(f"{PENDING_JOIN_PREFIX}:{mock_meeting.id}:")
|
||||||
|
assert TEST_CONNECTION_ID in call_args[0]
|
||||||
|
|
||||||
|
# Verify presence poll was triggered for Daily meetings
|
||||||
|
mock_poll_task.delay.assert_called_once_with(mock_meeting.id)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
async def test_joining_endpoint_room_not_found(
|
||||||
|
mock_get_room,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that /joining returns 404 when room not found."""
|
||||||
|
mock_get_room.return_value = None
|
||||||
|
|
||||||
|
response = await client.post(
|
||||||
|
"/rooms/nonexistent-room/meetings/meeting-123/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 404
|
||||||
|
assert response.json()["detail"] == "Room not found"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
async def test_joining_endpoint_meeting_not_found(
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that /joining returns 404 when meeting not found."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = None
|
||||||
|
|
||||||
|
response = await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/nonexistent-meeting/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 404
|
||||||
|
assert response.json()["detail"] == "Meeting not found"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
async def test_joining_endpoint_meeting_not_active(
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that /joining returns 400 when meeting is not active."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
inactive_meeting = mock_meeting.model_copy(update={"is_active": False})
|
||||||
|
mock_get_meeting.return_value = inactive_meeting
|
||||||
|
|
||||||
|
response = await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 400
|
||||||
|
assert response.json()["detail"] == "Meeting is not active"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
@patch("reflector.views.rooms.get_async_redis_client")
|
||||||
|
async def test_joining_endpoint_anonymous_user(
|
||||||
|
mock_get_redis,
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
):
|
||||||
|
"""Test that /joining works for anonymous users with unique connection_id."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = mock_meeting
|
||||||
|
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.setex = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
response = await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 200
|
||||||
|
assert response.json() == {"status": "ok"}
|
||||||
|
|
||||||
|
# Verify Redis setex was called with "anon:" prefix and connection_id
|
||||||
|
call_args = mock_redis.setex.call_args[0]
|
||||||
|
assert ":anon:" in call_args[0]
|
||||||
|
assert TEST_CONNECTION_ID in call_args[0]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
@patch("reflector.views.rooms.get_async_redis_client")
|
||||||
|
async def test_joining_endpoint_redis_closed_on_success(
|
||||||
|
mock_get_redis,
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that Redis connection is closed after successful operation."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = mock_meeting
|
||||||
|
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.setex = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_redis.aclose.assert_called_once()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
@patch("reflector.views.rooms.get_async_redis_client")
|
||||||
|
async def test_joining_endpoint_redis_closed_on_error(
|
||||||
|
mock_get_redis,
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
authenticated_client,
|
||||||
|
):
|
||||||
|
"""Test that Redis connection is closed even when operation fails."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = mock_meeting
|
||||||
|
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.setex = AsyncMock(side_effect=Exception("Redis error"))
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
with pytest.raises(Exception):
|
||||||
|
await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": TEST_CONNECTION_ID},
|
||||||
|
)
|
||||||
|
|
||||||
|
mock_redis.aclose.assert_called_once()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_joining_endpoint_requires_connection_id(
|
||||||
|
client,
|
||||||
|
):
|
||||||
|
"""Test that /joining returns 422 when connection_id is missing."""
|
||||||
|
response = await client.post(
|
||||||
|
"/rooms/test-room/meetings/meeting-123/joining",
|
||||||
|
json={},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 422 # Validation error
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_joining_endpoint_rejects_empty_connection_id(
|
||||||
|
client,
|
||||||
|
):
|
||||||
|
"""Test that /joining returns 422 when connection_id is empty string."""
|
||||||
|
response = await client.post(
|
||||||
|
"/rooms/test-room/meetings/meeting-123/joining",
|
||||||
|
json={"connection_id": ""},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert response.status_code == 422 # Validation error (NonEmptyString)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.views.rooms.rooms_controller.get_by_name")
|
||||||
|
@patch("reflector.views.rooms.meetings_controller.get_by_id")
|
||||||
|
@patch("reflector.views.rooms.get_async_redis_client")
|
||||||
|
async def test_different_connection_ids_create_different_keys(
|
||||||
|
mock_get_redis,
|
||||||
|
mock_get_meeting,
|
||||||
|
mock_get_room,
|
||||||
|
mock_room,
|
||||||
|
mock_meeting,
|
||||||
|
client,
|
||||||
|
):
|
||||||
|
"""Test that different connection_ids create different Redis keys."""
|
||||||
|
mock_get_room.return_value = mock_room
|
||||||
|
mock_get_meeting.return_value = mock_meeting
|
||||||
|
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.setex = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
# First connection
|
||||||
|
await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": "connection-1"},
|
||||||
|
)
|
||||||
|
key1 = mock_redis.setex.call_args[0][0]
|
||||||
|
|
||||||
|
mock_redis.setex.reset_mock()
|
||||||
|
|
||||||
|
# Second connection (different tab)
|
||||||
|
await client.post(
|
||||||
|
f"/rooms/{mock_room.name}/meetings/{mock_meeting.id}/joining",
|
||||||
|
json={"connection_id": "connection-2"},
|
||||||
|
)
|
||||||
|
key2 = mock_redis.setex.call_args[0][0]
|
||||||
|
|
||||||
|
# Keys should be different
|
||||||
|
assert key1 != key2
|
||||||
|
assert "connection-1" in key1
|
||||||
|
assert "connection-2" in key2
|
||||||
153
server/tests/test_pending_joins.py
Normal file
153
server/tests/test_pending_joins.py
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
"""Tests for pending joins Redis helper functions.
|
||||||
|
|
||||||
|
TDD tests for tracking join intent to prevent race conditions during
|
||||||
|
WebRTC handshake when users join meetings.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from unittest.mock import AsyncMock
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from reflector.presence.pending_joins import (
|
||||||
|
PENDING_JOIN_PREFIX,
|
||||||
|
PENDING_JOIN_TTL,
|
||||||
|
create_pending_join,
|
||||||
|
delete_pending_join,
|
||||||
|
has_pending_joins,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_redis():
|
||||||
|
"""Mock async Redis client."""
|
||||||
|
redis = AsyncMock()
|
||||||
|
redis.setex = AsyncMock()
|
||||||
|
redis.delete = AsyncMock()
|
||||||
|
redis.scan = AsyncMock(return_value=(0, []))
|
||||||
|
return redis
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_create_pending_join_sets_key_with_ttl(mock_redis):
|
||||||
|
"""Test that create_pending_join stores key with correct TTL."""
|
||||||
|
meeting_id = "meeting-123"
|
||||||
|
user_id = "user-456"
|
||||||
|
|
||||||
|
await create_pending_join(mock_redis, meeting_id, user_id)
|
||||||
|
|
||||||
|
expected_key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
|
||||||
|
mock_redis.setex.assert_called_once()
|
||||||
|
call_args = mock_redis.setex.call_args
|
||||||
|
assert call_args[0][0] == expected_key
|
||||||
|
assert call_args[0][1] == PENDING_JOIN_TTL
|
||||||
|
# Value should be a timestamp string
|
||||||
|
assert call_args[0][2] is not None
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_delete_pending_join_removes_key(mock_redis):
|
||||||
|
"""Test that delete_pending_join removes the key."""
|
||||||
|
meeting_id = "meeting-123"
|
||||||
|
user_id = "user-456"
|
||||||
|
|
||||||
|
await delete_pending_join(mock_redis, meeting_id, user_id)
|
||||||
|
|
||||||
|
expected_key = f"{PENDING_JOIN_PREFIX}:{meeting_id}:{user_id}"
|
||||||
|
mock_redis.delete.assert_called_once_with(expected_key)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_has_pending_joins_returns_false_when_no_keys(mock_redis):
|
||||||
|
"""Test has_pending_joins returns False when no matching keys."""
|
||||||
|
mock_redis.scan.return_value = (0, [])
|
||||||
|
|
||||||
|
result = await has_pending_joins(mock_redis, "meeting-123")
|
||||||
|
|
||||||
|
assert result is False
|
||||||
|
mock_redis.scan.assert_called_once()
|
||||||
|
call_kwargs = mock_redis.scan.call_args.kwargs
|
||||||
|
assert call_kwargs["match"] == f"{PENDING_JOIN_PREFIX}:meeting-123:*"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_has_pending_joins_returns_true_when_keys_exist(mock_redis):
|
||||||
|
"""Test has_pending_joins returns True when matching keys found."""
|
||||||
|
mock_redis.scan.return_value = (0, [b"pending_join:meeting-123:user-1"])
|
||||||
|
|
||||||
|
result = await has_pending_joins(mock_redis, "meeting-123")
|
||||||
|
|
||||||
|
assert result is True
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_has_pending_joins_scans_with_correct_pattern(mock_redis):
|
||||||
|
"""Test has_pending_joins uses correct scan pattern."""
|
||||||
|
meeting_id = "meeting-abc-def"
|
||||||
|
mock_redis.scan.return_value = (0, [])
|
||||||
|
|
||||||
|
await has_pending_joins(mock_redis, meeting_id)
|
||||||
|
|
||||||
|
expected_pattern = f"{PENDING_JOIN_PREFIX}:{meeting_id}:*"
|
||||||
|
mock_redis.scan.assert_called_once()
|
||||||
|
call_kwargs = mock_redis.scan.call_args.kwargs
|
||||||
|
assert call_kwargs["match"] == expected_pattern
|
||||||
|
assert call_kwargs["count"] == 100
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_multiple_users_pending_joins(mock_redis):
|
||||||
|
"""Test that multiple users can have pending joins for same meeting."""
|
||||||
|
meeting_id = "meeting-123"
|
||||||
|
# Simulate two pending joins
|
||||||
|
mock_redis.scan.return_value = (
|
||||||
|
0,
|
||||||
|
[b"pending_join:meeting-123:user-1", b"pending_join:meeting-123:user-2"],
|
||||||
|
)
|
||||||
|
|
||||||
|
result = await has_pending_joins(mock_redis, meeting_id)
|
||||||
|
|
||||||
|
assert result is True
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_pending_join_ttl_value():
|
||||||
|
"""Test that PENDING_JOIN_TTL has expected value."""
|
||||||
|
# 30 seconds should be enough for WebRTC handshake but not too long
|
||||||
|
assert PENDING_JOIN_TTL == 30
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_pending_join_prefix_value():
|
||||||
|
"""Test that PENDING_JOIN_PREFIX has expected value."""
|
||||||
|
assert PENDING_JOIN_PREFIX == "pending_join"
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_has_pending_joins_multi_iteration_scan_no_keys(mock_redis):
|
||||||
|
"""Test has_pending_joins iterates until cursor returns 0."""
|
||||||
|
# Simulate multi-iteration scan: cursor 100 -> cursor 50 -> cursor 0
|
||||||
|
mock_redis.scan.side_effect = [
|
||||||
|
(100, []), # First iteration, no keys, continue
|
||||||
|
(50, []), # Second iteration, no keys, continue
|
||||||
|
(0, []), # Third iteration, cursor 0, done
|
||||||
|
]
|
||||||
|
|
||||||
|
result = await has_pending_joins(mock_redis, "meeting-123")
|
||||||
|
|
||||||
|
assert result is False
|
||||||
|
assert mock_redis.scan.call_count == 3
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
async def test_has_pending_joins_multi_iteration_finds_key_later(mock_redis):
|
||||||
|
"""Test has_pending_joins finds key on second iteration."""
|
||||||
|
# Simulate finding key on second scan iteration
|
||||||
|
mock_redis.scan.side_effect = [
|
||||||
|
(100, []), # First iteration, no keys
|
||||||
|
(0, [b"pending_join:meeting-123:user-1"]), # Second iteration, found key
|
||||||
|
]
|
||||||
|
|
||||||
|
result = await has_pending_joins(mock_redis, "meeting-123")
|
||||||
|
|
||||||
|
assert result is True
|
||||||
|
assert mock_redis.scan.call_count == 2
|
||||||
241
server/tests/test_process_meetings_pending_joins.py
Normal file
241
server/tests/test_process_meetings_pending_joins.py
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
"""Tests for process_meetings pending joins check.
|
||||||
|
|
||||||
|
Tests that process_meetings correctly skips deactivation when
|
||||||
|
pending joins exist for a meeting.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from datetime import datetime, timedelta, timezone
|
||||||
|
from unittest.mock import AsyncMock, patch
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from reflector.db.meetings import Meeting
|
||||||
|
|
||||||
|
|
||||||
|
def _get_process_meetings_fn():
|
||||||
|
"""Get the underlying async function without Celery/asynctask decorators."""
|
||||||
|
from reflector.worker import process
|
||||||
|
|
||||||
|
fn = process.process_meetings
|
||||||
|
# Get through both decorator layers (@shared_task and @asynctask)
|
||||||
|
if hasattr(fn, "__wrapped__"):
|
||||||
|
fn = fn.__wrapped__
|
||||||
|
if hasattr(fn, "__wrapped__"):
|
||||||
|
fn = fn.__wrapped__
|
||||||
|
return fn
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mock_active_meeting():
|
||||||
|
"""Mock an active meeting that should be considered for deactivation."""
|
||||||
|
now = datetime.now(timezone.utc)
|
||||||
|
return Meeting(
|
||||||
|
id="meeting-123",
|
||||||
|
room_id="room-456",
|
||||||
|
room_name="test-room-20251118120000",
|
||||||
|
room_url="https://daily.co/test-room-20251118120000",
|
||||||
|
host_room_url="https://daily.co/test-room-20251118120000?t=host",
|
||||||
|
platform="daily",
|
||||||
|
num_clients=0,
|
||||||
|
is_active=True,
|
||||||
|
start_date=now - timedelta(hours=1),
|
||||||
|
end_date=now - timedelta(minutes=30), # Already ended
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.worker.process.meetings_controller.get_all_active")
|
||||||
|
@patch("reflector.worker.process.RedisAsyncLock")
|
||||||
|
@patch("reflector.worker.process.create_platform_client")
|
||||||
|
@patch("reflector.worker.process.get_async_redis_client")
|
||||||
|
@patch("reflector.worker.process.has_pending_joins")
|
||||||
|
@patch("reflector.worker.process.meetings_controller.update_meeting")
|
||||||
|
async def test_process_meetings_skips_deactivation_with_pending_joins(
|
||||||
|
mock_update_meeting,
|
||||||
|
mock_has_pending_joins,
|
||||||
|
mock_get_redis,
|
||||||
|
mock_create_client,
|
||||||
|
mock_redis_lock_class,
|
||||||
|
mock_get_all_active,
|
||||||
|
mock_active_meeting,
|
||||||
|
):
|
||||||
|
"""Test that process_meetings skips deactivation when pending joins exist."""
|
||||||
|
process_meetings = _get_process_meetings_fn()
|
||||||
|
|
||||||
|
mock_get_all_active.return_value = [mock_active_meeting]
|
||||||
|
|
||||||
|
# Mock lock acquired
|
||||||
|
mock_lock_instance = AsyncMock()
|
||||||
|
mock_lock_instance.acquired = True
|
||||||
|
mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
|
||||||
|
mock_lock_instance.__aexit__ = AsyncMock()
|
||||||
|
mock_redis_lock_class.return_value = mock_lock_instance
|
||||||
|
|
||||||
|
# Mock platform client - no active sessions, but had sessions (triggers deactivation)
|
||||||
|
mock_daily_client = AsyncMock()
|
||||||
|
mock_session = AsyncMock()
|
||||||
|
mock_session.ended_at = datetime.now(timezone.utc) # Session ended
|
||||||
|
mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
|
||||||
|
mock_create_client.return_value = mock_daily_client
|
||||||
|
|
||||||
|
# Mock Redis client
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
# Mock pending joins exist
|
||||||
|
mock_has_pending_joins.return_value = True
|
||||||
|
|
||||||
|
await process_meetings()
|
||||||
|
|
||||||
|
# Verify has_pending_joins was called
|
||||||
|
mock_has_pending_joins.assert_called_once_with(mock_redis, mock_active_meeting.id)
|
||||||
|
|
||||||
|
# Verify meeting was NOT deactivated
|
||||||
|
mock_update_meeting.assert_not_called()
|
||||||
|
|
||||||
|
# Verify Redis was closed
|
||||||
|
mock_redis.aclose.assert_called_once()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.worker.process.meetings_controller.get_all_active")
|
||||||
|
@patch("reflector.worker.process.RedisAsyncLock")
|
||||||
|
@patch("reflector.worker.process.create_platform_client")
|
||||||
|
@patch("reflector.worker.process.get_async_redis_client")
|
||||||
|
@patch("reflector.worker.process.has_pending_joins")
|
||||||
|
@patch("reflector.worker.process.meetings_controller.update_meeting")
|
||||||
|
async def test_process_meetings_deactivates_without_pending_joins(
|
||||||
|
mock_update_meeting,
|
||||||
|
mock_has_pending_joins,
|
||||||
|
mock_get_redis,
|
||||||
|
mock_create_client,
|
||||||
|
mock_redis_lock_class,
|
||||||
|
mock_get_all_active,
|
||||||
|
mock_active_meeting,
|
||||||
|
):
|
||||||
|
"""Test that process_meetings deactivates when no pending joins."""
|
||||||
|
process_meetings = _get_process_meetings_fn()
|
||||||
|
|
||||||
|
mock_get_all_active.return_value = [mock_active_meeting]
|
||||||
|
|
||||||
|
# Mock lock acquired
|
||||||
|
mock_lock_instance = AsyncMock()
|
||||||
|
mock_lock_instance.acquired = True
|
||||||
|
mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
|
||||||
|
mock_lock_instance.__aexit__ = AsyncMock()
|
||||||
|
mock_redis_lock_class.return_value = mock_lock_instance
|
||||||
|
|
||||||
|
# Mock platform client - no active sessions, but had sessions
|
||||||
|
mock_daily_client = AsyncMock()
|
||||||
|
mock_session = AsyncMock()
|
||||||
|
mock_session.ended_at = datetime.now(timezone.utc)
|
||||||
|
mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
|
||||||
|
mock_create_client.return_value = mock_daily_client
|
||||||
|
|
||||||
|
# Mock Redis client
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
# Mock no pending joins
|
||||||
|
mock_has_pending_joins.return_value = False
|
||||||
|
|
||||||
|
await process_meetings()
|
||||||
|
|
||||||
|
# Verify meeting was deactivated
|
||||||
|
mock_update_meeting.assert_called_once_with(mock_active_meeting.id, is_active=False)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.worker.process.meetings_controller.get_all_active")
|
||||||
|
@patch("reflector.worker.process.RedisAsyncLock")
|
||||||
|
@patch("reflector.worker.process.create_platform_client")
|
||||||
|
async def test_process_meetings_no_check_when_active_sessions(
|
||||||
|
mock_create_client,
|
||||||
|
mock_redis_lock_class,
|
||||||
|
mock_get_all_active,
|
||||||
|
mock_active_meeting,
|
||||||
|
):
|
||||||
|
"""Test that pending joins check is skipped when there are active sessions."""
|
||||||
|
process_meetings = _get_process_meetings_fn()
|
||||||
|
|
||||||
|
mock_get_all_active.return_value = [mock_active_meeting]
|
||||||
|
|
||||||
|
# Mock lock acquired
|
||||||
|
mock_lock_instance = AsyncMock()
|
||||||
|
mock_lock_instance.acquired = True
|
||||||
|
mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
|
||||||
|
mock_lock_instance.__aexit__ = AsyncMock()
|
||||||
|
mock_redis_lock_class.return_value = mock_lock_instance
|
||||||
|
|
||||||
|
# Mock platform client - has active session
|
||||||
|
mock_daily_client = AsyncMock()
|
||||||
|
mock_session = AsyncMock()
|
||||||
|
mock_session.ended_at = None # Still active
|
||||||
|
mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
|
||||||
|
mock_create_client.return_value = mock_daily_client
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch("reflector.worker.process.get_async_redis_client") as mock_get_redis,
|
||||||
|
patch("reflector.worker.process.has_pending_joins") as mock_has_pending_joins,
|
||||||
|
patch(
|
||||||
|
"reflector.worker.process.meetings_controller.update_meeting"
|
||||||
|
) as mock_update_meeting,
|
||||||
|
):
|
||||||
|
await process_meetings()
|
||||||
|
|
||||||
|
# Verify pending joins check was NOT called (no need - active sessions exist)
|
||||||
|
mock_has_pending_joins.assert_not_called()
|
||||||
|
|
||||||
|
# Verify meeting was NOT deactivated
|
||||||
|
mock_update_meeting.assert_not_called()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio
|
||||||
|
@patch("reflector.worker.process.meetings_controller.get_all_active")
|
||||||
|
@patch("reflector.worker.process.RedisAsyncLock")
|
||||||
|
@patch("reflector.worker.process.create_platform_client")
|
||||||
|
@patch("reflector.worker.process.get_async_redis_client")
|
||||||
|
@patch("reflector.worker.process.has_pending_joins")
|
||||||
|
@patch("reflector.worker.process.meetings_controller.update_meeting")
|
||||||
|
async def test_process_meetings_closes_redis_even_on_continue(
|
||||||
|
mock_update_meeting,
|
||||||
|
mock_has_pending_joins,
|
||||||
|
mock_get_redis,
|
||||||
|
mock_create_client,
|
||||||
|
mock_redis_lock_class,
|
||||||
|
mock_get_all_active,
|
||||||
|
mock_active_meeting,
|
||||||
|
):
|
||||||
|
"""Test that Redis connection is always closed, even when skipping deactivation."""
|
||||||
|
process_meetings = _get_process_meetings_fn()
|
||||||
|
|
||||||
|
mock_get_all_active.return_value = [mock_active_meeting]
|
||||||
|
|
||||||
|
# Mock lock acquired
|
||||||
|
mock_lock_instance = AsyncMock()
|
||||||
|
mock_lock_instance.acquired = True
|
||||||
|
mock_lock_instance.__aenter__ = AsyncMock(return_value=mock_lock_instance)
|
||||||
|
mock_lock_instance.__aexit__ = AsyncMock()
|
||||||
|
mock_redis_lock_class.return_value = mock_lock_instance
|
||||||
|
|
||||||
|
# Mock platform client - no active sessions
|
||||||
|
mock_daily_client = AsyncMock()
|
||||||
|
mock_session = AsyncMock()
|
||||||
|
mock_session.ended_at = datetime.now(timezone.utc)
|
||||||
|
mock_daily_client.get_room_sessions = AsyncMock(return_value=[mock_session])
|
||||||
|
mock_create_client.return_value = mock_daily_client
|
||||||
|
|
||||||
|
# Mock Redis client
|
||||||
|
mock_redis = AsyncMock()
|
||||||
|
mock_redis.aclose = AsyncMock()
|
||||||
|
mock_get_redis.return_value = mock_redis
|
||||||
|
|
||||||
|
# Mock pending joins exist (will trigger continue)
|
||||||
|
mock_has_pending_joins.return_value = True
|
||||||
|
|
||||||
|
await process_meetings()
|
||||||
|
|
||||||
|
# Verify Redis was closed
|
||||||
|
mock_redis.aclose.assert_called_once()
|
||||||
@@ -115,9 +115,7 @@ def appserver(tmpdir, setup_database, celery_session_app, celery_session_worker)
|
|||||||
settings.DATA_DIR = DATA_DIR
|
settings.DATA_DIR = DATA_DIR
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
# Using celery_includes from conftest.py which includes both pipelines
|
||||||
def celery_includes():
|
|
||||||
return ["reflector.pipelines.main_live_pipeline"]
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.usefixtures("setup_database")
|
@pytest.mark.usefixtures("setup_database")
|
||||||
|
|||||||
@@ -56,7 +56,12 @@ def appserver_ws_user(setup_database):
|
|||||||
|
|
||||||
if server_instance:
|
if server_instance:
|
||||||
server_instance.should_exit = True
|
server_instance.should_exit = True
|
||||||
server_thread.join(timeout=30)
|
server_thread.join(timeout=2.0)
|
||||||
|
|
||||||
|
# Reset global singleton for test isolation
|
||||||
|
from reflector.ws_manager import reset_ws_manager
|
||||||
|
|
||||||
|
reset_ws_manager()
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(autouse=True)
|
@pytest.fixture(autouse=True)
|
||||||
@@ -133,6 +138,8 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
|
|||||||
|
|
||||||
# Connect and then trigger an event via HTTP create
|
# Connect and then trigger an event via HTTP create
|
||||||
async with aconnect_ws(base_ws, subprotocols=subprotocols) as ws:
|
async with aconnect_ws(base_ws, subprotocols=subprotocols) as ws:
|
||||||
|
await asyncio.sleep(0.2)
|
||||||
|
|
||||||
# Emit an event to the user's room via a standard HTTP action
|
# Emit an event to the user's room via a standard HTTP action
|
||||||
from httpx import AsyncClient
|
from httpx import AsyncClient
|
||||||
|
|
||||||
@@ -150,6 +157,7 @@ async def test_user_ws_accepts_valid_token_and_receives_events(appserver_ws_user
|
|||||||
"email": "user-abc@example.com",
|
"email": "user-abc@example.com",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Use in-memory client (global singleton makes it share ws_manager)
|
||||||
async with AsyncClient(app=app, base_url=f"http://{host}:{port}/v1") as ac:
|
async with AsyncClient(app=app, base_url=f"http://{host}:{port}/v1") as ac:
|
||||||
# Create a transcript as this user so that the server publishes TRANSCRIPT_CREATED to user room
|
# Create a transcript as this user so that the server publishes TRANSCRIPT_CREATED to user room
|
||||||
resp = await ac.post("/transcripts", json={"name": "WS Test"})
|
resp = await ac.post("/transcripts", json={"name": "WS Test"})
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ import { useConsentDialog } from "../../lib/consent";
|
|||||||
import {
|
import {
|
||||||
useRoomJoinMeeting,
|
useRoomJoinMeeting,
|
||||||
useMeetingStartRecording,
|
useMeetingStartRecording,
|
||||||
|
useMeetingJoining,
|
||||||
|
useMeetingJoined,
|
||||||
} from "../../lib/apiHooks";
|
} from "../../lib/apiHooks";
|
||||||
import { omit } from "remeda";
|
import { omit } from "remeda";
|
||||||
import {
|
import {
|
||||||
@@ -187,8 +189,14 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
|
|||||||
const [container, setContainer] = useState<HTMLDivElement | null>(null);
|
const [container, setContainer] = useState<HTMLDivElement | null>(null);
|
||||||
const joinMutation = useRoomJoinMeeting();
|
const joinMutation = useRoomJoinMeeting();
|
||||||
const startRecordingMutation = useMeetingStartRecording();
|
const startRecordingMutation = useMeetingStartRecording();
|
||||||
|
const joiningMutation = useMeetingJoining();
|
||||||
|
const joinedMutation = useMeetingJoined();
|
||||||
const [joinedMeeting, setJoinedMeeting] = useState<Meeting | null>(null);
|
const [joinedMeeting, setJoinedMeeting] = useState<Meeting | null>(null);
|
||||||
|
|
||||||
|
// Generate a stable connection ID for this component instance
|
||||||
|
// Used to track pending joins per browser tab (prevents key collision for anonymous users)
|
||||||
|
const connectionId = useMemo(() => crypto.randomUUID(), []);
|
||||||
|
|
||||||
// Generate deterministic instanceIds so all participants use SAME IDs
|
// Generate deterministic instanceIds so all participants use SAME IDs
|
||||||
const cloudInstanceId = parseNonEmptyString(meeting.id);
|
const cloudInstanceId = parseNonEmptyString(meeting.id);
|
||||||
const rawTracksInstanceId = parseNonEmptyString(
|
const rawTracksInstanceId = parseNonEmptyString(
|
||||||
@@ -237,6 +245,20 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
|
|||||||
router.push("/browse");
|
router.push("/browse");
|
||||||
}, [router]);
|
}, [router]);
|
||||||
|
|
||||||
|
// Trigger presence recheck on dirty disconnects (tab close, navigation away)
|
||||||
|
useEffect(() => {
|
||||||
|
if (!meeting?.id || !roomName) return;
|
||||||
|
|
||||||
|
const handleBeforeUnload = () => {
|
||||||
|
// sendBeacon guarantees delivery even if tab closes mid-request
|
||||||
|
const url = `/v1/rooms/${roomName}/meetings/${meeting.id}/leave`;
|
||||||
|
navigator.sendBeacon(url, JSON.stringify({}));
|
||||||
|
};
|
||||||
|
|
||||||
|
window.addEventListener("beforeunload", handleBeforeUnload);
|
||||||
|
return () => window.removeEventListener("beforeunload", handleBeforeUnload);
|
||||||
|
}, [meeting?.id, roomName]);
|
||||||
|
|
||||||
const handleCustomButtonClick = useCallback(
|
const handleCustomButtonClick = useCallback(
|
||||||
(ev: DailyEventObjectCustomButtonClick) => {
|
(ev: DailyEventObjectCustomButtonClick) => {
|
||||||
if (ev.button_id === CONSENT_BUTTON_ID) {
|
if (ev.button_id === CONSENT_BUTTON_ID) {
|
||||||
@@ -249,6 +271,28 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
|
|||||||
);
|
);
|
||||||
|
|
||||||
const handleFrameJoinMeeting = useCallback(() => {
|
const handleFrameJoinMeeting = useCallback(() => {
|
||||||
|
// Signal that WebRTC connection is established
|
||||||
|
// This clears the pending join intent, confirming successful connection
|
||||||
|
joinedMutation.mutate(
|
||||||
|
{
|
||||||
|
params: {
|
||||||
|
path: {
|
||||||
|
room_name: roomName,
|
||||||
|
meeting_id: meeting.id,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
body: {
|
||||||
|
connection_id: connectionId,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
onError: (error: unknown) => {
|
||||||
|
// Non-blocking: log but don't fail - this is cleanup, not critical
|
||||||
|
console.warn("Failed to signal joined:", error);
|
||||||
|
},
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
if (meeting.recording_type === "cloud") {
|
if (meeting.recording_type === "cloud") {
|
||||||
console.log("Starting dual recording via REST API", {
|
console.log("Starting dual recording via REST API", {
|
||||||
cloudInstanceId,
|
cloudInstanceId,
|
||||||
@@ -310,6 +354,9 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
|
|||||||
}, [
|
}, [
|
||||||
meeting.recording_type,
|
meeting.recording_type,
|
||||||
meeting.id,
|
meeting.id,
|
||||||
|
roomName,
|
||||||
|
connectionId,
|
||||||
|
joinedMutation,
|
||||||
startRecordingMutation,
|
startRecordingMutation,
|
||||||
cloudInstanceId,
|
cloudInstanceId,
|
||||||
rawTracksInstanceId,
|
rawTracksInstanceId,
|
||||||
@@ -328,8 +375,28 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
|
|||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (!frame || !roomUrl) return;
|
if (!frame || !roomUrl) return;
|
||||||
frame
|
|
||||||
.join({
|
const joinRoom = async () => {
|
||||||
|
// Signal intent to join before WebRTC handshake starts
|
||||||
|
// This prevents race condition where meeting is deactivated during handshake
|
||||||
|
try {
|
||||||
|
await joiningMutation.mutateAsync({
|
||||||
|
params: {
|
||||||
|
path: {
|
||||||
|
room_name: roomName,
|
||||||
|
meeting_id: meeting.id,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
body: {
|
||||||
|
connection_id: connectionId,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error) {
|
||||||
|
// Non-blocking: log but continue with join
|
||||||
|
console.warn("Failed to signal joining intent:", error);
|
||||||
|
}
|
||||||
|
|
||||||
|
await frame.join({
|
||||||
url: roomUrl,
|
url: roomUrl,
|
||||||
sendSettings: {
|
sendSettings: {
|
||||||
video: {
|
video: {
|
||||||
@@ -341,9 +408,13 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
|
|||||||
},
|
},
|
||||||
// Note: screenVideo intentionally not configured to preserve full quality for screen shares
|
// Note: screenVideo intentionally not configured to preserve full quality for screen shares
|
||||||
},
|
},
|
||||||
})
|
});
|
||||||
.catch(console.error.bind(console, "Failed to join daily room:"));
|
};
|
||||||
}, [frame, roomUrl]);
|
|
||||||
|
joinRoom().catch(console.error.bind(console, "Failed to join daily room:"));
|
||||||
|
// joiningMutation excluded from deps - it's a stable hook reference
|
||||||
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
|
}, [frame, roomUrl, roomName, meeting.id, connectionId]);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
setCustomTrayButton(
|
setCustomTrayButton(
|
||||||
|
|||||||
@@ -807,6 +807,35 @@ export function useRoomJoinMeeting() {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Presence race fix endpoints (not yet in OpenAPI spec)
|
||||||
|
// These signal join intent to prevent race conditions during WebRTC handshake
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||||
|
export function useMeetingJoining(): any {
|
||||||
|
return ($api as any).useMutation(
|
||||||
|
"post",
|
||||||
|
"/v1/rooms/{room_name}/meetings/{meeting_id}/joining",
|
||||||
|
{},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||||
|
export function useMeetingJoined(): any {
|
||||||
|
return ($api as any).useMutation(
|
||||||
|
"post",
|
||||||
|
"/v1/rooms/{room_name}/meetings/{meeting_id}/joined",
|
||||||
|
{},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||||
|
export function useMeetingLeave(): any {
|
||||||
|
return ($api as any).useMutation(
|
||||||
|
"post",
|
||||||
|
"/v1/rooms/{room_name}/meetings/{meeting_id}/leave",
|
||||||
|
{},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
export function useRoomIcsSync() {
|
export function useRoomIcsSync() {
|
||||||
const { setError } = useError();
|
const { setError } = useError();
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user