From 7030e0f23649a8cf6c1eb6d5889684a41ce849ec Mon Sep 17 00:00:00 2001
From: Mathieu Virbel
Date: Wed, 27 Aug 2025 10:32:04 -0600
Subject: [PATCH 01/77] fix: optimize parakeet transcription batching algorithm (#577)

* refactor: optimize transcription batching to accumulate speech segments

- Changed VAD segment generator to yield time ranges over the full audio
  array instead of extracted audio segments
- Removed the segment filtering step
- Modified batch_segments to accumulate as much speech as possible,
  including silence
- Transcribe larger continuous chunks instead of individual speech segments

* fix: correct transcribe_batch call to use a list and fix batch unpacking

* fix: simplify

* fix: remove unused variables

* fix: add typing
---
 .../reflector_transcriber_parakeet.py         | 129 +++++++-----------
 1 file changed, 53 insertions(+), 76 deletions(-)

diff --git a/server/gpu/modal_deployments/reflector_transcriber_parakeet.py b/server/gpu/modal_deployments/reflector_transcriber_parakeet.py
index 97e150e3..3b6f6ad0 100644
--- a/server/gpu/modal_deployments/reflector_transcriber_parakeet.py
+++ b/server/gpu/modal_deployments/reflector_transcriber_parakeet.py
@@ -3,7 +3,7 @@ import os
 import sys
 import threading
 import uuid
-from typing import Mapping, NewType
+from typing import Generator, Mapping, NewType
 from urllib.parse import urlparse
 
 import modal
@@ -14,10 +14,7 @@ SAMPLERATE = 16000
 UPLOADS_PATH = "/uploads"
 CACHE_PATH = "/cache"
 VAD_CONFIG = {
-    "max_segment_duration": 30.0,
-    "batch_max_files": 10,
-    "batch_max_duration": 5.0,
-    "min_segment_duration": 0.02,
+    "batch_max_duration": 30.0,
     "silence_padding": 0.5,
     "window_size": 512,
 }
@@ -271,7 +268,9 @@ class TranscriberParakeetFile:
             audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
             return audio_array
 
-        def vad_segment_generator(audio_array):
+        def vad_segment_generator(
+            audio_array,
+        ) -> Generator[tuple[float, float], None, None]:
             """Generate speech segments using VAD with start/end sample indices"""
             vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE)
             window_size = VAD_CONFIG["window_size"]
@@ -297,76 +296,65 @@ class TranscriberParakeetFile:
                     start_time = start / float(SAMPLERATE)
                     end_time = end / float(SAMPLERATE)
 
-                    # Extract the actual audio segment
-                    audio_segment = audio_array[start:end]
-
-                    yield (start_time, end_time, audio_segment)
+                    yield (start_time, end_time)
                     start = None
 
             vad_iterator.reset_states()
 
-        def vad_segment_filter(segments):
-            """Filter VAD segments by duration and chunk large segments"""
-            min_dur = VAD_CONFIG["min_segment_duration"]
-            max_dur = VAD_CONFIG["max_segment_duration"]
+        def batch_speech_segments(
+            segments: Generator[tuple[float, float], None, None], max_duration: int
+        ) -> Generator[tuple[float, float], None, None]:
+            """
+            Input segments:
+            [0-2] [3-5] [6-8] [10-11] [12-15] [17-19] [20-22]
 
-            for start_time, end_time, audio_segment in segments:
-                segment_duration = end_time - start_time
+            ↓ (max_duration=10)
 
-                # Skip very small segments
-                if segment_duration < min_dur:
+            Output batches:
+            [0-8] [10-19] [20-22]
+
+            Note: silences are kept for better transcription; the previous
+            implementation passed segments separately, but the output was less
+            accurate.
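+
+            Walk-through of the example above (added for illustration): a batch
+            keeps growing while end_time - batch_start_time stays <= max_duration,
+            so [3-5] and [6-8] join the batch started at 0 (8 - 0 <= 10), while
+            [10-11] would stretch it to 11 s; [0-8] is therefore flushed and a new
+            batch starts at 10.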
+ """ + batch_start_time = None + batch_end_time = None + + for start_time, end_time in segments: + if batch_start_time is None or batch_end_time is None: + batch_start_time = start_time + batch_end_time = end_time continue - # If segment is within max duration, yield as-is - if segment_duration <= max_dur: - yield (start_time, end_time, audio_segment) + total_duration = end_time - batch_start_time + + if total_duration <= max_duration: + batch_end_time = end_time continue - # Chunk large segments into smaller pieces - chunk_samples = int(max_dur * SAMPLERATE) - current_start = start_time + yield (batch_start_time, batch_end_time) + batch_start_time = start_time + batch_end_time = end_time - for chunk_offset in range(0, len(audio_segment), chunk_samples): - chunk_audio = audio_segment[ - chunk_offset : chunk_offset + chunk_samples - ] - if len(chunk_audio) == 0: - break + if batch_start_time is None or batch_end_time is None: + return - chunk_duration = len(chunk_audio) / float(SAMPLERATE) - chunk_end = current_start + chunk_duration + yield (batch_start_time, batch_end_time) - # Only yield chunks that meet minimum duration - if chunk_duration >= min_dur: - yield (current_start, chunk_end, chunk_audio) + def batch_segment_to_audio_segment(segments, audio_array): + for start_time, end_time in segments: + start_sample = int(start_time * SAMPLERATE) + end_sample = int(end_time * SAMPLERATE) + audio_segment = audio_array[start_sample:end_sample] - current_start = chunk_end - - def batch_segments(segments, max_files=10, max_duration=5.0): - batch = [] - batch_duration = 0.0 - - for start_time, end_time, audio_segment in segments: - segment_duration = end_time - start_time - - if segment_duration < VAD_CONFIG["silence_padding"]: + if end_time - start_time < VAD_CONFIG["silence_padding"]: silence_samples = int( - (VAD_CONFIG["silence_padding"] - segment_duration) * SAMPLERATE + (VAD_CONFIG["silence_padding"] - (end_time - start_time)) + * SAMPLERATE ) padding = np.zeros(silence_samples, dtype=np.float32) audio_segment = np.concatenate([audio_segment, padding]) - segment_duration = VAD_CONFIG["silence_padding"] - batch.append((start_time, end_time, audio_segment)) - batch_duration += segment_duration - - if len(batch) >= max_files or batch_duration >= max_duration: - yield batch - batch = [] - batch_duration = 0.0 - - if batch: - yield batch + yield start_time, end_time, audio_segment def transcribe_batch(model, audio_segments): with NoStdStreams(): @@ -376,8 +364,6 @@ class TranscriberParakeetFile: def emit_results( results, segments_info, - batch_index, - total_batches, ): """Yield transcribed text and word timings from model output, adjusting timestamps to absolute positions.""" for i, (output, (start_time, end_time, _)) in enumerate( @@ -413,35 +399,26 @@ class TranscriberParakeetFile: all_words = [] raw_segments = vad_segment_generator(audio_array) - filtered_segments = vad_segment_filter(raw_segments) - batches = batch_segments( - filtered_segments, - VAD_CONFIG["batch_max_files"], + speech_segments = batch_speech_segments( + raw_segments, VAD_CONFIG["batch_max_duration"], ) + audio_segments = batch_segment_to_audio_segment(speech_segments, audio_array) - batch_index = 0 - total_batches = max( - 1, int(total_duration / VAD_CONFIG["batch_max_duration"]) + 1 - ) - - for batch in batches: - batch_index += 1 - audio_segments = [seg[2] for seg in batch] - results = transcribe_batch(self.model, audio_segments) + for batch in audio_segments: + _, _, audio_segment = batch + results = 
transcribe_batch(self.model, [audio_segment]) for text, words in emit_results( results, - batch, - batch_index, - total_batches, + [batch], ): if not text: continue all_text_parts.append(text) all_words.extend(words) - processed_duration += sum(len(seg[2]) / float(SAMPLERATE) for seg in batch) + processed_duration += len(audio_segment) / float(SAMPLERATE) combined_text = " ".join(all_text_parts) return {"text": combined_text, "words": all_words} From 124ce03bf86044c18313d27228a25da4bc20c9c5 Mon Sep 17 00:00:00 2001 From: Igor Loskutov Date: Thu, 28 Aug 2025 12:07:34 -0400 Subject: [PATCH 02/77] fix: Igor/evaluation (#575) * fix: impossible import error (#563) * evaluation cli - database events experiment * hallucinations * evaluation - unhallucinate * evaluation - unhallucinate * roll back reliability link * self reviewio * lint * self review * add file pipeline to cli * add file pipeline to cli + sorting * remove cli tests * remove ai comments * comments --- .../reflector/pipelines/main_live_pipeline.py | 2 +- .../processors/file_transcript_modal.py | 3 + server/reflector/tools/process.py | 496 ++++++------------ .../tools/process_with_diarization.py | 318 ----------- server/reflector/tools/test_diarization.py | 96 ---- server/tests/test_processors_pipeline.py | 61 --- 6 files changed, 173 insertions(+), 803 deletions(-) delete mode 100644 server/reflector/tools/process_with_diarization.py delete mode 100644 server/reflector/tools/test_diarization.py delete mode 100644 server/tests/test_processors_pipeline.py diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index b15fcb05..812847db 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -794,7 +794,7 @@ def pipeline_post(*, transcript_id: str): chain_final_summaries, ) | task_pipeline_post_to_zulip.si(transcript_id=transcript_id) - chain.delay() + return chain.delay() @get_transcript diff --git a/server/reflector/processors/file_transcript_modal.py b/server/reflector/processors/file_transcript_modal.py index 21c378ec..b99cf806 100644 --- a/server/reflector/processors/file_transcript_modal.py +++ b/server/reflector/processors/file_transcript_modal.py @@ -67,6 +67,9 @@ class FileTranscriptModalProcessor(FileTranscriptProcessor): for word_info in result.get("words", []) ] + # words come not in order + words.sort(key=lambda w: w.start) + return Transcript(words=words) diff --git a/server/reflector/tools/process.py b/server/reflector/tools/process.py index 4f1cafdd..eb770f76 100644 --- a/server/reflector/tools/process.py +++ b/server/reflector/tools/process.py @@ -1,294 +1,204 @@ """ Process audio file with diarization support -=========================================== - -Extended version of process.py that includes speaker diarization. -This tool processes audio files locally without requiring the full server infrastructure. 
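+
+Example invocation (a sketch based on the argparse interface defined at the
+bottom of this file; the exact module path is an assumption, not part of this
+patch):
+
+    python -m reflector.tools.process meeting.mp3 --pipeline file \
+        --source-language en --target-language en --output topics.jsonl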
""" +import argparse import asyncio -import tempfile -import uuid +import json +import shutil +import sys +import time from pathlib import Path -from typing import List - -import av +from typing import Any, Dict, List, Literal +from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller from reflector.logger import logger -from reflector.processors import ( - AudioChunkerAutoProcessor, - AudioDownscaleProcessor, - AudioFileWriterProcessor, - AudioMergeProcessor, - AudioTranscriptAutoProcessor, - Pipeline, - PipelineEvent, - TranscriptFinalSummaryProcessor, - TranscriptFinalTitleProcessor, - TranscriptLinerProcessor, - TranscriptTopicDetectorProcessor, - TranscriptTranslatorAutoProcessor, +from reflector.pipelines.main_file_pipeline import ( + task_pipeline_file_process as task_pipeline_file_process, ) -from reflector.processors.base import BroadcastProcessor, Processor -from reflector.processors.types import ( - AudioDiarizationInput, - TitleSummary, - TitleSummaryWithId, +from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipeline_post +from reflector.pipelines.main_live_pipeline import ( + pipeline_process as live_pipeline_process, ) -class TopicCollectorProcessor(Processor): - """Collect topics for diarization""" +def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]: + """Convert TranscriptTopic objects to JSON-serializable dicts""" + serialized = [] + for topic in topics: + topic_dict = topic.model_dump() + serialized.append(topic_dict) + return serialized - INPUT_TYPE = TitleSummary - OUTPUT_TYPE = TitleSummary - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.topics: List[TitleSummaryWithId] = [] - self._topic_id = 0 +def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None: + """Print debug info about speakers found in topics""" + all_speakers = set() + for topic_dict in serialized_topics: + for word in topic_dict.get("words", []): + all_speakers.add(word.get("speaker", 0)) - async def _push(self, data: TitleSummary): - # Convert to TitleSummaryWithId and collect - self._topic_id += 1 - topic_with_id = TitleSummaryWithId( - id=str(self._topic_id), - title=data.title, - summary=data.summary, - timestamp=data.timestamp, - duration=data.duration, - transcript=data.transcript, + print( + f"Found {len(serialized_topics)} topics with speakers: {all_speakers}", + file=sys.stderr, + ) + + +TranscriptId = str + + +# common interface for every flow: it needs an Entry in db with specific ceremony (file path + status + actual file in file system) +# ideally we want to get rid of it at some point +async def prepare_entry( + source_path: str, + source_language: str, + target_language: str, +) -> TranscriptId: + file_path = Path(source_path) + + transcript = await transcripts_controller.add( + file_path.name, + # note that the real file upload has SourceKind: LIVE for the reason of it's an error + source_kind=SourceKind.FILE, + source_language=source_language, + target_language=target_language, + user_id=None, + ) + + logger.info( + f"Created empty transcript {transcript.id} for file {file_path.name} because technically we need an empty transcript before we start transcript" + ) + + # pipelines expect files as upload.* + + extension = file_path.suffix + upload_path = transcript.data_path / f"upload{extension}" + upload_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(source_path, upload_path) + logger.info(f"Copied {source_path} to {upload_path}") + + # pipelines expect entity 
status "uploaded" + await transcripts_controller.update(transcript, {"status": "uploaded"}) + + return transcript.id + + +# same reason as prepare_entry +async def extract_result_from_entry( + transcript_id: TranscriptId, output_path: str +) -> None: + post_final_transcript = await transcripts_controller.get_by_id(transcript_id) + + # assert post_final_transcript.status == "ended" + # File pipeline doesn't set status to "ended", only live pipeline does https://github.com/Monadical-SAS/reflector/issues/582 + topics = post_final_transcript.topics + if not topics: + raise RuntimeError( + f"No topics found for transcript {transcript_id} after processing" ) - self.topics.append(topic_with_id) - # Pass through the original topic - await self.emit(data) + serialized_topics = serialize_topics(topics) - def get_topics(self) -> List[TitleSummaryWithId]: - return self.topics + if output_path: + # Write to JSON file + with open(output_path, "w") as f: + for topic_dict in serialized_topics: + json.dump(topic_dict, f) + f.write("\n") + print(f"Results written to {output_path}", file=sys.stderr) + else: + # Write to stdout as JSONL + for topic_dict in serialized_topics: + print(json.dumps(topic_dict)) + + debug_print_speakers(serialized_topics) -async def process_audio_file( - filename, - event_callback, - only_transcript=False, - source_language="en", - target_language="en", - enable_diarization=True, - diarization_backend="pyannote", +async def process_live_pipeline( + transcript_id: TranscriptId, ): - # Create temp file for audio if diarization is enabled - audio_temp_path = None - if enable_diarization: - audio_temp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False) - audio_temp_path = audio_temp_file.name - audio_temp_file.close() + """Process transcript_id with transcription and diarization""" - # Create processor for collecting topics - topic_collector = TopicCollectorProcessor() + print(f"Processing transcript_id {transcript_id}...", file=sys.stderr) + await live_pipeline_process(transcript_id=transcript_id) + print(f"Processing complete for transcript {transcript_id}", file=sys.stderr) - # Build pipeline for audio processing - processors = [] + pre_final_transcript = await transcripts_controller.get_by_id(transcript_id) - # Add audio file writer at the beginning if diarization is enabled - if enable_diarization: - processors.append(AudioFileWriterProcessor(audio_temp_path)) + # assert documented behaviour: after process, the pipeline isn't ended. this is the reason of calling pipeline_post + assert pre_final_transcript.status != "ended" - # Add the rest of the processors - processors += [ - AudioDownscaleProcessor(), - AudioChunkerAutoProcessor(), - AudioMergeProcessor(), - AudioTranscriptAutoProcessor.as_threaded(), - TranscriptLinerProcessor(), - TranscriptTranslatorAutoProcessor.as_threaded(), - ] + # at this point, diarization is running but we have no access to it. 
run diarization in parallel - one will hopefully win after polling + result = live_pipeline_post(transcript_id=transcript_id) - if not only_transcript: - processors += [ - TranscriptTopicDetectorProcessor.as_threaded(), - # Collect topics for diarization - topic_collector, - BroadcastProcessor( - processors=[ - TranscriptFinalTitleProcessor.as_threaded(), - TranscriptFinalSummaryProcessor.as_threaded(), - ], - ), - ] - - # Create main pipeline - pipeline = Pipeline(*processors) - pipeline.set_pref("audio:source_language", source_language) - pipeline.set_pref("audio:target_language", target_language) - pipeline.describe() - pipeline.on(event_callback) - - # Start processing audio - logger.info(f"Opening {filename}") - container = av.open(filename) - try: - logger.info("Start pushing audio into the pipeline") - for frame in container.decode(audio=0): - await pipeline.push(frame) - finally: - logger.info("Flushing the pipeline") - await pipeline.flush() - - # Run diarization if enabled and we have topics - if enable_diarization and not only_transcript and audio_temp_path: - topics = topic_collector.get_topics() - - if topics: - logger.info(f"Starting diarization with {len(topics)} topics") - - try: - from reflector.processors import AudioDiarizationAutoProcessor - - diarization_processor = AudioDiarizationAutoProcessor( - name=diarization_backend - ) - - diarization_processor.set_pipeline(pipeline) - - # For Modal backend, we need to upload the file to S3 first - if diarization_backend == "modal": - from datetime import datetime - - from reflector.storage import get_transcripts_storage - from reflector.utils.s3_temp_file import S3TemporaryFile - - storage = get_transcripts_storage() - - # Generate a unique filename in evaluation folder - timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S") - audio_filename = f"evaluation/diarization_temp/{timestamp}_{uuid.uuid4().hex}.wav" - - # Use context manager for automatic cleanup - async with S3TemporaryFile(storage, audio_filename) as s3_file: - # Read and upload the audio file - with open(audio_temp_path, "rb") as f: - audio_data = f.read() - - audio_url = await s3_file.upload(audio_data) - logger.info(f"Uploaded audio to S3: {audio_filename}") - - # Create diarization input with S3 URL - diarization_input = AudioDiarizationInput( - audio_url=audio_url, topics=topics - ) - - # Run diarization - await diarization_processor.push(diarization_input) - await diarization_processor.flush() - - logger.info("Diarization complete") - # File will be automatically cleaned up when exiting the context - else: - # For local backend, use local file path - audio_url = audio_temp_path - - # Create diarization input - diarization_input = AudioDiarizationInput( - audio_url=audio_url, topics=topics - ) - - # Run diarization - await diarization_processor.push(diarization_input) - await diarization_processor.flush() - - logger.info("Diarization complete") - - except ImportError as e: - logger.error(f"Failed to import diarization dependencies: {e}") - logger.error( - "Install with: uv pip install pyannote.audio torch torchaudio" - ) - logger.error( - "And set HF_TOKEN environment variable for pyannote models" - ) - raise SystemExit(1) - except Exception as e: - logger.error(f"Diarization failed: {e}") - raise SystemExit(1) - else: - logger.warning("Skipping diarization: no topics available") - - # Clean up temp file - if audio_temp_path: - try: - Path(audio_temp_path).unlink() - except Exception as e: - logger.warning(f"Failed to clean up temp file {audio_temp_path}: 
{e}") - - logger.info("All done!") + # result.ready() blocks even without await; it mutates result also + while not result.ready(): + print(f"Status: {result.state}") + time.sleep(2) async def process_file_pipeline( - filename: str, - event_callback, - source_language="en", - target_language="en", - enable_diarization=True, - diarization_backend="modal", + transcript_id: TranscriptId, ): """Process audio/video file using the optimized file pipeline""" + + # task_pipeline_file_process is a Celery task, need to use .delay() for async execution + result = task_pipeline_file_process.delay(transcript_id=transcript_id) + + # Wait for the Celery task to complete + while not result.ready(): + print(f"File pipeline status: {result.state}", file=sys.stderr) + time.sleep(2) + + logger.info("File pipeline processing complete") + + +async def process( + source_path: str, + source_language: str, + target_language: str, + pipeline: Literal["live", "file"], + output_path: str = None, +): + from reflector.db import get_database + + database = get_database() + # db connect is a part of ceremony + await database.connect() + try: - from reflector.db import database - from reflector.db.transcripts import SourceKind, transcripts_controller - from reflector.pipelines.main_file_pipeline import PipelineMainFile - - await database.connect() - try: - # Create a temporary transcript for processing - transcript = await transcripts_controller.add( - "", - source_kind=SourceKind.FILE, - source_language=source_language, - target_language=target_language, - ) - - # Process the file - pipeline = PipelineMainFile(transcript_id=transcript.id) - await pipeline.process(Path(filename)) - - logger.info("File pipeline processing complete") - - finally: - await database.disconnect() - except ImportError as e: - logger.error(f"File pipeline not available: {e}") - logger.info("Falling back to stream pipeline") - # Fall back to stream pipeline - await process_audio_file( - filename, - event_callback, - only_transcript=False, - source_language=source_language, - target_language=target_language, - enable_diarization=enable_diarization, - diarization_backend=diarization_backend, + transcript_id = await prepare_entry( + source_path, + source_language, + target_language, ) + pipeline_handlers = { + "live": process_live_pipeline, + "file": process_file_pipeline, + } + + handler = pipeline_handlers.get(pipeline) + if not handler: + raise ValueError(f"Unknown pipeline type: {pipeline}") + + await handler(transcript_id) + + await extract_result_from_entry(transcript_id, output_path) + finally: + await database.disconnect() + if __name__ == "__main__": - import argparse - import os - parser = argparse.ArgumentParser( - description="Process audio files with optional speaker diarization" + description="Process audio files with speaker diarization" ) parser.add_argument("source", help="Source file (mp3, wav, mp4...)") parser.add_argument( - "--stream", - action="store_true", - help="Use streaming pipeline (original frame-based processing)", - ) - parser.add_argument( - "--only-transcript", - "-t", - action="store_true", - help="Only generate transcript without topics/summaries", + "--pipeline", + required=True, + choices=["live", "file"], + help="Pipeline type to use for processing (live: streaming/incremental, file: batch/parallel)", ) parser.add_argument( "--source-language", default="en", help="Source language code (default: en)" @@ -297,82 +207,14 @@ if __name__ == "__main__": "--target-language", default="en", help="Target language code 
(default: en)" ) parser.add_argument("--output", "-o", help="Output file (output.jsonl)") - parser.add_argument( - "--enable-diarization", - "-d", - action="store_true", - help="Enable speaker diarization", - ) - parser.add_argument( - "--diarization-backend", - default="pyannote", - choices=["pyannote", "modal"], - help="Diarization backend to use (default: pyannote)", - ) args = parser.parse_args() - if "REDIS_HOST" not in os.environ: - os.environ["REDIS_HOST"] = "localhost" - - output_fd = None - if args.output: - output_fd = open(args.output, "w") - - async def event_callback(event: PipelineEvent): - processor = event.processor - data = event.data - - # Ignore internal processors - if processor in ( - "AudioDownscaleProcessor", - "AudioChunkerAutoProcessor", - "AudioMergeProcessor", - "AudioFileWriterProcessor", - "TopicCollectorProcessor", - "BroadcastProcessor", - ): - return - - # If diarization is enabled, skip the original topic events from the pipeline - # The diarization processor will emit the same topics but with speaker info - if processor == "TranscriptTopicDetectorProcessor" and args.enable_diarization: - return - - # Log all events - logger.info(f"Event: {processor} - {type(data).__name__}") - - # Write to output - if output_fd: - output_fd.write(event.model_dump_json()) - output_fd.write("\n") - output_fd.flush() - - if args.stream: - # Use original streaming pipeline - asyncio.run( - process_audio_file( - args.source, - event_callback, - only_transcript=args.only_transcript, - source_language=args.source_language, - target_language=args.target_language, - enable_diarization=args.enable_diarization, - diarization_backend=args.diarization_backend, - ) + asyncio.run( + process( + args.source, + args.source_language, + args.target_language, + args.pipeline, + args.output, ) - else: - # Use optimized file pipeline (default) - asyncio.run( - process_file_pipeline( - args.source, - event_callback, - source_language=args.source_language, - target_language=args.target_language, - enable_diarization=args.enable_diarization, - diarization_backend=args.diarization_backend, - ) - ) - - if output_fd: - output_fd.close() - logger.info(f"Output written to {args.output}") + ) diff --git a/server/reflector/tools/process_with_diarization.py b/server/reflector/tools/process_with_diarization.py deleted file mode 100644 index f1415e1a..00000000 --- a/server/reflector/tools/process_with_diarization.py +++ /dev/null @@ -1,318 +0,0 @@ -""" -@vibe-generated -Process audio file with diarization support -=========================================== - -Extended version of process.py that includes speaker diarization. -This tool processes audio files locally without requiring the full server infrastructure. 
-""" - -import asyncio -import tempfile -import uuid -from pathlib import Path -from typing import List - -import av - -from reflector.logger import logger -from reflector.processors import ( - AudioChunkerAutoProcessor, - AudioDownscaleProcessor, - AudioFileWriterProcessor, - AudioMergeProcessor, - AudioTranscriptAutoProcessor, - Pipeline, - PipelineEvent, - TranscriptFinalSummaryProcessor, - TranscriptFinalTitleProcessor, - TranscriptLinerProcessor, - TranscriptTopicDetectorProcessor, - TranscriptTranslatorAutoProcessor, -) -from reflector.processors.base import BroadcastProcessor, Processor -from reflector.processors.types import ( - AudioDiarizationInput, - TitleSummary, - TitleSummaryWithId, -) - - -class TopicCollectorProcessor(Processor): - """Collect topics for diarization""" - - INPUT_TYPE = TitleSummary - OUTPUT_TYPE = TitleSummary - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.topics: List[TitleSummaryWithId] = [] - self._topic_id = 0 - - async def _push(self, data: TitleSummary): - # Convert to TitleSummaryWithId and collect - self._topic_id += 1 - topic_with_id = TitleSummaryWithId( - id=str(self._topic_id), - title=data.title, - summary=data.summary, - timestamp=data.timestamp, - duration=data.duration, - transcript=data.transcript, - ) - self.topics.append(topic_with_id) - - # Pass through the original topic - await self.emit(data) - - def get_topics(self) -> List[TitleSummaryWithId]: - return self.topics - - -async def process_audio_file_with_diarization( - filename, - event_callback, - only_transcript=False, - source_language="en", - target_language="en", - enable_diarization=True, - diarization_backend="modal", -): - # Create temp file for audio if diarization is enabled - audio_temp_path = None - if enable_diarization: - audio_temp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False) - audio_temp_path = audio_temp_file.name - audio_temp_file.close() - - # Create processor for collecting topics - topic_collector = TopicCollectorProcessor() - - # Build pipeline for audio processing - processors = [] - - # Add audio file writer at the beginning if diarization is enabled - if enable_diarization: - processors.append(AudioFileWriterProcessor(audio_temp_path)) - - # Add the rest of the processors - processors += [ - AudioDownscaleProcessor(), - AudioChunkerAutoProcessor(), - AudioMergeProcessor(), - AudioTranscriptAutoProcessor.as_threaded(), - ] - - processors += [ - TranscriptLinerProcessor(), - TranscriptTranslatorAutoProcessor.as_threaded(), - ] - - if not only_transcript: - processors += [ - TranscriptTopicDetectorProcessor.as_threaded(), - # Collect topics for diarization - topic_collector, - BroadcastProcessor( - processors=[ - TranscriptFinalTitleProcessor.as_threaded(), - TranscriptFinalSummaryProcessor.as_threaded(), - ], - ), - ] - - # Create main pipeline - pipeline = Pipeline(*processors) - pipeline.set_pref("audio:source_language", source_language) - pipeline.set_pref("audio:target_language", target_language) - pipeline.describe() - pipeline.on(event_callback) - - # Start processing audio - logger.info(f"Opening {filename}") - container = av.open(filename) - try: - logger.info("Start pushing audio into the pipeline") - for frame in container.decode(audio=0): - await pipeline.push(frame) - finally: - logger.info("Flushing the pipeline") - await pipeline.flush() - - # Run diarization if enabled and we have topics - if enable_diarization and not only_transcript and audio_temp_path: - topics = topic_collector.get_topics() - - if 
topics: - logger.info(f"Starting diarization with {len(topics)} topics") - - try: - from reflector.processors import AudioDiarizationAutoProcessor - - diarization_processor = AudioDiarizationAutoProcessor( - name=diarization_backend - ) - - diarization_processor.set_pipeline(pipeline) - - # For Modal backend, we need to upload the file to S3 first - if diarization_backend == "modal": - from datetime import datetime, timezone - - from reflector.storage import get_transcripts_storage - from reflector.utils.s3_temp_file import S3TemporaryFile - - storage = get_transcripts_storage() - - # Generate a unique filename in evaluation folder - timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") - audio_filename = f"evaluation/diarization_temp/{timestamp}_{uuid.uuid4().hex}.wav" - - # Use context manager for automatic cleanup - async with S3TemporaryFile(storage, audio_filename) as s3_file: - # Read and upload the audio file - with open(audio_temp_path, "rb") as f: - audio_data = f.read() - - audio_url = await s3_file.upload(audio_data) - logger.info(f"Uploaded audio to S3: {audio_filename}") - - # Create diarization input with S3 URL - diarization_input = AudioDiarizationInput( - audio_url=audio_url, topics=topics - ) - - # Run diarization - await diarization_processor.push(diarization_input) - await diarization_processor.flush() - - logger.info("Diarization complete") - # File will be automatically cleaned up when exiting the context - else: - # For local backend, use local file path - audio_url = audio_temp_path - - # Create diarization input - diarization_input = AudioDiarizationInput( - audio_url=audio_url, topics=topics - ) - - # Run diarization - await diarization_processor.push(diarization_input) - await diarization_processor.flush() - - logger.info("Diarization complete") - - except ImportError as e: - logger.error(f"Failed to import diarization dependencies: {e}") - logger.error( - "Install with: uv pip install pyannote.audio torch torchaudio" - ) - logger.error( - "And set HF_TOKEN environment variable for pyannote models" - ) - raise SystemExit(1) - except Exception as e: - logger.error(f"Diarization failed: {e}") - raise SystemExit(1) - else: - logger.warning("Skipping diarization: no topics available") - - # Clean up temp file - if audio_temp_path: - try: - Path(audio_temp_path).unlink() - except Exception as e: - logger.warning(f"Failed to clean up temp file {audio_temp_path}: {e}") - - logger.info("All done!") - - -if __name__ == "__main__": - import argparse - import os - - parser = argparse.ArgumentParser( - description="Process audio files with optional speaker diarization" - ) - parser.add_argument("source", help="Source file (mp3, wav, mp4...)") - parser.add_argument( - "--only-transcript", - "-t", - action="store_true", - help="Only generate transcript without topics/summaries", - ) - parser.add_argument( - "--source-language", default="en", help="Source language code (default: en)" - ) - parser.add_argument( - "--target-language", default="en", help="Target language code (default: en)" - ) - parser.add_argument("--output", "-o", help="Output file (output.jsonl)") - parser.add_argument( - "--enable-diarization", - "-d", - action="store_true", - help="Enable speaker diarization", - ) - parser.add_argument( - "--diarization-backend", - default="modal", - choices=["modal"], - help="Diarization backend to use (default: modal)", - ) - args = parser.parse_args() - - # Set REDIS_HOST to localhost if not provided - if "REDIS_HOST" not in os.environ: - 
os.environ["REDIS_HOST"] = "localhost" - logger.info("REDIS_HOST not set, defaulting to localhost") - - output_fd = None - if args.output: - output_fd = open(args.output, "w") - - async def event_callback(event: PipelineEvent): - processor = event.processor - data = event.data - - # Ignore internal processors - if processor in ( - "AudioDownscaleProcessor", - "AudioChunkerAutoProcessor", - "AudioMergeProcessor", - "AudioFileWriterProcessor", - "TopicCollectorProcessor", - "BroadcastProcessor", - ): - return - - # If diarization is enabled, skip the original topic events from the pipeline - # The diarization processor will emit the same topics but with speaker info - if processor == "TranscriptTopicDetectorProcessor" and args.enable_diarization: - return - - # Log all events - logger.info(f"Event: {processor} - {type(data).__name__}") - - # Write to output - if output_fd: - output_fd.write(event.model_dump_json()) - output_fd.write("\n") - output_fd.flush() - - asyncio.run( - process_audio_file_with_diarization( - args.source, - event_callback, - only_transcript=args.only_transcript, - source_language=args.source_language, - target_language=args.target_language, - enable_diarization=args.enable_diarization, - diarization_backend=args.diarization_backend, - ) - ) - - if output_fd: - output_fd.close() - logger.info(f"Output written to {args.output}") diff --git a/server/reflector/tools/test_diarization.py b/server/reflector/tools/test_diarization.py deleted file mode 100644 index bd071d96..00000000 --- a/server/reflector/tools/test_diarization.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python3 -""" -@vibe-generated -Test script for the diarization CLI tool -========================================= - -This script helps test the diarization functionality with sample audio files. -""" - -import asyncio -import sys -from pathlib import Path - -from reflector.logger import logger - - -async def test_diarization(audio_file: str): - """Test the diarization functionality""" - - # Import the processing function - from process_with_diarization import process_audio_file_with_diarization - - # Collect events - events = [] - - async def event_callback(event): - events.append({"processor": event.processor, "data": event.data}) - logger.info(f"Event from {event.processor}") - - # Process the audio file - logger.info(f"Processing audio file: {audio_file}") - - try: - await process_audio_file_with_diarization( - audio_file, - event_callback, - only_transcript=False, - source_language="en", - target_language="en", - enable_diarization=True, - diarization_backend="modal", - ) - - # Analyze results - logger.info(f"Processing complete. 
Received {len(events)} events") - - # Look for diarization results - diarized_topics = [] - for event in events: - if "TitleSummary" in event["processor"]: - # Check if words have speaker information - if hasattr(event["data"], "transcript") and event["data"].transcript: - words = event["data"].transcript.words - if words and hasattr(words[0], "speaker"): - speakers = set( - w.speaker for w in words if hasattr(w, "speaker") - ) - logger.info( - f"Found {len(speakers)} speakers in topic: {event['data'].title}" - ) - diarized_topics.append(event["data"]) - - if diarized_topics: - logger.info(f"Successfully diarized {len(diarized_topics)} topics") - - # Print sample output - sample_topic = diarized_topics[0] - logger.info("Sample diarized output:") - for i, word in enumerate(sample_topic.transcript.words[:10]): - logger.info(f" Word {i}: '{word.text}' - Speaker {word.speaker}") - else: - logger.warning("No diarization results found in output") - - return events - - except Exception as e: - logger.error(f"Error during processing: {e}") - raise - - -def main(): - if len(sys.argv) < 2: - print("Usage: python test_diarization.py ") - sys.exit(1) - - audio_file = sys.argv[1] - if not Path(audio_file).exists(): - print(f"Error: Audio file '{audio_file}' not found") - sys.exit(1) - - # Run the test - asyncio.run(test_diarization(audio_file)) - - -if __name__ == "__main__": - main() diff --git a/server/tests/test_processors_pipeline.py b/server/tests/test_processors_pipeline.py deleted file mode 100644 index 7ae22a6c..00000000 --- a/server/tests/test_processors_pipeline.py +++ /dev/null @@ -1,61 +0,0 @@ -import pytest - - -@pytest.mark.asyncio -@pytest.mark.parametrize("enable_diarization", [False, True]) -async def test_basic_process( - dummy_transcript, - dummy_llm, - dummy_processors, - enable_diarization, - dummy_diarization, -): - # goal is to start the server, and send rtc audio to it - # validate the events received - from pathlib import Path - - from reflector.settings import settings - from reflector.tools.process import process_audio_file - - # LLM_BACKEND no longer exists in settings - # settings.LLM_BACKEND = "test" - settings.TRANSCRIPT_BACKEND = "whisper" - - # event callback - marks = {} - - async def event_callback(event): - if event.processor not in marks: - marks[event.processor] = 0 - marks[event.processor] += 1 - - # invoke the process and capture events - path = Path(__file__).parent / "records" / "test_mathieu_hello.wav" - - if enable_diarization: - # Test with diarization - may fail if pyannote.audio is not installed - try: - await process_audio_file( - path.as_posix(), event_callback, enable_diarization=True - ) - except SystemExit: - pytest.skip("pyannote.audio not installed - skipping diarization test") - else: - # Test without diarization - should always work - await process_audio_file( - path.as_posix(), event_callback, enable_diarization=False - ) - - print(f"Diarization: {enable_diarization}, Marks: {marks}") - - # validate the events - # Each processor should be called for each audio segment processed - # The final processors (Topic, Title, Summary) should be called once at the end - assert marks["TranscriptLinerProcessor"] > 0 - assert marks["TranscriptTranslatorPassthroughProcessor"] > 0 - assert marks["TranscriptTopicDetectorProcessor"] == 1 - assert marks["TranscriptFinalSummaryProcessor"] == 1 - assert marks["TranscriptFinalTitleProcessor"] == 1 - - if enable_diarization: - assert marks["TestAudioDiarizationProcessor"] == 1 From 
f5331a210732ef9e8e449e7435f0f75407649390 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Thu, 28 Aug 2025 12:22:07 -0600 Subject: [PATCH 03/77] style: more type annotations to parakeet transcriber (#581) * feat: add comprehensive type annotations to Parakeet transcriber - Add TypedDict for WordTiming with word, start, end fields - Add NamedTuple for TimeSegment, AudioSegment, and TranscriptResult - Add type hints to all generator functions (vad_segment_generator, batch_speech_segments, etc.) - Add enforce_word_timing_constraints function to prevent word timing overlaps - Refactor batch_segment_to_audio_segment to reuse pad_audio function * doc: add note about space --- .../reflector_transcriber_parakeet.py | 169 ++++++++++++------ 1 file changed, 114 insertions(+), 55 deletions(-) diff --git a/server/gpu/modal_deployments/reflector_transcriber_parakeet.py b/server/gpu/modal_deployments/reflector_transcriber_parakeet.py index 3b6f6ad0..0827f0cc 100644 --- a/server/gpu/modal_deployments/reflector_transcriber_parakeet.py +++ b/server/gpu/modal_deployments/reflector_transcriber_parakeet.py @@ -3,7 +3,7 @@ import os import sys import threading import uuid -from typing import Generator, Mapping, NewType +from typing import Generator, Mapping, NamedTuple, NewType, TypedDict from urllib.parse import urlparse import modal @@ -22,6 +22,37 @@ VAD_CONFIG = { ParakeetUniqFilename = NewType("ParakeetUniqFilename", str) AudioFileExtension = NewType("AudioFileExtension", str) + +class TimeSegment(NamedTuple): + """Represents a time segment with start and end times.""" + + start: float + end: float + + +class AudioSegment(NamedTuple): + """Represents an audio segment with timing and audio data.""" + + start: float + end: float + audio: any + + +class TranscriptResult(NamedTuple): + """Represents a transcription result with text and word timings.""" + + text: str + words: list["WordTiming"] + + +class WordTiming(TypedDict): + """Represents a word with its timing information.""" + + word: str + start: float + end: float + + app = modal.App("reflector-transcriber-parakeet") # Volume for caching model weights @@ -167,12 +198,14 @@ class TranscriberParakeetLive: (output,) = self.model.transcribe([padded_audio], timestamps=True) text = output.text.strip() - words = [ - { - "word": word_info["word"] + " ", - "start": round(word_info["start"], 2), - "end": round(word_info["end"], 2), - } + words: list[WordTiming] = [ + WordTiming( + # XXX the space added here is to match the output of whisper + # whisper add space to each words, while parakeet don't + word=word_info["word"] + " ", + start=round(word_info["start"], 2), + end=round(word_info["end"], 2), + ) for word_info in output.timestamp["word"] ] @@ -208,12 +241,12 @@ class TranscriberParakeetLive: for i, (filename, output) in enumerate(zip(filenames, outputs)): text = output.text.strip() - words = [ - { - "word": word_info["word"] + " ", - "start": round(word_info["start"], 2), - "end": round(word_info["end"], 2), - } + words: list[WordTiming] = [ + WordTiming( + word=word_info["word"] + " ", + start=round(word_info["start"], 2), + end=round(word_info["end"], 2), + ) for word_info in output.timestamp["word"] ] @@ -270,7 +303,7 @@ class TranscriberParakeetFile: def vad_segment_generator( audio_array, - ) -> Generator[tuple[float, float], None, None]: + ) -> Generator[TimeSegment, None, None]: """Generate speech segments using VAD with start/end sample indices""" vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE) window_size = 
VAD_CONFIG["window_size"] @@ -296,14 +329,14 @@ class TranscriberParakeetFile: start_time = start / float(SAMPLERATE) end_time = end / float(SAMPLERATE) - yield (start_time, end_time) + yield TimeSegment(start_time, end_time) start = None vad_iterator.reset_states() def batch_speech_segments( - segments: Generator[tuple[float, float], None, None], max_duration: int - ) -> Generator[tuple[float, float], None, None]: + segments: Generator[TimeSegment, None, None], max_duration: int + ) -> Generator[TimeSegment, None, None]: """ Input segments: [0-2] [3-5] [6-8] [10-11] [12-15] [17-19] [20-22] @@ -319,7 +352,8 @@ class TranscriberParakeetFile: batch_start_time = None batch_end_time = None - for start_time, end_time in segments: + for segment in segments: + start_time, end_time = segment.start, segment.end if batch_start_time is None or batch_end_time is None: batch_start_time = start_time batch_end_time = end_time @@ -331,59 +365,85 @@ class TranscriberParakeetFile: batch_end_time = end_time continue - yield (batch_start_time, batch_end_time) + yield TimeSegment(batch_start_time, batch_end_time) batch_start_time = start_time batch_end_time = end_time if batch_start_time is None or batch_end_time is None: return - yield (batch_start_time, batch_end_time) + yield TimeSegment(batch_start_time, batch_end_time) - def batch_segment_to_audio_segment(segments, audio_array): - for start_time, end_time in segments: + def batch_segment_to_audio_segment( + segments: Generator[TimeSegment, None, None], + audio_array, + ) -> Generator[AudioSegment, None, None]: + """Extract audio segments and apply padding for Parakeet compatibility. + + Uses pad_audio to ensure segments are at least 0.5s long, preventing + Parakeet crashes. This padding may cause slight timing overlaps between + segments, which are corrected by enforce_word_timing_constraints. + """ + for segment in segments: + start_time, end_time = segment.start, segment.end start_sample = int(start_time * SAMPLERATE) end_sample = int(end_time * SAMPLERATE) audio_segment = audio_array[start_sample:end_sample] - if end_time - start_time < VAD_CONFIG["silence_padding"]: - silence_samples = int( - (VAD_CONFIG["silence_padding"] - (end_time - start_time)) - * SAMPLERATE - ) - padding = np.zeros(silence_samples, dtype=np.float32) - audio_segment = np.concatenate([audio_segment, padding]) + padded_segment = pad_audio(audio_segment, SAMPLERATE) - yield start_time, end_time, audio_segment + yield AudioSegment(start_time, end_time, padded_segment) - def transcribe_batch(model, audio_segments): + def transcribe_batch(model, audio_segments: list) -> list: with NoStdStreams(): outputs = model.transcribe(audio_segments, timestamps=True) return outputs + def enforce_word_timing_constraints( + words: list[WordTiming], + ) -> list[WordTiming]: + """Enforce that word end times don't exceed the start time of the next word. + + Due to silence padding added in batch_segment_to_audio_segment for better + transcription accuracy, word timings from different segments may overlap. + This function ensures there are no overlaps by adjusting end times. 
+ """ + if len(words) <= 1: + return words + + enforced_words = [] + for i, word in enumerate(words): + enforced_word = word.copy() + + if i < len(words) - 1: + next_start = words[i + 1]["start"] + if enforced_word["end"] > next_start: + enforced_word["end"] = next_start + + enforced_words.append(enforced_word) + + return enforced_words + def emit_results( - results, - segments_info, - ): + results: list, + segments_info: list[AudioSegment], + ) -> Generator[TranscriptResult, None, None]: """Yield transcribed text and word timings from model output, adjusting timestamps to absolute positions.""" - for i, (output, (start_time, end_time, _)) in enumerate( - zip(results, segments_info) - ): + for i, (output, segment) in enumerate(zip(results, segments_info)): + start_time, end_time = segment.start, segment.end text = output.text.strip() - words = [ - { - "word": word_info["word"] + " ", - "start": round( + words: list[WordTiming] = [ + WordTiming( + word=word_info["word"] + " ", + start=round( word_info["start"] + start_time + timestamp_offset, 2 ), - "end": round( - word_info["end"] + start_time + timestamp_offset, 2 - ), - } + end=round(word_info["end"] + start_time + timestamp_offset, 2), + ) for word_info in output.timestamp["word"] ] - yield text, words + yield TranscriptResult(text, words) upload_volume.reload() @@ -393,10 +453,9 @@ class TranscriberParakeetFile: audio_array = load_and_convert_audio(file_path) total_duration = len(audio_array) / float(SAMPLERATE) - processed_duration = 0.0 - all_text_parts = [] - all_words = [] + all_text_parts: list[str] = [] + all_words: list[WordTiming] = [] raw_segments = vad_segment_generator(audio_array) speech_segments = batch_speech_segments( @@ -406,19 +465,19 @@ class TranscriberParakeetFile: audio_segments = batch_segment_to_audio_segment(speech_segments, audio_array) for batch in audio_segments: - _, _, audio_segment = batch + audio_segment = batch.audio results = transcribe_batch(self.model, [audio_segment]) - for text, words in emit_results( + for result in emit_results( results, [batch], ): - if not text: + if not result.text: continue - all_text_parts.append(text) - all_words.extend(words) + all_text_parts.append(result.text) + all_words.extend(result.words) - processed_duration += len(audio_segment) / float(SAMPLERATE) + all_words = enforce_word_timing_constraints(all_words) combined_text = " ".join(all_text_parts) return {"text": combined_text, "words": all_words} From 55cc8637c6d3668f2a9c460b23f4fea295ea0904 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Thu, 28 Aug 2025 16:43:17 -0600 Subject: [PATCH 04/77] ci: restrict workflow execution to main branch and add concurrency (#586) * ci: try adding concurrency * ci: restrict push on main branch * ci: fix concurrency key * ci: fix build concurrency * refactor: apply suggestion from @pr-agent-monadical[bot] Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com> --------- Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com> --- .github/workflows/db_migrations.yml | 5 +++++ .github/workflows/test_server.yml | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/.github/workflows/db_migrations.yml b/.github/workflows/db_migrations.yml index ff8ad59a..2b80c3a1 100644 --- a/.github/workflows/db_migrations.yml +++ b/.github/workflows/db_migrations.yml @@ -2,6 +2,8 @@ name: Test Database Migrations on: push: + branches: + - main paths: - "server/migrations/**" - "server/reflector/db/**" 
@@ -17,6 +19,9 @@ on: jobs: test-migrations: runs-on: ubuntu-latest + concurrency: + group: db-ubuntu-latest-${{ github.ref }} + cancel-in-progress: true services: postgres: image: postgres:17 diff --git a/.github/workflows/test_server.yml b/.github/workflows/test_server.yml index 262e0e05..f03d020e 100644 --- a/.github/workflows/test_server.yml +++ b/.github/workflows/test_server.yml @@ -5,12 +5,17 @@ on: paths: - "server/**" push: + branches: + - main paths: - "server/**" jobs: pytest: runs-on: ubuntu-latest + concurrency: + group: pytest-${{ github.ref }} + cancel-in-progress: true services: redis: image: redis:6 @@ -30,6 +35,9 @@ jobs: docker-amd64: runs-on: linux-amd64 + concurrency: + group: docker-amd64-${{ github.ref }} + cancel-in-progress: true steps: - uses: actions/checkout@v4 - name: Set up Docker Buildx @@ -45,6 +53,9 @@ jobs: docker-arm64: runs-on: linux-arm64 + concurrency: + group: docker-arm64-${{ github.ref }} + cancel-in-progress: true steps: - uses: actions/checkout@v4 - name: Set up Docker Buildx From 9dfd76996f851cc52be54feea078adbc0816dc57 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Fri, 29 Aug 2025 00:58:14 -0600 Subject: [PATCH 05/77] fix: file pipeline status reporting and websocket updates (#589) * feat: use file pipeline for upload and reprocess action * fix: make file pipeline correctly report status events * fix: duplication of transcripts_controller * fix: tests * test: fix file upload test * test: fix reprocess * fix: also patch from main_file_pipeline (how patch is done is dependent of file import unfortunately) --- server/reflector/db/transcripts.py | 33 ++++++++- .../reflector/pipelines/main_file_pipeline.py | 51 +++++++++++--- .../reflector/pipelines/main_live_pipeline.py | 32 ++++----- server/reflector/views/transcripts_process.py | 4 +- server/reflector/views/transcripts_upload.py | 4 +- server/tests/conftest.py | 68 ++++++++++++++++++- .../tests/test_transcripts_audio_download.py | 2 +- server/tests/test_transcripts_process.py | 15 ++-- server/tests/test_transcripts_upload.py | 11 +-- 9 files changed, 170 insertions(+), 50 deletions(-) diff --git a/server/reflector/db/transcripts.py b/server/reflector/db/transcripts.py index 9dbcba9f..47148995 100644 --- a/server/reflector/db/transcripts.py +++ b/server/reflector/db/transcripts.py @@ -122,6 +122,15 @@ def generate_transcript_name() -> str: return f"Transcript {now.strftime('%Y-%m-%d %H:%M:%S')}" +TranscriptStatus = Literal[ + "idle", "uploaded", "recording", "processing", "error", "ended" +] + + +class StrValue(BaseModel): + value: str + + class AudioWaveform(BaseModel): data: list[float] @@ -185,7 +194,7 @@ class Transcript(BaseModel): id: str = Field(default_factory=generate_uuid4) user_id: str | None = None name: str = Field(default_factory=generate_transcript_name) - status: str = "idle" + status: TranscriptStatus = "idle" duration: float = 0 created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc)) title: str | None = None @@ -732,5 +741,27 @@ class TranscriptController: transcript.delete_participant(participant_id) await self.update(transcript, {"participants": transcript.participants_dump()}) + async def set_status( + self, transcript_id: str, status: TranscriptStatus + ) -> TranscriptEvent | None: + """ + Update the status of a transcript + + Will add an event STATUS + update the status field of transcript + """ + async with self.transaction(): + transcript = await self.get_by_id(transcript_id) + if not transcript: + raise Exception(f"Transcript {transcript_id} 
not found") + if transcript.status == status: + return + resp = await self.append_event( + transcript=transcript, + event="STATUS", + data=StrValue(value=status), + ) + await self.update(transcript, {"status": status}) + return resp + transcripts_controller = TranscriptController() diff --git a/server/reflector/pipelines/main_file_pipeline.py b/server/reflector/pipelines/main_file_pipeline.py index f2c8fb85..f11cddca 100644 --- a/server/reflector/pipelines/main_file_pipeline.py +++ b/server/reflector/pipelines/main_file_pipeline.py @@ -15,10 +15,15 @@ from celery import shared_task from reflector.db.transcripts import ( Transcript, + TranscriptStatus, transcripts_controller, ) from reflector.logger import logger -from reflector.pipelines.main_live_pipeline import PipelineMainBase, asynctask +from reflector.pipelines.main_live_pipeline import ( + PipelineMainBase, + asynctask, + broadcast_to_sockets, +) from reflector.processors import ( AudioFileWriterProcessor, TranscriptFinalSummaryProcessor, @@ -83,12 +88,27 @@ class PipelineMainFile(PipelineMainBase): exc_info=result, ) + @broadcast_to_sockets + async def set_status(self, transcript_id: str, status: TranscriptStatus): + async with self.lock_transaction(): + return await transcripts_controller.set_status(transcript_id, status) + async def process(self, file_path: Path): """Main entry point for file processing""" self.logger.info(f"Starting file pipeline for {file_path}") transcript = await self.get_transcript() + # Clear transcript as we're going to regenerate everything + async with self.transaction(): + await transcripts_controller.update( + transcript, + { + "events": [], + "topics": [], + }, + ) + # Extract audio and write to transcript location audio_path = await self.extract_and_write_audio(file_path, transcript) @@ -105,6 +125,8 @@ class PipelineMainFile(PipelineMainBase): self.logger.info("File pipeline complete") + await transcripts_controller.set_status(transcript.id, "ended") + async def extract_and_write_audio( self, file_path: Path, transcript: Transcript ) -> Path: @@ -362,14 +384,21 @@ async def task_pipeline_file_process(*, transcript_id: str): if not transcript: raise Exception(f"Transcript {transcript_id} not found") - # Find the file to process - audio_file = next(transcript.data_path.glob("upload.*"), None) - if not audio_file: - audio_file = next(transcript.data_path.glob("audio.*"), None) - - if not audio_file: - raise Exception("No audio file found to process") - - # Run file pipeline pipeline = PipelineMainFile(transcript_id=transcript_id) - await pipeline.process(audio_file) + + try: + await pipeline.set_status(transcript_id, "processing") + + # Find the file to process + audio_file = next(transcript.data_path.glob("upload.*"), None) + if not audio_file: + audio_file = next(transcript.data_path.glob("audio.*"), None) + + if not audio_file: + raise Exception("No audio file found to process") + + await pipeline.process(audio_file) + + except Exception: + await pipeline.set_status(transcript_id, "error") + raise diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index 812847db..30c8777b 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -32,6 +32,7 @@ from reflector.db.transcripts import ( TranscriptFinalLongSummary, TranscriptFinalShortSummary, TranscriptFinalTitle, + TranscriptStatus, TranscriptText, TranscriptTopic, TranscriptWaveform, @@ -188,8 +189,15 @@ class 
PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage] ] @asynccontextmanager - async def transaction(self): + async def lock_transaction(self): + # This lock is to prevent multiple processor starting adding + # into event array at the same time async with self._lock: + yield + + @asynccontextmanager + async def transaction(self): + async with self.lock_transaction(): async with transcripts_controller.transaction(): yield @@ -198,14 +206,14 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage] # if it's the first part, update the status of the transcript # but do not set the ended status yet. if isinstance(self, PipelineMainLive): - status_mapping = { + status_mapping: dict[str, TranscriptStatus] = { "started": "recording", "push": "recording", "flush": "processing", "error": "error", } elif isinstance(self, PipelineMainFinalSummaries): - status_mapping = { + status_mapping: dict[str, TranscriptStatus] = { "push": "processing", "flush": "processing", "error": "error", @@ -221,22 +229,8 @@ class PipelineMainBase(PipelineRunner[PipelineMessage], Generic[PipelineMessage] return # when the status of the pipeline changes, update the transcript - async with self.transaction(): - transcript = await self.get_transcript() - if status == transcript.status: - return - resp = await transcripts_controller.append_event( - transcript=transcript, - event="STATUS", - data=StrValue(value=status), - ) - await transcripts_controller.update( - transcript, - { - "status": status, - }, - ) - return resp + async with self._lock: + return await transcripts_controller.set_status(self.transcript_id, status) @broadcast_to_sockets async def on_transcript(self, data): diff --git a/server/reflector/views/transcripts_process.py b/server/reflector/views/transcripts_process.py index 8f6d3ab6..0200e7f8 100644 --- a/server/reflector/views/transcripts_process.py +++ b/server/reflector/views/transcripts_process.py @@ -6,7 +6,7 @@ from pydantic import BaseModel import reflector.auth as auth from reflector.db.transcripts import transcripts_controller -from reflector.pipelines.main_live_pipeline import task_pipeline_process +from reflector.pipelines.main_file_pipeline import task_pipeline_file_process router = APIRouter() @@ -40,7 +40,7 @@ async def transcript_process( return ProcessStatus(status="already running") # schedule a background task process the file - task_pipeline_process.delay(transcript_id=transcript_id) + task_pipeline_file_process.delay(transcript_id=transcript_id) return ProcessStatus(status="ok") diff --git a/server/reflector/views/transcripts_upload.py b/server/reflector/views/transcripts_upload.py index 18e75dac..8efbc274 100644 --- a/server/reflector/views/transcripts_upload.py +++ b/server/reflector/views/transcripts_upload.py @@ -6,7 +6,7 @@ from pydantic import BaseModel import reflector.auth as auth from reflector.db.transcripts import transcripts_controller -from reflector.pipelines.main_live_pipeline import task_pipeline_process +from reflector.pipelines.main_file_pipeline import task_pipeline_file_process router = APIRouter() @@ -92,6 +92,6 @@ async def transcript_record_upload( await transcripts_controller.update(transcript, {"status": "uploaded"}) # launch a background task to process the file - task_pipeline_process.delay(transcript_id=transcript_id) + task_pipeline_file_process.delay(transcript_id=transcript_id) return UploadStatus(status="ok") diff --git a/server/tests/conftest.py b/server/tests/conftest.py index d739751d..22fe4193 100644 --- 
a/server/tests/conftest.py +++ b/server/tests/conftest.py @@ -178,6 +178,63 @@ async def dummy_diarization(): yield +@pytest.fixture +async def dummy_file_transcript(): + from reflector.processors.file_transcript import FileTranscriptProcessor + from reflector.processors.types import Transcript, Word + + class TestFileTranscriptProcessor(FileTranscriptProcessor): + async def _transcript(self, data): + return Transcript( + text="Hello world. How are you today?", + words=[ + Word(start=0.0, end=0.5, text="Hello", speaker=0), + Word(start=0.5, end=0.6, text=" ", speaker=0), + Word(start=0.6, end=1.0, text="world", speaker=0), + Word(start=1.0, end=1.1, text=".", speaker=0), + Word(start=1.1, end=1.2, text=" ", speaker=0), + Word(start=1.2, end=1.5, text="How", speaker=0), + Word(start=1.5, end=1.6, text=" ", speaker=0), + Word(start=1.6, end=1.8, text="are", speaker=0), + Word(start=1.8, end=1.9, text=" ", speaker=0), + Word(start=1.9, end=2.1, text="you", speaker=0), + Word(start=2.1, end=2.2, text=" ", speaker=0), + Word(start=2.2, end=2.5, text="today", speaker=0), + Word(start=2.5, end=2.6, text="?", speaker=0), + ], + ) + + with patch( + "reflector.processors.file_transcript_auto.FileTranscriptAutoProcessor.__new__" + ) as mock_auto: + mock_auto.return_value = TestFileTranscriptProcessor() + yield + + +@pytest.fixture +async def dummy_file_diarization(): + from reflector.processors.file_diarization import ( + FileDiarizationOutput, + FileDiarizationProcessor, + ) + from reflector.processors.types import DiarizationSegment + + class TestFileDiarizationProcessor(FileDiarizationProcessor): + async def _diarize(self, data): + return FileDiarizationOutput( + diarization=[ + DiarizationSegment(start=0.0, end=1.1, speaker=0), + DiarizationSegment(start=1.2, end=2.6, speaker=1), + ] + ) + + with patch( + "reflector.processors.file_diarization_auto.FileDiarizationAutoProcessor.__new__" + ) as mock_auto: + mock_auto.return_value = TestFileDiarizationProcessor() + yield + + @pytest.fixture async def dummy_transcript_translator(): from reflector.processors.transcript_translator import TranscriptTranslatorProcessor @@ -238,9 +295,13 @@ async def dummy_storage(): with ( patch("reflector.storage.base.Storage.get_instance") as mock_storage, patch("reflector.storage.get_transcripts_storage") as mock_get_transcripts, + patch( + "reflector.pipelines.main_file_pipeline.get_transcripts_storage" + ) as mock_get_transcripts2, ): mock_storage.return_value = dummy mock_get_transcripts.return_value = dummy + mock_get_transcripts2.return_value = dummy yield @@ -260,7 +321,10 @@ def celery_config(): @pytest.fixture(scope="session") def celery_includes(): - return ["reflector.pipelines.main_live_pipeline"] + return [ + "reflector.pipelines.main_live_pipeline", + "reflector.pipelines.main_file_pipeline", + ] @pytest.fixture @@ -302,7 +366,7 @@ async def fake_transcript_with_topics(tmpdir, client): transcript = await transcripts_controller.get_by_id(tid) assert transcript is not None - await transcripts_controller.update(transcript, {"status": "finished"}) + await transcripts_controller.update(transcript, {"status": "ended"}) # manually copy a file at the expected location audio_filename = transcript.audio_mp3_filename diff --git a/server/tests/test_transcripts_audio_download.py b/server/tests/test_transcripts_audio_download.py index 81b74def..e40d0ade 100644 --- a/server/tests/test_transcripts_audio_download.py +++ b/server/tests/test_transcripts_audio_download.py @@ -19,7 +19,7 @@ async def fake_transcript(tmpdir, 
client): transcript = await transcripts_controller.get_by_id(tid) assert transcript is not None - await transcripts_controller.update(transcript, {"status": "finished"}) + await transcripts_controller.update(transcript, {"status": "ended"}) # manually copy a file at the expected location audio_filename = transcript.audio_mp3_filename diff --git a/server/tests/test_transcripts_process.py b/server/tests/test_transcripts_process.py index 3551d718..5f45cf4b 100644 --- a/server/tests/test_transcripts_process.py +++ b/server/tests/test_transcripts_process.py @@ -29,10 +29,10 @@ async def client(app_lifespan): @pytest.mark.asyncio async def test_transcript_process( tmpdir, - whisper_transcript, dummy_llm, dummy_processors, - dummy_diarization, + dummy_file_transcript, + dummy_file_diarization, dummy_storage, client, ): @@ -56,8 +56,8 @@ async def test_transcript_process( assert response.status_code == 200 assert response.json()["status"] == "ok" - # wait for processing to finish (max 10 minutes) - timeout_seconds = 600 # 10 minutes + # wait for processing to finish (max 1 minute) + timeout_seconds = 60 start_time = time.monotonic() while (time.monotonic() - start_time) < timeout_seconds: # fetch the transcript and check if it is ended @@ -75,9 +75,10 @@ async def test_transcript_process( ) assert response.status_code == 200 assert response.json()["status"] == "ok" + await asyncio.sleep(2) - # wait for processing to finish (max 10 minutes) - timeout_seconds = 600 # 10 minutes + # wait for processing to finish (max 1 minute) + timeout_seconds = 60 start_time = time.monotonic() while (time.monotonic() - start_time) < timeout_seconds: # fetch the transcript and check if it is ended @@ -99,4 +100,4 @@ async def test_transcript_process( response = await client.get(f"/transcripts/{tid}/topics") assert response.status_code == 200 assert len(response.json()) == 1 - assert "want to share" in response.json()[0]["transcript"] + assert "Hello world. How are you today?" in response.json()[0]["transcript"] diff --git a/server/tests/test_transcripts_upload.py b/server/tests/test_transcripts_upload.py index ee08b1be..e9a90c7a 100644 --- a/server/tests/test_transcripts_upload.py +++ b/server/tests/test_transcripts_upload.py @@ -12,7 +12,8 @@ async def test_transcript_upload_file( tmpdir, dummy_llm, dummy_processors, - dummy_diarization, + dummy_file_transcript, + dummy_file_diarization, dummy_storage, client, ): @@ -36,8 +37,8 @@ async def test_transcript_upload_file( assert response.status_code == 200 assert response.json()["status"] == "ok" - # wait the processing to finish (max 10 minutes) - timeout_seconds = 600 # 10 minutes + # wait the processing to finish (max 1 minute) + timeout_seconds = 60 start_time = time.monotonic() while (time.monotonic() - start_time) < timeout_seconds: # fetch the transcript and check if it is ended @@ -47,7 +48,7 @@ async def test_transcript_upload_file( break await asyncio.sleep(1) else: - pytest.fail(f"Processing timed out after {timeout_seconds} seconds") + return pytest.fail(f"Processing timed out after {timeout_seconds} seconds") # check the transcript is ended transcript = resp.json() @@ -59,4 +60,4 @@ async def test_transcript_upload_file( response = await client.get(f"/transcripts/{tid}/topics") assert response.status_code == 200 assert len(response.json()) == 1 - assert "want to share" in response.json()[0]["transcript"] + assert "Hello world. How are you today?" 
in response.json()[0]["transcript"] From 6f0c7c1a5e751713366886c8e764c2009e12ba72 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Fri, 29 Aug 2025 08:47:14 -0600 Subject: [PATCH 06/77] feat(cleanup): add automatic data retention for public instances (#574) * feat(cleanup): add automatic data retention for public instances - Add Celery task to clean up anonymous data after configurable retention period - Delete transcripts, meetings, and orphaned recordings older than retention days - Only runs when PUBLIC_MODE is enabled to prevent accidental data loss - Properly removes all associated files (local and S3 storage) - Add manual cleanup tool for testing and intervention - Configure retention via PUBLIC_DATA_RETENTION_DAYS setting (default: 7 days) Fixes #571 * fix: apply pre-commit formatting fixes * fix: properly delete recording files from storage during cleanup - Add storage deletion for orphaned recordings in both cleanup task and manual tool - Delete from storage before removing database records - Log warnings if storage deletion fails but continue with database cleanup * Apply suggestion from @pr-agent-monadical[bot] Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com> * Apply suggestion from @pr-agent-monadical[bot] Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com> * refactor: cleanup_old_data for better logging * fix: linting * test: fix meeting cleanup test to not require room controller - Simplify test by directly inserting meetings into database - Remove dependency on non-existent rooms_controller.create method - Tests now pass successfully * fix: linting * refactor: simplify cleanup tool to use worker implementation - Remove duplicate cleanup logic from manual tool - Use the same _cleanup_old_public_data function from worker - Remove dry-run feature as requested - Prevent code duplication and ensure consistency - Update documentation to reflect changes * refactor: split cleanup worker into smaller functions - Move all imports to the top of the file - Extract cleanup logic into separate functions: - cleanup_old_transcripts() - cleanup_old_meetings() - cleanup_orphaned_recordings() - log_cleanup_results() - Make code more maintainable and testable - Add days parameter support to Celery task - Update manual tool to work with refactored code * feat: add TypedDict typing for cleanup stats - Add CleanupStats TypedDict for better type safety - Update all function signatures to use proper typing - Add return type annotations to _cleanup_old_public_data - Improves code maintainability and IDE support * feat: add CASCADE DELETE to meeting_consent foreign key - Add ondelete="CASCADE" to meeting_consent.meeting_id foreign key - Generate and apply migration to update existing constraint - Remove manual consent deletion from cleanup code - Add unit test to verify CASCADE DELETE behavior * style: linting * fix: alembic migration branchpoint * fix: correct downgrade constraint name in CASCADE DELETE migration * fix: regenerate CASCADE DELETE migration with proper constraint names - Delete problematic migration and regenerate with correct names - Use explicit constraint name in both upgrade and downgrade - Ensure migration works bidirectionally - All tests passing including CASCADE DELETE test * style: linting * refactor: simplify cleanup to use transcripts as entry point - Remove orphaned_recordings cleanup (not part of this PR scope) - Remove separate old_meetings cleanup - Transcripts are now 
the main entry point for cleanup
- Associated meetings and recordings are deleted with their transcript
- Use single database connection for all operations
- Update tests to reflect new approach

* refactor: cleanup and rename functions for clarity

- Rename _cleanup_old_public_data to cleanup_old_public_data (make public)
- Rename celery task to cleanup_old_public_data_task for clarity
- Update docstrings and improve code organization
- Remove unnecessary comments and simplify deletion logic
- Update tests to use new function names
- All tests passing

* style: linting

* style: typing and review

* fix: add transaction on cleanup_single_transcript

* fix: naming

---------

Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com>
---
 server/docs/data_retention.md                 |  95 ++++++
 ..._add_cascade_delete_to_meeting_consent_.py |  50 +++
 server/reflector/asynctask.py                 |  27 ++
 server/reflector/db/meetings.py               |   7 +-
 .../reflector/pipelines/main_file_pipeline.py |   2 +-
 .../reflector/pipelines/main_live_pipeline.py |  25 +-
 server/reflector/settings.py                  |   4 +-
 server/reflector/tools/cleanup_old_data.py    |  72 +++++
 server/reflector/worker/app.py                |  11 +
 server/reflector/worker/cleanup.py            | 156 ++++++++++
 server/tests/test_cleanup.py                  | 287 ++++++++++++++++++
 11 files changed, 708 insertions(+), 28 deletions(-)
 create mode 100644 server/docs/data_retention.md
 create mode 100644 server/migrations/versions/5a8907fd1d78_add_cascade_delete_to_meeting_consent_.py
 create mode 100644 server/reflector/asynctask.py
 create mode 100644 server/reflector/tools/cleanup_old_data.py
 create mode 100644 server/reflector/worker/cleanup.py
 create mode 100644 server/tests/test_cleanup.py

diff --git a/server/docs/data_retention.md b/server/docs/data_retention.md
new file mode 100644
index 00000000..1a21b59d
--- /dev/null
+++ b/server/docs/data_retention.md
@@ -0,0 +1,95 @@
+# Data Retention and Cleanup
+
+## Overview
+
+For public instances of Reflector, a data retention policy is automatically enforced to delete anonymous user data after a configurable period (default: 7 days). This ensures compliance with privacy expectations and prevents unbounded storage growth.
+
+## Configuration
+
+### Environment Variables
+
+- `PUBLIC_MODE` (bool): Must be set to `true` to enable automatic cleanup
+- `PUBLIC_DATA_RETENTION_DAYS` (int): Number of days to retain anonymous data (default: 7)
+
+### What Gets Deleted
+
+When data reaches the retention period, the following items are automatically removed:
+
+1.
**Transcripts** from anonymous users (where `user_id` is NULL): + - Database records + - Local files (audio.wav, audio.mp3, audio.json waveform) + - Storage files (cloud storage if configured) + +## Automatic Cleanup + +### Celery Beat Schedule + +When `PUBLIC_MODE=true`, a Celery beat task runs daily at 3 AM to clean up old data: + +```python +# Automatically scheduled when PUBLIC_MODE=true +"cleanup_old_public_data": { + "task": "reflector.worker.cleanup.cleanup_old_public_data", + "schedule": crontab(hour=3, minute=0), # Daily at 3 AM +} +``` + +### Running the Worker + +Ensure both Celery worker and beat scheduler are running: + +```bash +# Start Celery worker +uv run celery -A reflector.worker.app worker --loglevel=info + +# Start Celery beat scheduler (in another terminal) +uv run celery -A reflector.worker.app beat +``` + +## Manual Cleanup + +For testing or manual intervention, use the cleanup tool: + +```bash +# Delete data older than 7 days (default) +uv run python -m reflector.tools.cleanup_old_data + +# Delete data older than 30 days +uv run python -m reflector.tools.cleanup_old_data --days 30 +``` + +Note: The manual tool uses the same implementation as the Celery worker task to ensure consistency. + +## Important Notes + +1. **User Data Deletion**: Only anonymous data (where `user_id` is NULL) is deleted. Authenticated user data is preserved. + +2. **Storage Cleanup**: The system properly cleans up both local files and cloud storage when configured. + +3. **Error Handling**: If individual deletions fail, the cleanup continues and logs errors. Failed deletions are reported in the task output. + +4. **Public Instance Only**: The automatic cleanup task only runs when `PUBLIC_MODE=true` to prevent accidental data loss in private deployments. + +## Testing + +Run the cleanup tests: + +```bash +uv run pytest tests/test_cleanup.py -v +``` + +## Monitoring + +Check Celery logs for cleanup task execution: + +```bash +# Look for cleanup task logs +grep "cleanup_old_public_data" celery.log +grep "Starting cleanup of old public data" celery.log +``` + +Task statistics are logged after each run: +- Number of transcripts deleted +- Number of meetings deleted +- Number of orphaned recordings deleted +- Any errors encountered diff --git a/server/migrations/versions/5a8907fd1d78_add_cascade_delete_to_meeting_consent_.py b/server/migrations/versions/5a8907fd1d78_add_cascade_delete_to_meeting_consent_.py new file mode 100644 index 00000000..af6a5c22 --- /dev/null +++ b/server/migrations/versions/5a8907fd1d78_add_cascade_delete_to_meeting_consent_.py @@ -0,0 +1,50 @@ +"""add cascade delete to meeting consent foreign key + +Revision ID: 5a8907fd1d78 +Revises: 0ab2d7ffaa16 +Create Date: 2025-08-26 17:26:50.945491 + +""" + +from typing import Sequence, Union + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "5a8907fd1d78" +down_revision: Union[str, None] = "0ab2d7ffaa16" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table("meeting_consent", schema=None) as batch_op: + batch_op.drop_constraint( + batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey" + ) + batch_op.create_foreign_key( + batch_op.f("meeting_consent_meeting_id_fkey"), + "meeting", + ["meeting_id"], + ["id"], + ondelete="CASCADE", + ) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("meeting_consent", schema=None) as batch_op: + batch_op.drop_constraint( + batch_op.f("meeting_consent_meeting_id_fkey"), type_="foreignkey" + ) + batch_op.create_foreign_key( + batch_op.f("meeting_consent_meeting_id_fkey"), + "meeting", + ["meeting_id"], + ["id"], + ) + + # ### end Alembic commands ### diff --git a/server/reflector/asynctask.py b/server/reflector/asynctask.py new file mode 100644 index 00000000..61523a6f --- /dev/null +++ b/server/reflector/asynctask.py @@ -0,0 +1,27 @@ +import asyncio +import functools + +from reflector.db import get_database + + +def asynctask(f): + @functools.wraps(f) + def wrapper(*args, **kwargs): + async def run_with_db(): + database = get_database() + await database.connect() + try: + return await f(*args, **kwargs) + finally: + await database.disconnect() + + coro = run_with_db() + try: + loop = asyncio.get_running_loop() + except RuntimeError: + loop = None + if loop and loop.is_running(): + return loop.run_until_complete(coro) + return asyncio.run(coro) + + return wrapper diff --git a/server/reflector/db/meetings.py b/server/reflector/db/meetings.py index 40bd6f8a..85178351 100644 --- a/server/reflector/db/meetings.py +++ b/server/reflector/db/meetings.py @@ -54,7 +54,12 @@ meeting_consent = sa.Table( "meeting_consent", metadata, sa.Column("id", sa.String, primary_key=True), - sa.Column("meeting_id", sa.String, sa.ForeignKey("meeting.id"), nullable=False), + sa.Column( + "meeting_id", + sa.String, + sa.ForeignKey("meeting.id", ondelete="CASCADE"), + nullable=False, + ), sa.Column("user_id", sa.String), sa.Column("consent_given", sa.Boolean, nullable=False), sa.Column("consent_timestamp", sa.DateTime(timezone=True), nullable=False), diff --git a/server/reflector/pipelines/main_file_pipeline.py b/server/reflector/pipelines/main_file_pipeline.py index f11cddca..42333aa9 100644 --- a/server/reflector/pipelines/main_file_pipeline.py +++ b/server/reflector/pipelines/main_file_pipeline.py @@ -13,6 +13,7 @@ import av import structlog from celery import shared_task +from reflector.asynctask import asynctask from reflector.db.transcripts import ( Transcript, TranscriptStatus, @@ -21,7 +22,6 @@ from reflector.db.transcripts import ( from reflector.logger import logger from reflector.pipelines.main_live_pipeline import ( PipelineMainBase, - asynctask, broadcast_to_sockets, ) from reflector.processors import ( diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index 30c8777b..64904952 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -22,7 +22,7 @@ from celery import chord, current_task, group, shared_task from pydantic import BaseModel from structlog import BoundLogger as Logger -from reflector.db import get_database +from reflector.asynctask import asynctask from reflector.db.meetings import meeting_consent_controller, meetings_controller from reflector.db.recordings import recordings_controller from reflector.db.rooms import rooms_controller @@ -70,29 
+70,6 @@ from reflector.zulip import (
 )
 
 
-def asynctask(f):
-    @functools.wraps(f)
-    def wrapper(*args, **kwargs):
-        async def run_with_db():
-            database = get_database()
-            await database.connect()
-            try:
-                return await f(*args, **kwargs)
-            finally:
-                await database.disconnect()
-
-        coro = run_with_db()
-        try:
-            loop = asyncio.get_running_loop()
-        except RuntimeError:
-            loop = None
-        if loop and loop.is_running():
-            return loop.run_until_complete(coro)
-        return asyncio.run(coro)
-
-    return wrapper
-
-
 def broadcast_to_sockets(func):
     """
     Decorator to broadcast transcript event to websockets
diff --git a/server/reflector/settings.py b/server/reflector/settings.py
index bbc835cd..686f67c1 100644
--- a/server/reflector/settings.py
+++ b/server/reflector/settings.py
@@ -1,3 +1,4 @@
+from pydantic.types import PositiveInt
 from pydantic_settings import BaseSettings, SettingsConfigDict
 
 
@@ -90,9 +91,8 @@ class Settings(BaseSettings):
     AUTH_JWT_PUBLIC_KEY: str | None = "authentik.monadical.com_public.pem"
     AUTH_JWT_AUDIENCE: str | None = None
 
-    # API public mode
-    # if set, all anonymous record will be public
     PUBLIC_MODE: bool = False
+    PUBLIC_DATA_RETENTION_DAYS: PositiveInt = 7
 
     # Min transcript length to generate topic + summary
     MIN_TRANSCRIPT_LENGTH: int = 750
diff --git a/server/reflector/tools/cleanup_old_data.py b/server/reflector/tools/cleanup_old_data.py
new file mode 100644
index 00000000..9ffa4684
--- /dev/null
+++ b/server/reflector/tools/cleanup_old_data.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+"""
+Manual cleanup tool for old public data.
+Uses the same implementation as the Celery worker task.
+"""
+
+import argparse
+import asyncio
+import sys
+
+import structlog
+
+from reflector.settings import settings
+from reflector.worker.cleanup import cleanup_old_public_data
+
+logger = structlog.get_logger(__name__)
+
+
+async def cleanup_old_data(days: int = 7):
+    logger.info(
+        "Starting manual cleanup",
+        retention_days=days,
+        public_mode=settings.PUBLIC_MODE,
+    )
+
+    if not settings.PUBLIC_MODE:
+        logger.critical(
+            "WARNING: PUBLIC_MODE is False. "
+            "This tool is intended for public instances only."
+        )
+        raise Exception("Tool intended for public instances only")
+
+    result = await cleanup_old_public_data(days=days)
+
+    if result:
+        logger.info(
+            "Cleanup completed",
+            transcripts_deleted=result.get("transcripts_deleted", 0),
+            meetings_deleted=result.get("meetings_deleted", 0),
+            recordings_deleted=result.get("recordings_deleted", 0),
+            errors_count=len(result.get("errors", [])),
+        )
+        if result.get("errors"):
+            logger.warning(
+                "Errors encountered during cleanup:", errors=result["errors"][:10]
+            )
+    else:
+        logger.info("Cleanup skipped or completed without results")
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Clean up old transcripts and meetings"
+    )
+    parser.add_argument(
+        "--days",
+        type=int,
+        default=7,
+        help="Number of days to keep data (default: 7)",
+    )
+
+    args = parser.parse_args()
+
+    if args.days < 1:
+        logger.error("Days must be at least 1")
+        sys.exit(1)
+
+    asyncio.run(cleanup_old_data(days=args.days))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/server/reflector/worker/app.py b/server/reflector/worker/app.py
index 7e888f41..e9468bd2 100644
--- a/server/reflector/worker/app.py
+++ b/server/reflector/worker/app.py
@@ -19,6 +19,7 @@ else:
             "reflector.pipelines.main_live_pipeline",
             "reflector.worker.healthcheck",
             "reflector.worker.process",
+            "reflector.worker.cleanup",
         ]
     )
 
@@ -38,6 +39,16 @@ else:
         },
     }
 
+    if settings.PUBLIC_MODE:
+        app.conf.beat_schedule["cleanup_old_public_data"] = {
+            "task": "reflector.worker.cleanup.cleanup_old_public_data_task",
+            "schedule": crontab(hour=3, minute=0),
+        }
+        logger.info(
+            "Public mode cleanup enabled",
+            retention_days=settings.PUBLIC_DATA_RETENTION_DAYS,
+        )
+
     if settings.HEALTHCHECK_URL:
         app.conf.beat_schedule["healthcheck_ping"] = {
             "task": "reflector.worker.healthcheck.healthcheck_ping",
diff --git a/server/reflector/worker/cleanup.py b/server/reflector/worker/cleanup.py
new file mode 100644
index 00000000..e634994d
--- /dev/null
+++ b/server/reflector/worker/cleanup.py
@@ -0,0 +1,156 @@
+"""
+Main task for cleaning up old public data.
+
+Deletes old anonymous transcripts and their associated meetings/recordings.
+Transcripts are the main entry point - any associated data is also removed.
+""" + +import asyncio +from datetime import datetime, timedelta, timezone +from typing import TypedDict + +import structlog +from celery import shared_task +from databases import Database +from pydantic.types import PositiveInt + +from reflector.asynctask import asynctask +from reflector.db import get_database +from reflector.db.meetings import meetings +from reflector.db.recordings import recordings +from reflector.db.transcripts import transcripts, transcripts_controller +from reflector.settings import settings +from reflector.storage import get_recordings_storage + +logger = structlog.get_logger(__name__) + + +class CleanupStats(TypedDict): + """Statistics for cleanup operation.""" + + transcripts_deleted: int + meetings_deleted: int + recordings_deleted: int + errors: list[str] + + +async def delete_single_transcript( + db: Database, transcript_data: dict, stats: CleanupStats +): + transcript_id = transcript_data["id"] + meeting_id = transcript_data["meeting_id"] + recording_id = transcript_data["recording_id"] + + try: + async with db.transaction(isolation="serializable"): + if meeting_id: + await db.execute(meetings.delete().where(meetings.c.id == meeting_id)) + stats["meetings_deleted"] += 1 + logger.info("Deleted associated meeting", meeting_id=meeting_id) + + if recording_id: + recording = await db.fetch_one( + recordings.select().where(recordings.c.id == recording_id) + ) + if recording: + try: + await get_recordings_storage().delete_file( + recording["object_key"] + ) + except Exception as storage_error: + logger.warning( + "Failed to delete recording from storage", + recording_id=recording_id, + object_key=recording["object_key"], + error=str(storage_error), + ) + + await db.execute( + recordings.delete().where(recordings.c.id == recording_id) + ) + stats["recordings_deleted"] += 1 + logger.info( + "Deleted associated recording", recording_id=recording_id + ) + + await transcripts_controller.remove_by_id(transcript_id) + stats["transcripts_deleted"] += 1 + logger.info( + "Deleted transcript", + transcript_id=transcript_id, + created_at=transcript_data["created_at"].isoformat(), + ) + except Exception as e: + error_msg = f"Failed to delete transcript {transcript_id}: {str(e)}" + logger.error(error_msg, exc_info=e) + stats["errors"].append(error_msg) + + +async def cleanup_old_transcripts( + db: Database, cutoff_date: datetime, stats: CleanupStats +): + """Delete old anonymous transcripts and their associated recordings/meetings.""" + query = transcripts.select().where( + (transcripts.c.created_at < cutoff_date) & (transcripts.c.user_id.is_(None)) + ) + old_transcripts = await db.fetch_all(query) + + logger.info(f"Found {len(old_transcripts)} old transcripts to delete") + + for transcript_data in old_transcripts: + await delete_single_transcript(db, transcript_data, stats) + + +def log_cleanup_results(stats: CleanupStats): + logger.info( + "Cleanup completed", + transcripts_deleted=stats["transcripts_deleted"], + meetings_deleted=stats["meetings_deleted"], + recordings_deleted=stats["recordings_deleted"], + errors_count=len(stats["errors"]), + ) + + if stats["errors"]: + logger.warning( + "Cleanup completed with errors", + errors=stats["errors"][:10], + ) + + +async def cleanup_old_public_data( + days: PositiveInt | None = None, +) -> CleanupStats | None: + if days is None: + days = settings.PUBLIC_DATA_RETENTION_DAYS + + if not settings.PUBLIC_MODE: + logger.info("Skipping cleanup - not a public instance") + return None + + cutoff_date = datetime.now(timezone.utc) - 
timedelta(days=days)
+    logger.info(
+        "Starting cleanup of old public data",
+        cutoff_date=cutoff_date.isoformat(),
+    )
+
+    stats: CleanupStats = {
+        "transcripts_deleted": 0,
+        "meetings_deleted": 0,
+        "recordings_deleted": 0,
+        "errors": [],
+    }
+
+    db = get_database()
+    await cleanup_old_transcripts(db, cutoff_date, stats)
+
+    log_cleanup_results(stats)
+    return stats
+
+
+@shared_task(
+    autoretry_for=(Exception,),
+    retry_kwargs={"max_retries": 3, "countdown": 300},
+)
+@asynctask
+async def cleanup_old_public_data_task(days: int | None = None):
+    return await cleanup_old_public_data(days=days)
diff --git a/server/tests/test_cleanup.py b/server/tests/test_cleanup.py
new file mode 100644
index 00000000..3c5149ae
--- /dev/null
+++ b/server/tests/test_cleanup.py
@@ -0,0 +1,287 @@
+from datetime import datetime, timedelta, timezone
+from unittest.mock import AsyncMock, patch
+
+import pytest
+
+from reflector.db.recordings import Recording, recordings_controller
+from reflector.db.transcripts import SourceKind, transcripts_controller
+from reflector.worker.cleanup import cleanup_old_public_data
+
+
+@pytest.mark.asyncio
+async def test_cleanup_old_public_data_skips_when_not_public():
+    """Test that cleanup is skipped when PUBLIC_MODE is False."""
+    with patch("reflector.worker.cleanup.settings") as mock_settings:
+        mock_settings.PUBLIC_MODE = False
+
+        result = await cleanup_old_public_data()
+
+        # Should return early without doing anything
+        assert result is None
+
+
+@pytest.mark.asyncio
+async def test_cleanup_old_public_data_deletes_old_anonymous_transcripts():
+    """Test that old anonymous transcripts are deleted."""
+    # Create old and new anonymous transcripts
+    old_date = datetime.now(timezone.utc) - timedelta(days=8)
+    new_date = datetime.now(timezone.utc) - timedelta(days=2)
+
+    # Create old anonymous transcript (should be deleted)
+    old_transcript = await transcripts_controller.add(
+        name="Old Anonymous Transcript",
+        source_kind=SourceKind.FILE,
+        user_id=None,  # Anonymous
+    )
+    # Manually update created_at to be old
+    from reflector.db import get_database
+    from reflector.db.transcripts import transcripts
+
+    await get_database().execute(
+        transcripts.update()
+        .where(transcripts.c.id == old_transcript.id)
+        .values(created_at=old_date)
+    )
+
+    # Create new anonymous transcript (should NOT be deleted)
+    new_transcript = await transcripts_controller.add(
+        name="New Anonymous Transcript",
+        source_kind=SourceKind.FILE,
+        user_id=None,  # Anonymous
+    )
+
+    # Create old transcript with user (should NOT be deleted)
+    old_user_transcript = await transcripts_controller.add(
+        name="Old User Transcript",
+        source_kind=SourceKind.FILE,
+        user_id="user123",
+    )
+    await get_database().execute(
+        transcripts.update()
+        .where(transcripts.c.id == old_user_transcript.id)
+        .values(created_at=old_date)
+    )
+
+    with patch("reflector.worker.cleanup.settings") as mock_settings:
+        mock_settings.PUBLIC_MODE = True
+        mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7
+
+        # Mock the storage deletion
+        with patch("reflector.db.transcripts.get_transcripts_storage") as mock_storage:
+            mock_storage.return_value.delete_file = AsyncMock()
+
+            result = await cleanup_old_public_data()
+
+    # Check results
+    assert result["transcripts_deleted"] == 1
+    assert result["errors"] == []
+
+    # Verify old anonymous transcript was deleted
+    assert await transcripts_controller.get_by_id(old_transcript.id) is None
+
+    # Verify new anonymous transcript still exists
+    assert await
transcripts_controller.get_by_id(new_transcript.id) is not None + + # Verify user transcript still exists + assert await transcripts_controller.get_by_id(old_user_transcript.id) is not None + + +@pytest.mark.asyncio +async def test_cleanup_deletes_associated_meeting_and_recording(): + """Test that meetings and recordings associated with old transcripts are deleted.""" + from reflector.db import get_database + from reflector.db.meetings import meetings + from reflector.db.transcripts import transcripts + + old_date = datetime.now(timezone.utc) - timedelta(days=8) + + # Create a meeting + meeting_id = "test-meeting-for-transcript" + await get_database().execute( + meetings.insert().values( + id=meeting_id, + room_name="Meeting with Transcript", + room_url="https://example.com/meeting", + host_room_url="https://example.com/meeting-host", + start_date=old_date, + end_date=old_date + timedelta(hours=1), + user_id=None, + room_id=None, + ) + ) + + # Create a recording + recording = await recordings_controller.create( + Recording( + bucket_name="test-bucket", + object_key="test-recording.mp4", + recorded_at=old_date, + ) + ) + + # Create an old transcript with both meeting and recording + old_transcript = await transcripts_controller.add( + name="Old Transcript with Meeting and Recording", + source_kind=SourceKind.ROOM, + user_id=None, + meeting_id=meeting_id, + recording_id=recording.id, + ) + + # Update created_at to be old + await get_database().execute( + transcripts.update() + .where(transcripts.c.id == old_transcript.id) + .values(created_at=old_date) + ) + + with patch("reflector.worker.cleanup.settings") as mock_settings: + mock_settings.PUBLIC_MODE = True + mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7 + + # Mock storage deletion + with patch("reflector.db.transcripts.get_transcripts_storage") as mock_storage: + mock_storage.return_value.delete_file = AsyncMock() + with patch( + "reflector.worker.cleanup.get_recordings_storage" + ) as mock_rec_storage: + mock_rec_storage.return_value.delete_file = AsyncMock() + + result = await cleanup_old_public_data() + + # Check results + assert result["transcripts_deleted"] == 1 + assert result["meetings_deleted"] == 1 + assert result["recordings_deleted"] == 1 + assert result["errors"] == [] + + # Verify transcript was deleted + assert await transcripts_controller.get_by_id(old_transcript.id) is None + + # Verify meeting was deleted + query = meetings.select().where(meetings.c.id == meeting_id) + meeting_result = await get_database().fetch_one(query) + assert meeting_result is None + + # Verify recording was deleted + assert await recordings_controller.get_by_id(recording.id) is None + + +@pytest.mark.asyncio +async def test_cleanup_handles_errors_gracefully(): + """Test that cleanup continues even when individual deletions fail.""" + old_date = datetime.now(timezone.utc) - timedelta(days=8) + + # Create multiple old transcripts + transcript1 = await transcripts_controller.add( + name="Transcript 1", + source_kind=SourceKind.FILE, + user_id=None, + ) + transcript2 = await transcripts_controller.add( + name="Transcript 2", + source_kind=SourceKind.FILE, + user_id=None, + ) + + # Update created_at to be old + from reflector.db import get_database + from reflector.db.transcripts import transcripts + + for t_id in [transcript1.id, transcript2.id]: + await get_database().execute( + transcripts.update() + .where(transcripts.c.id == t_id) + .values(created_at=old_date) + ) + + with patch("reflector.worker.cleanup.settings") as mock_settings: + 
mock_settings.PUBLIC_MODE = True + mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7 + + # Mock remove_by_id to fail for the first transcript + original_remove = transcripts_controller.remove_by_id + call_count = 0 + + async def mock_remove_by_id(transcript_id, user_id=None): + nonlocal call_count + call_count += 1 + if call_count == 1: + raise Exception("Simulated deletion error") + return await original_remove(transcript_id, user_id) + + with patch.object( + transcripts_controller, "remove_by_id", side_effect=mock_remove_by_id + ): + result = await cleanup_old_public_data() + + # Should have one successful deletion and one error + assert result["transcripts_deleted"] == 1 + assert len(result["errors"]) == 1 + assert "Failed to delete transcript" in result["errors"][0] + + +@pytest.mark.asyncio +async def test_meeting_consent_cascade_delete(): + """Test that meeting_consent records are automatically deleted when meeting is deleted.""" + from reflector.db import get_database + from reflector.db.meetings import ( + meeting_consent, + meeting_consent_controller, + meetings, + ) + + # Create a meeting + meeting_id = "test-cascade-meeting" + await get_database().execute( + meetings.insert().values( + id=meeting_id, + room_name="Test Meeting for CASCADE", + room_url="https://example.com/cascade-test", + host_room_url="https://example.com/cascade-test-host", + start_date=datetime.now(timezone.utc), + end_date=datetime.now(timezone.utc) + timedelta(hours=1), + user_id="test-user", + room_id=None, + ) + ) + + # Create consent records for this meeting + consent1_id = "consent-1" + consent2_id = "consent-2" + + await get_database().execute( + meeting_consent.insert().values( + id=consent1_id, + meeting_id=meeting_id, + user_id="user1", + consent_given=True, + consent_timestamp=datetime.now(timezone.utc), + ) + ) + + await get_database().execute( + meeting_consent.insert().values( + id=consent2_id, + meeting_id=meeting_id, + user_id="user2", + consent_given=False, + consent_timestamp=datetime.now(timezone.utc), + ) + ) + + # Verify consent records exist + consents = await meeting_consent_controller.get_by_meeting_id(meeting_id) + assert len(consents) == 2 + + # Delete the meeting + await get_database().execute(meetings.delete().where(meetings.c.id == meeting_id)) + + # Verify meeting is deleted + query = meetings.select().where(meetings.c.id == meeting_id) + result = await get_database().fetch_one(query) + assert result is None + + # Verify consent records are automatically deleted (CASCADE DELETE) + consents_after = await meeting_consent_controller.get_by_meeting_id(meeting_id) + assert len(consents_after) == 0 From 88ed7cfa7804794b9b54cad4c3facc8a98cf85fd Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Fri, 29 Aug 2025 10:07:49 -0600 Subject: [PATCH 07/77] feat(rooms): add webhook for transcript completion (#578) * feat(rooms): add webhook notifications for transcript completion - Add webhook_url and webhook_secret fields to rooms table - Create Celery task with 24-hour retry window using exponential backoff - Send transcript metadata, diarized text, topics, and summaries via webhook - Add HMAC signature verification for webhook security - Add test endpoint POST /v1/rooms/{room_id}/webhook/test - Update frontend with webhook configuration UI and test button - Auto-generate webhook secret if not provided - Trigger webhook after successful file pipeline processing for room recordings * style: linting * fix: remove unwanted files * fix: update openapi gen * fix: self-review * docs: add comprehensive 
webhook documentation - Document webhook configuration, events, and payloads - Include transcript.completed and test event examples - Add security considerations and best practices - Provide example webhook receiver implementation - Document retry policy and signature verification * fix: remove audio_mp3_url from webhook payload - Remove audio download URL generation from webhook - Update documentation to reflect the change - Keep only frontend_url for accessing transcripts * docs: remove unwanted section * fix: correct API method name and type imports for rooms - Fix v1RoomsRetrieve to v1RoomsGet - Update Room type to RoomDetails throughout frontend - Fix type imports in useRoomList, RoomList, RoomTable, and RoomCards * feat: add show/hide toggle for webhook secret field - Add eye icon button to reveal/hide webhook secret when editing - Show password dots when webhook secret is hidden - Reset visibility state when opening/closing dialog - Only show toggle button when editing existing room with secret * fix: resolve event loop conflict in webhook test endpoint - Extract webhook test logic into shared async function - Call async function directly from FastAPI endpoint - Keep Celery task wrapper for background processing - Fixes RuntimeError: event loop already running * refactor: remove unnecessary Celery task for webhook testing - Webhook testing is synchronous and provides immediate feedback - No need for background processing via Celery - Keep only the async function called directly from API endpoint * feat: improve webhook test error messages and display - Show HTTP status code in error messages - Parse JSON error responses to extract meaningful messages - Improved UI layout for webhook test results - Added colored background for success/error states - Better text wrapping for long error messages * docs: adjust doc * fix: review * fix: update attempts to match close 24h * fix: add event_id * fix: changed to uuid, to have new event_id when reprocess. * style: linting * fix: alembic revision --- server/docs/webhook.md | 212 ++++++++++++++ ...194f65cd6d3_add_webhook_fields_to_rooms.py | 36 +++ server/reflector/db/rooms.py | 15 + .../reflector/pipelines/main_file_pipeline.py | 19 +- server/reflector/views/rooms.py | 59 +++- server/reflector/worker/webhook.py | 258 ++++++++++++++++++ www/app/(app)/rooms/_components/RoomCards.tsx | 4 +- www/app/(app)/rooms/_components/RoomList.tsx | 4 +- www/app/(app)/rooms/_components/RoomTable.tsx | 4 +- www/app/(app)/rooms/page.tsx | 243 +++++++++++++++-- www/app/(app)/rooms/useRoomList.tsx | 6 +- www/app/api/schemas.gen.ts | 150 +++++++++- www/app/api/services.gen.ts | 53 +++- www/app/api/types.gen.ts | 81 +++++- 14 files changed, 1102 insertions(+), 42 deletions(-) create mode 100644 server/docs/webhook.md create mode 100644 server/migrations/versions/0194f65cd6d3_add_webhook_fields_to_rooms.py create mode 100644 server/reflector/worker/webhook.py diff --git a/server/docs/webhook.md b/server/docs/webhook.md new file mode 100644 index 00000000..9fe88fb9 --- /dev/null +++ b/server/docs/webhook.md @@ -0,0 +1,212 @@ +# Reflector Webhook Documentation + +## Overview + +Reflector supports webhook notifications to notify external systems when transcript processing is completed. Webhooks can be configured per room and are triggered automatically after a transcript is successfully processed. 
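+
+An external system only needs to expose an HTTP endpoint that accepts the POST requests described below. As a minimal sketch of such a receiver (illustrative only, not part of Reflector; the port and handling logic are assumptions, and a production receiver should also verify signatures as described later in this document):
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+
+class ReflectorWebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        # Read the JSON body sent by Reflector
+        length = int(self.headers.get("Content-Length", 0))
+        payload = json.loads(self.rfile.read(length))
+        event = self.headers.get("X-Webhook-Event", "")
+
+        if event == "transcript.completed":
+            # Hand off to your own processing; respond quickly so the
+            # 30-second webhook timeout is not hit
+            print(payload["transcript"]["id"], payload["transcript"]["title"])
+
+        self.send_response(200)
+        self.end_headers()
+        self.wfile.write(b"OK")
+
+
+if __name__ == "__main__":
+    HTTPServer(("0.0.0.0", 8000), ReflectorWebhookHandler).serve_forever()
+```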
+ +## Configuration + +Webhooks are configured at the room level with two fields: +- `webhook_url`: The HTTPS endpoint to receive webhook notifications +- `webhook_secret`: Optional secret key for HMAC signature verification (auto-generated if not provided) + +## Events + +### `transcript.completed` + +Triggered when a transcript has been fully processed, including transcription, diarization, summarization, and topic detection. + +### `test` + +A test event that can be triggered manually to verify webhook configuration. + +## Webhook Request Format + +### Headers + +All webhook requests include the following headers: + +| Header | Description | Example | +|--------|-------------|---------| +| `Content-Type` | Always `application/json` | `application/json` | +| `User-Agent` | Identifies Reflector as the source | `Reflector-Webhook/1.0` | +| `X-Webhook-Event` | The event type | `transcript.completed` or `test` | +| `X-Webhook-Retry` | Current retry attempt number | `0`, `1`, `2`... | +| `X-Webhook-Signature` | HMAC signature (if secret configured) | `t=1735306800,v1=abc123...` | + +### Signature Verification + +If a webhook secret is configured, Reflector includes an HMAC-SHA256 signature in the `X-Webhook-Signature` header to verify the webhook authenticity. + +The signature format is: `t={timestamp},v1={signature}` + +To verify the signature: +1. Extract the timestamp and signature from the header +2. Create the signed payload: `{timestamp}.{request_body}` +3. Compute HMAC-SHA256 of the signed payload using your webhook secret +4. Compare the computed signature with the received signature + +Example verification (Python): +```python +import hmac +import hashlib + +def verify_webhook_signature(payload: bytes, signature_header: str, secret: str) -> bool: + # Parse header: "t=1735306800,v1=abc123..." + parts = dict(part.split("=") for part in signature_header.split(",")) + timestamp = parts["t"] + received_signature = parts["v1"] + + # Create signed payload + signed_payload = f"{timestamp}.{payload.decode('utf-8')}" + + # Compute expected signature + expected_signature = hmac.new( + secret.encode("utf-8"), + signed_payload.encode("utf-8"), + hashlib.sha256 + ).hexdigest() + + # Compare signatures + return hmac.compare_digest(expected_signature, received_signature) +``` + +## Event Payloads + +### `transcript.completed` Event + +This event includes a convenient URL for accessing the transcript: +- `frontend_url`: Direct link to view the transcript in the web interface + +```json +{ + "event": "transcript.completed", + "event_id": "transcript.completed-abc-123-def-456", + "timestamp": "2025-08-27T12:34:56.789012Z", + "transcript": { + "id": "abc-123-def-456", + "room_id": "room-789", + "created_at": "2025-08-27T12:00:00Z", + "duration": 1800.5, + "title": "Q3 Product Planning Meeting", + "short_summary": "Team discussed Q3 product roadmap, prioritizing mobile app features and API improvements.", + "long_summary": "The product team met to finalize the Q3 roadmap. Key decisions included...", + "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\nWelcome everyone to today's meeting...", + "topics": [ + { + "title": "Introduction and Agenda", + "summary": "Meeting kickoff with agenda review", + "timestamp": 0.0, + "duration": 120.0, + "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:05.000\nWelcome everyone..." 
+ }, + { + "title": "Mobile App Features Discussion", + "summary": "Team reviewed proposed mobile app features for Q3", + "timestamp": 120.0, + "duration": 600.0, + "webvtt": "WEBVTT\n\n00:02:00.000 --> 00:02:10.000\nLet's talk about the mobile app..." + } + ], + "participants": [ + { + "id": "participant-1", + "name": "John Doe", + "speaker": "Speaker 1" + }, + { + "id": "participant-2", + "name": "Jane Smith", + "speaker": "Speaker 2" + } + ], + "source_language": "en", + "target_language": "en", + "status": "completed", + "frontend_url": "https://app.reflector.com/transcripts/abc-123-def-456" + }, + "room": { + "id": "room-789", + "name": "Product Team Room" + } +} +``` + +### `test` Event + +```json +{ + "event": "test", + "event_id": "test.2025-08-27T12:34:56.789012Z", + "timestamp": "2025-08-27T12:34:56.789012Z", + "message": "This is a test webhook from Reflector", + "room": { + "id": "room-789", + "name": "Product Team Room" + } +} +``` + +## Retry Policy + +Webhooks are delivered with automatic retry logic to handle transient failures. When a webhook delivery fails due to server errors or network issues, Reflector will automatically retry the delivery multiple times over an extended period. + +### Retry Mechanism + +Reflector implements an exponential backoff strategy for webhook retries: + +- **Initial retry delay**: 60 seconds after the first failure +- **Exponential backoff**: Each subsequent retry waits approximately twice as long as the previous one +- **Maximum retry interval**: 1 hour (backoff is capped at this duration) +- **Maximum retry attempts**: 30 attempts total +- **Total retry duration**: Retries continue for approximately 24 hours + +### How Retries Work + +When a webhook fails, Reflector will: +1. Wait 60 seconds, then retry (attempt #1) +2. If it fails again, wait ~2 minutes, then retry (attempt #2) +3. Continue doubling the wait time up to a maximum of 1 hour between attempts +4. Keep retrying at 1-hour intervals until successful or 30 attempts are exhausted + +The `X-Webhook-Retry` header indicates the current retry attempt number (0 for the initial attempt, 1 for first retry, etc.), allowing your endpoint to track retry attempts. + +### Retry Behavior by HTTP Status Code + +| Status Code | Behavior | +|-------------|----------| +| 2xx (Success) | No retry, webhook marked as delivered | +| 4xx (Client Error) | No retry, request is considered permanently failed | +| 5xx (Server Error) | Automatic retry with exponential backoff | +| Network/Timeout Error | Automatic retry with exponential backoff | + +**Important Notes:** +- Webhooks timeout after 30 seconds. If your endpoint takes longer to respond, it will be considered a timeout error and retried. +- During the retry period (~24 hours), you may receive the same webhook multiple times if your endpoint experiences intermittent failures. +- There is no mechanism to manually retry failed webhooks after the retry period expires. 
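+
+### Handling Retries Idempotently
+
+Because the same event can be delivered more than once during the retry window, receivers should deduplicate on `event_id` before doing any real work. A minimal sketch (illustrative only; the in-memory set is a stand-in for a persistent store such as a database or cache):
+
+```python
+import json
+
+# Illustrative stand-in: use a database or cache in production so
+# deduplication survives receiver restarts
+processed_event_ids: set[str] = set()
+
+
+def handle_delivery(body: bytes) -> int:
+    """Return the HTTP status code to answer Reflector with."""
+    payload = json.loads(body)
+    event_id = payload.get("event_id", "")
+
+    if event_id in processed_event_ids:
+        # Duplicate delivery from the retry mechanism: acknowledge with
+        # 2xx without reprocessing, so Reflector stops retrying
+        return 200
+
+    processed_event_ids.add(event_id)
+    # ... process the payload here ...
+    return 200
+```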
+ +## Testing Webhooks + +You can test your webhook configuration before processing transcripts: + +```http +POST /v1/rooms/{room_id}/webhook/test +``` + +Response: +```json +{ + "success": true, + "status_code": 200, + "message": "Webhook test successful", + "response_preview": "OK" +} +``` + +Or in case of failure: +```json +{ + "success": false, + "error": "Webhook request timed out (10 seconds)" +} +``` diff --git a/server/migrations/versions/0194f65cd6d3_add_webhook_fields_to_rooms.py b/server/migrations/versions/0194f65cd6d3_add_webhook_fields_to_rooms.py new file mode 100644 index 00000000..21dc1260 --- /dev/null +++ b/server/migrations/versions/0194f65cd6d3_add_webhook_fields_to_rooms.py @@ -0,0 +1,36 @@ +"""Add webhook fields to rooms + +Revision ID: 0194f65cd6d3 +Revises: 5a8907fd1d78 +Create Date: 2025-08-27 09:03:19.610995 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "0194f65cd6d3" +down_revision: Union[str, None] = "5a8907fd1d78" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("room", schema=None) as batch_op: + batch_op.add_column(sa.Column("webhook_url", sa.String(), nullable=True)) + batch_op.add_column(sa.Column("webhook_secret", sa.String(), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("room", schema=None) as batch_op: + batch_op.drop_column("webhook_secret") + batch_op.drop_column("webhook_url") + + # ### end Alembic commands ### diff --git a/server/reflector/db/rooms.py b/server/reflector/db/rooms.py index a38e6b7f..08a6748d 100644 --- a/server/reflector/db/rooms.py +++ b/server/reflector/db/rooms.py @@ -1,3 +1,4 @@ +import secrets from datetime import datetime, timezone from sqlite3 import IntegrityError from typing import Literal @@ -40,6 +41,8 @@ rooms = sqlalchemy.Table( sqlalchemy.Column( "is_shared", sqlalchemy.Boolean, nullable=False, server_default=false() ), + sqlalchemy.Column("webhook_url", sqlalchemy.String), + sqlalchemy.Column("webhook_secret", sqlalchemy.String), sqlalchemy.Index("idx_room_is_shared", "is_shared"), ) @@ -59,6 +62,8 @@ class Room(BaseModel): "none", "prompt", "automatic", "automatic-2nd-participant" ] = "automatic-2nd-participant" is_shared: bool = False + webhook_url: str = "" + webhook_secret: str = "" class RoomController: @@ -107,10 +112,15 @@ class RoomController: recording_type: str, recording_trigger: str, is_shared: bool, + webhook_url: str = "", + webhook_secret: str = "", ): """ Add a new room """ + if webhook_url and not webhook_secret: + webhook_secret = secrets.token_urlsafe(32) + room = Room( name=name, user_id=user_id, @@ -122,6 +132,8 @@ class RoomController: recording_type=recording_type, recording_trigger=recording_trigger, is_shared=is_shared, + webhook_url=webhook_url, + webhook_secret=webhook_secret, ) query = rooms.insert().values(**room.model_dump()) try: @@ -134,6 +146,9 @@ class RoomController: """ Update a room fields with key/values in values """ + if values.get("webhook_url") and not values.get("webhook_secret"): + values["webhook_secret"] = secrets.token_urlsafe(32) + query = rooms.update().where(rooms.c.id == room.id).values(**values) try: await get_database().execute(query) diff --git 
a/server/reflector/pipelines/main_file_pipeline.py b/server/reflector/pipelines/main_file_pipeline.py index 42333aa9..5c57dddb 100644 --- a/server/reflector/pipelines/main_file_pipeline.py +++ b/server/reflector/pipelines/main_file_pipeline.py @@ -7,6 +7,7 @@ Uses parallel processing for transcription, diarization, and waveform generation """ import asyncio +import uuid from pathlib import Path import av @@ -14,7 +15,9 @@ import structlog from celery import shared_task from reflector.asynctask import asynctask +from reflector.db.rooms import rooms_controller from reflector.db.transcripts import ( + SourceKind, Transcript, TranscriptStatus, transcripts_controller, @@ -48,6 +51,7 @@ from reflector.processors.types import ( ) from reflector.settings import settings from reflector.storage import get_transcripts_storage +from reflector.worker.webhook import send_transcript_webhook class EmptyPipeline: @@ -385,7 +389,6 @@ async def task_pipeline_file_process(*, transcript_id: str): raise Exception(f"Transcript {transcript_id} not found") pipeline = PipelineMainFile(transcript_id=transcript_id) - try: await pipeline.set_status(transcript_id, "processing") @@ -402,3 +405,17 @@ async def task_pipeline_file_process(*, transcript_id: str): except Exception: await pipeline.set_status(transcript_id, "error") raise + + # Trigger webhook if this is a room recording with webhook configured + if transcript.source_kind == SourceKind.ROOM and transcript.room_id: + room = await rooms_controller.get_by_id(transcript.room_id) + if room and room.webhook_url: + logger.info( + "Dispatching webhook task", + transcript_id=transcript_id, + room_id=room.id, + webhook_url=room.webhook_url, + ) + send_transcript_webhook.delay( + transcript_id, room.id, event_id=uuid.uuid4().hex + ) diff --git a/server/reflector/views/rooms.py b/server/reflector/views/rooms.py index d4278e1f..82c172f2 100644 --- a/server/reflector/views/rooms.py +++ b/server/reflector/views/rooms.py @@ -15,6 +15,7 @@ from reflector.db.meetings import meetings_controller from reflector.db.rooms import rooms_controller from reflector.settings import settings from reflector.whereby import create_meeting, upload_logo +from reflector.worker.webhook import test_webhook logger = logging.getLogger(__name__) @@ -44,6 +45,11 @@ class Room(BaseModel): is_shared: bool +class RoomDetails(Room): + webhook_url: str + webhook_secret: str + + class Meeting(BaseModel): id: str room_name: str @@ -64,6 +70,8 @@ class CreateRoom(BaseModel): recording_type: str recording_trigger: str is_shared: bool + webhook_url: str + webhook_secret: str class UpdateRoom(BaseModel): @@ -76,16 +84,26 @@ class UpdateRoom(BaseModel): recording_type: str recording_trigger: str is_shared: bool + webhook_url: str + webhook_secret: str class DeletionStatus(BaseModel): status: str -@router.get("/rooms", response_model=Page[Room]) +class WebhookTestResult(BaseModel): + success: bool + message: str = "" + error: str = "" + status_code: int | None = None + response_preview: str | None = None + + +@router.get("/rooms", response_model=Page[RoomDetails]) async def rooms_list( user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)], -) -> list[Room]: +) -> list[RoomDetails]: if not user and not settings.PUBLIC_MODE: raise HTTPException(status_code=401, detail="Not authenticated") @@ -99,6 +117,18 @@ async def rooms_list( ) +@router.get("/rooms/{room_id}", response_model=RoomDetails) +async def rooms_get( + room_id: str, + user: Annotated[Optional[auth.UserInfo], 
Depends(auth.current_user_optional)], +): + user_id = user["sub"] if user else None + room = await rooms_controller.get_by_id_for_http(room_id, user_id=user_id) + if not room: + raise HTTPException(status_code=404, detail="Room not found") + return room + + @router.post("/rooms", response_model=Room) async def rooms_create( room: CreateRoom, @@ -117,10 +147,12 @@ async def rooms_create( recording_type=room.recording_type, recording_trigger=room.recording_trigger, is_shared=room.is_shared, + webhook_url=room.webhook_url, + webhook_secret=room.webhook_secret, ) -@router.patch("/rooms/{room_id}", response_model=Room) +@router.patch("/rooms/{room_id}", response_model=RoomDetails) async def rooms_update( room_id: str, info: UpdateRoom, @@ -209,3 +241,24 @@ async def rooms_create_meeting( meeting.host_room_url = "" return meeting + + +@router.post("/rooms/{room_id}/webhook/test", response_model=WebhookTestResult) +async def rooms_test_webhook( + room_id: str, + user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)], +): + """Test webhook configuration by sending a sample payload.""" + user_id = user["sub"] if user else None + + room = await rooms_controller.get_by_id(room_id) + if not room: + raise HTTPException(status_code=404, detail="Room not found") + + if user_id and room.user_id != user_id: + raise HTTPException( + status_code=403, detail="Not authorized to test this room's webhook" + ) + + result = await test_webhook(room_id) + return WebhookTestResult(**result) diff --git a/server/reflector/worker/webhook.py b/server/reflector/worker/webhook.py new file mode 100644 index 00000000..64368b2e --- /dev/null +++ b/server/reflector/worker/webhook.py @@ -0,0 +1,258 @@ +"""Webhook task for sending transcript notifications.""" + +import hashlib +import hmac +import json +import uuid +from datetime import datetime, timezone + +import httpx +import structlog +from celery import shared_task +from celery.utils.log import get_task_logger + +from reflector.db.rooms import rooms_controller +from reflector.db.transcripts import transcripts_controller +from reflector.pipelines.main_live_pipeline import asynctask +from reflector.settings import settings +from reflector.utils.webvtt import topics_to_webvtt + +logger = structlog.wrap_logger(get_task_logger(__name__)) + + +def generate_webhook_signature(payload: bytes, secret: str, timestamp: str) -> str: + """Generate HMAC signature for webhook payload.""" + signed_payload = f"{timestamp}.{payload.decode('utf-8')}" + hmac_obj = hmac.new( + secret.encode("utf-8"), + signed_payload.encode("utf-8"), + hashlib.sha256, + ) + return hmac_obj.hexdigest() + + +@shared_task( + bind=True, + max_retries=30, + default_retry_delay=60, + retry_backoff=True, + retry_backoff_max=3600, # Max 1 hour between retries +) +@asynctask +async def send_transcript_webhook( + self, + transcript_id: str, + room_id: str, + event_id: str, +): + log = logger.bind( + transcript_id=transcript_id, + room_id=room_id, + retry_count=self.request.retries, + ) + + try: + # Fetch transcript and room + transcript = await transcripts_controller.get_by_id(transcript_id) + if not transcript: + log.error("Transcript not found, skipping webhook") + return + + room = await rooms_controller.get_by_id(room_id) + if not room: + log.error("Room not found, skipping webhook") + return + + if not room.webhook_url: + log.info("No webhook URL configured for room, skipping") + return + + # Generate WebVTT content from topics + topics_data = [] + + if transcript.topics: + # Build topics data 
with diarized content per topic + for topic in transcript.topics: + topic_webvtt = topics_to_webvtt([topic]) if topic.words else "" + topics_data.append( + { + "title": topic.title, + "summary": topic.summary, + "timestamp": topic.timestamp, + "duration": topic.duration, + "webvtt": topic_webvtt, + } + ) + + # Build webhook payload + frontend_url = f"{settings.UI_BASE_URL}/transcripts/{transcript.id}" + participants = [ + {"id": p.id, "name": p.name, "speaker": p.speaker} + for p in (transcript.participants or []) + ] + payload_data = { + "event": "transcript.completed", + "event_id": event_id, + "timestamp": datetime.now(timezone.utc).isoformat(), + "transcript": { + "id": transcript.id, + "room_id": transcript.room_id, + "created_at": transcript.created_at.isoformat(), + "duration": transcript.duration, + "title": transcript.title, + "short_summary": transcript.short_summary, + "long_summary": transcript.long_summary, + "webvtt": transcript.webvtt, + "topics": topics_data, + "participants": participants, + "source_language": transcript.source_language, + "target_language": transcript.target_language, + "status": transcript.status, + "frontend_url": frontend_url, + }, + "room": { + "id": room.id, + "name": room.name, + }, + } + + # Convert to JSON + payload_json = json.dumps(payload_data, separators=(",", ":")) + payload_bytes = payload_json.encode("utf-8") + + # Generate signature if secret is configured + headers = { + "Content-Type": "application/json", + "User-Agent": "Reflector-Webhook/1.0", + "X-Webhook-Event": "transcript.completed", + "X-Webhook-Retry": str(self.request.retries), + } + + if room.webhook_secret: + timestamp = str(int(datetime.now(timezone.utc).timestamp())) + signature = generate_webhook_signature( + payload_bytes, room.webhook_secret, timestamp + ) + headers["X-Webhook-Signature"] = f"t={timestamp},v1={signature}" + + # Send webhook with timeout + async with httpx.AsyncClient(timeout=30.0) as client: + log.info( + "Sending webhook", + url=room.webhook_url, + payload_size=len(payload_bytes), + ) + + response = await client.post( + room.webhook_url, + content=payload_bytes, + headers=headers, + ) + + response.raise_for_status() + + log.info( + "Webhook sent successfully", + status_code=response.status_code, + response_size=len(response.content), + ) + + except httpx.HTTPStatusError as e: + log.error( + "Webhook failed with HTTP error", + status_code=e.response.status_code, + response_text=e.response.text[:500], # First 500 chars + ) + + # Don't retry on client errors (4xx) + if 400 <= e.response.status_code < 500: + log.error("Client error, not retrying") + return + + # Retry on server errors (5xx) + raise self.retry(exc=e) + + except (httpx.ConnectError, httpx.TimeoutException) as e: + # Retry on network errors + log.error("Webhook failed with connection error", error=str(e)) + raise self.retry(exc=e) + + except Exception as e: + # Retry on unexpected errors + log.exception("Unexpected error in webhook task", error=str(e)) + raise self.retry(exc=e) + + +async def test_webhook(room_id: str) -> dict: + """ + Test webhook configuration by sending a sample payload. + Returns immediately with success/failure status. + This is the shared implementation used by both the API endpoint and Celery task. 
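For integrators, the transcript.completed body assembled above comes out shaped like the following. Every value here is illustrative; only the field set is taken from payload_data, and frontend_url is built from UI_BASE_URL:

{
  "event": "transcript.completed",
  "event_id": "a1b2c3d4e5f64a7b8c9d0e1f2a3b4c5d",
  "timestamp": "2025-08-27T17:02:11+00:00",
  "transcript": {
    "id": "abc123",
    "room_id": "room456",
    "created_at": "2025-08-27T16:30:00+00:00",
    "duration": 1823.4,
    "title": "Weekly sync",
    "short_summary": "Short recap of the meeting.",
    "long_summary": "Longer narrative summary.",
    "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:02.000\nHello everyone.",
    "topics": [
      {
        "title": "Introductions",
        "summary": "Who is on the call.",
        "timestamp": 0.0,
        "duration": 120.0,
        "webvtt": "WEBVTT\n\n00:00:00.000 --> 00:00:02.000\nHello everyone."
      }
    ],
    "participants": [{"id": "p1", "name": "Ada", "speaker": 1}],
    "source_language": "en",
    "target_language": "en",
    "status": "ended",
    "frontend_url": "https://reflector.example.com/transcripts/abc123"
  },
  "room": {"id": "room456", "name": "weekly-sync"}
}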
+ """ + try: + room = await rooms_controller.get_by_id(room_id) + if not room: + return {"success": False, "error": "Room not found"} + + if not room.webhook_url: + return {"success": False, "error": "No webhook URL configured"} + + now = datetime.now(timezone.utc).isoformat() + payload_data = { + "event": "test", + "event_id": uuid.uuid4().hex, + "timestamp": now, + "message": "This is a test webhook from Reflector", + "room": { + "id": room.id, + "name": room.name, + }, + } + + payload_json = json.dumps(payload_data, separators=(",", ":")) + payload_bytes = payload_json.encode("utf-8") + + # Generate headers with signature + headers = { + "Content-Type": "application/json", + "User-Agent": "Reflector-Webhook/1.0", + "X-Webhook-Event": "test", + } + + if room.webhook_secret: + timestamp = str(int(datetime.now(timezone.utc).timestamp())) + signature = generate_webhook_signature( + payload_bytes, room.webhook_secret, timestamp + ) + headers["X-Webhook-Signature"] = f"t={timestamp},v1={signature}" + + # Send test webhook with short timeout + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post( + room.webhook_url, + content=payload_bytes, + headers=headers, + ) + + return { + "success": response.is_success, + "status_code": response.status_code, + "message": f"Webhook test {'successful' if response.is_success else 'failed'}", + "response_preview": response.text if response.text else None, + } + + except httpx.TimeoutException: + return { + "success": False, + "error": "Webhook request timed out (10 seconds)", + } + except httpx.ConnectError as e: + return { + "success": False, + "error": f"Could not connect to webhook URL: {str(e)}", + } + except Exception as e: + return { + "success": False, + "error": f"Unexpected error: {str(e)}", + } diff --git a/www/app/(app)/rooms/_components/RoomCards.tsx b/www/app/(app)/rooms/_components/RoomCards.tsx index 15079a7a..16748d90 100644 --- a/www/app/(app)/rooms/_components/RoomCards.tsx +++ b/www/app/(app)/rooms/_components/RoomCards.tsx @@ -12,11 +12,11 @@ import { HStack, } from "@chakra-ui/react"; import { LuLink } from "react-icons/lu"; -import { Room } from "../../../api"; +import { RoomDetails } from "../../../api"; import { RoomActionsMenu } from "./RoomActionsMenu"; interface RoomCardsProps { - rooms: Room[]; + rooms: RoomDetails[]; linkCopied: string; onCopyUrl: (roomName: string) => void; onEdit: (roomId: string, roomData: any) => void; diff --git a/www/app/(app)/rooms/_components/RoomList.tsx b/www/app/(app)/rooms/_components/RoomList.tsx index 17cd5fc5..73fe8a5c 100644 --- a/www/app/(app)/rooms/_components/RoomList.tsx +++ b/www/app/(app)/rooms/_components/RoomList.tsx @@ -1,11 +1,11 @@ import { Box, Heading, Text, VStack } from "@chakra-ui/react"; -import { Room } from "../../../api"; +import { RoomDetails } from "../../../api"; import { RoomTable } from "./RoomTable"; import { RoomCards } from "./RoomCards"; interface RoomListProps { title: string; - rooms: Room[]; + rooms: RoomDetails[]; linkCopied: string; onCopyUrl: (roomName: string) => void; onEdit: (roomId: string, roomData: any) => void; diff --git a/www/app/(app)/rooms/_components/RoomTable.tsx b/www/app/(app)/rooms/_components/RoomTable.tsx index 092fccdc..93d05b61 100644 --- a/www/app/(app)/rooms/_components/RoomTable.tsx +++ b/www/app/(app)/rooms/_components/RoomTable.tsx @@ -9,11 +9,11 @@ import { Spinner, } from "@chakra-ui/react"; import { LuLink } from "react-icons/lu"; -import { Room } from "../../../api"; +import { RoomDetails } from
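The new test endpoint can also be exercised outside the UI. A quick sketch with httpx, assuming the API is served locally and the router is mounted under /v1 like the other routes used in this patch; the base URL and room id are placeholders:

import asyncio

import httpx


async def main() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        # The response body follows the WebhookTestResult model
        resp = await client.post("/v1/rooms/ROOM_ID/webhook/test")
        resp.raise_for_status()
        result = resp.json()
        print(result.get("success"), result.get("status_code"), result.get("message"))


asyncio.run(main())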
"../../../api"; import { RoomActionsMenu } from "./RoomActionsMenu"; interface RoomTableProps { - rooms: Room[]; + rooms: RoomDetails[]; linkCopied: string; onCopyUrl: (roomName: string) => void; onEdit: (roomId: string, roomData: any) => void; diff --git a/www/app/(app)/rooms/page.tsx b/www/app/(app)/rooms/page.tsx index 305087f9..33cfa6b3 100644 --- a/www/app/(app)/rooms/page.tsx +++ b/www/app/(app)/rooms/page.tsx @@ -11,13 +11,15 @@ import { Input, Select, Spinner, + IconButton, createListCollection, useDisclosure, } from "@chakra-ui/react"; import { useEffect, useState } from "react"; +import { LuEye, LuEyeOff } from "react-icons/lu"; import useApi from "../../lib/useApi"; import useRoomList from "./useRoomList"; -import { ApiError, Room } from "../../api"; +import { ApiError, RoomDetails } from "../../api"; import { RoomList } from "./_components/RoomList"; import { PaginationPage } from "../browse/_components/Pagination"; @@ -55,6 +57,8 @@ const roomInitialState = { recordingType: "cloud", recordingTrigger: "automatic-2nd-participant", isShared: false, + webhookUrl: "", + webhookSecret: "", }; export default function RoomsList() { @@ -83,6 +87,11 @@ export default function RoomsList() { const [topics, setTopics] = useState([]); const [nameError, setNameError] = useState(""); const [linkCopied, setLinkCopied] = useState(""); + const [testingWebhook, setTestingWebhook] = useState(false); + const [webhookTestResult, setWebhookTestResult] = useState( + null, + ); + const [showWebhookSecret, setShowWebhookSecret] = useState(false); interface Stream { stream_id: number; name: string; @@ -155,6 +164,69 @@ export default function RoomsList() { }, 2000); }; + const handleCloseDialog = () => { + setShowWebhookSecret(false); + setWebhookTestResult(null); + onClose(); + }; + + const handleTestWebhook = async () => { + if (!room.webhookUrl || !editRoomId) { + setWebhookTestResult("Please enter a webhook URL first"); + return; + } + + setTestingWebhook(true); + setWebhookTestResult(null); + + try { + const response = await api?.v1RoomsTestWebhook({ + roomId: editRoomId, + }); + + if (response?.success) { + setWebhookTestResult( + `✅ Webhook test successful! Status: ${response.status_code}`, + ); + } else { + let errorMsg = `❌ Webhook test failed`; + if (response?.status_code) { + errorMsg += ` (Status: ${response.status_code})`; + } + if (response?.error) { + errorMsg += `: ${response.error}`; + } else if (response?.response_preview) { + // Try to parse and extract meaningful error from response + // Specific to N8N at the moment, as there is no specification for that + // We could just display as is, but decided here to dig a little bit more. + try { + const preview = JSON.parse(response.response_preview); + if (preview.message) { + errorMsg += `: ${preview.message}`; + } + } catch { + // If not JSON, just show the preview text (truncated) + const previewText = response.response_preview.substring(0, 150); + errorMsg += `: ${previewText}`; + } + } else if (response?.message) { + errorMsg += `: ${response.message}`; + } + setWebhookTestResult(errorMsg); + } + } catch (error) { + console.error("Error testing webhook:", error); + setWebhookTestResult("❌ Failed to test webhook. 
Please check your URL."); + } finally { + setTestingWebhook(false); + } + + // Clear result after 5 seconds + setTimeout(() => { + setWebhookTestResult(null); + }, 5000); + }; + const handleSaveRoom = async () => { try { if (RESERVED_PATHS.includes(room.name)) { @@ -172,6 +244,8 @@ export default function RoomsList() { recording_type: room.recordingType, recording_trigger: room.recordingTrigger, is_shared: room.isShared, + webhook_url: room.webhookUrl, + webhook_secret: room.webhookSecret, }; if (isEditing) { @@ -190,7 +264,7 @@ export default function RoomsList() { setEditRoomId(""); setNameError(""); refetch(); - onClose(); + handleCloseDialog(); } catch (err) { if ( err instanceof ApiError && @@ -206,18 +280,46 @@ export default function RoomsList() { } }; - const handleEditRoom = (roomId, roomData) => { - setRoom({ - name: roomData.name, - zulipAutoPost: roomData.zulip_auto_post, - zulipStream: roomData.zulip_stream, - zulipTopic: roomData.zulip_topic, - isLocked: roomData.is_locked, - roomMode: roomData.room_mode, - recordingType: roomData.recording_type, - recordingTrigger: roomData.recording_trigger, - isShared: roomData.is_shared, - }); + const handleEditRoom = async (roomId, roomData) => { + // Reset states + setShowWebhookSecret(false); + setWebhookTestResult(null); + + // Fetch full room details to get webhook fields + try { + const detailedRoom = await api?.v1RoomsGet({ roomId }); + if (detailedRoom) { + setRoom({ + name: detailedRoom.name, + zulipAutoPost: detailedRoom.zulip_auto_post, + zulipStream: detailedRoom.zulip_stream, + zulipTopic: detailedRoom.zulip_topic, + isLocked: detailedRoom.is_locked, + roomMode: detailedRoom.room_mode, + recordingType: detailedRoom.recording_type, + recordingTrigger: detailedRoom.recording_trigger, + isShared: detailedRoom.is_shared, + webhookUrl: detailedRoom.webhook_url || "", + webhookSecret: detailedRoom.webhook_secret || "", + }); + } + } catch (error) { + console.error("Failed to fetch room details, using list data:", error); + // Fallback to using the data from the list + setRoom({ + name: roomData.name, + zulipAutoPost: roomData.zulip_auto_post, + zulipStream: roomData.zulip_stream, + zulipTopic: roomData.zulip_topic, + isLocked: roomData.is_locked, + roomMode: roomData.room_mode, + recordingType: roomData.recording_type, + recordingTrigger: roomData.recording_trigger, + isShared: roomData.is_shared, + webhookUrl: roomData.webhook_url || "", + webhookSecret: roomData.webhook_secret || "", + }); + } setEditRoomId(roomId); setIsEditing(true); setNameError(""); @@ -250,9 +352,9 @@ export default function RoomsList() { }); }; - const myRooms: Room[] = + const myRooms: RoomDetails[] = response?.items.filter((roomData) => !roomData.is_shared) || []; - const sharedRooms: Room[] = + const sharedRooms: RoomDetails[] = response?.items.filter((roomData) => roomData.is_shared) || []; if (loading && !response) @@ -287,6 +389,8 @@ export default function RoomsList() { setIsEditing(false); setRoom(roomInitialState); setNameError(""); + setShowWebhookSecret(false); + setWebhookTestResult(null); onOpen(); }} > @@ -296,7 +400,7 @@ export default function RoomsList() { (e.open ? onOpen() : onClose())} + onOpenChange={(e) => (e.open ? 
onOpen() : handleCloseDialog())} size="lg" > @@ -533,6 +637,109 @@ export default function RoomsList() { + + {/* Webhook Configuration Section */} + + Webhook URL + + + Optional: URL to receive notifications when transcripts are + ready + + + + {room.webhookUrl && ( + <> + + Webhook Secret + + + {isEditing && room.webhookSecret && ( + + setShowWebhookSecret(!showWebhookSecret) + } + > + {showWebhookSecret ? : } + + )} + + + Used for HMAC signature verification (auto-generated if + left empty) + + + + {isEditing && ( + <> + + + {webhookTestResult && ( +
+ {webhookTestResult} +
+ )} +
+ + )} + + )} + - - + )} {!isEditMode && ( diff --git a/www/app/(app)/transcripts/[transcriptId]/page.tsx b/www/app/(app)/transcripts/[transcriptId]/page.tsx index 0a2dba47..ce48e951 100644 --- a/www/app/(app)/transcripts/[transcriptId]/page.tsx +++ b/www/app/(app)/transcripts/[transcriptId]/page.tsx @@ -86,7 +86,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { useActiveTopic={useActiveTopic} waveform={waveform.waveform} media={mp3.media} - mediaDuration={transcript.response.duration} + mediaDuration={transcript.response?.duration || null} /> ) : !mp3.loading && (waveform.error || mp3.error) ? ( @@ -116,7 +116,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { { transcript.reload(); diff --git a/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx b/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx index 3a13052e..567272ff 100644 --- a/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx +++ b/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx @@ -24,10 +24,16 @@ const TranscriptUpload = (details: TranscriptUpload) => { const router = useRouter(); - const [status, setStatus] = useState( + const [status_, setStatus] = useState( webSockets.status.value || transcript.response?.status || "idle", ); + // status is obviously done if we have transcript + const status = + !transcript.loading && transcript.response?.status === "ended" + ? transcript.response?.status + : status_; + useEffect(() => { if (!transcriptStarted && webSockets.transcriptTextLive.length !== 0) setTranscriptStarted(true); @@ -35,8 +41,11 @@ const TranscriptUpload = (details: TranscriptUpload) => { useEffect(() => { //TODO HANDLE ERROR STATUS BETTER + // TODO deprecate webSockets.status.value / depend on transcript.response?.status from query lib const newStatus = - webSockets.status.value || transcript.response?.status || "idle"; + transcript.response?.status === "ended" + ? 
"ended" + : webSockets.status.value || transcript.response?.status || "idle"; setStatus(newStatus); if (newStatus && (newStatus == "ended" || newStatus == "error")) { console.log(newStatus, "redirecting"); diff --git a/www/app/(app)/transcripts/createTranscript.ts b/www/app/(app)/transcripts/createTranscript.ts index 015c82de..8a235161 100644 --- a/www/app/(app)/transcripts/createTranscript.ts +++ b/www/app/(app)/transcripts/createTranscript.ts @@ -1,45 +1,33 @@ -import { useEffect, useState } from "react"; +import type { components } from "../../reflector-api"; +import { useTranscriptCreate } from "../../lib/apiHooks"; -import { useError } from "../../(errors)/errorContext"; -import { CreateTranscript, GetTranscript } from "../../api"; -import useApi from "../../lib/useApi"; +type CreateTranscript = components["schemas"]["CreateTranscript"]; +type GetTranscript = components["schemas"]["GetTranscript"]; type UseCreateTranscript = { transcript: GetTranscript | null; loading: boolean; error: Error | null; - create: (transcriptCreationDetails: CreateTranscript) => void; + create: (transcriptCreationDetails: CreateTranscript) => Promise; }; const useCreateTranscript = (): UseCreateTranscript => { - const [transcript, setTranscript] = useState(null); - const [loading, setLoading] = useState(false); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = useApi(); + const createMutation = useTranscriptCreate(); - const create = (transcriptCreationDetails: CreateTranscript) => { - if (loading || !api) return; + const create = async (transcriptCreationDetails: CreateTranscript) => { + if (createMutation.isPending) return; - setLoading(true); - - api - .v1TranscriptsCreate({ requestBody: transcriptCreationDetails }) - .then((transcript) => { - setTranscript(transcript); - setLoading(false); - }) - .catch((err) => { - setError( - err, - "There was an issue creating a transcript, please try again.", - ); - setErrorState(err); - setLoading(false); - }); + await createMutation.mutateAsync({ + body: transcriptCreationDetails, + }); }; - return { transcript, loading, error, create }; + return { + transcript: createMutation.data || null, + loading: createMutation.isPending, + error: createMutation.error as Error | null, + create, + }; }; export default useCreateTranscript; diff --git a/www/app/(app)/transcripts/fileUploadButton.tsx b/www/app/(app)/transcripts/fileUploadButton.tsx index 1b4101e8..1f5d72eb 100644 --- a/www/app/(app)/transcripts/fileUploadButton.tsx +++ b/www/app/(app)/transcripts/fileUploadButton.tsx @@ -1,6 +1,7 @@ import React, { useState } from "react"; -import useApi from "../../lib/useApi"; +import { useTranscriptUploadAudio } from "../../lib/apiHooks"; import { Button, Spinner } from "@chakra-ui/react"; +import { useError } from "../../(errors)/errorContext"; type FileUploadButton = { transcriptId: string; @@ -8,13 +9,16 @@ type FileUploadButton = { export default function FileUploadButton(props: FileUploadButton) { const fileInputRef = React.useRef(null); - const api = useApi(); + const uploadMutation = useTranscriptUploadAudio(); + const { setError } = useError(); const [progress, setProgress] = useState(0); const triggerFileUpload = () => { fileInputRef.current?.click(); }; - const handleFileUpload = (event: React.ChangeEvent) => { + const handleFileUpload = async ( + event: React.ChangeEvent, + ) => { const file = event.target.files?.[0]; if (file) { @@ -24,37 +28,45 @@ export default function FileUploadButton(props: FileUploadButton) { 
let start = 0; let uploadedSize = 0; - api?.httpRequest.config.interceptors.request.use((request) => { - request.onUploadProgress = (progressEvent) => { - const currentProgress = Math.floor( - ((uploadedSize + progressEvent.loaded) / file.size) * 100, - ); - setProgress(currentProgress); - }; - return request; - }); - const uploadNextChunk = async () => { - if (chunkNumber == totalChunks) return; + if (chunkNumber == totalChunks) { + setProgress(0); + return; + } const chunkSize = Math.min(maxChunkSize, file.size - start); const end = start + chunkSize; const chunk = file.slice(start, end); - await api?.v1TranscriptRecordUpload({ - transcriptId: props.transcriptId, - formData: { - chunk, - }, - chunkNumber, - totalChunks, - }); + try { + const formData = new FormData(); + formData.append("chunk", chunk); - uploadedSize += chunkSize; - chunkNumber++; - start = end; + await uploadMutation.mutateAsync({ + params: { + path: { + transcript_id: props.transcriptId, + }, + query: { + chunk_number: chunkNumber, + total_chunks: totalChunks, + }, + }, + body: formData as any, + }); - uploadNextChunk(); + uploadedSize += chunkSize; + const currentProgress = Math.floor((uploadedSize / file.size) * 100); + setProgress(currentProgress); + + chunkNumber++; + start = end; + + await uploadNextChunk(); + } catch (error) { + setError(error as Error, "Failed to upload file"); + setProgress(0); + } }; uploadNextChunk(); diff --git a/www/app/(app)/transcripts/new/page.tsx b/www/app/(app)/transcripts/new/page.tsx index 2670fd39..0410bd97 100644 --- a/www/app/(app)/transcripts/new/page.tsx +++ b/www/app/(app)/transcripts/new/page.tsx @@ -7,36 +7,29 @@ import About from "../../../(aboutAndPrivacy)/about"; import Privacy from "../../../(aboutAndPrivacy)/privacy"; import { useRouter } from "next/navigation"; import useCreateTranscript from "../createTranscript"; -import { SourceKind } from "../../../api"; import SelectSearch from "react-select-search"; import { supportedLanguages } from "../../../supportedLanguages"; -import useSessionStatus from "../../../lib/useSessionStatus"; import { featureEnabled } from "../../../domainContext"; -import { signIn } from "next-auth/react"; import { Flex, Box, Spinner, Heading, Button, - Card, Center, - Link, - CardBody, - Stack, Text, - Icon, - Grid, - IconButton, Spacer, - Menu, - Tooltip, - Input, } from "@chakra-ui/react"; +import { useAuth } from "../../../lib/AuthProvider"; +import type { components } from "../../../reflector-api"; + const TranscriptCreate = () => { const isClient = typeof window !== "undefined"; const router = useRouter(); - const { isLoading, isAuthenticated } = useSessionStatus(); + const auth = useAuth(); + const isAuthenticated = auth.status === "authenticated"; + const isAuthRefreshing = auth.status === "refreshing"; + const isLoading = auth.status === "loading"; const requireLogin = featureEnabled("requireLogin"); const [name, setName] = useState(""); @@ -55,27 +48,31 @@ const TranscriptCreate = () => { const [loadingUpload, setLoadingUpload] = useState(false); const getTargetLanguage = () => { - if (targetLanguage === "NOTRANSLATION") return; + if (targetLanguage === "NOTRANSLATION") return undefined; return targetLanguage; }; const send = () => { if (loadingRecord || createTranscript.loading || permissionDenied) return; setLoadingRecord(true); + const targetLang = getTargetLanguage(); createTranscript.create({ name, - target_language: getTargetLanguage(), - source_kind: "live" as SourceKind, + source_language: "en", + target_language: targetLang || 
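The chunk protocol the upload button implements (sequential multipart POSTs with chunk_number and total_chunks query parameters and a multipart field named "chunk") can be driven from a script as well. A rough sketch; the /v1/transcripts/{id}/record/upload path is only inferred from the old v1TranscriptRecordUpload operation name and is not shown in this patch, so treat it and the chunk size as placeholders:

import math
from pathlib import Path

import httpx

CHUNK_SIZE = 1024 * 1024  # placeholder; the component's real maxChunkSize is defined elsewhere


def upload_in_chunks(base_url: str, transcript_id: str, path: Path) -> None:
    data = path.read_bytes()
    total_chunks = math.ceil(len(data) / CHUNK_SIZE)
    with httpx.Client(base_url=base_url) as client:
        for chunk_number in range(total_chunks):
            chunk = data[chunk_number * CHUNK_SIZE : (chunk_number + 1) * CHUNK_SIZE]
            resp = client.post(
                f"/v1/transcripts/{transcript_id}/record/upload",  # inferred path, not confirmed by this patch
                params={"chunk_number": chunk_number, "total_chunks": total_chunks},
                files={"chunk": ("chunk", chunk)},
            )
            resp.raise_for_status()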
"en", + source_kind: "live", }); }; const uploadFile = () => { if (loadingUpload || createTranscript.loading || permissionDenied) return; setLoadingUpload(true); + const targetLang = getTargetLanguage(); createTranscript.create({ name, - target_language: getTargetLanguage(), - source_kind: "file" as SourceKind, + source_language: "en", + target_language: targetLang || "en", + source_kind: "file", }); }; @@ -141,8 +138,8 @@ const TranscriptCreate = () => {
{isLoading ? ( - ) : requireLogin && !isAuthenticated ? ( - + ) : requireLogin && !isAuthenticated && !isAuthRefreshing ? ( + ) : ( { - if (!api) - throw new Error("ShareLink's API should always be ready at this point"); - const selectedOption = shareOptionsData.find( (option) => option.value === selectedValue, ); @@ -67,19 +66,27 @@ export default function ShareAndPrivacy(props: ShareAndPrivacyProps) { share_mode: selectedValue as "public" | "semi-private" | "private", }; - const updatedTranscript = await api.v1TranscriptUpdate({ - transcriptId: props.transcriptResponse.id, - requestBody, - }); - setShareMode( - shareOptionsData.find( - (option) => option.value === updatedTranscript.share_mode, - ) || shareOptionsData[0], - ); - setShareLoading(false); + try { + const updatedTranscript = await updateTranscriptMutation.mutateAsync({ + params: { + path: { transcript_id: props.transcriptResponse.id }, + }, + body: requestBody, + }); + setShareMode( + shareOptionsData.find( + (option) => option.value === updatedTranscript.share_mode, + ) || shareOptionsData[0], + ); + } catch (err) { + console.error("Failed to update share mode:", err); + } finally { + setShareLoading(false); + } }; - const userId = useSessionUser().id; + const auth = useAuth(); + const userId = auth.status === "authenticated" ? auth.user?.id : null; useEffect(() => { setIsOwner(!!(requireLogin && userId === props.transcriptResponse.user_id)); @@ -124,7 +131,7 @@ export default function ShareAndPrivacy(props: ShareAndPrivacyProps) { "This transcript is public. Everyone can access it."} - {isOwner && api && ( + {isOwner && ( (undefined); + const [selectedStreamId, setSelectedStreamId] = useState(null); const [topic, setTopic] = useState(undefined); const [includeTopics, setIncludeTopics] = useState(false); - const [isLoading, setIsLoading] = useState(true); - const [streams, setStreams] = useState([]); - const [topics, setTopics] = useState([]); - const api = useApi(); + + const { data: streams = [], isLoading: isLoadingStreams } = useZulipStreams(); + const { data: topics = [] } = useZulipTopics(selectedStreamId); + const postToZulipMutation = useTranscriptPostToZulip(); + const { contains } = useFilter({ sensitivity: "base" }); - const { - collection: streamItemsCollection, - filter: streamItemsFilter, - set: streamItemsSet, - } = useListCollection({ - initialItems: [] as { label: string; value: string }[], - filter: contains, - }); + const streamItems = useMemo(() => { + return streams.map((stream: Stream) => ({ + label: stream.name, + value: stream.name, + })); + }, [streams]); - const { - collection: topicItemsCollection, - filter: topicItemsFilter, - set: topicItemsSet, - } = useListCollection({ - initialItems: [] as { label: string; value: string }[], - filter: contains, - }); + const topicItems = useMemo(() => { + return topics.map(({ name }) => ({ + label: name, + value: name, + })); + }, [topics]); + const { collection: streamItemsCollection, filter: streamItemsFilter } = + useListCollection({ + initialItems: streamItems, + filter: contains, + }); + + const { collection: topicItemsCollection, filter: topicItemsFilter } = + useListCollection({ + initialItems: topicItems, + filter: contains, + }); + + // Update selected stream ID when stream changes useEffect(() => { - const fetchZulipStreams = async () => { - if (!api) return; - - try { - const response = await api.v1ZulipGetStreams(); - setStreams(response); - - streamItemsSet( - response.map((stream) => ({ - label: stream.name, - value: stream.name, - })), - ); - - 
setIsLoading(false); - } catch (error) { - console.error("Error fetching Zulip streams:", error); - } - }; - - fetchZulipStreams(); - }, [!api]); - - useEffect(() => { - const fetchZulipTopics = async () => { - if (!api || !stream) return; - try { - const selectedStream = streams.find((s) => s.name === stream); - if (selectedStream) { - const response = await api.v1ZulipGetTopics({ - streamId: selectedStream.stream_id, - }); - setTopics(response); - topicItemsSet( - response.map((topic) => ({ - label: topic.name, - value: topic.name, - })), - ); - } else { - topicItemsSet([]); - } - } catch (error) { - console.error("Error fetching Zulip topics:", error); - } - }; - - fetchZulipTopics(); - }, [stream, streams, api]); + if (stream && streams) { + const selectedStream = streams.find((s: Stream) => s.name === stream); + setSelectedStreamId(selectedStream ? selectedStream.stream_id : null); + } else { + setSelectedStreamId(null); + } + }, [stream, streams]); const handleSendToZulip = async () => { - if (!api || !props.transcriptResponse) return; + if (!props.transcriptResponse) return; if (stream && topic) { try { - await api.v1TranscriptPostToZulip({ - transcriptId: props.transcriptResponse.id, - stream, - topic, - includeTopics, + await postToZulipMutation.mutateAsync({ + params: { + path: { + transcript_id: props.transcriptResponse.id, + }, + query: { + stream, + topic, + include_topics: includeTopics, + }, + }, }); setShowModal(false); } catch (error) { - console.log(error); + console.error("Error posting to Zulip:", error); } } }; @@ -155,7 +132,7 @@ export default function ShareZulip(props: ShareZulipProps & BoxProps) { - {isLoading ? ( + {isLoadingStreams ? ( diff --git a/www/app/(app)/transcripts/transcriptTitle.tsx b/www/app/(app)/transcripts/transcriptTitle.tsx index 4678818f..72421f48 100644 --- a/www/app/(app)/transcripts/transcriptTitle.tsx +++ b/www/app/(app)/transcripts/transcriptTitle.tsx @@ -1,6 +1,8 @@ import { useState } from "react"; -import { UpdateTranscript } from "../../api"; -import useApi from "../../lib/useApi"; +import type { components } from "../../reflector-api"; + +type UpdateTranscript = components["schemas"]["UpdateTranscript"]; +import { useTranscriptUpdate } from "../../lib/apiHooks"; import { Heading, IconButton, Input, Flex, Spacer } from "@chakra-ui/react"; import { LuPen } from "react-icons/lu"; @@ -14,24 +16,27 @@ const TranscriptTitle = (props: TranscriptTitle) => { const [displayedTitle, setDisplayedTitle] = useState(props.title); const [preEditTitle, setPreEditTitle] = useState(props.title); const [isEditing, setIsEditing] = useState(false); - const api = useApi(); + const updateTranscriptMutation = useTranscriptUpdate(); const updateTitle = async (newTitle: string, transcriptId: string) => { - if (!api) return; try { const requestBody: UpdateTranscript = { title: newTitle, }; - const updatedTranscript = await api?.v1TranscriptUpdate({ - transcriptId, - requestBody, + await updateTranscriptMutation.mutateAsync({ + params: { + path: { transcript_id: transcriptId }, + }, + body: requestBody, }); if (props.onUpdate) { props.onUpdate(newTitle); } - console.log("Updated transcript:", updatedTranscript); + console.log("Updated transcript title:", newTitle); } catch (err) { console.error("Failed to update transcript:", err); + // Revert title on error + setDisplayedTitle(preEditTitle); } }; diff --git a/www/app/(app)/transcripts/useMp3.ts b/www/app/(app)/transcripts/useMp3.ts index 3e8344ad..223a9a4a 100644 --- a/www/app/(app)/transcripts/useMp3.ts +++ 
b/www/app/(app)/transcripts/useMp3.ts @@ -1,6 +1,7 @@ import { useContext, useEffect, useState } from "react"; import { DomainContext } from "../../domainContext"; -import getApi from "../../lib/useApi"; +import { useTranscriptGet } from "../../lib/apiHooks"; +import { useAuth } from "../../lib/AuthProvider"; export type Mp3Response = { media: HTMLMediaElement | null; @@ -17,14 +18,17 @@ const useMp3 = (transcriptId: string, waiting?: boolean): Mp3Response => { const [audioLoadingError, setAudioLoadingError] = useState( null, ); - const [transcriptMetadataLoading, setTranscriptMetadataLoading] = - useState(true); - const [transcriptMetadataLoadingError, setTranscriptMetadataLoadingError] = - useState(null); const [audioDeleted, setAudioDeleted] = useState(null); - const api = getApi(); const { api_url } = useContext(DomainContext); - const accessTokenInfo = api?.httpRequest?.config?.TOKEN; + const auth = useAuth(); + const accessTokenInfo = + auth.status === "authenticated" ? auth.accessToken : null; + + const { + data: transcript, + isLoading: transcriptMetadataLoading, + error: transcriptError, + } = useTranscriptGet(later ? null : transcriptId); const [serviceWorker, setServiceWorker] = useState(null); @@ -52,72 +56,50 @@ const useMp3 = (transcriptId: string, waiting?: boolean): Mp3Response => { }, [navigator.serviceWorker, !serviceWorker, accessTokenInfo]); useEffect(() => { - if (!transcriptId || !api || later) return; + if (!transcriptId || later || !transcript) return; let stopped = false; let audioElement: HTMLAudioElement | null = null; let handleCanPlay: (() => void) | null = null; let handleError: (() => void) | null = null; - setTranscriptMetadataLoading(true); setAudioLoading(true); - // First fetch transcript info to check if audio is deleted - api - .v1TranscriptGet({ transcriptId }) - .then((transcript) => { - if (stopped) { - return; - } + const deleted = transcript.audio_deleted || false; + setAudioDeleted(deleted); - const deleted = transcript.audio_deleted || false; - setAudioDeleted(deleted); - setTranscriptMetadataLoadingError(null); + if (deleted) { + // Audio is deleted, don't attempt to load it + setMedia(null); + setAudioLoadingError(null); + setAudioLoading(false); + return; + } - if (deleted) { - // Audio is deleted, don't attempt to load it - setMedia(null); - setAudioLoadingError(null); - setAudioLoading(false); - return; - } + // Audio is not deleted, proceed to load it + audioElement = document.createElement("audio"); + audioElement.src = `${api_url}/v1/transcripts/${transcriptId}/audio/mp3`; + audioElement.crossOrigin = "anonymous"; + audioElement.preload = "auto"; - // Audio is not deleted, proceed to load it - audioElement = document.createElement("audio"); - audioElement.src = `${api_url}/v1/transcripts/${transcriptId}/audio/mp3`; - audioElement.crossOrigin = "anonymous"; - audioElement.preload = "auto"; + handleCanPlay = () => { + if (stopped) return; + setAudioLoading(false); + setAudioLoadingError(null); + }; - handleCanPlay = () => { - if (stopped) return; - setAudioLoading(false); - setAudioLoadingError(null); - }; + handleError = () => { + if (stopped) return; + setAudioLoading(false); + setAudioLoadingError("Failed to load audio"); + }; - handleError = () => { - if (stopped) return; - setAudioLoading(false); - setAudioLoadingError("Failed to load audio"); - }; + audioElement.addEventListener("canplay", handleCanPlay); + audioElement.addEventListener("error", handleError); - audioElement.addEventListener("canplay", handleCanPlay); - 
audioElement.addEventListener("error", handleError); - - if (!stopped) { - setMedia(audioElement); - } - }) - .catch((error) => { - if (stopped) return; - console.error("Failed to fetch transcript:", error); - setAudioDeleted(null); - setTranscriptMetadataLoadingError(error.message); - setAudioLoading(false); - }) - .finally(() => { - if (stopped) return; - setTranscriptMetadataLoading(false); - }); + if (!stopped) { + setMedia(audioElement); + } return () => { stopped = true; @@ -128,14 +110,18 @@ const useMp3 = (transcriptId: string, waiting?: boolean): Mp3Response => { if (handleError) audioElement.removeEventListener("error", handleError); } }; - }, [transcriptId, api, later, api_url]); + }, [transcriptId, transcript, later, api_url]); const getNow = () => { setLater(false); }; const loading = audioLoading || transcriptMetadataLoading; - const error = audioLoadingError || transcriptMetadataLoadingError; + const error = + audioLoadingError || + (transcriptError + ? (transcriptError as any).message || String(transcriptError) + : null); return { media, loading, error, getNow, audioDeleted }; }; diff --git a/www/app/(app)/transcripts/useParticipants.ts b/www/app/(app)/transcripts/useParticipants.ts index 38f5aa35..a3674597 100644 --- a/www/app/(app)/transcripts/useParticipants.ts +++ b/www/app/(app)/transcripts/useParticipants.ts @@ -1,8 +1,6 @@ -import { useEffect, useState } from "react"; -import { Participant } from "../../api"; -import { useError } from "../../(errors)/errorContext"; -import useApi from "../../lib/useApi"; -import { shouldShowError } from "../../lib/errorUtils"; +import type { components } from "../../reflector-api"; +type Participant = components["schemas"]["Participant"]; +import { useTranscriptParticipants } from "../../lib/apiHooks"; type ErrorParticipants = { error: Error; @@ -29,46 +27,38 @@ export type UseParticipants = ( ) & { refetch: () => void }; const useParticipants = (transcriptId: string): UseParticipants => { - const [response, setResponse] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = useApi(); - const [count, setCount] = useState(0); + const { + data: response, + isLoading: loading, + error, + refetch, + } = useTranscriptParticipants(transcriptId || null); - const refetch = () => { - if (!loading) { - setCount(count + 1); - setLoading(true); - setErrorState(null); - } - }; + // Type-safe return based on state + if (error) { + return { + error: error as Error, + loading: false, + response: null, + refetch, + } satisfies ErrorParticipants & { refetch: () => void }; + } - useEffect(() => { - if (!transcriptId || !api) return; + if (loading || !response) { + return { + response: response || null, + loading: true, + error: null, + refetch, + } satisfies LoadingParticipants & { refetch: () => void }; + } - setLoading(true); - api - .v1TranscriptGetParticipants({ transcriptId }) - .then((result) => { - setResponse(result); - setLoading(false); - console.debug("Participants Loaded:", result); - }) - .catch((error) => { - const shouldShowHuman = shouldShowError(error); - if (shouldShowHuman) { - setError(error, "There was an error loading the participants"); - } else { - setError(error); - } - setErrorState(error); - setResponse(null); - setLoading(false); - }); - }, [transcriptId, !api, count]); - - return { response, loading, error, refetch } as UseParticipants; + return { + response, + loading: false, + error: null, + refetch, + } satisfies 
SuccessParticipants & { refetch: () => void }; }; export default useParticipants; diff --git a/www/app/(app)/transcripts/useSearchTranscripts.ts b/www/app/(app)/transcripts/useSearchTranscripts.ts deleted file mode 100644 index 2e6a7311..00000000 --- a/www/app/(app)/transcripts/useSearchTranscripts.ts +++ /dev/null @@ -1,123 +0,0 @@ -// this hook is not great, we want to substitute it with a proper state management solution that is also not re-invention - -import { useEffect, useRef, useState } from "react"; -import { SearchResult, SourceKind } from "../../api"; -import useApi from "../../lib/useApi"; -import { - PaginationPage, - paginationPageTo0Based, -} from "../browse/_components/Pagination"; - -interface SearchFilters { - roomIds: readonly string[] | null; - sourceKind: SourceKind | null; -} - -const EMPTY_SEARCH_FILTERS: SearchFilters = { - roomIds: null, - sourceKind: null, -}; - -type UseSearchTranscriptsOptions = { - pageSize: number; - page: PaginationPage; -}; - -interface UseSearchTranscriptsReturn { - results: SearchResult[]; - totalCount: number; - isLoading: boolean; - error: unknown; - reload: () => void; -} - -function hashEffectFilters(filters: SearchFilters): string { - return JSON.stringify(filters); -} - -export function useSearchTranscripts( - query: string = "", - filters: SearchFilters = EMPTY_SEARCH_FILTERS, - options: UseSearchTranscriptsOptions = { - pageSize: 20, - page: PaginationPage(1), - }, -): UseSearchTranscriptsReturn { - const { pageSize, page } = options; - - const [reloadCount, setReloadCount] = useState(0); - - const api = useApi(); - const abortControllerRef = useRef(); - - const [data, setData] = useState<{ results: SearchResult[]; total: number }>({ - results: [], - total: 0, - }); - const [error, setError] = useState(); - const [isLoading, setIsLoading] = useState(false); - - const filterHash = hashEffectFilters(filters); - - useEffect(() => { - if (!api) { - setData({ results: [], total: 0 }); - setError(undefined); - setIsLoading(false); - return; - } - - if (abortControllerRef.current) { - abortControllerRef.current.abort(); - } - - const abortController = new AbortController(); - abortControllerRef.current = abortController; - - const performSearch = async () => { - setIsLoading(true); - - try { - const response = await api.v1TranscriptsSearch({ - q: query || "", - limit: pageSize, - offset: paginationPageTo0Based(page) * pageSize, - roomId: filters.roomIds?.[0], - sourceKind: filters.sourceKind || undefined, - }); - - if (abortController.signal.aborted) return; - setData(response); - setError(undefined); - } catch (err: unknown) { - if ((err as Error).name === "AbortError") { - return; - } - if (abortController.signal.aborted) { - console.error("Aborted search but error", err); - return; - } - - setError(err); - } finally { - if (!abortController.signal.aborted) { - setIsLoading(false); - } - } - }; - - performSearch().then(() => {}); - - return () => { - abortController.abort(); - }; - }, [api, query, page, filterHash, pageSize, reloadCount]); - - return { - results: data.results, - totalCount: data.total, - isLoading, - error, - reload: () => setReloadCount(reloadCount + 1), - }; -} diff --git a/www/app/(app)/transcripts/useTopicWithWords.ts b/www/app/(app)/transcripts/useTopicWithWords.ts index 29d0b982..31e184cc 100644 --- a/www/app/(app)/transcripts/useTopicWithWords.ts +++ b/www/app/(app)/transcripts/useTopicWithWords.ts @@ -1,9 +1,8 @@ -import { useEffect, useState } from "react"; +import type { components } from 
"../../reflector-api"; +import { useTranscriptTopicsWithWordsPerSpeaker } from "../../lib/apiHooks"; -import { GetTranscriptTopicWithWordsPerSpeaker } from "../../api"; -import { useError } from "../../(errors)/errorContext"; -import useApi from "../../lib/useApi"; -import { shouldShowError } from "../../lib/errorUtils"; +type GetTranscriptTopicWithWordsPerSpeaker = + components["schemas"]["GetTranscriptTopicWithWordsPerSpeaker"]; type ErrorTopicWithWords = { error: Error; @@ -33,47 +32,40 @@ const useTopicWithWords = ( topicId: string | undefined, transcriptId: string, ): UseTopicWithWords => { - const [response, setResponse] = - useState(null); - const [loading, setLoading] = useState(false); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = useApi(); + const { + data: response, + isLoading: loading, + error, + refetch, + } = useTranscriptTopicsWithWordsPerSpeaker( + transcriptId || null, + topicId || null, + ); - const [count, setCount] = useState(0); + if (error) { + return { + error: error as Error, + loading: false, + response: null, + refetch, + } satisfies ErrorTopicWithWords & { refetch: () => void }; + } - const refetch = () => { - if (!loading) { - setCount(count + 1); - setLoading(true); - setErrorState(null); - } - }; + if (loading || !response) { + return { + response: response || null, + loading: true, + error: false, + refetch, + } satisfies LoadingTopicWithWords & { refetch: () => void }; + } - useEffect(() => { - if (!transcriptId || !topicId || !api) return; - - setLoading(true); - - api - .v1TranscriptGetTopicsWithWordsPerSpeaker({ transcriptId, topicId }) - .then((result) => { - setResponse(result); - setLoading(false); - console.debug("Topics with words Loaded:", result); - }) - .catch((error) => { - const shouldShowHuman = shouldShowError(error); - if (shouldShowHuman) { - setError(error, "There was an error loading the topics with words"); - } else { - setError(error); - } - setErrorState(error); - }); - }, [transcriptId, !api, topicId, count]); - - return { response, loading, error, refetch } as UseTopicWithWords; + return { + response, + loading: false, + error: null, + refetch, + } satisfies SuccessTopicWithWords & { refetch: () => void }; }; export default useTopicWithWords; diff --git a/www/app/(app)/transcripts/useTopics.ts b/www/app/(app)/transcripts/useTopics.ts index ff17beaf..7f337582 100644 --- a/www/app/(app)/transcripts/useTopics.ts +++ b/www/app/(app)/transcripts/useTopics.ts @@ -1,10 +1,7 @@ -import { useEffect, useState } from "react"; +import { useTranscriptTopics } from "../../lib/apiHooks"; +import type { components } from "../../reflector-api"; -import { useError } from "../../(errors)/errorContext"; -import { Topic } from "./webSocketTypes"; -import useApi from "../../lib/useApi"; -import { shouldShowError } from "../../lib/errorUtils"; -import { GetTranscriptTopic } from "../../api"; +type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"]; type TranscriptTopics = { topics: GetTranscriptTopic[] | null; @@ -13,34 +10,13 @@ type TranscriptTopics = { }; const useTopics = (id: string): TranscriptTopics => { - const [topics, setTopics] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = useApi(); - useEffect(() => { - if (!id || !api) return; + const { data: topics, isLoading: loading, error } = useTranscriptTopics(id); - setLoading(true); - api - .v1TranscriptGetTopics({ 
transcriptId: id }) - .then((result) => { - setTopics(result); - setLoading(false); - console.debug("Transcript topics loaded:", result); - }) - .catch((err) => { - setErrorState(err); - const shouldShowHuman = shouldShowError(err); - if (shouldShowHuman) { - setError(err, "There was an error loading the topics"); - } else { - setError(err); - } - }); - }, [id, !api]); - - return { topics, loading, error }; + return { + topics: topics || null, + loading, + error: error as Error | null, + }; }; export default useTopics; diff --git a/www/app/(app)/transcripts/useTranscript.ts b/www/app/(app)/transcripts/useTranscript.ts index 49d257f0..3e56fb9e 100644 --- a/www/app/(app)/transcripts/useTranscript.ts +++ b/www/app/(app)/transcripts/useTranscript.ts @@ -1,8 +1,7 @@ -import { useEffect, useState } from "react"; -import { GetTranscript } from "../../api"; -import { useError } from "../../(errors)/errorContext"; -import { shouldShowError } from "../../lib/errorUtils"; -import useApi from "../../lib/useApi"; +import type { components } from "../../reflector-api"; +import { useTranscriptGet } from "../../lib/apiHooks"; + +type GetTranscript = components["schemas"]["GetTranscript"]; type ErrorTranscript = { error: Error; @@ -28,43 +27,43 @@ type SuccessTranscript = { const useTranscript = ( id: string | null, ): ErrorTranscript | LoadingTranscript | SuccessTranscript => { - const [response, setResponse] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setErrorState] = useState(null); - const [reload, setReload] = useState(0); - const { setError } = useError(); - const api = useApi(); - const reloadHandler = () => setReload((prev) => prev + 1); + const { data, isLoading, error, refetch } = useTranscriptGet(id); - useEffect(() => { - if (!id || !api) return; + // Map to the expected return format + if (isLoading) { + return { + response: null, + loading: true, + error: false, + reload: refetch, + }; + } - if (!response) { - setLoading(true); - } + if (error) { + return { + error: error as Error, + loading: false, + response: null, + reload: refetch, + }; + } - api - .v1TranscriptGet({ transcriptId: id }) - .then((result) => { - setResponse(result); - setLoading(false); - console.debug("Transcript Loaded:", result); - }) - .catch((error) => { - const shouldShowHuman = shouldShowError(error); - if (shouldShowHuman) { - setError(error, "There was an error loading the transcript"); - } else { - setError(error); - } - setErrorState(error); - }); - }, [id, !api, reload]); + // Check if data is undefined or null + if (!data) { + return { + response: null, + loading: true, + error: false, + reload: refetch, + }; + } - return { response, loading, error, reload: reloadHandler } as - | ErrorTranscript - | LoadingTranscript - | SuccessTranscript; + return { + response: data, + loading: false, + error: null, + reload: refetch, + }; }; export default useTranscript; diff --git a/www/app/(app)/transcripts/useWaveform.ts b/www/app/(app)/transcripts/useWaveform.ts index 19b2a265..8bb8c4c9 100644 --- a/www/app/(app)/transcripts/useWaveform.ts +++ b/www/app/(app)/transcripts/useWaveform.ts @@ -1,8 +1,7 @@ -import { useEffect, useState } from "react"; -import { AudioWaveform } from "../../api"; -import { useError } from "../../(errors)/errorContext"; -import useApi from "../../lib/useApi"; -import { shouldShowError } from "../../lib/errorUtils"; +import type { components } from "../../reflector-api"; +import { useTranscriptWaveform } from "../../lib/apiHooks"; + +type AudioWaveform = 
components["schemas"]["AudioWaveform"]; type AudioWaveFormResponse = { waveform: AudioWaveform | null; @@ -11,35 +10,17 @@ type AudioWaveFormResponse = { }; const useWaveform = (id: string, skip: boolean): AudioWaveFormResponse => { - const [waveform, setWaveform] = useState(null); - const [loading, setLoading] = useState(false); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = useApi(); + const { + data: waveform, + isLoading: loading, + error, + } = useTranscriptWaveform(skip ? null : id); - useEffect(() => { - if (!id || !api || skip) { - setLoading(false); - setErrorState(null); - setWaveform(null); - return; - } - setLoading(true); - setErrorState(null); - api - .v1TranscriptGetAudioWaveform({ transcriptId: id }) - .then((result) => { - setWaveform(result); - setLoading(false); - console.debug("Transcript waveform loaded:", result); - }) - .catch((err) => { - setErrorState(err); - setLoading(false); - }); - }, [id, api, skip]); - - return { waveform, loading, error }; + return { + waveform: waveform || null, + loading, + error: error as Error | null, + }; }; export default useWaveform; diff --git a/www/app/(app)/transcripts/useWebRTC.ts b/www/app/(app)/transcripts/useWebRTC.ts index c8370aa4..89a2a946 100644 --- a/www/app/(app)/transcripts/useWebRTC.ts +++ b/www/app/(app)/transcripts/useWebRTC.ts @@ -1,8 +1,9 @@ import { useEffect, useState } from "react"; import Peer from "simple-peer"; import { useError } from "../../(errors)/errorContext"; -import useApi from "../../lib/useApi"; -import { RtcOffer } from "../../api"; +import { useTranscriptWebRTC } from "../../lib/apiHooks"; +import type { components } from "../../reflector-api"; +type RtcOffer = components["schemas"]["RtcOffer"]; const useWebRTC = ( stream: MediaStream | null, @@ -10,10 +11,10 @@ const useWebRTC = ( ): Peer => { const [peer, setPeer] = useState(null); const { setError } = useError(); - const api = useApi(); + const { mutateAsync: mutateWebRtcTranscriptAsync } = useTranscriptWebRTC(); useEffect(() => { - if (!stream || !transcriptId || !api) { + if (!stream || !transcriptId) { return; } @@ -24,7 +25,7 @@ const useWebRTC = ( try { p = new Peer({ initiator: true, stream: stream }); } catch (error) { - setError(error, "Error creating WebRTC"); + setError(error as Error, "Error creating WebRTC"); return; } @@ -32,26 +33,31 @@ const useWebRTC = ( setError(new Error(`WebRTC error: ${err}`)); }); - p.on("signal", (data: any) => { - if (!api) return; + p.on("signal", async (data: any) => { if ("sdp" in data) { const rtcOffer: RtcOffer = { sdp: data.sdp, type: data.type, }; - api - .v1TranscriptRecordWebrtc({ transcriptId, requestBody: rtcOffer }) - .then((answer) => { - try { - p.signal(answer); - } catch (error) { - setError(error); - } - }) - .catch((error) => { - setError(error, "Error loading WebRTCOffer"); + try { + const answer = await mutateWebRtcTranscriptAsync({ + params: { + path: { + transcript_id: transcriptId, + }, + }, + body: rtcOffer, }); + + try { + p.signal(answer); + } catch (error) { + setError(error as Error); + } + } catch (error) { + setError(error as Error, "Error loading WebRTCOffer"); + } } }); @@ -63,7 +69,7 @@ const useWebRTC = ( return () => { p.destroy(); }; - }, [stream, transcriptId, !api]); + }, [stream, transcriptId, mutateWebRtcTranscriptAsync]); return peer; }; diff --git a/www/app/(app)/transcripts/useWebSockets.ts b/www/app/(app)/transcripts/useWebSockets.ts index 6fa5edc7..2b3205c4 100644 --- a/www/app/(app)/transcripts/useWebSockets.ts 
+++ b/www/app/(app)/transcripts/useWebSockets.ts @@ -2,8 +2,12 @@ import { useContext, useEffect, useState } from "react"; import { Topic, FinalSummary, Status } from "./webSocketTypes"; import { useError } from "../../(errors)/errorContext"; import { DomainContext } from "../../domainContext"; -import { AudioWaveform, GetTranscriptSegmentTopic } from "../../api"; -import useApi from "../../lib/useApi"; +import type { components } from "../../reflector-api"; +type AudioWaveform = components["schemas"]["AudioWaveform"]; +type GetTranscriptSegmentTopic = + components["schemas"]["GetTranscriptSegmentTopic"]; +import { useQueryClient } from "@tanstack/react-query"; +import { $api } from "../../lib/apiClient"; export type UseWebSockets = { transcriptTextLive: string; @@ -33,8 +37,8 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { const [status, setStatus] = useState({ value: "" }); const { setError } = useError(); - const { websocket_url } = useContext(DomainContext); - const api = useApi(); + const { websocket_url: websocketUrl } = useContext(DomainContext); + const queryClient = useQueryClient(); const [accumulatedText, setAccumulatedText] = useState(""); @@ -105,6 +109,13 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { title: "Topic 1: Introduction to Quantum Mechanics", transcript: "A brief overview of quantum mechanics and its principles.", + segments: [ + { + speaker: 1, + start: 0, + text: "This is the transcription of an example title", + }, + ], }, { id: "2", @@ -315,11 +326,9 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { } }; - if (!transcriptId || !api) return; + if (!transcriptId) return; - api?.v1TranscriptGetWebsocketEvents({ transcriptId }).then((result) => {}); - - const url = `${websocket_url}/v1/transcripts/${transcriptId}/events`; + const url = `${websocketUrl}/v1/transcripts/${transcriptId}/events`; let ws = new WebSocket(url); ws.onopen = () => { @@ -361,6 +370,16 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { return [...prevTopics, topic]; }); console.debug("TOPIC event:", message.data); + // Invalidate topics query to sync with WebSocket data + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/topics", + { + params: { path: { transcript_id: transcriptId } }, + }, + ).queryKey, + }); break; case "FINAL_SHORT_SUMMARY": @@ -370,6 +389,16 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { case "FINAL_LONG_SUMMARY": if (message.data) { setFinalSummary(message.data); + // Invalidate transcript query to sync summary + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { path: { transcript_id: transcriptId } }, + }, + ).queryKey, + }); } break; @@ -377,6 +406,16 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { console.debug("FINAL_TITLE event:", message.data); if (message.data) { setTitle(message.data.title); + // Invalidate transcript query to sync title + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { path: { transcript_id: transcriptId } }, + }, + ).queryKey, + }); } break; @@ -434,6 +473,11 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { break; case 1001: // Navigate away break; + case 1006: // Closed by client Chrome + console.warn( + 
"WebSocket closed by client, likely duplicated connection in react dev mode", + ); + break; default: setError( new Error(`WebSocket closed unexpectedly with code: ${event.code}`), @@ -450,7 +494,7 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { return () => { ws.close(); }; - }, [transcriptId, !api]); + }, [transcriptId, websocketUrl]); return { transcriptTextLive, diff --git a/www/app/(app)/transcripts/webSocketTypes.ts b/www/app/(app)/transcripts/webSocketTypes.ts index edd35eb6..4ec98946 100644 --- a/www/app/(app)/transcripts/webSocketTypes.ts +++ b/www/app/(app)/transcripts/webSocketTypes.ts @@ -1,4 +1,6 @@ -import { GetTranscriptTopic } from "../../api"; +import type { components } from "../../reflector-api"; + +type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"]; export type Topic = GetTranscriptTopic; diff --git a/www/app/(auth)/userInfo.tsx b/www/app/(auth)/userInfo.tsx index ffb286b3..bf6a5b62 100644 --- a/www/app/(auth)/userInfo.tsx +++ b/www/app/(auth)/userInfo.tsx @@ -1,18 +1,21 @@ "use client"; -import { signOut, signIn } from "next-auth/react"; -import useSessionStatus from "../lib/useSessionStatus"; + import { Spinner, Link } from "@chakra-ui/react"; +import { useAuth } from "../lib/AuthProvider"; export default function UserInfo() { - const { isLoading, isAuthenticated } = useSessionStatus(); - + const auth = useAuth(); + const status = auth.status; + const isLoading = status === "loading"; + const isAuthenticated = status === "authenticated"; + const isRefreshing = status === "refreshing"; return isLoading ? ( - ) : !isAuthenticated ? ( + ) : !isAuthenticated && !isRefreshing ? ( signIn("authentik")} + onClick={() => auth.signIn("authentik")} > Log in @@ -20,7 +23,7 @@ export default function UserInfo() { signOut({ callbackUrl: "/" })} + onClick={() => auth.signOut({ callbackUrl: "/" })} > Log out diff --git a/www/app/[roomName]/page.tsx b/www/app/[roomName]/page.tsx index b03a7e4f..0130588b 100644 --- a/www/app/[roomName]/page.tsx +++ b/www/app/[roomName]/page.tsx @@ -21,11 +21,13 @@ import { toaster } from "../components/ui/toaster"; import useRoomMeeting from "./useRoomMeeting"; import { useRouter } from "next/navigation"; import { notFound } from "next/navigation"; -import useSessionStatus from "../lib/useSessionStatus"; import { useRecordingConsent } from "../recordingConsentContext"; -import useApi from "../lib/useApi"; -import { Meeting } from "../api"; +import { useMeetingAudioConsent } from "../lib/apiHooks"; +import type { components } from "../reflector-api"; + +type Meeting = components["schemas"]["Meeting"]; import { FaBars } from "react-icons/fa6"; +import { useAuth } from "../lib/AuthProvider"; export type RoomDetails = { params: { @@ -76,31 +78,30 @@ const useConsentDialog = ( wherebyRef: RefObject /*accessibility*/, ) => { const { state: consentState, touch, hasConsent } = useRecordingConsent(); - const [consentLoading, setConsentLoading] = useState(false); // toast would open duplicates, even with using "id=" prop const [modalOpen, setModalOpen] = useState(false); - const api = useApi(); + const audioConsentMutation = useMeetingAudioConsent(); const handleConsent = useCallback( async (meetingId: string, given: boolean) => { - if (!api) return; - - setConsentLoading(true); - try { - await api.v1MeetingAudioConsent({ - meetingId, - requestBody: { consent_given: given }, + await audioConsentMutation.mutateAsync({ + params: { + path: { + meeting_id: meetingId, + }, + }, + body: { + consent_given: given, + }, 
}); touch(meetingId); } catch (error) { console.error("Error submitting consent:", error); - } finally { - setConsentLoading(false); } }, - [api, touch], + [audioConsentMutation, touch], ); const showConsentModal = useCallback(() => { @@ -194,7 +195,12 @@ const useConsentDialog = ( return cleanup; }, [meetingId, handleConsent, wherebyRef, modalOpen]); - return { showConsentModal, consentState, hasConsent, consentLoading }; + return { + showConsentModal, + consentState, + hasConsent, + consentLoading: audioConsentMutation.isPending, + }; }; function ConsentDialogButton({ @@ -254,7 +260,9 @@ export default function Room(details: RoomDetails) { const roomName = details.params.roomName; const meeting = useRoomMeeting(roomName); const router = useRouter(); - const { isLoading, isAuthenticated } = useSessionStatus(); + const status = useAuth().status; + const isAuthenticated = status === "authenticated"; + const isLoading = status === "loading" || meeting.loading; const roomUrl = meeting?.response?.host_room_url ? meeting?.response?.host_room_url diff --git a/www/app/[roomName]/useRoomMeeting.tsx b/www/app/[roomName]/useRoomMeeting.tsx index 98c2f1f2..93491a05 100644 --- a/www/app/[roomName]/useRoomMeeting.tsx +++ b/www/app/[roomName]/useRoomMeeting.tsx @@ -1,8 +1,10 @@ import { useEffect, useState } from "react"; import { useError } from "../(errors)/errorContext"; -import { Meeting } from "../api"; +import type { components } from "../reflector-api"; import { shouldShowError } from "../lib/errorUtils"; -import useApi from "../lib/useApi"; + +type Meeting = components["schemas"]["Meeting"]; +import { useRoomsCreateMeeting } from "../lib/apiHooks"; import { notFound } from "next/navigation"; type ErrorMeeting = { @@ -30,27 +32,25 @@ const useRoomMeeting = ( roomName: string | null | undefined, ): ErrorMeeting | LoadingMeeting | SuccessMeeting => { const [response, setResponse] = useState(null); - const [loading, setLoading] = useState(true); - const [error, setErrorState] = useState(null); const [reload, setReload] = useState(0); const { setError } = useError(); - const api = useApi(); + const createMeetingMutation = useRoomsCreateMeeting(); const reloadHandler = () => setReload((prev) => prev + 1); useEffect(() => { - if (!roomName || !api) return; + if (!roomName) return; - if (!response) { - setLoading(true); - } - - api - .v1RoomsCreateMeeting({ roomName }) - .then((result) => { + const createMeeting = async () => { + try { + const result = await createMeetingMutation.mutateAsync({ + params: { + path: { + room_name: roomName, + }, + }, + }); setResponse(result); - setLoading(false); - }) - .catch((error) => { + } catch (error: any) { const shouldShowHuman = shouldShowError(error); if (shouldShowHuman && error.status !== 404) { setError( @@ -60,9 +60,14 @@ const useRoomMeeting = ( } else { setError(error); } - setErrorState(error); - }); - }, [roomName, !api, reload]); + } + }; + + createMeeting(); + }, [roomName, reload]); + + const loading = createMeetingMutation.isPending && !response; + const error = createMeetingMutation.error as Error | null; return { response, loading, error, reload: reloadHandler } as | ErrorMeeting diff --git a/www/app/api/OpenApi.ts b/www/app/api/OpenApi.ts deleted file mode 100644 index 23cc35f3..00000000 --- a/www/app/api/OpenApi.ts +++ /dev/null @@ -1,37 +0,0 @@ -import type { BaseHttpRequest } from "./core/BaseHttpRequest"; -import type { OpenAPIConfig } from "./core/OpenAPI"; -import { Interceptors } from "./core/OpenAPI"; -import { AxiosHttpRequest } from 
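// The OpenApi class deleted below wrapped every endpoint in a service method
// such as api.v1RoomsCreateMeeting({ roomName }). With openapi-fetch the same
// request is a single path-keyed call; a sketch, reusing the hypothetical
// fetchClient from the apiHooks sketch above:

const { data: meeting, error } = await fetchClient.POST(
  "/v1/rooms/{room_name}/meeting",
  { params: { path: { room_name: roomName } } },
);
if (error) throw error; // errors arrive as a value, not a rejected promise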
"./core/AxiosHttpRequest"; - -import { DefaultService } from "./services.gen"; - -type HttpRequestConstructor = new (config: OpenAPIConfig) => BaseHttpRequest; - -export class OpenApi { - public readonly default: DefaultService; - - public readonly request: BaseHttpRequest; - - constructor( - config?: Partial, - HttpRequest: HttpRequestConstructor = AxiosHttpRequest, - ) { - this.request = new HttpRequest({ - BASE: config?.BASE ?? "", - VERSION: config?.VERSION ?? "0.1.0", - WITH_CREDENTIALS: config?.WITH_CREDENTIALS ?? false, - CREDENTIALS: config?.CREDENTIALS ?? "include", - TOKEN: config?.TOKEN, - USERNAME: config?.USERNAME, - PASSWORD: config?.PASSWORD, - HEADERS: config?.HEADERS, - ENCODE_PATH: config?.ENCODE_PATH, - interceptors: { - request: config?.interceptors?.request ?? new Interceptors(), - response: config?.interceptors?.response ?? new Interceptors(), - }, - }); - - this.default = new DefaultService(this.request); - } -} diff --git a/www/app/api/auth/[...nextauth]/route.ts b/www/app/api/auth/[...nextauth]/route.ts index 915ed04d..7b73c22a 100644 --- a/www/app/api/auth/[...nextauth]/route.ts +++ b/www/app/api/auth/[...nextauth]/route.ts @@ -1,8 +1,5 @@ -// NextAuth route handler for Authentik -// Refresh rotation has been taken from https://next-auth.js.org/v3/tutorials/refresh-token-rotation even if we are using 4.x - import NextAuth from "next-auth"; -import { authOptions } from "../../../lib/auth"; +import { authOptions } from "../../../lib/authBackend"; const handler = NextAuth(authOptions); diff --git a/www/app/api/core/ApiError.ts b/www/app/api/core/ApiError.ts deleted file mode 100644 index 1d07bb31..00000000 --- a/www/app/api/core/ApiError.ts +++ /dev/null @@ -1,25 +0,0 @@ -import type { ApiRequestOptions } from "./ApiRequestOptions"; -import type { ApiResult } from "./ApiResult"; - -export class ApiError extends Error { - public readonly url: string; - public readonly status: number; - public readonly statusText: string; - public readonly body: unknown; - public readonly request: ApiRequestOptions; - - constructor( - request: ApiRequestOptions, - response: ApiResult, - message: string, - ) { - super(message); - - this.name = "ApiError"; - this.url = response.url; - this.status = response.status; - this.statusText = response.statusText; - this.body = response.body; - this.request = request; - } -} diff --git a/www/app/api/core/ApiRequestOptions.ts b/www/app/api/core/ApiRequestOptions.ts deleted file mode 100644 index 57fbb095..00000000 --- a/www/app/api/core/ApiRequestOptions.ts +++ /dev/null @@ -1,21 +0,0 @@ -export type ApiRequestOptions = { - readonly method: - | "GET" - | "PUT" - | "POST" - | "DELETE" - | "OPTIONS" - | "HEAD" - | "PATCH"; - readonly url: string; - readonly path?: Record; - readonly cookies?: Record; - readonly headers?: Record; - readonly query?: Record; - readonly formData?: Record; - readonly body?: any; - readonly mediaType?: string; - readonly responseHeader?: string; - readonly responseTransformer?: (data: unknown) => Promise; - readonly errors?: Record; -}; diff --git a/www/app/api/core/ApiResult.ts b/www/app/api/core/ApiResult.ts deleted file mode 100644 index 05040ba8..00000000 --- a/www/app/api/core/ApiResult.ts +++ /dev/null @@ -1,7 +0,0 @@ -export type ApiResult = { - readonly body: TData; - readonly ok: boolean; - readonly status: number; - readonly statusText: string; - readonly url: string; -}; diff --git a/www/app/api/core/AxiosHttpRequest.ts b/www/app/api/core/AxiosHttpRequest.ts deleted file mode 100644 index aba5096e..00000000 --- 
a/www/app/api/core/AxiosHttpRequest.ts +++ /dev/null @@ -1,23 +0,0 @@ -import type { ApiRequestOptions } from "./ApiRequestOptions"; -import { BaseHttpRequest } from "./BaseHttpRequest"; -import type { CancelablePromise } from "./CancelablePromise"; -import type { OpenAPIConfig } from "./OpenAPI"; -import { request as __request } from "./request"; - -export class AxiosHttpRequest extends BaseHttpRequest { - constructor(config: OpenAPIConfig) { - super(config); - } - - /** - * Request method - * @param options The request options from the service - * @returns CancelablePromise - * @throws ApiError - */ - public override request( - options: ApiRequestOptions, - ): CancelablePromise { - return __request(this.config, options); - } -} diff --git a/www/app/api/core/BaseHttpRequest.ts b/www/app/api/core/BaseHttpRequest.ts deleted file mode 100644 index 3f89861c..00000000 --- a/www/app/api/core/BaseHttpRequest.ts +++ /dev/null @@ -1,11 +0,0 @@ -import type { ApiRequestOptions } from "./ApiRequestOptions"; -import type { CancelablePromise } from "./CancelablePromise"; -import type { OpenAPIConfig } from "./OpenAPI"; - -export abstract class BaseHttpRequest { - constructor(public readonly config: OpenAPIConfig) {} - - public abstract request( - options: ApiRequestOptions, - ): CancelablePromise; -} diff --git a/www/app/api/core/CancelablePromise.ts b/www/app/api/core/CancelablePromise.ts deleted file mode 100644 index 0640e989..00000000 --- a/www/app/api/core/CancelablePromise.ts +++ /dev/null @@ -1,126 +0,0 @@ -export class CancelError extends Error { - constructor(message: string) { - super(message); - this.name = "CancelError"; - } - - public get isCancelled(): boolean { - return true; - } -} - -export interface OnCancel { - readonly isResolved: boolean; - readonly isRejected: boolean; - readonly isCancelled: boolean; - - (cancelHandler: () => void): void; -} - -export class CancelablePromise implements Promise { - private _isResolved: boolean; - private _isRejected: boolean; - private _isCancelled: boolean; - readonly cancelHandlers: (() => void)[]; - readonly promise: Promise; - private _resolve?: (value: T | PromiseLike) => void; - private _reject?: (reason?: unknown) => void; - - constructor( - executor: ( - resolve: (value: T | PromiseLike) => void, - reject: (reason?: unknown) => void, - onCancel: OnCancel, - ) => void, - ) { - this._isResolved = false; - this._isRejected = false; - this._isCancelled = false; - this.cancelHandlers = []; - this.promise = new Promise((resolve, reject) => { - this._resolve = resolve; - this._reject = reject; - - const onResolve = (value: T | PromiseLike): void => { - if (this._isResolved || this._isRejected || this._isCancelled) { - return; - } - this._isResolved = true; - if (this._resolve) this._resolve(value); - }; - - const onReject = (reason?: unknown): void => { - if (this._isResolved || this._isRejected || this._isCancelled) { - return; - } - this._isRejected = true; - if (this._reject) this._reject(reason); - }; - - const onCancel = (cancelHandler: () => void): void => { - if (this._isResolved || this._isRejected || this._isCancelled) { - return; - } - this.cancelHandlers.push(cancelHandler); - }; - - Object.defineProperty(onCancel, "isResolved", { - get: (): boolean => this._isResolved, - }); - - Object.defineProperty(onCancel, "isRejected", { - get: (): boolean => this._isRejected, - }); - - Object.defineProperty(onCancel, "isCancelled", { - get: (): boolean => this._isCancelled, - }); - - return executor(onResolve, onReject, onCancel as OnCancel); - 
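// CancelablePromise goes away with the rest of the hand-rolled client; plain
// fetch + AbortController covers the same ground (and TanStack Query wires
// this up itself via the signal it passes to queries). A sketch, not taken
// from the patch:

const controller = new AbortController();
const pending = fetch("/v1/transcripts", { signal: controller.signal });
controller.abort(); // rejects `pending` with a DOMException named "AbortError"
pending.catch((err) => {
  if ((err as DOMException).name !== "AbortError") throw err;
});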
}); - } - - get [Symbol.toStringTag]() { - return "Cancellable Promise"; - } - - public then( - onFulfilled?: ((value: T) => TResult1 | PromiseLike) | null, - onRejected?: ((reason: unknown) => TResult2 | PromiseLike) | null, - ): Promise { - return this.promise.then(onFulfilled, onRejected); - } - - public catch( - onRejected?: ((reason: unknown) => TResult | PromiseLike) | null, - ): Promise { - return this.promise.catch(onRejected); - } - - public finally(onFinally?: (() => void) | null): Promise { - return this.promise.finally(onFinally); - } - - public cancel(): void { - if (this._isResolved || this._isRejected || this._isCancelled) { - return; - } - this._isCancelled = true; - if (this.cancelHandlers.length) { - try { - for (const cancelHandler of this.cancelHandlers) { - cancelHandler(); - } - } catch (error) { - console.warn("Cancellation threw an error", error); - return; - } - } - this.cancelHandlers.length = 0; - if (this._reject) this._reject(new CancelError("Request aborted")); - } - - public get isCancelled(): boolean { - return this._isCancelled; - } -} diff --git a/www/app/api/core/OpenAPI.ts b/www/app/api/core/OpenAPI.ts deleted file mode 100644 index 20ea0ed9..00000000 --- a/www/app/api/core/OpenAPI.ts +++ /dev/null @@ -1,57 +0,0 @@ -import type { AxiosRequestConfig, AxiosResponse } from "axios"; -import type { ApiRequestOptions } from "./ApiRequestOptions"; - -type Headers = Record; -type Middleware = (value: T) => T | Promise; -type Resolver = (options: ApiRequestOptions) => Promise; - -export class Interceptors { - _fns: Middleware[]; - - constructor() { - this._fns = []; - } - - eject(fn: Middleware): void { - const index = this._fns.indexOf(fn); - if (index !== -1) { - this._fns = [...this._fns.slice(0, index), ...this._fns.slice(index + 1)]; - } - } - - use(fn: Middleware): void { - this._fns = [...this._fns, fn]; - } -} - -export type OpenAPIConfig = { - BASE: string; - CREDENTIALS: "include" | "omit" | "same-origin"; - ENCODE_PATH?: ((path: string) => string) | undefined; - HEADERS?: Headers | Resolver | undefined; - PASSWORD?: string | Resolver | undefined; - TOKEN?: string | Resolver | undefined; - USERNAME?: string | Resolver | undefined; - VERSION: string; - WITH_CREDENTIALS: boolean; - interceptors: { - request: Interceptors; - response: Interceptors; - }; -}; - -export const OpenAPI: OpenAPIConfig = { - BASE: "", - CREDENTIALS: "include", - ENCODE_PATH: undefined, - HEADERS: undefined, - PASSWORD: undefined, - TOKEN: undefined, - USERNAME: undefined, - VERSION: "0.1.0", - WITH_CREDENTIALS: false, - interceptors: { - request: new Interceptors(), - response: new Interceptors(), - }, -}; diff --git a/www/app/api/core/request.ts b/www/app/api/core/request.ts deleted file mode 100644 index b576207e..00000000 --- a/www/app/api/core/request.ts +++ /dev/null @@ -1,387 +0,0 @@ -import axios from "axios"; -import type { - AxiosError, - AxiosRequestConfig, - AxiosResponse, - AxiosInstance, -} from "axios"; - -import { ApiError } from "./ApiError"; -import type { ApiRequestOptions } from "./ApiRequestOptions"; -import type { ApiResult } from "./ApiResult"; -import { CancelablePromise } from "./CancelablePromise"; -import type { OnCancel } from "./CancelablePromise"; -import type { OpenAPIConfig } from "./OpenAPI"; - -export const isString = (value: unknown): value is string => { - return typeof value === "string"; -}; - -export const isStringWithValue = (value: unknown): value is string => { - return isString(value) && value !== ""; -}; - -export const isBlob = (value: 
any): value is Blob => { - return value instanceof Blob; -}; - -export const isFormData = (value: unknown): value is FormData => { - return value instanceof FormData; -}; - -export const isSuccess = (status: number): boolean => { - return status >= 200 && status < 300; -}; - -export const base64 = (str: string): string => { - try { - return btoa(str); - } catch (err) { - // @ts-ignore - return Buffer.from(str).toString("base64"); - } -}; - -export const getQueryString = (params: Record): string => { - const qs: string[] = []; - - const append = (key: string, value: unknown) => { - qs.push(`${encodeURIComponent(key)}=${encodeURIComponent(String(value))}`); - }; - - const encodePair = (key: string, value: unknown) => { - if (value === undefined || value === null) { - return; - } - - if (value instanceof Date) { - append(key, value.toISOString()); - } else if (Array.isArray(value)) { - value.forEach((v) => encodePair(key, v)); - } else if (typeof value === "object") { - Object.entries(value).forEach(([k, v]) => encodePair(`${key}[${k}]`, v)); - } else { - append(key, value); - } - }; - - Object.entries(params).forEach(([key, value]) => encodePair(key, value)); - - return qs.length ? `?${qs.join("&")}` : ""; -}; - -const getUrl = (config: OpenAPIConfig, options: ApiRequestOptions): string => { - const encoder = config.ENCODE_PATH || encodeURI; - - const path = options.url - .replace("{api-version}", config.VERSION) - .replace(/{(.*?)}/g, (substring: string, group: string) => { - if (options.path?.hasOwnProperty(group)) { - return encoder(String(options.path[group])); - } - return substring; - }); - - const url = config.BASE + path; - return options.query ? url + getQueryString(options.query) : url; -}; - -export const getFormData = ( - options: ApiRequestOptions, -): FormData | undefined => { - if (options.formData) { - const formData = new FormData(); - - const process = (key: string, value: unknown) => { - if (isString(value) || isBlob(value)) { - formData.append(key, value); - } else { - formData.append(key, JSON.stringify(value)); - } - }; - - Object.entries(options.formData) - .filter(([, value]) => value !== undefined && value !== null) - .forEach(([key, value]) => { - if (Array.isArray(value)) { - value.forEach((v) => process(key, v)); - } else { - process(key, value); - } - }); - - return formData; - } - return undefined; -}; - -type Resolver = (options: ApiRequestOptions) => Promise; - -export const resolve = async ( - options: ApiRequestOptions, - resolver?: T | Resolver, -): Promise => { - if (typeof resolver === "function") { - return (resolver as Resolver)(options); - } - return resolver; -}; - -export const getHeaders = async ( - config: OpenAPIConfig, - options: ApiRequestOptions, -): Promise> => { - const [token, username, password, additionalHeaders] = await Promise.all([ - // @ts-ignore - resolve(options, config.TOKEN), - // @ts-ignore - resolve(options, config.USERNAME), - // @ts-ignore - resolve(options, config.PASSWORD), - // @ts-ignore - resolve(options, config.HEADERS), - ]); - - const headers = Object.entries({ - Accept: "application/json", - ...additionalHeaders, - ...options.headers, - }) - .filter(([, value]) => value !== undefined && value !== null) - .reduce( - (headers, [key, value]) => ({ - ...headers, - [key]: String(value), - }), - {} as Record, - ); - - if (isStringWithValue(token)) { - headers["Authorization"] = `Bearer ${token}`; - } - - if (isStringWithValue(username) && isStringWithValue(password)) { - const credentials = base64(`${username}:${password}`); 
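// The TOKEN/USERNAME/PASSWORD resolution above was how the old client attached
// credentials per request. openapi-fetch expresses the same idea as
// middleware; a sketch, assuming a getAccessToken() helper (hypothetical):

fetchClient.use({
  async onRequest({ request }) {
    const token = await getAccessToken();
    if (token) request.headers.set("Authorization", `Bearer ${token}`);
    return request;
  },
});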
- headers["Authorization"] = `Basic ${credentials}`; - } - - if (options.body !== undefined) { - if (options.mediaType) { - headers["Content-Type"] = options.mediaType; - } else if (isBlob(options.body)) { - headers["Content-Type"] = options.body.type || "application/octet-stream"; - } else if (isString(options.body)) { - headers["Content-Type"] = "text/plain"; - } else if (!isFormData(options.body)) { - headers["Content-Type"] = "application/json"; - } - } else if (options.formData !== undefined) { - if (options.mediaType) { - headers["Content-Type"] = options.mediaType; - } - } - - return headers; -}; - -export const getRequestBody = (options: ApiRequestOptions): unknown => { - if (options.body) { - return options.body; - } - return undefined; -}; - -export const sendRequest = async ( - config: OpenAPIConfig, - options: ApiRequestOptions, - url: string, - body: unknown, - formData: FormData | undefined, - headers: Record, - onCancel: OnCancel, - axiosClient: AxiosInstance, -): Promise> => { - const controller = new AbortController(); - - let requestConfig: AxiosRequestConfig = { - data: body ?? formData, - headers, - method: options.method, - signal: controller.signal, - url, - withCredentials: config.WITH_CREDENTIALS, - }; - - onCancel(() => controller.abort()); - - for (const fn of config.interceptors.request._fns) { - requestConfig = await fn(requestConfig); - } - - try { - return await axiosClient.request(requestConfig); - } catch (error) { - const axiosError = error as AxiosError; - if (axiosError.response) { - return axiosError.response; - } - throw error; - } -}; - -export const getResponseHeader = ( - response: AxiosResponse, - responseHeader?: string, -): string | undefined => { - if (responseHeader) { - const content = response.headers[responseHeader]; - if (isString(content)) { - return content; - } - } - return undefined; -}; - -export const getResponseBody = (response: AxiosResponse): unknown => { - if (response.status !== 204) { - return response.data; - } - return undefined; -}; - -export const catchErrorCodes = ( - options: ApiRequestOptions, - result: ApiResult, -): void => { - const errors: Record = { - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Payload Too Large", - 414: "URI Too Long", - 415: "Unsupported Media Type", - 416: "Range Not Satisfiable", - 417: "Expectation Failed", - 418: "Im a teapot", - 421: "Misdirected Request", - 422: "Unprocessable Content", - 423: "Locked", - 424: "Failed Dependency", - 425: "Too Early", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 451: "Unavailable For Legal Reasons", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 510: "Not Extended", - 511: "Network Authentication Required", - ...options.errors, - }; - - const error = errors[result.status]; - if (error) { - throw new ApiError(options, result, error); - } - - if (!result.ok) { - const errorStatus = result.status ?? "unknown"; - const errorStatusText = result.statusText ?? 
"unknown"; - const errorBody = (() => { - try { - return JSON.stringify(result.body, null, 2); - } catch (e) { - return undefined; - } - })(); - - throw new ApiError( - options, - result, - `Generic Error: status: ${errorStatus}; status text: ${errorStatusText}; body: ${errorBody}`, - ); - } -}; - -/** - * Request method - * @param config The OpenAPI configuration object - * @param options The request options from the service - * @param axiosClient The axios client instance to use - * @returns CancelablePromise - * @throws ApiError - */ -export const request = ( - config: OpenAPIConfig, - options: ApiRequestOptions, - axiosClient: AxiosInstance = axios, -): CancelablePromise => { - return new CancelablePromise(async (resolve, reject, onCancel) => { - try { - const url = getUrl(config, options); - const formData = getFormData(options); - const body = getRequestBody(options); - const headers = await getHeaders(config, options); - - if (!onCancel.isCancelled) { - let response = await sendRequest( - config, - options, - url, - body, - formData, - headers, - onCancel, - axiosClient, - ); - - for (const fn of config.interceptors.response._fns) { - response = await fn(response); - } - - const responseBody = getResponseBody(response); - const responseHeader = getResponseHeader( - response, - options.responseHeader, - ); - - let transformedBody = responseBody; - if (options.responseTransformer && isSuccess(response.status)) { - transformedBody = await options.responseTransformer(responseBody); - } - - const result: ApiResult = { - url, - ok: isSuccess(response.status), - status: response.status, - statusText: response.statusText, - body: responseHeader ?? transformedBody, - }; - - catchErrorCodes(options, result); - - resolve(result.body); - } - } catch (error) { - reject(error); - } - }); -}; diff --git a/www/app/api/index.ts b/www/app/api/index.ts deleted file mode 100644 index 27fbb57d..00000000 --- a/www/app/api/index.ts +++ /dev/null @@ -1,9 +0,0 @@ -// This file is auto-generated by @hey-api/openapi-ts -export { OpenApi } from "./OpenApi"; -export { ApiError } from "./core/ApiError"; -export { BaseHttpRequest } from "./core/BaseHttpRequest"; -export { CancelablePromise, CancelError } from "./core/CancelablePromise"; -export { OpenAPI, type OpenAPIConfig } from "./core/OpenAPI"; -export * from "./schemas.gen"; -export * from "./services.gen"; -export * from "./types.gen"; diff --git a/www/app/api/schemas.gen.ts b/www/app/api/schemas.gen.ts index 03091a5f..e69de29b 100644 --- a/www/app/api/schemas.gen.ts +++ b/www/app/api/schemas.gen.ts @@ -1,1776 +0,0 @@ -// This file is auto-generated by @hey-api/openapi-ts - -export const $AudioWaveform = { - properties: { - data: { - items: { - type: "number", - }, - type: "array", - title: "Data", - }, - }, - type: "object", - required: ["data"], - title: "AudioWaveform", -} as const; - -export const $Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post = - { - properties: { - chunk: { - type: "string", - format: "binary", - title: "Chunk", - }, - }, - type: "object", - required: ["chunk"], - title: - "Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post", - } as const; - -export const $CreateParticipant = { - properties: { - speaker: { - anyOf: [ - { - type: "integer", - }, - { - type: "null", - }, - ], - title: "Speaker", - }, - name: { - type: "string", - title: "Name", - }, - }, - type: "object", - required: ["name"], - title: "CreateParticipant", -} as const; - -export const $CreateRoom = { - 
properties: { - name: { - type: "string", - title: "Name", - }, - zulip_auto_post: { - type: "boolean", - title: "Zulip Auto Post", - }, - zulip_stream: { - type: "string", - title: "Zulip Stream", - }, - zulip_topic: { - type: "string", - title: "Zulip Topic", - }, - is_locked: { - type: "boolean", - title: "Is Locked", - }, - room_mode: { - type: "string", - title: "Room Mode", - }, - recording_type: { - type: "string", - title: "Recording Type", - }, - recording_trigger: { - type: "string", - title: "Recording Trigger", - }, - is_shared: { - type: "boolean", - title: "Is Shared", - }, - webhook_url: { - type: "string", - title: "Webhook Url", - }, - webhook_secret: { - type: "string", - title: "Webhook Secret", - }, - }, - type: "object", - required: [ - "name", - "zulip_auto_post", - "zulip_stream", - "zulip_topic", - "is_locked", - "room_mode", - "recording_type", - "recording_trigger", - "is_shared", - "webhook_url", - "webhook_secret", - ], - title: "CreateRoom", -} as const; - -export const $CreateTranscript = { - properties: { - name: { - type: "string", - title: "Name", - }, - source_language: { - type: "string", - title: "Source Language", - default: "en", - }, - target_language: { - type: "string", - title: "Target Language", - default: "en", - }, - source_kind: { - anyOf: [ - { - $ref: "#/components/schemas/SourceKind", - }, - { - type: "null", - }, - ], - }, - }, - type: "object", - required: ["name"], - title: "CreateTranscript", -} as const; - -export const $DeletionStatus = { - properties: { - status: { - type: "string", - title: "Status", - }, - }, - type: "object", - required: ["status"], - title: "DeletionStatus", -} as const; - -export const $GetTranscript = { - properties: { - id: { - type: "string", - title: "Id", - }, - user_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "User Id", - }, - name: { - type: "string", - title: "Name", - }, - status: { - type: "string", - title: "Status", - }, - locked: { - type: "boolean", - title: "Locked", - }, - duration: { - type: "number", - title: "Duration", - }, - title: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Title", - }, - short_summary: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Short Summary", - }, - long_summary: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Long Summary", - }, - created_at: { - type: "string", - title: "Created At", - }, - share_mode: { - type: "string", - title: "Share Mode", - default: "private", - }, - source_language: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Source Language", - }, - target_language: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Target Language", - }, - reviewed: { - type: "boolean", - title: "Reviewed", - }, - meeting_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Meeting Id", - }, - source_kind: { - $ref: "#/components/schemas/SourceKind", - }, - room_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Room Id", - }, - room_name: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Room Name", - }, - audio_deleted: { - anyOf: [ - { - type: "boolean", - }, - { - type: "null", - }, - ], - title: "Audio Deleted", - }, - participants: { - anyOf: [ - { - items: { - $ref: "#/components/schemas/TranscriptParticipant", - }, - type: "array", - }, - { - type: "null", - }, - ], - 
title: "Participants", - }, - }, - type: "object", - required: [ - "id", - "user_id", - "name", - "status", - "locked", - "duration", - "title", - "short_summary", - "long_summary", - "created_at", - "source_language", - "target_language", - "reviewed", - "meeting_id", - "source_kind", - "participants", - ], - title: "GetTranscript", -} as const; - -export const $GetTranscriptMinimal = { - properties: { - id: { - type: "string", - title: "Id", - }, - user_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "User Id", - }, - name: { - type: "string", - title: "Name", - }, - status: { - type: "string", - title: "Status", - }, - locked: { - type: "boolean", - title: "Locked", - }, - duration: { - type: "number", - title: "Duration", - }, - title: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Title", - }, - short_summary: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Short Summary", - }, - long_summary: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Long Summary", - }, - created_at: { - type: "string", - title: "Created At", - }, - share_mode: { - type: "string", - title: "Share Mode", - default: "private", - }, - source_language: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Source Language", - }, - target_language: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Target Language", - }, - reviewed: { - type: "boolean", - title: "Reviewed", - }, - meeting_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Meeting Id", - }, - source_kind: { - $ref: "#/components/schemas/SourceKind", - }, - room_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Room Id", - }, - room_name: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Room Name", - }, - audio_deleted: { - anyOf: [ - { - type: "boolean", - }, - { - type: "null", - }, - ], - title: "Audio Deleted", - }, - }, - type: "object", - required: [ - "id", - "user_id", - "name", - "status", - "locked", - "duration", - "title", - "short_summary", - "long_summary", - "created_at", - "source_language", - "target_language", - "reviewed", - "meeting_id", - "source_kind", - ], - title: "GetTranscriptMinimal", -} as const; - -export const $GetTranscriptSegmentTopic = { - properties: { - text: { - type: "string", - title: "Text", - }, - start: { - type: "number", - title: "Start", - }, - speaker: { - type: "integer", - title: "Speaker", - }, - }, - type: "object", - required: ["text", "start", "speaker"], - title: "GetTranscriptSegmentTopic", -} as const; - -export const $GetTranscriptTopic = { - properties: { - id: { - type: "string", - title: "Id", - }, - title: { - type: "string", - title: "Title", - }, - summary: { - type: "string", - title: "Summary", - }, - timestamp: { - type: "number", - title: "Timestamp", - }, - duration: { - anyOf: [ - { - type: "number", - }, - { - type: "null", - }, - ], - title: "Duration", - }, - transcript: { - type: "string", - title: "Transcript", - }, - segments: { - items: { - $ref: "#/components/schemas/GetTranscriptSegmentTopic", - }, - type: "array", - title: "Segments", - default: [], - }, - }, - type: "object", - required: ["id", "title", "summary", "timestamp", "duration", "transcript"], - title: "GetTranscriptTopic", -} as const; - -export const $GetTranscriptTopicWithWords = { - properties: { - id: { - type: "string", - title: 
"Id", - }, - title: { - type: "string", - title: "Title", - }, - summary: { - type: "string", - title: "Summary", - }, - timestamp: { - type: "number", - title: "Timestamp", - }, - duration: { - anyOf: [ - { - type: "number", - }, - { - type: "null", - }, - ], - title: "Duration", - }, - transcript: { - type: "string", - title: "Transcript", - }, - segments: { - items: { - $ref: "#/components/schemas/GetTranscriptSegmentTopic", - }, - type: "array", - title: "Segments", - default: [], - }, - words: { - items: { - $ref: "#/components/schemas/Word", - }, - type: "array", - title: "Words", - default: [], - }, - }, - type: "object", - required: ["id", "title", "summary", "timestamp", "duration", "transcript"], - title: "GetTranscriptTopicWithWords", -} as const; - -export const $GetTranscriptTopicWithWordsPerSpeaker = { - properties: { - id: { - type: "string", - title: "Id", - }, - title: { - type: "string", - title: "Title", - }, - summary: { - type: "string", - title: "Summary", - }, - timestamp: { - type: "number", - title: "Timestamp", - }, - duration: { - anyOf: [ - { - type: "number", - }, - { - type: "null", - }, - ], - title: "Duration", - }, - transcript: { - type: "string", - title: "Transcript", - }, - segments: { - items: { - $ref: "#/components/schemas/GetTranscriptSegmentTopic", - }, - type: "array", - title: "Segments", - default: [], - }, - words_per_speaker: { - items: { - $ref: "#/components/schemas/SpeakerWords", - }, - type: "array", - title: "Words Per Speaker", - default: [], - }, - }, - type: "object", - required: ["id", "title", "summary", "timestamp", "duration", "transcript"], - title: "GetTranscriptTopicWithWordsPerSpeaker", -} as const; - -export const $HTTPValidationError = { - properties: { - detail: { - items: { - $ref: "#/components/schemas/ValidationError", - }, - type: "array", - title: "Detail", - }, - }, - type: "object", - title: "HTTPValidationError", -} as const; - -export const $Meeting = { - properties: { - id: { - type: "string", - title: "Id", - }, - room_name: { - type: "string", - title: "Room Name", - }, - room_url: { - type: "string", - title: "Room Url", - }, - host_room_url: { - type: "string", - title: "Host Room Url", - }, - start_date: { - type: "string", - format: "date-time", - title: "Start Date", - }, - end_date: { - type: "string", - format: "date-time", - title: "End Date", - }, - recording_type: { - type: "string", - enum: ["none", "local", "cloud"], - title: "Recording Type", - default: "cloud", - }, - }, - type: "object", - required: [ - "id", - "room_name", - "room_url", - "host_room_url", - "start_date", - "end_date", - ], - title: "Meeting", -} as const; - -export const $MeetingConsentRequest = { - properties: { - consent_given: { - type: "boolean", - title: "Consent Given", - }, - }, - type: "object", - required: ["consent_given"], - title: "MeetingConsentRequest", -} as const; - -export const $Page_GetTranscriptMinimal_ = { - properties: { - items: { - items: { - $ref: "#/components/schemas/GetTranscriptMinimal", - }, - type: "array", - title: "Items", - }, - total: { - anyOf: [ - { - type: "integer", - minimum: 0, - }, - { - type: "null", - }, - ], - title: "Total", - }, - page: { - anyOf: [ - { - type: "integer", - minimum: 1, - }, - { - type: "null", - }, - ], - title: "Page", - }, - size: { - anyOf: [ - { - type: "integer", - minimum: 1, - }, - { - type: "null", - }, - ], - title: "Size", - }, - pages: { - anyOf: [ - { - type: "integer", - minimum: 0, - }, - { - type: "null", - }, - ], - title: "Pages", - }, - }, - type: 
"object", - required: ["items", "page", "size"], - title: "Page[GetTranscriptMinimal]", -} as const; - -export const $Page_RoomDetails_ = { - properties: { - items: { - items: { - $ref: "#/components/schemas/RoomDetails", - }, - type: "array", - title: "Items", - }, - total: { - anyOf: [ - { - type: "integer", - minimum: 0, - }, - { - type: "null", - }, - ], - title: "Total", - }, - page: { - anyOf: [ - { - type: "integer", - minimum: 1, - }, - { - type: "null", - }, - ], - title: "Page", - }, - size: { - anyOf: [ - { - type: "integer", - minimum: 1, - }, - { - type: "null", - }, - ], - title: "Size", - }, - pages: { - anyOf: [ - { - type: "integer", - minimum: 0, - }, - { - type: "null", - }, - ], - title: "Pages", - }, - }, - type: "object", - required: ["items", "page", "size"], - title: "Page[RoomDetails]", -} as const; - -export const $Participant = { - properties: { - id: { - type: "string", - title: "Id", - }, - speaker: { - anyOf: [ - { - type: "integer", - }, - { - type: "null", - }, - ], - title: "Speaker", - }, - name: { - type: "string", - title: "Name", - }, - }, - type: "object", - required: ["id", "speaker", "name"], - title: "Participant", -} as const; - -export const $Room = { - properties: { - id: { - type: "string", - title: "Id", - }, - name: { - type: "string", - title: "Name", - }, - user_id: { - type: "string", - title: "User Id", - }, - created_at: { - type: "string", - format: "date-time", - title: "Created At", - }, - zulip_auto_post: { - type: "boolean", - title: "Zulip Auto Post", - }, - zulip_stream: { - type: "string", - title: "Zulip Stream", - }, - zulip_topic: { - type: "string", - title: "Zulip Topic", - }, - is_locked: { - type: "boolean", - title: "Is Locked", - }, - room_mode: { - type: "string", - title: "Room Mode", - }, - recording_type: { - type: "string", - title: "Recording Type", - }, - recording_trigger: { - type: "string", - title: "Recording Trigger", - }, - is_shared: { - type: "boolean", - title: "Is Shared", - }, - }, - type: "object", - required: [ - "id", - "name", - "user_id", - "created_at", - "zulip_auto_post", - "zulip_stream", - "zulip_topic", - "is_locked", - "room_mode", - "recording_type", - "recording_trigger", - "is_shared", - ], - title: "Room", -} as const; - -export const $RoomDetails = { - properties: { - id: { - type: "string", - title: "Id", - }, - name: { - type: "string", - title: "Name", - }, - user_id: { - type: "string", - title: "User Id", - }, - created_at: { - type: "string", - format: "date-time", - title: "Created At", - }, - zulip_auto_post: { - type: "boolean", - title: "Zulip Auto Post", - }, - zulip_stream: { - type: "string", - title: "Zulip Stream", - }, - zulip_topic: { - type: "string", - title: "Zulip Topic", - }, - is_locked: { - type: "boolean", - title: "Is Locked", - }, - room_mode: { - type: "string", - title: "Room Mode", - }, - recording_type: { - type: "string", - title: "Recording Type", - }, - recording_trigger: { - type: "string", - title: "Recording Trigger", - }, - is_shared: { - type: "boolean", - title: "Is Shared", - }, - webhook_url: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Webhook Url", - }, - webhook_secret: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Webhook Secret", - }, - }, - type: "object", - required: [ - "id", - "name", - "user_id", - "created_at", - "zulip_auto_post", - "zulip_stream", - "zulip_topic", - "is_locked", - "room_mode", - "recording_type", - "recording_trigger", - "is_shared", - 
"webhook_url", - "webhook_secret", - ], - title: "RoomDetails", -} as const; - -export const $RtcOffer = { - properties: { - sdp: { - type: "string", - title: "Sdp", - }, - type: { - type: "string", - title: "Type", - }, - }, - type: "object", - required: ["sdp", "type"], - title: "RtcOffer", -} as const; - -export const $SearchResponse = { - properties: { - results: { - items: { - $ref: "#/components/schemas/SearchResult", - }, - type: "array", - title: "Results", - }, - total: { - type: "integer", - minimum: 0, - title: "Total", - description: "Total number of search results", - }, - query: { - anyOf: [ - { - type: "string", - minLength: 1, - description: "Search query text", - }, - { - type: "null", - }, - ], - title: "Query", - }, - limit: { - type: "integer", - maximum: 100, - minimum: 1, - title: "Limit", - description: "Results per page", - }, - offset: { - type: "integer", - minimum: 0, - title: "Offset", - description: "Number of results to skip", - }, - }, - type: "object", - required: ["results", "total", "limit", "offset"], - title: "SearchResponse", -} as const; - -export const $SearchResult = { - properties: { - id: { - type: "string", - minLength: 1, - title: "Id", - }, - title: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Title", - }, - user_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "User Id", - }, - room_id: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Room Id", - }, - room_name: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Room Name", - }, - source_kind: { - $ref: "#/components/schemas/SourceKind", - }, - created_at: { - type: "string", - title: "Created At", - }, - status: { - type: "string", - minLength: 1, - title: "Status", - }, - rank: { - type: "number", - maximum: 1, - minimum: 0, - title: "Rank", - }, - duration: { - anyOf: [ - { - type: "number", - minimum: 0, - }, - { - type: "null", - }, - ], - title: "Duration", - description: "Duration in seconds", - }, - search_snippets: { - items: { - type: "string", - }, - type: "array", - title: "Search Snippets", - description: "Text snippets around search matches", - }, - total_match_count: { - type: "integer", - minimum: 0, - title: "Total Match Count", - description: "Total number of matches found in the transcript", - default: 0, - }, - }, - type: "object", - required: [ - "id", - "source_kind", - "created_at", - "status", - "rank", - "duration", - "search_snippets", - ], - title: "SearchResult", - description: "Public search result model with computed fields.", -} as const; - -export const $SourceKind = { - type: "string", - enum: ["room", "live", "file"], - title: "SourceKind", -} as const; - -export const $SpeakerAssignment = { - properties: { - speaker: { - anyOf: [ - { - type: "integer", - minimum: 0, - }, - { - type: "null", - }, - ], - title: "Speaker", - }, - participant: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Participant", - }, - timestamp_from: { - type: "number", - title: "Timestamp From", - }, - timestamp_to: { - type: "number", - title: "Timestamp To", - }, - }, - type: "object", - required: ["timestamp_from", "timestamp_to"], - title: "SpeakerAssignment", -} as const; - -export const $SpeakerAssignmentStatus = { - properties: { - status: { - type: "string", - title: "Status", - }, - }, - type: "object", - required: ["status"], - title: "SpeakerAssignmentStatus", -} as const; - -export const $SpeakerMerge = { - 
properties: { - speaker_from: { - type: "integer", - title: "Speaker From", - }, - speaker_to: { - type: "integer", - title: "Speaker To", - }, - }, - type: "object", - required: ["speaker_from", "speaker_to"], - title: "SpeakerMerge", -} as const; - -export const $SpeakerWords = { - properties: { - speaker: { - type: "integer", - title: "Speaker", - }, - words: { - items: { - $ref: "#/components/schemas/Word", - }, - type: "array", - title: "Words", - }, - }, - type: "object", - required: ["speaker", "words"], - title: "SpeakerWords", -} as const; - -export const $Stream = { - properties: { - stream_id: { - type: "integer", - title: "Stream Id", - }, - name: { - type: "string", - title: "Name", - }, - }, - type: "object", - required: ["stream_id", "name"], - title: "Stream", -} as const; - -export const $Topic = { - properties: { - name: { - type: "string", - title: "Name", - }, - }, - type: "object", - required: ["name"], - title: "Topic", -} as const; - -export const $TranscriptParticipant = { - properties: { - id: { - type: "string", - title: "Id", - }, - speaker: { - anyOf: [ - { - type: "integer", - }, - { - type: "null", - }, - ], - title: "Speaker", - }, - name: { - type: "string", - title: "Name", - }, - }, - type: "object", - required: ["speaker", "name"], - title: "TranscriptParticipant", -} as const; - -export const $UpdateParticipant = { - properties: { - speaker: { - anyOf: [ - { - type: "integer", - }, - { - type: "null", - }, - ], - title: "Speaker", - }, - name: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Name", - }, - }, - type: "object", - title: "UpdateParticipant", -} as const; - -export const $UpdateRoom = { - properties: { - name: { - type: "string", - title: "Name", - }, - zulip_auto_post: { - type: "boolean", - title: "Zulip Auto Post", - }, - zulip_stream: { - type: "string", - title: "Zulip Stream", - }, - zulip_topic: { - type: "string", - title: "Zulip Topic", - }, - is_locked: { - type: "boolean", - title: "Is Locked", - }, - room_mode: { - type: "string", - title: "Room Mode", - }, - recording_type: { - type: "string", - title: "Recording Type", - }, - recording_trigger: { - type: "string", - title: "Recording Trigger", - }, - is_shared: { - type: "boolean", - title: "Is Shared", - }, - webhook_url: { - type: "string", - title: "Webhook Url", - }, - webhook_secret: { - type: "string", - title: "Webhook Secret", - }, - }, - type: "object", - required: [ - "name", - "zulip_auto_post", - "zulip_stream", - "zulip_topic", - "is_locked", - "room_mode", - "recording_type", - "recording_trigger", - "is_shared", - "webhook_url", - "webhook_secret", - ], - title: "UpdateRoom", -} as const; - -export const $UpdateTranscript = { - properties: { - name: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Name", - }, - locked: { - anyOf: [ - { - type: "boolean", - }, - { - type: "null", - }, - ], - title: "Locked", - }, - title: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Title", - }, - short_summary: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Short Summary", - }, - long_summary: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Long Summary", - }, - share_mode: { - anyOf: [ - { - type: "string", - enum: ["public", "semi-private", "private"], - }, - { - type: "null", - }, - ], - title: "Share Mode", - }, - participants: { - anyOf: [ - { - items: { - $ref: "#/components/schemas/TranscriptParticipant", - }, - 
type: "array", - }, - { - type: "null", - }, - ], - title: "Participants", - }, - reviewed: { - anyOf: [ - { - type: "boolean", - }, - { - type: "null", - }, - ], - title: "Reviewed", - }, - audio_deleted: { - anyOf: [ - { - type: "boolean", - }, - { - type: "null", - }, - ], - title: "Audio Deleted", - }, - }, - type: "object", - title: "UpdateTranscript", -} as const; - -export const $UserInfo = { - properties: { - sub: { - type: "string", - title: "Sub", - }, - email: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Email", - }, - email_verified: { - anyOf: [ - { - type: "boolean", - }, - { - type: "null", - }, - ], - title: "Email Verified", - }, - }, - type: "object", - required: ["sub", "email", "email_verified"], - title: "UserInfo", -} as const; - -export const $ValidationError = { - properties: { - loc: { - items: { - anyOf: [ - { - type: "string", - }, - { - type: "integer", - }, - ], - }, - type: "array", - title: "Location", - }, - msg: { - type: "string", - title: "Message", - }, - type: { - type: "string", - title: "Error Type", - }, - }, - type: "object", - required: ["loc", "msg", "type"], - title: "ValidationError", -} as const; - -export const $WebhookTestResult = { - properties: { - success: { - type: "boolean", - title: "Success", - }, - message: { - type: "string", - title: "Message", - default: "", - }, - error: { - type: "string", - title: "Error", - default: "", - }, - status_code: { - anyOf: [ - { - type: "integer", - }, - { - type: "null", - }, - ], - title: "Status Code", - }, - response_preview: { - anyOf: [ - { - type: "string", - }, - { - type: "null", - }, - ], - title: "Response Preview", - }, - }, - type: "object", - required: ["success"], - title: "WebhookTestResult", -} as const; - -export const $WherebyWebhookEvent = { - properties: { - apiVersion: { - type: "string", - title: "Apiversion", - }, - id: { - type: "string", - title: "Id", - }, - createdAt: { - type: "string", - format: "date-time", - title: "Createdat", - }, - type: { - type: "string", - title: "Type", - }, - data: { - additionalProperties: true, - type: "object", - title: "Data", - }, - }, - type: "object", - required: ["apiVersion", "id", "createdAt", "type", "data"], - title: "WherebyWebhookEvent", -} as const; - -export const $Word = { - properties: { - text: { - type: "string", - title: "Text", - }, - start: { - type: "number", - minimum: 0, - title: "Start", - description: "Time in seconds with float part", - }, - end: { - type: "number", - minimum: 0, - title: "End", - description: "Time in seconds with float part", - }, - speaker: { - type: "integer", - title: "Speaker", - default: 0, - }, - }, - type: "object", - required: ["text", "start", "end"], - title: "Word", -} as const; diff --git a/www/app/api/services.gen.ts b/www/app/api/services.gen.ts index c9e027fb..e69de29b 100644 --- a/www/app/api/services.gen.ts +++ b/www/app/api/services.gen.ts @@ -1,942 +0,0 @@ -// This file is auto-generated by @hey-api/openapi-ts - -import type { CancelablePromise } from "./core/CancelablePromise"; -import type { BaseHttpRequest } from "./core/BaseHttpRequest"; -import type { - MetricsResponse, - V1MeetingAudioConsentData, - V1MeetingAudioConsentResponse, - V1RoomsListData, - V1RoomsListResponse, - V1RoomsCreateData, - V1RoomsCreateResponse, - V1RoomsGetData, - V1RoomsGetResponse, - V1RoomsUpdateData, - V1RoomsUpdateResponse, - V1RoomsDeleteData, - V1RoomsDeleteResponse, - V1RoomsCreateMeetingData, - V1RoomsCreateMeetingResponse, - V1RoomsTestWebhookData, - 
V1RoomsTestWebhookResponse, - V1TranscriptsListData, - V1TranscriptsListResponse, - V1TranscriptsCreateData, - V1TranscriptsCreateResponse, - V1TranscriptsSearchData, - V1TranscriptsSearchResponse, - V1TranscriptGetData, - V1TranscriptGetResponse, - V1TranscriptUpdateData, - V1TranscriptUpdateResponse, - V1TranscriptDeleteData, - V1TranscriptDeleteResponse, - V1TranscriptGetTopicsData, - V1TranscriptGetTopicsResponse, - V1TranscriptGetTopicsWithWordsData, - V1TranscriptGetTopicsWithWordsResponse, - V1TranscriptGetTopicsWithWordsPerSpeakerData, - V1TranscriptGetTopicsWithWordsPerSpeakerResponse, - V1TranscriptPostToZulipData, - V1TranscriptPostToZulipResponse, - V1TranscriptHeadAudioMp3Data, - V1TranscriptHeadAudioMp3Response, - V1TranscriptGetAudioMp3Data, - V1TranscriptGetAudioMp3Response, - V1TranscriptGetAudioWaveformData, - V1TranscriptGetAudioWaveformResponse, - V1TranscriptGetParticipantsData, - V1TranscriptGetParticipantsResponse, - V1TranscriptAddParticipantData, - V1TranscriptAddParticipantResponse, - V1TranscriptGetParticipantData, - V1TranscriptGetParticipantResponse, - V1TranscriptUpdateParticipantData, - V1TranscriptUpdateParticipantResponse, - V1TranscriptDeleteParticipantData, - V1TranscriptDeleteParticipantResponse, - V1TranscriptAssignSpeakerData, - V1TranscriptAssignSpeakerResponse, - V1TranscriptMergeSpeakerData, - V1TranscriptMergeSpeakerResponse, - V1TranscriptRecordUploadData, - V1TranscriptRecordUploadResponse, - V1TranscriptGetWebsocketEventsData, - V1TranscriptGetWebsocketEventsResponse, - V1TranscriptRecordWebrtcData, - V1TranscriptRecordWebrtcResponse, - V1TranscriptProcessData, - V1TranscriptProcessResponse, - V1UserMeResponse, - V1ZulipGetStreamsResponse, - V1ZulipGetTopicsData, - V1ZulipGetTopicsResponse, - V1WherebyWebhookData, - V1WherebyWebhookResponse, -} from "./types.gen"; - -export class DefaultService { - constructor(public readonly httpRequest: BaseHttpRequest) {} - - /** - * Metrics - * Endpoint that serves Prometheus metrics. - * @returns unknown Successful Response - * @throws ApiError - */ - public metrics(): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/metrics", - }); - } - - /** - * Meeting Audio Consent - * @param data The data for the request. - * @param data.meetingId - * @param data.requestBody - * @returns unknown Successful Response - * @throws ApiError - */ - public v1MeetingAudioConsent( - data: V1MeetingAudioConsentData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/meetings/{meeting_id}/consent", - path: { - meeting_id: data.meetingId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms List - * @param data The data for the request. - * @param data.page Page number - * @param data.size Page size - * @returns Page_RoomDetails_ Successful Response - * @throws ApiError - */ - public v1RoomsList( - data: V1RoomsListData = {}, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/rooms", - query: { - page: data.page, - size: data.size, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms Create - * @param data The data for the request. 
- * @param data.requestBody - * @returns Room Successful Response - * @throws ApiError - */ - public v1RoomsCreate( - data: V1RoomsCreateData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/rooms", - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms Get - * @param data The data for the request. - * @param data.roomId - * @returns RoomDetails Successful Response - * @throws ApiError - */ - public v1RoomsGet( - data: V1RoomsGetData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/rooms/{room_id}", - path: { - room_id: data.roomId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms Update - * @param data The data for the request. - * @param data.roomId - * @param data.requestBody - * @returns RoomDetails Successful Response - * @throws ApiError - */ - public v1RoomsUpdate( - data: V1RoomsUpdateData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "PATCH", - url: "/v1/rooms/{room_id}", - path: { - room_id: data.roomId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms Delete - * @param data The data for the request. - * @param data.roomId - * @returns DeletionStatus Successful Response - * @throws ApiError - */ - public v1RoomsDelete( - data: V1RoomsDeleteData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "DELETE", - url: "/v1/rooms/{room_id}", - path: { - room_id: data.roomId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms Create Meeting - * @param data The data for the request. - * @param data.roomName - * @returns Meeting Successful Response - * @throws ApiError - */ - public v1RoomsCreateMeeting( - data: V1RoomsCreateMeetingData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/rooms/{room_name}/meeting", - path: { - room_name: data.roomName, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Rooms Test Webhook - * Test webhook configuration by sending a sample payload. - * @param data The data for the request. - * @param data.roomId - * @returns WebhookTestResult Successful Response - * @throws ApiError - */ - public v1RoomsTestWebhook( - data: V1RoomsTestWebhookData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/rooms/{room_id}/webhook/test", - path: { - room_id: data.roomId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcripts List - * @param data The data for the request. - * @param data.sourceKind - * @param data.roomId - * @param data.searchTerm - * @param data.page Page number - * @param data.size Page size - * @returns Page_GetTranscriptMinimal_ Successful Response - * @throws ApiError - */ - public v1TranscriptsList( - data: V1TranscriptsListData = {}, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts", - query: { - source_kind: data.sourceKind, - room_id: data.roomId, - search_term: data.searchTerm, - page: data.page, - size: data.size, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcripts Create - * @param data The data for the request. 
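// For POST endpoints with a JSON body such as Transcripts Create, the body is
// typed from the schema (CreateTranscript requires only `name`); a sketch:

const createTranscript = $api.useMutation("post", "/v1/transcripts");
createTranscript.mutate({
  body: { name: "Weekly sync", source_language: "en", target_language: "en" },
});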
- * @param data.requestBody - * @returns GetTranscript Successful Response - * @throws ApiError - */ - public v1TranscriptsCreate( - data: V1TranscriptsCreateData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/transcripts", - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcripts Search - * Full-text search across transcript titles and content. - * @param data The data for the request. - * @param data.q Search query text - * @param data.limit Results per page - * @param data.offset Number of results to skip - * @param data.roomId - * @param data.sourceKind - * @returns SearchResponse Successful Response - * @throws ApiError - */ - public v1TranscriptsSearch( - data: V1TranscriptsSearchData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/search", - query: { - q: data.q, - limit: data.limit, - offset: data.offset, - room_id: data.roomId, - source_kind: data.sourceKind, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get - * @param data The data for the request. - * @param data.transcriptId - * @returns GetTranscript Successful Response - * @throws ApiError - */ - public v1TranscriptGet( - data: V1TranscriptGetData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Update - * @param data The data for the request. - * @param data.transcriptId - * @param data.requestBody - * @returns GetTranscript Successful Response - * @throws ApiError - */ - public v1TranscriptUpdate( - data: V1TranscriptUpdateData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "PATCH", - url: "/v1/transcripts/{transcript_id}", - path: { - transcript_id: data.transcriptId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Delete - * @param data The data for the request. - * @param data.transcriptId - * @returns DeletionStatus Successful Response - * @throws ApiError - */ - public v1TranscriptDelete( - data: V1TranscriptDeleteData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "DELETE", - url: "/v1/transcripts/{transcript_id}", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Topics - * @param data The data for the request. - * @param data.transcriptId - * @returns GetTranscriptTopic Successful Response - * @throws ApiError - */ - public v1TranscriptGetTopics( - data: V1TranscriptGetTopicsData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/topics", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Topics With Words - * @param data The data for the request. 
- * @param data.transcriptId - * @returns GetTranscriptTopicWithWords Successful Response - * @throws ApiError - */ - public v1TranscriptGetTopicsWithWords( - data: V1TranscriptGetTopicsWithWordsData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/topics/with-words", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Topics With Words Per Speaker - * @param data The data for the request. - * @param data.transcriptId - * @param data.topicId - * @returns GetTranscriptTopicWithWordsPerSpeaker Successful Response - * @throws ApiError - */ - public v1TranscriptGetTopicsWithWordsPerSpeaker( - data: V1TranscriptGetTopicsWithWordsPerSpeakerData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/topics/{topic_id}/words-per-speaker", - path: { - transcript_id: data.transcriptId, - topic_id: data.topicId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Post To Zulip - * @param data The data for the request. - * @param data.transcriptId - * @param data.stream - * @param data.topic - * @param data.includeTopics - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptPostToZulip( - data: V1TranscriptPostToZulipData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/transcripts/{transcript_id}/zulip", - path: { - transcript_id: data.transcriptId, - }, - query: { - stream: data.stream, - topic: data.topic, - include_topics: data.includeTopics, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Audio Mp3 - * @param data The data for the request. - * @param data.transcriptId - * @param data.token - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptHeadAudioMp3( - data: V1TranscriptHeadAudioMp3Data, - ): CancelablePromise { - return this.httpRequest.request({ - method: "HEAD", - url: "/v1/transcripts/{transcript_id}/audio/mp3", - path: { - transcript_id: data.transcriptId, - }, - query: { - token: data.token, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Audio Mp3 - * @param data The data for the request. - * @param data.transcriptId - * @param data.token - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptGetAudioMp3( - data: V1TranscriptGetAudioMp3Data, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/audio/mp3", - path: { - transcript_id: data.transcriptId, - }, - query: { - token: data.token, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Audio Waveform - * @param data The data for the request. - * @param data.transcriptId - * @returns AudioWaveform Successful Response - * @throws ApiError - */ - public v1TranscriptGetAudioWaveform( - data: V1TranscriptGetAudioWaveformData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/audio/waveform", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Participants - * @param data The data for the request. 
- * @param data.transcriptId - * @returns Participant Successful Response - * @throws ApiError - */ - public v1TranscriptGetParticipants( - data: V1TranscriptGetParticipantsData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/participants", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Add Participant - * @param data The data for the request. - * @param data.transcriptId - * @param data.requestBody - * @returns Participant Successful Response - * @throws ApiError - */ - public v1TranscriptAddParticipant( - data: V1TranscriptAddParticipantData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/transcripts/{transcript_id}/participants", - path: { - transcript_id: data.transcriptId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Participant - * @param data The data for the request. - * @param data.transcriptId - * @param data.participantId - * @returns Participant Successful Response - * @throws ApiError - */ - public v1TranscriptGetParticipant( - data: V1TranscriptGetParticipantData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/participants/{participant_id}", - path: { - transcript_id: data.transcriptId, - participant_id: data.participantId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Update Participant - * @param data The data for the request. - * @param data.transcriptId - * @param data.participantId - * @param data.requestBody - * @returns Participant Successful Response - * @throws ApiError - */ - public v1TranscriptUpdateParticipant( - data: V1TranscriptUpdateParticipantData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "PATCH", - url: "/v1/transcripts/{transcript_id}/participants/{participant_id}", - path: { - transcript_id: data.transcriptId, - participant_id: data.participantId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Delete Participant - * @param data The data for the request. - * @param data.transcriptId - * @param data.participantId - * @returns DeletionStatus Successful Response - * @throws ApiError - */ - public v1TranscriptDeleteParticipant( - data: V1TranscriptDeleteParticipantData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "DELETE", - url: "/v1/transcripts/{transcript_id}/participants/{participant_id}", - path: { - transcript_id: data.transcriptId, - participant_id: data.participantId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Assign Speaker - * @param data The data for the request. - * @param data.transcriptId - * @param data.requestBody - * @returns SpeakerAssignmentStatus Successful Response - * @throws ApiError - */ - public v1TranscriptAssignSpeaker( - data: V1TranscriptAssignSpeakerData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "PATCH", - url: "/v1/transcripts/{transcript_id}/speaker/assign", - path: { - transcript_id: data.transcriptId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Merge Speaker - * @param data The data for the request. 
- * @param data.transcriptId - * @param data.requestBody - * @returns SpeakerAssignmentStatus Successful Response - * @throws ApiError - */ - public v1TranscriptMergeSpeaker( - data: V1TranscriptMergeSpeakerData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "PATCH", - url: "/v1/transcripts/{transcript_id}/speaker/merge", - path: { - transcript_id: data.transcriptId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Record Upload - * @param data The data for the request. - * @param data.transcriptId - * @param data.chunkNumber - * @param data.totalChunks - * @param data.formData - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptRecordUpload( - data: V1TranscriptRecordUploadData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/transcripts/{transcript_id}/record/upload", - path: { - transcript_id: data.transcriptId, - }, - query: { - chunk_number: data.chunkNumber, - total_chunks: data.totalChunks, - }, - formData: data.formData, - mediaType: "multipart/form-data", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Get Websocket Events - * @param data The data for the request. - * @param data.transcriptId - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptGetWebsocketEvents( - data: V1TranscriptGetWebsocketEventsData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/transcripts/{transcript_id}/events", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Record Webrtc - * @param data The data for the request. - * @param data.transcriptId - * @param data.requestBody - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptRecordWebrtc( - data: V1TranscriptRecordWebrtcData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/transcripts/{transcript_id}/record/webrtc", - path: { - transcript_id: data.transcriptId, - }, - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Transcript Process - * @param data The data for the request. - * @param data.transcriptId - * @returns unknown Successful Response - * @throws ApiError - */ - public v1TranscriptProcess( - data: V1TranscriptProcessData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/transcripts/{transcript_id}/process", - path: { - transcript_id: data.transcriptId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * User Me - * @returns unknown Successful Response - * @throws ApiError - */ - public v1UserMe(): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/me", - }); - } - - /** - * Zulip Get Streams - * Get all Zulip streams. - * @returns Stream Successful Response - * @throws ApiError - */ - public v1ZulipGetStreams(): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/zulip/streams", - }); - } - - /** - * Zulip Get Topics - * Get all topics for a specific Zulip stream. - * @param data The data for the request. 
- * @param data.streamId - * @returns Topic Successful Response - * @throws ApiError - */ - public v1ZulipGetTopics( - data: V1ZulipGetTopicsData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "GET", - url: "/v1/zulip/streams/{stream_id}/topics", - path: { - stream_id: data.streamId, - }, - errors: { - 422: "Validation Error", - }, - }); - } - - /** - * Whereby Webhook - * @param data The data for the request. - * @param data.requestBody - * @returns unknown Successful Response - * @throws ApiError - */ - public v1WherebyWebhook( - data: V1WherebyWebhookData, - ): CancelablePromise { - return this.httpRequest.request({ - method: "POST", - url: "/v1/whereby", - body: data.requestBody, - mediaType: "application/json", - errors: { - 422: "Validation Error", - }, - }); - } -} diff --git a/www/app/api/types.gen.ts b/www/app/api/types.gen.ts index d724fc98..e69de29b 100644 --- a/www/app/api/types.gen.ts +++ b/www/app/api/types.gen.ts @@ -1,1143 +0,0 @@ -// This file is auto-generated by @hey-api/openapi-ts - -export type AudioWaveform = { - data: Array; -}; - -export type Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post = - { - chunk: Blob | File; - }; - -export type CreateParticipant = { - speaker?: number | null; - name: string; -}; - -export type CreateRoom = { - name: string; - zulip_auto_post: boolean; - zulip_stream: string; - zulip_topic: string; - is_locked: boolean; - room_mode: string; - recording_type: string; - recording_trigger: string; - is_shared: boolean; - webhook_url: string; - webhook_secret: string; -}; - -export type CreateTranscript = { - name: string; - source_language?: string; - target_language?: string; - source_kind?: SourceKind | null; -}; - -export type DeletionStatus = { - status: string; -}; - -export type GetTranscript = { - id: string; - user_id: string | null; - name: string; - status: string; - locked: boolean; - duration: number; - title: string | null; - short_summary: string | null; - long_summary: string | null; - created_at: string; - share_mode?: string; - source_language: string | null; - target_language: string | null; - reviewed: boolean; - meeting_id: string | null; - source_kind: SourceKind; - room_id?: string | null; - room_name?: string | null; - audio_deleted?: boolean | null; - participants: Array | null; -}; - -export type GetTranscriptMinimal = { - id: string; - user_id: string | null; - name: string; - status: string; - locked: boolean; - duration: number; - title: string | null; - short_summary: string | null; - long_summary: string | null; - created_at: string; - share_mode?: string; - source_language: string | null; - target_language: string | null; - reviewed: boolean; - meeting_id: string | null; - source_kind: SourceKind; - room_id?: string | null; - room_name?: string | null; - audio_deleted?: boolean | null; -}; - -export type GetTranscriptSegmentTopic = { - text: string; - start: number; - speaker: number; -}; - -export type GetTranscriptTopic = { - id: string; - title: string; - summary: string; - timestamp: number; - duration: number | null; - transcript: string; - segments?: Array; -}; - -export type GetTranscriptTopicWithWords = { - id: string; - title: string; - summary: string; - timestamp: number; - duration: number | null; - transcript: string; - segments?: Array; - words?: Array; -}; - -export type GetTranscriptTopicWithWordsPerSpeaker = { - id: string; - title: string; - summary: string; - timestamp: number; - duration: number | null; - transcript: string; - segments?: 
Array; - words_per_speaker?: Array; -}; - -export type HTTPValidationError = { - detail?: Array; -}; - -export type Meeting = { - id: string; - room_name: string; - room_url: string; - host_room_url: string; - start_date: string; - end_date: string; - recording_type?: "none" | "local" | "cloud"; -}; - -export type recording_type = "none" | "local" | "cloud"; - -export type MeetingConsentRequest = { - consent_given: boolean; -}; - -export type Page_GetTranscriptMinimal_ = { - items: Array; - total?: number | null; - page: number | null; - size: number | null; - pages?: number | null; -}; - -export type Page_RoomDetails_ = { - items: Array; - total?: number | null; - page: number | null; - size: number | null; - pages?: number | null; -}; - -export type Participant = { - id: string; - speaker: number | null; - name: string; -}; - -export type Room = { - id: string; - name: string; - user_id: string; - created_at: string; - zulip_auto_post: boolean; - zulip_stream: string; - zulip_topic: string; - is_locked: boolean; - room_mode: string; - recording_type: string; - recording_trigger: string; - is_shared: boolean; -}; - -export type RoomDetails = { - id: string; - name: string; - user_id: string; - created_at: string; - zulip_auto_post: boolean; - zulip_stream: string; - zulip_topic: string; - is_locked: boolean; - room_mode: string; - recording_type: string; - recording_trigger: string; - is_shared: boolean; - webhook_url: string | null; - webhook_secret: string | null; -}; - -export type RtcOffer = { - sdp: string; - type: string; -}; - -export type SearchResponse = { - results: Array; - /** - * Total number of search results - */ - total: number; - query?: string | null; - /** - * Results per page - */ - limit: number; - /** - * Number of results to skip - */ - offset: number; -}; - -/** - * Public search result model with computed fields. 
- */ -export type SearchResult = { - id: string; - title?: string | null; - user_id?: string | null; - room_id?: string | null; - room_name?: string | null; - source_kind: SourceKind; - created_at: string; - status: string; - rank: number; - /** - * Duration in seconds - */ - duration: number | null; - /** - * Text snippets around search matches - */ - search_snippets: Array; - /** - * Total number of matches found in the transcript - */ - total_match_count?: number; -}; - -export type SourceKind = "room" | "live" | "file"; - -export type SpeakerAssignment = { - speaker?: number | null; - participant?: string | null; - timestamp_from: number; - timestamp_to: number; -}; - -export type SpeakerAssignmentStatus = { - status: string; -}; - -export type SpeakerMerge = { - speaker_from: number; - speaker_to: number; -}; - -export type SpeakerWords = { - speaker: number; - words: Array; -}; - -export type Stream = { - stream_id: number; - name: string; -}; - -export type Topic = { - name: string; -}; - -export type TranscriptParticipant = { - id?: string; - speaker: number | null; - name: string; -}; - -export type UpdateParticipant = { - speaker?: number | null; - name?: string | null; -}; - -export type UpdateRoom = { - name: string; - zulip_auto_post: boolean; - zulip_stream: string; - zulip_topic: string; - is_locked: boolean; - room_mode: string; - recording_type: string; - recording_trigger: string; - is_shared: boolean; - webhook_url: string; - webhook_secret: string; -}; - -export type UpdateTranscript = { - name?: string | null; - locked?: boolean | null; - title?: string | null; - short_summary?: string | null; - long_summary?: string | null; - share_mode?: "public" | "semi-private" | "private" | null; - participants?: Array | null; - reviewed?: boolean | null; - audio_deleted?: boolean | null; -}; - -export type UserInfo = { - sub: string; - email: string | null; - email_verified: boolean | null; -}; - -export type ValidationError = { - loc: Array; - msg: string; - type: string; -}; - -export type WebhookTestResult = { - success: boolean; - message?: string; - error?: string; - status_code?: number | null; - response_preview?: string | null; -}; - -export type WherebyWebhookEvent = { - apiVersion: string; - id: string; - createdAt: string; - type: string; - data: { - [key: string]: unknown; - }; -}; - -export type Word = { - text: string; - /** - * Time in seconds with float part - */ - start: number; - /** - * Time in seconds with float part - */ - end: number; - speaker?: number; -}; - -export type MetricsResponse = unknown; - -export type V1MeetingAudioConsentData = { - meetingId: string; - requestBody: MeetingConsentRequest; -}; - -export type V1MeetingAudioConsentResponse = unknown; - -export type V1RoomsListData = { - /** - * Page number - */ - page?: number; - /** - * Page size - */ - size?: number; -}; - -export type V1RoomsListResponse = Page_RoomDetails_; - -export type V1RoomsCreateData = { - requestBody: CreateRoom; -}; - -export type V1RoomsCreateResponse = Room; - -export type V1RoomsGetData = { - roomId: string; -}; - -export type V1RoomsGetResponse = RoomDetails; - -export type V1RoomsUpdateData = { - requestBody: UpdateRoom; - roomId: string; -}; - -export type V1RoomsUpdateResponse = RoomDetails; - -export type V1RoomsDeleteData = { - roomId: string; -}; - -export type V1RoomsDeleteResponse = DeletionStatus; - -export type V1RoomsCreateMeetingData = { - roomName: string; -}; - -export type V1RoomsCreateMeetingResponse = Meeting; - -export type V1RoomsTestWebhookData = 
{ - roomId: string; -}; - -export type V1RoomsTestWebhookResponse = WebhookTestResult; - -export type V1TranscriptsListData = { - /** - * Page number - */ - page?: number; - roomId?: string | null; - searchTerm?: string | null; - /** - * Page size - */ - size?: number; - sourceKind?: SourceKind | null; -}; - -export type V1TranscriptsListResponse = Page_GetTranscriptMinimal_; - -export type V1TranscriptsCreateData = { - requestBody: CreateTranscript; -}; - -export type V1TranscriptsCreateResponse = GetTranscript; - -export type V1TranscriptsSearchData = { - /** - * Results per page - */ - limit?: number; - /** - * Number of results to skip - */ - offset?: number; - /** - * Search query text - */ - q: string; - roomId?: string | null; - sourceKind?: SourceKind | null; -}; - -export type V1TranscriptsSearchResponse = SearchResponse; - -export type V1TranscriptGetData = { - transcriptId: string; -}; - -export type V1TranscriptGetResponse = GetTranscript; - -export type V1TranscriptUpdateData = { - requestBody: UpdateTranscript; - transcriptId: string; -}; - -export type V1TranscriptUpdateResponse = GetTranscript; - -export type V1TranscriptDeleteData = { - transcriptId: string; -}; - -export type V1TranscriptDeleteResponse = DeletionStatus; - -export type V1TranscriptGetTopicsData = { - transcriptId: string; -}; - -export type V1TranscriptGetTopicsResponse = Array; - -export type V1TranscriptGetTopicsWithWordsData = { - transcriptId: string; -}; - -export type V1TranscriptGetTopicsWithWordsResponse = - Array; - -export type V1TranscriptGetTopicsWithWordsPerSpeakerData = { - topicId: string; - transcriptId: string; -}; - -export type V1TranscriptGetTopicsWithWordsPerSpeakerResponse = - GetTranscriptTopicWithWordsPerSpeaker; - -export type V1TranscriptPostToZulipData = { - includeTopics: boolean; - stream: string; - topic: string; - transcriptId: string; -}; - -export type V1TranscriptPostToZulipResponse = unknown; - -export type V1TranscriptHeadAudioMp3Data = { - token?: string | null; - transcriptId: string; -}; - -export type V1TranscriptHeadAudioMp3Response = unknown; - -export type V1TranscriptGetAudioMp3Data = { - token?: string | null; - transcriptId: string; -}; - -export type V1TranscriptGetAudioMp3Response = unknown; - -export type V1TranscriptGetAudioWaveformData = { - transcriptId: string; -}; - -export type V1TranscriptGetAudioWaveformResponse = AudioWaveform; - -export type V1TranscriptGetParticipantsData = { - transcriptId: string; -}; - -export type V1TranscriptGetParticipantsResponse = Array; - -export type V1TranscriptAddParticipantData = { - requestBody: CreateParticipant; - transcriptId: string; -}; - -export type V1TranscriptAddParticipantResponse = Participant; - -export type V1TranscriptGetParticipantData = { - participantId: string; - transcriptId: string; -}; - -export type V1TranscriptGetParticipantResponse = Participant; - -export type V1TranscriptUpdateParticipantData = { - participantId: string; - requestBody: UpdateParticipant; - transcriptId: string; -}; - -export type V1TranscriptUpdateParticipantResponse = Participant; - -export type V1TranscriptDeleteParticipantData = { - participantId: string; - transcriptId: string; -}; - -export type V1TranscriptDeleteParticipantResponse = DeletionStatus; - -export type V1TranscriptAssignSpeakerData = { - requestBody: SpeakerAssignment; - transcriptId: string; -}; - -export type V1TranscriptAssignSpeakerResponse = SpeakerAssignmentStatus; - -export type V1TranscriptMergeSpeakerData = { - requestBody: SpeakerMerge; - 
transcriptId: string; -}; - -export type V1TranscriptMergeSpeakerResponse = SpeakerAssignmentStatus; - -export type V1TranscriptRecordUploadData = { - chunkNumber: number; - formData: Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post; - totalChunks: number; - transcriptId: string; -}; - -export type V1TranscriptRecordUploadResponse = unknown; - -export type V1TranscriptGetWebsocketEventsData = { - transcriptId: string; -}; - -export type V1TranscriptGetWebsocketEventsResponse = unknown; - -export type V1TranscriptRecordWebrtcData = { - requestBody: RtcOffer; - transcriptId: string; -}; - -export type V1TranscriptRecordWebrtcResponse = unknown; - -export type V1TranscriptProcessData = { - transcriptId: string; -}; - -export type V1TranscriptProcessResponse = unknown; - -export type V1UserMeResponse = UserInfo | null; - -export type V1ZulipGetStreamsResponse = Array; - -export type V1ZulipGetTopicsData = { - streamId: number; -}; - -export type V1ZulipGetTopicsResponse = Array; - -export type V1WherebyWebhookData = { - requestBody: WherebyWebhookEvent; -}; - -export type V1WherebyWebhookResponse = unknown; - -export type $OpenApiTs = { - "/metrics": { - get: { - res: { - /** - * Successful Response - */ - 200: unknown; - }; - }; - }; - "/v1/meetings/{meeting_id}/consent": { - post: { - req: V1MeetingAudioConsentData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/rooms": { - get: { - req: V1RoomsListData; - res: { - /** - * Successful Response - */ - 200: Page_RoomDetails_; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - post: { - req: V1RoomsCreateData; - res: { - /** - * Successful Response - */ - 200: Room; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/rooms/{room_id}": { - get: { - req: V1RoomsGetData; - res: { - /** - * Successful Response - */ - 200: RoomDetails; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - patch: { - req: V1RoomsUpdateData; - res: { - /** - * Successful Response - */ - 200: RoomDetails; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - delete: { - req: V1RoomsDeleteData; - res: { - /** - * Successful Response - */ - 200: DeletionStatus; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/rooms/{room_name}/meeting": { - post: { - req: V1RoomsCreateMeetingData; - res: { - /** - * Successful Response - */ - 200: Meeting; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/rooms/{room_id}/webhook/test": { - post: { - req: V1RoomsTestWebhookData; - res: { - /** - * Successful Response - */ - 200: WebhookTestResult; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts": { - get: { - req: V1TranscriptsListData; - res: { - /** - * Successful Response - */ - 200: Page_GetTranscriptMinimal_; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - post: { - req: V1TranscriptsCreateData; - res: { - /** - * Successful Response - */ - 200: GetTranscript; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/search": { - get: { - req: V1TranscriptsSearchData; - res: { - /** - * Successful Response - */ - 200: SearchResponse; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}": { - get: { - req: V1TranscriptGetData; - res: { - /** - * 
Successful Response - */ - 200: GetTranscript; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - patch: { - req: V1TranscriptUpdateData; - res: { - /** - * Successful Response - */ - 200: GetTranscript; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - delete: { - req: V1TranscriptDeleteData; - res: { - /** - * Successful Response - */ - 200: DeletionStatus; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/topics": { - get: { - req: V1TranscriptGetTopicsData; - res: { - /** - * Successful Response - */ - 200: Array; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/topics/with-words": { - get: { - req: V1TranscriptGetTopicsWithWordsData; - res: { - /** - * Successful Response - */ - 200: Array; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/topics/{topic_id}/words-per-speaker": { - get: { - req: V1TranscriptGetTopicsWithWordsPerSpeakerData; - res: { - /** - * Successful Response - */ - 200: GetTranscriptTopicWithWordsPerSpeaker; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/zulip": { - post: { - req: V1TranscriptPostToZulipData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/audio/mp3": { - head: { - req: V1TranscriptHeadAudioMp3Data; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - get: { - req: V1TranscriptGetAudioMp3Data; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/audio/waveform": { - get: { - req: V1TranscriptGetAudioWaveformData; - res: { - /** - * Successful Response - */ - 200: AudioWaveform; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/participants": { - get: { - req: V1TranscriptGetParticipantsData; - res: { - /** - * Successful Response - */ - 200: Array; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - post: { - req: V1TranscriptAddParticipantData; - res: { - /** - * Successful Response - */ - 200: Participant; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/participants/{participant_id}": { - get: { - req: V1TranscriptGetParticipantData; - res: { - /** - * Successful Response - */ - 200: Participant; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - patch: { - req: V1TranscriptUpdateParticipantData; - res: { - /** - * Successful Response - */ - 200: Participant; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - delete: { - req: V1TranscriptDeleteParticipantData; - res: { - /** - * Successful Response - */ - 200: DeletionStatus; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/speaker/assign": { - patch: { - req: V1TranscriptAssignSpeakerData; - res: { - /** - * Successful Response - */ - 200: SpeakerAssignmentStatus; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/speaker/merge": { - patch: { - req: V1TranscriptMergeSpeakerData; - res: { - /** - * 
Successful Response - */ - 200: SpeakerAssignmentStatus; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/record/upload": { - post: { - req: V1TranscriptRecordUploadData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/events": { - get: { - req: V1TranscriptGetWebsocketEventsData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/record/webrtc": { - post: { - req: V1TranscriptRecordWebrtcData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/transcripts/{transcript_id}/process": { - post: { - req: V1TranscriptProcessData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/me": { - get: { - res: { - /** - * Successful Response - */ - 200: UserInfo | null; - }; - }; - }; - "/v1/zulip/streams": { - get: { - res: { - /** - * Successful Response - */ - 200: Array; - }; - }; - }; - "/v1/zulip/streams/{stream_id}/topics": { - get: { - req: V1ZulipGetTopicsData; - res: { - /** - * Successful Response - */ - 200: Array; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; - "/v1/whereby": { - post: { - req: V1WherebyWebhookData; - res: { - /** - * Successful Response - */ - 200: unknown; - /** - * Validation Error - */ - 422: HTTPValidationError; - }; - }; - }; -}; diff --git a/www/app/api/urls.ts b/www/app/api/urls.ts index bd0a910c..89ce5af8 100644 --- a/www/app/api/urls.ts +++ b/www/app/api/urls.ts @@ -1,2 +1 @@ -// TODO better connection with generated schema; it's duplication export const RECORD_A_MEETING_URL = "/transcripts/new" as const; diff --git a/www/app/layout.tsx b/www/app/layout.tsx index f73b8813..62175be9 100644 --- a/www/app/layout.tsx +++ b/www/app/layout.tsx @@ -1,7 +1,6 @@ import "./styles/globals.scss"; import { Metadata, Viewport } from "next"; import { Poppins } from "next/font/google"; -import SessionProvider from "./lib/SessionProvider"; import { ErrorProvider } from "./(errors)/errorContext"; import ErrorMessage from "./(errors)/errorMessage"; import { DomainContextProvider } from "./domainContext"; @@ -74,18 +73,16 @@ export default async function RootLayout({ return ( - - - - "something went really wrong"
-            </div>
-          }
-        >
-          {children}
-        </Suspense>
-      </ErrorProvider>
-    </SessionProvider>
+    <AuthProvider>
+      <ErrorProvider>
+        <Suspense
+          fallback={
+            <div>"something went really wrong"</div>
+          }
+        >
+          {children}
+        </Suspense>
+      </ErrorProvider>
+    </AuthProvider>
); diff --git a/www/app/lib/AuthProvider.tsx b/www/app/lib/AuthProvider.tsx new file mode 100644 index 00000000..96f49f87 --- /dev/null +++ b/www/app/lib/AuthProvider.tsx @@ -0,0 +1,104 @@ +"use client"; + +import { createContext, useContext } from "react"; +import { useSession as useNextAuthSession } from "next-auth/react"; +import { signOut, signIn } from "next-auth/react"; +import { configureApiAuth } from "./apiClient"; +import { assertCustomSession, CustomSession } from "./types"; +import { Session } from "next-auth"; +import { SessionAutoRefresh } from "./SessionAutoRefresh"; +import { REFRESH_ACCESS_TOKEN_ERROR } from "./auth"; + +type AuthContextType = ( + | { status: "loading" } + | { status: "refreshing" } + | { status: "unauthenticated"; error?: string } + | { + status: "authenticated"; + accessToken: string; + accessTokenExpires: number; + user: CustomSession["user"]; + } +) & { + update: () => Promise; + signIn: typeof signIn; + signOut: typeof signOut; +}; + +const AuthContext = createContext(undefined); + +export function AuthProvider({ children }: { children: React.ReactNode }) { + const { data: session, status, update } = useNextAuthSession(); + const customSession = session ? assertCustomSession(session) : null; + + const contextValue: AuthContextType = { + ...(() => { + switch (status) { + case "loading": { + const sessionIsHere = !!customSession; + switch (sessionIsHere) { + case false: { + return { status }; + } + case true: { + return { status: "refreshing" as const }; + } + default: { + const _: never = sessionIsHere; + throw new Error("unreachable"); + } + } + } + case "authenticated": { + if (customSession?.error === REFRESH_ACCESS_TOKEN_ERROR) { + // token had expired but next auth still returns "authenticated" so show user unauthenticated state + return { + status: "unauthenticated" as const, + }; + } else if (customSession?.accessToken) { + return { + status, + accessToken: customSession.accessToken, + accessTokenExpires: customSession.accessTokenExpires, + user: customSession.user, + }; + } else { + console.warn( + "illegal state: authenticated but have no session/or access token. ignoring", + ); + return { status: "unauthenticated" as const }; + } + } + case "unauthenticated": { + return { status: "unauthenticated" as const }; + } + default: { + const _: never = status; + throw new Error("unreachable"); + } + } + })(), + update, + signIn, + signOut, + }; + + // not useEffect, we need it ASAP + configureApiAuth( + contextValue.status === "authenticated" ? contextValue.accessToken : null, + ); + + return ( + + {children} + + ); +} + +export function useAuth() { + const context = useContext(AuthContext); + if (context === undefined) { + throw new Error("useAuth must be used within an AuthProvider"); + } + return context; +} diff --git a/www/app/lib/SessionAutoRefresh.tsx b/www/app/lib/SessionAutoRefresh.tsx index 1e230d6c..fd29367f 100644 --- a/www/app/lib/SessionAutoRefresh.tsx +++ b/www/app/lib/SessionAutoRefresh.tsx @@ -1,5 +1,5 @@ /** - * This is a custom hook that automatically refreshes the session when the access token is about to expire. + * This is a custom provider that automatically refreshes the session when the access token is about to expire. * When communicating with the reflector API, we need to ensure that the access token is always valid. 
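+ *
+ * In essence: poll, and once the token is within REFRESH_ACCESS_TOKEN_BEFORE
+ * of its expiry, call update() so next-auth re-runs its jwt callback. A
+ * sketch, using the names defined below:
+ *
+ *   if (accessTokenExpires - Date.now() < REFRESH_BEFORE) auth.update();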
* * We could have implemented that as an interceptor on the API client, but not everything is using the @@ -7,30 +7,38 @@ */ "use client"; -import { useSession } from "next-auth/react"; import { useEffect } from "react"; -import { CustomSession } from "./types"; +import { useAuth } from "./AuthProvider"; +import { REFRESH_ACCESS_TOKEN_BEFORE } from "./auth"; -export function SessionAutoRefresh({ - children, - refreshInterval = 20 /* seconds */, -}) { - const { data: session, update } = useSession(); - const customSession = session as CustomSession; - const accessTokenExpires = customSession?.accessTokenExpires; +const REFRESH_BEFORE = REFRESH_ACCESS_TOKEN_BEFORE; + +export function SessionAutoRefresh({ children }) { + const auth = useAuth(); + const accessTokenExpires = + auth.status === "authenticated" ? auth.accessTokenExpires : null; useEffect(() => { + // technical value for how often the setInterval will be polling news - not too fast (no spam in case of errors) + // and not too slow (debuggable) + const INTERVAL_REFRESH_MS = 5000; const interval = setInterval(() => { - if (accessTokenExpires) { + if (accessTokenExpires !== null) { const timeLeft = accessTokenExpires - Date.now(); - if (timeLeft < refreshInterval * 1000) { - update(); + if (timeLeft < REFRESH_BEFORE) { + auth + .update() + .then(() => {}) + .catch((e) => { + // note: 401 won't be considered error here + console.error("error refreshing auth token", e); + }); } } - }, refreshInterval * 1000); + }, INTERVAL_REFRESH_MS); return () => clearInterval(interval); - }, [accessTokenExpires, refreshInterval, update]); + }, [accessTokenExpires, auth.update]); return children; } diff --git a/www/app/lib/SessionProvider.tsx b/www/app/lib/SessionProvider.tsx deleted file mode 100644 index 9c95fbc8..00000000 --- a/www/app/lib/SessionProvider.tsx +++ /dev/null @@ -1,11 +0,0 @@ -"use client"; -import { SessionProvider as SessionProviderNextAuth } from "next-auth/react"; -import { SessionAutoRefresh } from "./SessionAutoRefresh"; - -export default function SessionProvider({ children }) { - return ( - - {children} - - ); -} diff --git a/www/app/lib/__tests__/redisTokenCache.test.ts b/www/app/lib/__tests__/redisTokenCache.test.ts new file mode 100644 index 00000000..8ca8e8a1 --- /dev/null +++ b/www/app/lib/__tests__/redisTokenCache.test.ts @@ -0,0 +1,85 @@ +import { + getTokenCache, + setTokenCache, + deleteTokenCache, + TokenCacheEntry, + KV, +} from "../redisTokenCache"; + +const mockKV: KV & { + clear: () => void; +} = (() => { + const data = new Map(); + return { + async get(key: string): Promise { + return data.get(key) || null; + }, + + async setex(key: string, seconds_: number, value: string): Promise<"OK"> { + data.set(key, value); + return "OK"; + }, + + async del(key: string): Promise { + const existed = data.has(key); + data.delete(key); + return existed ? 
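+      // mirror Redis DEL semantics: report how many keys were removed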
1 : 0; + }, + + clear() { + data.clear(); + }, + }; +})(); + +describe("Redis Token Cache", () => { + beforeEach(() => { + mockKV.clear(); + }); + + test("basic write/read - value written equals value read", async () => { + const testKey = "token:test-user-123"; + const testValue: TokenCacheEntry = { + token: { + sub: "test-user-123", + name: "Test User", + email: "test@example.com", + accessToken: "access-token-123", + accessTokenExpires: Date.now() + 3600000, // 1 hour from now + refreshToken: "refresh-token-456", + }, + timestamp: Date.now(), + }; + + await setTokenCache(mockKV, testKey, testValue); + const retrievedValue = await getTokenCache(mockKV, testKey); + + expect(retrievedValue).not.toBeNull(); + expect(retrievedValue).toEqual(testValue); + expect(retrievedValue?.token.accessToken).toBe(testValue.token.accessToken); + expect(retrievedValue?.token.sub).toBe(testValue.token.sub); + expect(retrievedValue?.timestamp).toBe(testValue.timestamp); + }); + + test("get returns null for non-existent key", async () => { + const result = await getTokenCache(mockKV, "non-existent-key"); + expect(result).toBeNull(); + }); + + test("delete removes token from cache", async () => { + const testKey = "token:delete-test"; + const testValue: TokenCacheEntry = { + token: { + accessToken: "test-token", + accessTokenExpires: Date.now() + 3600000, + }, + timestamp: Date.now(), + }; + + await setTokenCache(mockKV, testKey, testValue); + await deleteTokenCache(mockKV, testKey); + + const result = await getTokenCache(mockKV, testKey); + expect(result).toBeNull(); + }); +}); diff --git a/www/app/lib/apiClient.tsx b/www/app/lib/apiClient.tsx new file mode 100644 index 00000000..cd97e151 --- /dev/null +++ b/www/app/lib/apiClient.tsx @@ -0,0 +1,50 @@ +"use client"; + +import createClient from "openapi-fetch"; +import type { paths } from "../reflector-api"; +import { + queryOptions, + useMutation, + useQuery, + useSuspenseQuery, +} from "@tanstack/react-query"; +import createFetchClient from "openapi-react-query"; +import { assertExistsAndNonEmptyString } from "./utils"; +import { isBuildPhase } from "./next"; + +const API_URL = !isBuildPhase + ? 
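+  // outside the build phase the public API URL must be configured;
+  // `next build` itself runs with a dummy value since no env is available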
assertExistsAndNonEmptyString(process.env.NEXT_PUBLIC_API_URL) + : "http://localhost"; + +// Create the base openapi-fetch client with a default URL +// The actual URL will be set via middleware in AuthProvider +export const client = createClient({ + baseUrl: API_URL, +}); + +export const $api = createFetchClient(client); + +let currentAuthToken: string | null | undefined = null; + +client.use({ + onRequest({ request }) { + if (currentAuthToken) { + request.headers.set("Authorization", `Bearer ${currentAuthToken}`); + } + // XXX Only set Content-Type if not already set (FormData will set its own boundary) + // This is a work around for uploading file, we're passing a formdata + // but the content type was still application/json + if ( + !request.headers.has("Content-Type") && + !(request.body instanceof FormData) + ) { + request.headers.set("Content-Type", "application/json"); + } + return request; + }, +}); + +// the function contract: lightweight, idempotent +export const configureApiAuth = (token: string | null | undefined) => { + currentAuthToken = token; +}; diff --git a/www/app/lib/apiHooks.ts b/www/app/lib/apiHooks.ts new file mode 100644 index 00000000..94d84c9b --- /dev/null +++ b/www/app/lib/apiHooks.ts @@ -0,0 +1,618 @@ +"use client"; + +import { $api } from "./apiClient"; +import { useError } from "../(errors)/errorContext"; +import { useQueryClient } from "@tanstack/react-query"; +import type { components } from "../reflector-api"; +import { useAuth } from "./AuthProvider"; + +/* + * XXX error types returned from the hooks are not always correct; declared types are ValidationError but real type could be string or any other + * this is either a limitation or incorrect usage of Python json schema generator + * or, limitation or incorrect usage of .d type generator from json schema + * */ + +const useAuthReady = () => { + const auth = useAuth(); + + return { + isAuthenticated: auth.status === "authenticated", + isLoading: auth.status === "loading", + }; +}; + +export function useRoomsList(page: number = 1) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/rooms", + { + params: { + query: { page }, + }, + }, + { + enabled: isAuthenticated, + }, + ); +} + +type SourceKind = components["schemas"]["SourceKind"]; + +export function useTranscriptsSearch( + q: string = "", + options: { + limit?: number; + offset?: number; + room_id?: string; + source_kind?: SourceKind; + } = {}, +) { + return $api.useQuery( + "get", + "/v1/transcripts/search", + { + params: { + query: { + q, + limit: options.limit, + offset: options.offset, + room_id: options.room_id, + source_kind: options.source_kind, + }, + }, + }, + { + enabled: true, + }, + ); +} + +export function useTranscriptDelete() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("delete", "/v1/transcripts/{transcript_id}", { + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: ["get", "/v1/transcripts/search"], + }); + }, + onError: (error) => { + setError(error as Error, "There was an error deleting the transcript"); + }, + }); +} + +export function useTranscriptProcess() { + const { setError } = useError(); + + return $api.useMutation("post", "/v1/transcripts/{transcript_id}/process", { + onError: (error) => { + setError(error as Error, "There was an error processing the transcript"); + }, + }); +} + +export function useTranscriptGet(transcriptId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return 
$api.useQuery( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { + path: { + transcript_id: transcriptId || "", + }, + }, + }, + { + enabled: !!transcriptId && isAuthenticated, + }, + ); +} + +export function useRoomGet(roomId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/rooms/{room_id}", + { + params: { + path: { room_id: roomId || "" }, + }, + }, + { + enabled: !!roomId && isAuthenticated, + }, + ); +} + +export function useRoomTestWebhook() { + const { setError } = useError(); + + return $api.useMutation("post", "/v1/rooms/{room_id}/webhook/test", { + onError: (error) => { + setError(error as Error, "There was an error testing the webhook"); + }, + }); +} + +export function useRoomCreate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("post", "/v1/rooms", { + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error creating the room"); + }, + }); +} + +export function useRoomUpdate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("patch", "/v1/rooms/{room_id}", { + onSuccess: async (room) => { + await Promise.all([ + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, + }), + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/rooms/{room_id}", { + params: { + path: { + room_id: room.id, + }, + }, + }).queryKey, + }), + ]); + }, + onError: (error) => { + setError(error as Error, "There was an error updating the room"); + }, + }); +} + +export function useRoomDelete() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("delete", "/v1/rooms/{room_id}", { + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error deleting the room"); + }, + }); +} + +export function useZulipStreams() { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/zulip/streams", + {}, + { + enabled: isAuthenticated, + }, + ); +} + +export function useZulipTopics(streamId: number | null) { + const { isAuthenticated } = useAuthReady(); + const enabled = !!streamId && isAuthenticated; + return $api.useQuery( + "get", + "/v1/zulip/streams/{stream_id}/topics", + { + params: { + path: { + stream_id: enabled ? 
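+          // 0 is a dummy id; with enabled=false the request never fires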
streamId : 0, + }, + }, + }, + { + enabled, + }, + ); +} + +export function useTranscriptUpdate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("patch", "/v1/transcripts/{transcript_id}", { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/transcripts/{transcript_id}", { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }).queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error updating the transcript"); + }, + }); +} + +export function useTranscriptPostToZulip() { + const { setError } = useError(); + + // @ts-ignore - Zulip endpoint not in OpenAPI spec + return $api.useMutation("post", "/v1/transcripts/{transcript_id}/zulip", { + onError: (error) => { + setError(error as Error, "There was an error posting to Zulip"); + }, + }); +} + +export function useTranscriptUploadAudio() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation( + "post", + "/v1/transcripts/{transcript_id}/record/upload", + { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error uploading the audio file"); + }, + }, + ); +} + +export function useTranscriptWaveform(transcriptId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/transcripts/{transcript_id}/audio/waveform", + { + params: { + path: { transcript_id: transcriptId || "" }, + }, + }, + { + enabled: !!transcriptId && isAuthenticated, + }, + ); +} + +export function useTranscriptMP3(transcriptId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/transcripts/{transcript_id}/audio/mp3", + { + params: { + path: { transcript_id: transcriptId || "" }, + }, + }, + { + enabled: !!transcriptId && isAuthenticated, + }, + ); +} + +export function useTranscriptTopics(transcriptId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/transcripts/{transcript_id}/topics", + { + params: { + path: { transcript_id: transcriptId || "" }, + }, + }, + { + enabled: !!transcriptId && isAuthenticated, + }, + ); +} + +export function useTranscriptTopicsWithWords(transcriptId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/transcripts/{transcript_id}/topics/with-words", + { + params: { + path: { transcript_id: transcriptId || "" }, + }, + }, + { + enabled: !!transcriptId && isAuthenticated, + }, + ); +} + +export function useTranscriptTopicsWithWordsPerSpeaker( + transcriptId: string | null, + topicId: string | null, +) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/transcripts/{transcript_id}/topics/{topic_id}/words-per-speaker", + { + params: { + path: { + transcript_id: transcriptId || "", + topic_id: topicId || "", + }, + }, + }, + { + enabled: !!transcriptId && !!topicId && isAuthenticated, + }, + ); +} + +export function useTranscriptParticipants(transcriptId: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + 
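+      // the empty-string id is a placeholder; the query stays disabled
+      // until a transcriptId exists and auth is ready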
params: { + path: { transcript_id: transcriptId || "" }, + }, + }, + { + enabled: !!transcriptId && isAuthenticated, + }, + ); +} + +export function useTranscriptParticipantUpdate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation( + "patch", + "/v1/transcripts/{transcript_id}/participants/{participant_id}", + { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error updating the participant"); + }, + }, + ); +} + +export function useTranscriptParticipantCreate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation( + "post", + "/v1/transcripts/{transcript_id}/participants", + { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error creating the participant"); + }, + }, + ); +} + +export function useTranscriptParticipantDelete() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation( + "delete", + "/v1/transcripts/{transcript_id}/participants/{participant_id}", + { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error deleting the participant"); + }, + }, + ); +} + +export function useTranscriptSpeakerAssign() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation( + "patch", + "/v1/transcripts/{transcript_id}/speaker/assign", + { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error assigning the speaker"); + }, + }, + ); +} + +export function useTranscriptSpeakerMerge() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation( + "patch", + "/v1/transcripts/{transcript_id}/speaker/merge", + { + onSuccess: (data, variables) => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, + }, + ).queryKey, + }); + }, 
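+      // both caches above are invalidated so a merge refreshes the
+      // transcript and its participant list everywhere they are shown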
+ onError: (error) => { + setError(error as Error, "There was an error merging speakers"); + }, + }, + ); +} + +export function useMeetingAudioConsent() { + const { setError } = useError(); + + return $api.useMutation("post", "/v1/meetings/{meeting_id}/consent", { + onError: (error) => { + setError(error as Error, "There was an error recording consent"); + }, + }); +} + +export function useTranscriptWebRTC() { + const { setError } = useError(); + + return $api.useMutation( + "post", + "/v1/transcripts/{transcript_id}/record/webrtc", + { + onError: (error) => { + setError(error as Error, "There was an error with WebRTC connection"); + }, + }, + ); +} + +export function useTranscriptCreate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("post", "/v1/transcripts", { + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: ["get", "/v1/transcripts/search"], + }); + }, + onError: (error) => { + setError(error as Error, "There was an error creating the transcript"); + }, + }); +} + +export function useRoomsCreateMeeting() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("post", "/v1/rooms/{room_name}/meeting", { + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, + }); + }, + onError: (error) => { + setError(error as Error, "There was an error creating the meeting"); + }, + }); +} diff --git a/www/app/lib/auth.ts b/www/app/lib/auth.ts index 9169c694..f6e60513 100644 --- a/www/app/lib/auth.ts +++ b/www/app/lib/auth.ts @@ -1,157 +1,13 @@ -// import { kv } from "@vercel/kv"; -import Redlock, { ResourceLockedError } from "redlock"; -import { AuthOptions } from "next-auth"; -import AuthentikProvider from "next-auth/providers/authentik"; -import { JWT } from "next-auth/jwt"; -import { JWTWithAccessToken, CustomSession } from "./types"; -import Redis from "ioredis"; +export const REFRESH_ACCESS_TOKEN_ERROR = "RefreshAccessTokenError" as const; +// 4 min is 1 min less than default authentic value. here we assume that authentic won't be set to access tokens < 4 min +export const REFRESH_ACCESS_TOKEN_BEFORE = 4 * 60 * 1000; -const PRETIMEOUT = 60; // seconds before token expires to refresh it -const DEFAULT_REDIS_KEY_TIMEOUT = 60 * 60 * 24 * 30; // 30 days (refresh token expires in 30 days) -const kv = new Redis(process.env.KV_URL || "", { - tls: {}, -}); -const redlock = new Redlock([kv], {}); +export const LOGIN_REQUIRED_PAGES = [ + "/transcripts/[!new]", + "/browse(.*)", + "/rooms(.*)", +]; -redlock.on("error", (error) => { - if (error instanceof ResourceLockedError) { - return; - } - - // Log all other errors. 
- console.error(error); -}); - -export const authOptions: AuthOptions = { - providers: [ - AuthentikProvider({ - clientId: process.env.AUTHENTIK_CLIENT_ID as string, - clientSecret: process.env.AUTHENTIK_CLIENT_SECRET as string, - issuer: process.env.AUTHENTIK_ISSUER, - authorization: { - params: { - scope: "openid email profile offline_access", - }, - }, - }), - ], - session: { - strategy: "jwt", - }, - callbacks: { - async jwt({ token, account, user }) { - const extendedToken = token as JWTWithAccessToken; - if (account && user) { - // called only on first login - // XXX account.expires_in used in example is not defined for authentik backend, but expires_at is - const expiresAt = (account.expires_at as number) - PRETIMEOUT; - const jwtToken = { - ...extendedToken, - accessToken: account.access_token, - accessTokenExpires: expiresAt * 1000, - refreshToken: account.refresh_token, - }; - kv.set( - `token:${jwtToken.sub}`, - JSON.stringify(jwtToken), - "EX", - DEFAULT_REDIS_KEY_TIMEOUT, - ); - return jwtToken; - } - - if (Date.now() < extendedToken.accessTokenExpires) { - return token; - } - - // access token has expired, try to update it - return await redisLockedrefreshAccessToken(token); - }, - async session({ session, token }) { - const extendedToken = token as JWTWithAccessToken; - const customSession = session as CustomSession; - customSession.accessToken = extendedToken.accessToken; - customSession.accessTokenExpires = extendedToken.accessTokenExpires; - customSession.error = extendedToken.error; - customSession.user = { - id: extendedToken.sub, - name: extendedToken.name, - email: extendedToken.email, - }; - return customSession; - }, - }, -}; - -async function redisLockedrefreshAccessToken(token: JWT) { - return await redlock.using( - [token.sub as string, "jwt-refresh"], - 5000, - async () => { - const redisToken = await kv.get(`token:${token.sub}`); - const currentToken = JSON.parse( - redisToken as string, - ) as JWTWithAccessToken; - - // if there is multiple requests for the same token, it may already have been refreshed - if (Date.now() < currentToken.accessTokenExpires) { - return currentToken; - } - - // now really do the request - const newToken = await refreshAccessToken(currentToken); - await kv.set( - `token:${currentToken.sub}`, - JSON.stringify(newToken), - "EX", - DEFAULT_REDIS_KEY_TIMEOUT, - ); - return newToken; - }, - ); -} - -async function refreshAccessToken(token: JWT): Promise { - try { - const url = `${process.env.AUTHENTIK_REFRESH_TOKEN_URL}`; - - const options = { - headers: { - "Content-Type": "application/x-www-form-urlencoded", - }, - body: new URLSearchParams({ - client_id: process.env.AUTHENTIK_CLIENT_ID as string, - client_secret: process.env.AUTHENTIK_CLIENT_SECRET as string, - grant_type: "refresh_token", - refresh_token: token.refreshToken as string, - }).toString(), - method: "POST", - }; - - const response = await fetch(url, options); - if (!response.ok) { - console.error( - new Date().toISOString(), - "Failed to refresh access token. 
Response status:", - response.status, - ); - const responseBody = await response.text(); - console.error(new Date().toISOString(), "Response body:", responseBody); - throw new Error(`Failed to refresh access token: ${response.statusText}`); - } - const refreshedTokens = await response.json(); - return { - ...token, - accessToken: refreshedTokens.access_token, - accessTokenExpires: - Date.now() + (refreshedTokens.expires_in - PRETIMEOUT) * 1000, - refreshToken: refreshedTokens.refresh_token, - }; - } catch (error) { - console.error("Error refreshing access token", error); - return { - ...token, - error: "RefreshAccessTokenError", - } as JWTWithAccessToken; - } -} +export const PROTECTED_PAGES = new RegExp( + LOGIN_REQUIRED_PAGES.map((page) => `^${page}$`).join("|"), +); diff --git a/www/app/lib/authBackend.ts b/www/app/lib/authBackend.ts new file mode 100644 index 00000000..af93b274 --- /dev/null +++ b/www/app/lib/authBackend.ts @@ -0,0 +1,178 @@ +import { AuthOptions } from "next-auth"; +import AuthentikProvider from "next-auth/providers/authentik"; +import type { JWT } from "next-auth/jwt"; +import { JWTWithAccessToken, CustomSession } from "./types"; +import { assertExists, assertExistsAndNonEmptyString } from "./utils"; +import { + REFRESH_ACCESS_TOKEN_BEFORE, + REFRESH_ACCESS_TOKEN_ERROR, +} from "./auth"; +import { + getTokenCache, + setTokenCache, + deleteTokenCache, +} from "./redisTokenCache"; +import { tokenCacheRedis } from "./redisClient"; +import { isBuildPhase } from "./next"; + +// REFRESH_ACCESS_TOKEN_BEFORE because refresh is based on access token expiration (imagine we cache it 30 days) +const TOKEN_CACHE_TTL = REFRESH_ACCESS_TOKEN_BEFORE; + +const refreshLocks = new Map>(); + +const CLIENT_ID = !isBuildPhase + ? assertExistsAndNonEmptyString(process.env.AUTHENTIK_CLIENT_ID) + : "noop"; +const CLIENT_SECRET = !isBuildPhase + ? 
assertExistsAndNonEmptyString(process.env.AUTHENTIK_CLIENT_SECRET) + : "noop"; + +export const authOptions: AuthOptions = { + providers: [ + AuthentikProvider({ + clientId: CLIENT_ID, + clientSecret: CLIENT_SECRET, + issuer: process.env.AUTHENTIK_ISSUER, + authorization: { + params: { + scope: "openid email profile offline_access", + }, + }, + }), + ], + session: { + strategy: "jwt", + }, + callbacks: { + async jwt({ token, account, user }) { + const KEY = `token:${token.sub}`; + + if (account && user) { + // called only on first login + // XXX account.expires_in used in example is not defined for authentik backend, but expires_at is + const expiresAtS = assertExists(account.expires_at); + const expiresAtMs = expiresAtS * 1000; + if (!account.access_token) { + await deleteTokenCache(tokenCacheRedis, KEY); + } else { + const jwtToken: JWTWithAccessToken = { + ...token, + accessToken: account.access_token, + accessTokenExpires: expiresAtMs, + refreshToken: account.refresh_token, + }; + await setTokenCache(tokenCacheRedis, KEY, { + token: jwtToken, + timestamp: Date.now(), + }); + return jwtToken; + } + } + + const currentToken = await getTokenCache(tokenCacheRedis, KEY); + if (currentToken && Date.now() < currentToken.token.accessTokenExpires) { + return currentToken.token; + } + + // access token has expired, try to update it + return await lockedRefreshAccessToken(token); + }, + async session({ session, token }) { + const extendedToken = token as JWTWithAccessToken; + return { + ...session, + accessToken: extendedToken.accessToken, + accessTokenExpires: extendedToken.accessTokenExpires, + error: extendedToken.error, + user: { + id: assertExists(extendedToken.sub), + name: extendedToken.name, + email: extendedToken.email, + }, + } satisfies CustomSession; + }, + }, +}; + +async function lockedRefreshAccessToken( + token: JWT, +): Promise { + const lockKey = `${token.sub}-refresh`; + + const existingRefresh = refreshLocks.get(lockKey); + if (existingRefresh) { + return await existingRefresh; + } + + const refreshPromise = (async () => { + try { + const cached = await getTokenCache(tokenCacheRedis, `token:${token.sub}`); + if (cached) { + if (Date.now() - cached.timestamp > TOKEN_CACHE_TTL) { + await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`); + } else if (Date.now() < cached.token.accessTokenExpires) { + return cached.token; + } + } + + const currentToken = cached?.token || (token as JWTWithAccessToken); + const newToken = await refreshAccessToken(currentToken); + + await setTokenCache(tokenCacheRedis, `token:${token.sub}`, { + token: newToken, + timestamp: Date.now(), + }); + + return newToken; + } finally { + setTimeout(() => refreshLocks.delete(lockKey), 100); + } + })(); + + refreshLocks.set(lockKey, refreshPromise); + return refreshPromise; +} + +async function refreshAccessToken(token: JWT): Promise { + try { + const url = `${process.env.AUTHENTIK_REFRESH_TOKEN_URL}`; + + const options = { + headers: { + "Content-Type": "application/x-www-form-urlencoded", + }, + body: new URLSearchParams({ + client_id: process.env.AUTHENTIK_CLIENT_ID as string, + client_secret: process.env.AUTHENTIK_CLIENT_SECRET as string, + grant_type: "refresh_token", + refresh_token: token.refreshToken as string, + }).toString(), + method: "POST", + }; + + const response = await fetch(url, options); + if (!response.ok) { + console.error( + new Date().toISOString(), + "Failed to refresh access token. 
Response status:", + response.status, + ); + const responseBody = await response.text(); + console.error(new Date().toISOString(), "Response body:", responseBody); + throw new Error(`Failed to refresh access token: ${response.statusText}`); + } + const refreshedTokens = await response.json(); + return { + ...token, + accessToken: refreshedTokens.access_token, + accessTokenExpires: Date.now() + refreshedTokens.expires_in * 1000, + refreshToken: refreshedTokens.refresh_token, + }; + } catch (error) { + console.error("Error refreshing access token", error); + return { + ...token, + error: REFRESH_ACCESS_TOKEN_ERROR, + } as JWTWithAccessToken; + } +} diff --git a/www/app/lib/edgeConfig.ts b/www/app/lib/edgeConfig.ts index 2e31e146..f234a2cf 100644 --- a/www/app/lib/edgeConfig.ts +++ b/www/app/lib/edgeConfig.ts @@ -1,5 +1,5 @@ import { get } from "@vercel/edge-config"; -import { isDevelopment } from "./utils"; +import { isBuildPhase } from "./next"; type EdgeConfig = { [domainWithDash: string]: { @@ -29,12 +29,18 @@ export function edgeDomainToKey(domain: string) { // get edge config server-side (prefer DomainContext when available), domain is the hostname export async function getConfig() { - const domain = new URL(process.env.NEXT_PUBLIC_SITE_URL!).hostname; - if (process.env.NEXT_PUBLIC_ENV === "development") { - return require("../../config").localConfig; + try { + return require("../../config").localConfig; + } catch (e) { + // next build() WILL try to execute the require above even if conditionally protected + // but thank god it at least runs catch{} block properly + if (!isBuildPhase) throw new Error(e); + return require("../../config-template").localConfig; + } } + const domain = new URL(process.env.NEXT_PUBLIC_SITE_URL!).hostname; let config = await get(edgeDomainToKey(domain)); if (typeof config !== "object") { diff --git a/www/app/lib/next.ts b/www/app/lib/next.ts new file mode 100644 index 00000000..91d88bd2 --- /dev/null +++ b/www/app/lib/next.ts @@ -0,0 +1,2 @@ +// next.js tries to run all the lib code during build phase; we don't always want it when e.g. 
+export const isBuildPhase = process.env.NEXT_PHASE?.includes("build");
diff --git a/www/app/lib/queryClient.tsx b/www/app/lib/queryClient.tsx
new file mode 100644
index 00000000..bd5946e0
--- /dev/null
+++ b/www/app/lib/queryClient.tsx
@@ -0,0 +1,17 @@
+"use client";
+
+import { QueryClient } from "@tanstack/react-query";
+
+export const queryClient = new QueryClient({
+  defaultOptions: {
+    queries: {
+      staleTime: 60 * 1000, // 1 minute
+      gcTime: 5 * 60 * 1000, // 5 minutes (formerly cacheTime)
+      retry: 1,
+      refetchOnWindowFocus: false,
+    },
+    mutations: {
+      retry: 0,
+    },
+  },
+});
diff --git a/www/app/lib/redisClient.ts b/www/app/lib/redisClient.ts
new file mode 100644
index 00000000..1be36538
--- /dev/null
+++ b/www/app/lib/redisClient.ts
@@ -0,0 +1,46 @@
+import Redis from "ioredis";
+import { isBuildPhase } from "./next";
+
+export type RedisClient = Pick<Redis, "get" | "setex" | "del">;
+
+const getRedisClient = (): RedisClient => {
+  const redisUrl = process.env.KV_URL;
+  if (!redisUrl) {
+    throw new Error("KV_URL environment variable is required");
+  }
+  const redis = new Redis(redisUrl, {
+    maxRetriesPerRequest: 3,
+    lazyConnect: true,
+  });
+
+  redis.on("error", (error) => {
+    console.error("Redis error:", error);
+  });
+
+  // not strictly necessary, but it surfaces Redis config errors by failing fast at startup;
+  // this happens only once; after that the connection is allowed to die and the lib is assumed to restore it eventually
+  redis.connect().catch((e) => {
+    console.error("Failed to connect to Redis:", e);
+    process.exit(1);
+  });
+
+  return redis;
+};
+
+// next.js buildtime usage - we want to isolate next.js "build" time concepts here
+const noopClient: RedisClient = (() => {
+  const noopSetex: Redis["setex"] = async () => {
+    return "OK" as const;
+  };
+  const noopDel: Redis["del"] = async () => {
+    return 0;
+  };
+  return {
+    get: async () => {
+      return null;
+    },
+    setex: noopSetex,
+    del: noopDel,
+  };
+})();
+export const tokenCacheRedis = isBuildPhase ?
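+// Because RedisClient is a narrow Pick of ioredis, anything implementing
+// get/setex/del can stand in for it. A hedged test-double sketch (our names,
+// not part of this patch):
+//
+//   const store = new Map<string, string>();
+//   const memoryClient: RedisClient = {
+//     get: async (key) => store.get(key) ?? null,
+//     setex: async (key, _ttl, value) => (store.set(key, String(value)), "OK" as const),
+//     del: async (key) => (store.delete(key) ? 1 : 0),
+//   };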
noopClient : getRedisClient(); diff --git a/www/app/lib/redisTokenCache.ts b/www/app/lib/redisTokenCache.ts new file mode 100644 index 00000000..4fa4e304 --- /dev/null +++ b/www/app/lib/redisTokenCache.ts @@ -0,0 +1,61 @@ +import { z } from "zod"; +import { REFRESH_ACCESS_TOKEN_BEFORE } from "./auth"; + +const TokenCacheEntrySchema = z.object({ + token: z.object({ + sub: z.string().optional(), + name: z.string().nullish(), + email: z.string().nullish(), + accessToken: z.string(), + accessTokenExpires: z.number(), + refreshToken: z.string().optional(), + error: z.string().optional(), + }), + timestamp: z.number(), +}); + +const TokenCacheEntryCodec = z.codec(z.string(), TokenCacheEntrySchema, { + decode: (jsonString) => { + const parsed = JSON.parse(jsonString); + return TokenCacheEntrySchema.parse(parsed); + }, + encode: (value) => JSON.stringify(value), +}); + +export type TokenCacheEntry = z.infer; + +export type KV = { + get(key: string): Promise; + setex(key: string, seconds: number, value: string): Promise<"OK">; + del(key: string): Promise; +}; + +export async function getTokenCache( + redis: KV, + key: string, +): Promise { + const data = await redis.get(key); + if (!data) return null; + + try { + return TokenCacheEntryCodec.decode(data); + } catch (error) { + console.error("Invalid token cache data:", error); + await redis.del(key); + return null; + } +} + +export async function setTokenCache( + redis: KV, + key: string, + value: TokenCacheEntry, +): Promise { + const encodedValue = TokenCacheEntryCodec.encode(value); + const ttlSeconds = Math.floor(REFRESH_ACCESS_TOKEN_BEFORE / 1000); + await redis.setex(key, ttlSeconds, encodedValue); +} + +export async function deleteTokenCache(redis: KV, key: string): Promise { + await redis.del(key); +} diff --git a/www/app/lib/types.ts b/www/app/lib/types.ts index 851ee5be..0576e186 100644 --- a/www/app/lib/types.ts +++ b/www/app/lib/types.ts @@ -1,10 +1,11 @@ -import { Session } from "next-auth"; -import { JWT } from "next-auth/jwt"; +import type { Session } from "next-auth"; +import type { JWT } from "next-auth/jwt"; +import { parseMaybeNonEmptyString } from "./utils"; export interface JWTWithAccessToken extends JWT { accessToken: string; accessTokenExpires: number; - refreshToken: string; + refreshToken?: string; error?: string; } @@ -12,9 +13,62 @@ export interface CustomSession extends Session { accessToken: string; accessTokenExpires: number; error?: string; - user: { - id?: string; - name?: string | null; - email?: string | null; + user: Session["user"] & { + id: string; }; } + +// assumption that JWT is JWTWithAccessToken - we set it in jwt callback of auth; typing isn't strong around there +// but the assumption is crucial to auth working +export const assertExtendedToken = ( + t: T, +): T & { + accessTokenExpires: number; + accessToken: string; +} => { + if ( + typeof (t as { accessTokenExpires: any }).accessTokenExpires === "number" && + !isNaN((t as { accessTokenExpires: any }).accessTokenExpires) && + typeof ( + t as { + accessToken: any; + } + ).accessToken === "string" && + parseMaybeNonEmptyString((t as { accessToken: any }).accessToken) !== null + ) { + return t as T & { + accessTokenExpires: number; + accessToken: string; + }; + } + throw new Error("Token is not extended with access token"); +}; + +export const assertExtendedTokenAndUserId = ( + t: T, +): T & { + accessTokenExpires: number; + accessToken: string; + user: U & { + id: string; + }; +} => { + const extendedToken = assertExtendedToken(t); + if (typeof 
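+  // Hedged usage sketch for redisTokenCache above (our example; `kv` is any
+  // object satisfying the KV type, e.g. tokenCacheRedis or a test double):
+  //
+  //   await setTokenCache(kv, "token:abc", {
+  //     token: { accessToken: "at", accessTokenExpires: Date.now() + 60_000 },
+  //     timestamp: Date.now(),
+  //   });
+  //   const entry = await getTokenCache(kv, "token:abc");
+  //   // a corrupt or invalid entry is deleted on read and returned as null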
(extendedToken.user as any)?.id === "string") {
+    return t as T & {
+      accessTokenExpires: number;
+      accessToken: string;
+      user: U & {
+        id: string;
+      };
+    };
+  }
+  throw new Error("Token is not extended with user id");
+};
+
+// best-effort check that the session is valid
+export const assertCustomSession = <S>(s: S): CustomSession => {
+  const r = assertExtendedTokenAndUserId(s);
+  // no other checks for now
+  return r as CustomSession;
+};
diff --git a/www/app/lib/useApi.ts b/www/app/lib/useApi.ts
deleted file mode 100644
index 837ef84f..00000000
--- a/www/app/lib/useApi.ts
+++ /dev/null
@@ -1,37 +0,0 @@
-import { useSession, signOut } from "next-auth/react";
-import { useContext, useEffect, useState } from "react";
-import { DomainContext, featureEnabled } from "../domainContext";
-import { OpenApi, DefaultService } from "../api";
-import { CustomSession } from "./types";
-import useSessionStatus from "./useSessionStatus";
-import useSessionAccessToken from "./useSessionAccessToken";
-
-export default function useApi(): DefaultService | null {
-  const api_url = useContext(DomainContext).api_url;
-  const [api, setApi] = useState<OpenApi | null>(null);
-  const { isLoading, isAuthenticated } = useSessionStatus();
-  const { accessToken, error } = useSessionAccessToken();
-
-  if (!api_url) throw new Error("no API URL");
-
-  useEffect(() => {
-    if (error === "RefreshAccessTokenError") {
-      signOut();
-    }
-  }, [error]);
-
-  useEffect(() => {
-    if (isLoading || (isAuthenticated && !accessToken)) {
-      return;
-    }
-
-    const openApi = new OpenApi({
-      BASE: api_url,
-      TOKEN: accessToken || undefined,
-    });
-
-    setApi(openApi);
-  }, [isLoading, isAuthenticated, accessToken]);
-
-  return api?.default ?? null;
-}
diff --git a/www/app/lib/useLoginRequiredPages.ts b/www/app/lib/useLoginRequiredPages.ts
new file mode 100644
index 00000000..37ee96b1
--- /dev/null
+++ b/www/app/lib/useLoginRequiredPages.ts
@@ -0,0 +1,26 @@
+// for paths that are not supposed to be public
+import { PROTECTED_PAGES } from "./auth";
+import { usePathname } from "next/navigation";
+import { useAuth } from "./AuthProvider";
+import { useEffect } from "react";
+
+const HOME = "/" as const;
+
+export const useLoginRequiredPages = () => {
+  const pathname = usePathname();
+  const isProtected = PROTECTED_PAGES.test(pathname);
+  const auth = useAuth();
+  const isNotLoggedIn = auth.status === "unauthenticated";
+  // safety: never redirect away from the home page itself
+  const isLastDestination = pathname === HOME;
+  const shouldRedirect = isNotLoggedIn && isProtected && !isLastDestination;
+  useEffect(() => {
+    if (!shouldRedirect) return;
+    // on the backend, the redirect goes straight to the auth provider; we can't do that here
+    // because it is hidden inside the next-auth middleware, so we "softly" send the user home
+    // warning: if HOME itself redirects somewhere else, isLastDestination no longer protects us from a loop
+    window.location.href = HOME;
+  }, [shouldRedirect]);
+  // optionally lets the caller avoid a flash of protected content, since window.location.href takes a moment
+  return shouldRedirect ?
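+  // Hedged usage sketch (our component, not part of this patch): a client
+  // layout can return null while the soft redirect is pending to avoid
+  // flashing protected content:
+  //
+  //   const redirectingTo = useLoginRequiredPages();
+  //   if (redirectingTo) return null;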
HOME : null; +}; diff --git a/www/app/lib/useSessionAccessToken.ts b/www/app/lib/useSessionAccessToken.ts deleted file mode 100644 index fc28c076..00000000 --- a/www/app/lib/useSessionAccessToken.ts +++ /dev/null @@ -1,42 +0,0 @@ -"use client"; - -import { useState, useEffect } from "react"; -import { useSession as useNextAuthSession } from "next-auth/react"; -import { CustomSession } from "./types"; - -export default function useSessionAccessToken() { - const { data: session } = useNextAuthSession(); - const customSession = session as CustomSession; - const naAccessToken = customSession?.accessToken; - const naAccessTokenExpires = customSession?.accessTokenExpires; - const naError = customSession?.error; - const [accessToken, setAccessToken] = useState(null); - const [accessTokenExpires, setAccessTokenExpires] = useState( - null, - ); - const [error, setError] = useState(); - - useEffect(() => { - if (naAccessToken !== accessToken) { - setAccessToken(naAccessToken); - } - }, [naAccessToken]); - - useEffect(() => { - if (naAccessTokenExpires !== accessTokenExpires) { - setAccessTokenExpires(naAccessTokenExpires); - } - }, [naAccessTokenExpires]); - - useEffect(() => { - if (naError !== error) { - setError(naError); - } - }, [naError]); - - return { - accessToken, - accessTokenExpires, - error, - }; -} diff --git a/www/app/lib/useSessionStatus.ts b/www/app/lib/useSessionStatus.ts deleted file mode 100644 index 5629c025..00000000 --- a/www/app/lib/useSessionStatus.ts +++ /dev/null @@ -1,22 +0,0 @@ -"use client"; - -import { useState, useEffect } from "react"; -import { useSession as useNextAuthSession } from "next-auth/react"; -import { Session } from "next-auth"; - -export default function useSessionStatus() { - const { status: naStatus } = useNextAuthSession(); - const [status, setStatus] = useState("loading"); - - useEffect(() => { - if (naStatus !== "loading" && naStatus !== status) { - setStatus(naStatus); - } - }, [naStatus]); - - return { - status, - isLoading: status === "loading", - isAuthenticated: status === "authenticated", - }; -} diff --git a/www/app/lib/useSessionUser.ts b/www/app/lib/useSessionUser.ts deleted file mode 100644 index 2da299f5..00000000 --- a/www/app/lib/useSessionUser.ts +++ /dev/null @@ -1,33 +0,0 @@ -"use client"; - -import { useState, useEffect } from "react"; -import { useSession as useNextAuthSession } from "next-auth/react"; -import { Session } from "next-auth"; - -// user type with id, name, email -export interface User { - id?: string | null; - name?: string | null; - email?: string | null; -} - -export default function useSessionUser() { - const { data: session } = useNextAuthSession(); - const [user, setUser] = useState(null); - - useEffect(() => { - if (!session?.user) { - setUser(null); - return; - } - if (JSON.stringify(session.user) !== JSON.stringify(user)) { - setUser(session.user); - } - }, [session]); - - return { - id: user?.id, - name: user?.name, - email: user?.email, - }; -} diff --git a/www/app/lib/useUserName.ts b/www/app/lib/useUserName.ts new file mode 100644 index 00000000..80814281 --- /dev/null +++ b/www/app/lib/useUserName.ts @@ -0,0 +1,7 @@ +import { useAuth } from "./AuthProvider"; + +export const useUserName = (): string | null | undefined => { + const auth = useAuth(); + if (auth.status !== "authenticated") return undefined; + return auth.user?.name || null; +}; diff --git a/www/app/lib/utils.ts b/www/app/lib/utils.ts index 80d0d91b..122ab234 100644 --- a/www/app/lib/utils.ts +++ b/www/app/lib/utils.ts @@ -137,9 +137,28 @@ 
export function extractDomain(url) { } } -export function assertExists(value: T | null | undefined, err?: string): T { +export type NonEmptyString = string & { __brand: "NonEmptyString" }; +export const parseMaybeNonEmptyString = ( + s: string, + trim = true, +): NonEmptyString | null => { + s = trim ? s.trim() : s; + return s.length > 0 ? (s as NonEmptyString) : null; +}; +export const parseNonEmptyString = (s: string, trim = true): NonEmptyString => + assertExists(parseMaybeNonEmptyString(s, trim), "Expected non-empty string"); + +export const assertExists = ( + value: T | null | undefined, + err?: string, +): T => { if (value === null || value === undefined) { throw new Error(`Assertion failed: ${err ?? "value is null or undefined"}`); } return value; -} +}; + +export const assertExistsAndNonEmptyString = ( + value: string | null | undefined, +): NonEmptyString => + parseNonEmptyString(assertExists(value, "Expected non-empty string")); diff --git a/www/app/providers.tsx b/www/app/providers.tsx index f0f1ea52..2e3b78eb 100644 --- a/www/app/providers.tsx +++ b/www/app/providers.tsx @@ -6,16 +6,26 @@ import system from "./styles/theme"; import { WherebyProvider } from "@whereby.com/browser-sdk/react"; import { Toaster } from "./components/ui/toaster"; import { NuqsAdapter } from "nuqs/adapters/next/app"; +import { QueryClientProvider } from "@tanstack/react-query"; +import { queryClient } from "./lib/queryClient"; +import { AuthProvider } from "./lib/AuthProvider"; +import { SessionProvider as SessionProviderNextAuth } from "next-auth/react"; export function Providers({ children }: { children: React.ReactNode }) { return ( - - - {children} - - - + + + + + + {children} + + + + + + ); } diff --git a/www/app/reflector-api.d.ts b/www/app/reflector-api.d.ts new file mode 100644 index 00000000..8a2cadb0 --- /dev/null +++ b/www/app/reflector-api.d.ts @@ -0,0 +1,2330 @@ +/** + * This file was auto-generated by openapi-typescript. + * Do not make direct changes to the file. + */ + +export interface paths { + "/metrics": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Metrics + * @description Endpoint that serves Prometheus metrics. 
+ */ + get: operations["metrics"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/meetings/{meeting_id}/consent": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Meeting Audio Consent */ + post: operations["v1_meeting_audio_consent"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms List */ + get: operations["v1_rooms_list"]; + put?: never; + /** Rooms Create */ + post: operations["v1_rooms_create"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms Get */ + get: operations["v1_rooms_get"]; + put?: never; + post?: never; + /** Rooms Delete */ + delete: operations["v1_rooms_delete"]; + options?: never; + head?: never; + /** Rooms Update */ + patch: operations["v1_rooms_update"]; + trace?: never; + }; + "/v1/rooms/{room_name}/meeting": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Rooms Create Meeting */ + post: operations["v1_rooms_create_meeting"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_id}/webhook/test": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Rooms Test Webhook + * @description Test webhook configuration by sending a sample payload. + */ + post: operations["v1_rooms_test_webhook"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcripts List */ + get: operations["v1_transcripts_list"]; + put?: never; + /** Transcripts Create */ + post: operations["v1_transcripts_create"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/search": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Transcripts Search + * @description Full-text search across transcript titles and content. 
+ */ + get: operations["v1_transcripts_search"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get */ + get: operations["v1_transcript_get"]; + put?: never; + post?: never; + /** Transcript Delete */ + delete: operations["v1_transcript_delete"]; + options?: never; + head?: never; + /** Transcript Update */ + patch: operations["v1_transcript_update"]; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/topics": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Topics */ + get: operations["v1_transcript_get_topics"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/topics/with-words": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Topics With Words */ + get: operations["v1_transcript_get_topics_with_words"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/topics/{topic_id}/words-per-speaker": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Topics With Words Per Speaker */ + get: operations["v1_transcript_get_topics_with_words_per_speaker"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/zulip": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Transcript Post To Zulip */ + post: operations["v1_transcript_post_to_zulip"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/audio/mp3": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Audio Mp3 */ + get: operations["v1_transcript_get_audio_mp3"]; + put?: never; + post?: never; + delete?: never; + options?: never; + /** Transcript Get Audio Mp3 */ + head: operations["v1_transcript_head_audio_mp3"]; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/audio/waveform": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Audio Waveform */ + get: operations["v1_transcript_get_audio_waveform"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/participants": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Participants */ + get: operations["v1_transcript_get_participants"]; + put?: never; + /** Transcript Add Participant */ + post: operations["v1_transcript_add_participant"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/participants/{participant_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Participant */ + get: operations["v1_transcript_get_participant"]; + put?: never; + post?: never; + /** 
Transcript Delete Participant */ + delete: operations["v1_transcript_delete_participant"]; + options?: never; + head?: never; + /** Transcript Update Participant */ + patch: operations["v1_transcript_update_participant"]; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/speaker/assign": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + /** Transcript Assign Speaker */ + patch: operations["v1_transcript_assign_speaker"]; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/speaker/merge": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + /** Transcript Merge Speaker */ + patch: operations["v1_transcript_merge_speaker"]; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/record/upload": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Transcript Record Upload */ + post: operations["v1_transcript_record_upload"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/events": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Transcript Get Websocket Events */ + get: operations["v1_transcript_get_websocket_events"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/record/webrtc": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Transcript Record Webrtc */ + post: operations["v1_transcript_record_webrtc"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/transcripts/{transcript_id}/process": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Transcript Process */ + post: operations["v1_transcript_process"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/me": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** User Me */ + get: operations["v1_user_me"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/zulip/streams": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Zulip Get Streams + * @description Get all Zulip streams. + */ + get: operations["v1_zulip_get_streams"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/zulip/streams/{stream_id}/topics": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Zulip Get Topics + * @description Get all topics for a specific Zulip stream. 
+ */ + get: operations["v1_zulip_get_topics"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/whereby": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Whereby Webhook */ + post: operations["v1_whereby_webhook"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; +} +export type webhooks = Record; +export interface components { + schemas: { + /** AudioWaveform */ + AudioWaveform: { + /** Data */ + data: number[]; + }; + /** Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post */ + Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post: { + /** + * Chunk + * Format: binary + */ + chunk: string; + }; + /** CreateParticipant */ + CreateParticipant: { + /** Speaker */ + speaker?: number | null; + /** Name */ + name: string; + }; + /** CreateRoom */ + CreateRoom: { + /** Name */ + name: string; + /** Zulip Auto Post */ + zulip_auto_post: boolean; + /** Zulip Stream */ + zulip_stream: string; + /** Zulip Topic */ + zulip_topic: string; + /** Is Locked */ + is_locked: boolean; + /** Room Mode */ + room_mode: string; + /** Recording Type */ + recording_type: string; + /** Recording Trigger */ + recording_trigger: string; + /** Is Shared */ + is_shared: boolean; + /** Webhook Url */ + webhook_url: string; + /** Webhook Secret */ + webhook_secret: string; + }; + /** CreateTranscript */ + CreateTranscript: { + /** Name */ + name: string; + /** + * Source Language + * @default en + */ + source_language: string; + /** + * Target Language + * @default en + */ + target_language: string; + source_kind?: components["schemas"]["SourceKind"] | null; + }; + /** DeletionStatus */ + DeletionStatus: { + /** Status */ + status: string; + }; + /** GetTranscript */ + GetTranscript: { + /** Id */ + id: string; + /** User Id */ + user_id: string | null; + /** Name */ + name: string; + /** + * Status + * @enum {string} + */ + status: + | "idle" + | "uploaded" + | "recording" + | "processing" + | "error" + | "ended"; + /** Locked */ + locked: boolean; + /** Duration */ + duration: number; + /** Title */ + title: string | null; + /** Short Summary */ + short_summary: string | null; + /** Long Summary */ + long_summary: string | null; + /** Created At */ + created_at: string; + /** + * Share Mode + * @default private + */ + share_mode: string; + /** Source Language */ + source_language: string | null; + /** Target Language */ + target_language: string | null; + /** Reviewed */ + reviewed: boolean; + /** Meeting Id */ + meeting_id: string | null; + source_kind: components["schemas"]["SourceKind"]; + /** Room Id */ + room_id?: string | null; + /** Room Name */ + room_name?: string | null; + /** Audio Deleted */ + audio_deleted?: boolean | null; + /** Participants */ + participants: components["schemas"]["TranscriptParticipant"][] | null; + }; + /** GetTranscriptMinimal */ + GetTranscriptMinimal: { + /** Id */ + id: string; + /** User Id */ + user_id: string | null; + /** Name */ + name: string; + /** + * Status + * @enum {string} + */ + status: + | "idle" + | "uploaded" + | "recording" + | "processing" + | "error" + | "ended"; + /** Locked */ + locked: boolean; + /** Duration */ + duration: number; + /** Title */ + title: string | null; + /** Short Summary */ + short_summary: string | null; + /** Long Summary */ + long_summary: string | null; + /** Created At */ + 
created_at: string; + /** + * Share Mode + * @default private + */ + share_mode: string; + /** Source Language */ + source_language: string | null; + /** Target Language */ + target_language: string | null; + /** Reviewed */ + reviewed: boolean; + /** Meeting Id */ + meeting_id: string | null; + source_kind: components["schemas"]["SourceKind"]; + /** Room Id */ + room_id?: string | null; + /** Room Name */ + room_name?: string | null; + /** Audio Deleted */ + audio_deleted?: boolean | null; + }; + /** GetTranscriptSegmentTopic */ + GetTranscriptSegmentTopic: { + /** Text */ + text: string; + /** Start */ + start: number; + /** Speaker */ + speaker: number; + }; + /** GetTranscriptTopic */ + GetTranscriptTopic: { + /** Id */ + id: string; + /** Title */ + title: string; + /** Summary */ + summary: string; + /** Timestamp */ + timestamp: number; + /** Duration */ + duration: number | null; + /** Transcript */ + transcript: string; + /** + * Segments + * @default [] + */ + segments: components["schemas"]["GetTranscriptSegmentTopic"][]; + }; + /** GetTranscriptTopicWithWords */ + GetTranscriptTopicWithWords: { + /** Id */ + id: string; + /** Title */ + title: string; + /** Summary */ + summary: string; + /** Timestamp */ + timestamp: number; + /** Duration */ + duration: number | null; + /** Transcript */ + transcript: string; + /** + * Segments + * @default [] + */ + segments: components["schemas"]["GetTranscriptSegmentTopic"][]; + /** + * Words + * @default [] + */ + words: components["schemas"]["Word"][]; + }; + /** GetTranscriptTopicWithWordsPerSpeaker */ + GetTranscriptTopicWithWordsPerSpeaker: { + /** Id */ + id: string; + /** Title */ + title: string; + /** Summary */ + summary: string; + /** Timestamp */ + timestamp: number; + /** Duration */ + duration: number | null; + /** Transcript */ + transcript: string; + /** + * Segments + * @default [] + */ + segments: components["schemas"]["GetTranscriptSegmentTopic"][]; + /** + * Words Per Speaker + * @default [] + */ + words_per_speaker: components["schemas"]["SpeakerWords"][]; + }; + /** HTTPValidationError */ + HTTPValidationError: { + /** Detail */ + detail?: components["schemas"]["ValidationError"][]; + }; + /** Meeting */ + Meeting: { + /** Id */ + id: string; + /** Room Name */ + room_name: string; + /** Room Url */ + room_url: string; + /** Host Room Url */ + host_room_url: string; + /** + * Start Date + * Format: date-time + */ + start_date: string; + /** + * End Date + * Format: date-time + */ + end_date: string; + /** + * Recording Type + * @default cloud + * @enum {string} + */ + recording_type: "none" | "local" | "cloud"; + }; + /** MeetingConsentRequest */ + MeetingConsentRequest: { + /** Consent Given */ + consent_given: boolean; + }; + /** Page[GetTranscriptMinimal] */ + Page_GetTranscriptMinimal_: { + /** Items */ + items: components["schemas"]["GetTranscriptMinimal"][]; + /** Total */ + total?: number | null; + /** Page */ + page: number | null; + /** Size */ + size: number | null; + /** Pages */ + pages?: number | null; + }; + /** Page[RoomDetails] */ + Page_RoomDetails_: { + /** Items */ + items: components["schemas"]["RoomDetails"][]; + /** Total */ + total?: number | null; + /** Page */ + page: number | null; + /** Size */ + size: number | null; + /** Pages */ + pages?: number | null; + }; + /** Participant */ + Participant: { + /** Id */ + id: string; + /** Speaker */ + speaker: number | null; + /** Name */ + name: string; + }; + /** Room */ + Room: { + /** Id */ + id: string; + /** Name */ + name: string; + /** User 
Id */ + user_id: string; + /** + * Created At + * Format: date-time + */ + created_at: string; + /** Zulip Auto Post */ + zulip_auto_post: boolean; + /** Zulip Stream */ + zulip_stream: string; + /** Zulip Topic */ + zulip_topic: string; + /** Is Locked */ + is_locked: boolean; + /** Room Mode */ + room_mode: string; + /** Recording Type */ + recording_type: string; + /** Recording Trigger */ + recording_trigger: string; + /** Is Shared */ + is_shared: boolean; + }; + /** RoomDetails */ + RoomDetails: { + /** Id */ + id: string; + /** Name */ + name: string; + /** User Id */ + user_id: string; + /** + * Created At + * Format: date-time + */ + created_at: string; + /** Zulip Auto Post */ + zulip_auto_post: boolean; + /** Zulip Stream */ + zulip_stream: string; + /** Zulip Topic */ + zulip_topic: string; + /** Is Locked */ + is_locked: boolean; + /** Room Mode */ + room_mode: string; + /** Recording Type */ + recording_type: string; + /** Recording Trigger */ + recording_trigger: string; + /** Is Shared */ + is_shared: boolean; + /** Webhook Url */ + webhook_url: string | null; + /** Webhook Secret */ + webhook_secret: string | null; + }; + /** RtcOffer */ + RtcOffer: { + /** Sdp */ + sdp: string; + /** Type */ + type: string; + }; + /** SearchResponse */ + SearchResponse: { + /** Results */ + results: components["schemas"]["SearchResult"][]; + /** + * Total + * @description Total number of search results + */ + total: number; + /** Query */ + query?: string | null; + /** + * Limit + * @description Results per page + */ + limit: number; + /** + * Offset + * @description Number of results to skip + */ + offset: number; + }; + /** + * SearchResult + * @description Public search result model with computed fields. + */ + SearchResult: { + /** Id */ + id: string; + /** Title */ + title?: string | null; + /** User Id */ + user_id?: string | null; + /** Room Id */ + room_id?: string | null; + /** Room Name */ + room_name?: string | null; + source_kind: components["schemas"]["SourceKind"]; + /** Created At */ + created_at: string; + /** Status */ + status: string; + /** Rank */ + rank: number; + /** + * Duration + * @description Duration in seconds + */ + duration: number | null; + /** + * Search Snippets + * @description Text snippets around search matches + */ + search_snippets: string[]; + /** + * Total Match Count + * @description Total number of matches found in the transcript + * @default 0 + */ + total_match_count: number; + }; + /** + * SourceKind + * @enum {string} + */ + SourceKind: "room" | "live" | "file"; + /** SpeakerAssignment */ + SpeakerAssignment: { + /** Speaker */ + speaker?: number | null; + /** Participant */ + participant?: string | null; + /** Timestamp From */ + timestamp_from: number; + /** Timestamp To */ + timestamp_to: number; + }; + /** SpeakerAssignmentStatus */ + SpeakerAssignmentStatus: { + /** Status */ + status: string; + }; + /** SpeakerMerge */ + SpeakerMerge: { + /** Speaker From */ + speaker_from: number; + /** Speaker To */ + speaker_to: number; + }; + /** SpeakerWords */ + SpeakerWords: { + /** Speaker */ + speaker: number; + /** Words */ + words: components["schemas"]["Word"][]; + }; + /** Stream */ + Stream: { + /** Stream Id */ + stream_id: number; + /** Name */ + name: string; + }; + /** Topic */ + Topic: { + /** Name */ + name: string; + }; + /** TranscriptParticipant */ + TranscriptParticipant: { + /** Id */ + id?: string; + /** Speaker */ + speaker: number | null; + /** Name */ + name: string; + }; + /** UpdateParticipant */ + UpdateParticipant: { + 
/** Speaker */ + speaker?: number | null; + /** Name */ + name?: string | null; + }; + /** UpdateRoom */ + UpdateRoom: { + /** Name */ + name: string; + /** Zulip Auto Post */ + zulip_auto_post: boolean; + /** Zulip Stream */ + zulip_stream: string; + /** Zulip Topic */ + zulip_topic: string; + /** Is Locked */ + is_locked: boolean; + /** Room Mode */ + room_mode: string; + /** Recording Type */ + recording_type: string; + /** Recording Trigger */ + recording_trigger: string; + /** Is Shared */ + is_shared: boolean; + /** Webhook Url */ + webhook_url: string; + /** Webhook Secret */ + webhook_secret: string; + }; + /** UpdateTranscript */ + UpdateTranscript: { + /** Name */ + name?: string | null; + /** Locked */ + locked?: boolean | null; + /** Title */ + title?: string | null; + /** Short Summary */ + short_summary?: string | null; + /** Long Summary */ + long_summary?: string | null; + /** Share Mode */ + share_mode?: ("public" | "semi-private" | "private") | null; + /** Participants */ + participants?: components["schemas"]["TranscriptParticipant"][] | null; + /** Reviewed */ + reviewed?: boolean | null; + /** Audio Deleted */ + audio_deleted?: boolean | null; + }; + /** UserInfo */ + UserInfo: { + /** Sub */ + sub: string; + /** Email */ + email: string | null; + /** Email Verified */ + email_verified: boolean | null; + }; + /** ValidationError */ + ValidationError: { + /** Location */ + loc: (string | number)[]; + /** Message */ + msg: string; + /** Error Type */ + type: string; + }; + /** WebhookTestResult */ + WebhookTestResult: { + /** Success */ + success: boolean; + /** + * Message + * @default + */ + message: string; + /** + * Error + * @default + */ + error: string; + /** Status Code */ + status_code?: number | null; + /** Response Preview */ + response_preview?: string | null; + }; + /** WherebyWebhookEvent */ + WherebyWebhookEvent: { + /** Apiversion */ + apiVersion: string; + /** Id */ + id: string; + /** + * Createdat + * Format: date-time + */ + createdAt: string; + /** Type */ + type: string; + /** Data */ + data: { + [key: string]: unknown; + }; + }; + /** Word */ + Word: { + /** Text */ + text: string; + /** + * Start + * @description Time in seconds with float part + */ + start: number; + /** + * End + * @description Time in seconds with float part + */ + end: number; + /** + * Speaker + * @default 0 + */ + speaker: number; + }; + }; + responses: never; + parameters: never; + requestBodies: never; + headers: never; + pathItems: never; +} +export type $defs = Record; +export interface operations { + metrics: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + }; + }; + v1_meeting_audio_consent: { + parameters: { + query?: never; + header?: never; + path: { + meeting_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["MeetingConsentRequest"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_list: { + parameters: { + query?: { + /** @description Page number 
*/ + page?: number; + /** @description Page size */ + size?: number; + }; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Page_RoomDetails_"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_create: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["CreateRoom"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Room"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_get: { + parameters: { + query?: never; + header?: never; + path: { + room_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["RoomDetails"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_delete: { + parameters: { + query?: never; + header?: never; + path: { + room_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["DeletionStatus"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_update: { + parameters: { + query?: never; + header?: never; + path: { + room_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["UpdateRoom"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["RoomDetails"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_create_meeting: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Meeting"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_test_webhook: { + parameters: { + query?: never; + header?: never; + path: { + room_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { 
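+    // Hedged note (our example; this file is generated, so such aliases live
+    // in application code): schema types are consumed via indexed access, e.g.
+    //   type Room = components["schemas"]["Room"];
+    //   type RoomsPage = components["schemas"]["Page_RoomDetails_"];
+    // while openapi-fetch / openapi-react-query infer request and response
+    // shapes directly from the `paths` interface.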
+ /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["WebhookTestResult"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcripts_list: { + parameters: { + query?: { + source_kind?: components["schemas"]["SourceKind"] | null; + room_id?: string | null; + search_term?: string | null; + /** @description Page number */ + page?: number; + /** @description Page size */ + size?: number; + }; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Page_GetTranscriptMinimal_"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcripts_create: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["CreateTranscript"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GetTranscript"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcripts_search: { + parameters: { + query: { + /** @description Search query text */ + q: string; + /** @description Results per page */ + limit?: number; + /** @description Number of results to skip */ + offset?: number; + room_id?: string | null; + source_kind?: components["schemas"]["SourceKind"] | null; + }; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SearchResponse"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GetTranscript"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_delete: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["DeletionStatus"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + 
content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_update: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["UpdateTranscript"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GetTranscript"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_topics: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GetTranscriptTopic"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_topics_with_words: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GetTranscriptTopicWithWords"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_topics_with_words_per_speaker: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + topic_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["GetTranscriptTopicWithWordsPerSpeaker"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_post_to_zulip: { + parameters: { + query: { + stream: string; + topic: string; + include_topics: boolean; + }; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_audio_mp3: { + parameters: { + query?: { + token?: string | null; + }; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + 
content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_head_audio_mp3: { + parameters: { + query?: { + token?: string | null; + }; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_audio_waveform: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["AudioWaveform"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_participants: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Participant"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_add_participant: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["CreateParticipant"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Participant"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_participant: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + participant_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Participant"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_delete_participant: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + participant_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["DeletionStatus"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": 
components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_update_participant: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + participant_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["UpdateParticipant"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Participant"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_assign_speaker: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["SpeakerAssignment"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SpeakerAssignmentStatus"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_merge_speaker: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["SpeakerMerge"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["SpeakerAssignmentStatus"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_record_upload: { + parameters: { + query: { + chunk_number: number; + total_chunks: number; + }; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "multipart/form-data": components["schemas"]["Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_get_websocket_events: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_record_webrtc: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["RtcOffer"]; + }; + }; + 
responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_transcript_process: { + parameters: { + query?: never; + header?: never; + path: { + transcript_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_user_me: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["UserInfo"] | null; + }; + }; + }; + }; + v1_zulip_get_streams: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Stream"][]; + }; + }; + }; + }; + v1_zulip_get_topics: { + parameters: { + query?: never; + header?: never; + path: { + stream_id: number; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Topic"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_whereby_webhook: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["WherebyWebhookEvent"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; +} diff --git a/www/jest.config.js b/www/jest.config.js new file mode 100644 index 00000000..d2f3247b --- /dev/null +++ b/www/jest.config.js @@ -0,0 +1,8 @@ +module.exports = { + preset: "ts-jest", + testEnvironment: "node", + roots: ["/app"], + testMatch: ["**/__tests__/**/*.test.ts"], + collectCoverage: true, + collectCoverageFrom: ["app/**/*.ts", "!app/**/*.d.ts"], +}; diff --git a/www/middleware.ts b/www/middleware.ts index 39145220..2b60d715 100644 --- a/www/middleware.ts +++ b/www/middleware.ts @@ -1,16 +1,7 @@ import { withAuth } from "next-auth/middleware"; import { getConfig } from "./app/lib/edgeConfig"; import { NextResponse } from "next/server"; - -const LOGIN_REQUIRED_PAGES = [ - "/transcripts/[!new]", - "/browse(.*)", - "/rooms(.*)", -]; - -const PROTECTED_PAGES = new RegExp( - LOGIN_REQUIRED_PAGES.map((page) => `^${page}$`).join("|"), -); +import 
{ PROTECTED_PAGES } from "./app/lib/auth"; export const config = { matcher: [ diff --git a/www/next.config.js b/www/next.config.js index e37d5402..bbc3f710 100644 --- a/www/next.config.js +++ b/www/next.config.js @@ -2,6 +2,9 @@ const nextConfig = { output: "standalone", experimental: { esmExternals: "loose" }, + env: { + IS_CI: process.env.IS_CI, + }, }; module.exports = nextConfig; diff --git a/www/openapi-ts.config.ts b/www/openapi-ts.config.ts deleted file mode 100644 index 9304b8f7..00000000 --- a/www/openapi-ts.config.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { defineConfig } from "@hey-api/openapi-ts"; - -export default defineConfig({ - client: "axios", - name: "OpenApi", - input: "http://127.0.0.1:1250/openapi.json", - output: { - path: "./app/api", - format: "prettier", - }, - services: { - asClass: true, - }, -}); diff --git a/www/package.json b/www/package.json index 482a29f6..b7511147 100644 --- a/www/package.json +++ b/www/package.json @@ -8,7 +8,8 @@ "start": "next start", "lint": "next lint", "format": "prettier --write .", - "openapi": "openapi-ts" + "openapi": "openapi-typescript http://127.0.0.1:1250/openapi.json -o ./app/reflector-api.d.ts", + "test": "jest" }, "dependencies": { "@chakra-ui/react": "^3.24.2", @@ -17,21 +18,24 @@ "@fortawesome/free-solid-svg-icons": "^6.4.0", "@fortawesome/react-fontawesome": "^0.2.0", "@sentry/nextjs": "^7.77.0", + "@tanstack/react-query": "^5.85.9", + "@types/ioredis": "^5.0.0", "@vercel/edge-config": "^0.4.1", - "@vercel/kv": "^2.0.0", "@whereby.com/browser-sdk": "^3.3.4", "autoprefixer": "10.4.20", "axios": "^1.8.2", "eslint": "^9.33.0", "eslint-config-next": "^14.2.31", "fontawesome": "^5.6.3", - "ioredis": "^5.4.1", + "ioredis": "^5.7.0", "jest-worker": "^29.6.2", "lucide-react": "^0.525.0", "next": "^14.2.30", "next-auth": "^4.24.7", "next-themes": "^0.4.6", "nuqs": "^2.4.3", + "openapi-fetch": "^0.14.0", + "openapi-react-query": "^0.5.0", "postcss": "8.4.31", "prop-types": "^15.8.1", "react": "^18.2.0", @@ -41,21 +45,24 @@ "react-markdown": "^9.0.0", "react-qr-code": "^2.0.12", "react-select-search": "^4.1.7", - "redlock": "^5.0.0-beta.2", "sass": "^1.63.6", "simple-peer": "^9.11.1", "tailwindcss": "^3.3.2", "typescript": "^5.1.6", - "wavesurfer.js": "^7.4.2" + "wavesurfer.js": "^7.4.2", + "zod": "^4.1.5" }, "main": "index.js", "repository": "https://github.com/Monadical-SAS/reflector-ui.git", "author": "Andreas ", "license": "All Rights Reserved", "devDependencies": { - "@hey-api/openapi-ts": "^0.48.0", + "@types/jest": "^30.0.0", "@types/react": "18.2.20", + "jest": "^30.1.3", + "openapi-typescript": "^7.9.1", "prettier": "^3.0.0", + "ts-jest": "^29.4.1", "vercel": "^37.3.0" }, "packageManager": "pnpm@10.14.0+sha512.ad27a79641b49c3e481a16a805baa71817a04bbe06a38d17e60e2eaee83f6a146c6a688125f5792e48dd5ba30e7da52a5cda4c3992b9ccf333f9ce223af84748" diff --git a/www/pnpm-lock.yaml b/www/pnpm-lock.yaml index 55aef9c8..14b42c55 100644 --- a/www/pnpm-lock.yaml +++ b/www/pnpm-lock.yaml @@ -24,13 +24,16 @@ importers: version: 0.2.3(@fortawesome/fontawesome-svg-core@6.7.2)(react@18.3.1) "@sentry/nextjs": specifier: ^7.77.0 - version: 7.120.4(next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1) + version: 7.120.4(next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1) + "@tanstack/react-query": + specifier: ^5.85.9 + version: 5.85.9(react@18.3.1) + "@types/ioredis": + specifier: ^5.0.0 + version: 5.0.0 "@vercel/edge-config": 
specifier: ^0.4.1 version: 0.4.1 - "@vercel/kv": - specifier: ^2.0.0 - version: 2.0.0 "@whereby.com/browser-sdk": specifier: ^3.3.4 version: 3.13.1(@types/react@18.2.20)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -50,7 +53,7 @@ importers: specifier: ^5.6.3 version: 5.6.3 ioredis: - specifier: ^5.4.1 + specifier: ^5.7.0 version: 5.7.0 jest-worker: specifier: ^29.6.2 @@ -60,16 +63,22 @@ importers: version: 0.525.0(react@18.3.1) next: specifier: ^14.2.30 - version: 14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) + version: 14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) next-auth: specifier: ^4.24.7 - version: 4.24.11(next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + version: 4.24.11(next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) next-themes: specifier: ^0.4.6 version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) nuqs: specifier: ^2.4.3 - version: 2.4.3(next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1) + version: 2.4.3(next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1) + openapi-fetch: + specifier: ^0.14.0 + version: 0.14.0 + openapi-react-query: + specifier: ^0.5.0 + version: 0.5.0(@tanstack/react-query@5.85.9(react@18.3.1))(openapi-fetch@0.14.0) postcss: specifier: 8.4.31 version: 8.4.31 @@ -97,9 +106,6 @@ importers: react-select-search: specifier: ^4.1.7 version: 4.1.8(prop-types@15.8.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) - redlock: - specifier: ^5.0.0-beta.2 - version: 5.0.0-beta.2 sass: specifier: ^1.63.6 version: 1.90.0 @@ -115,16 +121,28 @@ importers: wavesurfer.js: specifier: ^7.4.2 version: 7.10.1 + zod: + specifier: ^4.1.5 + version: 4.1.5 devDependencies: - "@hey-api/openapi-ts": - specifier: ^0.48.0 - version: 0.48.3(typescript@5.9.2) + "@types/jest": + specifier: ^30.0.0 + version: 30.0.0 "@types/react": specifier: 18.2.20 version: 18.2.20 + jest: + specifier: ^30.1.3 + version: 30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + openapi-typescript: + specifier: ^7.9.1 + version: 7.9.1(typescript@5.9.2) prettier: specifier: ^3.0.0 version: 3.6.2 + ts-jest: + specifier: ^29.4.1 + version: 29.4.1(@babel/core@7.28.3)(@jest/transform@30.1.2)(@jest/types@30.0.5)(babel-jest@30.1.2(@babel/core@7.28.3))(jest-util@30.0.5)(jest@30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)))(typescript@5.9.2) vercel: specifier: ^37.3.0 version: 37.14.0 @@ -137,12 +155,12 @@ packages: } engines: { node: ">=10" } - "@apidevtools/json-schema-ref-parser@11.6.4": + "@ampproject/remapping@2.3.0": resolution: { - integrity: sha512-9K6xOqeevacvweLGik6LnZCb1fBtCOSIWQs8d096XGeqoLKC33UVMGz9+77Gw44KvbH4pKcQPWo4ZpxkXYj05w==, + integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==, } - engines: { node: ">= 16" } + engines: { node: ">=6.0.0" } "@ark-ui/react@5.18.2": resolution: @@ -160,6 +178,20 @@ packages: } engines: { node: ">=6.9.0" } + "@babel/compat-data@7.28.0": + resolution: + { + integrity: sha512-60X7qkglvrap8mn1lh2ebxXdZYtUcpd7gsmy9kLaBJ4i/WdY8PqTSdxyA8qraikqKQK5C1KRBKXqznrVapyNaw==, + } + engines: { node: ">=6.9.0" } + + 
"@babel/core@7.28.3": + resolution: + { + integrity: sha512-yDBHV9kQNcr2/sUr9jghVyz9C3Y5G2zUM2H2lo+9mKv4sFgbA8s8Z9t8D1jiTkGoO/NoIfKMyKWr4s6CN23ZwQ==, + } + engines: { node: ">=6.9.0" } + "@babel/generator@7.28.0": resolution: { @@ -167,6 +199,20 @@ packages: } engines: { node: ">=6.9.0" } + "@babel/generator@7.28.3": + resolution: + { + integrity: sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==, + } + engines: { node: ">=6.9.0" } + + "@babel/helper-compilation-targets@7.27.2": + resolution: + { + integrity: sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==, + } + engines: { node: ">=6.9.0" } + "@babel/helper-globals@7.28.0": resolution: { @@ -181,6 +227,22 @@ packages: } engines: { node: ">=6.9.0" } + "@babel/helper-module-transforms@7.28.3": + resolution: + { + integrity: sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0 + + "@babel/helper-plugin-utils@7.27.1": + resolution: + { + integrity: sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==, + } + engines: { node: ">=6.9.0" } + "@babel/helper-string-parser@7.27.1": resolution: { @@ -195,6 +257,20 @@ packages: } engines: { node: ">=6.9.0" } + "@babel/helper-validator-option@7.27.1": + resolution: + { + integrity: sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==, + } + engines: { node: ">=6.9.0" } + + "@babel/helpers@7.28.3": + resolution: + { + integrity: sha512-PTNtvUQihsAsDHMOP5pfobP8C6CM4JWXmP8DrEIt46c3r2bf87Ua1zoqevsMo9g+tWDwgWrFP5EIxuBx5RudAw==, + } + engines: { node: ">=6.9.0" } + "@babel/parser@7.28.0": resolution: { @@ -203,6 +279,156 @@ packages: engines: { node: ">=6.0.0" } hasBin: true + "@babel/parser@7.28.3": + resolution: + { + integrity: sha512-7+Ey1mAgYqFAx2h0RuoxcQT5+MlG3GTV0TQrgr7/ZliKsm/MNDxVVutlWaziMq7wJNAz8MTqz55XLpWvva6StA==, + } + engines: { node: ">=6.0.0" } + hasBin: true + + "@babel/plugin-syntax-async-generators@7.8.4": + resolution: + { + integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-bigint@7.8.3": + resolution: + { + integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-class-properties@7.12.13": + resolution: + { + integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-class-static-block@7.14.5": + resolution: + { + integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-import-attributes@7.27.1": + resolution: + { + integrity: sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-import-meta@7.10.4": + resolution: + { + integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + 
"@babel/plugin-syntax-json-strings@7.8.3": + resolution: + { + integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-jsx@7.27.1": + resolution: + { + integrity: sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-logical-assignment-operators@7.10.4": + resolution: + { + integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-nullish-coalescing-operator@7.8.3": + resolution: + { + integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-numeric-separator@7.10.4": + resolution: + { + integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-object-rest-spread@7.8.3": + resolution: + { + integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-optional-catch-binding@7.8.3": + resolution: + { + integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-optional-chaining@7.8.3": + resolution: + { + integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==, + } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-private-property-in-object@7.14.5": + resolution: + { + integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-top-level-await@7.14.5": + resolution: + { + integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0-0 + + "@babel/plugin-syntax-typescript@7.27.1": + resolution: + { + integrity: sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==, + } + engines: { node: ">=6.9.0" } + peerDependencies: + "@babel/core": ^7.0.0-0 + "@babel/runtime@7.28.2": resolution: { @@ -224,6 +450,13 @@ packages: } engines: { node: ">=6.9.0" } + "@babel/traverse@7.28.3": + resolution: + { + integrity: sha512-7w4kZYHneL3A6NP2nxzHvT3HCZ7puDZZjFMqDpBPECub79sTtSO5CGXDkKrTQq8ksAwfD/XI2MRFX23njdDaIQ==, + } + engines: { node: ">=6.9.0" } + "@babel/types@7.28.2": resolution: { @@ -231,6 +464,12 @@ packages: } engines: { node: ">=6.9.0" } + "@bcoe/v8-coverage@0.2.3": + resolution: + { + integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==, + } + "@chakra-ui/react@3.24.2": resolution: { @@ -516,16 +755,6 @@ packages: "@fortawesome/fontawesome-svg-core": ~1 || ~6 || ~7 react: ^16.3 || ^17.0.0 || ^18.0.0 || ^19.0.0 - "@hey-api/openapi-ts@0.48.3": - resolution: - { - integrity: sha512-R53Nr4Gicz77icS+RiH0fwHa9A0uFPtzsjC8SBaGwtOel5ZyxeBbayWE6HhE789hp3dok9pegwWncwwOrr4WFA==, - 
} - engines: { node: ^18.0.0 || >=20.0.0 } - hasBin: true - peerDependencies: - typescript: ^5.x - "@humanfs/core@0.19.1": resolution: { @@ -573,10 +802,10 @@ packages: integrity: sha512-p+Zh1sb6EfrfVaS86jlHGQ9HA66fJhV9x5LiE5vCbZtXEHAuhcmUZUdZ4WrFpUBfNalr2OkAJI5AcKEQF+Lebw==, } - "@ioredis/commands@1.3.0": + "@ioredis/commands@1.3.1": resolution: { - integrity: sha512-M/T6Zewn7sDaBQEqIZ8Rb+i9y8qfGmq+5SDFSf9sA2lUZTmdDLVdOiQaeDp+Q4wElZ9HG1GAX5KhDaidp6LQsQ==, + integrity: sha512-bYtU8avhGIcje3IhvF9aSjsa5URMZBHnwKtOvXsT4sfYy9gppW11gLPT/9oNqlJZD47yPKveQFTAFWpHjKvUoQ==, } "@isaacs/cliui@8.0.2": @@ -586,6 +815,107 @@ packages: } engines: { node: ">=12" } + "@istanbuljs/load-nyc-config@1.1.0": + resolution: + { + integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==, + } + engines: { node: ">=8" } + + "@istanbuljs/schema@0.1.3": + resolution: + { + integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==, + } + engines: { node: ">=8" } + + "@jest/console@30.1.2": + resolution: + { + integrity: sha512-BGMAxj8VRmoD0MoA/jo9alMXSRoqW8KPeqOfEo1ncxnRLatTBCpRoOwlwlEMdudp68Q6WSGwYrrLtTGOh8fLzw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/core@30.1.3": + resolution: + { + integrity: sha512-LIQz7NEDDO1+eyOA2ZmkiAyYvZuo6s1UxD/e2IHldR6D7UYogVq3arTmli07MkENLq6/3JEQjp0mA8rrHHJ8KQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + "@jest/diff-sequences@30.0.1": + resolution: + { + integrity: sha512-n5H8QLDJ47QqbCNn5SuFjCRDrOLEZ0h8vAHCK5RL9Ls7Xa8AQLa/YxAc9UjFqoEDM48muwtBGjtMY5cr0PLDCw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/environment@30.1.2": + resolution: + { + integrity: sha512-N8t1Ytw4/mr9uN28OnVf0SYE2dGhaIxOVYcwsf9IInBKjvofAjbFRvedvBBlyTYk2knbJTiEjEJ2PyyDIBnd9w==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/expect-utils@30.1.2": + resolution: + { + integrity: sha512-HXy1qT/bfdjCv7iC336ExbqqYtZvljrV8odNdso7dWK9bSeHtLlvwWWC3YSybSPL03Gg5rug6WLCZAZFH72m0A==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/expect@30.1.2": + resolution: + { + integrity: sha512-tyaIExOwQRCxPCGNC05lIjWJztDwk2gPDNSDGg1zitXJJ8dC3++G/CRjE5mb2wQsf89+lsgAgqxxNpDLiCViTA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/fake-timers@30.1.2": + resolution: + { + integrity: sha512-Beljfv9AYkr9K+ETX9tvV61rJTY706BhBUtiaepQHeEGfe0DbpvUA5Z3fomwc5Xkhns6NWrcFDZn+72fLieUnA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/get-type@30.1.0": + resolution: + { + integrity: sha512-eMbZE2hUnx1WV0pmURZY9XoXPkUYjpc55mb0CrhtdWLtzMQPFvu/rZkTLZFTsdaVQa+Tr4eWAteqcUzoawq/uA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/globals@30.1.2": + resolution: + { + integrity: sha512-teNTPZ8yZe3ahbYnvnVRDeOjr+3pu2uiAtNtrEsiMjVPPj+cXd5E/fr8BL7v/T7F31vYdEHrI5cC/2OoO/vM9A==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/pattern@30.0.1": + resolution: + { + integrity: sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/reporters@30.1.3": + resolution: + { + integrity: 
sha512-VWEQmJWfXMOrzdFEOyGjUEOuVXllgZsoPtEHZzfdNz18RmzJ5nlR6kp8hDdY8dDS1yGOXAY7DHT+AOHIPSBV0w==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + "@jest/schemas@29.6.3": resolution: { @@ -593,6 +923,48 @@ packages: } engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 } + "@jest/schemas@30.0.5": + resolution: + { + integrity: sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/snapshot-utils@30.1.2": + resolution: + { + integrity: sha512-vHoMTpimcPSR7OxS2S0V1Cpg8eKDRxucHjoWl5u4RQcnxqQrV3avETiFpl8etn4dqxEGarBeHbIBety/f8mLXw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/source-map@30.0.1": + resolution: + { + integrity: sha512-MIRWMUUR3sdbP36oyNyhbThLHyJ2eEDClPCiHVbrYAe5g3CHRArIVpBw7cdSB5fr+ofSfIb2Tnsw8iEHL0PYQg==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/test-result@30.1.3": + resolution: + { + integrity: sha512-P9IV8T24D43cNRANPPokn7tZh0FAFnYS2HIfi5vK18CjRkTDR9Y3e1BoEcAJnl4ghZZF4Ecda4M/k41QkvurEQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/test-sequencer@30.1.3": + resolution: + { + integrity: sha512-82J+hzC0qeQIiiZDThh+YUadvshdBswi5nuyXlEmXzrhw5ZQSRHeQ5LpVMD/xc8B3wPePvs6VMzHnntxL+4E3w==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + "@jest/transform@30.1.2": + resolution: + { + integrity: sha512-UYYFGifSgfjujf1Cbd3iU/IQoSd6uwsj8XHj5DSDf5ERDcWMdJOPTkHWXj4U+Z/uMagyOQZ6Vne8C4nRIrCxqA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + "@jest/types@29.6.3": resolution: { @@ -600,6 +972,13 @@ packages: } engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 } + "@jest/types@30.0.5": + resolution: + { + integrity: sha512-aREYa3aku9SSnea4aX6bhKn4bgv3AXkgijoQgbYV3yvbiGt6z+MQ85+6mIhx9DsKW2BuB/cLR/A+tcMThx+KLQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + "@jridgewell/gen-mapping@0.3.13": resolution: { @@ -631,12 +1010,6 @@ packages: integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==, } - "@jsdevtools/ono@7.1.3": - resolution: - { - integrity: sha512-4JQNk+3mVzK3xh2rqd6RB4J46qUR19azEHBneZyTZM+c456qOrbbM/5xcR8huNCCcbVt7+UmizG6GuUvPvKUYg==, - } - "@mapbox/node-pre-gyp@1.0.11": resolution: { @@ -914,6 +1287,13 @@ packages: } engines: { node: ">=14" } + "@pkgr/core@0.2.9": + resolution: + { + integrity: sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA==, + } + engines: { node: ^12.20.0 || ^14.18.0 || >=16.0.0 } + "@radix-ui/primitive@1.1.3": resolution: { @@ -1198,6 +1578,25 @@ packages: integrity: sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==, } + "@redocly/ajv@8.11.3": + resolution: + { + integrity: sha512-4P3iZse91TkBiY+Dx5DUgxQ9GXkVJf++cmI0MOyLDxV9b5MUBI4II6ES8zA5JCbO72nKAJxWrw4PUPW+YP3ZDQ==, + } + + "@redocly/config@0.22.2": + resolution: + { + integrity: sha512-roRDai8/zr2S9YfmzUfNhKjOF0NdcOIqF7bhf4MVC5UxpjIysDjyudvlAiVbpPHp3eDRWbdzUgtkK1a7YiDNyQ==, + } + + "@redocly/openapi-core@1.34.5": + resolution: + { + integrity: sha512-0EbE8LRbkogtcCXU7liAyC00n9uNG9hJ+eMyHFdUsy9lB/WGqnEBgwjA9q2cyzAVcdTkQqTBBU1XePNnN3OijA==, + } + engines: { node: ">=18.17.0", npm: ">=9.5.0" } + "@reduxjs/toolkit@2.8.2": resolution: { 
@@ -1382,6 +1781,24 @@ packages: integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==, } + "@sinclair/typebox@0.34.41": + resolution: + { + integrity: sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==, + } + + "@sinonjs/commons@3.0.1": + resolution: + { + integrity: sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==, + } + + "@sinonjs/fake-timers@13.0.5": + resolution: + { + integrity: sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw==, + } + "@socket.io/component-emitter@3.1.2": resolution: { @@ -1418,6 +1835,20 @@ packages: integrity: sha512-KGYxvIOXcceOAbEk4bi/dVLEK9z8sZ0uBB3Il5b1rhfClSpcX0yfRO0KmTkqR2cnQDymwLB+25ZyMzICg/cm/A==, } + "@tanstack/query-core@5.85.9": + resolution: + { + integrity: sha512-5fxb9vwyftYE6KFLhhhDyLr8NO75+Wpu7pmTo+TkwKmMX2oxZDoLwcqGP8ItKSpUMwk3urWgQDZfyWr5Jm9LsQ==, + } + + "@tanstack/react-query@5.85.9": + resolution: + { + integrity: sha512-2T5zgSpcOZXGkH/UObIbIkGmUPQqZqn7esVQFXLOze622h4spgWf5jmvrqAo9dnI13/hyMcNsF1jsoDcb59nJQ==, + } + peerDependencies: + react: ^18 || ^19 + "@tootallnate/once@2.0.0": resolution: { @@ -1461,6 +1892,30 @@ packages: integrity: sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==, } + "@types/babel__core@7.20.5": + resolution: + { + integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==, + } + + "@types/babel__generator@7.27.0": + resolution: + { + integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==, + } + + "@types/babel__template@7.4.4": + resolution: + { + integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==, + } + + "@types/babel__traverse@7.28.0": + resolution: + { + integrity: sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==, + } + "@types/debug@4.1.12": resolution: { @@ -1491,6 +1946,13 @@ packages: integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==, } + "@types/ioredis@5.0.0": + resolution: + { + integrity: sha512-zJbJ3FVE17CNl5KXzdeSPtdltc4tMT3TzC6fxQS0sQngkbFZ6h+0uTafsRqu+eSLIugf6Yb0Ea0SUuRr42Nk9g==, + } + deprecated: This is a stub types definition. ioredis provides its own type definitions, so you do not need this installed. 
+ "@types/istanbul-lib-coverage@2.0.6": resolution: { @@ -1509,6 +1971,12 @@ packages: integrity: sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==, } + "@types/jest@30.0.0": + resolution: + { + integrity: sha512-XTYugzhuwqWjws0CVz8QpM36+T+Dz5mTEBKhNs/esGLnCIlGdRy+Dq78NRjd7ls7r8BC8ZRMOrKlkO1hU0JOwA==, + } + "@types/json-schema@7.0.15": resolution: { @@ -1575,6 +2043,12 @@ packages: integrity: sha512-WFHp9YUJQ6CKshqoC37iOlHnQSmxNc795UhB26CyBBttrN9svdIrUjl/NjnNmfcwtncN0h/0PPAFWv9ovP8mLA==, } + "@types/stack-utils@2.0.3": + resolution: + { + integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==, + } + "@types/ua-parser-js@0.7.39": resolution: { @@ -1888,12 +2362,6 @@ packages: cpu: [x64] os: [win32] - "@upstash/redis@1.35.3": - resolution: - { - integrity: sha512-hSjv66NOuahW3MisRGlSgoszU2uONAY2l5Qo3Sae8OT3/Tng9K+2/cBRuyPBX8egwEGcNNCF9+r0V6grNnhL+w==, - } - "@vercel/build-utils@8.4.12": resolution: { @@ -1950,13 +2418,6 @@ packages: integrity: sha512-IPAVaALuGAzt2apvTtBs5tB+8zZRzn/yG3AGp8dFyCsw/v5YOuk0Q5s8Z3fayLvJbFpjrKtqRNDZzVJBBU3MrQ==, } - "@vercel/kv@2.0.0": - resolution: - { - integrity: sha512-zdVrhbzZBYo5d1Hfn4bKtqCeKf0FuzW8rSHauzQVMUgv1+1JOwof2mWcBuI+YMJy8s0G0oqAUfQ7HgUDzb8EbA==, - } - engines: { node: ">=14.6" } - "@vercel/next@4.3.18": resolution: { @@ -2502,6 +2963,13 @@ packages: } engines: { node: ">= 6.0.0" } + agent-base@7.1.4: + resolution: + { + integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==, + } + engines: { node: ">= 14" } + ajv@6.12.6: resolution: { @@ -2514,6 +2982,20 @@ packages: integrity: sha512-SMJOdDP6LqTkD0Uq8qLi+gMwSt0imXLSV080qFVwJCpH9U6Mb+SUGHAXM0KNbcBPguytWyvFxcHgMLe2D2XSpw==, } + ansi-colors@4.1.3: + resolution: + { + integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==, + } + engines: { node: ">=6" } + + ansi-escapes@4.3.2: + resolution: + { + integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==, + } + engines: { node: ">=8" } + ansi-regex@5.0.1: resolution: { @@ -2535,6 +3017,13 @@ packages: } engines: { node: ">=8" } + ansi-styles@5.2.0: + resolution: + { + integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==, + } + engines: { node: ">=10" } + ansi-styles@6.2.1: resolution: { @@ -2587,6 +3076,12 @@ packages: integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==, } + argparse@1.0.10: + resolution: + { + integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==, + } + argparse@2.0.1: resolution: { @@ -2758,6 +3253,29 @@ packages: } engines: { node: ">= 0.4" } + babel-jest@30.1.2: + resolution: + { + integrity: sha512-IQCus1rt9kaSh7PQxLYRY5NmkNrNlU2TpabzwV7T2jljnpdHOcmnYYv8QmE04Li4S3a2Lj8/yXyET5pBarPr6g==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + peerDependencies: + "@babel/core": ^7.11.0 + + babel-plugin-istanbul@7.0.0: + resolution: + { + integrity: sha512-C5OzENSx/A+gt7t4VH1I2XsflxyPUmXRFPKBxt33xncdOmq7oROVM3bZv9Ysjjkv8OJYDMa+tKuKMvqU/H3xdw==, + } + engines: { node: ">=12" } + + babel-plugin-jest-hoist@30.0.1: + resolution: + { + integrity: sha512-zTPME3pI50NsFW8ZBaVIOeAxzEY7XHlmWeXXu9srI+9kNfzCUTy8MFan46xOGZY8NZThMqq+e3qZUKsvXbasnQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + 
babel-plugin-macros@3.1.0: resolution: { @@ -2765,6 +3283,23 @@ packages: } engines: { node: ">=10", npm: ">=6" } + babel-preset-current-node-syntax@1.2.0: + resolution: + { + integrity: sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==, + } + peerDependencies: + "@babel/core": ^7.0.0 || ^8.0.0-0 + + babel-preset-jest@30.0.1: + resolution: + { + integrity: sha512-+YHejD5iTWI46cZmcc/YtX4gaKBtdqCHCVfuVinizVpbmyjO3zYmeuyFdfA8duRqQZfgCAMlsfmkVbJ+e2MAJw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + peerDependencies: + "@babel/core": ^7.11.0 + bail@2.0.2: resolution: { @@ -2823,6 +3358,19 @@ packages: engines: { node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7 } hasBin: true + bs-logger@0.2.6: + resolution: + { + integrity: sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==, + } + engines: { node: ">= 6" } + + bser@2.1.1: + resolution: + { + integrity: sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==, + } + btoa@1.2.1: resolution: { @@ -2837,6 +3385,12 @@ packages: integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==, } + buffer-from@1.1.2: + resolution: + { + integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==, + } + buffer@6.0.3: resolution: { @@ -2857,17 +3411,6 @@ packages: } engines: { node: ">= 0.8" } - c12@1.11.1: - resolution: - { - integrity: sha512-KDU0TvSvVdaYcQKQ6iPHATGz/7p/KiVjPg4vQrB6Jg/wX9R0yl5RZxWm9IoZqaIHD2+6PZd81+KMGwRr/lRIUg==, - } - peerDependencies: - magicast: ^0.3.4 - peerDependenciesMeta: - magicast: - optional: true - call-bind-apply-helpers@1.0.2: resolution: { @@ -2903,12 +3446,19 @@ packages: } engines: { node: ">= 6" } - camelcase@8.0.0: + camelcase@5.3.1: resolution: { - integrity: sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA==, + integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==, } - engines: { node: ">=16" } + engines: { node: ">=6" } + + camelcase@6.3.0: + resolution: + { + integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==, + } + engines: { node: ">=10" } caniuse-lite@1.0.30001734: resolution: @@ -2936,6 +3486,19 @@ packages: } engines: { node: ">=10" } + change-case@5.4.4: + resolution: + { + integrity: sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w==, + } + + char-regex@1.0.2: + resolution: + { + integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==, + } + engines: { node: ">=10" } + character-entities-html4@2.1.0: resolution: { @@ -3007,11 +3570,12 @@ packages: } engines: { node: ">=8" } - citty@0.1.6: + ci-info@4.3.0: resolution: { - integrity: sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==, + integrity: sha512-l+2bNRMiQgcfILUi33labAZYIWlH1kWDp+ecNo5iisRKrbm0xcRyCww71/YU0Fkw0mAFpz9bJayXPjey6vkmaQ==, } + engines: { node: ">=8" } cjs-module-lexer@1.2.3: resolution: @@ -3019,6 +3583,12 @@ packages: integrity: sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==, } + cjs-module-lexer@2.1.0: + resolution: + { + integrity: sha512-UX0OwmYRYQQetfrLEZeewIFFI+wSTofC+pMBLNuH3RUuu/xzG1oz84UCEDOSoQlN3fZ4+AzmV50ZYvGqkMh9yA==, + } + 
classnames@2.5.1: resolution: { @@ -3031,6 +3601,13 @@ packages: integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==, } + cliui@8.0.1: + resolution: + { + integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==, + } + engines: { node: ">=12" } + clsx@2.1.1: resolution: { @@ -3045,12 +3622,25 @@ packages: } engines: { node: ">=0.10.0" } + co@4.6.0: + resolution: + { + integrity: sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==, + } + engines: { iojs: ">= 1.0.0", node: ">= 0.12.0" } + code-block-writer@10.1.1: resolution: { integrity: sha512-67ueh2IRGst/51p0n6FvPrnRjAGHY5F8xdjkgrYE7DDzpJe6qA07RYQ9VcoUeo5ATOjSOiWpSL3SWBRRbempMw==, } + collect-v8-coverage@1.0.2: + resolution: + { + integrity: sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==, + } + color-convert@2.0.1: resolution: { @@ -3071,6 +3661,12 @@ packages: } hasBin: true + colorette@1.4.0: + resolution: + { + integrity: sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g==, + } + combined-stream@1.0.8: resolution: { @@ -3084,13 +3680,6 @@ packages: integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==, } - commander@12.1.0: - resolution: - { - integrity: sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==, - } - engines: { node: ">=18" } - commander@4.1.1: resolution: { @@ -3110,19 +3699,6 @@ packages: integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==, } - confbox@0.1.8: - resolution: - { - integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==, - } - - consola@3.4.2: - resolution: - { - integrity: sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==, - } - engines: { node: ^14.18.0 || >=16.10.0 } - console-control-strings@1.1.0: resolution: { @@ -3149,6 +3725,12 @@ packages: integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==, } + convert-source-map@2.0.0: + resolution: + { + integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==, + } + cookie@0.7.2: resolution: { @@ -3270,12 +3852,30 @@ packages: integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==, } + dedent@1.7.0: + resolution: + { + integrity: sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==, + } + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + deep-is@0.1.4: resolution: { integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==, } + deepmerge@4.3.1: + resolution: + { + integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==, + } + engines: { node: ">=0.10.0" } + define-data-property@1.1.4: resolution: { @@ -3290,12 +3890,6 @@ packages: } engines: { node: ">= 0.4" } - defu@6.1.4: - resolution: - { - integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==, - } - delayed-stream@1.0.0: resolution: { @@ -3330,12 +3924,6 @@ packages: } engines: { node: ">=6" } - destr@2.0.5: - 
resolution: - { - integrity: sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==, - } - detect-europe-js@0.1.2: resolution: { @@ -3357,6 +3945,13 @@ packages: } engines: { node: ">=8" } + detect-newline@3.1.0: + resolution: + { + integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==, + } + engines: { node: ">=8" } + detect-node-es@1.1.0: resolution: { @@ -3413,13 +4008,6 @@ packages: integrity: sha512-h7g5eduvnLwowJJPkcB5lNzo8vd/Hx4e3I4IOtLpX0qB2wBiuryGLNa61MeFre4b6gMaQIhegMIZ2I8rQCAJwQ==, } - dotenv@16.6.1: - resolution: - { - integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==, - } - engines: { node: ">=12" } - dunder-proto@1.0.1: resolution: { @@ -3447,6 +4035,13 @@ packages: integrity: sha512-rFCxROw7aOe4uPTfIAx+rXv9cEcGx+buAF4npnhtTqCJk5KDFRnh3+KYj7rdVh6lsFt5/aPs+Irj9rZ33WMA7w==, } + emittery@0.13.1: + resolution: + { + integrity: sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==, + } + engines: { node: ">=12" } + emoji-regex@8.0.0: resolution: { @@ -3753,6 +4348,13 @@ packages: } engines: { node: ">=6" } + escape-string-regexp@2.0.0: + resolution: + { + integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==, + } + engines: { node: ">=8" } + escape-string-regexp@4.0.0: resolution: { @@ -3899,6 +4501,14 @@ packages: } engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + esprima@4.0.1: + resolution: + { + integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==, + } + engines: { node: ">=4" } + hasBin: true + esquery@1.6.0: resolution: { @@ -3973,6 +4583,27 @@ packages: } engines: { node: ^8.12.0 || >=9.7.0 } + execa@5.1.1: + resolution: + { + integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==, + } + engines: { node: ">=10" } + + exit-x@0.2.2: + resolution: + { + integrity: sha512-+I6B/IkJc1o/2tiURyz/ivu/O0nKNEArIUB5O7zBrlDVJr22SCLH3xTeEry428LvFhRzIA1g8izguxJ/gbNcVQ==, + } + engines: { node: ">= 0.8.0" } + + expect@30.1.2: + resolution: + { + integrity: sha512-xvHszRavo28ejws8FpemjhwswGj4w/BetHIL8cU49u4sGyXDw2+p3YbeDbj6xzlxi6kWTjIRSTJ+9sNXPnF0Zg==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + extend@3.0.2: resolution: { @@ -4022,6 +4653,12 @@ packages: integrity: sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==, } + fb-watchman@2.0.2: + resolution: + { + integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==, + } + fd-slicer@1.1.0: resolution: { @@ -4065,6 +4702,13 @@ packages: integrity: sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==, } + find-up@4.1.0: + resolution: + { + integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==, + } + engines: { node: ">=8" } + find-up@5.0.0: resolution: { @@ -4213,12 +4857,26 @@ packages: } engines: { node: ">= 4" } + gensync@1.0.0-beta.2: + resolution: + { + integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==, + } + engines: { node: ">=6.9.0" } + get-browser-rtc@1.1.0: resolution: { integrity: sha512-MghbMJ61EJrRsDe7w1Bvqt3ZsBuqhce5nrn/XAwgwOXhcsz53/ltdxOse1h/8eKXj5slzxdsz56g5rzOFSGwfQ==, } + get-caller-file@2.0.5: + 
resolution: + { + integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==, + } + engines: { node: 6.* || 8.* || >= 10.* } + get-intrinsic@1.3.0: resolution: { @@ -4233,6 +4891,13 @@ packages: } engines: { node: ">=6" } + get-package-type@0.1.0: + resolution: + { + integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==, + } + engines: { node: ">=8.0.0" } + get-proto@1.0.1: resolution: { @@ -4247,6 +4912,13 @@ packages: } engines: { node: ">=8" } + get-stream@6.0.1: + resolution: + { + integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==, + } + engines: { node: ">=10" } + get-symbol-description@1.1.0: resolution: { @@ -4260,13 +4932,6 @@ packages: integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==, } - giget@1.2.5: - resolution: - { - integrity: sha512-r1ekGw/Bgpi3HLV3h1MRBIlSAdHoIMklpaQ3OQLFcRw9PwAj2rqigvIbg+dBUI51OxVI2jsEtDywDBjSiuf7Ug==, - } - hasBin: true - glob-parent@5.1.2: resolution: { @@ -4437,6 +5102,12 @@ packages: integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==, } + html-escaper@2.0.2: + resolution: + { + integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==, + } + html-url-attributes@3.0.1: resolution: { @@ -4464,6 +5135,13 @@ packages: } engines: { node: ">= 6" } + https-proxy-agent@7.0.6: + resolution: + { + integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==, + } + engines: { node: ">= 14" } + human-signals@1.1.1: resolution: { @@ -4471,6 +5149,13 @@ packages: } engines: { node: ">=8.12.0" } + human-signals@2.1.0: + resolution: + { + integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==, + } + engines: { node: ">=10.17.0" } + hyperhtml-style@0.1.3: resolution: { @@ -4529,6 +5214,14 @@ packages: } engines: { node: ">=6" } + import-local@3.2.0: + resolution: + { + integrity: sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==, + } + engines: { node: ">=8" } + hasBin: true + imurmurhash@0.1.4: resolution: { @@ -4536,6 +5229,13 @@ packages: } engines: { node: ">=0.8.19" } + index-to-position@1.1.0: + resolution: + { + integrity: sha512-XPdx9Dq4t9Qk1mTMbWONJqU7boCoumEH7fRET37HX5+khDUl3J2W6PdALxhILYlIYx2amlwYcRPp28p0tSiojg==, + } + engines: { node: ">=18" } + inflight@1.0.6: resolution: { @@ -4709,6 +5409,13 @@ packages: } engines: { node: ">=8" } + is-generator-fn@2.1.0: + resolution: + { + integrity: sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==, + } + engines: { node: ">=6" } + is-generator-function@1.1.0: resolution: { @@ -4864,6 +5571,41 @@ packages: integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==, } + istanbul-lib-coverage@3.2.2: + resolution: + { + integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==, + } + engines: { node: ">=8" } + + istanbul-lib-instrument@6.0.3: + resolution: + { + integrity: sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==, + } + engines: { node: ">=10" } + + istanbul-lib-report@3.0.1: + resolution: + { + integrity: 
sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==, + } + engines: { node: ">=10" } + + istanbul-lib-source-maps@5.0.6: + resolution: + { + integrity: sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==, + } + engines: { node: ">=10" } + + istanbul-reports@3.2.0: + resolution: + { + integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==, + } + engines: { node: ">=8" } + iterator.prototype@1.1.5: resolution: { @@ -4884,6 +5626,168 @@ packages: integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==, } + jest-changed-files@30.0.5: + resolution: + { + integrity: sha512-bGl2Ntdx0eAwXuGpdLdVYVr5YQHnSZlQ0y9HVDu565lCUAe9sj6JOtBbMmBBikGIegne9piDDIOeiLVoqTkz4A==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-circus@30.1.3: + resolution: + { + integrity: sha512-Yf3dnhRON2GJT4RYzM89t/EXIWNxKTpWTL9BfF3+geFetWP4XSvJjiU1vrWplOiUkmq8cHLiwuhz+XuUp9DscA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-cli@30.1.3: + resolution: + { + integrity: sha512-G8E2Ol3OKch1DEeIBl41NP7OiC6LBhfg25Btv+idcusmoUSpqUkbrneMqbW9lVpI/rCKb/uETidb7DNteheuAQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + jest-config@30.1.3: + resolution: + { + integrity: sha512-M/f7gqdQEPgZNA181Myz+GXCe8jXcJsGjCMXUzRj22FIXsZOyHNte84e0exntOvdPaeh9tA0w+B8qlP2fAezfw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + peerDependencies: + "@types/node": "*" + esbuild-register: ">=3.4.0" + ts-node: ">=9.0.0" + peerDependenciesMeta: + "@types/node": + optional: true + esbuild-register: + optional: true + ts-node: + optional: true + + jest-diff@30.1.2: + resolution: + { + integrity: sha512-4+prq+9J61mOVXCa4Qp8ZjavdxzrWQXrI80GNxP8f4tkI2syPuPrJgdRPZRrfUTRvIoUwcmNLbqEJy9W800+NQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-docblock@30.0.1: + resolution: + { + integrity: sha512-/vF78qn3DYphAaIc3jy4gA7XSAz167n9Bm/wn/1XhTLW7tTBIzXtCJpb/vcmc73NIIeeohCbdL94JasyXUZsGA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-each@30.1.0: + resolution: + { + integrity: sha512-A+9FKzxPluqogNahpCv04UJvcZ9B3HamqpDNWNKDjtxVRYB8xbZLFuCr8JAJFpNp83CA0anGQFlpQna9Me+/tQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-environment-node@30.1.2: + resolution: + { + integrity: sha512-w8qBiXtqGWJ9xpJIA98M0EIoq079GOQRQUyse5qg1plShUCQ0Ek1VTTcczqKrn3f24TFAgFtT+4q3aOXvjbsuA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-haste-map@30.1.0: + resolution: + { + integrity: sha512-JLeM84kNjpRkggcGpQLsV7B8W4LNUWz7oDNVnY1Vjj22b5/fAb3kk3htiD+4Na8bmJmjJR7rBtS2Rmq/NEcADg==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-leak-detector@30.1.0: + resolution: + { + integrity: sha512-AoFvJzwxK+4KohH60vRuHaqXfWmeBATFZpzpmzNmYTtmRMiyGPVhkXpBqxUQunw+dQB48bDf4NpUs6ivVbRv1g==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-matcher-utils@30.1.2: + resolution: + { + integrity: sha512-7ai16hy4rSbDjvPTuUhuV8nyPBd6EX34HkBsBcBX2lENCuAQ0qKCPb/+lt8OSWUa9WWmGYLy41PrEzkwRwoGZQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-message-util@30.1.0: + resolution: + { + 
integrity: sha512-HizKDGG98cYkWmaLUHChq4iN+oCENohQLb7Z5guBPumYs+/etonmNFlg1Ps6yN9LTPyZn+M+b/9BbnHx3WTMDg==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-mock@30.0.5: + resolution: + { + integrity: sha512-Od7TyasAAQX/6S+QCbN6vZoWOMwlTtzzGuxJku1GhGanAjz9y+QsQkpScDmETvdc9aSXyJ/Op4rhpMYBWW91wQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-pnp-resolver@1.2.3: + resolution: + { + integrity: sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==, + } + engines: { node: ">=6" } + peerDependencies: + jest-resolve: "*" + peerDependenciesMeta: + jest-resolve: + optional: true + + jest-regex-util@30.0.1: + resolution: + { + integrity: sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-resolve-dependencies@30.1.3: + resolution: + { + integrity: sha512-DNfq3WGmuRyHRHfEet+Zm3QOmVFtIarUOQHHryKPc0YL9ROfgWZxl4+aZq/VAzok2SS3gZdniP+dO4zgo59hBg==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-resolve@30.1.3: + resolution: + { + integrity: sha512-DI4PtTqzw9GwELFS41sdMK32Ajp3XZQ8iygeDMWkxlRhm7uUTOFSZFVZABFuxr0jvspn8MAYy54NxZCsuCTSOw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-runner@30.1.3: + resolution: + { + integrity: sha512-dd1ORcxQraW44Uz029TtXj85W11yvLpDuIzNOlofrC8GN+SgDlgY4BvyxJiVeuabA1t6idjNbX59jLd2oplOGQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-runtime@30.1.3: + resolution: + { + integrity: sha512-WS8xgjuNSphdIGnleQcJ3AKE4tBKOVP+tKhCD0u+Tb2sBmsU8DxfbBpZX7//+XOz81zVs4eFpJQwBNji2Y07DA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-snapshot@30.1.2: + resolution: + { + integrity: sha512-4q4+6+1c8B6Cy5pGgFvjDy/Pa6VYRiGu0yQafKkJ9u6wQx4G5PqI2QR6nxTl43yy7IWsINwz6oT4o6tD12a8Dg==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + jest-util@29.7.0: resolution: { @@ -4891,6 +5795,27 @@ packages: } engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 } + jest-util@30.0.5: + resolution: + { + integrity: sha512-pvyPWssDZR0FlfMxCBoc0tvM8iUEskaRFALUtGQYzVEAqisAztmy+R8LnU14KT4XA0H/a5HMVTXat1jLne010g==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-validate@30.1.0: + resolution: + { + integrity: sha512-7P3ZlCFW/vhfQ8pE7zW6Oi4EzvuB4sgR72Q1INfW9m0FGo0GADYlPwIkf4CyPq7wq85g+kPMtPOHNAdWHeBOaA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest-watcher@30.1.3: + resolution: + { + integrity: sha512-6jQUZCP1BTL2gvG9E4YF06Ytq4yMb4If6YoQGRR6PpjtqOXSP3sKe2kqwB6SQ+H9DezOfZaSLnmka1NtGm3fCQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + jest-worker@29.7.0: resolution: { @@ -4898,6 +5823,26 @@ packages: } engines: { node: ^14.15.0 || ^16.10.0 || >=18.0.0 } + jest-worker@30.1.0: + resolution: + { + integrity: sha512-uvWcSjlwAAgIu133Tt77A05H7RIk3Ho8tZL50bQM2AkvLdluw9NG48lRCl3Dt+MOH719n/0nnb5YxUwcuJiKRA==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + + jest@30.1.3: + resolution: + { + integrity: sha512-Ry+p2+NLk6u8Agh5yVqELfUJvRfV51hhVBRIB5yZPY7mU0DGBmOuFG5GebZbMbm86cdQNK0fhJuDX8/1YorISQ==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + jiti@1.21.7: resolution: { @@ -4911,12 +5856,26 @@ 
packages: integrity: sha512-1vUQX+IdDMVPj4k8kOxgUqlcK518yluMuGZwqlr44FS1ppZB/5GWh4rZG89erpOBOJjU/OBsnCVFfapsRz6nEA==, } + js-levenshtein@1.1.6: + resolution: + { + integrity: sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==, + } + engines: { node: ">=0.10.0" } + js-tokens@4.0.0: resolution: { integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==, } + js-yaml@3.14.1: + resolution: + { + integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==, + } + hasBin: true + js-yaml@4.1.0: resolution: { @@ -4981,6 +5940,14 @@ packages: } hasBin: true + json5@2.2.3: + resolution: + { + integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==, + } + engines: { node: ">=6" } + hasBin: true + jsonfile@4.0.0: resolution: { @@ -5019,6 +5986,13 @@ packages: } engines: { node: ">=0.10" } + leven@3.1.0: + resolution: + { + integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==, + } + engines: { node: ">=6" } + levn@0.4.1: resolution: { @@ -5057,6 +6031,13 @@ packages: integrity: sha512-14/H1aX7hzBBmmh7sGPd+AOMkkIrHM3Z1PAyGgZigA1H1p5O5ANnMyWzvpAETtG68/dC4pC0ncy3+PPGzXZHPg==, } + locate-path@5.0.0: + resolution: + { + integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==, + } + engines: { node: ">=8" } + locate-path@6.0.0: resolution: { @@ -5076,6 +6057,12 @@ packages: integrity: sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg==, } + lodash.memoize@4.1.2: + resolution: + { + integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==, + } + lodash.merge@4.6.2: resolution: { @@ -5101,6 +6088,12 @@ packages: integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==, } + lru-cache@5.1.1: + resolution: + { + integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==, + } + lru-cache@6.0.0: resolution: { @@ -5130,12 +6123,25 @@ packages: } engines: { node: ">=8" } + make-dir@4.0.0: + resolution: + { + integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==, + } + engines: { node: ">=10" } + make-error@1.3.6: resolution: { integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==, } + makeerror@1.0.12: + resolution: + { + integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==, + } + math-intrinsics@1.1.0: resolution: { @@ -5460,12 +6466,6 @@ packages: engines: { node: ">=10" } hasBin: true - mlly@1.7.4: - resolution: - { - integrity: sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw==, - } - mri@1.2.0: resolution: { @@ -5566,24 +6566,12 @@ packages: sass: optional: true - node-abort-controller@3.1.1: - resolution: - { - integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==, - } - node-addon-api@7.1.1: resolution: { integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==, } - node-fetch-native@1.6.7: - resolution: - { - integrity: sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==, - } - 
node-fetch@2.6.7: resolution: { @@ -5627,6 +6615,12 @@ packages: } hasBin: true + node-int64@0.4.0: + resolution: + { + integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==, + } + node-releases@2.0.19: resolution: { @@ -5690,14 +6684,6 @@ packages: react-router-dom: optional: true - nypm@0.5.4: - resolution: - { - integrity: sha512-X0SNNrZiGU8/e/zAB7sCTtdxWTMSIO73q+xuKgglm2Yvzwlo8UoC5FNySQFCvl84uPaeADkqHUZUkWy4aH4xOA==, - } - engines: { node: ^14.16.0 || >=16.10.0 } - hasBin: true - oauth@0.9.15: resolution: { @@ -5774,12 +6760,6 @@ packages: } engines: { node: ">= 0.4" } - ohash@1.1.6: - resolution: - { - integrity: sha512-TBu7PtV8YkAZn0tSxobKY2n2aAQva936lhRrj6957aDaCf9IEtqsKbgMzXE/F/sjqYOwmrukeORHNLe5glk7Cg==, - } - oidc-token-hash@5.1.1: resolution: { @@ -5806,6 +6786,36 @@ packages: } engines: { node: ">=6" } + openapi-fetch@0.14.0: + resolution: + { + integrity: sha512-PshIdm1NgdLvb05zp8LqRQMNSKzIlPkyMxYFxwyHR+UlKD4t2nUjkDhNxeRbhRSEd3x5EUNh2w5sJYwkhOH4fg==, + } + + openapi-react-query@0.5.0: + resolution: + { + integrity: sha512-VtyqiamsbWsdSWtXmj/fAR+m9nNxztsof6h8ZIsjRj8c8UR/x9AIwHwd60IqwgymmFwo7qfSJQ1ZzMJrtqjQVg==, + } + peerDependencies: + "@tanstack/react-query": ^5.25.0 + openapi-fetch: ^0.14.0 + + openapi-typescript-helpers@0.0.15: + resolution: + { + integrity: sha512-opyTPaunsklCBpTK8JGef6mfPhLSnyy5a0IN9vKtx3+4aExf+KxEqYwIy3hqkedXIB97u357uLMJsOnm3GVjsw==, + } + + openapi-typescript@7.9.1: + resolution: + { + integrity: sha512-9gJtoY04mk6iPMbToPjPxEAtfXZ0dTsMZtsgUI8YZta0btPPig9DJFP4jlerQD/7QOwYgb0tl+zLUpDf7vb7VA==, + } + hasBin: true + peerDependencies: + typescript: ^5.x + openid-client@5.7.1: resolution: { @@ -5840,6 +6850,13 @@ packages: } engines: { node: ">=8" } + p-limit@2.3.0: + resolution: + { + integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==, + } + engines: { node: ">=6" } + p-limit@3.1.0: resolution: { @@ -5847,6 +6864,13 @@ packages: } engines: { node: ">=10" } + p-locate@4.1.0: + resolution: + { + integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==, + } + engines: { node: ">=8" } + p-locate@5.0.0: resolution: { @@ -5854,6 +6878,13 @@ packages: } engines: { node: ">=10" } + p-try@2.2.0: + resolution: + { + integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==, + } + engines: { node: ">=6" } + package-json-from-dist@1.0.1: resolution: { @@ -5880,6 +6911,13 @@ packages: } engines: { node: ">=8" } + parse-json@8.3.0: + resolution: + { + integrity: sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==, + } + engines: { node: ">=18" } + parse-ms@2.1.0: resolution: { @@ -5959,30 +6997,12 @@ packages: } engines: { node: ">=8" } - pathe@1.1.2: - resolution: - { - integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==, - } - - pathe@2.0.3: - resolution: - { - integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==, - } - pend@1.2.0: resolution: { integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==, } - perfect-debounce@1.0.0: - resolution: - { - integrity: sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==, - } - perfect-freehand@1.2.2: resolution: { @@ -6029,11 +7049,19 @@ packages: } engines: { node: ">= 6" } - 
pkg-types@1.3.1: + pkg-dir@4.2.0: resolution: { - integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==, + integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==, } + engines: { node: ">=8" } + + pluralize@8.0.0: + resolution: + { + integrity: sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==, + } + engines: { node: ">=4" } possible-typed-array-names@1.1.0: resolution: @@ -6146,6 +7174,13 @@ packages: integrity: sha512-WuxUnVtlWL1OfZFQFuqvnvs6MiAGk9UNsBostyBOB0Is9wb5uRESevA6rnl/rkksXaGX3GzZhPup5d6Vp1nFew==, } + pretty-format@30.0.5: + resolution: + { + integrity: sha512-D1tKtYvByrBkFLe2wHJl2bwMJIiT8rW+XA+TiataH79/FszLQMrpGEvzUVkzPau7OCO0Qnrhpe87PqtOAIB8Yw==, + } + engines: { node: ^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0 } + pretty-ms@7.0.1: resolution: { @@ -6209,6 +7244,12 @@ packages: } engines: { node: ">=6" } + pure-rand@7.0.1: + resolution: + { + integrity: sha512-oTUZM/NAZS8p7ANR3SHh30kXB+zK2r2BPcEn/awJIbOvq82WoMN4p62AWWp3Hhw50G0xMsw1mhIBLqHw64EcNQ==, + } + qr.js@0.0.0: resolution: { @@ -6234,12 +7275,6 @@ packages: } engines: { node: ">= 0.8" } - rc9@2.1.2: - resolution: - { - integrity: sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==, - } - react-dom@18.3.1: resolution: { @@ -6271,6 +7306,12 @@ packages: integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==, } + react-is@18.3.1: + resolution: + { + integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==, + } + react-markdown@9.1.0: resolution: { @@ -6392,13 +7433,6 @@ packages: } engines: { node: ">=4" } - redlock@5.0.0-beta.2: - resolution: - { - integrity: sha512-2RDWXg5jgRptDrB1w9O/JgSZC0j7y4SlaXnor93H/UJm/QyDiFgBKNtrh0TI6oCXqYSaSoXxFh6Sd3VtYfhRXw==, - } - engines: { node: ">=12" } - redux-thunk@3.1.0: resolution: { @@ -6439,6 +7473,13 @@ packages: integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==, } + require-directory@2.1.1: + resolution: + { + integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==, + } + engines: { node: ">=0.10.0" } + require-from-string@2.0.2: resolution: { @@ -6458,6 +7499,13 @@ packages: integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==, } + resolve-cwd@3.0.0: + resolution: + { + integrity: sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==, + } + engines: { node: ">=8" } + resolve-from@4.0.0: resolution: { @@ -6727,6 +7775,13 @@ packages: integrity: sha512-D1SaWpOW8afq1CZGWB8xTfrT3FekjQmPValrqncJMX7QFl8YwhrPTZvMCANLtgBwwdS+7zURyqxDDEmY558tTw==, } + slash@3.0.0: + resolution: + { + integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==, + } + engines: { node: ">=8" } + socket.io-client@4.7.2: resolution: { @@ -6748,6 +7803,12 @@ packages: } engines: { node: ">=0.10.0" } + source-map-support@0.5.13: + resolution: + { + integrity: sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==, + } + source-map@0.5.7: resolution: { @@ -6768,6 +7829,12 @@ packages: integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==, } + sprintf-js@1.0.3: + resolution: + { + 
integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==, + } + sprintf-js@1.1.3: resolution: { @@ -6780,6 +7847,13 @@ packages: integrity: sha512-+L3ccpzibovGXFK+Ap/f8LOS0ahMrHTf3xu7mMLSpEGU0EO9ucaysSylKo9eRDFNhWve/y275iPmIZ4z39a9iA==, } + stack-utils@2.0.6: + resolution: + { + integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==, + } + engines: { node: ">=10" } + stacktrace-parser@0.1.11: resolution: { @@ -6832,6 +7906,13 @@ packages: } engines: { node: ">=10.0.0" } + string-length@4.0.2: + resolution: + { + integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==, + } + engines: { node: ">=10" } + string-width@4.2.3: resolution: { @@ -6920,6 +8001,13 @@ packages: } engines: { node: ">=4" } + strip-bom@4.0.0: + resolution: + { + integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==, + } + engines: { node: ">=8" } + strip-final-newline@2.0.0: resolution: { @@ -6976,6 +8064,13 @@ packages: engines: { node: ">=16 || 14 >=14.17" } hasBin: true + supports-color@10.2.0: + resolution: + { + integrity: sha512-5eG9FQjEjDbAlI5+kdpdyPIBMRH4GfTVDGREVupaZHmVoppknhM29b/S9BkQz7cathp85BVgRi/As3Siln7e0Q==, + } + engines: { node: ">=18" } + supports-color@7.2.0: resolution: { @@ -7004,6 +8099,13 @@ packages: } engines: { node: ">= 0.4" } + synckit@0.11.11: + resolution: + { + integrity: sha512-MeQTA1r0litLUf0Rp/iisCaL8761lKAZHaimlbGK4j0HysC4PLfqygQj9srcs0m2RdtDYnF8UuYyKpbjHYp7Jw==, + } + engines: { node: ^14.18.0 || >=16.0.0 } + tailwindcss@3.4.17: resolution: { @@ -7026,6 +8128,13 @@ packages: } engines: { node: ">=10" } + test-exclude@6.0.0: + resolution: + { + integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==, + } + engines: { node: ">=8" } + thenify-all@1.6.0: resolution: { @@ -7046,12 +8155,6 @@ packages: } engines: { node: ">=10" } - tinyexec@0.3.2: - resolution: - { - integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==, - } - tinyglobby@0.2.14: resolution: { @@ -7059,6 +8162,12 @@ packages: } engines: { node: ">=12.0.0" } + tmpl@1.0.5: + resolution: + { + integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==, + } + to-regex-range@5.0.1: resolution: { @@ -7113,6 +8222,36 @@ packages: integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==, } + ts-jest@29.4.1: + resolution: + { + integrity: sha512-SaeUtjfpg9Uqu8IbeDKtdaS0g8lS6FT6OzM3ezrDfErPJPHNDo/Ey+VFGP1bQIDfagYDLyRpd7O15XpG1Es2Uw==, + } + engines: { node: ^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0 } + hasBin: true + peerDependencies: + "@babel/core": ">=7.0.0-beta.0 <8" + "@jest/transform": ^29.0.0 || ^30.0.0 + "@jest/types": ^29.0.0 || ^30.0.0 + babel-jest: ^29.0.0 || ^30.0.0 + esbuild: "*" + jest: ^29.0.0 || ^30.0.0 + jest-util: ^29.0.0 || ^30.0.0 + typescript: ">=4.3 <6" + peerDependenciesMeta: + "@babel/core": + optional: true + "@jest/transform": + optional: true + "@jest/types": + optional: true + babel-jest: + optional: true + esbuild: + optional: true + jest-util: + optional: true + ts-morph@12.0.0: resolution: { @@ -7161,6 +8300,20 @@ packages: } engines: { node: ">= 0.8.0" } + type-detect@4.0.8: + resolution: + { + integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==, 
+ } + engines: { node: ">=4" } + + type-fest@0.21.3: + resolution: + { + integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==, + } + engines: { node: ">=10" } + type-fest@0.7.1: resolution: { @@ -7168,6 +8321,13 @@ packages: } engines: { node: ">=8" } + type-fest@4.41.0: + resolution: + { + integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==, + } + engines: { node: ">=16" } + typed-array-buffer@1.0.3: resolution: { @@ -7244,12 +8404,6 @@ packages: integrity: sha512-v+Z8Jal+GtmKGtJ34GIQlCJAxrDt9kbjpNsNvYoAXFyr4gNfWlD4uJJuoNNu/0UTVaKvQwHaSU095YDl71lKPw==, } - ufo@1.6.1: - resolution: - { - integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==, - } - uglify-js@3.19.3: resolution: { @@ -7289,12 +8443,6 @@ packages: } engines: { node: ">= 0.4" } - uncrypto@0.1.3: - resolution: - { - integrity: sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q==, - } - undici-types@7.10.0: resolution: { @@ -7386,6 +8534,12 @@ packages: integrity: sha512-MJu7ypHq6QasgF5YRTjqscSzQp/W11zoUk6kvmlH+fmWEs63Y0Eib13hYFwAzagRJcVY8WVnlV+eBDUGMJ5IbA==, } + uri-js-replace@1.0.1: + resolution: + { + integrity: sha512-W+C9NWNLFOoBI2QWDp4UT9pv65r2w5Cx+3sTYFvtMdDBxkKt1syCqsUdSFAChbEe1uK5TfS04wt/nGwmaeIQ0g==, + } + uri-js@4.4.1: resolution: { @@ -7464,6 +8618,13 @@ packages: integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==, } + v8-to-istanbul@9.3.0: + resolution: + { + integrity: sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==, + } + engines: { node: ">=10.12.0" } + vercel@37.14.0: resolution: { @@ -7484,6 +8645,12 @@ packages: integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==, } + walker@1.0.8: + resolution: + { + integrity: sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==, + } + wavesurfer.js@7.10.1: resolution: { @@ -7597,6 +8764,13 @@ packages: integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==, } + write-file-atomic@5.0.1: + resolution: + { + integrity: sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw==, + } + engines: { node: ^14.17.0 || ^16.13.0 || >=18.0.0 } + ws@8.17.1: resolution: { @@ -7633,6 +8807,13 @@ packages: } engines: { node: ">=0.4.0" } + y18n@5.0.8: + resolution: + { + integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==, + } + engines: { node: ">=10" } + yallist@3.1.1: resolution: { @@ -7645,6 +8826,12 @@ packages: integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==, } + yaml-ast-parser@0.0.43: + resolution: + { + integrity: sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==, + } + yaml@1.10.2: resolution: { @@ -7660,6 +8847,20 @@ packages: engines: { node: ">= 14.6" } hasBin: true + yargs-parser@21.1.1: + resolution: + { + integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==, + } + engines: { node: ">=12" } + + yargs@17.7.2: + resolution: + { + integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==, + } + engines: { node: ">=12" } + 
yauzl-clone@1.0.4: resolution: { @@ -7694,6 +8895,12 @@ packages: } engines: { node: ">=10" } + zod@4.1.5: + resolution: + { + integrity: sha512-rcUUZqlLJgBC33IT3PNMgsCq6TzLQEG/Ei/KTCU0PedSWRMAXoOUN+4t/0H+Q8bdnLPdqUYnvboJT0bn/229qg==, + } + zwitch@2.0.4: resolution: { @@ -7703,11 +8910,10 @@ packages: snapshots: "@alloc/quick-lru@5.2.0": {} - "@apidevtools/json-schema-ref-parser@11.6.4": + "@ampproject/remapping@2.3.0": dependencies: - "@jsdevtools/ono": 7.1.3 - "@types/json-schema": 7.0.15 - js-yaml: 4.1.0 + "@jridgewell/gen-mapping": 0.3.13 + "@jridgewell/trace-mapping": 0.3.30 "@ark-ui/react@5.18.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1)": dependencies: @@ -7779,6 +8985,28 @@ snapshots: js-tokens: 4.0.0 picocolors: 1.1.1 + "@babel/compat-data@7.28.0": {} + + "@babel/core@7.28.3": + dependencies: + "@ampproject/remapping": 2.3.0 + "@babel/code-frame": 7.27.1 + "@babel/generator": 7.28.3 + "@babel/helper-compilation-targets": 7.27.2 + "@babel/helper-module-transforms": 7.28.3(@babel/core@7.28.3) + "@babel/helpers": 7.28.3 + "@babel/parser": 7.28.3 + "@babel/template": 7.27.2 + "@babel/traverse": 7.28.3 + "@babel/types": 7.28.2 + convert-source-map: 2.0.0 + debug: 4.4.1(supports-color@9.4.0) + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + "@babel/generator@7.28.0": dependencies: "@babel/parser": 7.28.0 @@ -7787,6 +9015,22 @@ snapshots: "@jridgewell/trace-mapping": 0.3.30 jsesc: 3.1.0 + "@babel/generator@7.28.3": + dependencies: + "@babel/parser": 7.28.3 + "@babel/types": 7.28.2 + "@jridgewell/gen-mapping": 0.3.13 + "@jridgewell/trace-mapping": 0.3.30 + jsesc: 3.1.0 + + "@babel/helper-compilation-targets@7.27.2": + dependencies: + "@babel/compat-data": 7.28.0 + "@babel/helper-validator-option": 7.27.1 + browserslist: 4.25.2 + lru-cache: 5.1.1 + semver: 6.3.1 + "@babel/helper-globals@7.28.0": {} "@babel/helper-module-imports@7.27.1": @@ -7796,14 +9040,121 @@ snapshots: transitivePeerDependencies: - supports-color + "@babel/helper-module-transforms@7.28.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-module-imports": 7.27.1 + "@babel/helper-validator-identifier": 7.27.1 + "@babel/traverse": 7.28.3 + transitivePeerDependencies: + - supports-color + + "@babel/helper-plugin-utils@7.27.1": {} + "@babel/helper-string-parser@7.27.1": {} "@babel/helper-validator-identifier@7.27.1": {} + "@babel/helper-validator-option@7.27.1": {} + + "@babel/helpers@7.28.3": + dependencies: + "@babel/template": 7.27.2 + "@babel/types": 7.28.2 + "@babel/parser@7.28.0": dependencies: "@babel/types": 7.28.2 + "@babel/parser@7.28.3": + dependencies: + "@babel/types": 7.28.2 + + "@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-import-attributes@7.27.1(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + 
"@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-jsx@7.27.1(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + + "@babel/plugin-syntax-typescript@7.27.1(@babel/core@7.28.3)": + dependencies: + "@babel/core": 7.28.3 + "@babel/helper-plugin-utils": 7.27.1 + "@babel/runtime@7.28.2": {} "@babel/template@7.27.2": @@ -7824,11 +9175,25 @@ snapshots: transitivePeerDependencies: - supports-color + "@babel/traverse@7.28.3": + dependencies: + "@babel/code-frame": 7.27.1 + "@babel/generator": 7.28.3 + "@babel/helper-globals": 7.28.0 + "@babel/parser": 7.28.3 + "@babel/template": 7.27.2 + "@babel/types": 7.28.2 + debug: 4.4.1(supports-color@9.4.0) + transitivePeerDependencies: + - supports-color + "@babel/types@7.28.2": dependencies: "@babel/helper-string-parser": 7.27.1 "@babel/helper-validator-identifier": 7.27.1 + "@bcoe/v8-coverage@0.2.3": {} + "@chakra-ui/react@3.24.2(@emotion/react@11.14.0(@types/react@18.2.20)(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)": dependencies: "@ark-ui/react": 5.18.2(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -8027,17 +9392,6 @@ snapshots: prop-types: 15.8.1 react: 18.3.1 - "@hey-api/openapi-ts@0.48.3(typescript@5.9.2)": - dependencies: - "@apidevtools/json-schema-ref-parser": 11.6.4 - c12: 1.11.1 - camelcase: 8.0.0 - commander: 12.1.0 - handlebars: 4.7.8 - typescript: 5.9.2 - transitivePeerDependencies: - - magicast - "@humanfs/core@0.19.1": {} "@humanfs/node@0.16.6": @@ -8059,7 +9413,7 @@ snapshots: dependencies: "@swc/helpers": 0.5.17 - "@ioredis/commands@1.3.0": {} + "@ioredis/commands@1.3.1": {} "@isaacs/cliui@8.0.2": dependencies: @@ -8070,10 +9424,189 @@ snapshots: wrap-ansi: 8.1.0 wrap-ansi-cjs: wrap-ansi@7.0.0 + "@istanbuljs/load-nyc-config@1.1.0": + dependencies: + camelcase: 5.3.1 + find-up: 4.1.0 + get-package-type: 0.1.0 + js-yaml: 3.14.1 + resolve-from: 5.0.0 + + "@istanbuljs/schema@0.1.3": {} + + "@jest/console@30.1.2": + dependencies: + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + chalk: 4.1.2 + jest-message-util: 30.1.0 + jest-util: 30.0.5 + slash: 3.0.0 + + 
"@jest/core@30.1.3(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2))": + dependencies: + "@jest/console": 30.1.2 + "@jest/pattern": 30.0.1 + "@jest/reporters": 30.1.3 + "@jest/test-result": 30.1.3 + "@jest/transform": 30.1.2 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + ci-info: 4.3.0 + exit-x: 0.2.2 + graceful-fs: 4.2.11 + jest-changed-files: 30.0.5 + jest-config: 30.1.3(@types/node@24.2.1)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + jest-haste-map: 30.1.0 + jest-message-util: 30.1.0 + jest-regex-util: 30.0.1 + jest-resolve: 30.1.3 + jest-resolve-dependencies: 30.1.3 + jest-runner: 30.1.3 + jest-runtime: 30.1.3 + jest-snapshot: 30.1.2 + jest-util: 30.0.5 + jest-validate: 30.1.0 + jest-watcher: 30.1.3 + micromatch: 4.0.8 + pretty-format: 30.0.5 + slash: 3.0.0 + transitivePeerDependencies: + - babel-plugin-macros + - esbuild-register + - supports-color + - ts-node + + "@jest/diff-sequences@30.0.1": {} + + "@jest/environment@30.1.2": + dependencies: + "@jest/fake-timers": 30.1.2 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + jest-mock: 30.0.5 + + "@jest/expect-utils@30.1.2": + dependencies: + "@jest/get-type": 30.1.0 + + "@jest/expect@30.1.2": + dependencies: + expect: 30.1.2 + jest-snapshot: 30.1.2 + transitivePeerDependencies: + - supports-color + + "@jest/fake-timers@30.1.2": + dependencies: + "@jest/types": 30.0.5 + "@sinonjs/fake-timers": 13.0.5 + "@types/node": 24.2.1 + jest-message-util: 30.1.0 + jest-mock: 30.0.5 + jest-util: 30.0.5 + + "@jest/get-type@30.1.0": {} + + "@jest/globals@30.1.2": + dependencies: + "@jest/environment": 30.1.2 + "@jest/expect": 30.1.2 + "@jest/types": 30.0.5 + jest-mock: 30.0.5 + transitivePeerDependencies: + - supports-color + + "@jest/pattern@30.0.1": + dependencies: + "@types/node": 24.2.1 + jest-regex-util: 30.0.1 + + "@jest/reporters@30.1.3": + dependencies: + "@bcoe/v8-coverage": 0.2.3 + "@jest/console": 30.1.2 + "@jest/test-result": 30.1.3 + "@jest/transform": 30.1.2 + "@jest/types": 30.0.5 + "@jridgewell/trace-mapping": 0.3.30 + "@types/node": 24.2.1 + chalk: 4.1.2 + collect-v8-coverage: 1.0.2 + exit-x: 0.2.2 + glob: 10.4.5 + graceful-fs: 4.2.11 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-instrument: 6.0.3 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 5.0.6 + istanbul-reports: 3.2.0 + jest-message-util: 30.1.0 + jest-util: 30.0.5 + jest-worker: 30.1.0 + slash: 3.0.0 + string-length: 4.0.2 + v8-to-istanbul: 9.3.0 + transitivePeerDependencies: + - supports-color + "@jest/schemas@29.6.3": dependencies: "@sinclair/typebox": 0.27.8 + "@jest/schemas@30.0.5": + dependencies: + "@sinclair/typebox": 0.34.41 + + "@jest/snapshot-utils@30.1.2": + dependencies: + "@jest/types": 30.0.5 + chalk: 4.1.2 + graceful-fs: 4.2.11 + natural-compare: 1.4.0 + + "@jest/source-map@30.0.1": + dependencies: + "@jridgewell/trace-mapping": 0.3.30 + callsites: 3.1.0 + graceful-fs: 4.2.11 + + "@jest/test-result@30.1.3": + dependencies: + "@jest/console": 30.1.2 + "@jest/types": 30.0.5 + "@types/istanbul-lib-coverage": 2.0.6 + collect-v8-coverage: 1.0.2 + + "@jest/test-sequencer@30.1.3": + dependencies: + "@jest/test-result": 30.1.3 + graceful-fs: 4.2.11 + jest-haste-map: 30.1.0 + slash: 3.0.0 + + "@jest/transform@30.1.2": + dependencies: + "@babel/core": 7.28.3 + "@jest/types": 30.0.5 + "@jridgewell/trace-mapping": 0.3.30 + babel-plugin-istanbul: 7.0.0 + chalk: 4.1.2 + convert-source-map: 2.0.0 + fast-json-stable-stringify: 2.1.0 + graceful-fs: 4.2.11 + 
jest-haste-map: 30.1.0 + jest-regex-util: 30.0.1 + jest-util: 30.0.5 + micromatch: 4.0.8 + pirates: 4.0.7 + slash: 3.0.0 + write-file-atomic: 5.0.1 + transitivePeerDependencies: + - supports-color + "@jest/types@29.6.3": dependencies: "@jest/schemas": 29.6.3 @@ -8083,6 +9616,16 @@ snapshots: "@types/yargs": 17.0.33 chalk: 4.1.2 + "@jest/types@30.0.5": + dependencies: + "@jest/pattern": 30.0.1 + "@jest/schemas": 30.0.5 + "@types/istanbul-lib-coverage": 2.0.6 + "@types/istanbul-reports": 3.0.4 + "@types/node": 24.2.1 + "@types/yargs": 17.0.33 + chalk: 4.1.2 + "@jridgewell/gen-mapping@0.3.13": dependencies: "@jridgewell/sourcemap-codec": 1.5.5 @@ -8102,8 +9645,6 @@ snapshots: "@jridgewell/resolve-uri": 3.1.2 "@jridgewell/sourcemap-codec": 1.5.5 - "@jsdevtools/ono@7.1.3": {} - "@mapbox/node-pre-gyp@1.0.11": dependencies: detect-libc: 2.0.4 @@ -8241,6 +9782,8 @@ snapshots: "@pkgjs/parseargs@0.11.0": optional: true + "@pkgr/core@0.2.9": {} + "@radix-ui/primitive@1.1.3": {} "@radix-ui/react-arrow@1.1.7(@types/react@18.2.20)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)": @@ -8420,6 +9963,29 @@ snapshots: "@radix-ui/rect@1.1.1": {} + "@redocly/ajv@8.11.3": + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js-replace: 1.0.1 + + "@redocly/config@0.22.2": {} + + "@redocly/openapi-core@1.34.5(supports-color@10.2.0)": + dependencies: + "@redocly/ajv": 8.11.3 + "@redocly/config": 0.22.2 + colorette: 1.4.0 + https-proxy-agent: 7.0.6(supports-color@10.2.0) + js-levenshtein: 1.1.6 + js-yaml: 4.1.0 + minimatch: 5.1.6 + pluralize: 8.0.0 + yaml-ast-parser: 0.0.43 + transitivePeerDependencies: + - supports-color + "@reduxjs/toolkit@2.8.2(react@18.3.1)": dependencies: "@standard-schema/spec": 1.0.0 @@ -8513,7 +10079,7 @@ snapshots: "@sentry/utils": 7.120.4 localforage: 1.10.0 - "@sentry/nextjs@7.120.4(next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)": + "@sentry/nextjs@7.120.4(next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)": dependencies: "@rollup/plugin-commonjs": 24.0.0(rollup@2.79.2) "@sentry/core": 7.120.4 @@ -8525,7 +10091,7 @@ snapshots: "@sentry/vercel-edge": 7.120.4 "@sentry/webpack-plugin": 1.21.0 chalk: 3.0.0 - next: 14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) + next: 14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) react: 18.3.1 resolve: 1.22.8 rollup: 2.79.2 @@ -8584,6 +10150,16 @@ snapshots: "@sinclair/typebox@0.27.8": {} + "@sinclair/typebox@0.34.41": {} + + "@sinonjs/commons@3.0.1": + dependencies: + type-detect: 4.0.8 + + "@sinonjs/fake-timers@13.0.5": + dependencies: + "@sinonjs/commons": 3.0.1 + "@socket.io/component-emitter@3.1.2": {} "@standard-schema/spec@1.0.0": {} @@ -8601,6 +10177,13 @@ snapshots: "@swc/counter": 0.1.3 tslib: 2.8.1 + "@tanstack/query-core@5.85.9": {} + + "@tanstack/react-query@5.85.9(react@18.3.1)": + dependencies: + "@tanstack/query-core": 5.85.9 + react: 18.3.1 + "@tootallnate/once@2.0.0": {} "@ts-morph/common@0.11.1": @@ -8623,6 +10206,27 @@ snapshots: tslib: 2.8.1 optional: true + "@types/babel__core@7.20.5": + dependencies: + "@babel/parser": 7.28.0 + "@babel/types": 7.28.2 + "@types/babel__generator": 7.27.0 + "@types/babel__template": 7.4.4 + "@types/babel__traverse": 7.28.0 + + "@types/babel__generator@7.27.0": + dependencies: + "@babel/types": 7.28.2 + + "@types/babel__template@7.4.4": + 
dependencies: + "@babel/parser": 7.28.0 + "@babel/types": 7.28.2 + + "@types/babel__traverse@7.28.0": + dependencies: + "@babel/types": 7.28.2 + "@types/debug@4.1.12": dependencies: "@types/ms": 2.1.0 @@ -8639,6 +10243,12 @@ snapshots: dependencies: "@types/unist": 3.0.3 + "@types/ioredis@5.0.0": + dependencies: + ioredis: 5.7.0 + transitivePeerDependencies: + - supports-color + "@types/istanbul-lib-coverage@2.0.6": {} "@types/istanbul-lib-report@3.0.3": @@ -8649,6 +10259,11 @@ snapshots: dependencies: "@types/istanbul-lib-report": 3.0.3 + "@types/jest@30.0.0": + dependencies: + expect: 30.1.2 + pretty-format: 30.0.5 + "@types/json-schema@7.0.15": {} "@types/json5@0.0.29": {} @@ -8682,6 +10297,8 @@ snapshots: "@types/scheduler@0.26.0": {} + "@types/stack-utils@2.0.3": {} + "@types/ua-parser-js@0.7.39": {} "@types/unist@2.0.11": {} @@ -8860,10 +10477,6 @@ snapshots: "@unrs/resolver-binding-win32-x64-msvc@1.11.1": optional: true - "@upstash/redis@1.35.3": - dependencies: - uncrypto: 0.1.3 - "@vercel/build-utils@8.4.12": {} "@vercel/edge-config-fs@0.1.0": {} @@ -8920,10 +10533,6 @@ snapshots: "@vercel/static-config": 3.0.0 ts-morph: 12.0.0 - "@vercel/kv@2.0.0": - dependencies: - "@upstash/redis": 1.35.3 - "@vercel/next@4.3.18": dependencies: "@vercel/nft": 0.27.3 @@ -9601,6 +11210,8 @@ snapshots: transitivePeerDependencies: - supports-color + agent-base@7.1.4: {} + ajv@6.12.6: dependencies: fast-deep-equal: 3.1.3 @@ -9615,6 +11226,12 @@ snapshots: require-from-string: 2.0.2 uri-js: 4.4.1 + ansi-colors@4.1.3: {} + + ansi-escapes@4.3.2: + dependencies: + type-fest: 0.21.3 + ansi-regex@5.0.1: {} ansi-regex@6.1.0: {} @@ -9623,6 +11240,8 @@ snapshots: dependencies: color-convert: 2.0.1 + ansi-styles@5.2.0: {} + ansi-styles@6.2.1: {} any-promise@1.3.0: {} @@ -9645,6 +11264,10 @@ snapshots: arg@5.0.2: {} + argparse@1.0.10: + dependencies: + sprintf-js: 1.0.3 + argparse@2.0.1: {} aria-hidden@1.2.6: @@ -9771,12 +11394,66 @@ snapshots: axobject-query@4.1.0: {} + babel-jest@30.1.2(@babel/core@7.28.3): + dependencies: + "@babel/core": 7.28.3 + "@jest/transform": 30.1.2 + "@types/babel__core": 7.20.5 + babel-plugin-istanbul: 7.0.0 + babel-preset-jest: 30.0.1(@babel/core@7.28.3) + chalk: 4.1.2 + graceful-fs: 4.2.11 + slash: 3.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-istanbul@7.0.0: + dependencies: + "@babel/helper-plugin-utils": 7.27.1 + "@istanbuljs/load-nyc-config": 1.1.0 + "@istanbuljs/schema": 0.1.3 + istanbul-lib-instrument: 6.0.3 + test-exclude: 6.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-jest-hoist@30.0.1: + dependencies: + "@babel/template": 7.27.2 + "@babel/types": 7.28.2 + "@types/babel__core": 7.20.5 + babel-plugin-macros@3.1.0: dependencies: "@babel/runtime": 7.28.2 cosmiconfig: 7.1.0 resolve: 1.22.10 + babel-preset-current-node-syntax@1.2.0(@babel/core@7.28.3): + dependencies: + "@babel/core": 7.28.3 + "@babel/plugin-syntax-async-generators": 7.8.4(@babel/core@7.28.3) + "@babel/plugin-syntax-bigint": 7.8.3(@babel/core@7.28.3) + "@babel/plugin-syntax-class-properties": 7.12.13(@babel/core@7.28.3) + "@babel/plugin-syntax-class-static-block": 7.14.5(@babel/core@7.28.3) + "@babel/plugin-syntax-import-attributes": 7.27.1(@babel/core@7.28.3) + "@babel/plugin-syntax-import-meta": 7.10.4(@babel/core@7.28.3) + "@babel/plugin-syntax-json-strings": 7.8.3(@babel/core@7.28.3) + "@babel/plugin-syntax-logical-assignment-operators": 7.10.4(@babel/core@7.28.3) + "@babel/plugin-syntax-nullish-coalescing-operator": 7.8.3(@babel/core@7.28.3) + 
"@babel/plugin-syntax-numeric-separator": 7.10.4(@babel/core@7.28.3) + "@babel/plugin-syntax-object-rest-spread": 7.8.3(@babel/core@7.28.3) + "@babel/plugin-syntax-optional-catch-binding": 7.8.3(@babel/core@7.28.3) + "@babel/plugin-syntax-optional-chaining": 7.8.3(@babel/core@7.28.3) + "@babel/plugin-syntax-private-property-in-object": 7.14.5(@babel/core@7.28.3) + "@babel/plugin-syntax-top-level-await": 7.14.5(@babel/core@7.28.3) + + babel-preset-jest@30.0.1(@babel/core@7.28.3): + dependencies: + "@babel/core": 7.28.3 + babel-plugin-jest-hoist: 30.0.1 + babel-preset-current-node-syntax: 1.2.0(@babel/core@7.28.3) + bail@2.0.2: {} balanced-match@1.0.2: {} @@ -9809,10 +11486,20 @@ snapshots: node-releases: 2.0.19 update-browserslist-db: 1.1.3(browserslist@4.25.2) + bs-logger@0.2.6: + dependencies: + fast-json-stable-stringify: 2.1.0 + + bser@2.1.1: + dependencies: + node-int64: 0.4.0 + btoa@1.2.1: {} buffer-crc32@0.2.13: {} + buffer-from@1.1.2: {} + buffer@6.0.3: dependencies: base64-js: 1.5.1 @@ -9824,21 +11511,6 @@ snapshots: bytes@3.1.0: {} - c12@1.11.1: - dependencies: - chokidar: 3.6.0 - confbox: 0.1.8 - defu: 6.1.4 - dotenv: 16.6.1 - giget: 1.2.5 - jiti: 1.21.7 - mlly: 1.7.4 - ohash: 1.1.6 - pathe: 1.1.2 - perfect-debounce: 1.0.0 - pkg-types: 1.3.1 - rc9: 2.1.2 - call-bind-apply-helpers@1.0.2: dependencies: es-errors: 1.3.0 @@ -9860,7 +11532,9 @@ snapshots: camelcase-css@2.0.1: {} - camelcase@8.0.0: {} + camelcase@5.3.1: {} + + camelcase@6.3.0: {} caniuse-lite@1.0.30001734: {} @@ -9876,6 +11550,10 @@ snapshots: ansi-styles: 4.3.0 supports-color: 7.2.0 + change-case@5.4.4: {} + + char-regex@1.0.2: {} + character-entities-html4@2.1.0: {} character-entities-legacy@3.0.0: {} @@ -9922,22 +11600,32 @@ snapshots: ci-info@3.9.0: {} - citty@0.1.6: - dependencies: - consola: 3.4.2 + ci-info@4.3.0: {} cjs-module-lexer@1.2.3: {} + cjs-module-lexer@2.1.0: {} + classnames@2.5.1: {} client-only@0.0.1: {} + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + clsx@2.1.1: {} cluster-key-slot@1.1.2: {} + co@4.6.0: {} + code-block-writer@10.1.1: {} + collect-v8-coverage@1.0.2: {} + color-convert@2.0.1: dependencies: color-name: 1.1.4 @@ -9946,24 +11634,20 @@ snapshots: color-support@1.1.3: {} + colorette@1.4.0: {} + combined-stream@1.0.8: dependencies: delayed-stream: 1.0.0 comma-separated-tokens@2.0.3: {} - commander@12.1.0: {} - commander@4.1.1: {} commondir@1.0.1: {} concat-map@0.0.1: {} - confbox@0.1.8: {} - - consola@3.4.2: {} - console-control-strings@1.1.0: {} content-type@1.0.4: {} @@ -9972,6 +11656,8 @@ snapshots: convert-source-map@1.9.0: {} + convert-source-map@2.0.0: {} + cookie@0.7.2: {} cosmiconfig@7.1.0: @@ -10026,6 +11712,12 @@ snapshots: dependencies: ms: 2.1.3 + debug@4.4.1(supports-color@10.2.0): + dependencies: + ms: 2.1.3 + optionalDependencies: + supports-color: 10.2.0 + debug@4.4.1(supports-color@9.4.0): dependencies: ms: 2.1.3 @@ -10036,8 +11728,14 @@ snapshots: dependencies: character-entities: 2.0.2 + dedent@1.7.0(babel-plugin-macros@3.1.0): + optionalDependencies: + babel-plugin-macros: 3.1.0 + deep-is@0.1.4: {} + deepmerge@4.3.1: {} + define-data-property@1.1.4: dependencies: es-define-property: 1.0.1 @@ -10050,8 +11748,6 @@ snapshots: has-property-descriptors: 1.0.2 object-keys: 1.1.1 - defu@6.1.4: {} - delayed-stream@1.0.0: {} delegates@1.0.0: {} @@ -10062,8 +11758,6 @@ snapshots: dequal@2.0.3: {} - destr@2.0.5: {} - detect-europe-js@0.1.2: {} detect-libc@1.0.3: @@ -10071,6 +11765,8 @@ snapshots: detect-libc@2.0.4: {} + 
detect-newline@3.1.0: {} + detect-node-es@1.1.0: {} devlop@1.1.0: @@ -10103,8 +11799,6 @@ snapshots: domsanitizer: 0.2.3 umap: 1.0.2 - dotenv@16.6.1: {} - dunder-proto@1.0.1: dependencies: call-bind-apply-helpers: 1.0.2 @@ -10127,6 +11821,8 @@ snapshots: electron-to-chromium@1.5.200: {} + emittery@0.13.1: {} + emoji-regex@8.0.0: {} emoji-regex@9.2.2: {} @@ -10347,6 +12043,8 @@ snapshots: escalade@3.2.0: {} + escape-string-regexp@2.0.0: {} + escape-string-regexp@4.0.0: {} eslint-config-next@14.2.31(eslint@9.33.0(jiti@1.21.7))(typescript@5.9.2): @@ -10534,6 +12232,8 @@ snapshots: acorn-jsx: 5.3.2(acorn@8.15.0) eslint-visitor-keys: 4.2.1 + esprima@4.0.1: {} + esquery@1.6.0: dependencies: estraverse: 5.3.0 @@ -10571,6 +12271,29 @@ snapshots: signal-exit: 3.0.7 strip-final-newline: 2.0.0 + execa@5.1.1: + dependencies: + cross-spawn: 7.0.6 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + + exit-x@0.2.2: {} + + expect@30.1.2: + dependencies: + "@jest/expect-utils": 30.1.2 + "@jest/get-type": 30.1.0 + jest-matcher-utils: 30.1.2 + jest-message-util: 30.1.0 + jest-mock: 30.0.5 + jest-util: 30.0.5 + extend@3.0.2: {} fake-mediastreamtrack@1.2.0: @@ -10598,6 +12321,10 @@ snapshots: dependencies: reusify: 1.1.0 + fb-watchman@2.0.2: + dependencies: + bser: 2.1.1 + fd-slicer@1.1.0: dependencies: pend: 1.2.0 @@ -10618,6 +12345,11 @@ snapshots: find-root@1.1.0: {} + find-up@4.1.0: + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + find-up@5.0.0: dependencies: locate-path: 6.0.0 @@ -10708,8 +12440,12 @@ snapshots: generic-pool@3.4.2: {} + gensync@1.0.0-beta.2: {} + get-browser-rtc@1.1.0: {} + get-caller-file@2.0.5: {} + get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 @@ -10725,6 +12461,8 @@ snapshots: get-nonce@1.0.1: {} + get-package-type@0.1.0: {} + get-proto@1.0.1: dependencies: dunder-proto: 1.0.1 @@ -10734,6 +12472,8 @@ snapshots: dependencies: pump: 3.0.3 + get-stream@6.0.1: {} + get-symbol-description@1.1.0: dependencies: call-bound: 1.0.4 @@ -10744,16 +12484,6 @@ snapshots: dependencies: resolve-pkg-maps: 1.0.0 - giget@1.2.5: - dependencies: - citty: 0.1.6 - consola: 3.4.2 - defu: 6.1.4 - node-fetch-native: 1.6.7 - nypm: 0.5.4 - pathe: 2.0.3 - tar: 6.2.1 - glob-parent@5.1.2: dependencies: is-glob: 4.0.3 @@ -10885,6 +12615,8 @@ snapshots: dependencies: react-is: 16.13.1 + html-escaper@2.0.2: {} + html-url-attributes@3.0.1: {} http-errors@1.4.0: @@ -10907,8 +12639,17 @@ snapshots: transitivePeerDependencies: - supports-color + https-proxy-agent@7.0.6(supports-color@10.2.0): + dependencies: + agent-base: 7.1.4 + debug: 4.4.1(supports-color@10.2.0) + transitivePeerDependencies: + - supports-color + human-signals@1.1.1: {} + human-signals@2.1.0: {} + hyperhtml-style@0.1.3: {} iconv-lite@0.4.24: @@ -10932,8 +12673,15 @@ snapshots: parent-module: 1.0.1 resolve-from: 4.0.0 + import-local@3.2.0: + dependencies: + pkg-dir: 4.2.0 + resolve-cwd: 3.0.0 + imurmurhash@0.1.4: {} + index-to-position@1.1.0: {} + inflight@1.0.6: dependencies: once: 1.4.0 @@ -10953,7 +12701,7 @@ snapshots: ioredis@5.7.0: dependencies: - "@ioredis/commands": 1.3.0 + "@ioredis/commands": 1.3.1 cluster-key-slot: 1.1.2 debug: 4.4.1(supports-color@9.4.0) denque: 2.1.0 @@ -11043,6 +12791,8 @@ snapshots: is-fullwidth-code-point@3.0.0: {} + is-generator-fn@2.1.0: {} + is-generator-function@1.1.0: dependencies: call-bound: 1.0.4 @@ -11122,6 +12872,37 @@ snapshots: isexe@2.0.0: {} + 
istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-instrument@6.0.3: + dependencies: + "@babel/core": 7.28.3 + "@babel/parser": 7.28.0 + "@istanbuljs/schema": 0.1.3 + istanbul-lib-coverage: 3.2.2 + semver: 7.7.2 + transitivePeerDependencies: + - supports-color + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-lib-source-maps@5.0.6: + dependencies: + "@jridgewell/trace-mapping": 0.3.30 + debug: 4.4.1(supports-color@9.4.0) + istanbul-lib-coverage: 3.2.2 + transitivePeerDependencies: + - supports-color + + istanbul-reports@3.2.0: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + iterator.prototype@1.1.5: dependencies: define-data-property: 1.1.4 @@ -11143,6 +12924,301 @@ snapshots: optionalDependencies: "@pkgjs/parseargs": 0.11.0 + jest-changed-files@30.0.5: + dependencies: + execa: 5.1.1 + jest-util: 30.0.5 + p-limit: 3.1.0 + + jest-circus@30.1.3(babel-plugin-macros@3.1.0): + dependencies: + "@jest/environment": 30.1.2 + "@jest/expect": 30.1.2 + "@jest/test-result": 30.1.3 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + chalk: 4.1.2 + co: 4.6.0 + dedent: 1.7.0(babel-plugin-macros@3.1.0) + is-generator-fn: 2.1.0 + jest-each: 30.1.0 + jest-matcher-utils: 30.1.2 + jest-message-util: 30.1.0 + jest-runtime: 30.1.3 + jest-snapshot: 30.1.2 + jest-util: 30.0.5 + p-limit: 3.1.0 + pretty-format: 30.0.5 + pure-rand: 7.0.1 + slash: 3.0.0 + stack-utils: 2.0.6 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-cli@30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)): + dependencies: + "@jest/core": 30.1.3(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + "@jest/test-result": 30.1.3 + "@jest/types": 30.0.5 + chalk: 4.1.2 + exit-x: 0.2.2 + import-local: 3.2.0 + jest-config: 30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + jest-util: 30.0.5 + jest-validate: 30.1.0 + yargs: 17.7.2 + transitivePeerDependencies: + - "@types/node" + - babel-plugin-macros + - esbuild-register + - supports-color + - ts-node + + jest-config@30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)): + dependencies: + "@babel/core": 7.28.3 + "@jest/get-type": 30.1.0 + "@jest/pattern": 30.0.1 + "@jest/test-sequencer": 30.1.3 + "@jest/types": 30.0.5 + babel-jest: 30.1.2(@babel/core@7.28.3) + chalk: 4.1.2 + ci-info: 4.3.0 + deepmerge: 4.3.1 + glob: 10.4.5 + graceful-fs: 4.2.11 + jest-circus: 30.1.3(babel-plugin-macros@3.1.0) + jest-docblock: 30.0.1 + jest-environment-node: 30.1.2 + jest-regex-util: 30.0.1 + jest-resolve: 30.1.3 + jest-runner: 30.1.3 + jest-util: 30.0.5 + jest-validate: 30.1.0 + micromatch: 4.0.8 + parse-json: 5.2.0 + pretty-format: 30.0.5 + slash: 3.0.0 + strip-json-comments: 3.1.1 + optionalDependencies: + "@types/node": 16.18.11 + ts-node: 10.9.1(@types/node@16.18.11)(typescript@5.9.2) + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-config@30.1.3(@types/node@24.2.1)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)): + dependencies: + "@babel/core": 7.28.3 + "@jest/get-type": 30.1.0 + "@jest/pattern": 30.0.1 + "@jest/test-sequencer": 30.1.3 + "@jest/types": 30.0.5 + babel-jest: 30.1.2(@babel/core@7.28.3) + chalk: 4.1.2 + ci-info: 4.3.0 + deepmerge: 4.3.1 + glob: 10.4.5 + graceful-fs: 4.2.11 + jest-circus: 
30.1.3(babel-plugin-macros@3.1.0) + jest-docblock: 30.0.1 + jest-environment-node: 30.1.2 + jest-regex-util: 30.0.1 + jest-resolve: 30.1.3 + jest-runner: 30.1.3 + jest-util: 30.0.5 + jest-validate: 30.1.0 + micromatch: 4.0.8 + parse-json: 5.2.0 + pretty-format: 30.0.5 + slash: 3.0.0 + strip-json-comments: 3.1.1 + optionalDependencies: + "@types/node": 24.2.1 + ts-node: 10.9.1(@types/node@16.18.11)(typescript@5.9.2) + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-diff@30.1.2: + dependencies: + "@jest/diff-sequences": 30.0.1 + "@jest/get-type": 30.1.0 + chalk: 4.1.2 + pretty-format: 30.0.5 + + jest-docblock@30.0.1: + dependencies: + detect-newline: 3.1.0 + + jest-each@30.1.0: + dependencies: + "@jest/get-type": 30.1.0 + "@jest/types": 30.0.5 + chalk: 4.1.2 + jest-util: 30.0.5 + pretty-format: 30.0.5 + + jest-environment-node@30.1.2: + dependencies: + "@jest/environment": 30.1.2 + "@jest/fake-timers": 30.1.2 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + jest-mock: 30.0.5 + jest-util: 30.0.5 + jest-validate: 30.1.0 + + jest-haste-map@30.1.0: + dependencies: + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + anymatch: 3.1.3 + fb-watchman: 2.0.2 + graceful-fs: 4.2.11 + jest-regex-util: 30.0.1 + jest-util: 30.0.5 + jest-worker: 30.1.0 + micromatch: 4.0.8 + walker: 1.0.8 + optionalDependencies: + fsevents: 2.3.3 + + jest-leak-detector@30.1.0: + dependencies: + "@jest/get-type": 30.1.0 + pretty-format: 30.0.5 + + jest-matcher-utils@30.1.2: + dependencies: + "@jest/get-type": 30.1.0 + chalk: 4.1.2 + jest-diff: 30.1.2 + pretty-format: 30.0.5 + + jest-message-util@30.1.0: + dependencies: + "@babel/code-frame": 7.27.1 + "@jest/types": 30.0.5 + "@types/stack-utils": 2.0.3 + chalk: 4.1.2 + graceful-fs: 4.2.11 + micromatch: 4.0.8 + pretty-format: 30.0.5 + slash: 3.0.0 + stack-utils: 2.0.6 + + jest-mock@30.0.5: + dependencies: + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + jest-util: 30.0.5 + + jest-pnp-resolver@1.2.3(jest-resolve@30.1.3): + optionalDependencies: + jest-resolve: 30.1.3 + + jest-regex-util@30.0.1: {} + + jest-resolve-dependencies@30.1.3: + dependencies: + jest-regex-util: 30.0.1 + jest-snapshot: 30.1.2 + transitivePeerDependencies: + - supports-color + + jest-resolve@30.1.3: + dependencies: + chalk: 4.1.2 + graceful-fs: 4.2.11 + jest-haste-map: 30.1.0 + jest-pnp-resolver: 1.2.3(jest-resolve@30.1.3) + jest-util: 30.0.5 + jest-validate: 30.1.0 + slash: 3.0.0 + unrs-resolver: 1.11.1 + + jest-runner@30.1.3: + dependencies: + "@jest/console": 30.1.2 + "@jest/environment": 30.1.2 + "@jest/test-result": 30.1.3 + "@jest/transform": 30.1.2 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + chalk: 4.1.2 + emittery: 0.13.1 + exit-x: 0.2.2 + graceful-fs: 4.2.11 + jest-docblock: 30.0.1 + jest-environment-node: 30.1.2 + jest-haste-map: 30.1.0 + jest-leak-detector: 30.1.0 + jest-message-util: 30.1.0 + jest-resolve: 30.1.3 + jest-runtime: 30.1.3 + jest-util: 30.0.5 + jest-watcher: 30.1.3 + jest-worker: 30.1.0 + p-limit: 3.1.0 + source-map-support: 0.5.13 + transitivePeerDependencies: + - supports-color + + jest-runtime@30.1.3: + dependencies: + "@jest/environment": 30.1.2 + "@jest/fake-timers": 30.1.2 + "@jest/globals": 30.1.2 + "@jest/source-map": 30.0.1 + "@jest/test-result": 30.1.3 + "@jest/transform": 30.1.2 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + chalk: 4.1.2 + cjs-module-lexer: 2.1.0 + collect-v8-coverage: 1.0.2 + glob: 10.4.5 + graceful-fs: 4.2.11 + jest-haste-map: 30.1.0 + jest-message-util: 30.1.0 + jest-mock: 30.0.5 + jest-regex-util: 30.0.1 + 
jest-resolve: 30.1.3 + jest-snapshot: 30.1.2 + jest-util: 30.0.5 + slash: 3.0.0 + strip-bom: 4.0.0 + transitivePeerDependencies: + - supports-color + + jest-snapshot@30.1.2: + dependencies: + "@babel/core": 7.28.3 + "@babel/generator": 7.28.0 + "@babel/plugin-syntax-jsx": 7.27.1(@babel/core@7.28.3) + "@babel/plugin-syntax-typescript": 7.27.1(@babel/core@7.28.3) + "@babel/types": 7.28.2 + "@jest/expect-utils": 30.1.2 + "@jest/get-type": 30.1.0 + "@jest/snapshot-utils": 30.1.2 + "@jest/transform": 30.1.2 + "@jest/types": 30.0.5 + babel-preset-current-node-syntax: 1.2.0(@babel/core@7.28.3) + chalk: 4.1.2 + expect: 30.1.2 + graceful-fs: 4.2.11 + jest-diff: 30.1.2 + jest-matcher-utils: 30.1.2 + jest-message-util: 30.1.0 + jest-util: 30.0.5 + pretty-format: 30.0.5 + semver: 7.7.2 + synckit: 0.11.11 + transitivePeerDependencies: + - supports-color + jest-util@29.7.0: dependencies: "@jest/types": 29.6.3 @@ -11152,6 +13228,35 @@ snapshots: graceful-fs: 4.2.11 picomatch: 2.3.1 + jest-util@30.0.5: + dependencies: + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + chalk: 4.1.2 + ci-info: 4.3.0 + graceful-fs: 4.2.11 + picomatch: 4.0.3 + + jest-validate@30.1.0: + dependencies: + "@jest/get-type": 30.1.0 + "@jest/types": 30.0.5 + camelcase: 6.3.0 + chalk: 4.1.2 + leven: 3.1.0 + pretty-format: 30.0.5 + + jest-watcher@30.1.3: + dependencies: + "@jest/test-result": 30.1.3 + "@jest/types": 30.0.5 + "@types/node": 24.2.1 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + emittery: 0.13.1 + jest-util: 30.0.5 + string-length: 4.0.2 + jest-worker@29.7.0: dependencies: "@types/node": 24.2.1 @@ -11159,12 +13264,40 @@ snapshots: merge-stream: 2.0.0 supports-color: 8.1.1 + jest-worker@30.1.0: + dependencies: + "@types/node": 24.2.1 + "@ungap/structured-clone": 1.3.0 + jest-util: 30.0.5 + merge-stream: 2.0.0 + supports-color: 8.1.1 + + jest@30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)): + dependencies: + "@jest/core": 30.1.3(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + "@jest/types": 30.0.5 + import-local: 3.2.0 + jest-cli: 30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + transitivePeerDependencies: + - "@types/node" + - babel-plugin-macros + - esbuild-register + - supports-color + - ts-node + jiti@1.21.7: {} jose@4.15.9: {} + js-levenshtein@1.1.6: {} + js-tokens@4.0.0: {} + js-yaml@3.14.1: + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + js-yaml@4.1.0: dependencies: argparse: 2.0.1 @@ -11192,6 +13325,8 @@ snapshots: dependencies: minimist: 1.2.8 + json5@2.2.3: {} + jsonfile@4.0.0: optionalDependencies: graceful-fs: 4.2.11 @@ -11219,6 +13354,8 @@ snapshots: dependencies: language-subtag-registry: 0.3.23 + leven@3.1.0: {} + levn@0.4.1: dependencies: prelude-ls: 1.2.1 @@ -11248,6 +13385,10 @@ snapshots: dependencies: lie: 3.1.1 + locate-path@5.0.0: + dependencies: + p-locate: 4.1.0 + locate-path@6.0.0: dependencies: p-locate: 5.0.0 @@ -11256,6 +13397,8 @@ snapshots: lodash.isarguments@3.1.0: {} + lodash.memoize@4.1.2: {} + lodash.merge@4.6.2: {} longest-streak@3.1.0: {} @@ -11266,6 +13409,10 @@ snapshots: lru-cache@10.4.3: {} + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + lru-cache@6.0.0: dependencies: yallist: 4.0.0 @@ -11282,8 +13429,16 @@ snapshots: dependencies: semver: 6.3.1 + make-dir@4.0.0: + dependencies: + semver: 7.7.2 + make-error@1.3.6: {} + makeerror@1.0.12: + dependencies: + tmpl: 1.0.5 + math-intrinsics@1.1.0: {} 
mdast-util-from-markdown@2.0.2: @@ -11591,13 +13746,6 @@ snapshots: mkdirp@1.0.4: {} - mlly@1.7.4: - dependencies: - acorn: 8.15.0 - pathe: 2.0.3 - pkg-types: 1.3.1 - ufo: 1.6.1 - mri@1.2.0: {} ms@2.1.1: {} @@ -11618,13 +13766,13 @@ snapshots: neo-async@2.6.2: {} - next-auth@4.24.11(next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): + next-auth@4.24.11(next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): dependencies: "@babel/runtime": 7.28.2 "@panva/hkdf": 1.2.1 cookie: 0.7.2 jose: 4.15.9 - next: 14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) + next: 14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) oauth: 0.9.15 openid-client: 5.7.1 preact: 10.27.0 @@ -11638,7 +13786,7 @@ snapshots: react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0): + next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0): dependencies: "@next/env": 14.2.31 "@swc/helpers": 0.5.5 @@ -11648,7 +13796,7 @@ snapshots: postcss: 8.4.31 react: 18.3.1 react-dom: 18.3.1(react@18.3.1) - styled-jsx: 5.1.1(react@18.3.1) + styled-jsx: 5.1.1(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react@18.3.1) optionalDependencies: "@next/swc-darwin-arm64": 14.2.31 "@next/swc-darwin-x64": 14.2.31 @@ -11664,13 +13812,9 @@ snapshots: - "@babel/core" - babel-plugin-macros - node-abort-controller@3.1.1: {} - node-addon-api@7.1.1: optional: true - node-fetch-native@1.6.7: {} - node-fetch@2.6.7: dependencies: whatwg-url: 5.0.0 @@ -11685,6 +13829,8 @@ snapshots: node-gyp-build@4.8.4: {} + node-int64@0.4.0: {} + node-releases@2.0.19: {} nopt@5.0.0: @@ -11706,21 +13852,12 @@ snapshots: gauge: 3.0.2 set-blocking: 2.0.0 - nuqs@2.4.3(next@14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1): + nuqs@2.4.3(next@14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1): dependencies: mitt: 3.0.1 react: 18.3.1 optionalDependencies: - next: 14.2.31(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) - - nypm@0.5.4: - dependencies: - citty: 0.1.6 - consola: 3.4.2 - pathe: 2.0.3 - pkg-types: 1.3.1 - tinyexec: 0.3.2 - ufo: 1.6.1 + next: 14.2.31(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) oauth@0.9.15: {} @@ -11770,8 +13907,6 @@ snapshots: define-properties: 1.2.1 es-object-atoms: 1.1.1 - ohash@1.1.6: {} - oidc-token-hash@5.1.1: {} once@1.3.3: @@ -11786,6 +13921,28 @@ snapshots: dependencies: mimic-fn: 2.1.0 + openapi-fetch@0.14.0: + dependencies: + openapi-typescript-helpers: 0.0.15 + + openapi-react-query@0.5.0(@tanstack/react-query@5.85.9(react@18.3.1))(openapi-fetch@0.14.0): + dependencies: + "@tanstack/react-query": 5.85.9(react@18.3.1) + openapi-fetch: 0.14.0 + openapi-typescript-helpers: 0.0.15 + + openapi-typescript-helpers@0.0.15: {} + + openapi-typescript@7.9.1(typescript@5.9.2): + dependencies: + "@redocly/openapi-core": 1.34.5(supports-color@10.2.0) + ansi-colors: 4.1.3 + change-case: 5.4.4 + parse-json: 8.3.0 + supports-color: 10.2.0 + typescript: 5.9.2 + yargs-parser: 21.1.1 + openid-client@5.7.1: dependencies: jose: 4.15.9 @@ -11812,14 +13969,24 @@ snapshots: p-finally@2.0.1: {} + 
p-limit@2.3.0: + dependencies: + p-try: 2.2.0 + p-limit@3.1.0: dependencies: yocto-queue: 0.1.0 + p-locate@4.1.0: + dependencies: + p-limit: 2.3.0 + p-locate@5.0.0: dependencies: p-limit: 3.1.0 + p-try@2.2.0: {} + package-json-from-dist@1.0.1: {} parent-module@1.0.1: @@ -11843,6 +14010,12 @@ snapshots: json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 + parse-json@8.3.0: + dependencies: + "@babel/code-frame": 7.27.1 + index-to-position: 1.1.0 + type-fest: 4.41.0 + parse-ms@2.1.0: {} path-browserify@1.0.1: {} @@ -11875,14 +14048,8 @@ snapshots: path-type@4.0.0: {} - pathe@1.1.2: {} - - pathe@2.0.3: {} - pend@1.2.0: {} - perfect-debounce@1.0.0: {} - perfect-freehand@1.2.2: {} picocolors@1.0.0: {} @@ -11897,11 +14064,11 @@ snapshots: pirates@4.0.7: {} - pkg-types@1.3.1: + pkg-dir@4.2.0: dependencies: - confbox: 0.1.8 - mlly: 1.7.4 - pathe: 2.0.3 + find-up: 4.1.0 + + pluralize@8.0.0: {} possible-typed-array-names@1.1.0: {} @@ -11962,6 +14129,12 @@ snapshots: pretty-format@3.8.0: {} + pretty-format@30.0.5: + dependencies: + "@jest/schemas": 30.0.5 + ansi-styles: 5.2.0 + react-is: 18.3.1 + pretty-ms@7.0.1: dependencies: parse-ms: 2.1.0 @@ -11993,6 +14166,8 @@ snapshots: punycode@2.3.1: {} + pure-rand@7.0.1: {} + qr.js@0.0.0: {} queue-microtask@1.2.3: {} @@ -12008,11 +14183,6 @@ snapshots: iconv-lite: 0.4.24 unpipe: 1.0.0 - rc9@2.1.2: - dependencies: - defu: 6.1.4 - destr: 2.0.5 - react-dom@18.3.1(react@18.3.1): dependencies: loose-envify: 1.4.0 @@ -12031,6 +14201,8 @@ snapshots: react-is@16.13.1: {} + react-is@18.3.1: {} + react-markdown@9.1.0(@types/react@18.2.20)(react@18.3.1): dependencies: "@types/hast": 3.0.4 @@ -12118,10 +14290,6 @@ snapshots: dependencies: redis-errors: 1.2.0 - redlock@5.0.0-beta.2: - dependencies: - node-abort-controller: 3.1.1 - redux-thunk@3.1.0(redux@5.0.1): dependencies: redux: 5.0.1 @@ -12165,12 +14333,18 @@ snapshots: unified: 11.0.5 vfile: 6.0.3 + require-directory@2.1.1: {} + require-from-string@2.0.2: {} reraf@1.1.1: {} reselect@5.1.1: {} + resolve-cwd@3.0.0: + dependencies: + resolve-from: 5.0.0 + resolve-from@4.0.0: {} resolve-from@5.0.0: {} @@ -12339,6 +14513,8 @@ snapshots: transitivePeerDependencies: - supports-color + slash@3.0.0: {} + socket.io-client@4.7.2: dependencies: "@socket.io/component-emitter": 3.1.2 @@ -12359,16 +14535,27 @@ snapshots: source-map-js@1.2.1: {} + source-map-support@0.5.13: + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + source-map@0.5.7: {} source-map@0.6.1: {} space-separated-tokens@2.0.2: {} + sprintf-js@1.0.3: {} + sprintf-js@1.1.3: {} stable-hash@0.0.5: {} + stack-utils@2.0.6: + dependencies: + escape-string-regexp: 2.0.0 + stacktrace-parser@0.1.11: dependencies: type-fest: 0.7.1 @@ -12396,6 +14583,11 @@ snapshots: streamsearch@1.1.0: {} + string-length@4.0.2: + dependencies: + char-regex: 1.0.2 + strip-ansi: 6.0.1 + string-width@4.2.3: dependencies: emoji-regex: 8.0.0 @@ -12477,6 +14669,8 @@ snapshots: strip-bom@3.0.0: {} + strip-bom@4.0.0: {} + strip-final-newline@2.0.0: {} strip-json-comments@3.1.1: {} @@ -12489,10 +14683,13 @@ snapshots: dependencies: inline-style-parser: 0.2.4 - styled-jsx@5.1.1(react@18.3.1): + styled-jsx@5.1.1(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react@18.3.1): dependencies: client-only: 0.0.1 react: 18.3.1 + optionalDependencies: + "@babel/core": 7.28.3 + babel-plugin-macros: 3.1.0 stylis@4.2.0: {} @@ -12506,6 +14703,8 @@ snapshots: pirates: 4.0.7 ts-interface-checker: 0.1.13 + supports-color@10.2.0: {} + supports-color@7.2.0: dependencies: has-flag: 4.0.0 @@ -12518,6 
+14717,10 @@ snapshots: supports-preserve-symlinks-flag@1.0.0: {} + synckit@0.11.11: + dependencies: + "@pkgr/core": 0.2.9 + tailwindcss@3.4.17(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)): dependencies: "@alloc/quick-lru": 5.2.0 @@ -12564,6 +14767,12 @@ snapshots: mkdirp: 1.0.4 yallist: 4.0.0 + test-exclude@6.0.0: + dependencies: + "@istanbuljs/schema": 0.1.3 + glob: 7.2.3 + minimatch: 3.1.2 + thenify-all@1.6.0: dependencies: thenify: 3.3.1 @@ -12576,13 +14785,13 @@ snapshots: dependencies: convert-hrtime: 3.0.0 - tinyexec@0.3.2: {} - tinyglobby@0.2.14: dependencies: fdir: 6.4.6(picomatch@4.0.3) picomatch: 4.0.3 + tmpl@1.0.5: {} + to-regex-range@5.0.1: dependencies: is-number: 7.0.0 @@ -12603,6 +14812,26 @@ snapshots: ts-interface-checker@0.1.13: {} + ts-jest@29.4.1(@babel/core@7.28.3)(@jest/transform@30.1.2)(@jest/types@30.0.5)(babel-jest@30.1.2(@babel/core@7.28.3))(jest-util@30.0.5)(jest@30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)))(typescript@5.9.2): + dependencies: + bs-logger: 0.2.6 + fast-json-stable-stringify: 2.1.0 + handlebars: 4.7.8 + jest: 30.1.3(@types/node@16.18.11)(babel-plugin-macros@3.1.0)(ts-node@10.9.1(@types/node@16.18.11)(typescript@5.9.2)) + json5: 2.2.3 + lodash.memoize: 4.1.2 + make-error: 1.3.6 + semver: 7.7.2 + type-fest: 4.41.0 + typescript: 5.9.2 + yargs-parser: 21.1.1 + optionalDependencies: + "@babel/core": 7.28.3 + "@jest/transform": 30.1.2 + "@jest/types": 30.0.5 + babel-jest: 30.1.2(@babel/core@7.28.3) + jest-util: 30.0.5 + ts-morph@12.0.0: dependencies: "@ts-morph/common": 0.11.1 @@ -12660,8 +14889,14 @@ snapshots: dependencies: prelude-ls: 1.2.1 + type-detect@4.0.8: {} + + type-fest@0.21.3: {} + type-fest@0.7.1: {} + type-fest@4.41.0: {} + typed-array-buffer@1.0.3: dependencies: call-bound: 1.0.4 @@ -12717,8 +14952,6 @@ snapshots: udomdiff@1.1.2: {} - ufo@1.6.1: {} - uglify-js@3.19.3: optional: true @@ -12739,8 +14972,6 @@ snapshots: has-symbols: 1.1.0 which-boxed-primitive: 1.1.1 - uncrypto@0.1.3: {} - undici-types@7.10.0: {} undici@5.28.4: @@ -12818,6 +15049,8 @@ snapshots: uqr@0.1.2: {} + uri-js-replace@1.0.1: {} + uri-js@4.4.1: dependencies: punycode: 2.3.1 @@ -12853,6 +15086,12 @@ snapshots: v8-compile-cache-lib@3.0.1: {} + v8-to-istanbul@9.3.0: + dependencies: + "@jridgewell/trace-mapping": 0.3.30 + "@types/istanbul-lib-coverage": 2.0.6 + convert-source-map: 2.0.0 + vercel@37.14.0: dependencies: "@vercel/build-utils": 8.4.12 @@ -12883,6 +15122,10 @@ snapshots: "@types/unist": 3.0.3 vfile-message: 4.0.3 + walker@1.0.8: + dependencies: + makeerror: 1.0.12 + wavesurfer.js@7.10.1: {} web-vitals@0.2.4: {} @@ -12967,6 +15210,11 @@ snapshots: wrappy@1.0.2: {} + write-file-atomic@5.0.1: + dependencies: + imurmurhash: 0.1.4 + signal-exit: 4.1.0 + ws@8.17.1: {} xdg-app-paths@5.1.0: @@ -12979,14 +15227,30 @@ snapshots: xmlhttprequest-ssl@2.0.0: {} + y18n@5.0.8: {} + yallist@3.1.1: {} yallist@4.0.0: {} + yaml-ast-parser@0.0.43: {} + yaml@1.10.2: {} yaml@2.8.1: {} + yargs-parser@21.1.1: {} + + yargs@17.7.2: + dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + yauzl-clone@1.0.4: dependencies: events-intercept: 2.0.0 @@ -13005,4 +15269,6 @@ snapshots: yocto-queue@0.1.0: {} + zod@4.1.5: {} + zwitch@2.0.4: {} diff --git a/www/public/service-worker.js b/www/public/service-worker.js index 109561d5..e798e369 100644 --- a/www/public/service-worker.js +++ b/www/public/service-worker.js 
@@ -1,4 +1,4 @@ -let authToken = ""; // Variable to store the token +let authToken = null; self.addEventListener("message", (event) => { if (event.data && event.data.type === "SET_AUTH_TOKEN") { From 08d88ec349f38b0d13e0fa4cb73486c8dfd31836 Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Fri, 5 Sep 2025 18:39:32 -0400 Subject: [PATCH 16/77] fix: kv use tls explicit (#610) Co-authored-by: Igor Loskutov --- www/app/lib/redisClient.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/www/app/lib/redisClient.ts b/www/app/lib/redisClient.ts index 1be36538..753a0561 100644 --- a/www/app/lib/redisClient.ts +++ b/www/app/lib/redisClient.ts @@ -3,6 +3,10 @@ import { isBuildPhase } from "./next"; export type RedisClient = Pick; +const KV_USE_TLS = process.env.KV_USE_TLS + ? process.env.KV_USE_TLS === "true" + : undefined; + const getRedisClient = (): RedisClient => { const redisUrl = process.env.KV_URL; if (!redisUrl) { @@ -11,6 +15,11 @@ const getRedisClient = (): RedisClient => { const redis = new Redis(redisUrl, { maxRetriesPerRequest: 3, lazyConnect: true, + ...(KV_USE_TLS === true + ? { + tls: {}, + } + : {}), }); redis.on("error", (error) => { From 7f5a4c9ddc7fd098860c8bdda2ca3b57f63ded2f Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Fri, 5 Sep 2025 23:03:24 -0400 Subject: [PATCH 17/77] fix: token refresh locking (#613) * fix: kv use tls explicit * fix: token refresh locking * remove logs * compile fix * compile fix --------- Co-authored-by: Igor Loskutov --- www/app/lib/AuthProvider.tsx | 8 ++- www/app/lib/SessionAutoRefresh.tsx | 22 +++---- www/app/lib/authBackend.ts | 101 +++++++++++++++++++---------- www/app/lib/redisClient.ts | 47 ++++++++++---- www/app/lib/redisTokenCache.ts | 6 +- www/app/lib/useUserName.ts | 3 +- www/app/lib/utils.ts | 11 ++++ www/package.json | 1 + www/pnpm-lock.yaml | 22 +++++++ 9 files changed, 159 insertions(+), 62 deletions(-) diff --git a/www/app/lib/AuthProvider.tsx b/www/app/lib/AuthProvider.tsx index 96f49f87..6c09926b 100644 --- a/www/app/lib/AuthProvider.tsx +++ b/www/app/lib/AuthProvider.tsx @@ -8,10 +8,11 @@ import { assertCustomSession, CustomSession } from "./types"; import { Session } from "next-auth"; import { SessionAutoRefresh } from "./SessionAutoRefresh"; import { REFRESH_ACCESS_TOKEN_ERROR } from "./auth"; +import { assertExists } from "./utils"; type AuthContextType = ( | { status: "loading" } - | { status: "refreshing" } + | { status: "refreshing"; user: CustomSession["user"] } | { status: "unauthenticated"; error?: string } | { status: "authenticated"; @@ -41,7 +42,10 @@ export function AuthProvider({ children }: { children: React.ReactNode }) { return { status }; } case true: { - return { status: "refreshing" as const }; + return { + status: "refreshing" as const, + user: assertExists(customSession).user, + }; } default: { const _: never = sessionIsHere; diff --git a/www/app/lib/SessionAutoRefresh.tsx b/www/app/lib/SessionAutoRefresh.tsx index fd29367f..3729db8c 100644 --- a/www/app/lib/SessionAutoRefresh.tsx +++ b/www/app/lib/SessionAutoRefresh.tsx @@ -15,6 +15,7 @@ const REFRESH_BEFORE = REFRESH_ACCESS_TOKEN_BEFORE; export function SessionAutoRefresh({ children }) { const auth = useAuth(); + const accessTokenExpires = auth.status === "authenticated" ? 
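// Worked example of the refresh window (numbers taken from this patch set): with
// REFRESH_ACCESS_TOKEN_BEFORE at 4 * 60 * 1000 ms and the 5000 ms poll below, a token
// expiring at time T enters the window at T minus 4 minutes, and auth.update() fires on
// the first tick after that, i.e. within about 5 seconds, well before actual expiry.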
auth.accessTokenExpires : null; @@ -23,17 +24,16 @@ export function SessionAutoRefresh({ children }) { // and not too slow (debuggable) const INTERVAL_REFRESH_MS = 5000; const interval = setInterval(() => { - if (accessTokenExpires !== null) { - const timeLeft = accessTokenExpires - Date.now(); - if (timeLeft < REFRESH_BEFORE) { - auth - .update() - .then(() => {}) - .catch((e) => { - // note: 401 won't be considered error here - console.error("error refreshing auth token", e); - }); - } + if (accessTokenExpires === null) return; + const timeLeft = accessTokenExpires - Date.now(); + if (timeLeft < REFRESH_BEFORE) { + auth + .update() + .then(() => {}) + .catch((e) => { + // note: 401 won't be considered error here + console.error("error refreshing auth token", e); + }); } }, INTERVAL_REFRESH_MS); diff --git a/www/app/lib/authBackend.ts b/www/app/lib/authBackend.ts index af93b274..0b48f613 100644 --- a/www/app/lib/authBackend.ts +++ b/www/app/lib/authBackend.ts @@ -2,7 +2,11 @@ import { AuthOptions } from "next-auth"; import AuthentikProvider from "next-auth/providers/authentik"; import type { JWT } from "next-auth/jwt"; import { JWTWithAccessToken, CustomSession } from "./types"; -import { assertExists, assertExistsAndNonEmptyString } from "./utils"; +import { + assertExists, + assertExistsAndNonEmptyString, + assertNotExists, +} from "./utils"; import { REFRESH_ACCESS_TOKEN_BEFORE, REFRESH_ACCESS_TOKEN_ERROR, @@ -12,14 +16,10 @@ import { setTokenCache, deleteTokenCache, } from "./redisTokenCache"; -import { tokenCacheRedis } from "./redisClient"; +import { tokenCacheRedis, redlock } from "./redisClient"; import { isBuildPhase } from "./next"; -// REFRESH_ACCESS_TOKEN_BEFORE because refresh is based on access token expiration (imagine we cache it 30 days) const TOKEN_CACHE_TTL = REFRESH_ACCESS_TOKEN_BEFORE; - -const refreshLocks = new Map>(); - const CLIENT_ID = !isBuildPhase ? assertExistsAndNonEmptyString(process.env.AUTHENTIK_CLIENT_ID) : "noop"; @@ -45,31 +45,48 @@ export const authOptions: AuthOptions = { }, callbacks: { async jwt({ token, account, user }) { - const KEY = `token:${token.sub}`; + if (account && !account.access_token) { + await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`); + } if (account && user) { // called only on first login // XXX account.expires_in used in example is not defined for authentik backend, but expires_at is - const expiresAtS = assertExists(account.expires_at); - const expiresAtMs = expiresAtS * 1000; - if (!account.access_token) { - await deleteTokenCache(tokenCacheRedis, KEY); - } else { + if (account.access_token) { + const expiresAtS = assertExists(account.expires_at); + const expiresAtMs = expiresAtS * 1000; const jwtToken: JWTWithAccessToken = { ...token, accessToken: account.access_token, accessTokenExpires: expiresAtMs, refreshToken: account.refresh_token, }; - await setTokenCache(tokenCacheRedis, KEY, { - token: jwtToken, - timestamp: Date.now(), - }); - return jwtToken; + if (jwtToken.error) { + await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`); + } else { + assertNotExists( + jwtToken.error, + `panic! 
trying to cache token with error in jwt: ${jwtToken.error}`, + ); + await setTokenCache(tokenCacheRedis, `token:${token.sub}`, { + token: jwtToken, + timestamp: Date.now(), + }); + return jwtToken; + } } } - const currentToken = await getTokenCache(tokenCacheRedis, KEY); + const currentToken = await getTokenCache( + tokenCacheRedis, + `token:${token.sub}`, + ); + console.debug( + "currentToken from cache", + JSON.stringify(currentToken, null, 2), + "will be returned?", + currentToken && Date.now() < currentToken.token.accessTokenExpires, + ); if (currentToken && Date.now() < currentToken.token.accessTokenExpires) { return currentToken.token; } @@ -97,20 +114,22 @@ export const authOptions: AuthOptions = { async function lockedRefreshAccessToken( token: JWT, ): Promise { - const lockKey = `${token.sub}-refresh`; + const lockKey = `${token.sub}-lock`; - const existingRefresh = refreshLocks.get(lockKey); - if (existingRefresh) { - return await existingRefresh; - } - - const refreshPromise = (async () => { - try { + return redlock + .using([lockKey], 10000, async () => { const cached = await getTokenCache(tokenCacheRedis, `token:${token.sub}`); + if (cached) + console.debug( + "received cached token. to delete?", + Date.now() - cached.timestamp > TOKEN_CACHE_TTL, + ); + else console.debug("no cached token received"); if (cached) { if (Date.now() - cached.timestamp > TOKEN_CACHE_TTL) { await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`); } else if (Date.now() < cached.token.accessTokenExpires) { + console.debug("returning cached token", cached.token); return cached.token; } } @@ -118,19 +137,35 @@ async function lockedRefreshAccessToken( const currentToken = cached?.token || (token as JWTWithAccessToken); const newToken = await refreshAccessToken(currentToken); + console.debug("current token during refresh", currentToken); + console.debug("new token during refresh", newToken); + + if (newToken.error) { + await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`); + return newToken; + } + + assertNotExists( + newToken.error, + `panic! trying to cache token with error during refresh: ${newToken.error}`, + ); await setTokenCache(tokenCacheRedis, `token:${token.sub}`, { token: newToken, timestamp: Date.now(), }); return newToken; - } finally { - setTimeout(() => refreshLocks.delete(lockKey), 100); - } - })(); - - refreshLocks.set(lockKey, refreshPromise); - return refreshPromise; + }) + .catch((e) => { + console.error("error refreshing token", e); + deleteTokenCache(tokenCacheRedis, `token:${token.sub}`).catch((e) => { + console.error("error deleting errored token", e); + }); + return { + ...token, + error: REFRESH_ACCESS_TOKEN_ERROR, + } as JWTWithAccessToken; + }); } async function refreshAccessToken(token: JWT): Promise { diff --git a/www/app/lib/redisClient.ts b/www/app/lib/redisClient.ts index 753a0561..aeb3595b 100644 --- a/www/app/lib/redisClient.ts +++ b/www/app/lib/redisClient.ts @@ -1,20 +1,29 @@ import Redis from "ioredis"; import { isBuildPhase } from "./next"; +import Redlock, { ResourceLockedError } from "redlock"; export type RedisClient = Pick; - +export type RedlockClient = { + using: ( + keys: string | string[], + ttl: number, + cb: () => Promise, + ) => Promise; +}; const KV_USE_TLS = process.env.KV_USE_TLS ? 
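// Tri-state parse of the flag, spelled out (behaviour as implemented here):
//   KV_USE_TLS unset    -> undefined (no explicit option; the KV_URL scheme decides)
//   KV_USE_TLS="true"   -> true      (spreads { tls: {} } into the ioredis options below)
//   KV_USE_TLS="false"  -> false     (explicitly, no tls option is added)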
process.env.KV_USE_TLS === "true" : undefined; +let redisClient: Redis | null = null; + const getRedisClient = (): RedisClient => { + if (redisClient) return redisClient; const redisUrl = process.env.KV_URL; if (!redisUrl) { throw new Error("KV_URL environment variable is required"); } - const redis = new Redis(redisUrl, { + redisClient = new Redis(redisUrl, { maxRetriesPerRequest: 3, - lazyConnect: true, ...(KV_USE_TLS === true ? { tls: {}, @@ -22,18 +31,11 @@ const getRedisClient = (): RedisClient => { : {}), }); - redis.on("error", (error) => { + redisClient.on("error", (error) => { console.error("Redis error:", error); }); - // not necessary but will indicate redis config errors by failfast at startup - // happens only once; after that connection is allowed to die and the lib is assumed to be able to restore it eventually - redis.connect().catch((e) => { - console.error("Failed to connect to Redis:", e); - process.exit(1); - }); - - return redis; + return redisClient; }; // next.js buildtime usage - we want to isolate next.js "build" time concepts here @@ -52,4 +54,25 @@ const noopClient: RedisClient = (() => { del: noopDel, }; })(); + +const noopRedlock: RedlockClient = { + using: (resource: string | string[], ttl: number, cb: () => Promise) => + cb(), +}; + +export const redlock: RedlockClient = isBuildPhase + ? noopRedlock + : (() => { + const r = new Redlock([getRedisClient()], {}); + r.on("error", (error) => { + if (error instanceof ResourceLockedError) { + return; + } + + // Log all other errors. + console.error(error); + }); + return r; + })(); + export const tokenCacheRedis = isBuildPhase ? noopClient : getRedisClient(); diff --git a/www/app/lib/redisTokenCache.ts b/www/app/lib/redisTokenCache.ts index 4fa4e304..a8b720ef 100644 --- a/www/app/lib/redisTokenCache.ts +++ b/www/app/lib/redisTokenCache.ts @@ -9,7 +9,6 @@ const TokenCacheEntrySchema = z.object({ accessToken: z.string(), accessTokenExpires: z.number(), refreshToken: z.string().optional(), - error: z.string().optional(), }), timestamp: z.number(), }); @@ -46,14 +45,15 @@ export async function getTokenCache( } } +const TTL_SECONDS = 30 * 24 * 60 * 60; + export async function setTokenCache( redis: KV, key: string, value: TokenCacheEntry, ): Promise { const encodedValue = TokenCacheEntryCodec.encode(value); - const ttlSeconds = Math.floor(REFRESH_ACCESS_TOKEN_BEFORE / 1000); - await redis.setex(key, ttlSeconds, encodedValue); + await redis.setex(key, TTL_SECONDS, encodedValue); } export async function deleteTokenCache(redis: KV, key: string): Promise { diff --git a/www/app/lib/useUserName.ts b/www/app/lib/useUserName.ts index 80814281..46850176 100644 --- a/www/app/lib/useUserName.ts +++ b/www/app/lib/useUserName.ts @@ -2,6 +2,7 @@ import { useAuth } from "./AuthProvider"; export const useUserName = (): string | null | undefined => { const auth = useAuth(); - if (auth.status !== "authenticated") return undefined; + if (auth.status !== "authenticated" && auth.status !== "refreshing") + return undefined; return auth.user?.name || null; }; diff --git a/www/app/lib/utils.ts b/www/app/lib/utils.ts index 122ab234..8e8651ff 100644 --- a/www/app/lib/utils.ts +++ b/www/app/lib/utils.ts @@ -158,6 +158,17 @@ export const assertExists = ( return value; }; +export const assertNotExists = ( + value: T | null | undefined, + err?: string, +): void => { + if (value !== null && value !== undefined) { + throw new Error( + `Assertion failed: ${err ?? 
"value is not null or undefined"}`, + ); + } +}; + export const assertExistsAndNonEmptyString = ( value: string | null | undefined, ): NonEmptyString => diff --git a/www/package.json b/www/package.json index b7511147..27e30a5f 100644 --- a/www/package.json +++ b/www/package.json @@ -45,6 +45,7 @@ "react-markdown": "^9.0.0", "react-qr-code": "^2.0.12", "react-select-search": "^4.1.7", + "redlock": "5.0.0-beta.2", "sass": "^1.63.6", "simple-peer": "^9.11.1", "tailwindcss": "^3.3.2", diff --git a/www/pnpm-lock.yaml b/www/pnpm-lock.yaml index 14b42c55..f4346855 100644 --- a/www/pnpm-lock.yaml +++ b/www/pnpm-lock.yaml @@ -106,6 +106,9 @@ importers: react-select-search: specifier: ^4.1.7 version: 4.1.8(prop-types@15.8.1)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + redlock: + specifier: 5.0.0-beta.2 + version: 5.0.0-beta.2 sass: specifier: ^1.63.6 version: 1.90.0 @@ -6566,6 +6569,12 @@ packages: sass: optional: true + node-abort-controller@3.1.1: + resolution: + { + integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==, + } + node-addon-api@7.1.1: resolution: { @@ -7433,6 +7442,13 @@ packages: } engines: { node: ">=4" } + redlock@5.0.0-beta.2: + resolution: + { + integrity: sha512-2RDWXg5jgRptDrB1w9O/JgSZC0j7y4SlaXnor93H/UJm/QyDiFgBKNtrh0TI6oCXqYSaSoXxFh6Sd3VtYfhRXw==, + } + engines: { node: ">=12" } + redux-thunk@3.1.0: resolution: { @@ -13812,6 +13828,8 @@ snapshots: - "@babel/core" - babel-plugin-macros + node-abort-controller@3.1.1: {} + node-addon-api@7.1.1: optional: true @@ -14290,6 +14308,10 @@ snapshots: dependencies: redis-errors: 1.2.0 + redlock@5.0.0-beta.2: + dependencies: + node-abort-controller: 3.1.1 + redux-thunk@3.1.0(redux@5.0.1): dependencies: redux: 5.0.1 From 02a3938822f7167125bc8a3a250eda6ea850f273 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Fri, 5 Sep 2025 22:50:10 -0600 Subject: [PATCH 18/77] chore(main): release 0.9.0 (#603) --- CHANGELOG.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 433691e9..987a6579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## [0.9.0](https://github.com/Monadical-SAS/reflector/compare/v0.8.2...v0.9.0) (2025-09-06) + + +### Features + +* frontend openapi react query ([#606](https://github.com/Monadical-SAS/reflector/issues/606)) ([c4d2825](https://github.com/Monadical-SAS/reflector/commit/c4d2825c81f81ad8835629fbf6ea8c7383f8c31b)) + + +### Bug Fixes + +* align whisper transcriber api with parakeet ([#602](https://github.com/Monadical-SAS/reflector/issues/602)) ([0663700](https://github.com/Monadical-SAS/reflector/commit/0663700a615a4af69a03c96c410f049e23ec9443)) +* kv use tls explicit ([#610](https://github.com/Monadical-SAS/reflector/issues/610)) ([08d88ec](https://github.com/Monadical-SAS/reflector/commit/08d88ec349f38b0d13e0fa4cb73486c8dfd31836)) +* source kind for file processing ([#601](https://github.com/Monadical-SAS/reflector/issues/601)) ([dc82f8b](https://github.com/Monadical-SAS/reflector/commit/dc82f8bb3bdf3ab3d4088e592a30fd63907319e1)) +* token refresh locking ([#613](https://github.com/Monadical-SAS/reflector/issues/613)) ([7f5a4c9](https://github.com/Monadical-SAS/reflector/commit/7f5a4c9ddc7fd098860c8bdda2ca3b57f63ded2f)) + ## [0.8.2](https://github.com/Monadical-SAS/reflector/compare/v0.8.1...v0.8.2) (2025-08-29) From 5a5b3233820df9536da75e87ce6184a983d4713a Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Mon, 8 Sep 2025 10:40:18 -0400 Subject: [PATCH 19/77] fix: sync backend 
and frontend token refresh logic (#614) * sync backend and frontend token refresh logic * return react strict mode --------- Co-authored-by: Igor Loskutov --- www/app/lib/SessionAutoRefresh.tsx | 7 ++----- www/app/lib/auth.ts | 5 +++++ www/app/lib/authBackend.ts | 11 ++++++++--- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/www/app/lib/SessionAutoRefresh.tsx b/www/app/lib/SessionAutoRefresh.tsx index 3729db8c..6b26077d 100644 --- a/www/app/lib/SessionAutoRefresh.tsx +++ b/www/app/lib/SessionAutoRefresh.tsx @@ -9,9 +9,7 @@ import { useEffect } from "react"; import { useAuth } from "./AuthProvider"; -import { REFRESH_ACCESS_TOKEN_BEFORE } from "./auth"; - -const REFRESH_BEFORE = REFRESH_ACCESS_TOKEN_BEFORE; +import { shouldRefreshToken } from "./auth"; export function SessionAutoRefresh({ children }) { const auth = useAuth(); @@ -25,8 +23,7 @@ export function SessionAutoRefresh({ children }) { const INTERVAL_REFRESH_MS = 5000; const interval = setInterval(() => { if (accessTokenExpires === null) return; - const timeLeft = accessTokenExpires - Date.now(); - if (timeLeft < REFRESH_BEFORE) { + if (shouldRefreshToken(accessTokenExpires)) { auth .update() .then(() => {}) diff --git a/www/app/lib/auth.ts b/www/app/lib/auth.ts index f6e60513..c83db264 100644 --- a/www/app/lib/auth.ts +++ b/www/app/lib/auth.ts @@ -2,6 +2,11 @@ export const REFRESH_ACCESS_TOKEN_ERROR = "RefreshAccessTokenError" as const; // 4 min is 1 min less than default authentic value. here we assume that authentic won't be set to access tokens < 4 min export const REFRESH_ACCESS_TOKEN_BEFORE = 4 * 60 * 1000; +export const shouldRefreshToken = (accessTokenExpires: number): boolean => { + const timeLeft = accessTokenExpires - Date.now(); + return timeLeft < REFRESH_ACCESS_TOKEN_BEFORE; +}; + export const LOGIN_REQUIRED_PAGES = [ "/transcripts/[!new]", "/browse(.*)", diff --git a/www/app/lib/authBackend.ts b/www/app/lib/authBackend.ts index 0b48f613..06bddff2 100644 --- a/www/app/lib/authBackend.ts +++ b/www/app/lib/authBackend.ts @@ -10,6 +10,7 @@ import { import { REFRESH_ACCESS_TOKEN_BEFORE, REFRESH_ACCESS_TOKEN_ERROR, + shouldRefreshToken, } from "./auth"; import { getTokenCache, @@ -85,9 +86,13 @@ export const authOptions: AuthOptions = { "currentToken from cache", JSON.stringify(currentToken, null, 2), "will be returned?", - currentToken && Date.now() < currentToken.token.accessTokenExpires, + currentToken && + !shouldRefreshToken(currentToken.token.accessTokenExpires), ); - if (currentToken && Date.now() < currentToken.token.accessTokenExpires) { + if ( + currentToken && + !shouldRefreshToken(currentToken.token.accessTokenExpires) + ) { return currentToken.token; } @@ -128,7 +133,7 @@ async function lockedRefreshAccessToken( if (cached) { if (Date.now() - cached.timestamp > TOKEN_CACHE_TTL) { await deleteTokenCache(tokenCacheRedis, `token:${token.sub}`); - } else if (Date.now() < cached.token.accessTokenExpires) { + } else if (!shouldRefreshToken(cached.token.accessTokenExpires)) { console.debug("returning cached token", cached.token); return cached.token; } From f81fe9948a9237b3e0001b2d8ca84f54d76878f9 Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Tue, 9 Sep 2025 10:50:29 -0400 Subject: [PATCH 20/77] fix: anonymous users transcript permissions (#621) * fix: public transcript visibility * fix: transcript permissions frontend * dead code removal * chore: remove unused code * fix search tests * fix search tests --------- Co-authored-by: Igor Loskutov --- server/reflector/db/search.py | 4 +- 
server/reflector/views/rooms.py | 12 ++-- server/reflector/views/transcripts.py | 2 - server/tests/test_search.py | 16 ++--- server/tests/test_search_long_summary.py | 4 +- .../_components/TranscriptStatusIcon.tsx | 3 +- .../[transcriptId]/_components/TopicList.tsx | 3 +- .../[transcriptId]/correct/page.tsx | 12 ++-- .../(app)/transcripts/[transcriptId]/page.tsx | 35 ++++++---- .../[transcriptId]/record/page.tsx | 13 ++-- .../[transcriptId]/upload/page.tsx | 16 ++--- www/app/(app)/transcripts/recorder.tsx | 3 +- www/app/(app)/transcripts/useTranscript.ts | 69 ------------------- www/app/(app)/transcripts/useWebSockets.ts | 4 +- www/app/(app)/transcripts/webSocketTypes.ts | 3 +- www/app/lib/apiHooks.ts | 16 ++--- www/app/lib/transcript.ts | 5 ++ www/app/reflector-api.d.ts | 13 +++- 18 files changed, 90 insertions(+), 143 deletions(-) delete mode 100644 www/app/(app)/transcripts/useTranscript.ts create mode 100644 www/app/lib/transcript.ts diff --git a/server/reflector/db/search.py b/server/reflector/db/search.py index 66a25ccf..caa21c65 100644 --- a/server/reflector/db/search.py +++ b/server/reflector/db/search.py @@ -23,7 +23,7 @@ from pydantic import ( from reflector.db import get_database from reflector.db.rooms import rooms -from reflector.db.transcripts import SourceKind, transcripts +from reflector.db.transcripts import SourceKind, TranscriptStatus, transcripts from reflector.db.utils import is_postgresql from reflector.logger import logger from reflector.utils.string import NonEmptyString, try_parse_non_empty_string @@ -161,7 +161,7 @@ class SearchResult(BaseModel): room_name: str | None = None source_kind: SourceKind created_at: datetime - status: str = Field(..., min_length=1) + status: TranscriptStatus = Field(..., min_length=1) rank: float = Field(..., ge=0, le=1) duration: NonNegativeFloat | None = Field(..., description="Duration in seconds") search_snippets: list[str] = Field( diff --git a/server/reflector/views/rooms.py b/server/reflector/views/rooms.py index cc00f3c0..38b611d6 100644 --- a/server/reflector/views/rooms.py +++ b/server/reflector/views/rooms.py @@ -215,14 +215,10 @@ async def rooms_create_meeting( except (asyncpg.exceptions.UniqueViolationError, sqlite3.IntegrityError): # Another request already created a meeting for this room # Log this race condition occurrence - logger.info( - "Race condition detected for room %s - fetching existing meeting", - room.name, - ) logger.warning( - "Whereby meeting %s was created but not used (resource leak) for room %s", - whereby_meeting["meetingId"], + "Race condition detected for room %s and meeting %s - fetching existing meeting", room.name, + whereby_meeting["meetingId"], ) # Fetch the meeting that was created by the other request @@ -232,7 +228,9 @@ async def rooms_create_meeting( if meeting is None: # Edge case: meeting was created but expired/deleted between checks logger.error( - "Meeting disappeared after race condition for room %s", room.name + "Meeting disappeared after race condition for room %s", + room.name, + exc_info=True, ) raise HTTPException( status_code=503, detail="Unable to join meeting - please try again" diff --git a/server/reflector/views/transcripts.py b/server/reflector/views/transcripts.py index 9acfcbf8..ed2445ae 100644 --- a/server/reflector/views/transcripts.py +++ b/server/reflector/views/transcripts.py @@ -350,8 +350,6 @@ async def transcript_update( transcript = await transcripts_controller.get_by_id_for_http( transcript_id, user_id=user_id ) - if not transcript: - raise 
HTTPException(status_code=404, detail="Transcript not found") values = info.dict(exclude_unset=True) updated_transcript = await transcripts_controller.update(transcript, values) return updated_transcript diff --git a/server/tests/test_search.py b/server/tests/test_search.py index 0f5c8923..82890080 100644 --- a/server/tests/test_search.py +++ b/server/tests/test_search.py @@ -58,7 +58,7 @@ async def test_empty_transcript_title_only_match(): "id": test_id, "name": "Empty Transcript", "title": "Empty Meeting", - "status": "completed", + "status": "ended", "locked": False, "duration": 0.0, "created_at": datetime.now(timezone.utc), @@ -109,7 +109,7 @@ async def test_search_with_long_summary(): "id": test_id, "name": "Test Long Summary", "title": "Regular Meeting", - "status": "completed", + "status": "ended", "locked": False, "duration": 1800.0, "created_at": datetime.now(timezone.utc), @@ -165,7 +165,7 @@ async def test_postgresql_search_with_data(): "id": test_id, "name": "Test Search Transcript", "title": "Engineering Planning Meeting Q4 2024", - "status": "completed", + "status": "ended", "locked": False, "duration": 1800.0, "created_at": datetime.now(timezone.utc), @@ -221,7 +221,7 @@ We need to implement PostgreSQL tsvector for better performance.""", test_result = next((r for r in results if r.id == test_id), None) if test_result: assert test_result.title == "Engineering Planning Meeting Q4 2024" - assert test_result.status == "completed" + assert test_result.status == "ended" assert test_result.duration == 1800.0 assert 0 <= test_result.rank <= 1, "Rank should be normalized to 0-1" @@ -268,7 +268,7 @@ def mock_db_result(): "title": "Test Transcript", "created_at": datetime(2024, 6, 15, tzinfo=timezone.utc), "duration": 3600.0, - "status": "completed", + "status": "ended", "user_id": "test-user", "room_id": "room1", "source_kind": SourceKind.LIVE, @@ -433,7 +433,7 @@ class TestSearchResultModel: room_id="room-456", source_kind=SourceKind.ROOM, created_at=datetime(2024, 6, 15, tzinfo=timezone.utc), - status="completed", + status="ended", rank=0.85, duration=1800.5, search_snippets=["snippet 1", "snippet 2"], @@ -443,7 +443,7 @@ class TestSearchResultModel: assert result.title == "Test Title" assert result.user_id == "user-123" assert result.room_id == "room-456" - assert result.status == "completed" + assert result.status == "ended" assert result.rank == 0.85 assert result.duration == 1800.5 assert len(result.search_snippets) == 2 @@ -474,7 +474,7 @@ class TestSearchResultModel: id="test-id", source_kind=SourceKind.LIVE, created_at=datetime(2024, 6, 15, 12, 30, 45, tzinfo=timezone.utc), - status="completed", + status="ended", rank=0.9, duration=None, search_snippets=[], diff --git a/server/tests/test_search_long_summary.py b/server/tests/test_search_long_summary.py index 8857778b..3f911a99 100644 --- a/server/tests/test_search_long_summary.py +++ b/server/tests/test_search_long_summary.py @@ -25,7 +25,7 @@ async def test_long_summary_snippet_prioritization(): "id": test_id, "name": "Test Snippet Priority", "title": "Meeting About Projects", - "status": "completed", + "status": "ended", "locked": False, "duration": 1800.0, "created_at": datetime.now(timezone.utc), @@ -106,7 +106,7 @@ async def test_long_summary_only_search(): "id": test_id, "name": "Test Long Only", "title": "Standard Meeting", - "status": "completed", + "status": "ended", "locked": False, "duration": 1800.0, "created_at": datetime.now(timezone.utc), diff --git a/www/app/(app)/browse/_components/TranscriptStatusIcon.tsx 
b/www/app/(app)/browse/_components/TranscriptStatusIcon.tsx index 0eebadc8..20164993 100644 --- a/www/app/(app)/browse/_components/TranscriptStatusIcon.tsx +++ b/www/app/(app)/browse/_components/TranscriptStatusIcon.tsx @@ -7,9 +7,10 @@ import { FaMicrophone, FaGear, } from "react-icons/fa6"; +import { TranscriptStatus } from "../../../lib/transcript"; interface TranscriptStatusIconProps { - status: string; + status: TranscriptStatus; } export default function TranscriptStatusIcon({ diff --git a/www/app/(app)/transcripts/[transcriptId]/_components/TopicList.tsx b/www/app/(app)/transcripts/[transcriptId]/_components/TopicList.tsx index 1f5d1588..534f0c0a 100644 --- a/www/app/(app)/transcripts/[transcriptId]/_components/TopicList.tsx +++ b/www/app/(app)/transcripts/[transcriptId]/_components/TopicList.tsx @@ -5,6 +5,7 @@ import useParticipants from "../../useParticipants"; import { Box, Flex, Text, Accordion } from "@chakra-ui/react"; import { featureEnabled } from "../../../../domainContext"; import { TopicItem } from "./TopicItem"; +import { TranscriptStatus } from "../../../../lib/transcript"; type TopicListProps = { topics: Topic[]; @@ -14,7 +15,7 @@ type TopicListProps = { ]; autoscroll: boolean; transcriptId: string; - status: string; + status: TranscriptStatus | null; currentTranscriptText: any; }; diff --git a/www/app/(app)/transcripts/[transcriptId]/correct/page.tsx b/www/app/(app)/transcripts/[transcriptId]/correct/page.tsx index c885ca6e..1c7705f4 100644 --- a/www/app/(app)/transcripts/[transcriptId]/correct/page.tsx +++ b/www/app/(app)/transcripts/[transcriptId]/correct/page.tsx @@ -9,8 +9,10 @@ import ParticipantList from "./participantList"; import type { components } from "../../../../reflector-api"; type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"]; import { SelectedText, selectedTextIsTimeSlice } from "./types"; -import { useTranscriptUpdate } from "../../../../lib/apiHooks"; -import useTranscript from "../../useTranscript"; +import { + useTranscriptGet, + useTranscriptUpdate, +} from "../../../../lib/apiHooks"; import { useError } from "../../../../(errors)/errorContext"; import { useRouter } from "next/navigation"; import { Box, Grid } from "@chakra-ui/react"; @@ -25,7 +27,7 @@ export default function TranscriptCorrect({ params: { transcriptId }, }: TranscriptCorrect) { const updateTranscriptMutation = useTranscriptUpdate(); - const transcript = useTranscript(transcriptId); + const transcript = useTranscriptGet(transcriptId); const stateCurrentTopic = useState(); const [currentTopic, _sct] = stateCurrentTopic; const stateSelectedText = useState(); @@ -36,7 +38,7 @@ export default function TranscriptCorrect({ const router = useRouter(); const markAsDone = async () => { - if (transcript.response && !transcript.response.reviewed) { + if (transcript.data && !transcript.data.reviewed) { try { await updateTranscriptMutation.mutateAsync({ params: { @@ -114,7 +116,7 @@ export default function TranscriptCorrect({ }} /> - {transcript.response && !transcript.response?.reviewed && ( + {transcript.data && !transcript.data?.reviewed && (
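// transcript here is the useTranscriptGet(transcriptId) query result, so the payload
// lives on .data (the openapi-react-query convention used across the app) rather than
// the old useTranscript(...).response field removed by this patch.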
+ + )} + + {syncResult && syncStatus === "success" && ( + + + + Sync completed + + + {syncResult.totalEvents} events downloaded,{" "} + {syncResult.eventsFound} match this room + + {(syncResult.eventsCreated > 0 || + syncResult.eventsUpdated > 0) && ( + + {syncResult.eventsCreated} created,{" "} + {syncResult.eventsUpdated} updated + + )} + + + )} + + {syncMessage && ( + + + {syncMessage} + + + )} + + {icsLastSync && ( + + + + Last sync: {new Date(icsLastSync).toLocaleString()} + + {icsLastEtag && ( + + ETag: {icsLastEtag.slice(0, 8)}... + + )} + + )} + + )} + + ); +} diff --git a/www/app/(app)/rooms/_components/RoomList.tsx b/www/app/(app)/rooms/_components/RoomList.tsx index 218c890c..8cd83277 100644 --- a/www/app/(app)/rooms/_components/RoomList.tsx +++ b/www/app/(app)/rooms/_components/RoomList.tsx @@ -4,12 +4,13 @@ import type { components } from "../../../reflector-api"; type Room = components["schemas"]["Room"]; import { RoomTable } from "./RoomTable"; import { RoomCards } from "./RoomCards"; +import { NonEmptyString } from "../../../lib/utils"; interface RoomListProps { title: string; rooms: Room[]; linkCopied: string; - onCopyUrl: (roomName: string) => void; + onCopyUrl: (roomName: NonEmptyString) => void; onEdit: (roomId: string, roomData: any) => void; onDelete: (roomId: string) => void; emptyMessage?: string; diff --git a/www/app/(app)/rooms/_components/RoomTable.tsx b/www/app/(app)/rooms/_components/RoomTable.tsx index 113eca7f..ca6c2214 100644 --- a/www/app/(app)/rooms/_components/RoomTable.tsx +++ b/www/app/(app)/rooms/_components/RoomTable.tsx @@ -1,4 +1,4 @@ -import React from "react"; +import React, { useState } from "react"; import { Box, Table, @@ -7,17 +7,58 @@ import { IconButton, Text, Spinner, + Badge, + VStack, + Icon, } from "@chakra-ui/react"; -import { LuLink } from "react-icons/lu"; +import { LuLink, LuRefreshCw } from "react-icons/lu"; +import { FaCalendarAlt } from "react-icons/fa"; import type { components } from "../../../reflector-api"; +import { + useRoomActiveMeetings, + useRoomUpcomingMeetings, + useRoomIcsSync, +} from "../../../lib/apiHooks"; type Room = components["schemas"]["Room"]; +type Meeting = components["schemas"]["Meeting"]; +type CalendarEventResponse = components["schemas"]["CalendarEventResponse"]; import { RoomActionsMenu } from "./RoomActionsMenu"; +import { MEETING_DEFAULT_TIME_MINUTES } from "../../../[roomName]/[meetingId]/constants"; +import { NonEmptyString, parseNonEmptyString } from "../../../lib/utils"; + +// Custom icon component that combines calendar and refresh icons +const CalendarSyncIcon = () => ( + + + + + + +); interface RoomTableProps { rooms: Room[]; linkCopied: string; - onCopyUrl: (roomName: string) => void; + onCopyUrl: (roomName: NonEmptyString) => void; onEdit: (roomId: string, roomData: any) => void; onDelete: (roomId: string) => void; loading?: boolean; @@ -63,6 +104,71 @@ const getZulipDisplay = ( return "Enabled"; }; +function MeetingStatus({ roomName }: { roomName: string }) { + const activeMeetingsQuery = useRoomActiveMeetings(roomName); + const upcomingMeetingsQuery = useRoomUpcomingMeetings(roomName); + + const activeMeetings = activeMeetingsQuery.data || []; + const upcomingMeetings = upcomingMeetingsQuery.data || []; + + if (activeMeetingsQuery.isLoading || upcomingMeetingsQuery.isLoading) { + return ; + } + + if (activeMeetings.length > 0) { + const meeting = activeMeetings[0]; + const title = String( + meeting.calendar_metadata?.["title"] || "Active Meeting", + ); + return ( + + + {title} + + + 
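{/* Active branch of MeetingStatus: badge plus meeting title above, live participant count
    below; num_clients comes from the Meeting objects returned by useRoomActiveMeetings. */}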
{meeting.num_clients} participants + + + ); + } + + if (upcomingMeetings.length > 0) { + const event = upcomingMeetings[0]; + const startTime = new Date(event.start_time); + const now = new Date(); + const diffMinutes = Math.floor( + (startTime.getTime() - now.getTime()) / 60000, + ); + + return ( + + + {diffMinutes < MEETING_DEFAULT_TIME_MINUTES + ? `In ${diffMinutes}m` + : "Upcoming"} + + + {event.title || "Scheduled Meeting"} + + + {startTime.toLocaleTimeString("en-US", { + hour: "2-digit", + minute: "2-digit", + month: "short", + day: "numeric", + })} + + + ); + } + + return ( + + No meetings + + ); +} + export function RoomTable({ rooms, linkCopied, @@ -71,6 +177,30 @@ export function RoomTable({ onDelete, loading, }: RoomTableProps) { + const [syncingRooms, setSyncingRooms] = useState>( + new Set(), + ); + const syncMutation = useRoomIcsSync(); + + const handleForceSync = async (roomName: NonEmptyString) => { + setSyncingRooms((prev) => new Set(prev).add(roomName)); + try { + await syncMutation.mutateAsync({ + params: { + path: { room_name: roomName }, + }, + }); + } catch (err) { + console.error("Failed to sync calendar:", err); + } finally { + setSyncingRooms((prev) => { + const next = new Set(prev); + next.delete(roomName); + return next; + }); + } + }; + return ( {loading && ( @@ -97,13 +227,16 @@ export function RoomTable({ Room Name - - Zulip - - - Room Size + + Current Meeting + Zulip + + + Room Size + + Recording {room.name} + + + {getZulipDisplay( room.zulip_auto_post, @@ -133,7 +269,26 @@ export function RoomTable({ )} - + + {room.ics_enabled && ( + + handleForceSync(parseNonEmptyString(room.name)) + } + size="sm" + variant="ghost" + disabled={syncingRooms.has( + parseNonEmptyString(room.name), + )} + > + {syncingRooms.has(parseNonEmptyString(room.name)) ? ( + + ) : ( + + )} + + )} {linkCopied === room.name ? ( Copied! 
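A note on the countdown arithmetic used by MeetingStatus above; a minimal self-contained
sketch of the same logic (the helper name is ours, not part of the patch, and the default
window mirrors MEETING_DEFAULT_TIME_MINUTES = 60):

// Minute-level countdown label for an upcoming meeting, "Upcoming" outside the window;
// same branch logic as the Badge rendering in MeetingStatus.
const upcomingLabel = (startTime: Date, now: Date, windowMinutes = 60): string => {
  const diffMinutes = Math.floor((startTime.getTime() - now.getTime()) / 60000);
  return diffMinutes < windowMinutes ? `In ${diffMinutes}m` : "Upcoming";
};

// e.g. a meeting starting 25 minutes from now:
// upcomingLabel(new Date(Date.now() + 25 * 60000), new Date()) === "In 25m"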
@@ -141,7 +296,9 @@ export function RoomTable({ ) : ( onCopyUrl(room.name)} + onClick={() => + onCopyUrl(parseNonEmptyString(room.name)) + } size="sm" variant="ghost" > diff --git a/www/app/(app)/rooms/page.tsx b/www/app/(app)/rooms/page.tsx index 8b1378df..88e66720 100644 --- a/www/app/(app)/rooms/page.tsx +++ b/www/app/(app)/rooms/page.tsx @@ -14,6 +14,7 @@ import { IconButton, createListCollection, useDisclosure, + Tabs, } from "@chakra-ui/react"; import { useEffect, useMemo, useState } from "react"; import { LuEye, LuEyeOff } from "react-icons/lu"; @@ -30,7 +31,13 @@ import { } from "../../lib/apiHooks"; import { RoomList } from "./_components/RoomList"; import { PaginationPage } from "../browse/_components/Pagination"; -import { assertExists } from "../../lib/utils"; +import { + assertExists, + NonEmptyString, + parseNonEmptyString, +} from "../../lib/utils"; +import ICSSettings from "./_components/ICSSettings"; +import { roomAbsoluteUrl } from "../../lib/routesClient"; type Room = components["schemas"]["Room"]; @@ -40,6 +47,8 @@ interface SelectOption { } const RESERVED_PATHS = ["browse", "rooms", "transcripts"]; +const SUCCESS_EMOJI = "✅"; +const ERROR_EMOJI = "❌"; const roomModeOptions: SelectOption[] = [ { label: "2-4 people", value: "normal" }, @@ -70,6 +79,9 @@ const roomInitialState = { isShared: false, webhookUrl: "", webhookSecret: "", + icsUrl: "", + icsEnabled: false, + icsFetchInterval: 5, }; export default function RoomsList() { @@ -137,6 +149,9 @@ export default function RoomsList() { isShared: detailedEditedRoom.is_shared, webhookUrl: detailedEditedRoom.webhook_url || "", webhookSecret: detailedEditedRoom.webhook_secret || "", + icsUrl: detailedEditedRoom.ics_url || "", + icsEnabled: detailedEditedRoom.ics_enabled || false, + icsFetchInterval: detailedEditedRoom.ics_fetch_interval || 5, } : null, [detailedEditedRoom], @@ -176,14 +191,13 @@ export default function RoomsList() { items: topicOptions, }); - const handleCopyUrl = (roomName: string) => { - const roomUrl = `${window.location.origin}/${roomName}`; - navigator.clipboard.writeText(roomUrl); - setLinkCopied(roomName); - - setTimeout(() => { - setLinkCopied(""); - }, 2000); + const handleCopyUrl = (roomName: NonEmptyString) => { + navigator.clipboard.writeText(roomAbsoluteUrl(roomName)).then(() => { + setLinkCopied(roomName); + setTimeout(() => { + setLinkCopied(""); + }, 2000); + }); }; const handleCloseDialog = () => { @@ -217,10 +231,10 @@ export default function RoomsList() { if (response.success) { setWebhookTestResult( - `✅ Webhook test successful! Status: ${response.status_code}`, + `${SUCCESS_EMOJI} Webhook test successful! 
Status: ${response.status_code}`, ); } else { - let errorMsg = `❌ Webhook test failed`; + let errorMsg = `${ERROR_EMOJI} Webhook test failed`; errorMsg += ` (Status: ${response.status_code})`; if (response.error) { errorMsg += `: ${response.error}`; @@ -275,6 +289,9 @@ export default function RoomsList() { is_shared: room.isShared, webhook_url: room.webhookUrl, webhook_secret: room.webhookSecret, + ics_url: room.icsUrl, + ics_enabled: room.icsEnabled, + ics_fetch_interval: room.icsFetchInterval, }; if (isEditing) { @@ -316,6 +333,22 @@ export default function RoomsList() { setShowWebhookSecret(false); setWebhookTestResult(null); + setRoomInput({ + name: roomData.name, + zulipAutoPost: roomData.zulip_auto_post, + zulipStream: roomData.zulip_stream, + zulipTopic: roomData.zulip_topic, + isLocked: roomData.is_locked, + roomMode: roomData.room_mode, + recordingType: roomData.recording_type, + recordingTrigger: roomData.recording_trigger, + isShared: roomData.is_shared, + webhookUrl: roomData.webhook_url || "", + webhookSecret: roomData.webhook_secret || "", + icsUrl: roomData.ics_url || "", + icsEnabled: roomData.ics_enabled || false, + icsFetchInterval: roomData.ics_fetch_interval || 5, + }); setEditRoomId(roomId); setIsEditing(true); setNameError(""); @@ -416,353 +449,407 @@ export default function RoomsList() { - - Room name - - - No spaces or special characters allowed - - {nameError && {nameError}} - + + + General + Calendar + Share + WebHook + - - { - const syntheticEvent = { - target: { - name: "isLocked", - type: "checkbox", - checked: e.checked, - }, - }; - handleRoomChange(syntheticEvent); - }} - > - - - - - Locked room - - - - Room size - - setRoomInput({ ...room, roomMode: e.value[0] }) - } - collection={roomModeCollection} - > - - - - - - - - - - - - {roomModeOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - Recording type - - setRoomInput({ - ...room, - recordingType: e.value[0], - recordingTrigger: - e.value[0] !== "cloud" ? "none" : room.recordingTrigger, - }) - } - collection={recordingTypeCollection} - > - - - - - - - - - - - - {recordingTypeOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - Cloud recording start trigger - - setRoomInput({ ...room, recordingTrigger: e.value[0] }) - } - collection={recordingTriggerCollection} - disabled={room.recordingType !== "cloud"} - > - - - - - - - - - - - - {recordingTriggerOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - { - const syntheticEvent = { - target: { - name: "zulipAutoPost", - type: "checkbox", - checked: e.checked, - }, - }; - handleRoomChange(syntheticEvent); - }} - > - - - - - - Automatically post transcription to Zulip - - - - - Zulip stream - - setRoomInput({ - ...room, - zulipStream: e.value[0], - zulipTopic: "", - }) - } - collection={streamCollection} - disabled={!room.zulipAutoPost} - > - - - - - - - - - - - - {streamOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - Zulip topic - - setRoomInput({ ...room, zulipTopic: e.value[0] }) - } - collection={topicCollection} - disabled={!room.zulipAutoPost} - > - - - - - - - - - - - - {topicOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - {/* Webhook Configuration Section */} - - Webhook URL - - - Optional: URL to receive notifications when transcripts are - ready - - - - {room.webhookUrl && ( - <> - - Webhook Secret - - - {isEditing && room.webhookSecret && ( - - setShowWebhookSecret(!showWebhookSecret) - } - > - {showWebhookSecret ? 
: } - - )} - + + + Room name + - Used for HMAC signature verification (auto-generated if - left empty) + No spaces or special characters allowed + + {nameError && ( + {nameError} + )} + + + + { + const syntheticEvent = { + target: { + name: "isLocked", + type: "checkbox", + checked: e.checked, + }, + }; + handleRoomChange(syntheticEvent); + }} + > + + + + + Locked room + + + + + Room size + + setRoomInput({ ...room, roomMode: e.value[0] }) + } + collection={roomModeCollection} + > + + + + + + + + + + + + {roomModeOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + + Recording type + + setRoomInput({ + ...room, + recordingType: e.value[0], + recordingTrigger: + e.value[0] !== "cloud" + ? "none" + : room.recordingTrigger, + }) + } + collection={recordingTypeCollection} + > + + + + + + + + + + + + {recordingTypeOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + + Cloud recording start trigger + + setRoomInput({ ...room, recordingTrigger: e.value[0] }) + } + collection={recordingTriggerCollection} + disabled={room.recordingType !== "cloud"} + > + + + + + + + + + + + + {recordingTriggerOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + + { + const syntheticEvent = { + target: { + name: "isShared", + type: "checkbox", + checked: e.checked, + }, + }; + handleRoomChange(syntheticEvent); + }} + > + + + + + Shared room + + + + + + { + setRoomInput({ + ...room, + icsUrl: + settings.ics_url !== undefined + ? settings.ics_url + : room.icsUrl, + icsEnabled: + settings.ics_enabled !== undefined + ? settings.ics_enabled + : room.icsEnabled, + icsFetchInterval: + settings.ics_fetch_interval !== undefined + ? settings.ics_fetch_interval + : room.icsFetchInterval, + }); + }} + isOwner={true} + isEditing={isEditing} + /> + + + + + { + const syntheticEvent = { + target: { + name: "zulipAutoPost", + type: "checkbox", + checked: e.checked, + }, + }; + handleRoomChange(syntheticEvent); + }} + > + + + + + + Automatically post transcription to Zulip + + + + + + Zulip stream + + setRoomInput({ + ...room, + zulipStream: e.value[0], + zulipTopic: "", + }) + } + collection={streamCollection} + disabled={!room.zulipAutoPost} + > + + + + + + + + + + + + {streamOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + + Zulip topic + + setRoomInput({ ...room, zulipTopic: e.value[0] }) + } + collection={topicCollection} + disabled={!room.zulipAutoPost} + > + + + + + + + + + + + + {topicOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + + + + Webhook URL + + + Optional: URL to receive notifications when transcripts + are ready - {isEditing && ( + {room.webhookUrl && ( <> - - - {webhookTestResult && ( -
+ + Used for HMAC signature verification (auto-generated + if left empty) + + + + {isEditing && ( + <> + - {webhookTestResult} -
- )} -
+ + {webhookTestResult && ( +
+ {webhookTestResult} +
+ )} +
+ + )} )} - - )} - - - { - const syntheticEvent = { - target: { - name: "isShared", - type: "checkbox", - checked: e.checked, - }, - }; - handleRoomChange(syntheticEvent); - }} - > - - - - - Shared room - - + + + {isOwner && ( + + )} + +
+
+ ))} + + ) : upcomingMeetings.length > 0 ? ( + /* Upcoming Meetings - BIG DISPLAY when no ongoing meetings */ + + + Upcoming Meeting{upcomingMeetings.length > 1 ? "s" : ""} + + {upcomingMeetings.map((meeting) => { + const now = new Date(); + const startTime = new Date(meeting.start_date); + const minutesUntilStart = Math.floor( + (startTime.getTime() - now.getTime()) / (1000 * 60), + ); + + return ( + + + + + + + {(meeting.calendar_metadata as any)?.title || + "Upcoming Meeting"} + + + + {isOwner && + (meeting.calendar_metadata as any)?.description && ( + + {(meeting.calendar_metadata as any).description} + + )} + + + + Starts in {minutesUntilStart} minute + {minutesUntilStart !== 1 ? "s" : ""} + + + {formatDateTime(new Date(meeting.start_date))} + + + + {isOwner && + (meeting.calendar_metadata as any)?.attendees && ( + + {(meeting.calendar_metadata as any).attendees + .slice(0, 4) + .map((attendee: any, idx: number) => ( + + {attendee.name || attendee.email} + + ))} + {(meeting.calendar_metadata as any).attendees + .length > 4 && ( + + + + {(meeting.calendar_metadata as any).attendees + .length - 4}{" "} + more + + )} + + )} + + + + + {isOwner && ( + + )} + + + + ); + })} + + ) : null} + + {/* Upcoming Meetings - SMALLER ASIDE DISPLAY when there are ongoing meetings */} + {currentMeetings.length > 0 && upcomingMeetings.length > 0 && ( + + + Starting Soon + + + {upcomingMeetings.map((meeting) => { + const now = new Date(); + const startTime = new Date(meeting.start_date); + const minutesUntilStart = Math.floor( + (startTime.getTime() - now.getTime()) / (1000 * 60), + ); + + return ( + + + + + + {(meeting.calendar_metadata as any)?.title || + "Upcoming Meeting"} + + + + + in {minutesUntilStart} minute + {minutesUntilStart !== 1 ? "s" : ""} + + + + Starts: {formatDateTime(new Date(meeting.start_date))} + + + + + + ); + })} + + + )} + + {/* No meetings message - show when no ongoing or upcoming meetings */} + {currentMeetings.length === 0 && upcomingMeetings.length === 0 && ( + + + + + + No meetings right now + + + There are no ongoing or upcoming meetings in this room at the + moment. 
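{/* Empty state: rendered only when both branches above bailed out, i.e. neither
    currentMeetings nor upcomingMeetings has entries for this room. */}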
+ + + + + )} + + + ); +} diff --git a/www/app/[roomName]/[meetingId]/constants.ts b/www/app/[roomName]/[meetingId]/constants.ts new file mode 100644 index 00000000..6978da36 --- /dev/null +++ b/www/app/[roomName]/[meetingId]/constants.ts @@ -0,0 +1 @@ +export const MEETING_DEFAULT_TIME_MINUTES = 60; diff --git a/www/app/[roomName]/[meetingId]/page.tsx b/www/app/[roomName]/[meetingId]/page.tsx new file mode 100644 index 00000000..8ce405ba --- /dev/null +++ b/www/app/[roomName]/[meetingId]/page.tsx @@ -0,0 +1,3 @@ +import Room from "../room"; + +export default Room; diff --git a/www/app/[roomName]/page.tsx b/www/app/[roomName]/page.tsx index 867aeb3e..1aaca4c7 100644 --- a/www/app/[roomName]/page.tsx +++ b/www/app/[roomName]/page.tsx @@ -1,336 +1,3 @@ -"use client"; +import Room from "./room"; -import { - useCallback, - useEffect, - useRef, - useState, - useContext, - RefObject, - use, -} from "react"; -import { - Box, - Button, - Text, - VStack, - HStack, - Spinner, - Icon, -} from "@chakra-ui/react"; -import { toaster } from "../components/ui/toaster"; -import useRoomMeeting from "./useRoomMeeting"; -import { useRouter } from "next/navigation"; -import { notFound } from "next/navigation"; -import { useRecordingConsent } from "../recordingConsentContext"; -import { useMeetingAudioConsent } from "../lib/apiHooks"; -import type { components } from "../reflector-api"; - -type Meeting = components["schemas"]["Meeting"]; -import { FaBars } from "react-icons/fa6"; -import { useAuth } from "../lib/AuthProvider"; - -export type RoomDetails = { - params: Promise<{ - roomName: string; - }>; -}; - -// stages: we focus on the consent, then whereby steals focus, then we focus on the consent again, then return focus to whoever stole it initially -const useConsentWherebyFocusManagement = ( - acceptButtonRef: RefObject, - wherebyRef: RefObject, -) => { - const currentFocusRef = useRef(null); - useEffect(() => { - if (acceptButtonRef.current) { - acceptButtonRef.current.focus(); - } else { - console.error( - "accept button ref not available yet for focus management - seems to be illegal state", - ); - } - - const handleWherebyReady = () => { - console.log("whereby ready - refocusing consent button"); - currentFocusRef.current = document.activeElement as HTMLElement; - if (acceptButtonRef.current) { - acceptButtonRef.current.focus(); - } - }; - - if (wherebyRef.current) { - wherebyRef.current.addEventListener("ready", handleWherebyReady); - } else { - console.warn( - "whereby ref not available yet for focus management - seems to be illegal state. 
not waiting, focus management off.", - ); - } - - return () => { - wherebyRef.current?.removeEventListener("ready", handleWherebyReady); - currentFocusRef.current?.focus(); - }; - }, []); -}; - -const useConsentDialog = ( - meetingId: string, - wherebyRef: RefObject /*accessibility*/, -) => { - const { state: consentState, touch, hasConsent } = useRecordingConsent(); - // toast would open duplicates, even with using "id=" prop - const [modalOpen, setModalOpen] = useState(false); - const audioConsentMutation = useMeetingAudioConsent(); - - const handleConsent = useCallback( - async (meetingId: string, given: boolean) => { - try { - await audioConsentMutation.mutateAsync({ - params: { - path: { - meeting_id: meetingId, - }, - }, - body: { - consent_given: given, - }, - }); - - touch(meetingId); - } catch (error) { - console.error("Error submitting consent:", error); - } - }, - [audioConsentMutation, touch], - ); - - const showConsentModal = useCallback(() => { - if (modalOpen) return; - - setModalOpen(true); - - const toastId = toaster.create({ - placement: "top", - duration: null, - render: ({ dismiss }) => { - const AcceptButton = () => { - const buttonRef = useRef(null); - useConsentWherebyFocusManagement(buttonRef, wherebyRef); - return ( - - ); - }; - - return ( - - - - Can we have your permission to store this meeting's audio - recording on our servers? - - - - - - - - ); - }, - }); - - // Set modal state when toast is dismissed - toastId.then((id) => { - const checkToastStatus = setInterval(() => { - if (!toaster.isActive(id)) { - setModalOpen(false); - clearInterval(checkToastStatus); - } - }, 100); - }); - - // Handle escape key to close the toast - const handleKeyDown = (event: KeyboardEvent) => { - if (event.key === "Escape") { - toastId.then((id) => toaster.dismiss(id)); - } - }; - - document.addEventListener("keydown", handleKeyDown); - - const cleanup = () => { - toastId.then((id) => toaster.dismiss(id)); - document.removeEventListener("keydown", handleKeyDown); - }; - - return cleanup; - }, [meetingId, handleConsent, wherebyRef, modalOpen]); - - return { - showConsentModal, - consentState, - hasConsent, - consentLoading: audioConsentMutation.isPending, - }; -}; - -function ConsentDialogButton({ - meetingId, - wherebyRef, -}: { - meetingId: string; - wherebyRef: React.RefObject; -}) { - const { showConsentModal, consentState, hasConsent, consentLoading } = - useConsentDialog(meetingId, wherebyRef); - - if (!consentState.ready || hasConsent(meetingId) || consentLoading) { - return null; - } - - return ( - - ); -} - -const recordingTypeRequiresConsent = ( - recordingType: NonNullable, -) => { - return recordingType === "cloud"; -}; - -// next throws even with "use client" -const useWhereby = () => { - const [wherebyLoaded, setWherebyLoaded] = useState(false); - useEffect(() => { - if (typeof window !== "undefined") { - import("@whereby.com/browser-sdk/embed") - .then(() => { - setWherebyLoaded(true); - }) - .catch(console.error.bind(console)); - } - }, []); - return wherebyLoaded; -}; - -export default function Room(details: RoomDetails) { - const params = use(details.params); - const wherebyLoaded = useWhereby(); - const wherebyRef = useRef(null); - const roomName = params.roomName; - const meeting = useRoomMeeting(roomName); - const router = useRouter(); - const status = useAuth().status; - const isAuthenticated = status === "authenticated"; - const isLoading = status === "loading" || meeting.loading; - - const roomUrl = meeting?.response?.host_room_url - ? 
meeting?.response?.host_room_url - : meeting?.response?.room_url; - - const meetingId = meeting?.response?.id; - - const recordingType = meeting?.response?.recording_type; - - const handleLeave = useCallback(() => { - router.push("/browse"); - }, [router]); - - useEffect(() => { - if ( - !isLoading && - meeting?.error && - "status" in meeting.error && - meeting.error.status === 404 - ) { - notFound(); - } - }, [isLoading, meeting?.error]); - - useEffect(() => { - if (isLoading || !isAuthenticated || !roomUrl || !wherebyLoaded) return; - - wherebyRef.current?.addEventListener("leave", handleLeave); - - return () => { - wherebyRef.current?.removeEventListener("leave", handleLeave); - }; - }, [handleLeave, roomUrl, isLoading, isAuthenticated, wherebyLoaded]); - - if (isLoading) { - return ( - - - - ); - } - - return ( - <> - {roomUrl && meetingId && wherebyLoaded && ( - <> - - {recordingType && recordingTypeRequiresConsent(recordingType) && ( - - )} - - )} - - ); -} +export default Room; diff --git a/www/app/[roomName]/room.tsx b/www/app/[roomName]/room.tsx new file mode 100644 index 00000000..780851e2 --- /dev/null +++ b/www/app/[roomName]/room.tsx @@ -0,0 +1,437 @@ +"use client"; + +import { roomMeetingUrl, roomUrl as getRoomUrl } from "../lib/routes"; +import { + useCallback, + useEffect, + useRef, + useState, + useContext, + RefObject, + use, +} from "react"; +import { + Box, + Button, + Text, + VStack, + HStack, + Spinner, + Icon, +} from "@chakra-ui/react"; +import { toaster } from "../components/ui/toaster"; +import { useRouter } from "next/navigation"; +import { useRecordingConsent } from "../recordingConsentContext"; +import { + useMeetingAudioConsent, + useRoomGetByName, + useRoomActiveMeetings, + useRoomUpcomingMeetings, + useRoomsCreateMeeting, + useRoomGetMeeting, +} from "../lib/apiHooks"; +import type { components } from "../reflector-api"; +import MeetingSelection from "./MeetingSelection"; +import useRoomDefaultMeeting from "./useRoomDefaultMeeting"; + +type Meeting = components["schemas"]["Meeting"]; +import { FaBars } from "react-icons/fa6"; +import { useAuth } from "../lib/AuthProvider"; +import { getWherebyUrl, useWhereby } from "../lib/wherebyClient"; +import { useError } from "../(errors)/errorContext"; +import { + assertExistsAndNonEmptyString, + NonEmptyString, + parseNonEmptyString, +} from "../lib/utils"; +import { printApiError } from "../api/_error"; + +export type RoomDetails = { + params: Promise<{ + roomName: string; + meetingId?: string; + }>; +}; + +// stages: we focus on the consent, then whereby steals focus, then we focus on the consent again, then return focus to whoever stole it initially +const useConsentWherebyFocusManagement = ( + acceptButtonRef: RefObject, + wherebyRef: RefObject, +) => { + const currentFocusRef = useRef(null); + useEffect(() => { + if (acceptButtonRef.current) { + acceptButtonRef.current.focus(); + } else { + console.error( + "accept button ref not available yet for focus management - seems to be illegal state", + ); + } + + const handleWherebyReady = () => { + console.log("whereby ready - refocusing consent button"); + currentFocusRef.current = document.activeElement as HTMLElement; + if (acceptButtonRef.current) { + acceptButtonRef.current.focus(); + } + }; + + if (wherebyRef.current) { + wherebyRef.current.addEventListener("ready", handleWherebyReady); + } else { + console.warn( + "whereby ref not available yet for focus management - seems to be illegal state. 
not waiting, focus management off.", + ); + } + + return () => { + wherebyRef.current?.removeEventListener("ready", handleWherebyReady); + currentFocusRef.current?.focus(); + }; + }, []); +}; + +const useConsentDialog = ( + meetingId: string, + wherebyRef: RefObject /*accessibility*/, +) => { + const { state: consentState, touch, hasConsent } = useRecordingConsent(); + // toast would open duplicates, even with using "id=" prop + const [modalOpen, setModalOpen] = useState(false); + const audioConsentMutation = useMeetingAudioConsent(); + + const handleConsent = useCallback( + async (meetingId: string, given: boolean) => { + try { + await audioConsentMutation.mutateAsync({ + params: { + path: { + meeting_id: meetingId, + }, + }, + body: { + consent_given: given, + }, + }); + + touch(meetingId); + } catch (error) { + console.error("Error submitting consent:", error); + } + }, + [audioConsentMutation, touch], + ); + + const showConsentModal = useCallback(() => { + if (modalOpen) return; + + setModalOpen(true); + + const toastId = toaster.create({ + placement: "top", + duration: null, + render: ({ dismiss }) => { + const AcceptButton = () => { + const buttonRef = useRef(null); + useConsentWherebyFocusManagement(buttonRef, wherebyRef); + return ( + + ); + }; + + return ( + + + + Can we have your permission to store this meeting's audio + recording on our servers? + + + + + + + + ); + }, + }); + + // Set modal state when toast is dismissed + toastId.then((id) => { + const checkToastStatus = setInterval(() => { + if (!toaster.isActive(id)) { + setModalOpen(false); + clearInterval(checkToastStatus); + } + }, 100); + }); + + // Handle escape key to close the toast + const handleKeyDown = (event: KeyboardEvent) => { + if (event.key === "Escape") { + toastId.then((id) => toaster.dismiss(id)); + } + }; + + document.addEventListener("keydown", handleKeyDown); + + const cleanup = () => { + toastId.then((id) => toaster.dismiss(id)); + document.removeEventListener("keydown", handleKeyDown); + }; + + return cleanup; + }, [meetingId, handleConsent, wherebyRef, modalOpen]); + + return { + showConsentModal, + consentState, + hasConsent, + consentLoading: audioConsentMutation.isPending, + }; +}; + +function ConsentDialogButton({ + meetingId, + wherebyRef, +}: { + meetingId: NonEmptyString; + wherebyRef: React.RefObject; +}) { + const { showConsentModal, consentState, hasConsent, consentLoading } = + useConsentDialog(meetingId, wherebyRef); + + if (!consentState.ready || hasConsent(meetingId) || consentLoading) { + return null; + } + + return ( + + ); +} + +const recordingTypeRequiresConsent = ( + recordingType: NonNullable, +) => { + return recordingType === "cloud"; +}; + +export default function Room(details: RoomDetails) { + const params = use(details.params); + const wherebyLoaded = useWhereby(); + const wherebyRef = useRef(null); + const roomName = parseNonEmptyString(params.roomName); + const router = useRouter(); + const auth = useAuth(); + const status = auth.status; + const isAuthenticated = status === "authenticated"; + const { setError } = useError(); + + const roomQuery = useRoomGetByName(roomName); + const createMeetingMutation = useRoomsCreateMeeting(); + + const room = roomQuery.data; + + const pageMeetingId = params.meetingId; + + // this one is called on room page + const defaultMeeting = useRoomDefaultMeeting( + room && !room.ics_enabled && !pageMeetingId ? 
roomName : null, + ); + + const explicitMeeting = useRoomGetMeeting(roomName, pageMeetingId || null); + const wherebyRoomUrl = explicitMeeting.data + ? getWherebyUrl(explicitMeeting.data) + : defaultMeeting.response + ? getWherebyUrl(defaultMeeting.response) + : null; + const recordingType = (explicitMeeting.data || defaultMeeting.response) + ?.recording_type; + const meetingId = (explicitMeeting.data || defaultMeeting.response)?.id; + + const isLoading = + status === "loading" || + roomQuery.isLoading || + defaultMeeting?.loading || + explicitMeeting.isLoading; + + const errors = [ + explicitMeeting.error, + defaultMeeting.error, + roomQuery.error, + createMeetingMutation.error, + ].filter(Boolean); + + const isOwner = + isAuthenticated && room ? auth.user?.id === room.user_id : false; + + const handleMeetingSelect = (selectedMeeting: Meeting) => { + router.push( + roomMeetingUrl(roomName, parseNonEmptyString(selectedMeeting.id)), + ); + }; + + const handleCreateUnscheduled = async () => { + try { + // Create a new unscheduled meeting + const newMeeting = await createMeetingMutation.mutateAsync({ + params: { + path: { room_name: roomName }, + }, + body: { + allow_duplicated: room ? room.ics_enabled : false, + }, + }); + handleMeetingSelect(newMeeting); + } catch (err) { + console.error("Failed to create meeting:", err); + } + }; + + const handleLeave = useCallback(() => { + router.push("/browse"); + }, [router]); + + useEffect(() => { + if (isLoading || !isAuthenticated || !wherebyRoomUrl || !wherebyLoaded) + return; + + wherebyRef.current?.addEventListener("leave", handleLeave); + + return () => { + wherebyRef.current?.removeEventListener("leave", handleLeave); + }; + }, [handleLeave, wherebyRoomUrl, isLoading, isAuthenticated, wherebyLoaded]); + + useEffect(() => { + if (!isLoading && !wherebyRoomUrl) { + setError(new Error("Whereby room URL not found")); + } + }, [isLoading, wherebyRoomUrl]); + + if (isLoading) { + return ( + + + + ); + } + + if (!room) { + return ( + + Room not found + + ); + } + + if (room.ics_enabled && !params.meetingId) { + return ( + + ); + } + + if (errors.length > 0) { + return ( + + {errors.map((error, i) => ( + + {printApiError(error)} + + ))} + + ); + } + + return ( + <> + {wherebyRoomUrl && wherebyLoaded && ( + <> + + {recordingType && + recordingTypeRequiresConsent(recordingType) && + meetingId && ( + + )} + + )} + + ); +} diff --git a/www/app/[roomName]/useRoomMeeting.tsx b/www/app/[roomName]/useRoomDefaultMeeting.tsx similarity index 75% rename from www/app/[roomName]/useRoomMeeting.tsx rename to www/app/[roomName]/useRoomDefaultMeeting.tsx index 93491a05..724e692f 100644 --- a/www/app/[roomName]/useRoomMeeting.tsx +++ b/www/app/[roomName]/useRoomDefaultMeeting.tsx @@ -1,4 +1,4 @@ -import { useEffect, useState } from "react"; +import { useEffect, useState, useRef } from "react"; import { useError } from "../(errors)/errorContext"; import type { components } from "../reflector-api"; import { shouldShowError } from "../lib/errorUtils"; @@ -6,30 +6,31 @@ import { shouldShowError } from "../lib/errorUtils"; type Meeting = components["schemas"]["Meeting"]; import { useRoomsCreateMeeting } from "../lib/apiHooks"; import { notFound } from "next/navigation"; +import { ApiError } from "../api/_error"; type ErrorMeeting = { - error: Error; + error: ApiError; loading: false; response: null; reload: () => void; }; type LoadingMeeting = { + error: null; response: null; loading: true; - error: false; reload: () => void; }; type SuccessMeeting = { + error: null; 
response: Meeting; loading: false; - error: null; reload: () => void; }; -const useRoomMeeting = ( - roomName: string | null | undefined, +const useRoomDefaultMeeting = ( + roomName: string | null, ): ErrorMeeting | LoadingMeeting | SuccessMeeting => { const [response, setResponse] = useState(null); const [reload, setReload] = useState(0); @@ -37,10 +38,15 @@ const useRoomMeeting = ( const createMeetingMutation = useRoomsCreateMeeting(); const reloadHandler = () => setReload((prev) => prev + 1); + // this is to dedupe dev mode room creation + const creatingRef = useRef(false); + useEffect(() => { if (!roomName) return; + if (creatingRef.current) return; const createMeeting = async () => { + creatingRef.current = true; try { const result = await createMeetingMutation.mutateAsync({ params: { @@ -48,6 +54,9 @@ const useRoomMeeting = ( room_name: roomName, }, }, + body: { + allow_duplicated: false, + }, }); setResponse(result); } catch (error: any) { @@ -60,14 +69,16 @@ const useRoomMeeting = ( } else { setError(error); } + } finally { + creatingRef.current = false; + } }; - createMeeting(); + createMeeting().then(() => {}); }, [roomName, reload]); const loading = createMeetingMutation.isPending && !response; - const error = createMeetingMutation.error as Error | null; + const error = createMeetingMutation.error; return { response, loading, error, reload: reloadHandler } as | ErrorMeeting @@ -75,4 +86,4 @@ const useRoomMeeting = ( | SuccessMeeting; }; -export default useRoomMeeting; +export default useRoomDefaultMeeting; diff --git a/www/app/api/_error.ts b/www/app/api/_error.ts new file mode 100644 index 00000000..9603b8e8 --- /dev/null +++ b/www/app/api/_error.ts @@ -0,0 +1,26 @@ +import { components } from "../reflector-api"; +import { isArray } from "remeda"; + +export type ApiError = { + detail?: components["schemas"]["ValidationError"][]; +} | null; + +// errors as declared on the api types don't always match reality, e.g. 
detail may be a string +export const printApiError = (error: ApiError) => { + if (!error || !error.detail) { + return null; + } + const detail = error.detail as unknown; + if (isArray(error.detail)) { + return error.detail.map((e) => e.msg).join(", "); + } + if (typeof detail === "string") { + if (detail.length > 0) { + return detail; + } + console.error("Error detail is empty"); + return null; + } + console.error("Error detail is not a string or array"); + return null; +}; diff --git a/www/app/api/schemas.gen.ts b/www/app/api/schemas.gen.ts deleted file mode 100644 index e69de29b..00000000 diff --git a/www/app/api/services.gen.ts b/www/app/api/services.gen.ts deleted file mode 100644 index e69de29b..00000000 diff --git a/www/app/api/types.gen.ts b/www/app/api/types.gen.ts deleted file mode 100644 index e69de29b..00000000 diff --git a/www/app/components/MeetingMinimalHeader.tsx b/www/app/components/MeetingMinimalHeader.tsx new file mode 100644 index 00000000..fe08c9d6 --- /dev/null +++ b/www/app/components/MeetingMinimalHeader.tsx @@ -0,0 +1,101 @@ +"use client"; + +import { Flex, Link, Button, Text, HStack } from "@chakra-ui/react"; +import NextLink from "next/link"; +import Image from "next/image"; +import { useRouter } from "next/navigation"; +import { roomUrl } from "../lib/routes"; +import { NonEmptyString } from "../lib/utils"; + +interface MeetingMinimalHeaderProps { + roomName: NonEmptyString; + displayName?: string; + showLeaveButton?: boolean; + onLeave?: () => void; + showCreateButton?: boolean; + onCreateMeeting?: () => void; + isCreatingMeeting?: boolean; +} + +export default function MeetingMinimalHeader({ + roomName, + displayName, + showLeaveButton = true, + onLeave, + showCreateButton = false, + onCreateMeeting, + isCreatingMeeting = false, +}: MeetingMinimalHeaderProps) { + const router = useRouter(); + + const handleLeaveMeeting = () => { + if (onLeave) { + onLeave(); + } else { + router.push(roomUrl(roomName)); + } + }; + + const roomTitle = displayName + ? displayName.endsWith("'s") || displayName.endsWith("s") + ? 
`${displayName} Room` + : `${displayName}'s Room` + : `${roomName} Room`; + + return ( + + {/* Logo and Room Context */} + + + Reflector + + + {roomTitle} + + + + {/* Action Buttons */} + + {showCreateButton && onCreateMeeting && ( + + )} + {showLeaveButton && ( + + )} + + + ); +} diff --git a/www/app/lib/WherebyWebinarEmbed.tsx b/www/app/lib/WherebyWebinarEmbed.tsx index 5bfef554..5526cca2 100644 --- a/www/app/lib/WherebyWebinarEmbed.tsx +++ b/www/app/lib/WherebyWebinarEmbed.tsx @@ -4,16 +4,16 @@ import "@whereby.com/browser-sdk/embed"; import { Box, Button, HStack, Text, Link } from "@chakra-ui/react"; import { toaster } from "../components/ui/toaster"; -interface WherebyEmbedProps { +interface WherebyWebinarEmbedProps { roomUrl: string; onLeave?: () => void; } -// currently used for webinars only +// used for webinars only export default function WherebyWebinarEmbed({ roomUrl, onLeave, -}: WherebyEmbedProps) { +}: WherebyWebinarEmbedProps) { const wherebyRef = useRef(null); // TODO extract common toast logic / styles to be used by consent toast on normal rooms diff --git a/www/app/lib/apiHooks.ts b/www/app/lib/apiHooks.ts index 3b5eed2b..c5b4f9b9 100644 --- a/www/app/lib/apiHooks.ts +++ b/www/app/lib/apiHooks.ts @@ -12,7 +12,7 @@ import { useAuth } from "./AuthProvider"; * or, limitation or incorrect usage of .d type generator from json schema * */ -const useAuthReady = () => { +export const useAuthReady = () => { const auth = useAuth(); return { @@ -75,7 +75,7 @@ export function useTranscriptDelete() { return $api.useMutation("delete", "/v1/transcripts/{transcript_id}", { onSuccess: () => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: ["get", "/v1/transcripts/search"], }); }, @@ -102,7 +102,7 @@ export function useTranscriptGet(transcriptId: string | null) { { params: { path: { - transcript_id: transcriptId || "", + transcript_id: transcriptId!, }, }, }, @@ -120,7 +120,7 @@ export function useRoomGet(roomId: string | null) { "/v1/rooms/{room_id}", { params: { - path: { room_id: roomId || "" }, + path: { room_id: roomId! }, }, }, { @@ -145,7 +145,7 @@ export function useRoomCreate() { return $api.useMutation("post", "/v1/rooms", { onSuccess: () => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, }); }, @@ -188,7 +188,7 @@ export function useRoomDelete() { return $api.useMutation("delete", "/v1/rooms/{room_id}", { onSuccess: () => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, }); }, @@ -236,7 +236,7 @@ export function useTranscriptUpdate() { return $api.useMutation("patch", "/v1/transcripts/{transcript_id}", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions("get", "/v1/transcripts/{transcript_id}", { params: { path: { transcript_id: variables.params.path.transcript_id }, @@ -270,7 +270,7 @@ export function useTranscriptUploadAudio() { "/v1/transcripts/{transcript_id}/record/upload", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions( "get", "/v1/transcripts/{transcript_id}", @@ -327,7 +327,7 @@ export function useTranscriptTopics(transcriptId: string | null) { "/v1/transcripts/{transcript_id}/topics", { params: { - path: { transcript_id: transcriptId || "" }, + path: { transcript_id: transcriptId! 
}, }, }, { @@ -344,7 +344,7 @@ export function useTranscriptTopicsWithWords(transcriptId: string | null) { "/v1/transcripts/{transcript_id}/topics/with-words", { params: { - path: { transcript_id: transcriptId || "" }, + path: { transcript_id: transcriptId! }, }, }, { @@ -365,8 +365,8 @@ export function useTranscriptTopicsWithWordsPerSpeaker( { params: { path: { - transcript_id: transcriptId || "", - topic_id: topicId || "", + transcript_id: transcriptId!, + topic_id: topicId!, }, }, }, @@ -384,7 +384,7 @@ export function useTranscriptParticipants(transcriptId: string | null) { "/v1/transcripts/{transcript_id}/participants", { params: { - path: { transcript_id: transcriptId || "" }, + path: { transcript_id: transcriptId! }, }, }, { @@ -402,7 +402,7 @@ export function useTranscriptParticipantUpdate() { "/v1/transcripts/{transcript_id}/participants/{participant_id}", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions( "get", "/v1/transcripts/{transcript_id}/participants", @@ -430,7 +430,7 @@ export function useTranscriptParticipantCreate() { "/v1/transcripts/{transcript_id}/participants", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions( "get", "/v1/transcripts/{transcript_id}/participants", @@ -458,7 +458,7 @@ export function useTranscriptParticipantDelete() { "/v1/transcripts/{transcript_id}/participants/{participant_id}", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: $api.queryOptions( "get", "/v1/transcripts/{transcript_id}/participants", @@ -486,28 +486,30 @@ export function useTranscriptSpeakerAssign() { "/v1/transcripts/{transcript_id}/speaker/assign", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ - queryKey: $api.queryOptions( - "get", - "/v1/transcripts/{transcript_id}", - { - params: { - path: { transcript_id: variables.params.path.transcript_id }, + return Promise.all([ + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, }, - }, - ).queryKey, - }); - queryClient.invalidateQueries({ - queryKey: $api.queryOptions( - "get", - "/v1/transcripts/{transcript_id}/participants", - { - params: { - path: { transcript_id: variables.params.path.transcript_id }, + ).queryKey, + }), + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, }, - }, - ).queryKey, - }); + ).queryKey, + }), + ]); }, onError: (error) => { setError(error as Error, "There was an error assigning the speaker"); @@ -525,28 +527,30 @@ export function useTranscriptSpeakerMerge() { "/v1/transcripts/{transcript_id}/speaker/merge", { onSuccess: (data, variables) => { - queryClient.invalidateQueries({ - queryKey: $api.queryOptions( - "get", - "/v1/transcripts/{transcript_id}", - { - params: { - path: { transcript_id: variables.params.path.transcript_id }, + return Promise.all([ + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, }, - }, - ).queryKey, - }); - queryClient.invalidateQueries({ - queryKey: $api.queryOptions( - 
"get", - "/v1/transcripts/{transcript_id}/participants", - { - params: { - path: { transcript_id: variables.params.path.transcript_id }, + ).queryKey, + }), + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/transcripts/{transcript_id}/participants", + { + params: { + path: { transcript_id: variables.params.path.transcript_id }, + }, }, - }, - ).queryKey, - }); + ).queryKey, + }), + ]); }, onError: (error) => { setError(error as Error, "There was an error merging speakers"); @@ -565,6 +569,29 @@ export function useMeetingAudioConsent() { }); } +export function useMeetingDeactivate() { + const { setError } = useError(); + const queryClient = useQueryClient(); + + return $api.useMutation("patch", `/v1/meetings/{meeting_id}/deactivate`, { + onError: (error) => { + setError(error as Error, "Failed to end meeting"); + }, + onSuccess: () => { + return queryClient.invalidateQueries({ + predicate: (query) => { + const key = query.queryKey; + return key.some( + (k) => + typeof k === "string" && + !!MEETING_LIST_PATH_PARTIALS.find((e) => k.includes(e)), + ); + }, + }); + }, + }); +} + export function useTranscriptWebRTC() { const { setError } = useError(); @@ -585,7 +612,7 @@ export function useTranscriptCreate() { return $api.useMutation("post", "/v1/transcripts", { onSuccess: () => { - queryClient.invalidateQueries({ + return queryClient.invalidateQueries({ queryKey: ["get", "/v1/transcripts/search"], }); }, @@ -600,13 +627,164 @@ export function useRoomsCreateMeeting() { const queryClient = useQueryClient(); return $api.useMutation("post", "/v1/rooms/{room_name}/meeting", { - onSuccess: () => { - queryClient.invalidateQueries({ - queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, - }); + onSuccess: async (data, variables) => { + const roomName = variables.params.path.room_name; + await Promise.all([ + queryClient.invalidateQueries({ + queryKey: $api.queryOptions("get", "/v1/rooms").queryKey, + }), + queryClient.invalidateQueries({ + queryKey: $api.queryOptions( + "get", + "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`, + { + params: { + path: { room_name: roomName }, + }, + }, + ).queryKey, + }), + ]); }, onError: (error) => { setError(error as Error, "There was an error creating the meeting"); }, }); } + +// Calendar integration hooks +export function useRoomGetByName(roomName: string | null) { + return $api.useQuery( + "get", + "/v1/rooms/name/{room_name}", + { + params: { + path: { room_name: roomName! }, + }, + }, + { + enabled: !!roomName, + }, + ); +} + +export function useRoomUpcomingMeetings(roomName: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/rooms/{room_name}/meetings/upcoming" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_UPCOMING_PATH_PARTIAL}`, + { + params: { + path: { room_name: roomName! 
}, + }, + }, + { + enabled: !!roomName && isAuthenticated, + }, + ); +} + +const MEETINGS_PATH_PARTIAL = "meetings" as const; +const MEETINGS_ACTIVE_PATH_PARTIAL = `${MEETINGS_PATH_PARTIAL}/active` as const; +const MEETINGS_UPCOMING_PATH_PARTIAL = + `${MEETINGS_PATH_PARTIAL}/upcoming` as const; +const MEETING_LIST_PATH_PARTIALS = [ + MEETINGS_ACTIVE_PATH_PARTIAL, + MEETINGS_UPCOMING_PATH_PARTIAL, +]; + +export function useRoomActiveMeetings(roomName: string | null) { + return $api.useQuery( + "get", + "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`, + { + params: { + path: { room_name: roomName! }, + }, + }, + { + enabled: !!roomName, + }, + ); +} + +export function useRoomGetMeeting( + roomName: string | null, + meetingId: string | null, +) { + return $api.useQuery( + "get", + "/v1/rooms/{room_name}/meetings/{meeting_id}", + { + params: { + path: { + room_name: roomName!, + meeting_id: meetingId!, + }, + }, + }, + { + enabled: !!roomName && !!meetingId, + }, + ); +} + +export function useRoomJoinMeeting() { + const { setError } = useError(); + + return $api.useMutation( + "post", + "/v1/rooms/{room_name}/meetings/{meeting_id}/join", + { + onError: (error) => { + setError(error as Error, "There was an error joining the meeting"); + }, + }, + ); +} + +export function useRoomIcsSync() { + const { setError } = useError(); + + return $api.useMutation("post", "/v1/rooms/{room_name}/ics/sync", { + onError: (error) => { + setError(error as Error, "There was an error syncing the calendar"); + }, + }); +} + +export function useRoomIcsStatus(roomName: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/rooms/{room_name}/ics/status", + { + params: { + path: { room_name: roomName! }, + }, + }, + { + enabled: !!roomName && isAuthenticated, + }, + ); +} + +export function useRoomCalendarEvents(roomName: string | null) { + const { isAuthenticated } = useAuthReady(); + + return $api.useQuery( + "get", + "/v1/rooms/{room_name}/meetings", + { + params: { + path: { room_name: roomName! 
}, + }, + }, + { + enabled: !!roomName && isAuthenticated, + }, + ); +} +// End of Calendar integration hooks diff --git a/www/app/lib/routes.ts b/www/app/lib/routes.ts new file mode 100644 index 00000000..480082d0 --- /dev/null +++ b/www/app/lib/routes.ts @@ -0,0 +1,7 @@ +import { NonEmptyString } from "./utils"; + +export const roomUrl = (roomName: NonEmptyString) => `/${roomName}`; +export const roomMeetingUrl = ( + roomName: NonEmptyString, + meetingId: NonEmptyString, +) => `${roomUrl(roomName)}/${meetingId}`; diff --git a/www/app/lib/routesClient.ts b/www/app/lib/routesClient.ts new file mode 100644 index 00000000..9522bc74 --- /dev/null +++ b/www/app/lib/routesClient.ts @@ -0,0 +1,5 @@ +import { roomUrl } from "./routes"; +import { NonEmptyString } from "./utils"; + +export const roomAbsoluteUrl = (roomName: NonEmptyString) => + `${window.location.origin}${roomUrl(roomName)}`; diff --git a/www/app/lib/timeUtils.ts b/www/app/lib/timeUtils.ts new file mode 100644 index 00000000..db8a8152 --- /dev/null +++ b/www/app/lib/timeUtils.ts @@ -0,0 +1,25 @@ +export const formatDateTime = (d: Date): string => { + return d.toLocaleString("en-US", { + month: "short", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + }); +}; + +export const formatStartedAgo = ( + startTime: Date, + now: Date = new Date(), +): string => { + const diff = now.getTime() - startTime.getTime(); + + if (diff <= 0) return "Starting now"; + + const minutes = Math.floor(diff / 60000); + const hours = Math.floor(minutes / 60); + const days = Math.floor(hours / 24); + + if (days > 0) return `Started ${days}d ${hours % 24}h ${minutes % 60}m ago`; + if (hours > 0) return `Started ${hours}h ${minutes % 60}m ago`; + return `Started ${minutes} minutes ago`; +}; diff --git a/www/app/lib/wherebyClient.ts b/www/app/lib/wherebyClient.ts new file mode 100644 index 00000000..2345bd7b --- /dev/null +++ b/www/app/lib/wherebyClient.ts @@ -0,0 +1,22 @@ +import { useEffect, useState } from "react"; +import { components } from "../reflector-api"; + +export const useWhereby = () => { + const [wherebyLoaded, setWherebyLoaded] = useState(false); + useEffect(() => { + if (typeof window !== "undefined") { + import("@whereby.com/browser-sdk/embed") + .then(() => { + setWherebyLoaded(true); + }) + .catch(console.error.bind(console)); + } + }, []); + return wherebyLoaded; +}; + +export const getWherebyUrl = ( + meeting: Pick, +) => + // host_room_url possible '' atm + meeting.host_room_url || meeting.room_url; diff --git a/www/app/reflector-api.d.ts b/www/app/reflector-api.d.ts index 2b92f4d4..e1709d69 100644 --- a/www/app/reflector-api.d.ts +++ b/www/app/reflector-api.d.ts @@ -41,6 +41,23 @@ export interface paths { patch?: never; trace?: never; }; + "/v1/meetings/{meeting_id}/deactivate": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + /** Meeting Deactivate */ + patch: operations["v1_meeting_deactivate"]; + trace?: never; + }; "/v1/rooms": { parameters: { query?: never; @@ -78,6 +95,23 @@ export interface paths { patch: operations["v1_rooms_update"]; trace?: never; }; + "/v1/rooms/name/{room_name}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms Get By Name */ + get: operations["v1_rooms_get_by_name"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; 
"/v1/rooms/{room_name}/meeting": { parameters: { query?: never; @@ -115,6 +149,128 @@ export interface paths { patch?: never; trace?: never; }; + "/v1/rooms/{room_name}/ics/sync": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Rooms Sync Ics */ + post: operations["v1_rooms_sync_ics"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_name}/ics/status": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms Ics Status */ + get: operations["v1_rooms_ics_status"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_name}/meetings": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms List Meetings */ + get: operations["v1_rooms_list_meetings"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_name}/meetings/upcoming": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms List Upcoming Meetings */ + get: operations["v1_rooms_list_upcoming_meetings"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_name}/meetings/active": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** Rooms List Active Meetings */ + get: operations["v1_rooms_list_active_meetings"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_name}/meetings/{meeting_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Rooms Get Meeting + * @description Get a single meeting by ID within a specific room. 
+ */ + get: operations["v1_rooms_get_meeting"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/v1/rooms/{room_name}/meetings/{meeting_id}/join": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** Rooms Join Meeting */ + post: operations["v1_rooms_join_meeting"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/v1/transcripts": { parameters: { query?: never; @@ -505,6 +661,52 @@ export interface components { */ chunk: string; }; + /** CalendarEventResponse */ + CalendarEventResponse: { + /** Id */ + id: string; + /** Room Id */ + room_id: string; + /** Ics Uid */ + ics_uid: string; + /** Title */ + title?: string | null; + /** Description */ + description?: string | null; + /** + * Start Time + * Format: date-time + */ + start_time: string; + /** + * End Time + * Format: date-time + */ + end_time: string; + /** Attendees */ + attendees?: + | { + [key: string]: unknown; + }[] + | null; + /** Location */ + location?: string | null; + /** + * Last Synced + * Format: date-time + */ + last_synced: string; + /** + * Created At + * Format: date-time + */ + created_at: string; + /** + * Updated At + * Format: date-time + */ + updated_at: string; + }; /** CreateParticipant */ CreateParticipant: { /** Speaker */ @@ -536,6 +738,26 @@ export interface components { webhook_url: string; /** Webhook Secret */ webhook_secret: string; + /** Ics Url */ + ics_url?: string | null; + /** + * Ics Fetch Interval + * @default 300 + */ + ics_fetch_interval: number; + /** + * Ics Enabled + * @default false + */ + ics_enabled: boolean; + }; + /** CreateRoomMeeting */ + CreateRoomMeeting: { + /** + * Allow Duplicated + * @default false + */ + allow_duplicated: boolean | null; }; /** CreateTranscript */ CreateTranscript: { @@ -748,6 +970,60 @@ export interface components { /** Detail */ detail?: components["schemas"]["ValidationError"][]; }; + /** ICSStatus */ + ICSStatus: { + /** + * Status + * @enum {string} + */ + status: "enabled" | "disabled"; + /** Last Sync */ + last_sync?: string | null; + /** Next Sync */ + next_sync?: string | null; + /** Last Etag */ + last_etag?: string | null; + /** + * Events Count + * @default 0 + */ + events_count: number; + }; + /** ICSSyncResult */ + ICSSyncResult: { + status: components["schemas"]["SyncStatus"]; + /** Hash */ + hash?: string | null; + /** + * Events Found + * @default 0 + */ + events_found: number; + /** + * Total Events + * @default 0 + */ + total_events: number; + /** + * Events Created + * @default 0 + */ + events_created: number; + /** + * Events Updated + * @default 0 + */ + events_updated: number; + /** + * Events Deleted + * @default 0 + */ + events_deleted: number; + /** Error */ + error?: string | null; + /** Reason */ + reason?: string | null; + }; /** Meeting */ Meeting: { /** Id */ @@ -768,12 +1044,53 @@ export interface components { * Format: date-time */ end_date: string; + /** User Id */ + user_id?: string | null; + /** Room Id */ + room_id?: string | null; + /** + * Is Locked + * @default false + */ + is_locked: boolean; + /** + * Room Mode + * @default normal + * @enum {string} + */ + room_mode: "normal" | "group"; /** * Recording Type * @default cloud * @enum {string} */ recording_type: "none" | "local" | "cloud"; + /** + * Recording Trigger + * @default automatic-2nd-participant + * @enum {string} + */ + recording_trigger: + | "none" + | "prompt" + | 
"automatic" + | "automatic-2nd-participant"; + /** + * Num Clients + * @default 0 + */ + num_clients: number; + /** + * Is Active + * @default true + */ + is_active: boolean; + /** Calendar Event Id */ + calendar_event_id?: string | null; + /** Calendar Metadata */ + calendar_metadata?: { + [key: string]: unknown; + } | null; }; /** MeetingConsentRequest */ MeetingConsentRequest: { @@ -844,6 +1161,22 @@ export interface components { recording_trigger: string; /** Is Shared */ is_shared: boolean; + /** Ics Url */ + ics_url?: string | null; + /** + * Ics Fetch Interval + * @default 300 + */ + ics_fetch_interval: number; + /** + * Ics Enabled + * @default false + */ + ics_enabled: boolean; + /** Ics Last Sync */ + ics_last_sync?: string | null; + /** Ics Last Etag */ + ics_last_etag?: string | null; }; /** RoomDetails */ RoomDetails: { @@ -874,6 +1207,22 @@ export interface components { recording_trigger: string; /** Is Shared */ is_shared: boolean; + /** Ics Url */ + ics_url?: string | null; + /** + * Ics Fetch Interval + * @default 300 + */ + ics_fetch_interval: number; + /** + * Ics Enabled + * @default false + */ + ics_enabled: boolean; + /** Ics Last Sync */ + ics_last_sync?: string | null; + /** Ics Last Etag */ + ics_last_etag?: string | null; /** Webhook Url */ webhook_url: string | null; /** Webhook Secret */ @@ -998,6 +1347,11 @@ export interface components { /** Name */ name: string; }; + /** + * SyncStatus + * @enum {string} + */ + SyncStatus: "success" | "unchanged" | "error" | "skipped"; /** Topic */ Topic: { /** Name */ @@ -1022,27 +1376,33 @@ export interface components { /** UpdateRoom */ UpdateRoom: { /** Name */ - name: string; + name?: string | null; /** Zulip Auto Post */ - zulip_auto_post: boolean; + zulip_auto_post?: boolean | null; /** Zulip Stream */ - zulip_stream: string; + zulip_stream?: string | null; /** Zulip Topic */ - zulip_topic: string; + zulip_topic?: string | null; /** Is Locked */ - is_locked: boolean; + is_locked?: boolean | null; /** Room Mode */ - room_mode: string; + room_mode?: string | null; /** Recording Type */ - recording_type: string; + recording_type?: string | null; /** Recording Trigger */ - recording_trigger: string; + recording_trigger?: string | null; /** Is Shared */ - is_shared: boolean; + is_shared?: boolean | null; /** Webhook Url */ - webhook_url: string; + webhook_url?: string | null; /** Webhook Secret */ - webhook_secret: string; + webhook_secret?: string | null; + /** Ics Url */ + ics_url?: string | null; + /** Ics Fetch Interval */ + ics_fetch_interval?: number | null; + /** Ics Enabled */ + ics_enabled?: boolean | null; }; /** UpdateTranscript */ UpdateTranscript: { @@ -1204,6 +1564,37 @@ export interface operations { }; }; }; + v1_meeting_deactivate: { + parameters: { + query?: never; + header?: never; + path: { + meeting_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; v1_rooms_list: { parameters: { query?: { @@ -1368,7 +1759,7 @@ export interface operations { }; }; }; - v1_rooms_create_meeting: { + v1_rooms_get_by_name: { parameters: { query?: never; header?: never; @@ -1378,6 +1769,41 @@ export interface operations { cookie?: never; }; requestBody?: never; + 
responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["RoomDetails"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_create_meeting: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["CreateRoomMeeting"]; + }; + }; responses: { /** @description Successful Response */ 200: { @@ -1430,6 +1856,227 @@ export interface operations { }; }; }; + v1_rooms_sync_ics: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ICSSyncResult"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_ics_status: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ICSStatus"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_list_meetings: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["CalendarEventResponse"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_list_upcoming_meetings: { + parameters: { + query?: { + minutes_ahead?: number; + }; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["CalendarEventResponse"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_list_active_meetings: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Meeting"][]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; 
+ v1_rooms_get_meeting: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + meeting_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Meeting"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; + v1_rooms_join_meeting: { + parameters: { + query?: never; + header?: never; + path: { + room_name: string; + meeting_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["Meeting"]; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; v1_transcripts_list: { parameters: { query?: { diff --git a/www/app/webinars/[title]/page.tsx b/www/app/webinars/[title]/page.tsx index 51583a2a..ff21af1e 100644 --- a/www/app/webinars/[title]/page.tsx +++ b/www/app/webinars/[title]/page.tsx @@ -3,7 +3,7 @@ import { useEffect, useState, use } from "react"; import Link from "next/link"; import Image from "next/image"; import { notFound } from "next/navigation"; -import useRoomMeeting from "../../[roomName]/useRoomMeeting"; +import useRoomDefaultMeeting from "../../[roomName]/useRoomDefaultMeeting"; import dynamic from "next/dynamic"; const WherebyEmbed = dynamic(() => import("../../lib/WherebyWebinarEmbed"), { ssr: false, @@ -72,7 +72,7 @@ export default function WebinarPage(details: WebinarDetails) { const startDate = new Date(Date.parse(webinar.startsAt)); const endDate = new Date(Date.parse(webinar.endsAt)); - const meeting = useRoomMeeting(ROOM_NAME); + const meeting = useRoomDefaultMeeting(ROOM_NAME); const roomUrl = meeting?.response?.host_room_url ? 
meeting?.response?.host_room_url : meeting?.response?.room_url; diff --git a/www/package.json b/www/package.json index d53c1536..c93a9554 100644 --- a/www/package.json +++ b/www/package.json @@ -45,6 +45,7 @@ "react-qr-code": "^2.0.12", "react-select-search": "^4.1.7", "redlock": "5.0.0-beta.2", + "remeda": "^2.31.1", "sass": "^1.63.6", "simple-peer": "^9.11.1", "tailwindcss": "^3.3.2", diff --git a/www/pnpm-lock.yaml b/www/pnpm-lock.yaml index a4e78972..6c0a3d83 100644 --- a/www/pnpm-lock.yaml +++ b/www/pnpm-lock.yaml @@ -106,6 +106,9 @@ importers: redlock: specifier: 5.0.0-beta.2 version: 5.0.0-beta.2 + remeda: + specifier: ^2.31.1 + version: 2.31.1 sass: specifier: ^1.63.6 version: 1.90.0 @@ -7645,6 +7648,12 @@ packages: integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==, } + remeda@2.31.1: + resolution: + { + integrity: sha512-FRZefcuXbmCoYt8hAITAzW4t8i/RERaGk/+GtRN90eV3NHxsnRKCDIOJVrwrQ6zz77TG/Xyi9mGRfiJWT7DK1g==, + } + require-directory@2.1.1: resolution: { @@ -14510,6 +14519,10 @@ snapshots: unified: 11.0.5 vfile: 6.0.3 + remeda@2.31.1: + dependencies: + type-fest: 4.41.0 + require-directory@2.1.1: {} require-from-string@2.0.2: {} From 396a95d5cef54d24535ccff1b9d9a8cbc0e52d12 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Wed, 17 Sep 2025 16:44:11 -0600 Subject: [PATCH 38/77] chore(main): release 0.12.0 (#654) --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e59f1ab6..9d52ffff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [0.12.0](https://github.com/Monadical-SAS/reflector/compare/v0.11.0...v0.12.0) (2025-09-17) + + +### Features + +* calendar integration ([#608](https://github.com/Monadical-SAS/reflector/issues/608)) ([6f680b5](https://github.com/Monadical-SAS/reflector/commit/6f680b57954c688882c4ed49f40f161c52a00a24)) +* self-hosted gpu api ([#636](https://github.com/Monadical-SAS/reflector/issues/636)) ([ab859d6](https://github.com/Monadical-SAS/reflector/commit/ab859d65a6bded904133a163a081a651b3938d42)) + + +### Bug Fixes + +* ignore player hotkeys for text inputs ([#646](https://github.com/Monadical-SAS/reflector/issues/646)) ([fa049e8](https://github.com/Monadical-SAS/reflector/commit/fa049e8d068190ce7ea015fd9fcccb8543f54a3f)) + ## [0.11.0](https://github.com/Monadical-SAS/reflector/compare/v0.10.0...v0.11.0) (2025-09-16) From 870e8605171a27155a9cbee215eeccb9a8d6c0a2 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Wed, 17 Sep 2025 17:09:54 -0600 Subject: [PATCH 39/77] fix: production blocked because having existing meeting with room_id null (#657) --- .../6dec9fb5b46c_make_meeting_room_id_required_and_add_.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/migrations/versions/6dec9fb5b46c_make_meeting_room_id_required_and_add_.py b/server/migrations/versions/6dec9fb5b46c_make_meeting_room_id_required_and_add_.py index 20828c65..c0a29246 100644 --- a/server/migrations/versions/6dec9fb5b46c_make_meeting_room_id_required_and_add_.py +++ b/server/migrations/versions/6dec9fb5b46c_make_meeting_room_id_required_and_add_.py @@ -8,7 +8,6 @@ Create Date: 2025-09-10 10:47:06.006819 from typing import Sequence, Union -import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. @@ -21,7 +20,6 @@ depends_on: Union[str, Sequence[str], None] = None def upgrade() -> None: # ### commands auto generated by Alembic - please adjust! 
### with op.batch_alter_table("meeting", schema=None) as batch_op: - batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=False) batch_op.create_foreign_key( None, "room", ["room_id"], ["id"], ondelete="CASCADE" ) @@ -33,6 +31,5 @@ def downgrade() -> None: # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table("meeting", schema=None) as batch_op: batch_op.drop_constraint("meeting_room_id_fkey", type_="foreignkey") - batch_op.alter_column("room_id", existing_type=sa.VARCHAR(), nullable=True) # ### end Alembic commands ### From 6566e04300d56897587375020e7099e47328bbd6 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Wed, 17 Sep 2025 17:17:22 -0600 Subject: [PATCH 40/77] chore(main): release 0.12.1 (#658) --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d52ffff..9933ba7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [0.12.1](https://github.com/Monadical-SAS/reflector/compare/v0.12.0...v0.12.1) (2025-09-17) + + +### Bug Fixes + +* production blocked because having existing meeting with room_id null ([#657](https://github.com/Monadical-SAS/reflector/issues/657)) ([870e860](https://github.com/Monadical-SAS/reflector/commit/870e8605171a27155a9cbee215eeccb9a8d6c0a2)) + ## [0.12.0](https://github.com/Monadical-SAS/reflector/compare/v0.11.0...v0.12.0) (2025-09-17) From 2b723da08bd8f1e037cb769285abd3d57463905a Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Wed, 17 Sep 2025 20:02:17 -0400 Subject: [PATCH 41/77] rooms-page-calendar-ics-room-name-fix (#659) Co-authored-by: Igor Loskutov --- .../(app)/rooms/_components/ICSSettings.tsx | 54 ++++++++++--------- www/app/(app)/rooms/page.tsx | 2 +- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/www/app/(app)/rooms/_components/ICSSettings.tsx b/www/app/(app)/rooms/_components/ICSSettings.tsx index 1fa97692..9b45ff33 100644 --- a/www/app/(app)/rooms/_components/ICSSettings.tsx +++ b/www/app/(app)/rooms/_components/ICSSettings.tsx @@ -27,7 +27,7 @@ import { } from "../../../lib/utils"; interface ICSSettingsProps { - roomName: NonEmptyString; + roomName: NonEmptyString | null; icsUrl?: string; icsEnabled?: boolean; icsFetchInterval?: number; @@ -85,7 +85,7 @@ export default function ICSSettings({ const handleCopyRoomUrl = async () => { try { await navigator.clipboard.writeText( - roomAbsoluteUrl(assertExistsAndNonEmptyString(roomName)), + roomAbsoluteUrl(assertExists(roomName)), ); setJustCopied(true); @@ -123,7 +123,7 @@ export default function ICSSettings({ const handleRoomUrlClick = () => { if (roomUrlInputRef.current) { roomUrlInputRef.current.select(); - handleCopyRoomUrl(); + handleCopyRoomUrl().then(() => {}); } }; @@ -196,30 +196,32 @@ export default function ICSSettings({ To enable Reflector to recognize your calendar events as meetings, add this URL as the location in your calendar events - - - - - {justCopied ? : } - + {roomName ? ( + + + + + {justCopied ? 
: } + + - + ) : null} diff --git a/www/app/(app)/rooms/page.tsx b/www/app/(app)/rooms/page.tsx index 88e66720..9de5950a 100644 --- a/www/app/(app)/rooms/page.tsx +++ b/www/app/(app)/rooms/page.tsx @@ -624,7 +624,7 @@ export default function RoomsList() { Date: Thu, 18 Sep 2025 10:02:30 -0600 Subject: [PATCH 42/77] fix: invalid cleanup call (#660) --- server/reflector/worker/cleanup.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/server/reflector/worker/cleanup.py b/server/reflector/worker/cleanup.py index e634994d..66d45e94 100644 --- a/server/reflector/worker/cleanup.py +++ b/server/reflector/worker/cleanup.py @@ -5,7 +5,6 @@ Deletes old anonymous transcripts and their associated meetings/recordings. Transcripts are the main entry point - any associated data is also removed. """ -import asyncio from datetime import datetime, timedelta, timezone from typing import TypedDict @@ -152,5 +151,5 @@ async def cleanup_old_public_data( retry_kwargs={"max_retries": 3, "countdown": 300}, ) @asynctask -def cleanup_old_public_data_task(days: int | None = None): - asyncio.run(cleanup_old_public_data(days=days)) +async def cleanup_old_public_data_task(days: int | None = None): + await cleanup_old_public_data(days=days) From 47716f6e5ddee952609d2fa0ffabdfa865286796 Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Fri, 19 Sep 2025 15:14:40 -0400 Subject: [PATCH 43/77] feat: room form edit with enter (#662) * room form edit with enter * mobile form enter do nothing * restore overwritten older change --------- Co-authored-by: Igor Loskutov --- www/app/(app)/rooms/page.tsx | 799 ++++++++++++++++++----------------- 1 file changed, 406 insertions(+), 393 deletions(-) diff --git a/www/app/(app)/rooms/page.tsx b/www/app/(app)/rooms/page.tsx index 9de5950a..723d698a 100644 --- a/www/app/(app)/rooms/page.tsx +++ b/www/app/(app)/rooms/page.tsx @@ -309,7 +309,7 @@ export default function RoomsList() { setRoomInput(null); setIsEditing(false); - setEditRoomId(""); + setEditRoomId(null); setNameError(""); refetch(); onClose(); @@ -449,415 +449,428 @@ export default function RoomsList() { - - - General - Calendar - Share - WebHook - +
{ + e.preventDefault(); + handleSaveRoom(); + }} + > + + + General + Calendar + Share + WebHook + - - - Room name - - - No spaces or special characters allowed - - {nameError && ( - {nameError} - )} - + + + Room name + + + No spaces or special characters allowed + + {nameError && ( + {nameError} + )} + - - { - const syntheticEvent = { - target: { - name: "isLocked", - type: "checkbox", - checked: e.checked, - }, - }; - handleRoomChange(syntheticEvent); - }} - > - - - - - Locked room - - + + { + const syntheticEvent = { + target: { + name: "isLocked", + type: "checkbox", + checked: e.checked, + }, + }; + handleRoomChange(syntheticEvent); + }} + > + + + + + Locked room + + + + Room size + + setRoomInput({ ...room, roomMode: e.value[0] }) + } + collection={roomModeCollection} + > + + + + + + + + + + + + {roomModeOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + Recording type + + setRoomInput({ + ...room, + recordingType: e.value[0], + recordingTrigger: + e.value[0] !== "cloud" + ? "none" + : room.recordingTrigger, + }) + } + collection={recordingTypeCollection} + > + + + + + + + + + + + + {recordingTypeOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + Cloud recording start trigger + + setRoomInput({ + ...room, + recordingTrigger: e.value[0], + }) + } + collection={recordingTriggerCollection} + disabled={room.recordingType !== "cloud"} + > + + + + + + + + + + + + {recordingTriggerOptions.map((option) => ( + + {option.label} + + + ))} + + + + - - Room size - - setRoomInput({ ...room, roomMode: e.value[0] }) - } - collection={roomModeCollection} - > - - - - - - - - - - - - {roomModeOptions.map((option) => ( - - {option.label} - - - ))} - - - - + + { + const syntheticEvent = { + target: { + name: "isShared", + type: "checkbox", + checked: e.checked, + }, + }; + handleRoomChange(syntheticEvent); + }} + > + + + + + Shared room + + + - - Recording type - - setRoomInput({ - ...room, - recordingType: e.value[0], - recordingTrigger: - e.value[0] !== "cloud" - ? 
"none" - : room.recordingTrigger, - }) - } - collection={recordingTypeCollection} - > - - - - - - - - - - - - {recordingTypeOptions.map((option) => ( - - {option.label} - - - ))} - - - - + + + { + const syntheticEvent = { + target: { + name: "zulipAutoPost", + type: "checkbox", + checked: e.checked, + }, + }; + handleRoomChange(syntheticEvent); + }} + > + + + + + + Automatically post transcription to Zulip + + + + + Zulip stream + + setRoomInput({ + ...room, + zulipStream: e.value[0], + zulipTopic: "", + }) + } + collection={streamCollection} + disabled={!room.zulipAutoPost} + > + + + + + + + + + + + + {streamOptions.map((option) => ( + + {option.label} + + + ))} + + + + + + Zulip topic + + setRoomInput({ ...room, zulipTopic: e.value[0] }) + } + collection={topicCollection} + disabled={!room.zulipAutoPost} + > + + + + + + + + + + + + {topicOptions.map((option) => ( + + {option.label} + + + ))} + + + + + - - Cloud recording start trigger - - setRoomInput({ ...room, recordingTrigger: e.value[0] }) - } - collection={recordingTriggerCollection} - disabled={room.recordingType !== "cloud"} - > - - - - - - - - - - - - {recordingTriggerOptions.map((option) => ( - - {option.label} - - - ))} - - - - + + + Webhook URL + + + Optional: URL to receive notifications when transcripts + are ready + + - - { - const syntheticEvent = { - target: { - name: "isShared", - type: "checkbox", - checked: e.checked, - }, - }; - handleRoomChange(syntheticEvent); - }} - > - - - - - Shared room - - - - - - { - setRoomInput({ - ...room, - icsUrl: - settings.ics_url !== undefined - ? settings.ics_url - : room.icsUrl, - icsEnabled: - settings.ics_enabled !== undefined - ? settings.ics_enabled - : room.icsEnabled, - icsFetchInterval: - settings.ics_fetch_interval !== undefined - ? settings.ics_fetch_interval - : room.icsFetchInterval, - }); - }} - isOwner={true} - isEditing={isEditing} - /> - - - - - { - const syntheticEvent = { - target: { - name: "zulipAutoPost", - type: "checkbox", - checked: e.checked, - }, - }; - handleRoomChange(syntheticEvent); - }} - > - - - - - - Automatically post transcription to Zulip - - - - - - Zulip stream - - setRoomInput({ - ...room, - zulipStream: e.value[0], - zulipTopic: "", - }) - } - collection={streamCollection} - disabled={!room.zulipAutoPost} - > - - - - - - - - - - - - {streamOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - - Zulip topic - - setRoomInput({ ...room, zulipTopic: e.value[0] }) - } - collection={topicCollection} - disabled={!room.zulipAutoPost} - > - - - - - - - - - - - - {topicOptions.map((option) => ( - - {option.label} - - - ))} - - - - - - - - - Webhook URL - - - Optional: URL to receive notifications when transcripts - are ready - - - - {room.webhookUrl && ( - <> - - Webhook Secret - - - {isEditing && room.webhookSecret && ( - + + Webhook Secret + + - setShowWebhookSecret(!showWebhookSecret) - } - > - {showWebhookSecret ? : } - - )} - - - Used for HMAC signature verification (auto-generated - if left empty) - - - - {isEditing && ( - <> - - - {webhookTestResult && ( -
+ {isEditing && room.webhookSecret && ( + + setShowWebhookSecret(!showWebhookSecret) + } > - {webhookTestResult} -
+ {showWebhookSecret ? : } + )}
- - )} - - )} -
-
+ + Used for HMAC signature verification (auto-generated + if left empty) + + + + {isEditing && ( + <> + + + {webhookTestResult && ( +
+ {webhookTestResult} +
+ )} +
+ + )} + + )} + + + + + { + setRoomInput({ + ...room, + icsUrl: + settings.ics_url !== undefined + ? settings.ics_url + : room.icsUrl, + icsEnabled: + settings.ics_enabled !== undefined + ? settings.ics_enabled + : room.icsEnabled, + icsFetchInterval: + settings.ics_fetch_interval !== undefined + ? settings.ics_fetch_interval + : room.icsFetchInterval, + }); + }} + isOwner={true} + isEditing={isEditing} + /> + + + +
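The webhook fields above pair each webhook URL with a secret that is "used for HMAC signature verification". A minimal receiver-side sketch, assuming the secret signs the raw request body with HMAC-SHA256 and the hex digest arrives in a signature header (the header name and digest encoding here are illustrative assumptions, not Reflector's confirmed wire format):

```python
import hashlib
import hmac


def verify_webhook_signature(raw_body: bytes, received_signature: str, secret: str) -> bool:
    """Recompute the HMAC over the raw body and compare in constant time."""
    expected = hmac.new(secret.encode("utf-8"), raw_body, hashlib.sha256).hexdigest()
    # compare_digest avoids leaking the match position through timing
    return hmac.compare_digest(expected, received_signature)


# Usage sketch: reject the request unless the signature checks out.
# signature = request.headers["X-Webhook-Signature"]  # hypothetical header name
# if not verify_webhook_signature(await request.body(), signature, secret):
#     return Response(status_code=401)
```

Verification must run over the raw bytes as received; re-serializing parsed JSON can change whitespace or key order and break the digest.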
+ + + Make sure to copy your API key now. You won't be able to see it + again! + + + + {createdKey.key} + + handleCopyKey(createdKey.key)} + > + + + + + )} + + {/* Create new key */} + + + Create New API Key + + {!isCreating ? ( + + ) : ( + + + Name (optional) + setNewKeyName(e.target.value)} + /> + + + + + + + )} + + + {/* List of API keys */} + + + Your API Keys + + {isLoading ? ( + Loading... + ) : !apiKeys || apiKeys.length === 0 ? ( + + No API keys yet. Create one to get started. + + ) : ( + + + + Name + Created + Actions + + + + {apiKeys.map((key) => ( + + + {key.name || Unnamed} + + {formatDate(key.created_at)} + + handleDeleteRequest(key.id)} + loading={ + deleteKeyMutation.isPending && + deleteKeyMutation.variables?.params?.path?.key_id === + key.id + } + > + + + + + ))} + + + )} + + + {/* Delete confirmation dialog */} + { + if (!e.open) setKeyToDelete(null); + }} + initialFocusEl={() => cancelRef.current} + > + + + + + Delete API Key + + + + Are you sure you want to delete this API key? This action cannot + be undone. + + + + + + + + + + + ); +} From 1473fd82dc472c394cbaa2987212ad662a74bcac Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Wed, 12 Nov 2025 21:21:16 -0500 Subject: [PATCH 66/77] feat: daily.co support as alternative to whereby (#691) * llm instructions * vibe dailyco * vibe dailyco * doc update (vibe) * dont show recording ui on call * stub processor (vibe) * stub processor (vibe) self-review * stub processor (vibe) self-review * chore(main): release 0.14.0 (#670) * Add multitrack pipeline * Mixdown audio tracks * Mixdown with pyav filter graph * Trigger multitrack processing for daily recordings * apply platform from envs in priority: non-dry * Use explicit track keys for processing * Align tracks of a multitrack recording * Generate waveforms for the mixed audio * Emit multriack pipeline events * Fix multitrack pipeline track alignment * dailico docs * Enable multitrack reprocessing * modal temp files uniform names, cleanup. 
remove llm temporary docs * docs cleanup * dont proceed with raw recordings if any of the downloads fail * dry transcription pipelines * remove is_miltitrack * comments * explicit dailyco room name * docs * remove stub data/method * frontend daily/whereby code self-review (no-mistake) * frontend daily/whereby code self-review (no-mistakes) * frontend daily/whereby code self-review (no-mistakes) * consent cleanup for multitrack (no-mistakes) * llm fun * remove extra comments * fix tests * merge migrations * Store participant names * Get participants by meeting session id * pop back main branch migration * s3 paddington (no-mistakes) * comment * pr comments * pr comments * pr comments * platform / meeting cleanup * Use participant names in summary generation * platform assignment to meeting at controller level * pr comment * room playform properly default none * room playform properly default none * restore migration lost * streaming WIP * extract storage / use common storage / proper env vars for storage * fix mocks tests * remove fall back * streaming for multifile * cenrtal storage abstraction (no-mistakes) * remove dead code / vars * Set participant user id for authenticated users * whereby recording name parsing fix * whereby recording name parsing fix * more file stream * storage dry + tests * remove homemade boto3 streaming and use proper boto * update migration guide * webhook creation script - print uuid --------- Co-authored-by: Igor Loskutov Co-authored-by: Mathieu Virbel Co-authored-by: Sergey Mankovsky --- server/docs/video-platforms/README.md | 234 ++++++ server/env.example | 27 + .../1e49625677e4_add_platform_support.py | 50 ++ .../versions/f8294b31f022_add_track_keys.py | 28 + server/reflector/app.py | 2 + server/reflector/db/meetings.py | 57 +- server/reflector/db/recordings.py | 4 + server/reflector/db/rooms.py | 12 +- server/reflector/db/transcripts.py | 19 +- server/reflector/pipelines/__init__.py | 1 + .../reflector/pipelines/main_file_pipeline.py | 115 +-- .../reflector/pipelines/main_live_pipeline.py | 59 +- .../pipelines/main_multitrack_pipeline.py | 694 ++++++++++++++++++ .../reflector/pipelines/topic_processing.py | 109 +++ .../pipelines/transcription_helpers.py | 34 + .../processors/summary/summary_builder.py | 69 +- .../processors/transcript_final_summary.py | 38 +- .../processors/transcript_topic_detector.py | 8 +- server/reflector/schemas/platform.py | 5 + server/reflector/settings.py | 31 +- server/reflector/storage/__init__.py | 58 +- server/reflector/storage/base.py | 130 +++- server/reflector/storage/storage_aws.py | 229 +++++- server/reflector/utils/daily.py | 26 + server/reflector/utils/datetime.py | 9 + server/reflector/utils/string.py | 11 +- server/reflector/utils/url.py | 37 + server/reflector/video_platforms/__init__.py | 11 + server/reflector/video_platforms/base.py | 54 ++ server/reflector/video_platforms/daily.py | 198 +++++ server/reflector/video_platforms/factory.py | 62 ++ server/reflector/video_platforms/models.py | 40 + server/reflector/video_platforms/registry.py | 35 + server/reflector/video_platforms/whereby.py | 141 ++++ .../video_platforms/whereby_utils.py | 38 + server/reflector/views/daily.py | 233 ++++++ server/reflector/views/rooms.py | 89 ++- server/reflector/views/transcripts_process.py | 29 +- server/reflector/whereby.py | 114 --- server/reflector/worker/cleanup.py | 6 +- server/reflector/worker/ics_sync.py | 34 +- server/reflector/worker/process.py | 354 +++++++-- server/scripts/recreate_daily_webhook.py | 123 ++++ 
server/tests/conftest.py | 12 + server/tests/mocks/__init__.py | 0 server/tests/mocks/mock_platform.py | 112 +++ server/tests/test_cleanup.py | 8 +- server/tests/test_consent_multitrack.py | 330 +++++++++ server/tests/test_pipeline_main_file.py | 23 +- server/tests/test_room_ics_api.py | 10 + server/tests/test_storage.py | 321 ++++++++ .../test_transcripts_recording_deletion.py | 7 +- server/tests/test_utils_daily.py | 17 + server/tests/test_utils_url.py | 63 ++ server/tests/test_video_platforms_factory.py | 58 ++ www/app/[roomName]/[meetingId]/page.tsx | 4 +- www/app/[roomName]/components/DailyRoom.tsx | 93 +++ .../[roomName]/components/RoomContainer.tsx | 214 ++++++ www/app/[roomName]/components/WherebyRoom.tsx | 101 +++ www/app/[roomName]/page.tsx | 4 +- www/app/lib/consent/ConsentDialog.tsx | 36 + www/app/lib/consent/ConsentDialogButton.tsx | 39 + www/app/lib/consent/constants.ts | 12 + www/app/lib/consent/index.ts | 8 + www/app/lib/consent/types.ts | 9 + www/app/lib/consent/useConsentDialog.tsx | 109 +++ www/app/lib/consent/utils.ts | 13 + www/app/lib/useLoginRequiredPages.ts | 5 +- www/app/reflector-api.d.ts | 91 +++ www/package.json | 1 + www/pnpm-lock.yaml | 96 +++ 71 files changed, 4985 insertions(+), 468 deletions(-) create mode 100644 server/docs/video-platforms/README.md create mode 100644 server/migrations/versions/1e49625677e4_add_platform_support.py create mode 100644 server/migrations/versions/f8294b31f022_add_track_keys.py create mode 100644 server/reflector/pipelines/__init__.py create mode 100644 server/reflector/pipelines/main_multitrack_pipeline.py create mode 100644 server/reflector/pipelines/topic_processing.py create mode 100644 server/reflector/pipelines/transcription_helpers.py create mode 100644 server/reflector/schemas/platform.py create mode 100644 server/reflector/utils/daily.py create mode 100644 server/reflector/utils/datetime.py create mode 100644 server/reflector/utils/url.py create mode 100644 server/reflector/video_platforms/__init__.py create mode 100644 server/reflector/video_platforms/base.py create mode 100644 server/reflector/video_platforms/daily.py create mode 100644 server/reflector/video_platforms/factory.py create mode 100644 server/reflector/video_platforms/models.py create mode 100644 server/reflector/video_platforms/registry.py create mode 100644 server/reflector/video_platforms/whereby.py create mode 100644 server/reflector/video_platforms/whereby_utils.py create mode 100644 server/reflector/views/daily.py delete mode 100644 server/reflector/whereby.py create mode 100644 server/scripts/recreate_daily_webhook.py create mode 100644 server/tests/mocks/__init__.py create mode 100644 server/tests/mocks/mock_platform.py create mode 100644 server/tests/test_consent_multitrack.py create mode 100644 server/tests/test_storage.py create mode 100644 server/tests/test_utils_daily.py create mode 100644 server/tests/test_utils_url.py create mode 100644 server/tests/test_video_platforms_factory.py create mode 100644 www/app/[roomName]/components/DailyRoom.tsx create mode 100644 www/app/[roomName]/components/RoomContainer.tsx create mode 100644 www/app/[roomName]/components/WherebyRoom.tsx create mode 100644 www/app/lib/consent/ConsentDialog.tsx create mode 100644 www/app/lib/consent/ConsentDialogButton.tsx create mode 100644 www/app/lib/consent/constants.ts create mode 100644 www/app/lib/consent/index.ts create mode 100644 www/app/lib/consent/types.ts create mode 100644 www/app/lib/consent/useConsentDialog.tsx create mode 100644 
www/app/lib/consent/utils.ts diff --git a/server/docs/video-platforms/README.md b/server/docs/video-platforms/README.md new file mode 100644 index 00000000..45a615c3 --- /dev/null +++ b/server/docs/video-platforms/README.md @@ -0,0 +1,234 @@ +# Reflector Architecture: Whereby + Daily.co Recording Storage + +## System Overview + +```mermaid +graph TB + subgraph "Actors" + APP[Our App
<br/>Reflector] + WHEREBY[Whereby Service<br/>
External] + DAILY[Daily.co Service<br/>
External] + end + + subgraph "AWS S3 Buckets" + TRANSCRIPT_BUCKET[Transcript Bucket<br/>
reflector-transcripts<br/>
Output: Processed MP3s] + WHEREBY_BUCKET[Whereby Bucket<br/>
reflector-whereby-recordings<br/>
Input: Raw MP4s] + DAILY_BUCKET[Daily.co Bucket<br/>
reflector-dailyco-recordings<br/>
Input: Raw WebM tracks] + end + + subgraph "AWS Infrastructure" + SQS[SQS Queue<br/>
Whereby notifications] + end + + subgraph "Database" + DB[(PostgreSQL<br/>
Recordings, Transcripts, Meetings)] + end + + APP -->|Write processed| TRANSCRIPT_BUCKET + APP -->|Read/Delete| WHEREBY_BUCKET + APP -->|Read/Delete| DAILY_BUCKET + APP -->|Poll| SQS + APP -->|Store metadata| DB + + WHEREBY -->|Write recordings| WHEREBY_BUCKET + WHEREBY_BUCKET -->|S3 Event| SQS + WHEREBY -->|Participant webhooks<br/>
room.client.joined/left| APP + + DAILY -->|Write recordings| DAILY_BUCKET + DAILY -->|Recording webhook<br/>
recording.ready-to-download| APP +``` + +**Note on Webhook vs S3 Event for Recording Processing:** +- **Whereby**: Uses S3 Events → SQS for recording availability (S3 as source of truth, no race conditions) +- **Daily.co**: Uses webhooks for recording availability (more immediate, built-in reliability) +- **Both**: Use webhooks for participant tracking (real-time updates) + +## Credentials & Permissions + +```mermaid +graph LR + subgraph "Master Credentials" + MASTER[TRANSCRIPT_STORAGE_AWS_*<br/>
Access Key ID + Secret] + end + + subgraph "Whereby Upload Credentials" + WHEREBY_CREDS[AWS_WHEREBY_ACCESS_KEY_*<br/>
Access Key ID + Secret] + end + + subgraph "Daily.co Upload Role" + DAILY_ROLE[DAILY_STORAGE_AWS_ROLE_ARN<br/>
IAM Role ARN] + end + + subgraph "Our App Uses" + MASTER -->|Read/Write/Delete| TRANSCRIPT_BUCKET[Transcript Bucket] + MASTER -->|Read/Delete| WHEREBY_BUCKET[Whereby Bucket] + MASTER -->|Read/Delete| DAILY_BUCKET[Daily.co Bucket] + MASTER -->|Poll/Delete| SQS[SQS Queue] + end + + subgraph "We Give To Services" + WHEREBY_CREDS -->|Passed in API call| WHEREBY_SERVICE[Whereby Service] + WHEREBY_SERVICE -->|Write Only| WHEREBY_BUCKET + + DAILY_ROLE -->|Passed in API call| DAILY_SERVICE[Daily.co Service] + DAILY_SERVICE -->|Assume Role| DAILY_ROLE + DAILY_SERVICE -->|Write Only| DAILY_BUCKET + end +``` + +# Video Platform Recording Integration + +This document explains how Reflector receives and identifies multitrack audio recordings from different video platforms. + +## Platform Comparison + +| Platform | Delivery Method | Track Identification | +|----------|----------------|---------------------| +| **Daily.co** | Webhook | Explicit track list in payload | +| **Whereby** | SQS (S3 notifications) | Single file per notification | + +--- + +## Daily.co (Webhook-based) + +Daily.co uses **webhooks** to notify Reflector when recordings are ready. + +### How It Works + +1. **Daily.co sends webhook** when recording is ready + - Event type: `recording.ready-to-download` + - Endpoint: `/v1/daily/webhook` (`reflector/views/daily.py:46-102`) + +2. **Webhook payload explicitly includes track list**: +```json +{ + "recording_id": "7443ee0a-dab1-40eb-b316-33d6c0d5ff88", + "room_name": "daily-20251020193458", + "tracks": [ + { + "type": "audio", + "s3Key": "monadical/daily-20251020193458/1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922", + "size": 831843 + }, + { + "type": "audio", + "s3Key": "monadical/daily-20251020193458/1760988935484-a37c35e3-6f8e-4274-a482-e9d0f102a732-cam-audio-1760988943823", + "size": 408438 + }, + { + "type": "video", + "s3Key": "monadical/daily-20251020193458/...-video.webm", + "size": 30000000 + } + ] +} +``` + +3. **System extracts audio tracks** (`daily.py:211`): +```python +track_keys = [t.s3Key for t in tracks if t.type == "audio"] +``` + +4. **Triggers multitrack processing** (`daily.py:213-218`): +```python +process_multitrack_recording.delay( + bucket_name=bucket_name, # reflector-dailyco-local + room_name=room_name, # daily-20251020193458 + recording_id=recording_id, # 7443ee0a-dab1-40eb-b316-33d6c0d5ff88 + track_keys=track_keys # Only audio s3Keys +) +``` + +### Key Advantage: No Ambiguity + +Even though multiple meetings may share the same S3 bucket/folder (`monadical/`), **there's no ambiguity** because: +- Each webhook payload contains the exact `s3Key` list for that specific `recording_id` +- No need to scan folders or guess which files belong together +- Each track's s3Key includes the room timestamp subfolder (e.g., `daily-20251020193458/`) + +The room name includes timestamp (`daily-20251020193458`) to keep recordings organized, but **the webhook's explicit track list is what prevents mixing files from different meetings**. + +### Track Timeline Extraction + +Daily.co provides timing information in two places: + +**1. PyAV WebM Metadata (current approach)**: +```python +# Read from WebM container stream metadata +stream.start_time = 8.130s # Meeting-relative timing +``` + +**2. 
Filename Timestamps (alternative approach, commit 3bae9076)**: +``` +Filename format: {recording_start_ts}-{uuid}-cam-audio-{track_start_ts}.webm +Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm + +Parse timestamps: +- recording_start_ts: 1760988935484 (Unix ms) +- track_start_ts: 1760988935922 (Unix ms) +- offset: (1760988935922 - 1760988935484) / 1000 = 0.438s +``` + +**Time Difference (PyAV vs Filename)**: +``` +Track 0: + Filename offset: 438ms + PyAV metadata: 229ms + Difference: 209ms + +Track 1: + Filename offset: 8339ms + PyAV metadata: 8130ms + Difference: 209ms +``` + +**Consistent 209ms delta** suggests network/encoding delay between file upload initiation (filename) and actual audio stream start (metadata). + +**Current implementation uses PyAV metadata** because: +- More accurate (represents when audio actually started) +- Padding BEFORE transcription produces correct Whisper timestamps automatically +- No manual offset adjustment needed during transcript merge + +### Why Re-encoding During Padding + +Padding coincidentally involves re-encoding, which is important for Daily.co + Whisper: + +**Problem:** Daily.co skips frames in recordings when microphone is muted or paused +- WebM containers have gaps where audio frames should be +- Whisper doesn't understand these gaps and produces incorrect timestamps +- Example: 5s of audio with 2s muted → file has frames only for 3s, Whisper thinks duration is 3s + +**Solution:** Re-encoding via PyAV filter graph (`adelay` + `aresample`) +- Restores missing frames as silence +- Produces continuous audio stream without gaps +- Whisper now sees correct duration and produces accurate timestamps + +**Why combined with padding:** +- Already re-encoding for padding (adding initial silence) +- More performant to do both operations in single PyAV pipeline +- Padded values needed for mixdown anyway (creating final MP3) + +Implementation: `main_multitrack_pipeline.py:_apply_audio_padding_streaming()` + +--- + +## Whereby (SQS-based) + +Whereby uses **AWS SQS** (via S3 notifications) to notify Reflector when files are uploaded. + +### How It Works + +1. **Whereby uploads recording** to S3 +2. **S3 sends notification** to SQS queue (one notification per file) +3. **Reflector polls SQS queue** (`worker/process.py:process_messages()`) +4. **System processes single file** (`worker/process.py:process_recording()`) + +### Key Difference from Daily.co + +**Whereby (SQS):** System receives S3 notification "file X was created" - only knows about one file at a time, would need to scan folder to find related files + +**Daily.co (Webhook):** Daily explicitly tells system which files belong together in the webhook payload + +--- + + diff --git a/server/env.example b/server/env.example index ff0f4211..7375bf0a 100644 --- a/server/env.example +++ b/server/env.example @@ -71,3 +71,30 @@ DIARIZATION_URL=https://monadical-sas--reflector-diarizer-web.modal.run ## Sentry DSN configuration #SENTRY_DSN= + +## ======================================================= +## Video Platform Configuration +## ======================================================= + +## Whereby +#WHEREBY_API_KEY=your-whereby-api-key +#WHEREBY_WEBHOOK_SECRET=your-whereby-webhook-secret +#WHEREBY_STORAGE_AWS_ACCESS_KEY_ID=your-aws-key +#WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY=your-aws-secret +#AWS_PROCESS_RECORDING_QUEUE_URL=https://sqs.us-west-2.amazonaws.com/... 
+ +## Daily.co +#DAILY_API_KEY=your-daily-api-key +#DAILY_WEBHOOK_SECRET=your-daily-webhook-secret +#DAILY_SUBDOMAIN=your-subdomain +#DAILY_WEBHOOK_UUID= # Auto-populated by recreate_daily_webhook.py script +#DAILYCO_STORAGE_AWS_ROLE_ARN=... # IAM role ARN for Daily.co S3 access +#DAILYCO_STORAGE_AWS_BUCKET_NAME=reflector-dailyco +#DAILYCO_STORAGE_AWS_REGION=us-west-2 + +## Whereby (optional separate bucket) +#WHEREBY_STORAGE_AWS_BUCKET_NAME=reflector-whereby +#WHEREBY_STORAGE_AWS_REGION=us-east-1 + +## Platform Configuration +#DEFAULT_VIDEO_PLATFORM=whereby # Default platform for new rooms diff --git a/server/migrations/versions/1e49625677e4_add_platform_support.py b/server/migrations/versions/1e49625677e4_add_platform_support.py new file mode 100644 index 00000000..fa403f92 --- /dev/null +++ b/server/migrations/versions/1e49625677e4_add_platform_support.py @@ -0,0 +1,50 @@ +"""add_platform_support + +Revision ID: 1e49625677e4 +Revises: 9e3f7b2a4c8e +Create Date: 2025-10-08 13:17:29.943612 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = "1e49625677e4" +down_revision: Union[str, None] = "9e3f7b2a4c8e" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Add platform field with default 'whereby' for backward compatibility.""" + with op.batch_alter_table("room", schema=None) as batch_op: + batch_op.add_column( + sa.Column( + "platform", + sa.String(), + nullable=True, + server_default=None, + ) + ) + + with op.batch_alter_table("meeting", schema=None) as batch_op: + batch_op.add_column( + sa.Column( + "platform", + sa.String(), + nullable=False, + server_default="whereby", + ) + ) + + +def downgrade() -> None: + """Remove platform field.""" + with op.batch_alter_table("meeting", schema=None) as batch_op: + batch_op.drop_column("platform") + + with op.batch_alter_table("room", schema=None) as batch_op: + batch_op.drop_column("platform") diff --git a/server/migrations/versions/f8294b31f022_add_track_keys.py b/server/migrations/versions/f8294b31f022_add_track_keys.py new file mode 100644 index 00000000..7eda6ccc --- /dev/null +++ b/server/migrations/versions/f8294b31f022_add_track_keys.py @@ -0,0 +1,28 @@ +"""add_track_keys + +Revision ID: f8294b31f022 +Revises: 1e49625677e4 +Create Date: 2025-10-27 18:52:17.589167 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "f8294b31f022" +down_revision: Union[str, None] = "1e49625677e4" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + with op.batch_alter_table("recording", schema=None) as batch_op: + batch_op.add_column(sa.Column("track_keys", sa.JSON(), nullable=True)) + + +def downgrade() -> None: + with op.batch_alter_table("recording", schema=None) as batch_op: + batch_op.drop_column("track_keys") diff --git a/server/reflector/app.py b/server/reflector/app.py index a15934f5..2ca76acb 100644 --- a/server/reflector/app.py +++ b/server/reflector/app.py @@ -12,6 +12,7 @@ from reflector.events import subscribers_shutdown, subscribers_startup from reflector.logger import logger from reflector.metrics import metrics_init from reflector.settings import settings +from reflector.views.daily import router as daily_router from reflector.views.meetings import router as meetings_router from reflector.views.rooms import router as rooms_router from reflector.views.rtc_offer import router as rtc_offer_router @@ -96,6 +97,7 @@ app.include_router(user_api_keys_router, prefix="/v1") app.include_router(user_ws_router, prefix="/v1") app.include_router(zulip_router, prefix="/v1") app.include_router(whereby_router, prefix="/v1") +app.include_router(daily_router, prefix="/v1/daily") add_pagination(app) # prepare celery diff --git a/server/reflector/db/meetings.py b/server/reflector/db/meetings.py index 12a0c187..6912b285 100644 --- a/server/reflector/db/meetings.py +++ b/server/reflector/db/meetings.py @@ -7,7 +7,10 @@ from sqlalchemy.dialects.postgresql import JSONB from reflector.db import get_database, metadata from reflector.db.rooms import Room +from reflector.schemas.platform import WHEREBY_PLATFORM, Platform from reflector.utils import generate_uuid4 +from reflector.utils.string import assert_equal +from reflector.video_platforms.factory import get_platform meetings = sa.Table( "meeting", @@ -55,6 +58,12 @@ meetings = sa.Table( ), ), sa.Column("calendar_metadata", JSONB), + sa.Column( + "platform", + sa.String, + nullable=False, + server_default=assert_equal(WHEREBY_PLATFORM, "whereby"), + ), sa.Index("idx_meeting_room_id", "room_id"), sa.Index("idx_meeting_calendar_event", "calendar_event_id"), ) @@ -94,13 +103,14 @@ class Meeting(BaseModel): is_locked: bool = False room_mode: Literal["normal", "group"] = "normal" recording_type: Literal["none", "local", "cloud"] = "cloud" - recording_trigger: Literal[ + recording_trigger: Literal[ # whereby-specific "none", "prompt", "automatic", "automatic-2nd-participant" ] = "automatic-2nd-participant" num_clients: int = 0 is_active: bool = True calendar_event_id: str | None = None calendar_metadata: dict[str, Any] | None = None + platform: Platform = WHEREBY_PLATFORM class MeetingController: @@ -130,6 +140,7 @@ class MeetingController: recording_trigger=room.recording_trigger, calendar_event_id=calendar_event_id, calendar_metadata=calendar_metadata, + platform=get_platform(room.platform), ) query = meetings.insert().values(**meeting.model_dump()) await get_database().execute(query) @@ -137,7 +148,8 @@ class MeetingController: async def get_all_active(self) -> list[Meeting]: query = meetings.select().where(meetings.c.is_active) - return await get_database().fetch_all(query) + results = await get_database().fetch_all(query) + return [Meeting(**result) for result in results] async def get_by_room_name( self, @@ -147,16 +159,14 @@ class MeetingController: Get a meeting by room name. 
For backward compatibility, returns the most recent meeting. """ - end_date = getattr(meetings.c, "end_date") query = ( meetings.select() .where(meetings.c.room_name == room_name) - .order_by(end_date.desc()) + .order_by(meetings.c.end_date.desc()) ) result = await get_database().fetch_one(query) if not result: return None - return Meeting(**result) async def get_active(self, room: Room, current_time: datetime) -> Meeting | None: @@ -179,7 +189,6 @@ class MeetingController: result = await get_database().fetch_one(query) if not result: return None - return Meeting(**result) async def get_all_active_for_room( @@ -219,17 +228,27 @@ class MeetingController: return None return Meeting(**result) - async def get_by_id(self, meeting_id: str, **kwargs) -> Meeting | None: + async def get_by_id( + self, meeting_id: str, room: Room | None = None + ) -> Meeting | None: query = meetings.select().where(meetings.c.id == meeting_id) + + if room: + query = query.where(meetings.c.room_id == room.id) + result = await get_database().fetch_one(query) if not result: return None return Meeting(**result) - async def get_by_calendar_event(self, calendar_event_id: str) -> Meeting | None: + async def get_by_calendar_event( + self, calendar_event_id: str, room: Room + ) -> Meeting | None: query = meetings.select().where( meetings.c.calendar_event_id == calendar_event_id ) + if room: + query = query.where(meetings.c.room_id == room.id) result = await get_database().fetch_one(query) if not result: return None @@ -239,6 +258,28 @@ class MeetingController: query = meetings.update().where(meetings.c.id == meeting_id).values(**kwargs) await get_database().execute(query) + async def increment_num_clients(self, meeting_id: str) -> None: + """Atomically increment participant count.""" + query = ( + meetings.update() + .where(meetings.c.id == meeting_id) + .values(num_clients=meetings.c.num_clients + 1) + ) + await get_database().execute(query) + + async def decrement_num_clients(self, meeting_id: str) -> None: + """Atomically decrement participant count (min 0).""" + query = ( + meetings.update() + .where(meetings.c.id == meeting_id) + .values( + num_clients=sa.case( + (meetings.c.num_clients > 0, meetings.c.num_clients - 1), else_=0 + ) + ) + ) + await get_database().execute(query) + class MeetingConsentController: async def get_by_meeting_id(self, meeting_id: str) -> list[MeetingConsent]: diff --git a/server/reflector/db/recordings.py b/server/reflector/db/recordings.py index 0d05790d..bde4afa5 100644 --- a/server/reflector/db/recordings.py +++ b/server/reflector/db/recordings.py @@ -21,6 +21,7 @@ recordings = sa.Table( server_default="pending", ), sa.Column("meeting_id", sa.String), + sa.Column("track_keys", sa.JSON, nullable=True), sa.Index("idx_recording_meeting_id", "meeting_id"), ) @@ -28,10 +29,13 @@ recordings = sa.Table( class Recording(BaseModel): id: str = Field(default_factory=generate_uuid4) bucket_name: str + # for single-track object_key: str recorded_at: datetime status: Literal["pending", "processing", "completed", "failed"] = "pending" meeting_id: str | None = None + # for multitrack reprocessing + track_keys: list[str] | None = None class RecordingController: diff --git a/server/reflector/db/rooms.py b/server/reflector/db/rooms.py index 396c818a..1081ac38 100644 --- a/server/reflector/db/rooms.py +++ b/server/reflector/db/rooms.py @@ -9,6 +9,7 @@ from pydantic import BaseModel, Field from sqlalchemy.sql import false, or_ from reflector.db import get_database, metadata +from reflector.schemas.platform import 
Platform from reflector.utils import generate_uuid4 rooms = sqlalchemy.Table( @@ -50,6 +51,12 @@ rooms = sqlalchemy.Table( ), sqlalchemy.Column("ics_last_sync", sqlalchemy.DateTime(timezone=True)), sqlalchemy.Column("ics_last_etag", sqlalchemy.Text), + sqlalchemy.Column( + "platform", + sqlalchemy.String, + nullable=True, + server_default=None, + ), sqlalchemy.Index("idx_room_is_shared", "is_shared"), sqlalchemy.Index("idx_room_ics_enabled", "ics_enabled"), ) @@ -66,7 +73,7 @@ class Room(BaseModel): is_locked: bool = False room_mode: Literal["normal", "group"] = "normal" recording_type: Literal["none", "local", "cloud"] = "cloud" - recording_trigger: Literal[ + recording_trigger: Literal[ # whereby-specific "none", "prompt", "automatic", "automatic-2nd-participant" ] = "automatic-2nd-participant" is_shared: bool = False @@ -77,6 +84,7 @@ class Room(BaseModel): ics_enabled: bool = False ics_last_sync: datetime | None = None ics_last_etag: str | None = None + platform: Platform | None = None class RoomController: @@ -130,6 +138,7 @@ class RoomController: ics_url: str | None = None, ics_fetch_interval: int = 300, ics_enabled: bool = False, + platform: Platform | None = None, ): """ Add a new room @@ -153,6 +162,7 @@ class RoomController: ics_url=ics_url, ics_fetch_interval=ics_fetch_interval, ics_enabled=ics_enabled, + platform=platform, ) query = rooms.insert().values(**room.model_dump()) try: diff --git a/server/reflector/db/transcripts.py b/server/reflector/db/transcripts.py index b82e4fe1..f9c3c057 100644 --- a/server/reflector/db/transcripts.py +++ b/server/reflector/db/transcripts.py @@ -21,7 +21,7 @@ from reflector.db.utils import is_postgresql from reflector.logger import logger from reflector.processors.types import Word as ProcessorWord from reflector.settings import settings -from reflector.storage import get_recordings_storage, get_transcripts_storage +from reflector.storage import get_transcripts_storage from reflector.utils import generate_uuid4 from reflector.utils.webvtt import topics_to_webvtt @@ -186,6 +186,7 @@ class TranscriptParticipant(BaseModel): id: str = Field(default_factory=generate_uuid4) speaker: int | None name: str + user_id: str | None = None class Transcript(BaseModel): @@ -623,7 +624,9 @@ class TranscriptController: ) if recording: try: - await get_recordings_storage().delete_file(recording.object_key) + await get_transcripts_storage().delete_file( + recording.object_key, bucket=recording.bucket_name + ) except Exception as e: logger.warning( "Failed to delete recording object from S3", @@ -725,11 +728,13 @@ class TranscriptController: """ Download audio from storage """ - transcript.audio_mp3_filename.write_bytes( - await get_transcripts_storage().get_file( - transcript.storage_audio_path, - ) - ) + storage = get_transcripts_storage() + try: + with open(transcript.audio_mp3_filename, "wb") as f: + await storage.stream_to_fileobj(transcript.storage_audio_path, f) + except Exception: + transcript.audio_mp3_filename.unlink(missing_ok=True) + raise async def upsert_participant( self, diff --git a/server/reflector/pipelines/__init__.py b/server/reflector/pipelines/__init__.py new file mode 100644 index 00000000..89d3e9de --- /dev/null +++ b/server/reflector/pipelines/__init__.py @@ -0,0 +1 @@ +"""Pipeline modules for audio processing.""" diff --git a/server/reflector/pipelines/main_file_pipeline.py b/server/reflector/pipelines/main_file_pipeline.py index 0a05d593..6f8e8011 100644 --- a/server/reflector/pipelines/main_file_pipeline.py +++ 
b/server/reflector/pipelines/main_file_pipeline.py @@ -23,23 +23,18 @@ from reflector.db.transcripts import ( transcripts_controller, ) from reflector.logger import logger +from reflector.pipelines import topic_processing from reflector.pipelines.main_live_pipeline import ( PipelineMainBase, broadcast_to_sockets, task_cleanup_consent, task_pipeline_post_to_zulip, ) -from reflector.processors import ( - AudioFileWriterProcessor, - TranscriptFinalSummaryProcessor, - TranscriptFinalTitleProcessor, - TranscriptTopicDetectorProcessor, -) +from reflector.pipelines.transcription_helpers import transcribe_file_with_processor +from reflector.processors import AudioFileWriterProcessor from reflector.processors.audio_waveform_processor import AudioWaveformProcessor from reflector.processors.file_diarization import FileDiarizationInput from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor -from reflector.processors.file_transcript import FileTranscriptInput -from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor from reflector.processors.transcript_diarization_assembler import ( TranscriptDiarizationAssemblerInput, TranscriptDiarizationAssemblerProcessor, @@ -56,19 +51,6 @@ from reflector.storage import get_transcripts_storage from reflector.worker.webhook import send_transcript_webhook -class EmptyPipeline: - """Empty pipeline for processors that need a pipeline reference""" - - def __init__(self, logger: structlog.BoundLogger): - self.logger = logger - - def get_pref(self, k, d=None): - return d - - async def emit(self, event): - pass - - class PipelineMainFile(PipelineMainBase): """ Optimized file processing pipeline. @@ -81,7 +63,7 @@ class PipelineMainFile(PipelineMainBase): def __init__(self, transcript_id: str): super().__init__(transcript_id=transcript_id) self.logger = logger.bind(transcript_id=self.transcript_id) - self.empty_pipeline = EmptyPipeline(logger=self.logger) + self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger) def _handle_gather_exceptions(self, results: list, operation: str) -> None: """Handle exceptions from asyncio.gather with return_exceptions=True""" @@ -262,24 +244,7 @@ class PipelineMainFile(PipelineMainBase): async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType: """Transcribe complete file""" - processor = FileTranscriptAutoProcessor() - input_data = FileTranscriptInput(audio_url=audio_url, language=language) - - # Store result for retrieval - result: TranscriptType | None = None - - async def capture_result(transcript): - nonlocal result - result = transcript - - processor.on(capture_result) - await processor.push(input_data) - await processor.flush() - - if not result: - raise ValueError("No transcript captured") - - return result + return await transcribe_file_with_processor(audio_url, language) async def diarize_file(self, audio_url: str) -> list[DiarizationSegment] | None: """Get diarization for file""" @@ -322,63 +287,31 @@ class PipelineMainFile(PipelineMainBase): async def detect_topics( self, transcript: TranscriptType, target_language: str ) -> list[TitleSummary]: - """Detect topics from complete transcript""" - chunk_size = 300 - topics: list[TitleSummary] = [] - - async def on_topic(topic: TitleSummary): - topics.append(topic) - return await self.on_topic(topic) - - topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic) - topic_detector.set_pipeline(self.empty_pipeline) - - for i in range(0, len(transcript.words), chunk_size): - chunk_words 
= transcript.words[i : i + chunk_size] - if not chunk_words: - continue - - chunk_transcript = TranscriptType( - words=chunk_words, translation=transcript.translation - ) - - await topic_detector.push(chunk_transcript) - - await topic_detector.flush() - return topics + return await topic_processing.detect_topics( + transcript, + target_language, + on_topic_callback=self.on_topic, + empty_pipeline=self.empty_pipeline, + ) async def generate_title(self, topics: list[TitleSummary]): - """Generate title from topics""" - if not topics: - self.logger.warning("No topics for title generation") - return - - processor = TranscriptFinalTitleProcessor(callback=self.on_title) - processor.set_pipeline(self.empty_pipeline) - - for topic in topics: - await processor.push(topic) - - await processor.flush() + return await topic_processing.generate_title( + topics, + on_title_callback=self.on_title, + empty_pipeline=self.empty_pipeline, + logger=self.logger, + ) async def generate_summaries(self, topics: list[TitleSummary]): - """Generate long and short summaries from topics""" - if not topics: - self.logger.warning("No topics for summary generation") - return - transcript = await self.get_transcript() - processor = TranscriptFinalSummaryProcessor( - transcript=transcript, - callback=self.on_long_summary, - on_short_summary=self.on_short_summary, + return await topic_processing.generate_summaries( + topics, + transcript, + on_long_summary_callback=self.on_long_summary, + on_short_summary_callback=self.on_short_summary, + empty_pipeline=self.empty_pipeline, + logger=self.logger, ) - processor.set_pipeline(self.empty_pipeline) - - for topic in topics: - await processor.push(topic) - - await processor.flush() @shared_task diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index f6fe6a83..83e560d6 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -17,7 +17,6 @@ from contextlib import asynccontextmanager from typing import Generic import av -import boto3 from celery import chord, current_task, group, shared_task from pydantic import BaseModel from structlog import BoundLogger as Logger @@ -584,6 +583,7 @@ async def cleanup_consent(transcript: Transcript, logger: Logger): consent_denied = False recording = None + meeting = None try: if transcript.recording_id: recording = await recordings_controller.get_by_id(transcript.recording_id) @@ -594,8 +594,8 @@ async def cleanup_consent(transcript: Transcript, logger: Logger): meeting.id ) except Exception as e: - logger.error(f"Failed to get fetch consent: {e}", exc_info=e) - consent_denied = True + logger.error(f"Failed to fetch consent: {e}", exc_info=e) + raise if not consent_denied: logger.info("Consent approved, keeping all files") @@ -603,25 +603,24 @@ async def cleanup_consent(transcript: Transcript, logger: Logger): logger.info("Consent denied, cleaning up all related audio files") - if recording and recording.bucket_name and recording.object_key: - s3_whereby = boto3.client( - "s3", - aws_access_key_id=settings.AWS_WHEREBY_ACCESS_KEY_ID, - aws_secret_access_key=settings.AWS_WHEREBY_ACCESS_KEY_SECRET, - ) - try: - s3_whereby.delete_object( - Bucket=recording.bucket_name, Key=recording.object_key - ) - logger.info( - f"Deleted original Whereby recording: {recording.bucket_name}/{recording.object_key}" - ) - except Exception as e: - logger.error(f"Failed to delete Whereby recording: {e}", exc_info=e) + deletion_errors = [] + if 
recording and recording.bucket_name: + keys_to_delete = [] + if recording.track_keys: + keys_to_delete = recording.track_keys + elif recording.object_key: + keys_to_delete = [recording.object_key] + + master_storage = get_transcripts_storage() + for key in keys_to_delete: + try: + await master_storage.delete_file(key, bucket=recording.bucket_name) + logger.info(f"Deleted recording file: {recording.bucket_name}/{key}") + except Exception as e: + error_msg = f"Failed to delete {key}: {e}" + logger.error(error_msg, exc_info=e) + deletion_errors.append(error_msg) - # non-transactional, files marked for deletion not actually deleted is possible - await transcripts_controller.update(transcript, {"audio_deleted": True}) - # 2. Delete processed audio from transcript storage S3 bucket if transcript.audio_location == "storage": storage = get_transcripts_storage() try: @@ -630,18 +629,28 @@ async def cleanup_consent(transcript: Transcript, logger: Logger): f"Deleted processed audio from storage: {transcript.storage_audio_path}" ) except Exception as e: - logger.error(f"Failed to delete processed audio: {e}", exc_info=e) + error_msg = f"Failed to delete processed audio: {e}" + logger.error(error_msg, exc_info=e) + deletion_errors.append(error_msg) - # 3. Delete local audio files try: if hasattr(transcript, "audio_mp3_filename") and transcript.audio_mp3_filename: transcript.audio_mp3_filename.unlink(missing_ok=True) if hasattr(transcript, "audio_wav_filename") and transcript.audio_wav_filename: transcript.audio_wav_filename.unlink(missing_ok=True) except Exception as e: - logger.error(f"Failed to delete local audio files: {e}", exc_info=e) + error_msg = f"Failed to delete local audio files: {e}" + logger.error(error_msg, exc_info=e) + deletion_errors.append(error_msg) - logger.info("Consent cleanup done") + if deletion_errors: + logger.warning( + f"Consent cleanup completed with {len(deletion_errors)} errors", + errors=deletion_errors, + ) + else: + await transcripts_controller.update(transcript, {"audio_deleted": True}) + logger.info("Consent cleanup done - all audio deleted") @get_transcript diff --git a/server/reflector/pipelines/main_multitrack_pipeline.py b/server/reflector/pipelines/main_multitrack_pipeline.py new file mode 100644 index 00000000..addcd9b4 --- /dev/null +++ b/server/reflector/pipelines/main_multitrack_pipeline.py @@ -0,0 +1,694 @@ +import asyncio +import math +import tempfile +from fractions import Fraction +from pathlib import Path + +import av +from av.audio.resampler import AudioResampler +from celery import chain, shared_task + +from reflector.asynctask import asynctask +from reflector.db.transcripts import ( + TranscriptStatus, + TranscriptWaveform, + transcripts_controller, +) +from reflector.logger import logger +from reflector.pipelines import topic_processing +from reflector.pipelines.main_file_pipeline import task_send_webhook_if_needed +from reflector.pipelines.main_live_pipeline import ( + PipelineMainBase, + broadcast_to_sockets, + task_cleanup_consent, + task_pipeline_post_to_zulip, +) +from reflector.pipelines.transcription_helpers import transcribe_file_with_processor +from reflector.processors import AudioFileWriterProcessor +from reflector.processors.audio_waveform_processor import AudioWaveformProcessor +from reflector.processors.types import TitleSummary +from reflector.processors.types import Transcript as TranscriptType +from reflector.storage import Storage, get_transcripts_storage +from reflector.utils.string import NonEmptyString + +# Audio encoding 
constants +OPUS_STANDARD_SAMPLE_RATE = 48000 +OPUS_DEFAULT_BIT_RATE = 128000 + +# Storage operation constants +PRESIGNED_URL_EXPIRATION_SECONDS = 7200 # 2 hours + + +class PipelineMainMultitrack(PipelineMainBase): + def __init__(self, transcript_id: str): + super().__init__(transcript_id=transcript_id) + self.logger = logger.bind(transcript_id=self.transcript_id) + self.empty_pipeline = topic_processing.EmptyPipeline(logger=self.logger) + + async def pad_track_for_transcription( + self, + track_url: NonEmptyString, + track_idx: int, + storage: Storage, + ) -> NonEmptyString: + """ + Pad a single track with silence based on stream metadata start_time. + Downloads from S3 presigned URL, processes via PyAV using tempfile, uploads to S3. + Returns presigned URL of padded track (or original URL if no padding needed). + + Memory usage: + - Pattern: fixed_overhead(2-5MB) for PyAV codec/filters + - PyAV streams input efficiently (no full download, verified) + - Output written to tempfile (disk-based, not memory) + - Upload streams from file handle (boto3 chunks, typically 5-10MB) + + Daily.co raw-tracks timing - Two approaches: + + CURRENT APPROACH (PyAV metadata): + The WebM stream.start_time field encodes MEETING-RELATIVE timing: + - t=0: When Daily.co recording started (first participant joined) + - start_time=8.13s: This participant's track began 8.13s after recording started + - Purpose: Enables track alignment without external manifest files + + This is NOT: + - Stream-internal offset (first packet timestamp relative to stream start) + - Absolute/wall-clock time + - Recording duration + + ALTERNATIVE APPROACH (filename parsing): + Daily.co filenames contain Unix timestamps (milliseconds): + Format: {recording_start_ts}-{participant_id}-cam-audio-{track_start_ts}.webm + Example: 1760988935484-52f7f48b-fbab-431f-9a50-87b9abfc8255-cam-audio-1760988935922.webm + + Can calculate offset: (track_start_ts - recording_start_ts) / 1000 + - Track 0: (1760988935922 - 1760988935484) / 1000 = 0.438s + - Track 1: (1760988943823 - 1760988935484) / 1000 = 8.339s + + TIME DIFFERENCE: PyAV metadata vs filename timestamps differ by ~209ms: + - Track 0: filename=438ms, metadata=229ms (diff: 209ms) + - Track 1: filename=8339ms, metadata=8130ms (diff: 209ms) + + Consistent delta suggests network/encoding delay. PyAV metadata is ground truth + (represents when audio stream actually started vs when file upload initiated). + + Example with 2 participants: + Track A: start_time=0.2s → Joined 200ms after recording began + Track B: start_time=8.1s → Joined 8.1 seconds later + + After padding: + Track A: [0.2s silence] + [speech...] + Track B: [8.1s silence] + [speech...] + + Whisper transcription timestamps are now synchronized: + Track A word at 5.0s → happened at meeting t=5.0s + Track B word at 10.0s → happened at meeting t=10.0s + + Merging just sorts by timestamp - no offset calculation needed. + + Padding coincidentally involves re-encoding, which is important when we work with Daily.co + Whisper. + This is because Daily.co returns recordings with skipped frames e.g. when microphone muted. + Whisper doesn't understand those gaps and ignores them, causing timestamp issues in transcription. + Re-encoding restores those frames.
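+ Example: a 5s track with 2s muted decodes to only ~3s of frames; after re-encoding it is 5s again, with silence restored in the gap.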
We do padding and re-encoding together just because it's convenient and more performant: + we need padded values for mix mp3 anyways + """ + + transcript = await self.get_transcript() + + try: + # PyAV streams input from S3 URL efficiently (2-5MB fixed overhead for codec/filters) + with av.open(track_url) as in_container: + start_time_seconds = self._extract_stream_start_time_from_container( + in_container, track_idx + ) + + if start_time_seconds <= 0: + self.logger.info( + f"Track {track_idx} requires no padding (start_time={start_time_seconds}s)", + track_idx=track_idx, + ) + return track_url + + # Use tempfile instead of BytesIO for better memory efficiency + # Reduces peak memory usage during encoding/upload + with tempfile.NamedTemporaryFile( + suffix=".webm", delete=False + ) as temp_file: + temp_path = temp_file.name + + try: + self._apply_audio_padding_to_file( + in_container, temp_path, start_time_seconds, track_idx + ) + + storage_path = ( + f"file_pipeline/{transcript.id}/tracks/padded_{track_idx}.webm" + ) + + # Upload using file handle for streaming + with open(temp_path, "rb") as padded_file: + await storage.put_file(storage_path, padded_file) + finally: + # Clean up temp file + Path(temp_path).unlink(missing_ok=True) + + padded_url = await storage.get_file_url( + storage_path, + operation="get_object", + expires_in=PRESIGNED_URL_EXPIRATION_SECONDS, + ) + + self.logger.info( + f"Successfully padded track {track_idx}", + track_idx=track_idx, + start_time_seconds=start_time_seconds, + padded_url=padded_url, + ) + + return padded_url + + except Exception as e: + self.logger.error( + f"Failed to process track {track_idx}", + track_idx=track_idx, + url=track_url, + error=str(e), + exc_info=True, + ) + raise Exception( + f"Track {track_idx} padding failed - transcript would have incorrect timestamps" + ) from e + + def _extract_stream_start_time_from_container( + self, container, track_idx: int + ) -> float: + """ + Extract meeting-relative start time from WebM stream metadata. + Uses PyAV to read stream.start_time from WebM container. + More accurate than filename timestamps by ~209ms due to network/encoding delays. 
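+ Fallback order: stream.start_time → container.start_time → first packet DTS (0.0 if none can be read).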
+ """ + start_time_seconds = 0.0 + try: + audio_streams = [s for s in container.streams if s.type == "audio"] + stream = audio_streams[0] if audio_streams else container.streams[0] + + # 1) Try stream-level start_time (most reliable for Daily.co tracks) + if stream.start_time is not None and stream.time_base is not None: + start_time_seconds = float(stream.start_time * stream.time_base) + + # 2) Fallback to container-level start_time (in av.time_base units) + if (start_time_seconds <= 0) and (container.start_time is not None): + start_time_seconds = float(container.start_time * av.time_base) + + # 3) Fallback to first packet DTS in stream.time_base + if start_time_seconds <= 0: + for packet in container.demux(stream): + if packet.dts is not None: + start_time_seconds = float(packet.dts * stream.time_base) + break + except Exception as e: + self.logger.warning( + "PyAV metadata read failed; assuming 0 start_time", + track_idx=track_idx, + error=str(e), + ) + start_time_seconds = 0.0 + + self.logger.info( + f"Track {track_idx} stream metadata: start_time={start_time_seconds:.3f}s", + track_idx=track_idx, + ) + return start_time_seconds + + def _apply_audio_padding_to_file( + self, + in_container, + output_path: str, + start_time_seconds: float, + track_idx: int, + ) -> None: + """Apply silence padding to audio track using PyAV filter graph, writing to file""" + delay_ms = math.floor(start_time_seconds * 1000) + + self.logger.info( + f"Padding track {track_idx} with {delay_ms}ms delay using PyAV", + track_idx=track_idx, + delay_ms=delay_ms, + ) + + try: + with av.open(output_path, "w", format="webm") as out_container: + in_stream = next( + (s for s in in_container.streams if s.type == "audio"), None + ) + if in_stream is None: + raise Exception("No audio stream in input") + + out_stream = out_container.add_stream( + "libopus", rate=OPUS_STANDARD_SAMPLE_RATE + ) + out_stream.bit_rate = OPUS_DEFAULT_BIT_RATE + graph = av.filter.Graph() + + abuf_args = ( + f"time_base=1/{OPUS_STANDARD_SAMPLE_RATE}:" + f"sample_rate={OPUS_STANDARD_SAMPLE_RATE}:" + f"sample_fmt=s16:" + f"channel_layout=stereo" + ) + src = graph.add("abuffer", args=abuf_args, name="src") + aresample_f = graph.add("aresample", args="async=1", name="ares") + # adelay requires one delay value per channel separated by '|' + delays_arg = f"{delay_ms}|{delay_ms}" + adelay_f = graph.add( + "adelay", args=f"delays={delays_arg}:all=1", name="delay" + ) + sink = graph.add("abuffersink", name="sink") + + src.link_to(aresample_f) + aresample_f.link_to(adelay_f) + adelay_f.link_to(sink) + graph.configure() + + resampler = AudioResampler( + format="s16", layout="stereo", rate=OPUS_STANDARD_SAMPLE_RATE + ) + # Decode -> resample -> push through graph -> encode Opus + for frame in in_container.decode(in_stream): + out_frames = resampler.resample(frame) or [] + for rframe in out_frames: + rframe.sample_rate = OPUS_STANDARD_SAMPLE_RATE + rframe.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE) + src.push(rframe) + + while True: + try: + f_out = sink.pull() + except Exception: + break + f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE + f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE) + for packet in out_stream.encode(f_out): + out_container.mux(packet) + + src.push(None) + while True: + try: + f_out = sink.pull() + except Exception: + break + f_out.sample_rate = OPUS_STANDARD_SAMPLE_RATE + f_out.time_base = Fraction(1, OPUS_STANDARD_SAMPLE_RATE) + for packet in out_stream.encode(f_out): + out_container.mux(packet) + + for packet in 
out_stream.encode(None): + out_container.mux(packet) + except Exception as e: + self.logger.error( + "PyAV padding failed for track", + track_idx=track_idx, + delay_ms=delay_ms, + error=str(e), + exc_info=True, + ) + raise + + async def mixdown_tracks( + self, + track_urls: list[str], + writer: AudioFileWriterProcessor, + offsets_seconds: list[float] | None = None, + ) -> None: + """Multi-track mixdown using PyAV filter graph (amix), reading from S3 presigned URLs""" + + target_sample_rate: int | None = None + for url in track_urls: + if not url: + continue + container = None + try: + container = av.open(url) + for frame in container.decode(audio=0): + target_sample_rate = frame.sample_rate + break + except Exception: + continue + finally: + if container is not None: + container.close() + if target_sample_rate: + break + + if not target_sample_rate: + self.logger.error("Mixdown failed - no decodable audio frames found") + raise Exception("Mixdown failed: No decodable audio frames in any track") + # Build PyAV filter graph: + # N abuffer (s32/stereo) + # -> optional adelay per input (for alignment) + # -> amix (s32) + # -> aformat(s16) + # -> sink + graph = av.filter.Graph() + inputs = [] + valid_track_urls = [url for url in track_urls if url] + input_offsets_seconds = None + if offsets_seconds is not None: + input_offsets_seconds = [ + offsets_seconds[i] for i, url in enumerate(track_urls) if url + ] + for idx, url in enumerate(valid_track_urls): + args = ( + f"time_base=1/{target_sample_rate}:" + f"sample_rate={target_sample_rate}:" + f"sample_fmt=s32:" + f"channel_layout=stereo" + ) + in_ctx = graph.add("abuffer", args=args, name=f"in{idx}") + inputs.append(in_ctx) + + if not inputs: + self.logger.error("Mixdown failed - no valid inputs for graph") + raise Exception("Mixdown failed: No valid inputs for filter graph") + + mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix") + + fmt = graph.add( + "aformat", + args=( + f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}" + ), + name="fmt", + ) + + sink = graph.add("abuffersink", name="out") + + # Optional per-input delay before mixing + delays_ms: list[int] = [] + if input_offsets_seconds is not None: + base = min(input_offsets_seconds) if input_offsets_seconds else 0.0 + delays_ms = [ + max(0, int(round((o - base) * 1000))) for o in input_offsets_seconds + ] + else: + delays_ms = [0 for _ in inputs] + + for idx, in_ctx in enumerate(inputs): + delay_ms = delays_ms[idx] if idx < len(delays_ms) else 0 + if delay_ms > 0: + # adelay requires one value per channel; use same for stereo + adelay = graph.add( + "adelay", + args=f"delays={delay_ms}|{delay_ms}:all=1", + name=f"delay{idx}", + ) + in_ctx.link_to(adelay) + adelay.link_to(mixer, 0, idx) + else: + in_ctx.link_to(mixer, 0, idx) + mixer.link_to(fmt) + fmt.link_to(sink) + graph.configure() + + containers = [] + try: + # Open all containers with cleanup guaranteed + for i, url in enumerate(valid_track_urls): + try: + c = av.open(url) + containers.append(c) + except Exception as e: + self.logger.warning( + "Mixdown: failed to open container from URL", + input=i, + url=url, + error=str(e), + ) + + if not containers: + self.logger.error("Mixdown failed - no valid containers opened") + raise Exception("Mixdown failed: Could not open any track containers") + + decoders = [c.decode(audio=0) for c in containers] + active = [True] * len(decoders) + resamplers = [ + AudioResampler(format="s32", layout="stereo", rate=target_sample_rate) + for _ in 
decoders + ] + + while any(active): + for i, (dec, is_active) in enumerate(zip(decoders, active)): + if not is_active: + continue + try: + frame = next(dec) + except StopIteration: + active[i] = False + continue + + if frame.sample_rate != target_sample_rate: + continue + out_frames = resamplers[i].resample(frame) or [] + for rf in out_frames: + rf.sample_rate = target_sample_rate + rf.time_base = Fraction(1, target_sample_rate) + inputs[i].push(rf) + + while True: + try: + mixed = sink.pull() + except Exception: + break + mixed.sample_rate = target_sample_rate + mixed.time_base = Fraction(1, target_sample_rate) + await writer.push(mixed) + + for in_ctx in inputs: + in_ctx.push(None) + while True: + try: + mixed = sink.pull() + except Exception: + break + mixed.sample_rate = target_sample_rate + mixed.time_base = Fraction(1, target_sample_rate) + await writer.push(mixed) + finally: + # Cleanup all containers, even if processing failed + for c in containers: + if c is not None: + try: + c.close() + except Exception: + pass # Best effort cleanup + + @broadcast_to_sockets + async def set_status(self, transcript_id: str, status: TranscriptStatus): + async with self.lock_transaction(): + return await transcripts_controller.set_status(transcript_id, status) + + async def on_waveform(self, data): + async with self.transaction(): + waveform = TranscriptWaveform(waveform=data) + transcript = await self.get_transcript() + return await transcripts_controller.append_event( + transcript=transcript, event="WAVEFORM", data=waveform + ) + + async def process(self, bucket_name: str, track_keys: list[str]): + transcript = await self.get_transcript() + async with self.transaction(): + await transcripts_controller.update( + transcript, + { + "events": [], + "topics": [], + }, + ) + + source_storage = get_transcripts_storage() + transcript_storage = source_storage + + track_urls: list[str] = [] + for key in track_keys: + url = await source_storage.get_file_url( + key, + operation="get_object", + expires_in=PRESIGNED_URL_EXPIRATION_SECONDS, + bucket=bucket_name, + ) + track_urls.append(url) + self.logger.info( + f"Generated presigned URL for track from {bucket_name}", + key=key, + ) + + created_padded_files = set() + padded_track_urls: list[str] = [] + for idx, url in enumerate(track_urls): + padded_url = await self.pad_track_for_transcription( + url, idx, transcript_storage + ) + padded_track_urls.append(padded_url) + if padded_url != url: + storage_path = f"file_pipeline/{transcript.id}/tracks/padded_{idx}.webm" + created_padded_files.add(storage_path) + self.logger.info(f"Track {idx} processed, padded URL: {padded_url}") + + transcript.data_path.mkdir(parents=True, exist_ok=True) + + mp3_writer = AudioFileWriterProcessor( + path=str(transcript.audio_mp3_filename), + on_duration=self.on_duration, + ) + await self.mixdown_tracks(padded_track_urls, mp3_writer, offsets_seconds=None) + await mp3_writer.flush() + + if not transcript.audio_mp3_filename.exists(): + raise Exception( + "Mixdown failed - no MP3 file generated. Cannot proceed without playable audio." 
+ ) + + storage_path = f"{transcript.id}/audio.mp3" + # Use file handle streaming to avoid loading entire MP3 into memory + mp3_size = transcript.audio_mp3_filename.stat().st_size + with open(transcript.audio_mp3_filename, "rb") as mp3_file: + await transcript_storage.put_file(storage_path, mp3_file) + mp3_url = await transcript_storage.get_file_url(storage_path) + + await transcripts_controller.update(transcript, {"audio_location": "storage"}) + + self.logger.info( + f"Uploaded mixed audio to storage", + storage_path=storage_path, + size=mp3_size, + url=mp3_url, + ) + + self.logger.info("Generating waveform from mixed audio") + waveform_processor = AudioWaveformProcessor( + audio_path=transcript.audio_mp3_filename, + waveform_path=transcript.audio_waveform_filename, + on_waveform=self.on_waveform, + ) + waveform_processor.set_pipeline(self.empty_pipeline) + await waveform_processor.flush() + self.logger.info("Waveform generated successfully") + + speaker_transcripts: list[TranscriptType] = [] + for idx, padded_url in enumerate(padded_track_urls): + if not padded_url: + continue + + t = await self.transcribe_file(padded_url, transcript.source_language) + + if not t.words: + continue + + for w in t.words: + w.speaker = idx + + speaker_transcripts.append(t) + self.logger.info( + f"Track {idx} transcribed successfully with {len(t.words)} words", + track_idx=idx, + ) + + valid_track_count = len([url for url in padded_track_urls if url]) + if valid_track_count > 0 and len(speaker_transcripts) != valid_track_count: + raise Exception( + f"Only {len(speaker_transcripts)}/{valid_track_count} tracks transcribed successfully. " + f"All tracks must succeed to avoid incomplete transcripts." + ) + + if not speaker_transcripts: + raise Exception("No valid track transcriptions") + + self.logger.info(f"Cleaning up {len(created_padded_files)} temporary S3 files") + cleanup_tasks = [] + for storage_path in created_padded_files: + cleanup_tasks.append(transcript_storage.delete_file(storage_path)) + + if cleanup_tasks: + cleanup_results = await asyncio.gather( + *cleanup_tasks, return_exceptions=True + ) + for storage_path, result in zip(created_padded_files, cleanup_results): + if isinstance(result, Exception): + self.logger.warning( + "Failed to cleanup temporary padded track", + storage_path=storage_path, + error=str(result), + ) + + merged_words = [] + for t in speaker_transcripts: + merged_words.extend(t.words) + merged_words.sort( + key=lambda w: w.start if hasattr(w, "start") and w.start is not None else 0 + ) + + merged_transcript = TranscriptType(words=merged_words, translation=None) + + await self.on_transcript(merged_transcript) + + topics = await self.detect_topics(merged_transcript, transcript.target_language) + await asyncio.gather( + self.generate_title(topics), + self.generate_summaries(topics), + return_exceptions=False, + ) + + await self.set_status(transcript.id, "ended") + + async def transcribe_file(self, audio_url: str, language: str) -> TranscriptType: + return await transcribe_file_with_processor(audio_url, language) + + async def detect_topics( + self, transcript: TranscriptType, target_language: str + ) -> list[TitleSummary]: + return await topic_processing.detect_topics( + transcript, + target_language, + on_topic_callback=self.on_topic, + empty_pipeline=self.empty_pipeline, + ) + + async def generate_title(self, topics: list[TitleSummary]): + return await topic_processing.generate_title( + topics, + on_title_callback=self.on_title, + empty_pipeline=self.empty_pipeline, + 
logger=self.logger, + ) + + async def generate_summaries(self, topics: list[TitleSummary]): + transcript = await self.get_transcript() + return await topic_processing.generate_summaries( + topics, + transcript, + on_long_summary_callback=self.on_long_summary, + on_short_summary_callback=self.on_short_summary, + empty_pipeline=self.empty_pipeline, + logger=self.logger, + ) + + +@shared_task +@asynctask +async def task_pipeline_multitrack_process( + *, transcript_id: str, bucket_name: str, track_keys: list[str] +): + pipeline = PipelineMainMultitrack(transcript_id=transcript_id) + try: + await pipeline.set_status(transcript_id, "processing") + await pipeline.process(bucket_name, track_keys) + except Exception: + await pipeline.set_status(transcript_id, "error") + raise + + post_chain = chain( + task_cleanup_consent.si(transcript_id=transcript_id), + task_pipeline_post_to_zulip.si(transcript_id=transcript_id), + task_send_webhook_if_needed.si(transcript_id=transcript_id), + ) + post_chain.delay() diff --git a/server/reflector/pipelines/topic_processing.py b/server/reflector/pipelines/topic_processing.py new file mode 100644 index 00000000..7f055025 --- /dev/null +++ b/server/reflector/pipelines/topic_processing.py @@ -0,0 +1,109 @@ +""" +Topic processing utilities +========================== + +Shared topic detection, title generation, and summarization logic +used across file and multitrack pipelines. +""" + +from typing import Callable + +import structlog + +from reflector.db.transcripts import Transcript +from reflector.processors import ( + TranscriptFinalSummaryProcessor, + TranscriptFinalTitleProcessor, + TranscriptTopicDetectorProcessor, +) +from reflector.processors.types import TitleSummary +from reflector.processors.types import Transcript as TranscriptType + + +class EmptyPipeline: + def __init__(self, logger: structlog.BoundLogger): + self.logger = logger + + def get_pref(self, k, d=None): + return d + + async def emit(self, event): + pass + + +async def detect_topics( + transcript: TranscriptType, + target_language: str, + *, + on_topic_callback: Callable, + empty_pipeline: EmptyPipeline, +) -> list[TitleSummary]: + chunk_size = 300 + topics: list[TitleSummary] = [] + + async def on_topic(topic: TitleSummary): + topics.append(topic) + return await on_topic_callback(topic) + + topic_detector = TranscriptTopicDetectorProcessor(callback=on_topic) + topic_detector.set_pipeline(empty_pipeline) + + for i in range(0, len(transcript.words), chunk_size): + chunk_words = transcript.words[i : i + chunk_size] + if not chunk_words: + continue + + chunk_transcript = TranscriptType( + words=chunk_words, translation=transcript.translation + ) + + await topic_detector.push(chunk_transcript) + + await topic_detector.flush() + return topics + + +async def generate_title( + topics: list[TitleSummary], + *, + on_title_callback: Callable, + empty_pipeline: EmptyPipeline, + logger: structlog.BoundLogger, +): + if not topics: + logger.warning("No topics for title generation") + return + + processor = TranscriptFinalTitleProcessor(callback=on_title_callback) + processor.set_pipeline(empty_pipeline) + + for topic in topics: + await processor.push(topic) + + await processor.flush() + + +async def generate_summaries( + topics: list[TitleSummary], + transcript: Transcript, + *, + on_long_summary_callback: Callable, + on_short_summary_callback: Callable, + empty_pipeline: EmptyPipeline, + logger: structlog.BoundLogger, +): + if not topics: + logger.warning("No topics for summary generation") + return + + 
processor = TranscriptFinalSummaryProcessor( + transcript=transcript, + callback=on_long_summary_callback, + on_short_summary=on_short_summary_callback, + ) + processor.set_pipeline(empty_pipeline) + + for topic in topics: + await processor.push(topic) + + await processor.flush() diff --git a/server/reflector/pipelines/transcription_helpers.py b/server/reflector/pipelines/transcription_helpers.py new file mode 100644 index 00000000..b0cc5858 --- /dev/null +++ b/server/reflector/pipelines/transcription_helpers.py @@ -0,0 +1,34 @@ +from reflector.processors.file_transcript import FileTranscriptInput +from reflector.processors.file_transcript_auto import FileTranscriptAutoProcessor +from reflector.processors.types import Transcript as TranscriptType + + +async def transcribe_file_with_processor( + audio_url: str, + language: str, + processor_name: str | None = None, +) -> TranscriptType: + processor = ( + FileTranscriptAutoProcessor(name=processor_name) + if processor_name + else FileTranscriptAutoProcessor() + ) + input_data = FileTranscriptInput(audio_url=audio_url, language=language) + + result: TranscriptType | None = None + + async def capture_result(transcript): + nonlocal result + result = transcript + + processor.on(capture_result) + await processor.push(input_data) + await processor.flush() + + if not result: + processor_label = processor_name or "default" + raise ValueError( + f"No transcript captured from {processor_label} processor for audio: {audio_url}" + ) + + return result diff --git a/server/reflector/processors/summary/summary_builder.py b/server/reflector/processors/summary/summary_builder.py index efcf9227..df348093 100644 --- a/server/reflector/processors/summary/summary_builder.py +++ b/server/reflector/processors/summary/summary_builder.py @@ -165,6 +165,7 @@ class SummaryBuilder: self.llm: LLM = llm self.model_name: str = llm.model_name self.logger = logger or structlog.get_logger() + self.participant_instructions: str | None = None if filename: self.read_transcript_from_file(filename) @@ -191,14 +192,61 @@ class SummaryBuilder: self, prompt: str, output_cls: Type[T], tone_name: str | None = None ) -> T: """Generic function to get structured output from LLM for non-function-calling models.""" + # Add participant instructions to the prompt if available + enhanced_prompt = self._enhance_prompt_with_participants(prompt) return await self.llm.get_structured_response( - prompt, [self.transcript], output_cls, tone_name=tone_name + enhanced_prompt, [self.transcript], output_cls, tone_name=tone_name ) + async def _get_response( + self, prompt: str, texts: list[str], tone_name: str | None = None + ) -> str: + """Get text response with automatic participant instructions injection.""" + enhanced_prompt = self._enhance_prompt_with_participants(prompt) + return await self.llm.get_response(enhanced_prompt, texts, tone_name=tone_name) + + def _enhance_prompt_with_participants(self, prompt: str) -> str: + """Add participant instructions to any prompt if participants are known.""" + if self.participant_instructions: + self.logger.debug("Adding participant instructions to prompt") + return f"{prompt}\n\n{self.participant_instructions}" + return prompt + # ---------------------------------------------------------------------------- # Participants # ---------------------------------------------------------------------------- + def set_known_participants(self, participants: list[str]) -> None: + """ + Set known participants directly without LLM identification. 
+ This is used when participants are already identified and stored. + They are appended at the end of the transcript, providing more context for the assistant. + """ + if not participants: + self.logger.warning("No participants provided") + return + + self.logger.info( + "Using known participants", + participants=participants, + ) + + participants_md = self.format_list_md(participants) + self.transcript += f"\n\n# Participants\n\n{participants_md}" + + # Set instructions that will be automatically added to all prompts + participants_list = ", ".join(participants) + self.participant_instructions = dedent( + f""" + # IMPORTANT: Participant Names + The following participants are identified in this conversation: {participants_list} + + You MUST use these specific participant names when referring to people in your response. + Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc. + Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested..."). + """ + ).strip() + async def identify_participants(self) -> None: """ From a transcript, try to identify the participants using TreeSummarize with structured output. @@ -232,6 +280,19 @@ class SummaryBuilder: if unique_participants: participants_md = self.format_list_md(unique_participants) self.transcript += f"\n\n# Participants\n\n{participants_md}" + + # Set instructions that will be automatically added to all prompts + participants_list = ", ".join(unique_participants) + self.participant_instructions = dedent( + f""" + # IMPORTANT: Participant Names + The following participants are identified in this conversation: {participants_list} + + You MUST use these specific participant names when referring to people in your response. + Do NOT use generic terms like "a participant", "someone", "attendee", "Speaker 1", "Speaker 2", etc. + Always refer to people by their actual names (e.g., "John suggested..." not "A participant suggested..."). 
+ """ + ).strip() else: self.logger.warning("No participants identified in the transcript") @@ -318,13 +379,13 @@ class SummaryBuilder: for subject in self.subjects: detailed_prompt = DETAILED_SUBJECT_PROMPT_TEMPLATE.format(subject=subject) - detailed_response = await self.llm.get_response( + detailed_response = await self._get_response( detailed_prompt, [self.transcript], tone_name="Topic assistant" ) paragraph_prompt = PARAGRAPH_SUMMARY_PROMPT - paragraph_response = await self.llm.get_response( + paragraph_response = await self._get_response( paragraph_prompt, [str(detailed_response)], tone_name="Topic summarizer" ) @@ -345,7 +406,7 @@ class SummaryBuilder: recap_prompt = RECAP_PROMPT - recap_response = await self.llm.get_response( + recap_response = await self._get_response( recap_prompt, [summaries_text], tone_name="Recap summarizer" ) diff --git a/server/reflector/processors/transcript_final_summary.py b/server/reflector/processors/transcript_final_summary.py index 0b4a594c..dfe07aad 100644 --- a/server/reflector/processors/transcript_final_summary.py +++ b/server/reflector/processors/transcript_final_summary.py @@ -26,7 +26,25 @@ class TranscriptFinalSummaryProcessor(Processor): async def get_summary_builder(self, text) -> SummaryBuilder: builder = SummaryBuilder(self.llm, logger=self.logger) builder.set_transcript(text) - await builder.identify_participants() + + # Use known participants if available, otherwise identify them + if self.transcript and self.transcript.participants: + # Extract participant names from the stored participants + participant_names = [p.name for p in self.transcript.participants if p.name] + if participant_names: + self.logger.info( + f"Using {len(participant_names)} known participants from transcript" + ) + builder.set_known_participants(participant_names) + else: + self.logger.info( + "Participants field exists but is empty, identifying participants" + ) + await builder.identify_participants() + else: + self.logger.info("No participants stored, identifying participants") + await builder.identify_participants() + await builder.generate_summary() return builder @@ -49,18 +67,30 @@ class TranscriptFinalSummaryProcessor(Processor): speakermap = {} if self.transcript: speakermap = { - participant["speaker"]: participant["name"] - for participant in self.transcript.participants + p.speaker: p.name + for p in (self.transcript.participants or []) + if p.speaker is not None and p.name } + self.logger.info( + f"Built speaker map with {len(speakermap)} participants", + speakermap=speakermap, + ) # build the transcript as a single string - # XXX: unsure if the participants name as replaced directly in speaker ? 
+ # Replace speaker IDs with actual participant names if available text_transcript = [] + unique_speakers = set() for topic in self.chunks: for segment in topic.transcript.as_segments(): name = speakermap.get(segment.speaker, f"Speaker {segment.speaker}") + unique_speakers.add((segment.speaker, name)) text_transcript.append(f"{name}: {segment.text}") + self.logger.info( + f"Built transcript with {len(unique_speakers)} unique speakers", + speakers=list(unique_speakers), + ) + text_transcript = "\n".join(text_transcript) last_chunk = self.chunks[-1] diff --git a/server/reflector/processors/transcript_topic_detector.py b/server/reflector/processors/transcript_topic_detector.py index 317e2d9c..695d3af3 100644 --- a/server/reflector/processors/transcript_topic_detector.py +++ b/server/reflector/processors/transcript_topic_detector.py @@ -1,6 +1,6 @@ from textwrap import dedent -from pydantic import BaseModel, Field +from pydantic import AliasChoices, BaseModel, Field from reflector.llm import LLM from reflector.processors.base import Processor @@ -36,15 +36,13 @@ class TopicResponse(BaseModel): title: str = Field( description="A descriptive title for the topic being discussed", - validation_alias="Title", + validation_alias=AliasChoices("title", "Title"), ) summary: str = Field( description="A concise 1-2 sentence summary of the discussion", - validation_alias="Summary", + validation_alias=AliasChoices("summary", "Summary"), ) - model_config = {"populate_by_name": True} - class TranscriptTopicDetectorProcessor(Processor): """ diff --git a/server/reflector/schemas/platform.py b/server/reflector/schemas/platform.py new file mode 100644 index 00000000..7b945841 --- /dev/null +++ b/server/reflector/schemas/platform.py @@ -0,0 +1,5 @@ +from typing import Literal + +Platform = Literal["whereby", "daily"] +WHEREBY_PLATFORM: Platform = "whereby" +DAILY_PLATFORM: Platform = "daily" diff --git a/server/reflector/settings.py b/server/reflector/settings.py index 9659f648..0e3fb3f7 100644 --- a/server/reflector/settings.py +++ b/server/reflector/settings.py @@ -1,6 +1,7 @@ from pydantic.types import PositiveInt from pydantic_settings import BaseSettings, SettingsConfigDict +from reflector.schemas.platform import WHEREBY_PLATFORM, Platform from reflector.utils.string import NonEmptyString @@ -47,14 +48,17 @@ class Settings(BaseSettings): TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID: str | None = None TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None - # Recording storage - RECORDING_STORAGE_BACKEND: str | None = None + # Platform-specific recording storage (follows {PREFIX}_STORAGE_AWS_{CREDENTIAL} pattern) + # Whereby storage configuration + WHEREBY_STORAGE_AWS_BUCKET_NAME: str | None = None + WHEREBY_STORAGE_AWS_REGION: str | None = None + WHEREBY_STORAGE_AWS_ACCESS_KEY_ID: str | None = None + WHEREBY_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None - # Recording storage configuration for AWS - RECORDING_STORAGE_AWS_BUCKET_NAME: str = "recording-bucket" - RECORDING_STORAGE_AWS_REGION: str = "us-east-1" - RECORDING_STORAGE_AWS_ACCESS_KEY_ID: str | None = None - RECORDING_STORAGE_AWS_SECRET_ACCESS_KEY: str | None = None + # Daily.co storage configuration + DAILYCO_STORAGE_AWS_BUCKET_NAME: str | None = None + DAILYCO_STORAGE_AWS_REGION: str | None = None + DAILYCO_STORAGE_AWS_ROLE_ARN: str | None = None # Translate into the target language TRANSLATION_BACKEND: str = "passthrough" @@ -124,11 +128,20 @@ class Settings(BaseSettings): WHEREBY_API_URL: str = "https://api.whereby.dev/v1" WHEREBY_API_KEY: 
NonEmptyString | None = None WHEREBY_WEBHOOK_SECRET: str | None = None - AWS_WHEREBY_ACCESS_KEY_ID: str | None = None - AWS_WHEREBY_ACCESS_KEY_SECRET: str | None = None AWS_PROCESS_RECORDING_QUEUE_URL: str | None = None SQS_POLLING_TIMEOUT_SECONDS: int = 60 + # Daily.co integration + DAILY_API_KEY: str | None = None + DAILY_WEBHOOK_SECRET: str | None = None + DAILY_SUBDOMAIN: str | None = None + DAILY_WEBHOOK_UUID: str | None = ( + None # Webhook UUID for this environment. Not used by production code + ) + + # Platform Configuration + DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM + # Zulip integration ZULIP_REALM: str | None = None ZULIP_API_KEY: str | None = None diff --git a/server/reflector/storage/__init__.py b/server/reflector/storage/__init__.py index 3db8a77b..aff6c767 100644 --- a/server/reflector/storage/__init__.py +++ b/server/reflector/storage/__init__.py @@ -3,6 +3,13 @@ from reflector.settings import settings def get_transcripts_storage() -> Storage: + """ + Get storage for processed transcript files (master credentials). + + Also use this for ALL our file operations with bucket override: + master = get_transcripts_storage() + master.delete_file(key, bucket=recording.bucket_name) + """ assert settings.TRANSCRIPT_STORAGE_BACKEND return Storage.get_instance( name=settings.TRANSCRIPT_STORAGE_BACKEND, @@ -10,8 +17,53 @@ def get_transcripts_storage() -> Storage: ) -def get_recordings_storage() -> Storage: +def get_whereby_storage() -> Storage: + """ + Get storage config for Whereby (for passing to Whereby API). + + Usage: + whereby_storage = get_whereby_storage() + key_id, secret = whereby_storage.key_credentials + whereby_api.create_meeting( + bucket=whereby_storage.bucket_name, + access_key_id=key_id, + secret=secret, + ) + + Do NOT use for our file operations - use get_transcripts_storage() instead. + """ + if not settings.WHEREBY_STORAGE_AWS_BUCKET_NAME: + raise ValueError( + "WHEREBY_STORAGE_AWS_BUCKET_NAME required for Whereby with AWS storage" + ) + return Storage.get_instance( - name=settings.RECORDING_STORAGE_BACKEND, - settings_prefix="RECORDING_STORAGE_", + name="aws", + settings_prefix="WHEREBY_STORAGE_", + ) + + +def get_dailyco_storage() -> Storage: + """ + Get storage config for Daily.co (for passing to Daily API). + + Usage: + daily_storage = get_dailyco_storage() + daily_api.create_meeting( + bucket=daily_storage.bucket_name, + region=daily_storage.region, + role_arn=daily_storage.role_credential, + ) + + Do NOT use for our file operations - use get_transcripts_storage() instead. 
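+
+    Daily.co delegates to an IAM role (DAILYCO_STORAGE_AWS_ROLE_ARN) instead of
+    static access keys, so only role_credential is available on this instance.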
+ """ + # Fail fast if platform-specific config missing + if not settings.DAILYCO_STORAGE_AWS_BUCKET_NAME: + raise ValueError( + "DAILYCO_STORAGE_AWS_BUCKET_NAME required for Daily.co with AWS storage" + ) + + return Storage.get_instance( + name="aws", + settings_prefix="DAILYCO_STORAGE_", ) diff --git a/server/reflector/storage/base.py b/server/reflector/storage/base.py index 360930d8..ba4316d8 100644 --- a/server/reflector/storage/base.py +++ b/server/reflector/storage/base.py @@ -1,10 +1,23 @@ import importlib +from typing import BinaryIO, Union from pydantic import BaseModel from reflector.settings import settings +class StorageError(Exception): + """Base exception for storage operations.""" + + pass + + +class StoragePermissionError(StorageError): + """Exception raised when storage operation fails due to permission issues.""" + + pass + + class FileResult(BaseModel): filename: str url: str @@ -36,26 +49,113 @@ class Storage: return cls._registry[name](**config) - async def put_file(self, filename: str, data: bytes) -> FileResult: - return await self._put_file(filename, data) - - async def _put_file(self, filename: str, data: bytes) -> FileResult: + # Credential properties for API passthrough + @property + def bucket_name(self) -> str: + """Default bucket name for this storage instance.""" raise NotImplementedError - async def delete_file(self, filename: str): - return await self._delete_file(filename) - - async def _delete_file(self, filename: str): + @property + def region(self) -> str: + """AWS region for this storage instance.""" raise NotImplementedError - async def get_file_url(self, filename: str) -> str: - return await self._get_file_url(filename) + @property + def access_key_id(self) -> str | None: + """AWS access key ID (None for role-based auth). Prefer key_credentials property.""" + return None - async def _get_file_url(self, filename: str) -> str: + @property + def secret_access_key(self) -> str | None: + """AWS secret access key (None for role-based auth). Prefer key_credentials property.""" + return None + + @property + def role_arn(self) -> str | None: + """AWS IAM role ARN for role-based auth (None for key-based auth). Prefer role_credential property.""" + return None + + @property + def key_credentials(self) -> tuple[str, str]: + """ + Get (access_key_id, secret_access_key) for key-based auth. + Raises ValueError if storage uses IAM role instead. + """ raise NotImplementedError - async def get_file(self, filename: str): - return await self._get_file(filename) - - async def _get_file(self, filename: str): + @property + def role_credential(self) -> str: + """ + Get IAM role ARN for role-based auth. + Raises ValueError if storage uses access keys instead. + """ + raise NotImplementedError + + async def put_file( + self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None + ) -> FileResult: + """Upload data. bucket: override instance default if provided.""" + return await self._put_file(filename, data, bucket=bucket) + + async def _put_file( + self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None + ) -> FileResult: + raise NotImplementedError + + async def delete_file(self, filename: str, *, bucket: str | None = None): + """Delete file. 
bucket: override instance default if provided.""" + return await self._delete_file(filename, bucket=bucket) + + async def _delete_file(self, filename: str, *, bucket: str | None = None): + raise NotImplementedError + + async def get_file_url( + self, + filename: str, + operation: str = "get_object", + expires_in: int = 3600, + *, + bucket: str | None = None, + ) -> str: + """Generate presigned URL. bucket: override instance default if provided.""" + return await self._get_file_url(filename, operation, expires_in, bucket=bucket) + + async def _get_file_url( + self, + filename: str, + operation: str = "get_object", + expires_in: int = 3600, + *, + bucket: str | None = None, + ) -> str: + raise NotImplementedError + + async def get_file(self, filename: str, *, bucket: str | None = None): + """Download file. bucket: override instance default if provided.""" + return await self._get_file(filename, bucket=bucket) + + async def _get_file(self, filename: str, *, bucket: str | None = None): + raise NotImplementedError + + async def list_objects( + self, prefix: str = "", *, bucket: str | None = None + ) -> list[str]: + """List object keys. bucket: override instance default if provided.""" + return await self._list_objects(prefix, bucket=bucket) + + async def _list_objects( + self, prefix: str = "", *, bucket: str | None = None + ) -> list[str]: + raise NotImplementedError + + async def stream_to_fileobj( + self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None + ): + """Stream file directly to file object without loading into memory. + bucket: override instance default if provided.""" + return await self._stream_to_fileobj(filename, fileobj, bucket=bucket) + + async def _stream_to_fileobj( + self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None + ): raise NotImplementedError diff --git a/server/reflector/storage/storage_aws.py b/server/reflector/storage/storage_aws.py index de9ccf35..372af4aa 100644 --- a/server/reflector/storage/storage_aws.py +++ b/server/reflector/storage/storage_aws.py @@ -1,79 +1,236 @@ +from functools import wraps +from typing import BinaryIO, Union + import aioboto3 +from botocore.config import Config +from botocore.exceptions import ClientError from reflector.logger import logger -from reflector.storage.base import FileResult, Storage +from reflector.storage.base import FileResult, Storage, StoragePermissionError + + +def handle_s3_client_errors(operation_name: str): + """Decorator to handle S3 ClientError with bucket-aware messaging. + + Args: + operation_name: Human-readable operation name for error messages (e.g., "upload", "delete") + """ + + def decorator(func): + @wraps(func) + async def wrapper(self, *args, **kwargs): + bucket = kwargs.get("bucket") + try: + return await func(self, *args, **kwargs) + except ClientError as e: + error_code = e.response.get("Error", {}).get("Code") + if error_code in ("AccessDenied", "NoSuchBucket"): + actual_bucket = bucket or self._bucket_name + bucket_context = ( + f"overridden bucket '{actual_bucket}'" + if bucket + else f"default bucket '{actual_bucket}'" + ) + raise StoragePermissionError( + f"S3 {operation_name} failed for {bucket_context}: {error_code}. " + f"Check TRANSCRIPT_STORAGE_AWS_* credentials have permission." + ) from e + raise + + return wrapper + + return decorator class AwsStorage(Storage): + """AWS S3 storage with bucket override for multi-platform recording architecture. 
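+
+    A minimal sketch of the override pattern (hypothetical bucket name):
+
+        storage = get_transcripts_storage()
+        data = await storage.get_file("tracks/a.webm", bucket="daily-recordings")
+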
+ Master credentials access all buckets via optional bucket parameter in operations.""" + def __init__( self, - aws_access_key_id: str, - aws_secret_access_key: str, aws_bucket_name: str, aws_region: str, + aws_access_key_id: str | None = None, + aws_secret_access_key: str | None = None, + aws_role_arn: str | None = None, ): - if not aws_access_key_id: - raise ValueError("Storage `aws_storage` require `aws_access_key_id`") - if not aws_secret_access_key: - raise ValueError("Storage `aws_storage` require `aws_secret_access_key`") if not aws_bucket_name: raise ValueError("Storage `aws_storage` require `aws_bucket_name`") if not aws_region: raise ValueError("Storage `aws_storage` require `aws_region`") + if not aws_access_key_id and not aws_role_arn: + raise ValueError( + "Storage `aws_storage` require either `aws_access_key_id` or `aws_role_arn`" + ) + if aws_role_arn and (aws_access_key_id or aws_secret_access_key): + raise ValueError( + "Storage `aws_storage` cannot use both `aws_role_arn` and access keys" + ) super().__init__() - self.aws_bucket_name = aws_bucket_name + self._bucket_name = aws_bucket_name + self._region = aws_region + self._access_key_id = aws_access_key_id + self._secret_access_key = aws_secret_access_key + self._role_arn = aws_role_arn + self.aws_folder = "" if "/" in aws_bucket_name: - self.aws_bucket_name, self.aws_folder = aws_bucket_name.split("/", 1) + self._bucket_name, self.aws_folder = aws_bucket_name.split("/", 1) + self.boto_config = Config(retries={"max_attempts": 3, "mode": "adaptive"}) self.session = aioboto3.Session( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=aws_region, ) - self.base_url = f"https://{aws_bucket_name}.s3.amazonaws.com/" + self.base_url = f"https://{self._bucket_name}.s3.amazonaws.com/" - async def _put_file(self, filename: str, data: bytes) -> FileResult: - bucket = self.aws_bucket_name - folder = self.aws_folder - logger.info(f"Uploading {filename} to S3 {bucket}/{folder}") - s3filename = f"{folder}/{filename}" if folder else filename - async with self.session.client("s3") as client: - await client.put_object( - Bucket=bucket, - Key=s3filename, - Body=data, + # Implement credential properties + @property + def bucket_name(self) -> str: + return self._bucket_name + + @property + def region(self) -> str: + return self._region + + @property + def access_key_id(self) -> str | None: + return self._access_key_id + + @property + def secret_access_key(self) -> str | None: + return self._secret_access_key + + @property + def role_arn(self) -> str | None: + return self._role_arn + + @property + def key_credentials(self) -> tuple[str, str]: + """Get (access_key_id, secret_access_key) for key-based auth.""" + if self._role_arn: + raise ValueError( + "Storage uses IAM role authentication. " + "Use role_credential property instead of key_credentials." ) + if not self._access_key_id or not self._secret_access_key: + raise ValueError("Storage access key credentials not configured") + return (self._access_key_id, self._secret_access_key) - async def _get_file_url(self, filename: str) -> FileResult: - bucket = self.aws_bucket_name + @property + def role_credential(self) -> str: + """Get IAM role ARN for role-based auth.""" + if self._access_key_id or self._secret_access_key: + raise ValueError( + "Storage uses access key authentication. " + "Use key_credentials property instead of role_credential." 
+ ) + if not self._role_arn: + raise ValueError("Storage IAM role ARN not configured") + return self._role_arn + + @handle_s3_client_errors("upload") + async def _put_file( + self, filename: str, data: Union[bytes, BinaryIO], *, bucket: str | None = None + ) -> FileResult: + actual_bucket = bucket or self._bucket_name folder = self.aws_folder s3filename = f"{folder}/{filename}" if folder else filename - async with self.session.client("s3") as client: + logger.info(f"Uploading {filename} to S3 {actual_bucket}/{folder}") + + async with self.session.client("s3", config=self.boto_config) as client: + if isinstance(data, bytes): + await client.put_object(Bucket=actual_bucket, Key=s3filename, Body=data) + else: + # boto3 reads file-like object in chunks + # avoids creating extra memory copy vs bytes.getvalue() approach + await client.upload_fileobj(data, Bucket=actual_bucket, Key=s3filename) + + url = await self._get_file_url(filename, bucket=bucket) + return FileResult(filename=filename, url=url) + + @handle_s3_client_errors("presign") + async def _get_file_url( + self, + filename: str, + operation: str = "get_object", + expires_in: int = 3600, + *, + bucket: str | None = None, + ) -> str: + actual_bucket = bucket or self._bucket_name + folder = self.aws_folder + s3filename = f"{folder}/{filename}" if folder else filename + async with self.session.client("s3", config=self.boto_config) as client: presigned_url = await client.generate_presigned_url( - "get_object", - Params={"Bucket": bucket, "Key": s3filename}, - ExpiresIn=3600, + operation, + Params={"Bucket": actual_bucket, "Key": s3filename}, + ExpiresIn=expires_in, ) return presigned_url - async def _delete_file(self, filename: str): - bucket = self.aws_bucket_name + @handle_s3_client_errors("delete") + async def _delete_file(self, filename: str, *, bucket: str | None = None): + actual_bucket = bucket or self._bucket_name folder = self.aws_folder - logger.info(f"Deleting {filename} from S3 {bucket}/{folder}") + logger.info(f"Deleting {filename} from S3 {actual_bucket}/{folder}") s3filename = f"{folder}/{filename}" if folder else filename - async with self.session.client("s3") as client: - await client.delete_object(Bucket=bucket, Key=s3filename) + async with self.session.client("s3", config=self.boto_config) as client: + await client.delete_object(Bucket=actual_bucket, Key=s3filename) - async def _get_file(self, filename: str): - bucket = self.aws_bucket_name + @handle_s3_client_errors("download") + async def _get_file(self, filename: str, *, bucket: str | None = None): + actual_bucket = bucket or self._bucket_name folder = self.aws_folder - logger.info(f"Downloading {filename} from S3 {bucket}/{folder}") + logger.info(f"Downloading {filename} from S3 {actual_bucket}/{folder}") s3filename = f"{folder}/{filename}" if folder else filename - async with self.session.client("s3") as client: - response = await client.get_object(Bucket=bucket, Key=s3filename) + async with self.session.client("s3", config=self.boto_config) as client: + response = await client.get_object(Bucket=actual_bucket, Key=s3filename) return await response["Body"].read() + @handle_s3_client_errors("list_objects") + async def _list_objects( + self, prefix: str = "", *, bucket: str | None = None + ) -> list[str]: + actual_bucket = bucket or self._bucket_name + folder = self.aws_folder + # Combine folder and prefix + s3prefix = f"{folder}/{prefix}" if folder else prefix + logger.info(f"Listing objects from S3 {actual_bucket} with prefix '{s3prefix}'") + + keys = [] + async with 
self.session.client("s3", config=self.boto_config) as client:
+            paginator = client.get_paginator("list_objects_v2")
+            async for page in paginator.paginate(Bucket=actual_bucket, Prefix=s3prefix):
+                if "Contents" in page:
+                    for obj in page["Contents"]:
+                        # Strip folder prefix from keys if present
+                        key = obj["Key"]
+                        if folder:
+                            if key.startswith(f"{folder}/"):
+                                key = key[len(folder) + 1 :]
+                            elif key == folder:
+                                # Skip folder marker itself
+                                continue
+                        keys.append(key)
+
+        return keys
+
+    @handle_s3_client_errors("stream")
+    async def _stream_to_fileobj(
+        self, filename: str, fileobj: BinaryIO, *, bucket: str | None = None
+    ):
+        """Stream file from S3 directly to file object without loading into memory."""
+        actual_bucket = bucket or self._bucket_name
+        folder = self.aws_folder
+        logger.info(f"Streaming {filename} from S3 {actual_bucket}/{folder}")
+        s3filename = f"{folder}/{filename}" if folder else filename
+        async with self.session.client("s3", config=self.boto_config) as client:
+            await client.download_fileobj(
+                Bucket=actual_bucket, Key=s3filename, Fileobj=fileobj
+            )
+
 Storage.register("aws", AwsStorage)
diff --git a/server/reflector/utils/daily.py b/server/reflector/utils/daily.py
new file mode 100644
index 00000000..1c3b367c
--- /dev/null
+++ b/server/reflector/utils/daily.py
@@ -0,0 +1,26 @@
+from reflector.utils.string import NonEmptyString
+
+DailyRoomName = str
+
+
+def extract_base_room_name(daily_room_name: DailyRoomName) -> NonEmptyString:
+    """
+    Extract base room name from Daily.co timestamped room name.
+
+    Daily.co creates rooms with timestamp suffix: {base_name}-YYYYMMDDHHMMSS
+    This function removes the timestamp to get the original room name.
+
+    Examples:
+        "daily-20251020193458" → "daily"
+        "daily-2-20251020193458" → "daily-2"
+        "my-room-name-20251020193458" → "my-room-name"
+
+    Args:
+        daily_room_name: Full Daily.co room name with timestamp suffix
+
+    Returns:
+        Base room name without timestamp suffix
+    """
+    base_name = daily_room_name.rsplit("-", 1)[0]
+    assert base_name, f"Extracted base name is empty from: {daily_room_name}"
+    return base_name
diff --git a/server/reflector/utils/datetime.py b/server/reflector/utils/datetime.py
new file mode 100644
index 00000000..d416412f
--- /dev/null
+++ b/server/reflector/utils/datetime.py
@@ -0,0 +1,9 @@
+from datetime import datetime, timezone
+
+
+def parse_datetime_with_timezone(iso_string: str) -> datetime:
+    """Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive)."""
+    dt = datetime.fromisoformat(iso_string)
+    if dt.tzinfo is None:
+        dt = dt.replace(tzinfo=timezone.utc)
+    return dt
diff --git a/server/reflector/utils/string.py b/server/reflector/utils/string.py
index 05f40e30..ae4277c5 100644
--- a/server/reflector/utils/string.py
+++ b/server/reflector/utils/string.py
@@ -1,4 +1,4 @@
-from typing import Annotated
+from typing import Annotated, TypeVar
 
 from pydantic import Field, TypeAdapter, constr
 
@@ -21,3 +21,12 @@ def try_parse_non_empty_string(s: str) -> NonEmptyString | None:
     if not s:
         return None
     return parse_non_empty_string(s)
+
+
+T = TypeVar("T", bound=str)
+
+
+def assert_equal(s1: T, s2: T) -> T:
+    if s1 != s2:
+        raise ValueError(f"assert_equal: {s1} != {s2}")
+    return s1
diff --git a/server/reflector/utils/url.py b/server/reflector/utils/url.py
new file mode 100644
index 00000000..e49a4cb0
--- /dev/null
+++ b/server/reflector/utils/url.py
@@ -0,0 +1,37 @@
+"""URL manipulation utilities."""
+
+from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
+
+
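+# Implementation note: parse_qs returns {key: [values]} and
+# urlencode(..., doseq=True) expands the lists back into repeated parameters:
+#     parse_qs("a=1&a=2")                       -> {"a": ["1", "2"]}
+#     urlencode({"a": ["1", "2"]}, doseq=True)  -> "a=1&a=2"
+# keep_blank_values=True preserves parameters like "?flag=" that parse_qs
+# would otherwise drop.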
+def add_query_param(url: str, key: str, value: str) -> str: + """ + Add or update a query parameter in a URL. + + Properly handles URLs with or without existing query parameters, + preserving fragments and encoding special characters. + + Args: + url: The URL to modify + key: The query parameter name + value: The query parameter value + + Returns: + The URL with the query parameter added or updated + + Examples: + >>> add_query_param("https://example.com/room", "t", "token123") + 'https://example.com/room?t=token123' + + >>> add_query_param("https://example.com/room?existing=param", "t", "token123") + 'https://example.com/room?existing=param&t=token123' + """ + parsed = urlparse(url) + + query_params = parse_qs(parsed.query, keep_blank_values=True) + + query_params[key] = [value] + + new_query = urlencode(query_params, doseq=True) + + new_parsed = parsed._replace(query=new_query) + return urlunparse(new_parsed) diff --git a/server/reflector/video_platforms/__init__.py b/server/reflector/video_platforms/__init__.py new file mode 100644 index 00000000..dcbdc45b --- /dev/null +++ b/server/reflector/video_platforms/__init__.py @@ -0,0 +1,11 @@ +from .base import VideoPlatformClient +from .models import MeetingData, VideoPlatformConfig +from .registry import get_platform_client, register_platform + +__all__ = [ + "VideoPlatformClient", + "VideoPlatformConfig", + "MeetingData", + "get_platform_client", + "register_platform", +] diff --git a/server/reflector/video_platforms/base.py b/server/reflector/video_platforms/base.py new file mode 100644 index 00000000..d208a75a --- /dev/null +++ b/server/reflector/video_platforms/base.py @@ -0,0 +1,54 @@ +from abc import ABC, abstractmethod +from datetime import datetime +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from ..schemas.platform import Platform +from ..utils.string import NonEmptyString +from .models import MeetingData, VideoPlatformConfig + +if TYPE_CHECKING: + from reflector.db.rooms import Room + +# separator doesn't guarantee there's no more "ROOM_PREFIX_SEPARATOR" strings in room name +ROOM_PREFIX_SEPARATOR = "-" + + +class VideoPlatformClient(ABC): + PLATFORM_NAME: Platform + + def __init__(self, config: VideoPlatformConfig): + self.config = config + + @abstractmethod + async def create_meeting( + self, room_name_prefix: NonEmptyString, end_date: datetime, room: "Room" + ) -> MeetingData: + pass + + @abstractmethod + async def get_room_sessions(self, room_name: str) -> List[Any] | None: + pass + + @abstractmethod + async def delete_room(self, room_name: str) -> bool: + pass + + @abstractmethod + async def upload_logo(self, room_name: str, logo_path: str) -> bool: + pass + + @abstractmethod + def verify_webhook_signature( + self, body: bytes, signature: str, timestamp: Optional[str] = None + ) -> bool: + pass + + def format_recording_config(self, room: "Room") -> Dict[str, Any]: + if room.recording_type == "cloud" and self.config.s3_bucket: + return { + "type": room.recording_type, + "bucket": self.config.s3_bucket, + "region": self.config.s3_region, + "trigger": room.recording_trigger, + } + return {"type": room.recording_type} diff --git a/server/reflector/video_platforms/daily.py b/server/reflector/video_platforms/daily.py new file mode 100644 index 00000000..ec45d965 --- /dev/null +++ b/server/reflector/video_platforms/daily.py @@ -0,0 +1,198 @@ +import base64 +import hmac +from datetime import datetime +from hashlib import sha256 +from http import HTTPStatus +from typing import Any, Dict, List, Optional + +import 
httpx + +from reflector.db.rooms import Room +from reflector.logger import logger +from reflector.storage import get_dailyco_storage + +from ..schemas.platform import Platform +from ..utils.daily import DailyRoomName +from ..utils.string import NonEmptyString +from .base import ROOM_PREFIX_SEPARATOR, VideoPlatformClient +from .models import MeetingData, RecordingType, VideoPlatformConfig + + +class DailyClient(VideoPlatformClient): + PLATFORM_NAME: Platform = "daily" + TIMEOUT = 10 + BASE_URL = "https://api.daily.co/v1" + TIMESTAMP_FORMAT = "%Y%m%d%H%M%S" + RECORDING_NONE: RecordingType = "none" + RECORDING_CLOUD: RecordingType = "cloud" + + def __init__(self, config: VideoPlatformConfig): + super().__init__(config) + self.headers = { + "Authorization": f"Bearer {config.api_key}", + "Content-Type": "application/json", + } + + async def create_meeting( + self, room_name_prefix: NonEmptyString, end_date: datetime, room: Room + ) -> MeetingData: + """ + Daily.co rooms vs meetings: + - We create a NEW Daily.co room for each Reflector meeting + - Daily.co meeting/session starts automatically when first participant joins + - Room auto-deletes after exp time + - Meeting.room_name stores the timestamped Daily.co room name + """ + timestamp = datetime.now().strftime(self.TIMESTAMP_FORMAT) + room_name = f"{room_name_prefix}{ROOM_PREFIX_SEPARATOR}{timestamp}" + + data = { + "name": room_name, + "privacy": "private" if room.is_locked else "public", + "properties": { + "enable_recording": "raw-tracks" + if room.recording_type != self.RECORDING_NONE + else False, + "enable_chat": True, + "enable_screenshare": True, + "start_video_off": False, + "start_audio_off": False, + "exp": int(end_date.timestamp()), + }, + } + + # Get storage config for passing to Daily API + daily_storage = get_dailyco_storage() + assert daily_storage.bucket_name, "S3 bucket must be configured" + data["properties"]["recordings_bucket"] = { + "bucket_name": daily_storage.bucket_name, + "bucket_region": daily_storage.region, + "assume_role_arn": daily_storage.role_credential, + "allow_api_access": True, + } + + async with httpx.AsyncClient() as client: + response = await client.post( + f"{self.BASE_URL}/rooms", + headers=self.headers, + json=data, + timeout=self.TIMEOUT, + ) + if response.status_code >= 400: + logger.error( + "Daily.co API error", + status_code=response.status_code, + response_body=response.text, + request_data=data, + ) + response.raise_for_status() + result = response.json() + + room_url = result["url"] + + return MeetingData( + meeting_id=result["id"], + room_name=result["name"], + room_url=room_url, + host_room_url=room_url, + platform=self.PLATFORM_NAME, + extra_data=result, + ) + + async def get_room_sessions(self, room_name: str) -> List[Any] | None: + # no such api + return None + + async def get_room_presence(self, room_name: str) -> Dict[str, Any]: + async with httpx.AsyncClient() as client: + response = await client.get( + f"{self.BASE_URL}/rooms/{room_name}/presence", + headers=self.headers, + timeout=self.TIMEOUT, + ) + response.raise_for_status() + return response.json() + + async def get_meeting_participants(self, meeting_id: str) -> Dict[str, Any]: + async with httpx.AsyncClient() as client: + response = await client.get( + f"{self.BASE_URL}/meetings/{meeting_id}/participants", + headers=self.headers, + timeout=self.TIMEOUT, + ) + response.raise_for_status() + return response.json() + + async def get_recording(self, recording_id: str) -> Dict[str, Any]: + async with httpx.AsyncClient() as client: + 
response = await client.get( + f"{self.BASE_URL}/recordings/{recording_id}", + headers=self.headers, + timeout=self.TIMEOUT, + ) + response.raise_for_status() + return response.json() + + async def delete_room(self, room_name: str) -> bool: + async with httpx.AsyncClient() as client: + response = await client.delete( + f"{self.BASE_URL}/rooms/{room_name}", + headers=self.headers, + timeout=self.TIMEOUT, + ) + return response.status_code in (HTTPStatus.OK, HTTPStatus.NOT_FOUND) + + async def upload_logo(self, room_name: str, logo_path: str) -> bool: + return True + + def verify_webhook_signature( + self, body: bytes, signature: str, timestamp: Optional[str] = None + ) -> bool: + """Verify Daily.co webhook signature. + + Daily.co uses: + - X-Webhook-Signature header + - X-Webhook-Timestamp header + - Signature format: HMAC-SHA256(base64_decode(secret), timestamp + '.' + body) + - Result is base64 encoded + """ + if not signature or not timestamp: + return False + + try: + secret_bytes = base64.b64decode(self.config.webhook_secret) + + signed_content = timestamp.encode() + b"." + body + + expected = hmac.new(secret_bytes, signed_content, sha256).digest() + expected_b64 = base64.b64encode(expected).decode() + + return hmac.compare_digest(expected_b64, signature) + except Exception as e: + logger.error("Daily.co webhook signature verification failed", exc_info=e) + return False + + async def create_meeting_token( + self, + room_name: DailyRoomName, + enable_recording: bool, + user_id: Optional[str] = None, + ) -> str: + data = {"properties": {"room_name": room_name}} + + if enable_recording: + data["properties"]["start_cloud_recording"] = True + data["properties"]["enable_recording_ui"] = False + + if user_id: + data["properties"]["user_id"] = user_id + + async with httpx.AsyncClient() as client: + response = await client.post( + f"{self.BASE_URL}/meeting-tokens", + headers=self.headers, + json=data, + timeout=self.TIMEOUT, + ) + response.raise_for_status() + return response.json()["token"] diff --git a/server/reflector/video_platforms/factory.py b/server/reflector/video_platforms/factory.py new file mode 100644 index 00000000..172d45e7 --- /dev/null +++ b/server/reflector/video_platforms/factory.py @@ -0,0 +1,62 @@ +from typing import Optional + +from reflector.settings import settings +from reflector.storage import get_dailyco_storage, get_whereby_storage + +from ..schemas.platform import WHEREBY_PLATFORM, Platform +from .base import VideoPlatformClient, VideoPlatformConfig +from .registry import get_platform_client + + +def get_platform_config(platform: Platform) -> VideoPlatformConfig: + if platform == WHEREBY_PLATFORM: + if not settings.WHEREBY_API_KEY: + raise ValueError( + "WHEREBY_API_KEY is required when platform='whereby'. " + "Set WHEREBY_API_KEY environment variable." + ) + whereby_storage = get_whereby_storage() + key_id, secret = whereby_storage.key_credentials + return VideoPlatformConfig( + api_key=settings.WHEREBY_API_KEY, + webhook_secret=settings.WHEREBY_WEBHOOK_SECRET or "", + api_url=settings.WHEREBY_API_URL, + s3_bucket=whereby_storage.bucket_name, + s3_region=whereby_storage.region, + aws_access_key_id=key_id, + aws_access_key_secret=secret, + ) + elif platform == "daily": + if not settings.DAILY_API_KEY: + raise ValueError( + "DAILY_API_KEY is required when platform='daily'. " + "Set DAILY_API_KEY environment variable." + ) + if not settings.DAILY_SUBDOMAIN: + raise ValueError( + "DAILY_SUBDOMAIN is required when platform='daily'. 
" + "Set DAILY_SUBDOMAIN environment variable." + ) + daily_storage = get_dailyco_storage() + return VideoPlatformConfig( + api_key=settings.DAILY_API_KEY, + webhook_secret=settings.DAILY_WEBHOOK_SECRET or "", + subdomain=settings.DAILY_SUBDOMAIN, + s3_bucket=daily_storage.bucket_name, + s3_region=daily_storage.region, + aws_role_arn=daily_storage.role_credential, + ) + else: + raise ValueError(f"Unknown platform: {platform}") + + +def create_platform_client(platform: Platform) -> VideoPlatformClient: + config = get_platform_config(platform) + return get_platform_client(platform, config) + + +def get_platform(room_platform: Optional[Platform] = None) -> Platform: + if room_platform: + return room_platform + + return settings.DEFAULT_VIDEO_PLATFORM diff --git a/server/reflector/video_platforms/models.py b/server/reflector/video_platforms/models.py new file mode 100644 index 00000000..82876888 --- /dev/null +++ b/server/reflector/video_platforms/models.py @@ -0,0 +1,40 @@ +from typing import Any, Dict, Literal, Optional + +from pydantic import BaseModel, Field + +from reflector.schemas.platform import WHEREBY_PLATFORM, Platform + +RecordingType = Literal["none", "local", "cloud"] + + +class MeetingData(BaseModel): + platform: Platform + meeting_id: str = Field(description="Platform-specific meeting identifier") + room_url: str = Field(description="URL for participants to join") + host_room_url: str = Field(description="URL for hosts (may be same as room_url)") + room_name: str = Field(description="Human-readable room name") + extra_data: Dict[str, Any] = Field(default_factory=dict) + + class Config: + json_schema_extra = { + "example": { + "platform": WHEREBY_PLATFORM, + "meeting_id": "12345678", + "room_url": "https://subdomain.whereby.com/room-20251008120000", + "host_room_url": "https://subdomain.whereby.com/room-20251008120000?roomKey=abc123", + "room_name": "room-20251008120000", + } + } + + +class VideoPlatformConfig(BaseModel): + api_key: str + webhook_secret: str + api_url: Optional[str] = None + subdomain: Optional[str] = None # Whereby/Daily subdomain + s3_bucket: Optional[str] = None + s3_region: Optional[str] = None + # Whereby uses access keys, Daily uses IAM role + aws_access_key_id: Optional[str] = None + aws_access_key_secret: Optional[str] = None + aws_role_arn: Optional[str] = None diff --git a/server/reflector/video_platforms/registry.py b/server/reflector/video_platforms/registry.py new file mode 100644 index 00000000..b4c10697 --- /dev/null +++ b/server/reflector/video_platforms/registry.py @@ -0,0 +1,35 @@ +from typing import Dict, Type + +from ..schemas.platform import DAILY_PLATFORM, WHEREBY_PLATFORM, Platform +from .base import VideoPlatformClient, VideoPlatformConfig + +_PLATFORMS: Dict[Platform, Type[VideoPlatformClient]] = {} + + +def register_platform(name: Platform, client_class: Type[VideoPlatformClient]): + _PLATFORMS[name] = client_class + + +def get_platform_client( + platform: Platform, config: VideoPlatformConfig +) -> VideoPlatformClient: + if platform not in _PLATFORMS: + raise ValueError(f"Unknown video platform: {platform}") + + client_class = _PLATFORMS[platform] + return client_class(config) + + +def get_available_platforms() -> list[Platform]: + return list(_PLATFORMS.keys()) + + +def _register_builtin_platforms(): + from .daily import DailyClient # noqa: PLC0415 + from .whereby import WherebyClient # noqa: PLC0415 + + register_platform(WHEREBY_PLATFORM, WherebyClient) + register_platform(DAILY_PLATFORM, DailyClient) + + 
+_register_builtin_platforms() diff --git a/server/reflector/video_platforms/whereby.py b/server/reflector/video_platforms/whereby.py new file mode 100644 index 00000000..f856454a --- /dev/null +++ b/server/reflector/video_platforms/whereby.py @@ -0,0 +1,141 @@ +import hmac +import json +import re +import time +from datetime import datetime +from hashlib import sha256 +from typing import Any, Dict, Optional + +import httpx + +from reflector.db.rooms import Room +from reflector.storage import get_whereby_storage + +from ..schemas.platform import WHEREBY_PLATFORM, Platform +from ..utils.string import NonEmptyString +from .base import ( + MeetingData, + VideoPlatformClient, + VideoPlatformConfig, +) +from .whereby_utils import whereby_room_name_prefix + + +class WherebyClient(VideoPlatformClient): + PLATFORM_NAME: Platform = WHEREBY_PLATFORM + TIMEOUT = 10 # seconds + MAX_ELAPSED_TIME = 60 * 1000 # 1 minute in milliseconds + + def __init__(self, config: VideoPlatformConfig): + super().__init__(config) + self.headers = { + "Content-Type": "application/json; charset=utf-8", + "Authorization": f"Bearer {config.api_key}", + } + + async def create_meeting( + self, room_name_prefix: NonEmptyString, end_date: datetime, room: Room + ) -> MeetingData: + data = { + "isLocked": room.is_locked, + "roomNamePrefix": whereby_room_name_prefix(room_name_prefix), + "roomNamePattern": "uuid", + "roomMode": room.room_mode, + "endDate": end_date.isoformat(), + "fields": ["hostRoomUrl"], + } + + if room.recording_type == "cloud": + # Get storage config for passing credentials to Whereby API + whereby_storage = get_whereby_storage() + key_id, secret = whereby_storage.key_credentials + data["recording"] = { + "type": room.recording_type, + "destination": { + "provider": "s3", + "bucket": whereby_storage.bucket_name, + "accessKeyId": key_id, + "accessKeySecret": secret, + "fileFormat": "mp4", + }, + "startTrigger": room.recording_trigger, + } + + async with httpx.AsyncClient() as client: + response = await client.post( + f"{self.config.api_url}/meetings", + headers=self.headers, + json=data, + timeout=self.TIMEOUT, + ) + response.raise_for_status() + result = response.json() + + return MeetingData( + meeting_id=result["meetingId"], + room_name=result["roomName"], + room_url=result["roomUrl"], + host_room_url=result["hostRoomUrl"], + platform=self.PLATFORM_NAME, + extra_data=result, + ) + + async def get_room_sessions(self, room_name: str) -> Dict[str, Any]: + async with httpx.AsyncClient() as client: + response = await client.get( + f"{self.config.api_url}/insights/room-sessions?roomName={room_name}", + headers=self.headers, + timeout=self.TIMEOUT, + ) + response.raise_for_status() + return response.json().get("results", []) + + async def delete_room(self, room_name: str) -> bool: + return True + + async def upload_logo(self, room_name: str, logo_path: str) -> bool: + async with httpx.AsyncClient() as client: + with open(logo_path, "rb") as f: + response = await client.put( + f"{self.config.api_url}/rooms/{room_name}/theme/logo", + headers={ + "Authorization": f"Bearer {self.config.api_key}", + }, + timeout=self.TIMEOUT, + files={"image": f}, + ) + response.raise_for_status() + return True + + def verify_webhook_signature( + self, body: bytes, signature: str, timestamp: Optional[str] = None + ) -> bool: + if not signature: + return False + + matches = re.match(r"t=(.*),v1=(.*)", signature) + if not matches: + return False + + ts, sig = matches.groups() + + current_time = int(time.time() * 1000) + diff_time = 
current_time - int(ts) * 1000
+        if diff_time >= self.MAX_ELAPSED_TIME:
+            return False
+
+        body_dict = json.loads(body)
+        signed_payload = f"{ts}.{json.dumps(body_dict, separators=(',', ':'))}"
+        hmac_obj = hmac.new(
+            self.config.webhook_secret.encode("utf-8"),
+            signed_payload.encode("utf-8"),
+            sha256,
+        )
+        expected_signature = hmac_obj.hexdigest()
+
+        try:
+            return hmac.compare_digest(
+                expected_signature.encode("utf-8"), sig.encode("utf-8")
+            )
+        except Exception:
+            return False
diff --git a/server/reflector/video_platforms/whereby_utils.py b/server/reflector/video_platforms/whereby_utils.py
new file mode 100644
index 00000000..2724a7b5
--- /dev/null
+++ b/server/reflector/video_platforms/whereby_utils.py
@@ -0,0 +1,38 @@
+import re
+from datetime import datetime
+
+from reflector.utils.datetime import parse_datetime_with_timezone
+from reflector.utils.string import NonEmptyString, parse_non_empty_string
+from reflector.video_platforms.base import ROOM_PREFIX_SEPARATOR
+
+
+def parse_whereby_recording_filename(
+    object_key: NonEmptyString,
+) -> tuple[NonEmptyString, datetime]:
+    filename = parse_non_empty_string(object_key.rsplit(".", 1)[0])
+    timestamp_pattern = r"(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)"
+    match = re.search(timestamp_pattern, filename)
+    if not match:
+        raise ValueError(f"No ISO timestamp found in filename: {object_key}")
+    timestamp_str = match.group(1)
+    timestamp_start = match.start(1)
+    room_name_part = filename[:timestamp_start]
+    if room_name_part.endswith(ROOM_PREFIX_SEPARATOR):
+        room_name_part = room_name_part[: -len(ROOM_PREFIX_SEPARATOR)]
+    else:
+        raise ValueError(
+            f"room name {room_name_part} doesn't end with {ROOM_PREFIX_SEPARATOR} in filename: {object_key}"
+        )
+
+    return parse_non_empty_string(room_name_part), parse_datetime_with_timezone(
+        timestamp_str
+    )
+
+
+def whereby_room_name_prefix(room_name_prefix: NonEmptyString) -> NonEmptyString:
+    return room_name_prefix + ROOM_PREFIX_SEPARATOR
+
+
+# The Whereby API returns room names with a leading "/", but recording filenames omit it
+def room_name_to_whereby_api_room_name(room_name: NonEmptyString) -> NonEmptyString:
+    return f"/{room_name}"
diff --git a/server/reflector/views/daily.py b/server/reflector/views/daily.py
new file mode 100644
index 00000000..6f51cd1e
--- /dev/null
+++ b/server/reflector/views/daily.py
@@ -0,0 +1,233 @@
+import json
+from typing import Any, Dict, Literal
+
+from fastapi import APIRouter, HTTPException, Request
+from pydantic import BaseModel
+
+from reflector.db.meetings import meetings_controller
+from reflector.logger import logger as _logger
+from reflector.settings import settings
+from reflector.utils.daily import DailyRoomName
+from reflector.video_platforms.factory import create_platform_client
+from reflector.worker.process import process_multitrack_recording
+
+router = APIRouter()
+
+logger = _logger.bind(platform="daily")
+
+
+class DailyTrack(BaseModel):
+    type: Literal["audio", "video"]
+    s3Key: str
+    size: int
+
+
+class DailyWebhookEvent(BaseModel):
+    version: str
+    type: str
+    id: str
+    payload: Dict[str, Any]
+    event_ts: float
+
+
+def _extract_room_name(event: DailyWebhookEvent) -> DailyRoomName | None:
+    """Extract room name from Daily event payload.
+
+    Daily.co API inconsistency:
+    - participant.* events use "room" field
+    - recording.* events use "room_name" field
+    """
+    return event.payload.get("room_name") or event.payload.get("room")
+
+
+@router.post("/webhook")
+async def webhook(request: Request):
+    """Handle Daily webhook events.
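+
+    Requests are verified (unless bypassed below) with the X-Webhook-Signature
+    and X-Webhook-Timestamp headers, then dispatched on the event type.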
+ + Daily.co circuit-breaker: After 3+ failed responses (4xx/5xx), webhook + state→FAILED, stops sending events. Reset: scripts/recreate_daily_webhook.py + """ + body = await request.body() + signature = request.headers.get("X-Webhook-Signature", "") + timestamp = request.headers.get("X-Webhook-Timestamp", "") + + client = create_platform_client("daily") + + # TEMPORARY: Bypass signature check for testing + # TODO: Remove this after testing is complete + BYPASS_FOR_TESTING = True + if not BYPASS_FOR_TESTING: + if not client.verify_webhook_signature(body, signature, timestamp): + logger.warning( + "Invalid webhook signature", + signature=signature, + timestamp=timestamp, + has_body=bool(body), + ) + raise HTTPException(status_code=401, detail="Invalid webhook signature") + + try: + body_json = json.loads(body) + except json.JSONDecodeError: + raise HTTPException(status_code=422, detail="Invalid JSON") + + if body_json.get("test") == "test": + logger.info("Received Daily webhook test event") + return {"status": "ok"} + + # Parse as actual event + try: + event = DailyWebhookEvent(**body_json) + except Exception as e: + logger.error("Failed to parse webhook event", error=str(e), body=body.decode()) + raise HTTPException(status_code=422, detail="Invalid event format") + + # Handle participant events + if event.type == "participant.joined": + await _handle_participant_joined(event) + elif event.type == "participant.left": + await _handle_participant_left(event) + elif event.type == "recording.started": + await _handle_recording_started(event) + elif event.type == "recording.ready-to-download": + await _handle_recording_ready(event) + elif event.type == "recording.error": + await _handle_recording_error(event) + else: + logger.warning( + "Unhandled Daily webhook event type", + event_type=event.type, + payload=event.payload, + ) + + return {"status": "ok"} + + +async def _handle_participant_joined(event: DailyWebhookEvent): + daily_room_name = _extract_room_name(event) + if not daily_room_name: + logger.warning("participant.joined: no room in payload", payload=event.payload) + return + + meeting = await meetings_controller.get_by_room_name(daily_room_name) + if meeting: + await meetings_controller.increment_num_clients(meeting.id) + logger.info( + "Participant joined", + meeting_id=meeting.id, + room_name=daily_room_name, + recording_type=meeting.recording_type, + recording_trigger=meeting.recording_trigger, + ) + else: + logger.warning( + "participant.joined: meeting not found", room_name=daily_room_name + ) + + +async def _handle_participant_left(event: DailyWebhookEvent): + room_name = _extract_room_name(event) + if not room_name: + return + + meeting = await meetings_controller.get_by_room_name(room_name) + if meeting: + await meetings_controller.decrement_num_clients(meeting.id) + + +async def _handle_recording_started(event: DailyWebhookEvent): + room_name = _extract_room_name(event) + if not room_name: + logger.warning( + "recording.started: no room_name in payload", payload=event.payload + ) + return + + meeting = await meetings_controller.get_by_room_name(room_name) + if meeting: + logger.info( + "Recording started", + meeting_id=meeting.id, + room_name=room_name, + recording_id=event.payload.get("recording_id"), + platform="daily", + ) + else: + logger.warning("recording.started: meeting not found", room_name=room_name) + + +async def _handle_recording_ready(event: DailyWebhookEvent): + """Handle recording ready for download event. 
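+
+    Only audio tracks are forwarded to the multitrack pipeline; the filtering
+    below boils down to:
+
+        tracks = [DailyTrack(**t) for t in tracks_raw]
+        track_keys = [t.s3Key for t in tracks if t.type == "audio"]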
+ + Daily.co webhook payload for raw-tracks recordings: + { + "recording_id": "...", + "room_name": "test2-20251009192341", + "tracks": [ + {"type": "audio", "s3Key": "monadical/test2-.../uuid-cam-audio-123.webm", "size": 400000}, + {"type": "video", "s3Key": "monadical/test2-.../uuid-cam-video-456.webm", "size": 30000000} + ] + } + """ + room_name = _extract_room_name(event) + recording_id = event.payload.get("recording_id") + tracks_raw = event.payload.get("tracks", []) + + if not room_name or not tracks_raw: + logger.warning( + "recording.ready-to-download: missing room_name or tracks", + room_name=room_name, + has_tracks=bool(tracks_raw), + payload=event.payload, + ) + return + + try: + tracks = [DailyTrack(**t) for t in tracks_raw] + except Exception as e: + logger.error( + "recording.ready-to-download: invalid tracks structure", + error=str(e), + tracks=tracks_raw, + ) + return + + logger.info( + "Recording ready for download", + room_name=room_name, + recording_id=recording_id, + num_tracks=len(tracks), + platform="daily", + ) + + bucket_name = settings.DAILYCO_STORAGE_AWS_BUCKET_NAME + if not bucket_name: + logger.error( + "DAILYCO_STORAGE_AWS_BUCKET_NAME not configured; cannot process Daily recording" + ) + return + + track_keys = [t.s3Key for t in tracks if t.type == "audio"] + + process_multitrack_recording.delay( + bucket_name=bucket_name, + daily_room_name=room_name, + recording_id=recording_id, + track_keys=track_keys, + ) + + +async def _handle_recording_error(event: DailyWebhookEvent): + room_name = _extract_room_name(event) + error = event.payload.get("error", "Unknown error") + + if room_name: + meeting = await meetings_controller.get_by_room_name(room_name) + if meeting: + logger.error( + "Recording error", + meeting_id=meeting.id, + room_name=room_name, + error=error, + platform="daily", + ) diff --git a/server/reflector/views/rooms.py b/server/reflector/views/rooms.py index 70e3f9e4..e786b0d9 100644 --- a/server/reflector/views/rooms.py +++ b/server/reflector/views/rooms.py @@ -15,9 +15,14 @@ from reflector.db.calendar_events import calendar_events_controller from reflector.db.meetings import meetings_controller from reflector.db.rooms import rooms_controller from reflector.redis_cache import RedisAsyncLock +from reflector.schemas.platform import Platform from reflector.services.ics_sync import ics_sync_service from reflector.settings import settings -from reflector.whereby import create_meeting, upload_logo +from reflector.utils.url import add_query_param +from reflector.video_platforms.factory import ( + create_platform_client, + get_platform, +) from reflector.worker.webhook import test_webhook logger = logging.getLogger(__name__) @@ -41,6 +46,7 @@ class Room(BaseModel): ics_enabled: bool = False ics_last_sync: Optional[datetime] = None ics_last_etag: Optional[str] = None + platform: Platform class RoomDetails(Room): @@ -68,6 +74,7 @@ class Meeting(BaseModel): is_active: bool = True calendar_event_id: str | None = None calendar_metadata: dict[str, Any] | None = None + platform: Platform class CreateRoom(BaseModel): @@ -85,6 +92,7 @@ class CreateRoom(BaseModel): ics_url: Optional[str] = None ics_fetch_interval: int = 300 ics_enabled: bool = False + platform: Optional[Platform] = None class UpdateRoom(BaseModel): @@ -102,6 +110,7 @@ class UpdateRoom(BaseModel): ics_url: Optional[str] = None ics_fetch_interval: Optional[int] = None ics_enabled: Optional[bool] = None + platform: Optional[Platform] = None class CreateRoomMeeting(BaseModel): @@ -165,14 +174,6 @@ class 
CalendarEventResponse(BaseModel): router = APIRouter() -def parse_datetime_with_timezone(iso_string: str) -> datetime: - """Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive).""" - dt = datetime.fromisoformat(iso_string) - if dt.tzinfo is None: - dt = dt.replace(tzinfo=timezone.utc) - return dt - - @router.get("/rooms", response_model=Page[RoomDetails]) async def rooms_list( user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)], @@ -182,13 +183,18 @@ async def rooms_list( user_id = user["sub"] if user else None - return await apaginate( + paginated = await apaginate( get_database(), await rooms_controller.get_all( user_id=user_id, order_by="-created_at", return_query=True ), ) + for room in paginated.items: + room.platform = get_platform(room.platform) + + return paginated + @router.get("/rooms/{room_id}", response_model=RoomDetails) async def rooms_get( @@ -201,6 +207,7 @@ async def rooms_get( raise HTTPException(status_code=404, detail="Room not found") if not room.is_shared and (user_id is None or room.user_id != user_id): raise HTTPException(status_code=403, detail="Room access denied") + room.platform = get_platform(room.platform) return room @@ -214,17 +221,16 @@ async def rooms_get_by_name( if not room: raise HTTPException(status_code=404, detail="Room not found") - # Convert to RoomDetails format (add webhook fields if user is owner) room_dict = room.__dict__.copy() if user_id == room.user_id: - # User is owner, include webhook details if available room_dict["webhook_url"] = getattr(room, "webhook_url", None) room_dict["webhook_secret"] = getattr(room, "webhook_secret", None) else: - # Non-owner, hide webhook details room_dict["webhook_url"] = None room_dict["webhook_secret"] = None + room_dict["platform"] = get_platform(room.platform) + return RoomDetails(**room_dict) @@ -251,6 +257,7 @@ async def rooms_create( ics_url=room.ics_url, ics_fetch_interval=room.ics_fetch_interval, ics_enabled=room.ics_enabled, + platform=room.platform, ) @@ -268,6 +275,7 @@ async def rooms_update( raise HTTPException(status_code=403, detail="Not authorized") values = info.dict(exclude_unset=True) await rooms_controller.update(room, values) + room.platform = get_platform(room.platform) return room @@ -315,19 +323,22 @@ async def rooms_create_meeting( if meeting is None: end_date = current_time + timedelta(hours=8) - whereby_meeting = await create_meeting("", end_date=end_date, room=room) + platform = get_platform(room.platform) + client = create_platform_client(platform) - await upload_logo(whereby_meeting["roomName"], "./images/logo.png") + meeting_data = await client.create_meeting( + room.name, end_date=end_date, room=room + ) + + await client.upload_logo(meeting_data.room_name, "./images/logo.png") meeting = await meetings_controller.create( - id=whereby_meeting["meetingId"], - room_name=whereby_meeting["roomName"], - room_url=whereby_meeting["roomUrl"], - host_room_url=whereby_meeting["hostRoomUrl"], - start_date=parse_datetime_with_timezone( - whereby_meeting["startDate"] - ), - end_date=parse_datetime_with_timezone(whereby_meeting["endDate"]), + id=meeting_data.meeting_id, + room_name=meeting_data.room_name, + room_url=meeting_data.room_url, + host_room_url=meeting_data.host_room_url, + start_date=current_time, + end_date=end_date, room=room, ) except LockError: @@ -336,6 +347,18 @@ async def rooms_create_meeting( status_code=503, detail="Meeting creation in progress, please try again" ) + if meeting.platform == "daily" and 
room.recording_trigger != "none": + client = create_platform_client(meeting.platform) + token = await client.create_meeting_token( + meeting.room_name, + enable_recording=True, + user_id=user_id, + ) + meeting = meeting.model_copy() + meeting.room_url = add_query_param(meeting.room_url, "t", token) + if meeting.host_room_url: + meeting.host_room_url = add_query_param(meeting.host_room_url, "t", token) + if user_id != room.user_id: meeting.host_room_url = "" @@ -490,7 +513,10 @@ async def rooms_list_active_meetings( room=room, current_time=current_time ) - # Hide host URLs from non-owners + effective_platform = get_platform(room.platform) + for meeting in meetings: + meeting.platform = effective_platform + if user_id != room.user_id: for meeting in meetings: meeting.host_room_url = "" @@ -511,15 +537,10 @@ async def rooms_get_meeting( if not room: raise HTTPException(status_code=404, detail="Room not found") - meeting = await meetings_controller.get_by_id(meeting_id) + meeting = await meetings_controller.get_by_id(meeting_id, room=room) if not meeting: raise HTTPException(status_code=404, detail="Meeting not found") - if meeting.room_id != room.id: - raise HTTPException( - status_code=403, detail="Meeting does not belong to this room" - ) - if user_id != room.user_id and not room.is_shared: meeting.host_room_url = "" @@ -538,16 +559,11 @@ async def rooms_join_meeting( if not room: raise HTTPException(status_code=404, detail="Room not found") - meeting = await meetings_controller.get_by_id(meeting_id) + meeting = await meetings_controller.get_by_id(meeting_id, room=room) if not meeting: raise HTTPException(status_code=404, detail="Meeting not found") - if meeting.room_id != room.id: - raise HTTPException( - status_code=403, detail="Meeting does not belong to this room" - ) - if not meeting.is_active: raise HTTPException(status_code=400, detail="Meeting is not active") @@ -555,7 +571,6 @@ async def rooms_join_meeting( if meeting.end_date <= current_time: raise HTTPException(status_code=400, detail="Meeting has ended") - # Hide host URL from non-owners if user_id != room.user_id: meeting.host_room_url = "" diff --git a/server/reflector/views/transcripts_process.py b/server/reflector/views/transcripts_process.py index f9295765..46e070fd 100644 --- a/server/reflector/views/transcripts_process.py +++ b/server/reflector/views/transcripts_process.py @@ -5,8 +5,12 @@ from fastapi import APIRouter, Depends, HTTPException from pydantic import BaseModel import reflector.auth as auth +from reflector.db.recordings import recordings_controller from reflector.db.transcripts import transcripts_controller from reflector.pipelines.main_file_pipeline import task_pipeline_file_process +from reflector.pipelines.main_multitrack_pipeline import ( + task_pipeline_multitrack_process, +) router = APIRouter() @@ -33,14 +37,35 @@ async def transcript_process( status_code=400, detail="Recording is not ready for processing" ) + # avoid duplicate scheduling for either pipeline if task_is_scheduled_or_active( "reflector.pipelines.main_file_pipeline.task_pipeline_file_process", transcript_id=transcript_id, + ) or task_is_scheduled_or_active( + "reflector.pipelines.main_multitrack_pipeline.task_pipeline_multitrack_process", + transcript_id=transcript_id, ): return ProcessStatus(status="already running") - # schedule a background task process the file - task_pipeline_file_process.delay(transcript_id=transcript_id) + # Determine processing mode strictly from DB to avoid S3 scans + bucket_name = None + track_keys: list[str] = 
[] + + if transcript.recording_id: + recording = await recordings_controller.get_by_id(transcript.recording_id) + if recording: + bucket_name = recording.bucket_name + track_keys = list(getattr(recording, "track_keys", []) or []) + + if bucket_name: + task_pipeline_multitrack_process.delay( + transcript_id=transcript_id, + bucket_name=bucket_name, + track_keys=track_keys, + ) + else: + # Default single-file pipeline + task_pipeline_file_process.delay(transcript_id=transcript_id) return ProcessStatus(status="ok") diff --git a/server/reflector/whereby.py b/server/reflector/whereby.py deleted file mode 100644 index 8b5c18fd..00000000 --- a/server/reflector/whereby.py +++ /dev/null @@ -1,114 +0,0 @@ -import logging -from datetime import datetime - -import httpx - -from reflector.db.rooms import Room -from reflector.settings import settings -from reflector.utils.string import parse_non_empty_string - -logger = logging.getLogger(__name__) - - -def _get_headers(): - api_key = parse_non_empty_string( - settings.WHEREBY_API_KEY, "WHEREBY_API_KEY value is required." - ) - return { - "Content-Type": "application/json; charset=utf-8", - "Authorization": f"Bearer {api_key}", - } - - -TIMEOUT = 10 # seconds - - -def _get_whereby_s3_auth(): - errors = [] - try: - bucket_name = parse_non_empty_string( - settings.RECORDING_STORAGE_AWS_BUCKET_NAME, - "RECORDING_STORAGE_AWS_BUCKET_NAME value is required.", - ) - except Exception as e: - errors.append(e) - try: - key_id = parse_non_empty_string( - settings.AWS_WHEREBY_ACCESS_KEY_ID, - "AWS_WHEREBY_ACCESS_KEY_ID value is required.", - ) - except Exception as e: - errors.append(e) - try: - key_secret = parse_non_empty_string( - settings.AWS_WHEREBY_ACCESS_KEY_SECRET, - "AWS_WHEREBY_ACCESS_KEY_SECRET value is required.", - ) - except Exception as e: - errors.append(e) - if len(errors) > 0: - raise Exception( - f"Failed to get Whereby auth settings: {', '.join(str(e) for e in errors)}" - ) - return bucket_name, key_id, key_secret - - -async def create_meeting(room_name_prefix: str, end_date: datetime, room: Room): - s3_bucket_name, s3_key_id, s3_key_secret = _get_whereby_s3_auth() - data = { - "isLocked": room.is_locked, - "roomNamePrefix": room_name_prefix, - "roomNamePattern": "uuid", - "roomMode": room.room_mode, - "endDate": end_date.isoformat(), - "recording": { - "type": room.recording_type, - "destination": { - "provider": "s3", - "bucket": s3_bucket_name, - "accessKeyId": s3_key_id, - "accessKeySecret": s3_key_secret, - "fileFormat": "mp4", - }, - "startTrigger": room.recording_trigger, - }, - "fields": ["hostRoomUrl"], - } - async with httpx.AsyncClient() as client: - response = await client.post( - f"{settings.WHEREBY_API_URL}/meetings", - headers=_get_headers(), - json=data, - timeout=TIMEOUT, - ) - if response.status_code == 403: - logger.warning( - f"Failed to create meeting: access denied on Whereby: {response.text}" - ) - response.raise_for_status() - return response.json() - - -async def get_room_sessions(room_name: str): - async with httpx.AsyncClient() as client: - response = await client.get( - f"{settings.WHEREBY_API_URL}/insights/room-sessions?roomName={room_name}", - headers=_get_headers(), - timeout=TIMEOUT, - ) - response.raise_for_status() - return response.json() - - -async def upload_logo(room_name: str, logo_path: str): - async with httpx.AsyncClient() as client: - with open(logo_path, "rb") as f: - response = await client.put( - f"{settings.WHEREBY_API_URL}/rooms{room_name}/theme/logo", - headers={ - "Authorization": f"Bearer 
{settings.WHEREBY_API_KEY}", - }, - timeout=TIMEOUT, - files={"image": f}, - ) - response.raise_for_status() diff --git a/server/reflector/worker/cleanup.py b/server/reflector/worker/cleanup.py index 66d45e94..43559e64 100644 --- a/server/reflector/worker/cleanup.py +++ b/server/reflector/worker/cleanup.py @@ -19,7 +19,7 @@ from reflector.db.meetings import meetings from reflector.db.recordings import recordings from reflector.db.transcripts import transcripts, transcripts_controller from reflector.settings import settings -from reflector.storage import get_recordings_storage +from reflector.storage import get_transcripts_storage logger = structlog.get_logger(__name__) @@ -53,8 +53,8 @@ async def delete_single_transcript( ) if recording: try: - await get_recordings_storage().delete_file( - recording["object_key"] + await get_transcripts_storage().delete_file( + recording["object_key"], bucket=recording["bucket_name"] ) except Exception as storage_error: logger.warning( diff --git a/server/reflector/worker/ics_sync.py b/server/reflector/worker/ics_sync.py index faf62f4a..4d72d4ae 100644 --- a/server/reflector/worker/ics_sync.py +++ b/server/reflector/worker/ics_sync.py @@ -7,10 +7,10 @@ from celery.utils.log import get_task_logger from reflector.asynctask import asynctask from reflector.db.calendar_events import calendar_events_controller from reflector.db.meetings import meetings_controller -from reflector.db.rooms import rooms_controller +from reflector.db.rooms import Room, rooms_controller from reflector.redis_cache import RedisAsyncLock from reflector.services.ics_sync import SyncStatus, ics_sync_service -from reflector.whereby import create_meeting, upload_logo +from reflector.video_platforms.factory import create_platform_client, get_platform logger = structlog.wrap_logger(get_task_logger(__name__)) @@ -86,17 +86,17 @@ def _should_sync(room) -> bool: MEETING_DEFAULT_DURATION = timedelta(hours=1) -async def create_upcoming_meetings_for_event(event, create_window, room_id, room): +async def create_upcoming_meetings_for_event(event, create_window, room: Room): if event.start_time <= create_window: return - existing_meeting = await meetings_controller.get_by_calendar_event(event.id) + existing_meeting = await meetings_controller.get_by_calendar_event(event.id, room) if existing_meeting: return logger.info( "Pre-creating meeting for calendar event", - room_id=room_id, + room_id=room.id, event_id=event.id, event_title=event.title, ) @@ -104,20 +104,22 @@ async def create_upcoming_meetings_for_event(event, create_window, room_id, room try: end_date = event.end_time or (event.start_time + MEETING_DEFAULT_DURATION) - whereby_meeting = await create_meeting( + client = create_platform_client(get_platform(room.platform)) + + meeting_data = await client.create_meeting( "", end_date=end_date, room=room, ) - await upload_logo(whereby_meeting["roomName"], "./images/logo.png") + await client.upload_logo(meeting_data.room_name, "./images/logo.png") meeting = await meetings_controller.create( - id=whereby_meeting["meetingId"], - room_name=whereby_meeting["roomName"], - room_url=whereby_meeting["roomUrl"], - host_room_url=whereby_meeting["hostRoomUrl"], - start_date=datetime.fromisoformat(whereby_meeting["startDate"]), - end_date=datetime.fromisoformat(whereby_meeting["endDate"]), + id=meeting_data.meeting_id, + room_name=meeting_data.room_name, + room_url=meeting_data.room_url, + host_room_url=meeting_data.host_room_url, + start_date=event.start_time, + end_date=end_date, room=room, 
calendar_event_id=event.id, calendar_metadata={ @@ -136,7 +138,7 @@ async def create_upcoming_meetings_for_event(event, create_window, room_id, room except Exception as e: logger.error( "Failed to pre-create meeting", - room_id=room_id, + room_id=room.id, event_id=event.id, error=str(e), ) @@ -166,9 +168,7 @@ async def create_upcoming_meetings(): ) for event in events: - await create_upcoming_meetings_for_event( - event, create_window, room.id, room - ) + await create_upcoming_meetings_for_event(event, create_window, room) logger.info("Completed pre-creation check for upcoming meetings") except Exception as e: diff --git a/server/reflector/worker/process.py b/server/reflector/worker/process.py index e660e840..47cbb1cb 100644 --- a/server/reflector/worker/process.py +++ b/server/reflector/worker/process.py @@ -1,5 +1,6 @@ import json import os +import re from datetime import datetime, timezone from urllib.parse import unquote @@ -14,24 +15,32 @@ from redis.exceptions import LockError from reflector.db.meetings import meetings_controller from reflector.db.recordings import Recording, recordings_controller from reflector.db.rooms import rooms_controller -from reflector.db.transcripts import SourceKind, transcripts_controller +from reflector.db.transcripts import ( + SourceKind, + TranscriptParticipant, + transcripts_controller, +) from reflector.pipelines.main_file_pipeline import task_pipeline_file_process from reflector.pipelines.main_live_pipeline import asynctask +from reflector.pipelines.main_multitrack_pipeline import ( + task_pipeline_multitrack_process, +) +from reflector.pipelines.topic_processing import EmptyPipeline +from reflector.processors import AudioFileWriterProcessor +from reflector.processors.audio_waveform_processor import AudioWaveformProcessor from reflector.redis_cache import get_redis_client from reflector.settings import settings -from reflector.whereby import get_room_sessions +from reflector.storage import get_transcripts_storage +from reflector.utils.daily import DailyRoomName, extract_base_room_name +from reflector.video_platforms.factory import create_platform_client +from reflector.video_platforms.whereby_utils import ( + parse_whereby_recording_filename, + room_name_to_whereby_api_room_name, +) logger = structlog.wrap_logger(get_task_logger(__name__)) -def parse_datetime_with_timezone(iso_string: str) -> datetime: - """Parse ISO datetime string and ensure timezone awareness (defaults to UTC if naive).""" - dt = datetime.fromisoformat(iso_string) - if dt.tzinfo is None: - dt = dt.replace(tzinfo=timezone.utc) - return dt - - @shared_task def process_messages(): queue_url = settings.AWS_PROCESS_RECORDING_QUEUE_URL @@ -73,14 +82,16 @@ def process_messages(): logger.error("process_messages", error=str(e)) +# only whereby supported. 
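+#
+# For reference, a sketch of the expected Whereby object key shape, assuming
+# ROOM_PREFIX_SEPARATOR (defined in video_platforms/base.py) is "-":
+#
+#     room, recorded_at = parse_whereby_recording_filename(
+#         "d1b2c3d4-e5f6-7890-abcd-ef1234567890-2025-01-01T12:00:00Z.mp4"
+#     )
+#     # room == "d1b2c3d4-e5f6-7890-abcd-ef1234567890"
+#     # recorded_at == datetime(2025, 1, 1, 12, 0, tzinfo=timezone.utc)
+#     room_name_to_whereby_api_room_name(room)  # "/d1b2c3d4-e5f6-7890-abcd-ef1234567890"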
 @shared_task
 @asynctask
 async def process_recording(bucket_name: str, object_key: str):
     logger.info("Processing recording: %s/%s", bucket_name, object_key)
 
-    # extract a guid and a datetime from the object key
-    room_name = f"/{object_key[:36]}"
-    recorded_at = parse_datetime_with_timezone(object_key[37:57])
+    room_name_part, recorded_at = parse_whereby_recording_filename(object_key)
+
+    # we store whereby api room names, NOT whereby room names
+    room_name = room_name_to_whereby_api_room_name(room_name_part)
 
     meeting = await meetings_controller.get_by_room_name(room_name)
     room = await rooms_controller.get_by_id(meeting.room_id)
@@ -102,6 +113,7 @@ async def process_recording(bucket_name: str, object_key: str):
             transcript,
             {
                 "topics": [],
+                "participants": [],
             },
         )
     else:
@@ -121,15 +133,15 @@
     upload_filename = transcript.data_path / f"upload{extension}"
     upload_filename.parent.mkdir(parents=True, exist_ok=True)
 
-    s3 = boto3.client(
-        "s3",
-        region_name=settings.TRANSCRIPT_STORAGE_AWS_REGION,
-        aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID,
-        aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY,
-    )
+    storage = get_transcripts_storage()
 
-    with open(upload_filename, "wb") as f:
-        s3.download_fileobj(bucket_name, object_key, f)
+    try:
+        with open(upload_filename, "wb") as f:
+            await storage.stream_to_fileobj(object_key, f, bucket=bucket_name)
+    except Exception:
+        # Clean up partial file on stream failure
+        upload_filename.unlink(missing_ok=True)
+        raise
 
     container = av.open(upload_filename.as_posix())
     try:
@@ -146,6 +158,165 @@
     task_pipeline_file_process.delay(transcript_id=transcript.id)
 
 
+@shared_task
+@asynctask
+async def process_multitrack_recording(
+    bucket_name: str,
+    daily_room_name: DailyRoomName,
+    recording_id: str,
+    track_keys: list[str],
+):
+    logger.info(
+        "Processing multitrack recording",
+        bucket=bucket_name,
+        room_name=daily_room_name,
+        recording_id=recording_id,
+        provided_keys=len(track_keys),
+    )
+
+    if not track_keys:
+        logger.warning("No audio track keys provided")
+        return
+
+    tz = timezone.utc
+    recorded_at = datetime.now(tz)
+    try:
+        if track_keys:
+            folder = os.path.basename(os.path.dirname(track_keys[0]))
+            ts_match = re.search(r"(\d{14})$", folder)
+            if ts_match:
+                ts = ts_match.group(1)
+                recorded_at = datetime.strptime(ts, "%Y%m%d%H%M%S").replace(tzinfo=tz)
+    except Exception as e:
+        logger.warning(
+            f"Could not parse recorded_at from keys, using now() {recorded_at}",
+            error=str(e),
+            exc_info=True,
+        )
+
+    meeting = await meetings_controller.get_by_room_name(daily_room_name)
+
+    room_name_base = extract_base_room_name(daily_room_name)
+
+    room = await rooms_controller.get_by_name(room_name_base)
+    if not room:
+        raise Exception(f"Room not found: {room_name_base}")
+
+    if not meeting:
+        raise Exception(f"Meeting not found: {daily_room_name}")
+
+    logger.info(
+        "Found existing Meeting for recording",
+        meeting_id=meeting.id,
+        room_name=daily_room_name,
+        recording_id=recording_id,
+    )
+
+    recording = await recordings_controller.get_by_id(recording_id)
+    if not recording:
+        object_key_dir = os.path.dirname(track_keys[0]) if track_keys else ""
+        recording = await recordings_controller.create(
+            Recording(
+                id=recording_id,
+                bucket_name=bucket_name,
+                object_key=object_key_dir,
+                recorded_at=recorded_at,
+                meeting_id=meeting.id,
+                track_keys=track_keys,
+            )
+        )
+    else:
+        # Recording already exists; assume
metadata was set at creation time + pass + + transcript = await transcripts_controller.get_by_recording_id(recording.id) + if transcript: + await transcripts_controller.update( + transcript, + { + "topics": [], + "participants": [], + }, + ) + else: + transcript = await transcripts_controller.add( + "", + source_kind=SourceKind.ROOM, + source_language="en", + target_language="en", + user_id=room.user_id, + recording_id=recording.id, + share_mode="public", + meeting_id=meeting.id, + room_id=room.id, + ) + + try: + daily_client = create_platform_client("daily") + + id_to_name = {} + id_to_user_id = {} + + mtg_session_id = None + try: + rec_details = await daily_client.get_recording(recording_id) + mtg_session_id = rec_details.get("mtgSessionId") + except Exception as e: + logger.warning( + "Failed to fetch Daily recording details", + error=str(e), + recording_id=recording_id, + exc_info=True, + ) + + if mtg_session_id: + try: + payload = await daily_client.get_meeting_participants(mtg_session_id) + for p in payload.get("data", []): + pid = p.get("participant_id") + name = p.get("user_name") + user_id = p.get("user_id") + if pid and name: + id_to_name[pid] = name + if pid and user_id: + id_to_user_id[pid] = user_id + except Exception as e: + logger.warning( + "Failed to fetch Daily meeting participants", + error=str(e), + mtg_session_id=mtg_session_id, + exc_info=True, + ) + else: + logger.warning( + "No mtgSessionId found for recording; participant names may be generic", + recording_id=recording_id, + ) + + for idx, key in enumerate(track_keys): + base = os.path.basename(key) + m = re.search(r"\d{13,}-([0-9a-fA-F-]{36})-cam-audio-", base) + participant_id = m.group(1) if m else None + + default_name = f"Speaker {idx}" + name = id_to_name.get(participant_id, default_name) + user_id = id_to_user_id.get(participant_id) + + participant = TranscriptParticipant( + id=participant_id, speaker=idx, name=name, user_id=user_id + ) + await transcripts_controller.upsert_participant(transcript, participant) + + except Exception as e: + logger.warning("Failed to map participant names", error=str(e), exc_info=True) + + task_pipeline_multitrack_process.delay( + transcript_id=transcript.id, + bucket_name=bucket_name, + track_keys=track_keys, + ) + + @shared_task @asynctask async def process_meetings(): @@ -164,7 +335,7 @@ async def process_meetings(): Uses distributed locking to prevent race conditions when multiple workers process the same meeting simultaneously. 
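+
+    A sketch of the per-meeting lock dance, written against redis-py's
+    asyncio lock (the code below uses the project's RedisAsyncLock wrapper;
+    the key name and timeout here are illustrative):
+
+        lock = redis_client.lock(f"meeting:{meeting.id}", timeout=60)
+        if await lock.acquire(blocking=False):
+            try:
+                sessions = await client.get_room_sessions(meeting.room_name)
+                await lock.extend(60)  # slow call done; confirm we still hold it
+            finally:
+                await lock.release()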
""" - logger.info("Processing meetings") + logger.debug("Processing meetings") meetings = await meetings_controller.get_all_active() current_time = datetime.now(timezone.utc) redis_client = get_redis_client() @@ -189,7 +360,8 @@ async def process_meetings(): end_date = end_date.replace(tzinfo=timezone.utc) # This API call could be slow, extend lock if needed - response = await get_room_sessions(meeting.room_name) + client = create_platform_client(meeting.platform) + room_sessions = await client.get_room_sessions(meeting.room_name) try: # Extend lock after slow operation to ensure we still hold it @@ -198,7 +370,6 @@ async def process_meetings(): logger_.warning("Lost lock for meeting, skipping") continue - room_sessions = response.get("results", []) has_active_sessions = room_sessions and any( rs["endedAt"] is None for rs in room_sessions ) @@ -231,69 +402,120 @@ async def process_meetings(): except LockError: pass # Lock already released or expired - logger.info( + logger.debug( "Processed meetings finished", processed_count=processed_count, skipped_count=skipped_count, ) +async def convert_audio_and_waveform(transcript) -> None: + """Convert WebM to MP3 and generate waveform for Daily.co recordings. + + This bypasses the full file pipeline which would overwrite stub data. + """ + try: + logger.info( + "Converting audio to MP3 and generating waveform", + transcript_id=transcript.id, + ) + + upload_path = transcript.data_path / "upload.webm" + mp3_path = transcript.audio_mp3_filename + + # Convert WebM to MP3 + mp3_writer = AudioFileWriterProcessor(path=mp3_path) + + container = av.open(str(upload_path)) + for frame in container.decode(audio=0): + await mp3_writer.push(frame) + await mp3_writer.flush() + container.close() + + logger.info( + "Converted WebM to MP3", + transcript_id=transcript.id, + mp3_size=mp3_path.stat().st_size, + ) + + waveform_processor = AudioWaveformProcessor( + audio_path=mp3_path, + waveform_path=transcript.audio_waveform_filename, + ) + waveform_processor.set_pipeline(EmptyPipeline(logger)) + await waveform_processor.flush() + + logger.info( + "Generated waveform", + transcript_id=transcript.id, + waveform_path=transcript.audio_waveform_filename, + ) + + # Update transcript status to ended (successful) + await transcripts_controller.update(transcript, {"status": "ended"}) + + except Exception as e: + logger.error( + "Failed to convert audio or generate waveform", + transcript_id=transcript.id, + error=str(e), + ) + # Keep status as uploaded even if conversion fails + pass + + @shared_task @asynctask async def reprocess_failed_recordings(): """ - Find recordings in the S3 bucket and check if they have proper transcriptions. + Find recordings in Whereby S3 bucket and check if they have proper transcriptions. If not, requeue them for processing. - """ - logger.info("Checking for recordings that need processing or reprocessing") - s3 = boto3.client( - "s3", - region_name=settings.TRANSCRIPT_STORAGE_AWS_REGION, - aws_access_key_id=settings.TRANSCRIPT_STORAGE_AWS_ACCESS_KEY_ID, - aws_secret_access_key=settings.TRANSCRIPT_STORAGE_AWS_SECRET_ACCESS_KEY, - ) + Note: Daily.co recordings are processed via webhooks, not this cron job. + """ + logger.info("Checking Whereby recordings that need processing or reprocessing") + + if not settings.WHEREBY_STORAGE_AWS_BUCKET_NAME: + raise ValueError( + "WHEREBY_STORAGE_AWS_BUCKET_NAME required for Whereby recording reprocessing. " + "Set WHEREBY_STORAGE_AWS_BUCKET_NAME environment variable." 
+ ) + + storage = get_transcripts_storage() + bucket_name = settings.WHEREBY_STORAGE_AWS_BUCKET_NAME reprocessed_count = 0 try: - paginator = s3.get_paginator("list_objects_v2") - bucket_name = settings.RECORDING_STORAGE_AWS_BUCKET_NAME - pages = paginator.paginate(Bucket=bucket_name) + object_keys = await storage.list_objects(prefix="", bucket=bucket_name) - for page in pages: - if "Contents" not in page: + for object_key in object_keys: + if not object_key.endswith(".mp4"): continue - for obj in page["Contents"]: - object_key = obj["Key"] + recording = await recordings_controller.get_by_object_key( + bucket_name, object_key + ) + if not recording: + logger.info(f"Queueing recording for processing: {object_key}") + process_recording.delay(bucket_name, object_key) + reprocessed_count += 1 + continue - if not (object_key.endswith(".mp4")): - continue - - recording = await recordings_controller.get_by_object_key( - bucket_name, object_key + transcript = None + try: + transcript = await transcripts_controller.get_by_recording_id( + recording.id + ) + except ValidationError: + await transcripts_controller.remove_by_recording_id(recording.id) + logger.warning( + f"Removed invalid transcript for recording: {recording.id}" ) - if not recording: - logger.info(f"Queueing recording for processing: {object_key}") - process_recording.delay(bucket_name, object_key) - reprocessed_count += 1 - continue - transcript = None - try: - transcript = await transcripts_controller.get_by_recording_id( - recording.id - ) - except ValidationError: - await transcripts_controller.remove_by_recording_id(recording.id) - logger.warning( - f"Removed invalid transcript for recording: {recording.id}" - ) - - if transcript is None or transcript.status == "error": - logger.info(f"Queueing recording for processing: {object_key}") - process_recording.delay(bucket_name, object_key) - reprocessed_count += 1 + if transcript is None or transcript.status == "error": + logger.info(f"Queueing recording for processing: {object_key}") + process_recording.delay(bucket_name, object_key) + reprocessed_count += 1 except Exception as e: logger.error(f"Error checking S3 bucket: {str(e)}") diff --git a/server/scripts/recreate_daily_webhook.py b/server/scripts/recreate_daily_webhook.py new file mode 100644 index 00000000..a378baf2 --- /dev/null +++ b/server/scripts/recreate_daily_webhook.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 + +import asyncio +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +import httpx + +from reflector.settings import settings + + +async def setup_webhook(webhook_url: str): + """ + Create or update Daily.co webhook for this environment. + Uses DAILY_WEBHOOK_UUID to identify existing webhook. 
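+
+    Usage (the endpoint value is an example):
+
+        DAILY_API_KEY=... DAILY_WEBHOOK_SECRET=... \
+            python scripts/recreate_daily_webhook.py https://example.com/v1/daily/webhook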
+ """ + if not settings.DAILY_API_KEY: + print("Error: DAILY_API_KEY not set") + return 1 + + headers = { + "Authorization": f"Bearer {settings.DAILY_API_KEY}", + "Content-Type": "application/json", + } + + webhook_data = { + "url": webhook_url, + "eventTypes": [ + "participant.joined", + "participant.left", + "recording.started", + "recording.ready-to-download", + "recording.error", + ], + "hmac": settings.DAILY_WEBHOOK_SECRET, + } + + async with httpx.AsyncClient() as client: + webhook_uuid = settings.DAILY_WEBHOOK_UUID + + if webhook_uuid: + # Update existing webhook + print(f"Updating existing webhook {webhook_uuid}...") + try: + resp = await client.patch( + f"https://api.daily.co/v1/webhooks/{webhook_uuid}", + headers=headers, + json=webhook_data, + ) + resp.raise_for_status() + result = resp.json() + print(f"✓ Updated webhook {result['uuid']} (state: {result['state']})") + print(f" URL: {result['url']}") + return 0 + except httpx.HTTPStatusError as e: + if e.response.status_code == 404: + print(f"Webhook {webhook_uuid} not found, creating new one...") + webhook_uuid = None # Fall through to creation + else: + print(f"Error updating webhook: {e}") + return 1 + + if not webhook_uuid: + # Create new webhook + print("Creating new webhook...") + resp = await client.post( + "https://api.daily.co/v1/webhooks", headers=headers, json=webhook_data + ) + resp.raise_for_status() + result = resp.json() + webhook_uuid = result["uuid"] + + print(f"✓ Created webhook {webhook_uuid} (state: {result['state']})") + print(f" URL: {result['url']}") + print() + print("=" * 60) + print("IMPORTANT: Add this to your environment variables:") + print("=" * 60) + print(f"DAILY_WEBHOOK_UUID: {webhook_uuid}") + print("=" * 60) + print() + + # Try to write UUID to .env file + env_file = Path(__file__).parent.parent / ".env" + if env_file.exists(): + lines = env_file.read_text().splitlines() + updated = False + + # Update existing DAILY_WEBHOOK_UUID line or add it + for i, line in enumerate(lines): + if line.startswith("DAILY_WEBHOOK_UUID="): + lines[i] = f"DAILY_WEBHOOK_UUID={webhook_uuid}" + updated = True + break + + if not updated: + lines.append(f"DAILY_WEBHOOK_UUID={webhook_uuid}") + + env_file.write_text("\n".join(lines) + "\n") + print(f"✓ Also saved to local .env file") + else: + print(f"⚠ Local .env file not found - please add manually") + + return 0 + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python recreate_daily_webhook.py ") + print( + "Example: python recreate_daily_webhook.py https://example.com/v1/daily/webhook" + ) + print() + print("Behavior:") + print(" - If DAILY_WEBHOOK_UUID set: Updates existing webhook") + print( + " - If DAILY_WEBHOOK_UUID empty: Creates new webhook, saves UUID to .env" + ) + sys.exit(1) + + sys.exit(asyncio.run(setup_webhook(sys.argv[1]))) diff --git a/server/tests/conftest.py b/server/tests/conftest.py index a70604ae..7d6c4302 100644 --- a/server/tests/conftest.py +++ b/server/tests/conftest.py @@ -5,6 +5,18 @@ from unittest.mock import patch import pytest +from reflector.schemas.platform import WHEREBY_PLATFORM + + +@pytest.fixture(scope="session", autouse=True) +def register_mock_platform(): + from mocks.mock_platform import MockPlatformClient + + from reflector.video_platforms.registry import register_platform + + register_platform(WHEREBY_PLATFORM, MockPlatformClient) + yield + @pytest.fixture(scope="session", autouse=True) def settings_configuration(): diff --git a/server/tests/mocks/__init__.py b/server/tests/mocks/__init__.py new file 
mode 100644 index 00000000..e69de29b diff --git a/server/tests/mocks/mock_platform.py b/server/tests/mocks/mock_platform.py new file mode 100644 index 00000000..0f84a271 --- /dev/null +++ b/server/tests/mocks/mock_platform.py @@ -0,0 +1,112 @@ +import uuid +from datetime import datetime +from typing import Any, Dict, Literal, Optional + +from reflector.db.rooms import Room +from reflector.video_platforms.base import ( + ROOM_PREFIX_SEPARATOR, + MeetingData, + VideoPlatformClient, + VideoPlatformConfig, +) + +MockPlatform = Literal["mock"] + + +class MockPlatformClient(VideoPlatformClient): + PLATFORM_NAME: MockPlatform = "mock" + + def __init__(self, config: VideoPlatformConfig): + super().__init__(config) + self._rooms: Dict[str, Dict[str, Any]] = {} + self._webhook_calls: list[Dict[str, Any]] = [] + + async def create_meeting( + self, room_name_prefix: str, end_date: datetime, room: Room + ) -> MeetingData: + meeting_id = str(uuid.uuid4()) + room_name = f"{room_name_prefix}{ROOM_PREFIX_SEPARATOR}{meeting_id[:8]}" + room_url = f"https://mock.video/{room_name}" + host_room_url = f"{room_url}?host=true" + + self._rooms[room_name] = { + "id": meeting_id, + "name": room_name, + "url": room_url, + "host_url": host_room_url, + "end_date": end_date, + "room": room, + "participants": [], + "is_active": True, + } + + return MeetingData.model_construct( + meeting_id=meeting_id, + room_name=room_name, + room_url=room_url, + host_room_url=host_room_url, + platform="whereby", + extra_data={"mock": True}, + ) + + async def get_room_sessions(self, room_name: str) -> Dict[str, Any]: + if room_name not in self._rooms: + return {"error": "Room not found"} + + room_data = self._rooms[room_name] + return { + "roomName": room_name, + "sessions": [ + { + "sessionId": room_data["id"], + "startTime": datetime.utcnow().isoformat(), + "participants": room_data["participants"], + "isActive": room_data["is_active"], + } + ], + } + + async def delete_room(self, room_name: str) -> bool: + if room_name in self._rooms: + self._rooms[room_name]["is_active"] = False + return True + return False + + async def upload_logo(self, room_name: str, logo_path: str) -> bool: + if room_name in self._rooms: + self._rooms[room_name]["logo_path"] = logo_path + return True + return False + + def verify_webhook_signature( + self, body: bytes, signature: str, timestamp: Optional[str] = None + ) -> bool: + return signature == "valid" + + def add_participant( + self, room_name: str, participant_id: str, participant_name: str + ): + if room_name in self._rooms: + self._rooms[room_name]["participants"].append( + { + "id": participant_id, + "name": participant_name, + "joined_at": datetime.utcnow().isoformat(), + } + ) + + def trigger_webhook(self, event_type: str, data: Dict[str, Any]): + self._webhook_calls.append( + { + "type": event_type, + "data": data, + "timestamp": datetime.utcnow().isoformat(), + } + ) + + def get_webhook_calls(self) -> list[Dict[str, Any]]: + return self._webhook_calls.copy() + + def clear_data(self): + self._rooms.clear() + self._webhook_calls.clear() diff --git a/server/tests/test_cleanup.py b/server/tests/test_cleanup.py index 2cb8614c..0c968941 100644 --- a/server/tests/test_cleanup.py +++ b/server/tests/test_cleanup.py @@ -139,14 +139,10 @@ async def test_cleanup_deletes_associated_meeting_and_recording(): mock_settings.PUBLIC_DATA_RETENTION_DAYS = 7 # Mock storage deletion - with patch("reflector.db.transcripts.get_transcripts_storage") as mock_storage: + with 
patch("reflector.worker.cleanup.get_transcripts_storage") as mock_storage: mock_storage.return_value.delete_file = AsyncMock() - with patch( - "reflector.worker.cleanup.get_recordings_storage" - ) as mock_rec_storage: - mock_rec_storage.return_value.delete_file = AsyncMock() - result = await cleanup_old_public_data() + result = await cleanup_old_public_data() # Check results assert result["transcripts_deleted"] == 1 diff --git a/server/tests/test_consent_multitrack.py b/server/tests/test_consent_multitrack.py new file mode 100644 index 00000000..15948708 --- /dev/null +++ b/server/tests/test_consent_multitrack.py @@ -0,0 +1,330 @@ +from datetime import datetime, timezone +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from reflector.db.meetings import ( + MeetingConsent, + meeting_consent_controller, + meetings_controller, +) +from reflector.db.recordings import Recording, recordings_controller +from reflector.db.rooms import rooms_controller +from reflector.db.transcripts import SourceKind, transcripts_controller +from reflector.pipelines.main_live_pipeline import cleanup_consent + + +@pytest.mark.asyncio +async def test_consent_cleanup_deletes_multitrack_files(): + room = await rooms_controller.add( + name="Test Room", + user_id="test-user", + zulip_auto_post=False, + zulip_stream="", + zulip_topic="", + is_locked=False, + room_mode="normal", + recording_type="cloud", + recording_trigger="automatic", + is_shared=False, + platform="daily", + ) + + # Create meeting + meeting = await meetings_controller.create( + id="test-multitrack-meeting", + room_name="test-room-20250101120000", + room_url="https://test.daily.co/test-room", + host_room_url="https://test.daily.co/test-room", + start_date=datetime.now(timezone.utc), + end_date=datetime.now(timezone.utc), + room=room, + ) + + track_keys = [ + "recordings/test-room-20250101120000/track-0.webm", + "recordings/test-room-20250101120000/track-1.webm", + "recordings/test-room-20250101120000/track-2.webm", + ] + recording = await recordings_controller.create( + Recording( + bucket_name="test-bucket", + object_key="recordings/test-room-20250101120000", # Folder path + recorded_at=datetime.now(timezone.utc), + meeting_id=meeting.id, + track_keys=track_keys, + ) + ) + + # Create transcript + transcript = await transcripts_controller.add( + name="Test Multitrack Transcript", + source_kind=SourceKind.ROOM, + recording_id=recording.id, + meeting_id=meeting.id, + ) + + # Add consent denial + await meeting_consent_controller.upsert( + MeetingConsent( + meeting_id=meeting.id, + user_id="test-user", + consent_given=False, + consent_timestamp=datetime.now(timezone.utc), + ) + ) + + # Mock get_transcripts_storage (master credentials with bucket override) + with patch( + "reflector.pipelines.main_live_pipeline.get_transcripts_storage" + ) as mock_get_transcripts_storage: + mock_master_storage = MagicMock() + mock_master_storage.delete_file = AsyncMock() + mock_get_transcripts_storage.return_value = mock_master_storage + + await cleanup_consent(transcript_id=transcript.id) + + # Verify master storage was used with bucket override for all track keys + assert mock_master_storage.delete_file.call_count == 3 + deleted_keys = [] + for call_args in mock_master_storage.delete_file.call_args_list: + key = call_args[0][0] + bucket_kwarg = call_args[1].get("bucket") + deleted_keys.append(key) + assert bucket_kwarg == "test-bucket" # Verify bucket override! 
+ assert set(deleted_keys) == set(track_keys) + + updated_transcript = await transcripts_controller.get_by_id(transcript.id) + assert updated_transcript.audio_deleted is True + + +@pytest.mark.asyncio +async def test_consent_cleanup_handles_missing_track_keys(): + room = await rooms_controller.add( + name="Test Room 2", + user_id="test-user", + zulip_auto_post=False, + zulip_stream="", + zulip_topic="", + is_locked=False, + room_mode="normal", + recording_type="cloud", + recording_trigger="automatic", + is_shared=False, + platform="daily", + ) + + # Create meeting + meeting = await meetings_controller.create( + id="test-multitrack-meeting-2", + room_name="test-room-20250101120001", + room_url="https://test.daily.co/test-room-2", + host_room_url="https://test.daily.co/test-room-2", + start_date=datetime.now(timezone.utc), + end_date=datetime.now(timezone.utc), + room=room, + ) + + recording = await recordings_controller.create( + Recording( + bucket_name="test-bucket", + object_key="recordings/old-style-recording.mp4", + recorded_at=datetime.now(timezone.utc), + meeting_id=meeting.id, + track_keys=None, + ) + ) + + transcript = await transcripts_controller.add( + name="Test Old-Style Transcript", + source_kind=SourceKind.ROOM, + recording_id=recording.id, + meeting_id=meeting.id, + ) + + # Add consent denial + await meeting_consent_controller.upsert( + MeetingConsent( + meeting_id=meeting.id, + user_id="test-user-2", + consent_given=False, + consent_timestamp=datetime.now(timezone.utc), + ) + ) + + # Mock get_transcripts_storage (master credentials with bucket override) + with patch( + "reflector.pipelines.main_live_pipeline.get_transcripts_storage" + ) as mock_get_transcripts_storage: + mock_master_storage = MagicMock() + mock_master_storage.delete_file = AsyncMock() + mock_get_transcripts_storage.return_value = mock_master_storage + + await cleanup_consent(transcript_id=transcript.id) + + # Verify master storage was used with bucket override + assert mock_master_storage.delete_file.call_count == 1 + call_args = mock_master_storage.delete_file.call_args + assert call_args[0][0] == recording.object_key + assert call_args[1].get("bucket") == "test-bucket" # Verify bucket override! 
+ + +@pytest.mark.asyncio +async def test_consent_cleanup_empty_track_keys_falls_back(): + room = await rooms_controller.add( + name="Test Room 3", + user_id="test-user", + zulip_auto_post=False, + zulip_stream="", + zulip_topic="", + is_locked=False, + room_mode="normal", + recording_type="cloud", + recording_trigger="automatic", + is_shared=False, + platform="daily", + ) + + # Create meeting + meeting = await meetings_controller.create( + id="test-multitrack-meeting-3", + room_name="test-room-20250101120002", + room_url="https://test.daily.co/test-room-3", + host_room_url="https://test.daily.co/test-room-3", + start_date=datetime.now(timezone.utc), + end_date=datetime.now(timezone.utc), + room=room, + ) + + recording = await recordings_controller.create( + Recording( + bucket_name="test-bucket", + object_key="recordings/fallback-recording.mp4", + recorded_at=datetime.now(timezone.utc), + meeting_id=meeting.id, + track_keys=[], + ) + ) + + transcript = await transcripts_controller.add( + name="Test Empty Track Keys Transcript", + source_kind=SourceKind.ROOM, + recording_id=recording.id, + meeting_id=meeting.id, + ) + + # Add consent denial + await meeting_consent_controller.upsert( + MeetingConsent( + meeting_id=meeting.id, + user_id="test-user-3", + consent_given=False, + consent_timestamp=datetime.now(timezone.utc), + ) + ) + + # Mock get_transcripts_storage (master credentials with bucket override) + with patch( + "reflector.pipelines.main_live_pipeline.get_transcripts_storage" + ) as mock_get_transcripts_storage: + mock_master_storage = MagicMock() + mock_master_storage.delete_file = AsyncMock() + mock_get_transcripts_storage.return_value = mock_master_storage + + # Run cleanup + await cleanup_consent(transcript_id=transcript.id) + + # Verify master storage was used with bucket override + assert mock_master_storage.delete_file.call_count == 1 + call_args = mock_master_storage.delete_file.call_args + assert call_args[0][0] == recording.object_key + assert call_args[1].get("bucket") == "test-bucket" # Verify bucket override! 
+ + +@pytest.mark.asyncio +async def test_consent_cleanup_partial_failure_doesnt_mark_deleted(): + room = await rooms_controller.add( + name="Test Room 4", + user_id="test-user", + zulip_auto_post=False, + zulip_stream="", + zulip_topic="", + is_locked=False, + room_mode="normal", + recording_type="cloud", + recording_trigger="automatic", + is_shared=False, + platform="daily", + ) + + # Create meeting + meeting = await meetings_controller.create( + id="test-multitrack-meeting-4", + room_name="test-room-20250101120003", + room_url="https://test.daily.co/test-room-4", + host_room_url="https://test.daily.co/test-room-4", + start_date=datetime.now(timezone.utc), + end_date=datetime.now(timezone.utc), + room=room, + ) + + track_keys = [ + "recordings/test-room-20250101120003/track-0.webm", + "recordings/test-room-20250101120003/track-1.webm", + "recordings/test-room-20250101120003/track-2.webm", + ] + recording = await recordings_controller.create( + Recording( + bucket_name="test-bucket", + object_key="recordings/test-room-20250101120003", + recorded_at=datetime.now(timezone.utc), + meeting_id=meeting.id, + track_keys=track_keys, + ) + ) + + # Create transcript + transcript = await transcripts_controller.add( + name="Test Partial Failure Transcript", + source_kind=SourceKind.ROOM, + recording_id=recording.id, + meeting_id=meeting.id, + ) + + # Add consent denial + await meeting_consent_controller.upsert( + MeetingConsent( + meeting_id=meeting.id, + user_id="test-user-4", + consent_given=False, + consent_timestamp=datetime.now(timezone.utc), + ) + ) + + # Mock get_transcripts_storage (master credentials with bucket override) with partial failure + with patch( + "reflector.pipelines.main_live_pipeline.get_transcripts_storage" + ) as mock_get_transcripts_storage: + mock_master_storage = MagicMock() + + call_count = 0 + + async def delete_side_effect(key, bucket=None): + nonlocal call_count + call_count += 1 + if call_count == 2: + raise Exception("S3 deletion failed") + + mock_master_storage.delete_file = AsyncMock(side_effect=delete_side_effect) + mock_get_transcripts_storage.return_value = mock_master_storage + + await cleanup_consent(transcript_id=transcript.id) + + # Verify master storage was called with bucket override + assert mock_master_storage.delete_file.call_count == 3 + + updated_transcript = await transcripts_controller.get_by_id(transcript.id) + assert ( + updated_transcript.audio_deleted is None + or updated_transcript.audio_deleted is False + ) diff --git a/server/tests/test_pipeline_main_file.py b/server/tests/test_pipeline_main_file.py index f86dc85d..825c8389 100644 --- a/server/tests/test_pipeline_main_file.py +++ b/server/tests/test_pipeline_main_file.py @@ -127,18 +127,27 @@ async def mock_storage(): from reflector.storage.base import Storage class TestStorage(Storage): - async def _put_file(self, path, data): + async def _put_file(self, path, data, bucket=None): return None - async def _get_file_url(self, path): + async def _get_file_url( + self, + path, + operation: str = "get_object", + expires_in: int = 3600, + bucket=None, + ): return f"http://test-storage/{path}" - async def _get_file(self, path): + async def _get_file(self, path, bucket=None): return b"test_audio_data" - async def _delete_file(self, path): + async def _delete_file(self, path, bucket=None): return None + async def _stream_to_fileobj(self, path, fileobj, bucket=None): + fileobj.write(b"test_audio_data") + storage = TestStorage() # Add mock tracking for verification storage._put_file = 
AsyncMock(side_effect=storage._put_file) @@ -181,7 +190,7 @@ async def mock_waveform_processor(): async def mock_topic_detector(): """Mock TranscriptTopicDetectorProcessor""" with patch( - "reflector.pipelines.main_file_pipeline.TranscriptTopicDetectorProcessor" + "reflector.pipelines.topic_processing.TranscriptTopicDetectorProcessor" ) as mock_topic_class: mock_topic = AsyncMock() mock_topic.set_pipeline = MagicMock() @@ -218,7 +227,7 @@ async def mock_topic_detector(): async def mock_title_processor(): """Mock TranscriptFinalTitleProcessor""" with patch( - "reflector.pipelines.main_file_pipeline.TranscriptFinalTitleProcessor" + "reflector.pipelines.topic_processing.TranscriptFinalTitleProcessor" ) as mock_title_class: mock_title = AsyncMock() mock_title.set_pipeline = MagicMock() @@ -247,7 +256,7 @@ async def mock_title_processor(): async def mock_summary_processor(): """Mock TranscriptFinalSummaryProcessor""" with patch( - "reflector.pipelines.main_file_pipeline.TranscriptFinalSummaryProcessor" + "reflector.pipelines.topic_processing.TranscriptFinalSummaryProcessor" ) as mock_summary_class: mock_summary = AsyncMock() mock_summary.set_pipeline = MagicMock() diff --git a/server/tests/test_room_ics_api.py b/server/tests/test_room_ics_api.py index 8e7cf76f..79512995 100644 --- a/server/tests/test_room_ics_api.py +++ b/server/tests/test_room_ics_api.py @@ -48,6 +48,7 @@ async def test_create_room_with_ics_fields(authenticated_client): "ics_url": "https://calendar.example.com/test.ics", "ics_fetch_interval": 600, "ics_enabled": True, + "platform": "daily", }, ) assert response.status_code == 200 @@ -75,6 +76,7 @@ async def test_update_room_ics_configuration(authenticated_client): "is_shared": False, "webhook_url": "", "webhook_secret": "", + "platform": "daily", }, ) assert response.status_code == 200 @@ -111,6 +113,7 @@ async def test_trigger_ics_sync(authenticated_client): is_shared=False, ics_url="https://calendar.example.com/api.ics", ics_enabled=True, + platform="daily", ) cal = Calendar() @@ -154,6 +157,7 @@ async def test_trigger_ics_sync_unauthorized(client): is_shared=False, ics_url="https://calendar.example.com/api.ics", ics_enabled=True, + platform="daily", ) response = await client.post(f"/rooms/{room.name}/ics/sync") @@ -176,6 +180,7 @@ async def test_trigger_ics_sync_not_configured(authenticated_client): recording_trigger="automatic-2nd-participant", is_shared=False, ics_enabled=False, + platform="daily", ) response = await client.post(f"/rooms/{room.name}/ics/sync") @@ -200,6 +205,7 @@ async def test_get_ics_status(authenticated_client): ics_url="https://calendar.example.com/status.ics", ics_enabled=True, ics_fetch_interval=300, + platform="daily", ) now = datetime.now(timezone.utc) @@ -231,6 +237,7 @@ async def test_get_ics_status_unauthorized(client): is_shared=False, ics_url="https://calendar.example.com/status.ics", ics_enabled=True, + platform="daily", ) response = await client.get(f"/rooms/{room.name}/ics/status") @@ -252,6 +259,7 @@ async def test_list_room_meetings(authenticated_client): recording_type="cloud", recording_trigger="automatic-2nd-participant", is_shared=False, + platform="daily", ) now = datetime.now(timezone.utc) @@ -298,6 +306,7 @@ async def test_list_room_meetings_non_owner(client): recording_type="cloud", recording_trigger="automatic-2nd-participant", is_shared=False, + platform="daily", ) event = CalendarEvent( @@ -334,6 +343,7 @@ async def test_list_upcoming_meetings(authenticated_client): recording_type="cloud", 
recording_trigger="automatic-2nd-participant", is_shared=False, + platform="daily", ) now = datetime.now(timezone.utc) diff --git a/server/tests/test_storage.py b/server/tests/test_storage.py new file mode 100644 index 00000000..ccfc3dbd --- /dev/null +++ b/server/tests/test_storage.py @@ -0,0 +1,321 @@ +"""Tests for storage abstraction layer.""" + +import io +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from botocore.exceptions import ClientError + +from reflector.storage.base import StoragePermissionError +from reflector.storage.storage_aws import AwsStorage + + +@pytest.mark.asyncio +async def test_aws_storage_stream_to_fileobj(): + """Test that AWS storage can stream directly to a file object without loading into memory.""" + # Setup + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock download_fileobj to write data + async def mock_download(Bucket, Key, Fileobj, **kwargs): + Fileobj.write(b"chunk1chunk2") + + mock_client = AsyncMock() + mock_client.download_fileobj = AsyncMock(side_effect=mock_download) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + # Patch the session client + with patch.object(storage.session, "client", return_value=mock_client): + # Create a file-like object to stream to + output = io.BytesIO() + + # Act - stream to file object + await storage.stream_to_fileobj("test-file.mp4", output, bucket="test-bucket") + + # Assert + mock_client.download_fileobj.assert_called_once_with( + Bucket="test-bucket", Key="test-file.mp4", Fileobj=output + ) + + # Check that data was written to output + output.seek(0) + assert output.read() == b"chunk1chunk2" + + +@pytest.mark.asyncio +async def test_aws_storage_stream_to_fileobj_with_folder(): + """Test streaming with folder prefix in bucket name.""" + storage = AwsStorage( + aws_bucket_name="test-bucket/recordings", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + async def mock_download(Bucket, Key, Fileobj, **kwargs): + Fileobj.write(b"data") + + mock_client = AsyncMock() + mock_client.download_fileobj = AsyncMock(side_effect=mock_download) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + output = io.BytesIO() + await storage.stream_to_fileobj("file.mp4", output, bucket="other-bucket") + + # Should use folder prefix from instance config + mock_client.download_fileobj.assert_called_once_with( + Bucket="other-bucket", Key="recordings/file.mp4", Fileobj=output + ) + + +@pytest.mark.asyncio +async def test_storage_base_class_stream_to_fileobj(): + """Test that base Storage class has stream_to_fileobj method.""" + from reflector.storage.base import Storage + + # Verify method exists in base class + assert hasattr(Storage, "stream_to_fileobj") + + # Create a mock storage instance + storage = MagicMock(spec=Storage) + storage.stream_to_fileobj = AsyncMock() + + # Should be callable + await storage.stream_to_fileobj("file.mp4", io.BytesIO()) + storage.stream_to_fileobj.assert_called_once() + + +@pytest.mark.asyncio +async def test_aws_storage_stream_uses_download_fileobj(): + """Test that download_fileobj is called correctly.""" + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + 
aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + async def mock_download(Bucket, Key, Fileobj, **kwargs): + Fileobj.write(b"data") + + mock_client = AsyncMock() + mock_client.download_fileobj = AsyncMock(side_effect=mock_download) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + output = io.BytesIO() + await storage.stream_to_fileobj("test.mp4", output) + + # Verify download_fileobj was called with correct parameters + mock_client.download_fileobj.assert_called_once_with( + Bucket="test-bucket", Key="test.mp4", Fileobj=output + ) + + +@pytest.mark.asyncio +async def test_aws_storage_handles_access_denied_error(): + """Test that AccessDenied errors are caught and wrapped in StoragePermissionError.""" + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock ClientError with AccessDenied + error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}} + mock_client = AsyncMock() + mock_client.put_object = AsyncMock( + side_effect=ClientError(error_response, "PutObject") + ) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + with pytest.raises(StoragePermissionError) as exc_info: + await storage.put_file("test.txt", b"data") + + # Verify error message contains expected information + error_msg = str(exc_info.value) + assert "AccessDenied" in error_msg + assert "default bucket 'test-bucket'" in error_msg + assert "S3 upload failed" in error_msg + + +@pytest.mark.asyncio +async def test_aws_storage_handles_no_such_bucket_error(): + """Test that NoSuchBucket errors are caught and wrapped in StoragePermissionError.""" + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock ClientError with NoSuchBucket + error_response = { + "Error": { + "Code": "NoSuchBucket", + "Message": "The specified bucket does not exist", + } + } + mock_client = AsyncMock() + mock_client.delete_object = AsyncMock( + side_effect=ClientError(error_response, "DeleteObject") + ) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + with pytest.raises(StoragePermissionError) as exc_info: + await storage.delete_file("test.txt") + + # Verify error message contains expected information + error_msg = str(exc_info.value) + assert "NoSuchBucket" in error_msg + assert "default bucket 'test-bucket'" in error_msg + assert "S3 delete failed" in error_msg + + +@pytest.mark.asyncio +async def test_aws_storage_error_message_with_bucket_override(): + """Test that error messages correctly show overridden bucket.""" + storage = AwsStorage( + aws_bucket_name="default-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock ClientError with AccessDenied + error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}} + mock_client = AsyncMock() + mock_client.get_object = AsyncMock( + side_effect=ClientError(error_response, "GetObject") + ) + mock_client.__aenter__ = 
AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + with pytest.raises(StoragePermissionError) as exc_info: + await storage.get_file("test.txt", bucket="override-bucket") + + # Verify error message shows overridden bucket, not default + error_msg = str(exc_info.value) + assert "overridden bucket 'override-bucket'" in error_msg + assert "default-bucket" not in error_msg + assert "S3 download failed" in error_msg + + +@pytest.mark.asyncio +async def test_aws_storage_reraises_non_handled_errors(): + """Test that non-AccessDenied/NoSuchBucket errors are re-raised as-is.""" + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock ClientError with different error code + error_response = { + "Error": {"Code": "InternalError", "Message": "Internal Server Error"} + } + mock_client = AsyncMock() + mock_client.put_object = AsyncMock( + side_effect=ClientError(error_response, "PutObject") + ) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + # Should raise ClientError, not StoragePermissionError + with pytest.raises(ClientError) as exc_info: + await storage.put_file("test.txt", b"data") + + # Verify it's the original ClientError + assert exc_info.value.response["Error"]["Code"] == "InternalError" + + +@pytest.mark.asyncio +async def test_aws_storage_presign_url_handles_errors(): + """Test that presigned URL generation handles permission errors.""" + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock ClientError with AccessDenied during presign operation + error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}} + mock_client = AsyncMock() + mock_client.generate_presigned_url = AsyncMock( + side_effect=ClientError(error_response, "GeneratePresignedUrl") + ) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): + with pytest.raises(StoragePermissionError) as exc_info: + await storage.get_file_url("test.txt") + + # Verify error message + error_msg = str(exc_info.value) + assert "S3 presign failed" in error_msg + assert "AccessDenied" in error_msg + + +@pytest.mark.asyncio +async def test_aws_storage_list_objects_handles_errors(): + """Test that list_objects handles permission errors.""" + storage = AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + ) + + # Mock ClientError during list operation + error_response = {"Error": {"Code": "AccessDenied", "Message": "Access Denied"}} + mock_paginator = MagicMock() + + async def mock_paginate(*args, **kwargs): + raise ClientError(error_response, "ListObjectsV2") + yield # Make it an async generator + + mock_paginator.paginate = mock_paginate + + mock_client = AsyncMock() + mock_client.get_paginator = MagicMock(return_value=mock_paginator) + mock_client.__aenter__ = AsyncMock(return_value=mock_client) + mock_client.__aexit__ = AsyncMock(return_value=None) + + with patch.object(storage.session, "client", return_value=mock_client): 
+ with pytest.raises(StoragePermissionError) as exc_info: + await storage.list_objects(prefix="test/") + + error_msg = str(exc_info.value) + assert "S3 list_objects failed" in error_msg + assert "AccessDenied" in error_msg + + +def test_aws_storage_constructor_rejects_mixed_auth(): + """Test that constructor rejects both role_arn and access keys.""" + with pytest.raises(ValueError, match="cannot use both.*role_arn.*access keys"): + AwsStorage( + aws_bucket_name="test-bucket", + aws_region="us-east-1", + aws_access_key_id="test-key", + aws_secret_access_key="test-secret", + aws_role_arn="arn:aws:iam::123456789012:role/test-role", + ) diff --git a/server/tests/test_transcripts_recording_deletion.py b/server/tests/test_transcripts_recording_deletion.py index 810fe567..3a632612 100644 --- a/server/tests/test_transcripts_recording_deletion.py +++ b/server/tests/test_transcripts_recording_deletion.py @@ -22,13 +22,16 @@ async def test_recording_deleted_with_transcript(): recording_id=recording.id, ) - with patch("reflector.db.transcripts.get_recordings_storage") as mock_get_storage: + with patch("reflector.db.transcripts.get_transcripts_storage") as mock_get_storage: storage_instance = mock_get_storage.return_value storage_instance.delete_file = AsyncMock() await transcripts_controller.remove_by_id(transcript.id) - storage_instance.delete_file.assert_awaited_once_with(recording.object_key) + # Should be called with bucket override + storage_instance.delete_file.assert_awaited_once_with( + recording.object_key, bucket=recording.bucket_name + ) assert await recordings_controller.get_by_id(recording.id) is None assert await transcripts_controller.get_by_id(transcript.id) is None diff --git a/server/tests/test_utils_daily.py b/server/tests/test_utils_daily.py new file mode 100644 index 00000000..356ffc94 --- /dev/null +++ b/server/tests/test_utils_daily.py @@ -0,0 +1,17 @@ +import pytest + +from reflector.utils.daily import extract_base_room_name + + +@pytest.mark.parametrize( + "daily_room_name,expected", + [ + ("daily-20251020193458", "daily"), + ("daily-2-20251020193458", "daily-2"), + ("my-room-name-20251020193458", "my-room-name"), + ("room-with-numbers-123-20251020193458", "room-with-numbers-123"), + ("x-20251020193458", "x"), + ], +) +def test_extract_base_room_name(daily_room_name, expected): + assert extract_base_room_name(daily_room_name) == expected diff --git a/server/tests/test_utils_url.py b/server/tests/test_utils_url.py new file mode 100644 index 00000000..c833983c --- /dev/null +++ b/server/tests/test_utils_url.py @@ -0,0 +1,63 @@ +"""Tests for URL utility functions.""" + +from reflector.utils.url import add_query_param + + +class TestAddQueryParam: + """Test the add_query_param function.""" + + def test_add_param_to_url_without_query(self): + """Should add query param with ? 
to URL without existing params.""" + url = "https://example.com/room" + result = add_query_param(url, "t", "token123") + assert result == "https://example.com/room?t=token123" + + def test_add_param_to_url_with_existing_query(self): + """Should add query param with & to URL with existing params.""" + url = "https://example.com/room?existing=param" + result = add_query_param(url, "t", "token123") + assert result == "https://example.com/room?existing=param&t=token123" + + def test_add_param_to_url_with_multiple_existing_params(self): + """Should add query param to URL with multiple existing params.""" + url = "https://example.com/room?param1=value1&param2=value2" + result = add_query_param(url, "t", "token123") + assert ( + result == "https://example.com/room?param1=value1&param2=value2&t=token123" + ) + + def test_add_param_with_special_characters(self): + """Should properly encode special characters in param value.""" + url = "https://example.com/room" + result = add_query_param(url, "name", "hello world") + assert result == "https://example.com/room?name=hello+world" + + def test_add_param_to_url_with_fragment(self): + """Should preserve URL fragment when adding query param.""" + url = "https://example.com/room#section" + result = add_query_param(url, "t", "token123") + assert result == "https://example.com/room?t=token123#section" + + def test_add_param_to_url_with_query_and_fragment(self): + """Should preserve fragment when adding param to URL with existing query.""" + url = "https://example.com/room?existing=param#section" + result = add_query_param(url, "t", "token123") + assert result == "https://example.com/room?existing=param&t=token123#section" + + def test_add_param_overwrites_existing_param(self): + """Should overwrite existing param with same name.""" + url = "https://example.com/room?t=oldtoken" + result = add_query_param(url, "t", "newtoken") + assert result == "https://example.com/room?t=newtoken" + + def test_url_without_scheme(self): + """Should handle URLs without scheme (relative URLs).""" + url = "/room/path" + result = add_query_param(url, "t", "token123") + assert result == "/room/path?t=token123" + + def test_empty_url(self): + """Should handle empty URL.""" + url = "" + result = add_query_param(url, "t", "token123") + assert result == "?t=token123" diff --git a/server/tests/test_video_platforms_factory.py b/server/tests/test_video_platforms_factory.py new file mode 100644 index 00000000..6c8c02c5 --- /dev/null +++ b/server/tests/test_video_platforms_factory.py @@ -0,0 +1,58 @@ +"""Tests for video_platforms.factory module.""" + +from unittest.mock import patch + +from reflector.video_platforms.factory import get_platform + + +class TestGetPlatform: + """Test suite for get_platform function.""" + + @patch("reflector.video_platforms.factory.settings") + def test_with_room_platform(self, mock_settings): + """When room_platform provided, should return room_platform.""" + mock_settings.DEFAULT_VIDEO_PLATFORM = "whereby" + + # Should return the room's platform when provided + assert get_platform(room_platform="daily") == "daily" + assert get_platform(room_platform="whereby") == "whereby" + + @patch("reflector.video_platforms.factory.settings") + def test_without_room_platform_uses_default(self, mock_settings): + """When no room_platform, should return DEFAULT_VIDEO_PLATFORM.""" + mock_settings.DEFAULT_VIDEO_PLATFORM = "whereby" + + # Should return default when room_platform is None + assert get_platform(room_platform=None) == "whereby" + +
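+    # For reference, a minimal sketch of the selection logic these tests pin
+    # down (hypothetical -- the real get_platform lives in
+    # reflector.video_platforms.factory and may differ in details):
+    #
+    #   def get_platform(room_platform: str | None = None) -> str:
+    #       # prefer the room's explicit platform, else the configured default
+    #       return room_platform or settings.DEFAULT_VIDEO_PLATFORM
+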
@patch("reflector.video_platforms.factory.settings") + def test_with_daily_default(self, mock_settings): + """When DEFAULT_VIDEO_PLATFORM is 'daily', should return 'daily' when no room_platform.""" + mock_settings.DEFAULT_VIDEO_PLATFORM = "daily" + + # Should return default 'daily' when room_platform is None + assert get_platform(room_platform=None) == "daily" + + @patch("reflector.video_platforms.factory.settings") + def test_no_room_id_provided(self, mock_settings): + """Should work correctly even when room_id is not provided.""" + mock_settings.DEFAULT_VIDEO_PLATFORM = "whereby" + + # Should use room_platform when provided + assert get_platform(room_platform="daily") == "daily" + + # Should use default when room_platform not provided + assert get_platform(room_platform=None) == "whereby" + + @patch("reflector.video_platforms.factory.settings") + def test_room_platform_always_takes_precedence(self, mock_settings): + """room_platform should always be used when provided.""" + mock_settings.DEFAULT_VIDEO_PLATFORM = "whereby" + + # room_platform should take precedence over default + assert get_platform(room_platform="daily") == "daily" + assert get_platform(room_platform="whereby") == "whereby" + + # Different default shouldn't matter when room_platform provided + mock_settings.DEFAULT_VIDEO_PLATFORM = "daily" + assert get_platform(room_platform="whereby") == "whereby" diff --git a/www/app/[roomName]/[meetingId]/page.tsx b/www/app/[roomName]/[meetingId]/page.tsx index 8ce405ba..725aa571 100644 --- a/www/app/[roomName]/[meetingId]/page.tsx +++ b/www/app/[roomName]/[meetingId]/page.tsx @@ -1,3 +1,3 @@ -import Room from "../room"; +import RoomContainer from "../components/RoomContainer"; -export default Room; +export default RoomContainer; diff --git a/www/app/[roomName]/components/DailyRoom.tsx b/www/app/[roomName]/components/DailyRoom.tsx new file mode 100644 index 00000000..920f8624 --- /dev/null +++ b/www/app/[roomName]/components/DailyRoom.tsx @@ -0,0 +1,93 @@ +"use client"; + +import { useCallback, useEffect, useRef } from "react"; +import { Box } from "@chakra-ui/react"; +import { useRouter } from "next/navigation"; +import DailyIframe, { DailyCall } from "@daily-co/daily-js"; +import type { components } from "../../reflector-api"; +import { useAuth } from "../../lib/AuthProvider"; +import { + ConsentDialogButton, + recordingTypeRequiresConsent, +} from "../../lib/consent"; + +type Meeting = components["schemas"]["Meeting"]; + +interface DailyRoomProps { + meeting: Meeting; +} + +export default function DailyRoom({ meeting }: DailyRoomProps) { + const router = useRouter(); + const auth = useAuth(); + const status = auth.status; + const containerRef = useRef(null); + + const roomUrl = meeting?.host_room_url || meeting?.room_url; + + const isLoading = status === "loading"; + + const handleLeave = useCallback(() => { + router.push("/browse"); + }, [router]); + + useEffect(() => { + if (isLoading || !roomUrl || !containerRef.current) return; + + let frame: DailyCall | null = null; + let destroyed = false; + + const createAndJoin = async () => { + try { + const existingFrame = DailyIframe.getCallInstance(); + if (existingFrame) { + await existingFrame.destroy(); + } + + frame = DailyIframe.createFrame(containerRef.current!, { + iframeStyle: { + width: "100vw", + height: "100vh", + border: "none", + }, + showLeaveButton: true, + showFullscreenButton: true, + }); + + if (destroyed) { + await frame.destroy(); + return; + } + + frame.on("left-meeting", handleLeave); + await frame.join({ url: 
roomUrl }); + } catch (error) { + console.error("Error creating Daily frame:", error); + } + }; + + createAndJoin(); + + return () => { + destroyed = true; + if (frame) { + frame.destroy().catch((e) => { + console.error("Error destroying frame:", e); + }); + } + }; + }, [roomUrl, isLoading, handleLeave]); + + if (!roomUrl) { + return null; + } + + return ( + +
+ {meeting.recording_type && + recordingTypeRequiresConsent(meeting.recording_type) && + meeting.id && } + + ); +} diff --git a/www/app/[roomName]/components/RoomContainer.tsx b/www/app/[roomName]/components/RoomContainer.tsx new file mode 100644 index 00000000..bfcd82f7 --- /dev/null +++ b/www/app/[roomName]/components/RoomContainer.tsx @@ -0,0 +1,214 @@ +"use client"; + +import { roomMeetingUrl } from "../../lib/routes"; +import { useCallback, useEffect, useState, use } from "react"; +import { Box, Text, Spinner } from "@chakra-ui/react"; +import { useRouter } from "next/navigation"; +import { + useRoomGetByName, + useRoomsCreateMeeting, + useRoomGetMeeting, +} from "../../lib/apiHooks"; +import type { components } from "../../reflector-api"; +import MeetingSelection from "../MeetingSelection"; +import useRoomDefaultMeeting from "../useRoomDefaultMeeting"; +import WherebyRoom from "./WherebyRoom"; +import DailyRoom from "./DailyRoom"; +import { useAuth } from "../../lib/AuthProvider"; +import { useError } from "../../(errors)/errorContext"; +import { parseNonEmptyString } from "../../lib/utils"; +import { printApiError } from "../../api/_error"; + +type Meeting = components["schemas"]["Meeting"]; + +export type RoomDetails = { + params: Promise<{ + roomName: string; + meetingId?: string; + }>; +}; + +function LoadingSpinner() { + return ( + + + + ); +} + +export default function RoomContainer(details: RoomDetails) { + const params = use(details.params); + const roomName = parseNonEmptyString( + params.roomName, + true, + "panic! params.roomName is required", + ); + const router = useRouter(); + const auth = useAuth(); + const status = auth.status; + const isAuthenticated = status === "authenticated"; + const { setError } = useError(); + + const roomQuery = useRoomGetByName(roomName); + const createMeetingMutation = useRoomsCreateMeeting(); + + const room = roomQuery.data; + + const pageMeetingId = params.meetingId; + + const defaultMeeting = useRoomDefaultMeeting( + room && !room.ics_enabled && !pageMeetingId ? roomName : null, + ); + + const explicitMeeting = useRoomGetMeeting(roomName, pageMeetingId || null); + + const meeting = explicitMeeting.data || defaultMeeting.response; + + const isLoading = + status === "loading" || + roomQuery.isLoading || + defaultMeeting?.loading || + explicitMeeting.isLoading || + createMeetingMutation.isPending; + + const errors = [ + explicitMeeting.error, + defaultMeeting.error, + roomQuery.error, + createMeetingMutation.error, + ].filter(Boolean); + + const isOwner = + isAuthenticated && room ? auth.user?.id === room.user_id : false; + + const handleMeetingSelect = (selectedMeeting: Meeting) => { + router.push( + roomMeetingUrl( + roomName, + parseNonEmptyString( + selectedMeeting.id, + true, + "panic! selectedMeeting.id is required", + ), + ), + ); + }; + + const handleCreateUnscheduled = async () => { + try { + const newMeeting = await createMeetingMutation.mutateAsync({ + params: { + path: { room_name: roomName }, + }, + body: { + allow_duplicated: room ? 
room.ics_enabled : false, + }, + }); + handleMeetingSelect(newMeeting); + } catch (err) { + console.error("Failed to create meeting:", err); + } + }; + + if (isLoading) { + return ; + } + + if (!room) { + return ( + + Room not found + + ); + } + + if (room.ics_enabled && !params.meetingId) { + return ( + + ); + } + + if (errors.length > 0) { + return ( + + {errors.map((error, i) => ( + + {printApiError(error)} + + ))} + + ); + } + + if (!meeting) { + return ; + } + + const platform = meeting.platform; + + if (!platform) { + return ( + + Meeting platform not configured + + ); + } + + switch (platform) { + case "daily": + return ; + case "whereby": + return ; + default: { + const _exhaustive: never = platform; + return ( + + Unknown platform: {platform} + + ); + } + } +} diff --git a/www/app/[roomName]/components/WherebyRoom.tsx b/www/app/[roomName]/components/WherebyRoom.tsx new file mode 100644 index 00000000..d670b4e2 --- /dev/null +++ b/www/app/[roomName]/components/WherebyRoom.tsx @@ -0,0 +1,101 @@ +"use client"; + +import { useCallback, useEffect, useRef, RefObject } from "react"; +import { useRouter } from "next/navigation"; +import type { components } from "../../reflector-api"; +import { useAuth } from "../../lib/AuthProvider"; +import { getWherebyUrl, useWhereby } from "../../lib/wherebyClient"; +import { assertExistsAndNonEmptyString, NonEmptyString } from "../../lib/utils"; +import { + ConsentDialogButton as BaseConsentDialogButton, + useConsentDialog, + recordingTypeRequiresConsent, +} from "../../lib/consent"; + +type Meeting = components["schemas"]["Meeting"]; + +interface WherebyRoomProps { + meeting: Meeting; +} + +function WherebyConsentDialogButton({ + meetingId, + wherebyRef, +}: { + meetingId: NonEmptyString; + wherebyRef: React.RefObject; +}) { + const previousFocusRef = useRef(null); + + useEffect(() => { + const element = wherebyRef.current; + if (!element) return; + + const handleWherebyReady = () => { + previousFocusRef.current = document.activeElement as HTMLElement; + }; + + element.addEventListener("ready", handleWherebyReady); + + return () => { + element.removeEventListener("ready", handleWherebyReady); + if (previousFocusRef.current && document.activeElement === element) { + previousFocusRef.current.focus(); + } + }; + }, [wherebyRef]); + + return ; +} + +export default function WherebyRoom({ meeting }: WherebyRoomProps) { + const wherebyLoaded = useWhereby(); + const wherebyRef = useRef(null); + const router = useRouter(); + const auth = useAuth(); + const status = auth.status; + const isAuthenticated = status === "authenticated"; + + const wherebyRoomUrl = getWherebyUrl(meeting); + const recordingType = meeting.recording_type; + const meetingId = meeting.id; + + const isLoading = status === "loading"; + + const handleLeave = useCallback(() => { + router.push("/browse"); + }, [router]); + + useEffect(() => { + if (isLoading || !isAuthenticated || !wherebyRoomUrl || !wherebyLoaded) + return; + + wherebyRef.current?.addEventListener("leave", handleLeave); + + return () => { + wherebyRef.current?.removeEventListener("leave", handleLeave); + }; + }, [handleLeave, wherebyRoomUrl, isLoading, isAuthenticated, wherebyLoaded]); + + if (!wherebyRoomUrl || !wherebyLoaded) { + return null; + } + + return ( + <> + + {recordingType && + recordingTypeRequiresConsent(recordingType) && + meetingId && ( + + )} + + ); +} diff --git a/www/app/[roomName]/page.tsx b/www/app/[roomName]/page.tsx index 1aaca4c7..87651a50 100644 --- a/www/app/[roomName]/page.tsx +++ 
b/www/app/[roomName]/page.tsx @@ -1,3 +1,3 @@ -import Room from "./room"; +import RoomContainer from "./components/RoomContainer"; -export default Room; +export default RoomContainer; diff --git a/www/app/lib/consent/ConsentDialog.tsx b/www/app/lib/consent/ConsentDialog.tsx new file mode 100644 index 00000000..488599d0 --- /dev/null +++ b/www/app/lib/consent/ConsentDialog.tsx @@ -0,0 +1,36 @@ +"use client"; + +import { Box, Button, Text, VStack, HStack } from "@chakra-ui/react"; +import { CONSENT_DIALOG_TEXT } from "./constants"; + +interface ConsentDialogProps { + onAccept: () => void; + onReject: () => void; +} + +export function ConsentDialog({ onAccept, onReject }: ConsentDialogProps) { + return ( + + + + {CONSENT_DIALOG_TEXT.question} + + + + + + + + ); +} diff --git a/www/app/lib/consent/ConsentDialogButton.tsx b/www/app/lib/consent/ConsentDialogButton.tsx new file mode 100644 index 00000000..2c1d084b --- /dev/null +++ b/www/app/lib/consent/ConsentDialogButton.tsx @@ -0,0 +1,39 @@ +"use client"; + +import { Button, Icon } from "@chakra-ui/react"; +import { FaBars } from "react-icons/fa6"; +import { useConsentDialog } from "./useConsentDialog"; +import { + CONSENT_BUTTON_TOP_OFFSET, + CONSENT_BUTTON_LEFT_OFFSET, + CONSENT_BUTTON_Z_INDEX, + CONSENT_DIALOG_TEXT, +} from "./constants"; + +interface ConsentDialogButtonProps { + meetingId: string; +} + +export function ConsentDialogButton({ meetingId }: ConsentDialogButtonProps) { + const { showConsentModal, consentState, hasConsent, consentLoading } = + useConsentDialog(meetingId); + + if (!consentState.ready || hasConsent(meetingId) || consentLoading) { + return null; + } + + return ( + + ); +} diff --git a/www/app/lib/consent/constants.ts b/www/app/lib/consent/constants.ts new file mode 100644 index 00000000..41e7c7e1 --- /dev/null +++ b/www/app/lib/consent/constants.ts @@ -0,0 +1,12 @@ +export const CONSENT_BUTTON_TOP_OFFSET = "56px"; +export const CONSENT_BUTTON_LEFT_OFFSET = "8px"; +export const CONSENT_BUTTON_Z_INDEX = 1000; +export const TOAST_CHECK_INTERVAL_MS = 100; + +export const CONSENT_DIALOG_TEXT = { + question: + "Can we have your permission to store this meeting's audio recording on our servers?", + acceptButton: "Yes, store the audio", + rejectButton: "No, delete after transcription", + triggerButton: "Meeting is being recorded", +} as const; diff --git a/www/app/lib/consent/index.ts b/www/app/lib/consent/index.ts new file mode 100644 index 00000000..eabca8ac --- /dev/null +++ b/www/app/lib/consent/index.ts @@ -0,0 +1,8 @@ +"use client"; + +export { ConsentDialogButton } from "./ConsentDialogButton"; +export { ConsentDialog } from "./ConsentDialog"; +export { useConsentDialog } from "./useConsentDialog"; +export { recordingTypeRequiresConsent } from "./utils"; +export * from "./constants"; +export * from "./types"; diff --git a/www/app/lib/consent/types.ts b/www/app/lib/consent/types.ts new file mode 100644 index 00000000..0bd15202 --- /dev/null +++ b/www/app/lib/consent/types.ts @@ -0,0 +1,9 @@ +export interface ConsentDialogResult { + showConsentModal: () => void; + consentState: { + ready: boolean; + consentAnsweredForMeetings?: Set; + }; + hasConsent: (meetingId: string) => boolean; + consentLoading: boolean; +} diff --git a/www/app/lib/consent/useConsentDialog.tsx b/www/app/lib/consent/useConsentDialog.tsx new file mode 100644 index 00000000..2a5c0ab3 --- /dev/null +++ b/www/app/lib/consent/useConsentDialog.tsx @@ -0,0 +1,109 @@ +"use client"; + +import { useCallback, useState, useEffect, useRef } from "react"; 
+import { toaster } from "../../components/ui/toaster"; +import { useRecordingConsent } from "../../recordingConsentContext"; +import { useMeetingAudioConsent } from "../apiHooks"; +import { ConsentDialog } from "./ConsentDialog"; +import { TOAST_CHECK_INTERVAL_MS } from "./constants"; +import type { ConsentDialogResult } from "./types"; + +export function useConsentDialog(meetingId: string): ConsentDialogResult { + const { state: consentState, touch, hasConsent } = useRecordingConsent(); + const [modalOpen, setModalOpen] = useState(false); + const audioConsentMutation = useMeetingAudioConsent(); + const intervalRef = useRef(null); + const keydownHandlerRef = useRef<((event: KeyboardEvent) => void) | null>( + null, + ); + + useEffect(() => { + return () => { + if (intervalRef.current) { + clearInterval(intervalRef.current); + intervalRef.current = null; + } + if (keydownHandlerRef.current) { + document.removeEventListener("keydown", keydownHandlerRef.current); + keydownHandlerRef.current = null; + } + }; + }, []); + + const handleConsent = useCallback( + async (given: boolean) => { + try { + await audioConsentMutation.mutateAsync({ + params: { + path: { meeting_id: meetingId }, + }, + body: { + consent_given: given, + }, + }); + + touch(meetingId); + } catch (error) { + console.error("Error submitting consent:", error); + } + }, + [audioConsentMutation, touch, meetingId], + ); + + const showConsentModal = useCallback(() => { + if (modalOpen) return; + + setModalOpen(true); + + const toastId = toaster.create({ + placement: "top", + duration: null, + render: ({ dismiss }) => ( + { + handleConsent(true); + dismiss(); + }} + onReject={() => { + handleConsent(false); + dismiss(); + }} + /> + ), + }); + + const handleKeyDown = (event: KeyboardEvent) => { + if (event.key === "Escape") { + toastId.then((id) => toaster.dismiss(id)); + } + }; + + keydownHandlerRef.current = handleKeyDown; + document.addEventListener("keydown", handleKeyDown); + + toastId.then((id) => { + intervalRef.current = setInterval(() => { + if (!toaster.isActive(id)) { + setModalOpen(false); + + if (intervalRef.current) { + clearInterval(intervalRef.current); + intervalRef.current = null; + } + + if (keydownHandlerRef.current) { + document.removeEventListener("keydown", keydownHandlerRef.current); + keydownHandlerRef.current = null; + } + } + }, TOAST_CHECK_INTERVAL_MS); + }); + }, [handleConsent, modalOpen]); + + return { + showConsentModal, + consentState, + hasConsent, + consentLoading: audioConsentMutation.isPending, + }; +} diff --git a/www/app/lib/consent/utils.ts b/www/app/lib/consent/utils.ts new file mode 100644 index 00000000..146bdd68 --- /dev/null +++ b/www/app/lib/consent/utils.ts @@ -0,0 +1,13 @@ +import type { components } from "../../reflector-api"; + +type Meeting = components["schemas"]["Meeting"]; + +/** + * Determines if a meeting's recording type requires user consent. + * Currently only "cloud" recordings require consent. 
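+ *
+ * Illustrative usage (the set of possible recording types comes from the
+ * generated Meeting schema; values other than "cloud" shown here are
+ * assumptions):
+ *
+ *   recordingTypeRequiresConsent("cloud") // => true
+ *   recordingTypeRequiresConsent("local") // => false, as would any non-"cloud" value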
+ */ +export function recordingTypeRequiresConsent( + recordingType: Meeting["recording_type"], +): boolean { + return recordingType === "cloud"; +} diff --git a/www/app/lib/useLoginRequiredPages.ts b/www/app/lib/useLoginRequiredPages.ts index 37ee96b1..d0dee1b6 100644 --- a/www/app/lib/useLoginRequiredPages.ts +++ b/www/app/lib/useLoginRequiredPages.ts @@ -3,6 +3,7 @@ import { PROTECTED_PAGES } from "./auth"; import { usePathname } from "next/navigation"; import { useAuth } from "./AuthProvider"; import { useEffect } from "react"; +import { featureEnabled } from "./features"; const HOME = "/" as const; @@ -13,7 +14,9 @@ export const useLoginRequiredPages = () => { const isNotLoggedIn = auth.status === "unauthenticated"; // safety const isLastDestination = pathname === HOME; - const shouldRedirect = isNotLoggedIn && isProtected && !isLastDestination; + const requireLogin = featureEnabled("requireLogin"); + const shouldRedirect = + requireLogin && isNotLoggedIn && isProtected && !isLastDestination; useEffect(() => { if (!shouldRedirect) return; // on the backend, the redirect goes straight to the auth provider, but we don't have it because it's hidden inside next-auth middleware diff --git a/www/app/reflector-api.d.ts b/www/app/reflector-api.d.ts index 1dc92f2b..9b9582ba 100644 --- a/www/app/reflector-api.d.ts +++ b/www/app/reflector-api.d.ts @@ -696,6 +696,26 @@ export interface paths { patch?: never; trace?: never; }; + "/v1/webhook": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Webhook + * @description Handle Daily webhook events. + */ + post: operations["v1_webhook"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; } export type webhooks = Record; export interface components { @@ -852,6 +872,8 @@ export interface components { * @default false */ ics_enabled: boolean; + /** Platform */ + platform?: ("whereby" | "daily") | null; }; /** CreateRoomMeeting */ CreateRoomMeeting: { @@ -877,6 +899,22 @@ export interface components { target_language: string; source_kind?: components["schemas"]["SourceKind"] | null; }; + /** + * DailyWebhookEvent + * @description Daily webhook event structure. 
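+     * Example shape (hypothetical values; the exact payload depends on the
+     * Daily event type): { "type": "recording.started", "id": "evt_123",
+     * "ts": 1731500000, "data": { "room_name": "daily-20251020193458" } }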
+ */ + DailyWebhookEvent: { + /** Type */ + type: string; + /** Id */ + id: string; + /** Ts */ + ts: number; + /** Data */ + data: { + [key: string]: unknown; + }; + }; /** DeletionStatus */ DeletionStatus: { /** Status */ @@ -1193,6 +1231,12 @@ export interface components { calendar_metadata?: { [key: string]: unknown; } | null; + /** + * Platform + * @default whereby + * @enum {string} + */ + platform: "whereby" | "daily"; }; /** MeetingConsentRequest */ MeetingConsentRequest: { @@ -1279,6 +1323,12 @@ export interface components { ics_last_sync?: string | null; /** Ics Last Etag */ ics_last_etag?: string | null; + /** + * Platform + * @default whereby + * @enum {string} + */ + platform: "whereby" | "daily"; }; /** RoomDetails */ RoomDetails: { @@ -1325,6 +1375,12 @@ export interface components { ics_last_sync?: string | null; /** Ics Last Etag */ ics_last_etag?: string | null; + /** + * Platform + * @default whereby + * @enum {string} + */ + platform: "whereby" | "daily"; /** Webhook Url */ webhook_url: string | null; /** Webhook Secret */ @@ -1505,6 +1561,8 @@ export interface components { ics_fetch_interval?: number | null; /** Ics Enabled */ ics_enabled?: boolean | null; + /** Platform */ + platform?: ("whereby" | "daily") | null; }; /** UpdateTranscript */ UpdateTranscript: { @@ -3191,4 +3249,37 @@ export interface operations { }; }; }; + v1_webhook: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["DailyWebhookEvent"]; + }; + }; + responses: { + /** @description Successful Response */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": unknown; + }; + }; + /** @description Validation Error */ + 422: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["HTTPValidationError"]; + }; + }; + }; + }; } diff --git a/www/package.json b/www/package.json index 5169dbe2..f4412db0 100644 --- a/www/package.json +++ b/www/package.json @@ -14,6 +14,7 @@ }, "dependencies": { "@chakra-ui/react": "^3.24.2", + "@daily-co/daily-js": "^0.84.0", "@emotion/react": "^11.14.0", "@fortawesome/fontawesome-svg-core": "^6.4.0", "@fortawesome/free-solid-svg-icons": "^6.4.0", diff --git a/www/pnpm-lock.yaml b/www/pnpm-lock.yaml index 6c0a3d83..92667b7e 100644 --- a/www/pnpm-lock.yaml +++ b/www/pnpm-lock.yaml @@ -10,6 +10,9 @@ importers: "@chakra-ui/react": specifier: ^3.24.2 version: 3.24.2(@emotion/react@11.14.0(@types/react@18.2.20)(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + "@daily-co/daily-js": + specifier: ^0.84.0 + version: 0.84.0 "@emotion/react": specifier: ^11.14.0 version: 11.14.0(@types/react@18.2.20)(react@18.3.1) @@ -487,6 +490,13 @@ packages: } engines: { node: ">=12" } + "@daily-co/daily-js@0.84.0": + resolution: + { + integrity: sha512-/ynXrMDDkRXhLlHxiFNf9QU5yw4ZGPr56wNARgja/Tiid71UIniundTavCNF5cMb2I1vNoMh7oEJ/q8stg/V7g==, + } + engines: { node: ">=10.0.0" } + "@emnapi/core@1.4.5": resolution: { @@ -2293,6 +2303,13 @@ packages: } engines: { node: ">=18" } + "@sentry-internal/browser-utils@8.55.0": + resolution: + { + integrity: sha512-ROgqtQfpH/82AQIpESPqPQe0UyWywKJsmVIqi3c5Fh+zkds5LUxnssTj3yNd1x+kxaPDVB023jAP+3ibNgeNDw==, + } + engines: { node: ">=14.18" } + "@sentry-internal/feedback@10.11.0": resolution: { @@ -2300,6 +2317,13 @@ packages: } engines: { node: ">=18" } + "@sentry-internal/feedback@8.55.0": + resolution: + { + integrity: 
sha512-cP3BD/Q6pquVQ+YL+rwCnorKuTXiS9KXW8HNKu4nmmBAyf7urjs+F6Hr1k9MXP5yQ8W3yK7jRWd09Yu6DHWOiw==, + } + engines: { node: ">=14.18" } + "@sentry-internal/replay-canvas@10.11.0": resolution: { @@ -2307,6 +2331,13 @@ packages: } engines: { node: ">=18" } + "@sentry-internal/replay-canvas@8.55.0": + resolution: + { + integrity: sha512-nIkfgRWk1091zHdu4NbocQsxZF1rv1f7bbp3tTIlZYbrH62XVZosx5iHAuZG0Zc48AETLE7K4AX9VGjvQj8i9w==, + } + engines: { node: ">=14.18" } + "@sentry-internal/replay@10.11.0": resolution: { @@ -2314,6 +2345,13 @@ packages: } engines: { node: ">=18" } + "@sentry-internal/replay@8.55.0": + resolution: + { + integrity: sha512-roCDEGkORwolxBn8xAKedybY+Jlefq3xYmgN2fr3BTnsXjSYOPC7D1/mYqINBat99nDtvgFvNfRcZPiwwZ1hSw==, + } + engines: { node: ">=14.18" } + "@sentry/babel-plugin-component-annotate@4.3.0": resolution: { @@ -2328,6 +2366,13 @@ packages: } engines: { node: ">=18" } + "@sentry/browser@8.55.0": + resolution: + { + integrity: sha512-1A31mCEWCjaMxJt6qGUK+aDnLDcK6AwLAZnqpSchNysGni1pSn1RWSmk9TBF8qyTds5FH8B31H480uxMPUJ7Cw==, + } + engines: { node: ">=14.18" } + "@sentry/bundler-plugin-core@4.3.0": resolution: { @@ -2421,6 +2466,13 @@ packages: } engines: { node: ">=18" } + "@sentry/core@8.55.0": + resolution: + { + integrity: sha512-6g7jpbefjHYs821Z+EBJ8r4Z7LT5h80YSWRJaylGS4nW5W5Z2KXzpdnyFarv37O7QjauzVC2E+PABmpkw5/JGA==, + } + engines: { node: ">=14.18" } + "@sentry/nextjs@10.11.0": resolution: { @@ -4029,6 +4081,12 @@ packages: } engines: { node: ">=8" } + bowser@2.12.1: + resolution: + { + integrity: sha512-z4rE2Gxh7tvshQ4hluIT7XcFrgLIQaw9X3A+kTTRdovCz5PMukm/0QC/BKSYPj3omF5Qfypn9O/c5kgpmvYUCw==, + } + brace-expansion@1.1.12: resolution: { @@ -9288,6 +9346,14 @@ snapshots: "@jridgewell/trace-mapping": 0.3.9 optional: true + "@daily-co/daily-js@0.84.0": + dependencies: + "@babel/runtime": 7.28.2 + "@sentry/browser": 8.55.0 + bowser: 2.12.1 + dequal: 2.0.3 + events: 3.3.0 + "@emnapi/core@1.4.5": dependencies: "@emnapi/wasi-threads": 1.0.4 @@ -10506,20 +10572,38 @@ snapshots: dependencies: "@sentry/core": 10.11.0 + "@sentry-internal/browser-utils@8.55.0": + dependencies: + "@sentry/core": 8.55.0 + "@sentry-internal/feedback@10.11.0": dependencies: "@sentry/core": 10.11.0 + "@sentry-internal/feedback@8.55.0": + dependencies: + "@sentry/core": 8.55.0 + "@sentry-internal/replay-canvas@10.11.0": dependencies: "@sentry-internal/replay": 10.11.0 "@sentry/core": 10.11.0 + "@sentry-internal/replay-canvas@8.55.0": + dependencies: + "@sentry-internal/replay": 8.55.0 + "@sentry/core": 8.55.0 + "@sentry-internal/replay@10.11.0": dependencies: "@sentry-internal/browser-utils": 10.11.0 "@sentry/core": 10.11.0 + "@sentry-internal/replay@8.55.0": + dependencies: + "@sentry-internal/browser-utils": 8.55.0 + "@sentry/core": 8.55.0 + "@sentry/babel-plugin-component-annotate@4.3.0": {} "@sentry/browser@10.11.0": @@ -10530,6 +10614,14 @@ snapshots: "@sentry-internal/replay-canvas": 10.11.0 "@sentry/core": 10.11.0 + "@sentry/browser@8.55.0": + dependencies: + "@sentry-internal/browser-utils": 8.55.0 + "@sentry-internal/feedback": 8.55.0 + "@sentry-internal/replay": 8.55.0 + "@sentry-internal/replay-canvas": 8.55.0 + "@sentry/core": 8.55.0 + "@sentry/bundler-plugin-core@4.3.0": dependencies: "@babel/core": 7.28.3 @@ -10590,6 +10682,8 @@ snapshots: "@sentry/core@10.11.0": {} + "@sentry/core@8.55.0": {} + 
"@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)": dependencies: "@opentelemetry/api": 1.9.0 @@ -11967,6 +12061,8 @@ snapshots: binary-extensions@2.3.0: {} + bowser@2.12.1: {} + brace-expansion@1.1.12: dependencies: balanced-match: 1.0.2 From 34a3f5618c5b5bbd1ef65cb0b7c1d67c98fc56c3 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Wed, 12 Nov 2025 20:25:59 -0600 Subject: [PATCH 67/77] chore(main): release 0.17.0 (#717) --- CHANGELOG.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce676740..812a1880 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13) + + +### Features + +* add API key management UI ([#716](https://github.com/Monadical-SAS/reflector/issues/716)) ([372202b](https://github.com/Monadical-SAS/reflector/commit/372202b0e1a86823900b0aa77be1bfbc2893d8a1)) +* daily.co support as alternative to whereby ([#691](https://github.com/Monadical-SAS/reflector/issues/691)) ([1473fd8](https://github.com/Monadical-SAS/reflector/commit/1473fd82dc472c394cbaa2987212ad662a74bcac)) + ## [0.16.0](https://github.com/Monadical-SAS/reflector/compare/v0.15.0...v0.16.0) (2025-10-24) From 857e035562f805af7d3dd753fe299a258bd2e449 Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Thu, 13 Nov 2025 11:35:29 -0500 Subject: [PATCH 68/77] fix whereby reprocess logic branch (#720) Co-authored-by: Igor Loskutov --- server/reflector/views/transcripts_process.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/server/reflector/views/transcripts_process.py b/server/reflector/views/transcripts_process.py index 46e070fd..cee1e10d 100644 --- a/server/reflector/views/transcripts_process.py +++ b/server/reflector/views/transcripts_process.py @@ -55,9 +55,18 @@ async def transcript_process( recording = await recordings_controller.get_by_id(transcript.recording_id) if recording: bucket_name = recording.bucket_name - track_keys = list(getattr(recording, "track_keys", []) or []) + track_keys = recording.track_keys + if track_keys is not None and len(track_keys) == 0: + raise HTTPException( + status_code=500, + detail="No track keys found, must be either > 0 or None", + ) + if track_keys is not None and not bucket_name: + raise HTTPException( + status_code=500, detail="Bucket name must be specified" + ) - if bucket_name: + if track_keys: task_pipeline_multitrack_process.delay( transcript_id=transcript_id, bucket_name=bucket_name, From a9a4f32324f66c838e081eee42bb9502f38c1db1 Mon Sep 17 00:00:00 2001 From: Sergey Mankovsky Date: Fri, 14 Nov 2025 13:36:25 +0100 Subject: [PATCH 69/77] fix: copy transcript (#674) * Copy transcript * Fix share copy transcript * Move copy button above transcript --- .../transcripts/buildTranscriptWithTopics.ts | 60 ++++++++++++++++ www/app/(app)/transcripts/shareCopy.tsx | 15 ++-- www/app/(app)/transcripts/transcriptTitle.tsx | 68 +++++++++++++++++-- 3 files changed, 130 insertions(+), 13 deletions(-) create mode 100644 www/app/(app)/transcripts/buildTranscriptWithTopics.ts diff --git a/www/app/(app)/transcripts/buildTranscriptWithTopics.ts 
b/www/app/(app)/transcripts/buildTranscriptWithTopics.ts new file mode 100644 index 00000000..71553d31 --- /dev/null +++ b/www/app/(app)/transcripts/buildTranscriptWithTopics.ts @@ -0,0 +1,60 @@ +import type { components } from "../../reflector-api"; +import { formatTime } from "../../lib/time"; + +type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"]; +type Participant = components["schemas"]["Participant"]; + +function getSpeakerName( + speakerNumber: number, + participants?: Participant[] | null, +): string { + const name = participants?.find((p) => p.speaker === speakerNumber)?.name; + return name && name.trim().length > 0 ? name : `Speaker ${speakerNumber}`; +} + +export function buildTranscriptWithTopics( + topics: GetTranscriptTopic[], + participants?: Participant[] | null, + transcriptTitle?: string | null, +): string { + const blocks: string[] = []; + + if (transcriptTitle && transcriptTitle.trim()) { + blocks.push(`# ${transcriptTitle.trim()}`); + blocks.push(""); + } + + for (const topic of topics) { + // Topic header + const topicTime = formatTime(Math.floor(topic.timestamp || 0)); + const title = topic.title?.trim() || "Untitled Topic"; + blocks.push(`## ${title} [${topicTime}]`); + + if (topic.segments && topic.segments.length > 0) { + for (const seg of topic.segments) { + const ts = formatTime(Math.floor(seg.start || 0)); + const speaker = getSpeakerName(seg.speaker as number, participants); + const text = (seg.text || "").replace(/\s+/g, " ").trim(); + if (text) { + blocks.push(`[${ts}] ${speaker}: ${text}`); + } + } + } else if (topic.transcript) { + // Fallback: plain transcript when segments are not present + const text = topic.transcript.replace(/\s+/g, " ").trim(); + if (text) { + blocks.push(text); + } + } + + // Blank line between topics + blocks.push(""); + } + + // Trim trailing blank line + while (blocks.length > 0 && blocks[blocks.length - 1] === "") { + blocks.pop(); + } + + return blocks.join("\n"); +} diff --git a/www/app/(app)/transcripts/shareCopy.tsx b/www/app/(app)/transcripts/shareCopy.tsx index fb1b5f68..bdbff5f4 100644 --- a/www/app/(app)/transcripts/shareCopy.tsx +++ b/www/app/(app)/transcripts/shareCopy.tsx @@ -3,6 +3,8 @@ import type { components } from "../../reflector-api"; type GetTranscript = components["schemas"]["GetTranscript"]; type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"]; import { Button, BoxProps, Box } from "@chakra-ui/react"; +import { buildTranscriptWithTopics } from "./buildTranscriptWithTopics"; +import { useTranscriptParticipants } from "../../lib/apiHooks"; type ShareCopyProps = { finalSummaryElement: HTMLDivElement | null; @@ -18,6 +20,7 @@ export default function ShareCopy({ }: ShareCopyProps & BoxProps) { const [isCopiedSummary, setIsCopiedSummary] = useState(false); const [isCopiedTranscript, setIsCopiedTranscript] = useState(false); + const participantsQuery = useTranscriptParticipants(transcript?.id || null); const onCopySummaryClick = () => { const text_to_copy = finalSummaryElement?.innerText; @@ -32,12 +35,12 @@ export default function ShareCopy({ }; const onCopyTranscriptClick = () => { - let text_to_copy = - topics - ?.map((topic) => topic.transcript) - .join("\n\n") - .replace(/ +/g, " ") - .trim() || ""; + const text_to_copy = + buildTranscriptWithTopics( + topics || [], + participantsQuery?.data || null, + transcript?.title || null, + ) || ""; text_to_copy && navigator.clipboard.writeText(text_to_copy).then(() => { diff --git a/www/app/(app)/transcripts/transcriptTitle.tsx 
b/www/app/(app)/transcripts/transcriptTitle.tsx index 1ac32b02..49a22c71 100644 --- a/www/app/(app)/transcripts/transcriptTitle.tsx +++ b/www/app/(app)/transcripts/transcriptTitle.tsx @@ -4,10 +4,15 @@ import type { components } from "../../reflector-api"; type UpdateTranscript = components["schemas"]["UpdateTranscript"]; type GetTranscript = components["schemas"]["GetTranscript"]; type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"]; -import { useTranscriptUpdate } from "../../lib/apiHooks"; +import { + useTranscriptUpdate, + useTranscriptParticipants, +} from "../../lib/apiHooks"; import { Heading, IconButton, Input, Flex, Spacer } from "@chakra-ui/react"; -import { LuPen } from "react-icons/lu"; +import { LuPen, LuCopy, LuCheck } from "react-icons/lu"; import ShareAndPrivacy from "./shareAndPrivacy"; +import { buildTranscriptWithTopics } from "./buildTranscriptWithTopics"; +import { toaster } from "../../components/ui/toaster"; type TranscriptTitle = { title: string; @@ -25,6 +30,9 @@ const TranscriptTitle = (props: TranscriptTitle) => { const [preEditTitle, setPreEditTitle] = useState(props.title); const [isEditing, setIsEditing] = useState(false); const updateTranscriptMutation = useTranscriptUpdate(); + const participantsQuery = useTranscriptParticipants( + props.transcript?.id || null, + ); const updateTitle = async (newTitle: string, transcriptId: string) => { try { @@ -118,11 +126,57 @@ const TranscriptTitle = (props: TranscriptTitle) => { {props.transcript && props.topics && ( - + <> + { + const text = buildTranscriptWithTopics( + props.topics || [], + participantsQuery?.data || null, + props.transcript?.title || null, + ); + if (!text) return; + navigator.clipboard + .writeText(text) + .then(() => { + toaster + .create({ + placement: "top", + duration: 2500, + render: () => ( +
+
+ Transcript copied +
+
+ ), + }) + .then(() => {}); + }) + .catch(() => {}); + }} + > + +
+ + )} )} From 28a7258e45317b78e60e6397be2bc503647eaace Mon Sep 17 00:00:00 2001 From: Sergey Mankovsky Date: Fri, 14 Nov 2025 14:28:39 +0100 Subject: [PATCH 70/77] fix: add proccessing page to file upload and reprocessing (#650) --- .../(app)/transcripts/[transcriptId]/page.tsx | 59 ++++++++++- .../[transcriptId]/processing/page.tsx | 97 +++++++++++++++++++ .../[transcriptId]/upload/page.tsx | 48 ++++----- .../(app)/transcripts/fileUploadButton.tsx | 2 + 4 files changed, 171 insertions(+), 35 deletions(-) create mode 100644 www/app/(app)/transcripts/[transcriptId]/processing/page.tsx diff --git a/www/app/(app)/transcripts/[transcriptId]/page.tsx b/www/app/(app)/transcripts/[transcriptId]/page.tsx index ec5f9ebb..1e020f1c 100644 --- a/www/app/(app)/transcripts/[transcriptId]/page.tsx +++ b/www/app/(app)/transcripts/[transcriptId]/page.tsx @@ -10,7 +10,15 @@ import FinalSummary from "./finalSummary"; import TranscriptTitle from "../transcriptTitle"; import Player from "../player"; import { useRouter } from "next/navigation"; -import { Box, Flex, Grid, GridItem, Skeleton, Text } from "@chakra-ui/react"; +import { + Box, + Flex, + Grid, + GridItem, + Skeleton, + Text, + Spinner, +} from "@chakra-ui/react"; import { useTranscriptGet } from "../../../lib/apiHooks"; import { TranscriptStatus } from "../../../lib/transcript"; @@ -28,6 +36,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { "idle", "recording", "processing", + "uploaded", ] satisfies TranscriptStatus[] as TranscriptStatus[]; const transcript = useTranscriptGet(transcriptId); @@ -45,15 +54,55 @@ export default function TranscriptDetails(details: TranscriptDetails) { useState(null); useEffect(() => { - if (waiting) { - const newUrl = "/transcripts/" + params.transcriptId + "/record"; + if (!waiting || !transcript.data) return; + + const status = transcript.data.status; + let newUrl: string | null = null; + + if (status === "processing" || status === "uploaded") { + newUrl = `/transcripts/${params.transcriptId}/processing`; + } else if (status === "recording") { + newUrl = `/transcripts/${params.transcriptId}/record`; + } else if (status === "idle") { + newUrl = + transcript.data.source_kind === "file" + ? `/transcripts/${params.transcriptId}/upload` + : `/transcripts/${params.transcriptId}/record`; + } + + if (newUrl) { // Shallow redirection does not work on NextJS 13 // https://github.com/vercel/next.js/discussions/48110 // https://github.com/vercel/next.js/discussions/49540 router.replace(newUrl); - // history.replaceState({}, "", newUrl); } - }, [waiting]); + }, [waiting, transcript.data?.status, transcript.data?.source_kind]); + + if (waiting) { + return ( + + + + + + Loading transcript... 
+ + + + + ); + } if (transcript.error || topics?.error) { return ( diff --git a/www/app/(app)/transcripts/[transcriptId]/processing/page.tsx b/www/app/(app)/transcripts/[transcriptId]/processing/page.tsx new file mode 100644 index 00000000..4422e077 --- /dev/null +++ b/www/app/(app)/transcripts/[transcriptId]/processing/page.tsx @@ -0,0 +1,97 @@ +"use client"; +import { useEffect, use } from "react"; +import { + Heading, + Text, + VStack, + Spinner, + Button, + Center, +} from "@chakra-ui/react"; +import { useRouter } from "next/navigation"; +import { useTranscriptGet } from "../../../../lib/apiHooks"; + +type TranscriptProcessing = { + params: Promise<{ + transcriptId: string; + }>; +}; + +export default function TranscriptProcessing(details: TranscriptProcessing) { + const params = use(details.params); + const transcriptId = params.transcriptId; + const router = useRouter(); + + const transcript = useTranscriptGet(transcriptId); + + useEffect(() => { + const status = transcript.data?.status; + if (!status) return; + + if (status === "ended" || status === "error") { + router.replace(`/transcripts/${transcriptId}`); + } else if (status === "recording") { + router.replace(`/transcripts/${transcriptId}/record`); + } else if (status === "idle") { + const dest = + transcript.data?.source_kind === "file" + ? `/transcripts/${transcriptId}/upload` + : `/transcripts/${transcriptId}/record`; + router.replace(dest); + } + }, [ + transcript.data?.status, + transcript.data?.source_kind, + router, + transcriptId, + ]); + + if (transcript.isLoading) { + return ( + + Loading transcript... + + ); + } + + if (transcript.error) { + return ( + + Transcript not found + We couldn't load this transcript. + + ); + } + + return ( + <> + +
+ + + + Processing recording + + + You can safely return to the library while your recording is being + processed. + + + +
+
+ + ); +} diff --git a/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx b/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx index b4bc25cc..9fc6a687 100644 --- a/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx +++ b/www/app/(app)/transcripts/[transcriptId]/upload/page.tsx @@ -4,7 +4,7 @@ import { useWebSockets } from "../../useWebSockets"; import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock"; import { useRouter } from "next/navigation"; import useMp3 from "../../useMp3"; -import { Center, VStack, Text, Heading, Button } from "@chakra-ui/react"; +import { Center, VStack, Text, Heading } from "@chakra-ui/react"; import FileUploadButton from "../../fileUploadButton"; import { useTranscriptGet } from "../../../../lib/apiHooks"; @@ -53,6 +53,12 @@ const TranscriptUpload = (details: TranscriptUpload) => { const newUrl = "/transcripts/" + params.transcriptId; router.replace(newUrl); + } else if ( + newStatus && + (newStatus == "uploaded" || newStatus == "processing") + ) { + // After upload finishes (or if already processing), redirect to the unified processing page + router.replace(`/transcripts/${params.transcriptId}/processing`); } }, [webSockets.status?.value, transcript.data?.status]); @@ -71,7 +77,7 @@ const TranscriptUpload = (details: TranscriptUpload) => { <> { Upload meeting
- {status && status == "idle" && ( - <> - - Please select the file, supported formats: .mp3, m4a, .wav, - .mp4, .mov or .webm - - - - )} - {status && status == "uploaded" && ( - File is uploaded, processing... - )} - {(status == "recording" || status == "processing") && ( - <> - Processing your recording... - - You can safely return to the library while your file is being - processed. - - - - )} + + Please select the file, supported formats: .mp3, m4a, .wav, .mp4, + .mov or .webm + + + router.replace(`/transcripts/${params.transcriptId}/processing`) + } + />
diff --git a/www/app/(app)/transcripts/fileUploadButton.tsx b/www/app/(app)/transcripts/fileUploadButton.tsx index 1f5d72eb..b5fda7b6 100644 --- a/www/app/(app)/transcripts/fileUploadButton.tsx +++ b/www/app/(app)/transcripts/fileUploadButton.tsx @@ -5,6 +5,7 @@ import { useError } from "../../(errors)/errorContext"; type FileUploadButton = { transcriptId: string; + onUploadComplete?: () => void; }; export default function FileUploadButton(props: FileUploadButton) { @@ -31,6 +32,7 @@ export default function FileUploadButton(props: FileUploadButton) { const uploadNextChunk = async () => { if (chunkNumber == totalChunks) { setProgress(0); + props.onUploadComplete?.(); return; } From b20cad76e69fb6a76405af299a005f1ddcf60eae Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Fri, 14 Nov 2025 14:31:52 -0500 Subject: [PATCH 71/77] feat: daily QOL: participants dictionary (#721) * daily QOL: participants dictionary * meeting deactivation fix * meeting deactivation fix --------- Co-authored-by: Igor Loskutov --- ...aa_add_daily_participant_session_table_.py | 79 ++++++++ server/reflector/db/__init__.py | 1 + .../db/daily_participant_sessions.py | 169 ++++++++++++++++++ server/reflector/video_platforms/base.py | 7 +- server/reflector/video_platforms/daily.py | 93 ++++++++-- server/reflector/video_platforms/models.py | 28 ++- server/reflector/video_platforms/whereby.py | 48 ++++- server/reflector/views/daily.py | 154 ++++++++++++++-- server/reflector/worker/ics_sync.py | 2 +- server/reflector/worker/process.py | 12 +- server/scripts/list_daily_webhooks.py | 91 ++++++++++ server/tests/mocks/mock_platform.py | 24 ++- server/tests/test_transcripts_process.py | 111 ++++++++++++ 13 files changed, 759 insertions(+), 60 deletions(-) create mode 100644 server/migrations/versions/2b92a1b03caa_add_daily_participant_session_table_.py create mode 100644 server/reflector/db/daily_participant_sessions.py create mode 100755 server/scripts/list_daily_webhooks.py diff --git a/server/migrations/versions/2b92a1b03caa_add_daily_participant_session_table_.py b/server/migrations/versions/2b92a1b03caa_add_daily_participant_session_table_.py new file mode 100644 index 00000000..90c3e94e --- /dev/null +++ b/server/migrations/versions/2b92a1b03caa_add_daily_participant_session_table_.py @@ -0,0 +1,79 @@ +"""add daily participant session table with immutable left_at + +Revision ID: 2b92a1b03caa +Revises: f8294b31f022 +Create Date: 2025-11-13 20:29:30.486577 + +""" + +from typing import Sequence, Union + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = "2b92a1b03caa" +down_revision: Union[str, None] = "f8294b31f022" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + # Create table + op.create_table( + "daily_participant_session", + sa.Column("id", sa.String(), nullable=False), + sa.Column("meeting_id", sa.String(), nullable=False), + sa.Column("room_id", sa.String(), nullable=False), + sa.Column("session_id", sa.String(), nullable=False), + sa.Column("user_id", sa.String(), nullable=True), + sa.Column("user_name", sa.String(), nullable=False), + sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("left_at", sa.DateTime(timezone=True), nullable=True), + sa.ForeignKeyConstraint(["meeting_id"], ["meeting.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["room_id"], ["room.id"], ondelete="CASCADE"), + sa.PrimaryKeyConstraint("id"), + ) + with op.batch_alter_table("daily_participant_session", schema=None) as batch_op: + batch_op.create_index( + "idx_daily_session_meeting_left", ["meeting_id", "left_at"], unique=False + ) + batch_op.create_index("idx_daily_session_room", ["room_id"], unique=False) + + # Create trigger function to prevent left_at from being updated once set + op.execute(""" + CREATE OR REPLACE FUNCTION prevent_left_at_update() + RETURNS TRIGGER AS $$ + BEGIN + IF OLD.left_at IS NOT NULL THEN + RAISE EXCEPTION 'left_at is immutable once set'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + """) + + # Create trigger + op.execute(""" + CREATE TRIGGER prevent_left_at_update_trigger + BEFORE UPDATE ON daily_participant_session + FOR EACH ROW + EXECUTE FUNCTION prevent_left_at_update(); + """) + + +def downgrade() -> None: + # Drop trigger + op.execute( + "DROP TRIGGER IF EXISTS prevent_left_at_update_trigger ON daily_participant_session;" + ) + + # Drop trigger function + op.execute("DROP FUNCTION IF EXISTS prevent_left_at_update();") + + # Drop indexes and table + with op.batch_alter_table("daily_participant_session", schema=None) as batch_op: + batch_op.drop_index("idx_daily_session_room") + batch_op.drop_index("idx_daily_session_meeting_left") + + op.drop_table("daily_participant_session") diff --git a/server/reflector/db/__init__.py b/server/reflector/db/__init__.py index 8822e6b0..91ed12ee 100644 --- a/server/reflector/db/__init__.py +++ b/server/reflector/db/__init__.py @@ -25,6 +25,7 @@ def get_database() -> databases.Database: # import models import reflector.db.calendar_events # noqa +import reflector.db.daily_participant_sessions # noqa import reflector.db.meetings # noqa import reflector.db.recordings # noqa import reflector.db.rooms # noqa diff --git a/server/reflector/db/daily_participant_sessions.py b/server/reflector/db/daily_participant_sessions.py new file mode 100644 index 00000000..5fac1912 --- /dev/null +++ b/server/reflector/db/daily_participant_sessions.py @@ -0,0 +1,169 @@ +"""Daily.co participant session tracking. + +Stores webhook data for participant.joined and participant.left events to provide +historical session information (Daily.co API only returns current participants). 
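+
+A Postgres trigger added in migration 2b92a1b03caa makes left_at immutable
+once set, so a duplicate or late participant.left webhook can never reopen
+or overwrite an already-closed session.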
+""" + +from datetime import datetime + +import sqlalchemy as sa +from pydantic import BaseModel +from sqlalchemy.dialects.postgresql import insert + +from reflector.db import get_database, metadata +from reflector.utils.string import NonEmptyString + +daily_participant_sessions = sa.Table( + "daily_participant_session", + metadata, + sa.Column("id", sa.String, primary_key=True), + sa.Column( + "meeting_id", + sa.String, + sa.ForeignKey("meeting.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column( + "room_id", + sa.String, + sa.ForeignKey("room.id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("session_id", sa.String, nullable=False), + sa.Column("user_id", sa.String, nullable=True), + sa.Column("user_name", sa.String, nullable=False), + sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("left_at", sa.DateTime(timezone=True), nullable=True), + sa.Index("idx_daily_session_meeting_left", "meeting_id", "left_at"), + sa.Index("idx_daily_session_room", "room_id"), +) + + +class DailyParticipantSession(BaseModel): + """Daily.co participant session record. + + Tracks when a participant joined and left a meeting. Populated from webhooks: + - participant.joined: Creates record with left_at=None + - participant.left: Updates record with left_at + + ID format: {meeting_id}:{user_id}:{joined_at_ms} + - Ensures idempotency (duplicate webhooks don't create duplicates) + - Allows same user to rejoin (different joined_at = different session) + + Duration is calculated as: left_at - joined_at (not stored) + """ + + id: NonEmptyString + meeting_id: NonEmptyString + room_id: NonEmptyString + session_id: NonEmptyString # Daily.co's session_id (identifies room session) + user_id: NonEmptyString | None = None + user_name: str + joined_at: datetime + left_at: datetime | None = None + + +class DailyParticipantSessionController: + """Controller for Daily.co participant session persistence.""" + + async def get_by_id(self, id: str) -> DailyParticipantSession | None: + """Get a session by its ID.""" + query = daily_participant_sessions.select().where( + daily_participant_sessions.c.id == id + ) + result = await get_database().fetch_one(query) + return DailyParticipantSession(**result) if result else None + + async def get_open_session( + self, meeting_id: NonEmptyString, session_id: NonEmptyString + ) -> DailyParticipantSession | None: + """Get the open (not left) session for a user in a meeting.""" + query = daily_participant_sessions.select().where( + sa.and_( + daily_participant_sessions.c.meeting_id == meeting_id, + daily_participant_sessions.c.session_id == session_id, + daily_participant_sessions.c.left_at.is_(None), + ) + ) + results = await get_database().fetch_all(query) + + if len(results) > 1: + raise ValueError( + f"Multiple open sessions for daily session {session_id} in meeting {meeting_id}: " + f"found {len(results)} sessions" + ) + + return DailyParticipantSession(**results[0]) if results else None + + async def upsert_joined(self, session: DailyParticipantSession) -> None: + """Insert or update when participant.joined webhook arrives. + + Idempotent: Duplicate webhooks with same ID are safely ignored. + Out-of-order: If left webhook arrived first, preserves left_at. 
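+
+        Behaviour sketch (session is a hypothetical DailyParticipantSession):
+
+            await controller.upsert_joined(session)  # first webhook: row inserted, left_at=None
+            await controller.upsert_joined(session)  # duplicate webhook: only user_name refreshed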
+ """ + query = insert(daily_participant_sessions).values(**session.model_dump()) + query = query.on_conflict_do_update( + index_elements=["id"], + set_={"user_name": session.user_name}, + ) + await get_database().execute(query) + + async def upsert_left(self, session: DailyParticipantSession) -> None: + """Update session when participant.left webhook arrives. + + Finds the open session for this user in this meeting and updates left_at. + Works around Daily.co webhook timestamp inconsistency (joined_at differs by ~4ms between webhooks). + + Handles three cases: + 1. Normal flow: open session exists → updates left_at + 2. Out-of-order: left arrives first → creates new record with left data + 3. Duplicate: left arrives again → idempotent (DB trigger prevents left_at modification) + """ + if session.left_at is None: + raise ValueError("left_at is required for upsert_left") + + if session.left_at <= session.joined_at: + raise ValueError( + f"left_at ({session.left_at}) must be after joined_at ({session.joined_at})" + ) + + # Find existing open session (works around timestamp mismatch in webhooks) + existing = await self.get_open_session(session.meeting_id, session.session_id) + + if existing: + # Update existing open session + query = ( + daily_participant_sessions.update() + .where(daily_participant_sessions.c.id == existing.id) + .values(left_at=session.left_at) + ) + await get_database().execute(query) + else: + # Out-of-order or first webhook: insert new record + query = insert(daily_participant_sessions).values(**session.model_dump()) + query = query.on_conflict_do_nothing(index_elements=["id"]) + await get_database().execute(query) + + async def get_by_meeting(self, meeting_id: str) -> list[DailyParticipantSession]: + """Get all participant sessions for a meeting (active and ended).""" + query = daily_participant_sessions.select().where( + daily_participant_sessions.c.meeting_id == meeting_id + ) + results = await get_database().fetch_all(query) + return [DailyParticipantSession(**result) for result in results] + + async def get_active_by_meeting( + self, meeting_id: str + ) -> list[DailyParticipantSession]: + """Get only active (not left) participant sessions for a meeting.""" + query = daily_participant_sessions.select().where( + sa.and_( + daily_participant_sessions.c.meeting_id == meeting_id, + daily_participant_sessions.c.left_at.is_(None), + ) + ) + results = await get_database().fetch_all(query) + return [DailyParticipantSession(**result) for result in results] + + +daily_participant_sessions_controller = DailyParticipantSessionController() diff --git a/server/reflector/video_platforms/base.py b/server/reflector/video_platforms/base.py index d208a75a..877114f7 100644 --- a/server/reflector/video_platforms/base.py +++ b/server/reflector/video_platforms/base.py @@ -1,10 +1,10 @@ from abc import ABC, abstractmethod from datetime import datetime -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Dict, Optional from ..schemas.platform import Platform from ..utils.string import NonEmptyString -from .models import MeetingData, VideoPlatformConfig +from .models import MeetingData, SessionData, VideoPlatformConfig if TYPE_CHECKING: from reflector.db.rooms import Room @@ -26,7 +26,8 @@ class VideoPlatformClient(ABC): pass @abstractmethod - async def get_room_sessions(self, room_name: str) -> List[Any] | None: + async def get_room_sessions(self, room_name: str) -> list[SessionData]: + """Get session history for a room.""" pass @abstractmethod 
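
Implementations now return typed SessionData objects instead of raw platform
payloads. A minimal conforming sketch, mirroring the test mock updated later
in this series (values hypothetical):

    async def get_room_sessions(self, room_name: str) -> list[SessionData]:
        # an open session keeps ended_at=None until the participant leaves
        return [
            SessionData(
                session_id="s1",
                started_at=datetime.now(timezone.utc),
                ended_at=None,
            )
        ]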
diff --git a/server/reflector/video_platforms/daily.py b/server/reflector/video_platforms/daily.py index ec45d965..7bec4864 100644 --- a/server/reflector/video_platforms/daily.py +++ b/server/reflector/video_platforms/daily.py @@ -3,10 +3,13 @@ import hmac from datetime import datetime from hashlib import sha256 from http import HTTPStatus -from typing import Any, Dict, List, Optional +from typing import Any, Dict, Optional import httpx +from reflector.db.daily_participant_sessions import ( + daily_participant_sessions_controller, +) from reflector.db.rooms import Room from reflector.logger import logger from reflector.storage import get_dailyco_storage @@ -15,7 +18,7 @@ from ..schemas.platform import Platform from ..utils.daily import DailyRoomName from ..utils.string import NonEmptyString from .base import ROOM_PREFIX_SEPARATOR, VideoPlatformClient -from .models import MeetingData, RecordingType, VideoPlatformConfig +from .models import MeetingData, RecordingType, SessionData, VideoPlatformConfig class DailyClient(VideoPlatformClient): @@ -61,16 +64,16 @@ class DailyClient(VideoPlatformClient): }, } - # Get storage config for passing to Daily API - daily_storage = get_dailyco_storage() - assert daily_storage.bucket_name, "S3 bucket must be configured" - data["properties"]["recordings_bucket"] = { - "bucket_name": daily_storage.bucket_name, - "bucket_region": daily_storage.region, - "assume_role_arn": daily_storage.role_credential, - "allow_api_access": True, - } - + # Only configure recordings_bucket if recording is enabled + if room.recording_type != self.RECORDING_NONE: + daily_storage = get_dailyco_storage() + assert daily_storage.bucket_name, "S3 bucket must be configured" + data["properties"]["recordings_bucket"] = { + "bucket_name": daily_storage.bucket_name, + "bucket_region": daily_storage.region, + "assume_role_arn": daily_storage.role_credential, + "allow_api_access": True, + } async with httpx.AsyncClient() as client: response = await client.post( f"{self.BASE_URL}/rooms", @@ -99,11 +102,49 @@ class DailyClient(VideoPlatformClient): extra_data=result, ) - async def get_room_sessions(self, room_name: str) -> List[Any] | None: - # no such api - return None + async def get_room_sessions(self, room_name: str) -> list[SessionData]: + """Get room session history from database (webhook-stored sessions). + + Daily.co doesn't provide historical session API, so we query our database + where participant.joined/left webhooks are stored. + """ + from reflector.db.meetings import meetings_controller + + meeting = await meetings_controller.get_by_room_name(room_name) + if not meeting: + return [] + + sessions = await daily_participant_sessions_controller.get_by_meeting( + meeting.id + ) + + return [ + SessionData( + session_id=s.id, + started_at=s.joined_at, + ended_at=s.left_at, + ) + for s in sessions + ] async def get_room_presence(self, room_name: str) -> Dict[str, Any]: + """Get room presence/session data for a Daily.co room. 
+ + Example response: + { + "total_count": 1, + "data": [ + { + "room": "w2pp2cf4kltgFACPKXmX", + "id": "d61cd7b2-a273-42b4-89bd-be763fd562c1", + "userId": "pbZ+ismP7dk=", + "userName": "Moishe", + "joinTime": "2023-01-01T20:53:19.000Z", + "duration": 2312 + } + ] + } + """ async with httpx.AsyncClient() as client: response = await client.get( f"{self.BASE_URL}/rooms/{room_name}/presence", @@ -114,6 +155,28 @@ class DailyClient(VideoPlatformClient): return response.json() async def get_meeting_participants(self, meeting_id: str) -> Dict[str, Any]: + """Get participant data for a specific Daily.co meeting. + + Example response: + { + "data": [ + { + "user_id": "4q47OTmqa/w=", + "participant_id": "d61cd7b2-a273-42b4-89bd-be763fd562c1", + "user_name": "Lindsey", + "join_time": 1672786813, + "duration": 150 + }, + { + "user_id": "pbZ+ismP7dk=", + "participant_id": "b3d56359-14d7-46af-ac8b-18f8c991f5f6", + "user_name": "Moishe", + "join_time": 1672786797, + "duration": 165 + } + ] + } + """ async with httpx.AsyncClient() as client: response = await client.get( f"{self.BASE_URL}/meetings/{meeting_id}/participants", diff --git a/server/reflector/video_platforms/models.py b/server/reflector/video_platforms/models.py index 82876888..648da251 100644 --- a/server/reflector/video_platforms/models.py +++ b/server/reflector/video_platforms/models.py @@ -1,18 +1,38 @@ +from datetime import datetime from typing import Any, Dict, Literal, Optional from pydantic import BaseModel, Field from reflector.schemas.platform import WHEREBY_PLATFORM, Platform +from reflector.utils.string import NonEmptyString RecordingType = Literal["none", "local", "cloud"] +class SessionData(BaseModel): + """Platform-agnostic session data. + + Represents a participant session in a meeting room, regardless of platform. + Used to determine if a meeting is still active or has ended. 
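+
+    For example, the worker treats a meeting as still active while any
+    session has no end time (see the process.py hunk later in this patch):
+
+        is_active = any(s.ended_at is None for s in sessions)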
+ """ + + session_id: NonEmptyString = Field(description="Unique session identifier") + started_at: datetime = Field(description="When session started (UTC)") + ended_at: datetime | None = Field( + description="When session ended (UTC), None if still active" + ) + + class MeetingData(BaseModel): platform: Platform - meeting_id: str = Field(description="Platform-specific meeting identifier") - room_url: str = Field(description="URL for participants to join") - host_room_url: str = Field(description="URL for hosts (may be same as room_url)") - room_name: str = Field(description="Human-readable room name") + meeting_id: NonEmptyString = Field( + description="Platform-specific meeting identifier" + ) + room_url: NonEmptyString = Field(description="URL for participants to join") + host_room_url: NonEmptyString = Field( + description="URL for hosts (may be same as room_url)" + ) + room_name: NonEmptyString = Field(description="Human-readable room name") extra_data: Dict[str, Any] = Field(default_factory=dict) class Config: diff --git a/server/reflector/video_platforms/whereby.py b/server/reflector/video_platforms/whereby.py index f856454a..f4775e89 100644 --- a/server/reflector/video_platforms/whereby.py +++ b/server/reflector/video_platforms/whereby.py @@ -4,7 +4,7 @@ import re import time from datetime import datetime from hashlib import sha256 -from typing import Any, Dict, Optional +from typing import Optional import httpx @@ -13,11 +13,8 @@ from reflector.storage import get_whereby_storage from ..schemas.platform import WHEREBY_PLATFORM, Platform from ..utils.string import NonEmptyString -from .base import ( - MeetingData, - VideoPlatformClient, - VideoPlatformConfig, -) +from .base import VideoPlatformClient +from .models import MeetingData, SessionData, VideoPlatformConfig from .whereby_utils import whereby_room_name_prefix @@ -80,15 +77,50 @@ class WherebyClient(VideoPlatformClient): extra_data=result, ) - async def get_room_sessions(self, room_name: str) -> Dict[str, Any]: + async def get_room_sessions(self, room_name: str) -> list[SessionData]: + """Get room session history from Whereby API. + + Whereby API returns: [{"sessionId": "...", "startedAt": "...", "endedAt": "..." | null}, ...] 
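+
+        Timestamps arrive as ISO-8601 with a trailing "Z" and are normalised
+        to timezone-aware datetimes below, e.g.:
+
+            datetime.fromisoformat("2025-01-01T00:00:00.000Z".replace("Z", "+00:00"))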
+ """ async with httpx.AsyncClient() as client: + """ + { + "cursor": "text", + "results": [ + { + "roomSessionId": "e2f29530-46ec-4cee-8b27-e565cb5bb2e9", + "roomName": "/room-prefix-793e9ec1-c686-423d-9043-9b7a10c553fd", + "startedAt": "2025-01-01T00:00:00.000Z", + "endedAt": "2025-01-01T01:00:00.000Z", + "totalParticipantMinutes": 124, + "totalRecorderMinutes": 120, + "totalStreamerMinutes": 120, + "totalUniqueParticipants": 4, + "totalUniqueRecorders": 3, + "totalUniqueStreamers": 2 + } + ] + }""" response = await client.get( f"{self.config.api_url}/insights/room-sessions?roomName={room_name}", headers=self.headers, timeout=self.TIMEOUT, ) response.raise_for_status() - return response.json().get("results", []) + results = response.json().get("results", []) + + return [ + SessionData( + session_id=s["roomSessionId"], + started_at=datetime.fromisoformat( + s["startedAt"].replace("Z", "+00:00") + ), + ended_at=datetime.fromisoformat(s["endedAt"].replace("Z", "+00:00")) + if s.get("endedAt") + else None, + ) + for s in results + ] async def delete_room(self, room_name: str) -> bool: return True diff --git a/server/reflector/views/daily.py b/server/reflector/views/daily.py index 6f51cd1e..baad97e9 100644 --- a/server/reflector/views/daily.py +++ b/server/reflector/views/daily.py @@ -1,9 +1,15 @@ import json +from datetime import datetime, timezone from typing import Any, Dict, Literal from fastapi import APIRouter, HTTPException, Request from pydantic import BaseModel +from reflector.db import get_database +from reflector.db.daily_participant_sessions import ( + DailyParticipantSession, + daily_participant_sessions_controller, +) from reflector.db.meetings import meetings_controller from reflector.logger import logger as _logger from reflector.settings import settings @@ -44,6 +50,24 @@ def _extract_room_name(event: DailyWebhookEvent) -> DailyRoomName | None: async def webhook(request: Request): """Handle Daily webhook events. + Example webhook payload: + { + "version": "1.0.0", + "type": "recording.ready-to-download", + "id": "rec-rtd-c3df927c-f738-4471-a2b7-066fa7e95a6b-1692124192", + "payload": { + "recording_id": "08fa0b24-9220-44c5-846c-3f116cf8e738", + "room_name": "Xcm97xRZ08b2dePKb78g", + "start_ts": 1692124183, + "status": "finished", + "max_participants": 1, + "duration": 9, + "share_token": "ntDCL5k98Ulq", #gitleaks:allow + "s3_key": "api-test-1j8fizhzd30c/Xcm97xRZ08b2dePKb78g/1692124183028" + }, + "event_ts": 1692124192 + } + Daily.co circuit-breaker: After 3+ failed responses (4xx/5xx), webhook state→FAILED, stops sending events. 
Reset: scripts/recreate_daily_webhook.py """ @@ -103,6 +127,32 @@ async def webhook(request: Request): return {"status": "ok"} +""" +{ + "version": "1.0.0", + "type": "participant.joined", + "id": "ptcpt-join-6497c79b-f326-4942-aef8-c36a29140ad1-1708972279961", + "payload": { + "room": "test", + "user_id": "6497c79b-f326-4942-aef8-c36a29140ad1", + "user_name": "testuser", + "session_id": "0c0d2dda-f21d-4cf9-ab56-86bf3c407ffa", + "joined_at": 1708972279.96, + "will_eject_at": 1708972299.541, + "owner": false, + "permissions": { + "hasPresence": true, + "canSend": true, + "canReceive": { "base": true }, + "canAdmin": false + } + }, + "event_ts": 1708972279.961 +} + +""" + + async def _handle_participant_joined(event: DailyWebhookEvent): daily_room_name = _extract_room_name(event) if not daily_room_name: @@ -110,29 +160,111 @@ async def _handle_participant_joined(event: DailyWebhookEvent): return meeting = await meetings_controller.get_by_room_name(daily_room_name) - if meeting: - await meetings_controller.increment_num_clients(meeting.id) - logger.info( - "Participant joined", - meeting_id=meeting.id, - room_name=daily_room_name, - recording_type=meeting.recording_type, - recording_trigger=meeting.recording_trigger, - ) - else: + if not meeting: logger.warning( "participant.joined: meeting not found", room_name=daily_room_name ) + return + + payload = event.payload + logger.warning({"payload": payload}) + joined_at = datetime.fromtimestamp(payload["joined_at"], tz=timezone.utc) + session_id = f"{meeting.id}:{payload['session_id']}" + + session = DailyParticipantSession( + id=session_id, + meeting_id=meeting.id, + room_id=meeting.room_id, + session_id=payload["session_id"], + user_id=payload.get("user_id", None), + user_name=payload["user_name"], + joined_at=joined_at, + left_at=None, + ) + + # num_clients serves as a projection/cache of active session count for Daily.co + # Both operations must succeed or fail together to maintain consistency + async with get_database().transaction(): + await meetings_controller.increment_num_clients(meeting.id) + await daily_participant_sessions_controller.upsert_joined(session) + + logger.info( + "Participant joined", + meeting_id=meeting.id, + room_name=daily_room_name, + user_id=payload.get("user_id", None), + user_name=payload.get("user_name"), + session_id=session_id, + ) + + +""" +{ + "version": "1.0.0", + "type": "participant.left", + "id": "ptcpt-left-16168c97-f973-4eae-9642-020fe3fda5db-1708972302986", + "payload": { + "room": "test", + "user_id": "16168c97-f973-4eae-9642-020fe3fda5db", + "user_name": "bipol", + "session_id": "0c0d2dda-f21d-4cf9-ab56-86bf3c407ffa", + "joined_at": 1708972291.567, + "will_eject_at": null, + "owner": false, + "permissions": { + "hasPresence": true, + "canSend": true, + "canReceive": { "base": true }, + "canAdmin": false + }, + "duration": 11.419000148773193 + }, + "event_ts": 1708972302.986 +} +""" async def _handle_participant_left(event: DailyWebhookEvent): room_name = _extract_room_name(event) if not room_name: + logger.warning("participant.left: no room in payload", payload=event.payload) return meeting = await meetings_controller.get_by_room_name(room_name) - if meeting: + if not meeting: + logger.warning("participant.left: meeting not found", room_name=room_name) + return + + payload = event.payload + joined_at = datetime.fromtimestamp(payload["joined_at"], tz=timezone.utc) + left_at = datetime.fromtimestamp(event.event_ts, tz=timezone.utc) + session_id = f"{meeting.id}:{payload['session_id']}" + + session = 
DailyParticipantSession( + id=session_id, + meeting_id=meeting.id, + room_id=meeting.room_id, + session_id=payload["session_id"], + user_id=payload.get("user_id", None), + user_name=payload["user_name"], + joined_at=joined_at, + left_at=left_at, + ) + + # num_clients serves as a projection/cache of active session count for Daily.co + # Both operations must succeed or fail together to maintain consistency + async with get_database().transaction(): await meetings_controller.decrement_num_clients(meeting.id) + await daily_participant_sessions_controller.upsert_left(session) + + logger.info( + "Participant left", + meeting_id=meeting.id, + room_name=room_name, + user_id=payload.get("user_id", None), + duration=payload.get("duration"), + session_id=session_id, + ) async def _handle_recording_started(event: DailyWebhookEvent): diff --git a/server/reflector/worker/ics_sync.py b/server/reflector/worker/ics_sync.py index 4d72d4ae..6881dfa2 100644 --- a/server/reflector/worker/ics_sync.py +++ b/server/reflector/worker/ics_sync.py @@ -107,7 +107,7 @@ async def create_upcoming_meetings_for_event(event, create_window, room: Room): client = create_platform_client(get_platform(room.platform)) meeting_data = await client.create_meeting( - "", + room.name, end_date=end_date, room=room, ) diff --git a/server/reflector/worker/process.py b/server/reflector/worker/process.py index 47cbb1cb..dd9c1059 100644 --- a/server/reflector/worker/process.py +++ b/server/reflector/worker/process.py @@ -335,15 +335,15 @@ async def process_meetings(): Uses distributed locking to prevent race conditions when multiple workers process the same meeting simultaneously. """ - logger.debug("Processing meetings") meetings = await meetings_controller.get_all_active() + logger.info(f"Processing {len(meetings)} meetings") current_time = datetime.now(timezone.utc) redis_client = get_redis_client() processed_count = 0 skipped_count = 0 - for meeting in meetings: logger_ = logger.bind(meeting_id=meeting.id, room_name=meeting.room_name) + logger_.info("Processing meeting") lock_key = f"meeting_process_lock:{meeting.id}" lock = redis_client.lock(lock_key, timeout=120) @@ -359,21 +359,23 @@ async def process_meetings(): if end_date.tzinfo is None: end_date = end_date.replace(tzinfo=timezone.utc) - # This API call could be slow, extend lock if needed client = create_platform_client(meeting.platform) room_sessions = await client.get_room_sessions(meeting.room_name) try: - # Extend lock after slow operation to ensure we still hold it + # Extend lock after operation to ensure we still hold it lock.extend(120, replace_ttl=True) except LockError: logger_.warning("Lost lock for meeting, skipping") continue has_active_sessions = room_sessions and any( - rs["endedAt"] is None for rs in room_sessions + s.ended_at is None for s in room_sessions ) has_had_sessions = bool(room_sessions) + logger_.info( + f"found {has_active_sessions} active sessions, had {has_had_sessions}" + ) if has_active_sessions: logger_.debug("Meeting still has active sessions, keep it") diff --git a/server/scripts/list_daily_webhooks.py b/server/scripts/list_daily_webhooks.py new file mode 100755 index 00000000..c3c13568 --- /dev/null +++ b/server/scripts/list_daily_webhooks.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 + +import asyncio +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +import httpx + +from reflector.settings import settings + + +async def list_webhooks(): + """ + List all Daily.co webhooks for this account. 
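+
+    Typical invocation (assumes DAILY_API_KEY is configured in the environment):
+
+        python scripts/list_daily_webhooks.py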
+    """
+    if not settings.DAILY_API_KEY:
+        print("Error: DAILY_API_KEY not set")
+        return 1
+
+    headers = {
+        "Authorization": f"Bearer {settings.DAILY_API_KEY}",
+        "Content-Type": "application/json",
+    }
+
+    async with httpx.AsyncClient() as client:
+        try:
+            """
+            Daily.co webhook list response format:
+            [
+                {
+                    "uuid": "0b4e4c7c-5eaf-46fe-990b-a3752f5684f5",
+                    "url": "{{webhook_url}}",
+                    "hmac": "NQrSA5z0FkJ44QPrFerW7uCc5kdNLv3l2FDEKDanL1U=",
+                    "basicAuth": null,
+                    "eventTypes": [
+                        "recording.started",
+                        "recording.ready-to-download"
+                    ],
+                    "state": "ACTIVE",
+                    "failedCount": 0,
+                    "lastMomentPushed": "2023-08-15T18:29:52.000Z",
+                    "domainId": "{{domain_id}}",
+                    "createdAt": "2023-08-15T18:28:30.000Z",
+                    "updatedAt": "2023-08-15T18:29:52.000Z"
+                }
+            ]
+            """
+            resp = await client.get(
+                "https://api.daily.co/v1/webhooks",
+                headers=headers,
+            )
+            resp.raise_for_status()
+            webhooks = resp.json()
+
+            if not webhooks:
+                print("No webhooks found")
+                return 0
+
+            print(f"Found {len(webhooks)} webhook(s):\n")
+
+            for webhook in webhooks:
+                print("=" * 80)
+                print(f"UUID: {webhook['uuid']}")
+                print(f"URL: {webhook['url']}")
+                print(f"State: {webhook['state']}")
+                print(f"Event Types: {', '.join(webhook.get('eventTypes', []))}")
+                print(
+                    f"HMAC Secret: {'✓ Configured' if webhook.get('hmac') else '✗ Not set'}"
+                )
+                print()
+
+            print("=" * 80)
+            print(
+                f"\nCurrent DAILY_WEBHOOK_UUID in settings: {settings.DAILY_WEBHOOK_UUID or '(not set)'}"
+            )
+
+            return 0
+
+        except httpx.HTTPStatusError as e:
+            print(f"Error fetching webhooks: {e}")
+            print(f"Response: {e.response.text}")
+            return 1
+        except Exception as e:
+            print(f"Unexpected error: {e}")
+            return 1
+
+
+if __name__ == "__main__":
+    sys.exit(asyncio.run(list_webhooks()))
diff --git a/server/tests/mocks/mock_platform.py b/server/tests/mocks/mock_platform.py
index 0f84a271..b4d9ae90 100644
--- a/server/tests/mocks/mock_platform.py
+++ b/server/tests/mocks/mock_platform.py
@@ -3,9 +3,11 @@ from datetime import datetime
 from typing import Any, Dict, Literal, Optional
 
 from reflector.db.rooms import Room
+from reflector.utils.string import NonEmptyString
 from reflector.video_platforms.base import (
     ROOM_PREFIX_SEPARATOR,
     MeetingData,
+    SessionData,
     VideoPlatformClient,
     VideoPlatformConfig,
 )
@@ -49,22 +51,18 @@ class MockPlatformClient(VideoPlatformClient):
             extra_data={"mock": True},
         )
 
-    async def get_room_sessions(self, room_name: str) -> Dict[str, Any]:
+    async def get_room_sessions(self, room_name: NonEmptyString) -> list[SessionData]:
         if room_name not in self._rooms:
-            return {"error": "Room not found"}
+            return []
 
         room_data = self._rooms[room_name]
-        return {
-            "roomName": room_name,
-            "sessions": [
-                {
-                    "sessionId": room_data["id"],
-                    "startTime": datetime.utcnow().isoformat(),
-                    "participants": room_data["participants"],
-                    "isActive": room_data["is_active"],
-                }
-            ],
-        }
+        return [
+            SessionData(
+                session_id=room_data["id"],
+                started_at=datetime.utcnow(),
+                ended_at=None if room_data["is_active"] else datetime.utcnow(),
+            )
+        ]
 
     async def delete_room(self, room_name: str) -> bool:
         if room_name in self._rooms:
diff --git a/server/tests/test_transcripts_process.py b/server/tests/test_transcripts_process.py
index 5f45cf4b..3a0614c1 100644
--- a/server/tests/test_transcripts_process.py
+++ b/server/tests/test_transcripts_process.py
@@ -1,5 +1,6 @@
 import asyncio
 import time
+from unittest.mock import patch
 
 import pytest
 from httpx import ASGITransport, AsyncClient
@@ -101,3 +102,113 @@ async def test_transcript_process(
     assert
response.status_code == 200 assert len(response.json()) == 1 assert "Hello world. How are you today?" in response.json()[0]["transcript"] + + +@pytest.mark.usefixtures("setup_database") +@pytest.mark.asyncio +async def test_whereby_recording_uses_file_pipeline(client): + """Test that Whereby recordings (bucket_name but no track_keys) use file pipeline""" + from datetime import datetime, timezone + + from reflector.db.recordings import Recording, recordings_controller + from reflector.db.transcripts import transcripts_controller + + # Create transcript with Whereby recording (has bucket_name, no track_keys) + transcript = await transcripts_controller.add( + "", + source_kind="room", + source_language="en", + target_language="en", + user_id="test-user", + share_mode="public", + ) + + recording = await recordings_controller.create( + Recording( + bucket_name="whereby-bucket", + object_key="test-recording.mp4", # gitleaks:allow + meeting_id="test-meeting", + recorded_at=datetime.now(timezone.utc), + track_keys=None, # Whereby recordings have no track_keys + ) + ) + + await transcripts_controller.update( + transcript, {"recording_id": recording.id, "status": "uploaded"} + ) + + with ( + patch( + "reflector.views.transcripts_process.task_pipeline_file_process" + ) as mock_file_pipeline, + patch( + "reflector.views.transcripts_process.task_pipeline_multitrack_process" + ) as mock_multitrack_pipeline, + ): + response = await client.post(f"/transcripts/{transcript.id}/process") + + assert response.status_code == 200 + assert response.json()["status"] == "ok" + + # Whereby recordings should use file pipeline + mock_file_pipeline.delay.assert_called_once_with(transcript_id=transcript.id) + mock_multitrack_pipeline.delay.assert_not_called() + + +@pytest.mark.usefixtures("setup_database") +@pytest.mark.asyncio +async def test_dailyco_recording_uses_multitrack_pipeline(client): + """Test that Daily.co recordings (bucket_name + track_keys) use multitrack pipeline""" + from datetime import datetime, timezone + + from reflector.db.recordings import Recording, recordings_controller + from reflector.db.transcripts import transcripts_controller + + # Create transcript with Daily.co multitrack recording + transcript = await transcripts_controller.add( + "", + source_kind="room", + source_language="en", + target_language="en", + user_id="test-user", + share_mode="public", + ) + + track_keys = [ + "recordings/test-room/track1.webm", + "recordings/test-room/track2.webm", + ] + recording = await recordings_controller.create( + Recording( + bucket_name="daily-bucket", + object_key="recordings/test-room", + meeting_id="test-meeting", + track_keys=track_keys, + recorded_at=datetime.now(timezone.utc), + ) + ) + + await transcripts_controller.update( + transcript, {"recording_id": recording.id, "status": "uploaded"} + ) + + with ( + patch( + "reflector.views.transcripts_process.task_pipeline_file_process" + ) as mock_file_pipeline, + patch( + "reflector.views.transcripts_process.task_pipeline_multitrack_process" + ) as mock_multitrack_pipeline, + ): + response = await client.post(f"/transcripts/{transcript.id}/process") + + assert response.status_code == 200 + assert response.json()["status"] == "ok" + + # Daily.co multitrack recordings should use multitrack pipeline + mock_multitrack_pipeline.delay.assert_called_once_with( + transcript_id=transcript.id, + bucket_name="daily-bucket", + track_keys=track_keys, + ) + mock_file_pipeline.delay.assert_not_called() From 2801ab3643cc98c6397c9a9926cfa566498555e1 Mon Sep 17 
00:00:00 2001 From: Mathieu Virbel Date: Fri, 14 Nov 2025 15:10:26 -0600 Subject: [PATCH 72/77] chore(main): release 0.18.0 (#722) --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 812a1880..083f5b2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [0.18.0](https://github.com/Monadical-SAS/reflector/compare/v0.17.0...v0.18.0) (2025-11-14) + + +### Features + +* daily QOL: participants dictionary ([#721](https://github.com/Monadical-SAS/reflector/issues/721)) ([b20cad7](https://github.com/Monadical-SAS/reflector/commit/b20cad76e69fb6a76405af299a005f1ddcf60eae)) + + +### Bug Fixes + +* add proccessing page to file upload and reprocessing ([#650](https://github.com/Monadical-SAS/reflector/issues/650)) ([28a7258](https://github.com/Monadical-SAS/reflector/commit/28a7258e45317b78e60e6397be2bc503647eaace)) +* copy transcript ([#674](https://github.com/Monadical-SAS/reflector/issues/674)) ([a9a4f32](https://github.com/Monadical-SAS/reflector/commit/a9a4f32324f66c838e081eee42bb9502f38c1db1)) + ## [0.17.0](https://github.com/Monadical-SAS/reflector/compare/v0.16.0...v0.17.0) (2025-11-13) From 18ed7133693653ef4ddac6c659a8c14b320d1657 Mon Sep 17 00:00:00 2001 From: Mathieu Virbel Date: Tue, 18 Nov 2025 09:15:29 -0600 Subject: [PATCH 73/77] fix: parakeet vad not getting the end timestamp (#728) --- gpu/modal_deployments/reflector_transcriber_parakeet.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/gpu/modal_deployments/reflector_transcriber_parakeet.py b/gpu/modal_deployments/reflector_transcriber_parakeet.py index 947fccca..5f326b77 100644 --- a/gpu/modal_deployments/reflector_transcriber_parakeet.py +++ b/gpu/modal_deployments/reflector_transcriber_parakeet.py @@ -81,9 +81,9 @@ image = ( "cuda-python==12.8.0", "fastapi==0.115.12", "numpy<2", - "librosa==0.10.1", + "librosa==0.11.0", "requests", - "silero-vad==5.1.0", + "silero-vad==6.2.0", "torch", ) .entrypoint([]) # silence chatty logs by container on start @@ -306,6 +306,7 @@ class TranscriberParakeetFile: ) -> Generator[TimeSegment, None, None]: """Generate speech segments using VAD with start/end sample indices""" vad_iterator = VADIterator(self.vad_model, sampling_rate=SAMPLERATE) + audio_duration = len(audio_array) / float(SAMPLERATE) window_size = VAD_CONFIG["window_size"] start = None @@ -332,6 +333,10 @@ class TranscriberParakeetFile: yield TimeSegment(start_time, end_time) start = None + if start is not None: + start_time = start / float(SAMPLERATE) + yield TimeSegment(start_time, audio_duration) + vad_iterator.reset_states() def batch_speech_segments( From 616092a9bb260e20732e81b406fbebf4b5421e6f Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Tue, 18 Nov 2025 10:40:46 -0500 Subject: [PATCH 74/77] keep only debug log for tracks with no words (#724) Co-authored-by: Igor Loskutov --- server/reflector/pipelines/main_multitrack_pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/reflector/pipelines/main_multitrack_pipeline.py b/server/reflector/pipelines/main_multitrack_pipeline.py index addcd9b4..f91c8250 100644 --- a/server/reflector/pipelines/main_multitrack_pipeline.py +++ b/server/reflector/pipelines/main_multitrack_pipeline.py @@ -582,7 +582,8 @@ class PipelineMainMultitrack(PipelineMainBase): t = await self.transcribe_file(padded_url, transcript.source_language) if not t.words: - continue + self.logger.debug(f"no words in track {idx}") + # not skipping, it may be silence or 
indistinguishable mumbling for w in t.words: w.speaker = idx From 3e47c2c0573504858e0d2e1798b6ed31f16b4a5d Mon Sep 17 00:00:00 2001 From: Sergey Mankovsky Date: Tue, 18 Nov 2025 21:04:32 +0100 Subject: [PATCH 75/77] fix: start raw tracks recording (#729) * Start raw tracks recording * Bring back recording properties --- www/app/[roomName]/components/DailyRoom.tsx | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/www/app/[roomName]/components/DailyRoom.tsx b/www/app/[roomName]/components/DailyRoom.tsx index 920f8624..cfefbf6a 100644 --- a/www/app/[roomName]/components/DailyRoom.tsx +++ b/www/app/[roomName]/components/DailyRoom.tsx @@ -60,6 +60,15 @@ export default function DailyRoom({ meeting }: DailyRoomProps) { } frame.on("left-meeting", handleLeave); + + frame.on("joined-meeting", async () => { + try { + await frame.startRecording({ type: "raw-tracks" }); + } catch (error) { + console.error("Failed to start recording:", error); + } + }); + await frame.join({ url: roomUrl }); } catch (error) { console.error("Error creating Daily frame:", error); From 4287f8b8aeee60e51db7539f4dcbda5f6e696bd8 Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Fri, 21 Nov 2025 10:24:04 -0500 Subject: [PATCH 76/77] feat: dailyco api module (#725) * dailyco api module (no-mistakes) * daily co library self-review * uncurse * self-review: daily resource leak, uniform types, enable_recording bomb, daily custom error, video_platforms/daily typing, daily timestamp dry * dailyco docs parser * remove generated daily docs --------- Co-authored-by: Igor Loskutov --- server/reflector/dailyco_api/README.md | 6 + server/reflector/dailyco_api/__init__.py | 96 ++++ server/reflector/dailyco_api/client.py | 527 ++++++++++++++++++ server/reflector/dailyco_api/requests.py | 158 ++++++ server/reflector/dailyco_api/responses.py | 182 ++++++ server/reflector/dailyco_api/webhook_utils.py | 229 ++++++++ server/reflector/dailyco_api/webhooks.py | 199 +++++++ server/reflector/video_platforms/daily.py | 252 +++------ server/reflector/views/daily.py | 70 +-- server/scripts/list_daily_webhooks.py | 58 +- server/scripts/recreate_daily_webhook.py | 84 +-- 11 files changed, 1558 insertions(+), 303 deletions(-) create mode 100644 server/reflector/dailyco_api/README.md create mode 100644 server/reflector/dailyco_api/__init__.py create mode 100644 server/reflector/dailyco_api/client.py create mode 100644 server/reflector/dailyco_api/requests.py create mode 100644 server/reflector/dailyco_api/responses.py create mode 100644 server/reflector/dailyco_api/webhook_utils.py create mode 100644 server/reflector/dailyco_api/webhooks.py diff --git a/server/reflector/dailyco_api/README.md b/server/reflector/dailyco_api/README.md new file mode 100644 index 00000000..88ec2cc3 --- /dev/null +++ b/server/reflector/dailyco_api/README.md @@ -0,0 +1,6 @@ +anything about Daily.co api interaction + +- webhook event shapes +- REST api client + +No REST api client existing found in the wild; the official lib is about working with videocall as a bot \ No newline at end of file diff --git a/server/reflector/dailyco_api/__init__.py b/server/reflector/dailyco_api/__init__.py new file mode 100644 index 00000000..1a65478b --- /dev/null +++ b/server/reflector/dailyco_api/__init__.py @@ -0,0 +1,96 @@ +""" +Daily.co API Module +""" + +# Client +from .client import DailyApiClient, DailyApiError + +# Request models +from .requests import ( + CreateMeetingTokenRequest, + CreateRoomRequest, + CreateWebhookRequest, + MeetingTokenProperties, + RecordingsBucketConfig, + 
RoomProperties, + UpdateWebhookRequest, +) + +# Response models +from .responses import ( + MeetingParticipant, + MeetingParticipantsResponse, + MeetingResponse, + MeetingTokenResponse, + RecordingResponse, + RecordingS3Info, + RoomPresenceParticipant, + RoomPresenceResponse, + RoomResponse, + WebhookResponse, +) + +# Webhook utilities +from .webhook_utils import ( + extract_room_name, + parse_participant_joined, + parse_participant_left, + parse_recording_error, + parse_recording_ready, + parse_recording_started, + parse_webhook_payload, + verify_webhook_signature, +) + +# Webhook models +from .webhooks import ( + DailyTrack, + DailyWebhookEvent, + ParticipantJoinedPayload, + ParticipantLeftPayload, + RecordingErrorPayload, + RecordingReadyToDownloadPayload, + RecordingStartedPayload, +) + +__all__ = [ + # Client + "DailyApiClient", + "DailyApiError", + # Requests + "CreateRoomRequest", + "RoomProperties", + "RecordingsBucketConfig", + "CreateMeetingTokenRequest", + "MeetingTokenProperties", + "CreateWebhookRequest", + "UpdateWebhookRequest", + # Responses + "RoomResponse", + "RoomPresenceResponse", + "RoomPresenceParticipant", + "MeetingParticipantsResponse", + "MeetingParticipant", + "MeetingResponse", + "RecordingResponse", + "RecordingS3Info", + "MeetingTokenResponse", + "WebhookResponse", + # Webhooks + "DailyWebhookEvent", + "DailyTrack", + "ParticipantJoinedPayload", + "ParticipantLeftPayload", + "RecordingStartedPayload", + "RecordingReadyToDownloadPayload", + "RecordingErrorPayload", + # Webhook utilities + "verify_webhook_signature", + "extract_room_name", + "parse_webhook_payload", + "parse_participant_joined", + "parse_participant_left", + "parse_recording_started", + "parse_recording_ready", + "parse_recording_error", +] diff --git a/server/reflector/dailyco_api/client.py b/server/reflector/dailyco_api/client.py new file mode 100644 index 00000000..24221bb2 --- /dev/null +++ b/server/reflector/dailyco_api/client.py @@ -0,0 +1,527 @@ +""" +Daily.co API Client + +Complete async client for Daily.co REST API with Pydantic models. + +Reference: https://docs.daily.co/reference/rest-api +""" + +from http import HTTPStatus +from typing import Any + +import httpx +import structlog + +from reflector.utils.string import NonEmptyString + +from .requests import ( + CreateMeetingTokenRequest, + CreateRoomRequest, + CreateWebhookRequest, + UpdateWebhookRequest, +) +from .responses import ( + MeetingParticipantsResponse, + MeetingResponse, + MeetingTokenResponse, + RecordingResponse, + RoomPresenceResponse, + RoomResponse, + WebhookResponse, +) + +logger = structlog.get_logger(__name__) + + +class DailyApiError(Exception): + """Daily.co API error with full request/response context.""" + + def __init__(self, operation: str, response: httpx.Response): + self.operation = operation + self.response = response + self.status_code = response.status_code + self.response_body = response.text + self.url = str(response.url) + self.request_body = ( + response.request.content.decode() if response.request.content else None + ) + + super().__init__( + f"Daily.co API error: {operation} failed with status {self.status_code}" + ) + + +class DailyApiClient: + """ + Complete async client for Daily.co REST API. 
+ + Usage: + # Direct usage + client = DailyApiClient(api_key="your_api_key") + room = await client.create_room(CreateRoomRequest(name="my-room")) + await client.close() # Clean up when done + + # Context manager (recommended) + async with DailyApiClient(api_key="your_api_key") as client: + room = await client.create_room(CreateRoomRequest(name="my-room")) + """ + + BASE_URL = "https://api.daily.co/v1" + DEFAULT_TIMEOUT = 10.0 + + def __init__( + self, + api_key: NonEmptyString, + webhook_secret: NonEmptyString | None = None, + timeout: float = DEFAULT_TIMEOUT, + base_url: NonEmptyString | None = None, + ): + """ + Initialize Daily.co API client. + + Args: + api_key: Daily.co API key (Bearer token) + webhook_secret: Base64-encoded HMAC secret for webhook verification. + Must match the 'hmac' value provided when creating webhooks. + Generate with: base64.b64encode(os.urandom(32)).decode() + timeout: Default request timeout in seconds + base_url: Override base URL (for testing) + """ + self.api_key = api_key + self.webhook_secret = webhook_secret + self.timeout = timeout + self.base_url = base_url or self.BASE_URL + + self.headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + self._client: httpx.AsyncClient | None = None + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + async def _get_client(self) -> httpx.AsyncClient: + if self._client is None: + self._client = httpx.AsyncClient(timeout=self.timeout) + return self._client + + async def close(self): + if self._client is not None: + await self._client.aclose() + self._client = None + + async def _handle_response( + self, response: httpx.Response, operation: str + ) -> dict[str, Any]: + """ + Handle API response with error logging. + + Args: + response: HTTP response + operation: Operation name for logging (e.g., "create_room") + + Returns: + Parsed JSON response + + Raises: + DailyApiError: If request failed with full context + """ + if response.status_code >= 400: + logger.error( + f"Daily.co API error: {operation}", + status_code=response.status_code, + response_body=response.text, + request_body=response.request.content.decode() + if response.request.content + else None, + url=str(response.url), + ) + raise DailyApiError(operation, response) + + return response.json() + + # ============================================================================ + # ROOMS + # ============================================================================ + + async def create_room(self, request: CreateRoomRequest) -> RoomResponse: + """ + Create a new Daily.co room. + + Reference: https://docs.daily.co/reference/rest-api/rooms/create-room + + Args: + request: Room creation request with name, privacy, and properties + + Returns: + Created room data including URL and ID + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.post( + f"{self.base_url}/rooms", + headers=self.headers, + json=request.model_dump(exclude_none=True), + ) + + data = await self._handle_response(response, "create_room") + return RoomResponse(**data) + + async def get_room(self, room_name: NonEmptyString) -> RoomResponse: + """ + Get room configuration. 
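+
+        A minimal call (room name hypothetical):
+
+            room = await client.get_room("my-room")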
+ + Args: + room_name: Daily.co room name + + Returns: + Room configuration data + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.get( + f"{self.base_url}/rooms/{room_name}", + headers=self.headers, + ) + + data = await self._handle_response(response, "get_room") + return RoomResponse(**data) + + async def get_room_presence( + self, room_name: NonEmptyString + ) -> RoomPresenceResponse: + """ + Get current participants in a room (real-time presence). + + Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence + + Args: + room_name: Daily.co room name + + Returns: + List of currently present participants with join time and duration + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.get( + f"{self.base_url}/rooms/{room_name}/presence", + headers=self.headers, + ) + + data = await self._handle_response(response, "get_room_presence") + return RoomPresenceResponse(**data) + + async def delete_room(self, room_name: NonEmptyString) -> None: + """ + Delete a room (idempotent - succeeds even if room doesn't exist). + + Reference: https://docs.daily.co/reference/rest-api/rooms/delete-room + + Args: + room_name: Daily.co room name + + Raises: + httpx.HTTPStatusError: If API request fails (except 404) + """ + client = await self._get_client() + response = await client.delete( + f"{self.base_url}/rooms/{room_name}", + headers=self.headers, + ) + + # Idempotent delete - 404 means already deleted + if response.status_code == HTTPStatus.NOT_FOUND: + logger.debug("Room not found (already deleted)", room_name=room_name) + return + + await self._handle_response(response, "delete_room") + + # ============================================================================ + # MEETINGS + # ============================================================================ + + async def get_meeting(self, meeting_id: NonEmptyString) -> MeetingResponse: + """ + Get full meeting information including participants. + + Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information + + Args: + meeting_id: Daily.co meeting/session ID + + Returns: + Meeting metadata including room, duration, participants, and status + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.get( + f"{self.base_url}/meetings/{meeting_id}", + headers=self.headers, + ) + + data = await self._handle_response(response, "get_meeting") + return MeetingResponse(**data) + + async def get_meeting_participants( + self, + meeting_id: NonEmptyString, + limit: int | None = None, + joined_after: NonEmptyString | None = None, + joined_before: NonEmptyString | None = None, + ) -> MeetingParticipantsResponse: + """ + Get historical participant data from a completed meeting (paginated). + + Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants + + Args: + meeting_id: Daily.co meeting/session ID + limit: Maximum number of participant records to return + joined_after: Return participants who joined after this participant_id + joined_before: Return participants who joined before this participant_id + + Returns: + List of participants with join times and duration + + Raises: + httpx.HTTPStatusError: If API request fails (404 when no more participants) + + Note: + For pagination, use joined_after with the last participant_id from previous response. 
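+
+            Pagination sketch (field names assumed from the Daily.co
+            participants payload shown earlier in this series):
+
+                page = await client.get_meeting_participants(meeting_id, limit=100)
+                last_id = page.data[-1].participant_id
+                more = await client.get_meeting_participants(meeting_id, joined_after=last_id)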
+ Returns 404 when no more participants remain. + """ + params = {} + if limit is not None: + params["limit"] = limit + if joined_after is not None: + params["joined_after"] = joined_after + if joined_before is not None: + params["joined_before"] = joined_before + + client = await self._get_client() + response = await client.get( + f"{self.base_url}/meetings/{meeting_id}/participants", + headers=self.headers, + params=params, + ) + + data = await self._handle_response(response, "get_meeting_participants") + return MeetingParticipantsResponse(**data) + + # ============================================================================ + # RECORDINGS + # ============================================================================ + + async def get_recording(self, recording_id: NonEmptyString) -> RecordingResponse: + """ + Get recording metadata and status. + + Reference: https://docs.daily.co/reference/rest-api/recordings + + Args: + recording_id: Daily.co recording ID + + Returns: + Recording metadata including status, duration, and S3 info + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.get( + f"{self.base_url}/recordings/{recording_id}", + headers=self.headers, + ) + + data = await self._handle_response(response, "get_recording") + return RecordingResponse(**data) + + # ============================================================================ + # MEETING TOKENS + # ============================================================================ + + async def create_meeting_token( + self, request: CreateMeetingTokenRequest + ) -> MeetingTokenResponse: + """ + Create a meeting token for participant authentication. + + Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token + + Args: + request: Token properties including room name, user_id, permissions + + Returns: + JWT meeting token + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.post( + f"{self.base_url}/meeting-tokens", + headers=self.headers, + json=request.model_dump(exclude_none=True), + ) + + data = await self._handle_response(response, "create_meeting_token") + return MeetingTokenResponse(**data) + + # ============================================================================ + # WEBHOOKS + # ============================================================================ + + async def list_webhooks(self) -> list[WebhookResponse]: + """ + List all configured webhooks for this account. + + Reference: https://docs.daily.co/reference/rest-api/webhooks + + Returns: + List of webhook configurations + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.get( + f"{self.base_url}/webhooks", + headers=self.headers, + ) + + data = await self._handle_response(response, "list_webhooks") + + # Daily.co returns array directly (not paginated) + if isinstance(data, list): + return [WebhookResponse(**wh) for wh in data] + + # Future-proof: handle potential pagination envelope + if isinstance(data, dict) and "data" in data: + return [WebhookResponse(**wh) for wh in data["data"]] + + logger.warning("Unexpected webhook list response format", data=data) + return [] + + async def create_webhook(self, request: CreateWebhookRequest) -> WebhookResponse: + """ + Create a new webhook subscription. 
+ + Reference: https://docs.daily.co/reference/rest-api/webhooks + + Args: + request: Webhook configuration with URL, event types, and HMAC secret + + Returns: + Created webhook with UUID and state + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.post( + f"{self.base_url}/webhooks", + headers=self.headers, + json=request.model_dump(exclude_none=True), + ) + + data = await self._handle_response(response, "create_webhook") + return WebhookResponse(**data) + + async def update_webhook( + self, webhook_uuid: NonEmptyString, request: UpdateWebhookRequest + ) -> WebhookResponse: + """ + Update webhook configuration. + + Note: Daily.co may not support PATCH for all fields. + Common pattern is delete + recreate. + + Reference: https://docs.daily.co/reference/rest-api/webhooks + + Args: + webhook_uuid: Webhook UUID to update + request: Updated webhook configuration + + Returns: + Updated webhook configuration + + Raises: + httpx.HTTPStatusError: If API request fails + """ + client = await self._get_client() + response = await client.patch( + f"{self.base_url}/webhooks/{webhook_uuid}", + headers=self.headers, + json=request.model_dump(exclude_none=True), + ) + + data = await self._handle_response(response, "update_webhook") + return WebhookResponse(**data) + + async def delete_webhook(self, webhook_uuid: NonEmptyString) -> None: + """ + Delete a webhook. + + Reference: https://docs.daily.co/reference/rest-api/webhooks + + Args: + webhook_uuid: Webhook UUID to delete + + Raises: + httpx.HTTPStatusError: If webhook not found or deletion fails + """ + client = await self._get_client() + response = await client.delete( + f"{self.base_url}/webhooks/{webhook_uuid}", + headers=self.headers, + ) + + await self._handle_response(response, "delete_webhook") + + # ============================================================================ + # HELPER METHODS + # ============================================================================ + + async def find_webhook_by_url(self, url: NonEmptyString) -> WebhookResponse | None: + """ + Find a webhook by its URL. + + Args: + url: Webhook endpoint URL to search for + + Returns: + Webhook if found, None otherwise + """ + webhooks = await self.list_webhooks() + for webhook in webhooks: + if webhook.url == url: + return webhook + return None + + async def find_webhooks_by_pattern( + self, pattern: NonEmptyString + ) -> list[WebhookResponse]: + """ + Find webhooks matching a URL pattern (e.g., 'ngrok'). + + Args: + pattern: String to match in webhook URLs + + Returns: + List of matching webhooks + """ + webhooks = await self.list_webhooks() + return [wh for wh in webhooks if pattern in wh.url] diff --git a/server/reflector/dailyco_api/requests.py b/server/reflector/dailyco_api/requests.py new file mode 100644 index 00000000..e943b90f --- /dev/null +++ b/server/reflector/dailyco_api/requests.py @@ -0,0 +1,158 @@ +""" +Daily.co API Request Models + +Reference: https://docs.daily.co/reference/rest-api +""" + +from typing import List, Literal + +from pydantic import BaseModel, Field + +from reflector.utils.string import NonEmptyString + + +class RecordingsBucketConfig(BaseModel): + """ + S3 bucket configuration for raw-tracks recordings. 
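+
+    A hypothetical configuration (bucket name, region, and role ARN are placeholders):
+
+        RecordingsBucketConfig(
+            bucket_name="my-recordings-bucket",
+            bucket_region="us-east-1",
+            assume_role_arn="arn:aws:iam::123456789012:role/daily-recordings",
+        )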
+ + Reference: https://docs.daily.co/reference/rest-api/rooms/create-room + """ + + bucket_name: NonEmptyString = Field(description="S3 bucket name") + bucket_region: NonEmptyString = Field(description="AWS region (e.g., 'us-east-1')") + assume_role_arn: NonEmptyString = Field( + description="AWS IAM role ARN that Daily.co will assume to write recordings" + ) + allow_api_access: bool = Field( + default=True, + description="Whether to allow API access to recording metadata", + ) + + +class RoomProperties(BaseModel): + """ + Room configuration properties. + """ + + enable_recording: Literal["cloud", "local", "raw-tracks"] | None = Field( + default=None, + description="Recording mode: 'cloud' for mixed, 'local' for local recording, 'raw-tracks' for multitrack, None to disable", + ) + enable_chat: bool = Field(default=True, description="Enable in-meeting chat") + enable_screenshare: bool = Field(default=True, description="Enable screen sharing") + start_video_off: bool = Field( + default=False, description="Start with video off for all participants" + ) + start_audio_off: bool = Field( + default=False, description="Start with audio muted for all participants" + ) + exp: int | None = Field( + None, description="Room expiration timestamp (Unix epoch seconds)" + ) + recordings_bucket: RecordingsBucketConfig | None = Field( + None, description="S3 bucket configuration for raw-tracks recordings" + ) + + +class CreateRoomRequest(BaseModel): + """ + Request to create a new Daily.co room. + + Reference: https://docs.daily.co/reference/rest-api/rooms/create-room + """ + + name: NonEmptyString = Field(description="Room name (must be unique within domain)") + privacy: Literal["public", "private"] = Field( + default="public", description="Room privacy setting" + ) + properties: RoomProperties = Field( + default_factory=RoomProperties, description="Room configuration properties" + ) + + +class MeetingTokenProperties(BaseModel): + """ + Properties for meeting token creation. + + Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token + """ + + room_name: NonEmptyString = Field(description="Room name this token is valid for") + user_id: NonEmptyString | None = Field( + None, description="User identifier to associate with token" + ) + is_owner: bool = Field( + default=False, description="Grant owner privileges to token holder" + ) + start_cloud_recording: bool = Field( + default=False, description="Automatically start cloud recording on join" + ) + enable_recording_ui: bool = Field( + default=True, description="Show recording controls in UI" + ) + eject_at_token_exp: bool = Field( + default=False, description="Eject participant when token expires" + ) + nbf: int | None = Field( + None, description="Not-before timestamp (Unix epoch seconds)" + ) + exp: int | None = Field( + None, description="Expiration timestamp (Unix epoch seconds)" + ) + + +class CreateMeetingTokenRequest(BaseModel): + """ + Request to create a meeting token for participant authentication. + + Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token + """ + + properties: MeetingTokenProperties = Field(description="Token properties") + + +class CreateWebhookRequest(BaseModel): + """ + Request to create a webhook subscription. 
+
+    Reference: https://docs.daily.co/reference/rest-api/webhooks
+    """
+
+    url: NonEmptyString = Field(description="Webhook endpoint URL (must be HTTPS)")
+    eventTypes: List[
+        Literal[
+            "participant.joined",
+            "participant.left",
+            "recording.started",
+            "recording.ready-to-download",
+            "recording.error",
+        ]
+    ] = Field(
+        description="Array of event types to subscribe to (only events we handle)"
+    )
+    hmac: NonEmptyString = Field(
+        description="Base64-encoded HMAC secret for webhook signature verification"
+    )
+    basicAuth: NonEmptyString | None = Field(
+        None, description="Optional basic auth credentials for webhook endpoint"
+    )
+
+
+class UpdateWebhookRequest(BaseModel):
+    """
+    Request to update an existing webhook.
+
+    Note: Daily.co API may not support PATCH for webhooks.
+    Common pattern is to delete and recreate.
+
+    Reference: https://docs.daily.co/reference/rest-api/webhooks
+    """
+
+    url: NonEmptyString | None = Field(None, description="New webhook endpoint URL")
+    eventTypes: List[NonEmptyString] | None = Field(
+        None, description="New array of event types"
+    )
+    hmac: NonEmptyString | None = Field(None, description="New HMAC secret")
+    basicAuth: NonEmptyString | None = Field(
+        None, description="New basic auth credentials"
+    )
diff --git a/server/reflector/dailyco_api/responses.py b/server/reflector/dailyco_api/responses.py
new file mode 100644
index 00000000..4eb84245
--- /dev/null
+++ b/server/reflector/dailyco_api/responses.py
@@ -0,0 +1,182 @@
+"""
+Daily.co API Response Models
+"""
+
+from typing import Any, Dict, List, Literal
+
+from pydantic import BaseModel, Field
+
+from reflector.utils.string import NonEmptyString
+
+# Not documented by Daily; values filled in from observed API responses
+RecordingStatus = Literal["in-progress", "finished"]
+
+
+class RoomResponse(BaseModel):
+    """
+    Response from room creation or retrieval.
+
+    Reference: https://docs.daily.co/reference/rest-api/rooms/create-room
+    """
+
+    id: NonEmptyString = Field(description="Unique room identifier (UUID)")
+    name: NonEmptyString = Field(description="Room name used in URLs")
+    api_created: bool = Field(description="Whether room was created via API")
+    privacy: Literal["public", "private"] = Field(description="Room privacy setting")
+    url: NonEmptyString = Field(description="Full room URL")
+    created_at: NonEmptyString = Field(description="ISO 8601 creation timestamp")
+    config: Dict[NonEmptyString, Any] = Field(
+        default_factory=dict, description="Room configuration properties"
+    )
+
+
+class RoomPresenceParticipant(BaseModel):
+    """
+    Participant presence information in a room.
+
+    Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence
+    """
+
+    room: NonEmptyString = Field(description="Room name")
+    id: NonEmptyString = Field(description="Participant session ID")
+    userId: NonEmptyString | None = Field(None, description="User ID if provided")
+    userName: NonEmptyString | None = Field(None, description="User display name")
+    joinTime: NonEmptyString = Field(description="ISO 8601 join timestamp")
+    duration: int = Field(description="Duration in room (seconds)")
+
+
+class RoomPresenceResponse(BaseModel):
+    """
+    Response from room presence endpoint.
+ + Reference: https://docs.daily.co/reference/rest-api/rooms/get-room-presence + """ + + total_count: int = Field( + description="Total number of participants currently in room" + ) + data: List[RoomPresenceParticipant] = Field( + default_factory=list, description="Array of participant presence data" + ) + + +class MeetingParticipant(BaseModel): + """ + Historical participant data from a meeting. + + Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants + """ + + user_id: NonEmptyString = Field(description="User identifier") + participant_id: NonEmptyString = Field(description="Participant session identifier") + user_name: NonEmptyString | None = Field(None, description="User display name") + join_time: int = Field(description="Join timestamp (Unix epoch seconds)") + duration: int = Field(description="Duration in meeting (seconds)") + + +class MeetingParticipantsResponse(BaseModel): + """ + Response from meeting participants endpoint. + + Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-participants + """ + + data: List[MeetingParticipant] = Field( + default_factory=list, description="Array of participant data" + ) + + +class MeetingResponse(BaseModel): + """ + Response from meeting information endpoint. + + Reference: https://docs.daily.co/reference/rest-api/meetings/get-meeting-information + """ + + id: NonEmptyString = Field(description="Meeting session identifier (UUID)") + room: NonEmptyString = Field(description="Room name where meeting occurred") + start_time: int = Field( + description="Meeting start Unix timestamp (~15s granularity)" + ) + duration: int = Field(description="Total meeting duration in seconds") + ongoing: bool = Field(description="Whether meeting is currently active") + max_participants: int = Field(description="Peak concurrent participant count") + participants: List[MeetingParticipant] = Field( + default_factory=list, description="Array of participant session data" + ) + + +class RecordingS3Info(BaseModel): + """ + S3 bucket information for a recording. + + Reference: https://docs.daily.co/reference/rest-api/recordings + """ + + bucket_name: NonEmptyString + bucket_region: NonEmptyString + endpoint: NonEmptyString | None = None + + +class RecordingResponse(BaseModel): + """ + Response from recording retrieval endpoint. + + Reference: https://docs.daily.co/reference/rest-api/recordings + """ + + id: NonEmptyString = Field(description="Recording identifier") + room_name: NonEmptyString = Field(description="Room where recording occurred") + start_ts: int = Field(description="Recording start timestamp (Unix epoch seconds)") + status: RecordingStatus = Field( + description="Recording status ('in-progress' or 'finished')" + ) + max_participants: int = Field(description="Maximum participants during recording") + duration: int = Field(description="Recording duration in seconds") + share_token: NonEmptyString | None = Field( + None, description="Token for sharing recording" + ) + s3: RecordingS3Info | None = Field(None, description="S3 bucket information") + + +class MeetingTokenResponse(BaseModel): + """ + Response from meeting token creation. + + Reference: https://docs.daily.co/reference/rest-api/meeting-tokens/create-meeting-token + """ + + token: NonEmptyString = Field( + description="JWT meeting token for participant authentication" + ) + + +class WebhookResponse(BaseModel): + """ + Response from webhook creation or retrieval. 
+ + Reference: https://docs.daily.co/reference/rest-api/webhooks + """ + + uuid: NonEmptyString = Field(description="Unique webhook identifier") + url: NonEmptyString = Field(description="Webhook endpoint URL") + hmac: NonEmptyString | None = Field( + None, description="Base64-encoded HMAC secret for signature verification" + ) + basicAuth: NonEmptyString | None = Field( + None, description="Basic auth credentials if configured" + ) + eventTypes: List[NonEmptyString] = Field( + default_factory=list, + description="Array of event types (e.g., ['recording.started', 'participant.joined'])", + ) + state: Literal["ACTIVE", "FAILED"] = Field( + description="Webhook state - FAILED after 3+ consecutive failures" + ) + failedCount: int = Field(default=0, description="Number of consecutive failures") + lastMomentPushed: NonEmptyString | None = Field( + None, description="ISO 8601 timestamp of last successful push" + ) + domainId: NonEmptyString = Field(description="Daily.co domain/account identifier") + createdAt: NonEmptyString = Field(description="ISO 8601 creation timestamp") + updatedAt: NonEmptyString = Field(description="ISO 8601 last update timestamp") diff --git a/server/reflector/dailyco_api/webhook_utils.py b/server/reflector/dailyco_api/webhook_utils.py new file mode 100644 index 00000000..b10d4fa2 --- /dev/null +++ b/server/reflector/dailyco_api/webhook_utils.py @@ -0,0 +1,229 @@ +""" +Daily.co Webhook Utilities + +Utilities for verifying and parsing Daily.co webhook events. + +Reference: https://docs.daily.co/reference/rest-api/webhooks +""" + +import base64 +import hmac +from hashlib import sha256 + +import structlog + +from .webhooks import ( + DailyWebhookEvent, + ParticipantJoinedPayload, + ParticipantLeftPayload, + RecordingErrorPayload, + RecordingReadyToDownloadPayload, + RecordingStartedPayload, +) + +logger = structlog.get_logger(__name__) + + +def verify_webhook_signature( + body: bytes, + signature: str, + timestamp: str, + webhook_secret: str, +) -> bool: + """ + Verify Daily.co webhook signature using HMAC-SHA256. + + Daily.co signature verification: + 1. Base64-decode the webhook secret + 2. Create signed content: timestamp + '.' + body + 3. Compute HMAC-SHA256(secret, signed_content) + 4. Base64-encode the result + 5. Compare with provided signature using constant-time comparison + + Reference: https://docs.daily.co/reference/rest-api/webhooks + + Args: + body: Raw request body bytes + signature: X-Webhook-Signature header value + timestamp: X-Webhook-Timestamp header value + webhook_secret: Base64-encoded HMAC secret + + Returns: + True if signature is valid, False otherwise + + Example: + >>> body = b'{"version":"1.0.0","type":"participant.joined",...}' + >>> signature = "abc123..." + >>> timestamp = "1234567890" + >>> secret = "your-base64-secret" + >>> is_valid = verify_webhook_signature(body, signature, timestamp, secret) + """ + if not signature or not timestamp or not webhook_secret: + logger.warning( + "Missing required data for webhook verification", + has_signature=bool(signature), + has_timestamp=bool(timestamp), + has_secret=bool(webhook_secret), + ) + return False + + try: + secret_bytes = base64.b64decode(webhook_secret) + signed_content = timestamp.encode() + b"." 
+ body
+        expected = hmac.new(secret_bytes, signed_content, sha256).digest()
+        expected_b64 = base64.b64encode(expected).decode()
+
+        # Constant-time comparison to prevent timing attacks
+        return hmac.compare_digest(expected_b64, signature)
+
+    except (ValueError, TypeError) as e:
+        # binascii.Error and UnicodeDecodeError are ValueError subclasses,
+        # so malformed base64 and bad encodings are covered here.
+        logger.error(
+            "Webhook signature verification failed",
+            error=str(e),
+            error_type=type(e).__name__,
+        )
+        return False
+
+
+def extract_room_name(event: DailyWebhookEvent) -> str | None:
+    """
+    Extract room name from Daily.co webhook event payload.
+
+    Args:
+        event: Parsed webhook event
+
+    Returns:
+        Room name if present and is a string, None otherwise
+
+    Example:
+        >>> event = DailyWebhookEvent(**webhook_payload)
+        >>> room_name = extract_room_name(event)
+    """
+    room = event.payload.get("room_name")
+    # Ensure we return a string, not any falsy value that might be in payload
+    return room if isinstance(room, str) else None
+
+
+def parse_participant_joined(event: DailyWebhookEvent) -> ParticipantJoinedPayload:
+    """
+    Parse participant.joined webhook event payload.
+
+    Args:
+        event: Webhook event with type "participant.joined"
+
+    Returns:
+        Parsed participant joined payload
+
+    Raises:
+        pydantic.ValidationError: If payload doesn't match expected schema
+    """
+    return ParticipantJoinedPayload(**event.payload)
+
+
+def parse_participant_left(event: DailyWebhookEvent) -> ParticipantLeftPayload:
+    """
+    Parse participant.left webhook event payload.
+
+    Args:
+        event: Webhook event with type "participant.left"
+
+    Returns:
+        Parsed participant left payload
+
+    Raises:
+        pydantic.ValidationError: If payload doesn't match expected schema
+    """
+    return ParticipantLeftPayload(**event.payload)
+
+
+def parse_recording_started(event: DailyWebhookEvent) -> RecordingStartedPayload:
+    """
+    Parse recording.started webhook event payload.
+
+    Args:
+        event: Webhook event with type "recording.started"
+
+    Returns:
+        Parsed recording started payload
+
+    Raises:
+        pydantic.ValidationError: If payload doesn't match expected schema
+    """
+    return RecordingStartedPayload(**event.payload)
+
+
+def parse_recording_ready(
+    event: DailyWebhookEvent,
+) -> RecordingReadyToDownloadPayload:
+    """
+    Parse recording.ready-to-download webhook event payload.
+
+    This event is sent when raw-tracks recordings are complete and uploaded to S3.
+    The payload includes a 'tracks' array with individual audio/video files.
+
+    Args:
+        event: Webhook event with type "recording.ready-to-download"
+
+    Returns:
+        Parsed recording ready payload with tracks array
+
+    Raises:
+        pydantic.ValidationError: If payload doesn't match expected schema
+
+    Example:
+        >>> event = DailyWebhookEvent(**webhook_payload)
+        >>> if event.type == "recording.ready-to-download":
+        ...     payload = parse_recording_ready(event)
+        ...     audio_tracks = [t for t in payload.tracks if t.type == "audio"]
+    """
+    return RecordingReadyToDownloadPayload(**event.payload)
+
+
+def parse_recording_error(event: DailyWebhookEvent) -> RecordingErrorPayload:
+    """
+    Parse recording.error webhook event payload.
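+
+    Example:
+        >>> event = DailyWebhookEvent(**webhook_payload)
+        >>> if event.type == "recording.error":
+        ...     payload = parse_recording_error(event)
+        ...     logger.error("Recording failed", error=payload.error_msg)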
+ + Args: + event: Webhook event with type "recording.error" + + Returns: + Parsed recording error payload + + Raises: + pydantic.ValidationError: If payload doesn't match expected schema + """ + return RecordingErrorPayload(**event.payload) + + +# Webhook event type to parser mapping +WEBHOOK_PARSERS = { + "participant.joined": parse_participant_joined, + "participant.left": parse_participant_left, + "recording.started": parse_recording_started, + "recording.ready-to-download": parse_recording_ready, + "recording.error": parse_recording_error, +} + + +def parse_webhook_payload(event: DailyWebhookEvent): + """ + Parse webhook event payload based on event type. + + Args: + event: Webhook event + + Returns: + Typed payload model based on event type, or raw dict if unknown + + Example: + >>> event = DailyWebhookEvent(**webhook_payload) + >>> payload = parse_webhook_payload(event) + >>> if isinstance(payload, ParticipantJoinedPayload): + ... print(f"User {payload.user_name} joined") + """ + parser = WEBHOOK_PARSERS.get(event.type) + if parser: + return parser(event) + else: + logger.warning("Unknown webhook event type", event_type=event.type) + return event.payload diff --git a/server/reflector/dailyco_api/webhooks.py b/server/reflector/dailyco_api/webhooks.py new file mode 100644 index 00000000..862f4996 --- /dev/null +++ b/server/reflector/dailyco_api/webhooks.py @@ -0,0 +1,199 @@ +""" +Daily.co Webhook Event Models + +Reference: https://docs.daily.co/reference/rest-api/webhooks +""" + +from typing import Any, Dict, Literal + +from pydantic import BaseModel, Field, field_validator + +from reflector.utils.string import NonEmptyString + + +def normalize_timestamp_to_int(v): + """ + Normalize float timestamps to int by truncating decimal part. + + Daily.co sometimes sends timestamps as floats (e.g., 1708972279.96). + Pydantic expects int for fields typed as `int`. + """ + if v is None: + return v + if isinstance(v, float): + return int(v) + return v + + +WebhookEventType = Literal[ + "participant.joined", + "participant.left", + "recording.started", + "recording.ready-to-download", + "recording.error", +] + + +class DailyTrack(BaseModel): + """ + Individual audio or video track from a multitrack recording. + + Reference: https://docs.daily.co/reference/rest-api/recordings + """ + + type: Literal["audio", "video"] + s3Key: NonEmptyString = Field(description="S3 object key for the track file") + size: int = Field(description="File size in bytes") + + +class DailyWebhookEvent(BaseModel): + """ + Base structure for all Daily.co webhook events. + All events share five common fields documented below. + + Reference: https://docs.daily.co/reference/rest-api/webhooks + """ + + version: NonEmptyString = Field( + description="Represents the version of the event. This uses semantic versioning to inform a consumer if the payload has introduced any breaking changes" + ) + type: WebhookEventType = Field( + description="Represents the type of the event described in the payload" + ) + id: NonEmptyString = Field( + description="An identifier representing this specific event" + ) + payload: Dict[NonEmptyString, Any] = Field( + description="An object representing the event, whose fields are described in the corresponding payload class" + ) + event_ts: int = Field( + description="Documenting when the webhook itself was sent. This timestamp is different than the time of the event the webhook describes. 
For example, a recording.started event will contain a start_ts timestamp of when the actual recording started, and a slightly later event_ts timestamp indicating when the webhook event was sent" + ) + + _normalize_event_ts = field_validator("event_ts", mode="before")( + normalize_timestamp_to_int + ) + + +class ParticipantJoinedPayload(BaseModel): + """ + Payload for participant.joined webhook event. + + Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-joined + """ + + room_name: NonEmptyString | None = Field(None, description="Daily.co room name") + session_id: NonEmptyString = Field(description="Daily.co session identifier") + user_id: NonEmptyString = Field(description="User identifier (may be encoded)") + user_name: NonEmptyString | None = Field(None, description="User display name") + joined_at: int = Field(description="Join timestamp in Unix epoch seconds") + + _normalize_joined_at = field_validator("joined_at", mode="before")( + normalize_timestamp_to_int + ) + + +class ParticipantLeftPayload(BaseModel): + """ + Payload for participant.left webhook event. + + Reference: https://docs.daily.co/reference/rest-api/webhooks/events/participant-left + """ + + room_name: NonEmptyString | None = Field(None, description="Daily.co room name") + session_id: NonEmptyString = Field(description="Daily.co session identifier") + user_id: NonEmptyString = Field(description="User identifier (may be encoded)") + user_name: NonEmptyString | None = Field(None, description="User display name") + joined_at: int = Field(description="Join timestamp in Unix epoch seconds") + duration: int | None = Field( + None, description="Duration of participation in seconds" + ) + + _normalize_joined_at = field_validator("joined_at", mode="before")( + normalize_timestamp_to_int + ) + + +class RecordingStartedPayload(BaseModel): + """ + Payload for recording.started webhook event. + + Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-started + """ + + room_name: NonEmptyString | None = Field(None, description="Daily.co room name") + recording_id: NonEmptyString = Field(description="Recording identifier") + start_ts: int | None = Field(None, description="Recording start timestamp") + + _normalize_start_ts = field_validator("start_ts", mode="before")( + normalize_timestamp_to_int + ) + + +class RecordingReadyToDownloadPayload(BaseModel): + """ + Payload for recording.ready-to-download webhook event. + This is sent when raw-tracks recordings are complete and uploaded to S3. 
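+
+    A trimmed example payload (all values are illustrative):
+
+        {
+            "type": "raw-tracks",
+            "recording_id": "rec-1234",
+            "room_name": "room-abc",
+            "start_ts": 1708972279,
+            "status": "finished",
+            "max_participants": 2,
+            "duration": 1800,
+            "s3_key": "my-domain/room-abc/1708972279",
+            "tracks": [{"type": "audio", "s3Key": "...", "size": 1048576}],
+        }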
+
+    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-ready-to-download
+    """
+
+    type: Literal["cloud", "raw-tracks"] = Field(
+        description="The type of recording that was generated"
+    )
+    recording_id: NonEmptyString = Field(
+        description="An ID identifying the recording that was generated"
+    )
+    room_name: NonEmptyString = Field(
+        description="The name of the room where the recording was made"
+    )
+    start_ts: int = Field(
+        description="The Unix epoch time in seconds representing when the recording started"
+    )
+    status: Literal["finished"] = Field(
+        description="The status of the given recording (always 'finished' in ready-to-download webhook, see RecordingStatus in responses.py for full API statuses)"
+    )
+    max_participants: int = Field(
+        description="The number of participants on the call that were recorded"
+    )
+    duration: int = Field(description="The duration in seconds of the call")
+    s3_key: NonEmptyString = Field(
+        description="The location of the recording in the provided S3 bucket"
+    )
+    share_token: NonEmptyString | None = Field(
+        None,
+        description="Share token for the recording; observed in responses but not documented by Daily",
+    )
+    tracks: list[DailyTrack] | None = Field(
+        None,
+        description="If the recording is a raw-tracks recording, a tracks field will be provided. If role permissions have been removed, the tracks field may be null",
+    )
+
+    _normalize_start_ts = field_validator("start_ts", mode="before")(
+        normalize_timestamp_to_int
+    )
+
+
+class RecordingErrorPayload(BaseModel):
+    """
+    Payload for recording.error webhook event.
+
+    Reference: https://docs.daily.co/reference/rest-api/webhooks/events/recording-error
+    """
+
+    action: Literal["clourd-recording-err", "cloud-recording-error"] = Field(
+        description="A string describing the event that was emitted (both variants are documented)"
+    )
+    error_msg: NonEmptyString = Field(description="The error message returned")
+    instance_id: NonEmptyString = Field(
+        description="The recording instance ID that was passed into the start recording command"
+    )
+    room_name: NonEmptyString = Field(
+        description="The name of the room where the recording was made"
+    )
+    timestamp: int = Field(
+        description="The Unix epoch time in seconds representing when the error was emitted"
+    )
+
+    _normalize_timestamp = field_validator("timestamp", mode="before")(
+        normalize_timestamp_to_int
+    )
diff --git a/server/reflector/video_platforms/daily.py b/server/reflector/video_platforms/daily.py
index 7bec4864..7485cc95 100644
--- a/server/reflector/video_platforms/daily.py
+++ b/server/reflector/video_platforms/daily.py
@@ -1,12 +1,17 @@
-import base64
-import hmac
 from datetime import datetime
-from hashlib import sha256
-from http import HTTPStatus
-from typing import Any, Dict, Optional
-
-import httpx
+from reflector.dailyco_api import (
+    CreateMeetingTokenRequest,
+    CreateRoomRequest,
+    DailyApiClient,
+    MeetingParticipantsResponse,
+    MeetingTokenProperties,
+    RecordingResponse,
+    RecordingsBucketConfig,
+    RoomPresenceResponse,
+    RoomProperties,
+    verify_webhook_signature,
+)
 from reflector.db.daily_participant_sessions import (
     daily_participant_sessions_controller,
 )
 
@@ -23,18 +28,17 @@ from .models import MeetingData, RecordingType, SessionData, VideoPlatformConfig
 
 class DailyClient(VideoPlatformClient):
     PLATFORM_NAME: Platform = "daily"
-    TIMEOUT = 10
-    BASE_URL = "https://api.daily.co/v1"
     TIMESTAMP_FORMAT = "%Y%m%d%H%M%S"
     RECORDING_NONE: RecordingType = "none"
     RECORDING_CLOUD: RecordingType = "cloud"
 
     def __init__(self, config: VideoPlatformConfig):
         super().__init__(config)
-        self.headers = {
-            "Authorization": f"Bearer {config.api_key}",
-            "Content-Type": "application/json",
-        }
+        self._api_client = DailyApiClient(
+            api_key=config.api_key,
+            webhook_secret=config.webhook_secret,
+            timeout=10.0,
+        )
 
     async def create_meeting(
         self, room_name_prefix: NonEmptyString, end_date: datetime, room: Room
@@ -49,57 +53,43 @@ class DailyClient(VideoPlatformClient):
         timestamp = datetime.now().strftime(self.TIMESTAMP_FORMAT)
         room_name = f"{room_name_prefix}{ROOM_PREFIX_SEPARATOR}{timestamp}"
 
-        data = {
-            "name": room_name,
-            "privacy": "private" if room.is_locked else "public",
-            "properties": {
-                "enable_recording": "raw-tracks"
-                if room.recording_type != self.RECORDING_NONE
-                else False,
-                "enable_chat": True,
-                "enable_screenshare": True,
-                "start_video_off": False,
-                "start_audio_off": False,
-                "exp": int(end_date.timestamp()),
-            },
-        }
+        properties = RoomProperties(
+            enable_recording="raw-tracks"
+            if room.recording_type != self.RECORDING_NONE
+            else None,
+            enable_chat=True,
+            enable_screenshare=True,
+            start_video_off=False,
+            start_audio_off=False,
+            exp=int(end_date.timestamp()),
+        )
 
         # Only configure recordings_bucket if recording is enabled
         if room.recording_type != self.RECORDING_NONE:
             daily_storage = get_dailyco_storage()
             assert daily_storage.bucket_name, "S3 bucket must be configured"
-            data["properties"]["recordings_bucket"] = {
-                "bucket_name": daily_storage.bucket_name,
-                "bucket_region": daily_storage.region,
-                "assume_role_arn": daily_storage.role_credential,
-                "allow_api_access": True,
-            }
-        async with httpx.AsyncClient() as client:
-            response = await client.post(
-                f"{self.BASE_URL}/rooms",
-                headers=self.headers,
-                json=data,
-                timeout=self.TIMEOUT,
+            properties.recordings_bucket = RecordingsBucketConfig(
+                bucket_name=daily_storage.bucket_name,
+                bucket_region=daily_storage.region,
+                assume_role_arn=daily_storage.role_credential,
+                allow_api_access=True,
             )
-            if response.status_code >= 400:
-                logger.error(
-                    "Daily.co API error",
-                    status_code=response.status_code,
-                    response_body=response.text,
-                    request_data=data,
-                )
-            response.raise_for_status()
-            result = response.json()
-            room_url = result["url"]
+        request = CreateRoomRequest(
+            name=room_name,
+            privacy="private" if room.is_locked else "public",
+            properties=properties,
+        )
+
+        result = await self._api_client.create_room(request)
 
         return MeetingData(
-            meeting_id=result["id"],
-            room_name=result["name"],
-            room_url=room_url,
-            host_room_url=room_url,
+            meeting_id=result.id,
+            room_name=result.name,
+            room_url=result.url,
+            host_room_url=result.url,
             platform=self.PLATFORM_NAME,
-            extra_data=result,
+            extra_data=result.model_dump(),
         )
 
     async def get_room_sessions(self, room_name: str) -> list[SessionData]:
@@ -108,7 +98,7 @@
         Daily.co doesn't provide historical session API, so we query our database
         where participant.joined/left webhooks are stored.
         """
-        from reflector.db.meetings import meetings_controller
+        from reflector.db.meetings import meetings_controller  # noqa: PLC0415
 
         meeting = await meetings_controller.get_by_room_name(room_name)
         if not meeting:
@@ -127,135 +117,65 @@
             for s in sessions
         ]
 
-    async def get_room_presence(self, room_name: str) -> Dict[str, Any]:
-        """Get room presence/session data for a Daily.co room.
+ async def get_room_presence(self, room_name: str) -> RoomPresenceResponse: + """Get room presence/session data for a Daily.co room.""" + return await self._api_client.get_room_presence(room_name) - Example response: - { - "total_count": 1, - "data": [ - { - "room": "w2pp2cf4kltgFACPKXmX", - "id": "d61cd7b2-a273-42b4-89bd-be763fd562c1", - "userId": "pbZ+ismP7dk=", - "userName": "Moishe", - "joinTime": "2023-01-01T20:53:19.000Z", - "duration": 2312 - } - ] - } - """ - async with httpx.AsyncClient() as client: - response = await client.get( - f"{self.BASE_URL}/rooms/{room_name}/presence", - headers=self.headers, - timeout=self.TIMEOUT, - ) - response.raise_for_status() - return response.json() + async def get_meeting_participants( + self, meeting_id: str + ) -> MeetingParticipantsResponse: + """Get participant data for a specific Daily.co meeting.""" + return await self._api_client.get_meeting_participants(meeting_id) - async def get_meeting_participants(self, meeting_id: str) -> Dict[str, Any]: - """Get participant data for a specific Daily.co meeting. - - Example response: - { - "data": [ - { - "user_id": "4q47OTmqa/w=", - "participant_id": "d61cd7b2-a273-42b4-89bd-be763fd562c1", - "user_name": "Lindsey", - "join_time": 1672786813, - "duration": 150 - }, - { - "user_id": "pbZ+ismP7dk=", - "participant_id": "b3d56359-14d7-46af-ac8b-18f8c991f5f6", - "user_name": "Moishe", - "join_time": 1672786797, - "duration": 165 - } - ] - } - """ - async with httpx.AsyncClient() as client: - response = await client.get( - f"{self.BASE_URL}/meetings/{meeting_id}/participants", - headers=self.headers, - timeout=self.TIMEOUT, - ) - response.raise_for_status() - return response.json() - - async def get_recording(self, recording_id: str) -> Dict[str, Any]: - async with httpx.AsyncClient() as client: - response = await client.get( - f"{self.BASE_URL}/recordings/{recording_id}", - headers=self.headers, - timeout=self.TIMEOUT, - ) - response.raise_for_status() - return response.json() + async def get_recording(self, recording_id: str) -> RecordingResponse: + return await self._api_client.get_recording(recording_id) async def delete_room(self, room_name: str) -> bool: - async with httpx.AsyncClient() as client: - response = await client.delete( - f"{self.BASE_URL}/rooms/{room_name}", - headers=self.headers, - timeout=self.TIMEOUT, - ) - return response.status_code in (HTTPStatus.OK, HTTPStatus.NOT_FOUND) + """Delete a room (idempotent - succeeds even if room doesn't exist).""" + await self._api_client.delete_room(room_name) + return True async def upload_logo(self, room_name: str, logo_path: str) -> bool: return True def verify_webhook_signature( - self, body: bytes, signature: str, timestamp: Optional[str] = None + self, body: bytes, signature: str, timestamp: str | None = None ) -> bool: - """Verify Daily.co webhook signature. - - Daily.co uses: - - X-Webhook-Signature header - - X-Webhook-Timestamp header - - Signature format: HMAC-SHA256(base64_decode(secret), timestamp + '.' + body) - - Result is base64 encoded - """ - if not signature or not timestamp: + """Verify Daily.co webhook signature using dailyco_api module.""" + if not self.config.webhook_secret: + logger.warning("Webhook secret not configured") return False - try: - secret_bytes = base64.b64decode(self.config.webhook_secret) - - signed_content = timestamp.encode() + b"." 
+ body - - expected = hmac.new(secret_bytes, signed_content, sha256).digest() - expected_b64 = base64.b64encode(expected).decode() - - return hmac.compare_digest(expected_b64, signature) - except Exception as e: - logger.error("Daily.co webhook signature verification failed", exc_info=e) - return False + return verify_webhook_signature( + body=body, + signature=signature, + timestamp=timestamp or "", + webhook_secret=self.config.webhook_secret, + ) async def create_meeting_token( self, room_name: DailyRoomName, enable_recording: bool, - user_id: Optional[str] = None, + user_id: str | None = None, ) -> str: - data = {"properties": {"room_name": room_name}} + properties = MeetingTokenProperties( + room_name=room_name, + user_id=user_id, + start_cloud_recording=enable_recording, + enable_recording_ui=not enable_recording, + ) - if enable_recording: - data["properties"]["start_cloud_recording"] = True - data["properties"]["enable_recording_ui"] = False + request = CreateMeetingTokenRequest(properties=properties) + result = await self._api_client.create_meeting_token(request) + return result.token - if user_id: - data["properties"]["user_id"] = user_id + async def close(self): + """Clean up API client resources.""" + await self._api_client.close() - async with httpx.AsyncClient() as client: - response = await client.post( - f"{self.BASE_URL}/meeting-tokens", - headers=self.headers, - json=data, - timeout=self.TIMEOUT, - ) - response.raise_for_status() - return response.json()["token"] + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() diff --git a/server/reflector/views/daily.py b/server/reflector/views/daily.py index baad97e9..733c70a3 100644 --- a/server/reflector/views/daily.py +++ b/server/reflector/views/daily.py @@ -1,10 +1,14 @@ import json from datetime import datetime, timezone -from typing import Any, Dict, Literal from fastapi import APIRouter, HTTPException, Request -from pydantic import BaseModel +from reflector.dailyco_api import ( + DailyTrack, + DailyWebhookEvent, + extract_room_name, + parse_recording_error, +) from reflector.db import get_database from reflector.db.daily_participant_sessions import ( DailyParticipantSession, @@ -13,7 +17,6 @@ from reflector.db.daily_participant_sessions import ( from reflector.db.meetings import meetings_controller from reflector.logger import logger as _logger from reflector.settings import settings -from reflector.utils.daily import DailyRoomName from reflector.video_platforms.factory import create_platform_client from reflector.worker.process import process_multitrack_recording @@ -22,30 +25,6 @@ router = APIRouter() logger = _logger.bind(platform="daily") -class DailyTrack(BaseModel): - type: Literal["audio", "video"] - s3Key: str - size: int - - -class DailyWebhookEvent(BaseModel): - version: str - type: str - id: str - payload: Dict[str, Any] - event_ts: float - - -def _extract_room_name(event: DailyWebhookEvent) -> DailyRoomName | None: - """Extract room name from Daily event payload. - - Daily.co API inconsistency: - - participant.* events use "room" field - - recording.* events use "room_name" field - """ - return event.payload.get("room_name") or event.payload.get("room") - - @router.post("/webhook") async def webhook(request: Request): """Handle Daily webhook events. 
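The hunk below removes the temporary signature-check bypass, so every request must now carry valid headers. For reference, a caller (for example a test) would have to sign its payloads the same way verify_webhook_signature checks them; a minimal sketch, where secret_b64 stands in for the configured DAILY_WEBHOOK_SECRET and the header names follow the X-Webhook-Signature / X-Webhook-Timestamp convention noted earlier:

    import base64
    import hmac
    import time
    from hashlib import sha256

    def sign_webhook_body(body: bytes, secret_b64: str) -> dict[str, str]:
        # Same scheme the server verifies: HMAC-SHA256 over "timestamp.body",
        # keyed with the base64-decoded secret, result re-encoded as base64.
        timestamp = str(int(time.time()))
        digest = hmac.new(
            base64.b64decode(secret_b64), timestamp.encode() + b"." + body, sha256
        ).digest()
        return {
            "X-Webhook-Signature": base64.b64encode(digest).decode(),
            "X-Webhook-Timestamp": timestamp,
        }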
@@ -77,18 +56,14 @@ async def webhook(request: Request): client = create_platform_client("daily") - # TEMPORARY: Bypass signature check for testing - # TODO: Remove this after testing is complete - BYPASS_FOR_TESTING = True - if not BYPASS_FOR_TESTING: - if not client.verify_webhook_signature(body, signature, timestamp): - logger.warning( - "Invalid webhook signature", - signature=signature, - timestamp=timestamp, - has_body=bool(body), - ) - raise HTTPException(status_code=401, detail="Invalid webhook signature") + if not client.verify_webhook_signature(body, signature, timestamp): + logger.warning( + "Invalid webhook signature", + signature=signature, + timestamp=timestamp, + has_body=bool(body), + ) + raise HTTPException(status_code=401, detail="Invalid webhook signature") try: body_json = json.loads(body) @@ -99,14 +74,12 @@ async def webhook(request: Request): logger.info("Received Daily webhook test event") return {"status": "ok"} - # Parse as actual event try: event = DailyWebhookEvent(**body_json) except Exception as e: logger.error("Failed to parse webhook event", error=str(e), body=body.decode()) raise HTTPException(status_code=422, detail="Invalid event format") - # Handle participant events if event.type == "participant.joined": await _handle_participant_joined(event) elif event.type == "participant.left": @@ -154,7 +127,7 @@ async def webhook(request: Request): async def _handle_participant_joined(event: DailyWebhookEvent): - daily_room_name = _extract_room_name(event) + daily_room_name = extract_room_name(event) if not daily_room_name: logger.warning("participant.joined: no room in payload", payload=event.payload) return @@ -167,7 +140,6 @@ async def _handle_participant_joined(event: DailyWebhookEvent): return payload = event.payload - logger.warning({"payload": payload}) joined_at = datetime.fromtimestamp(payload["joined_at"], tz=timezone.utc) session_id = f"{meeting.id}:{payload['session_id']}" @@ -225,7 +197,7 @@ async def _handle_participant_joined(event: DailyWebhookEvent): async def _handle_participant_left(event: DailyWebhookEvent): - room_name = _extract_room_name(event) + room_name = extract_room_name(event) if not room_name: logger.warning("participant.left: no room in payload", payload=event.payload) return @@ -268,7 +240,7 @@ async def _handle_participant_left(event: DailyWebhookEvent): async def _handle_recording_started(event: DailyWebhookEvent): - room_name = _extract_room_name(event) + room_name = extract_room_name(event) if not room_name: logger.warning( "recording.started: no room_name in payload", payload=event.payload @@ -301,7 +273,7 @@ async def _handle_recording_ready(event: DailyWebhookEvent): ] } """ - room_name = _extract_room_name(event) + room_name = extract_room_name(event) recording_id = event.payload.get("recording_id") tracks_raw = event.payload.get("tracks", []) @@ -350,8 +322,8 @@ async def _handle_recording_ready(event: DailyWebhookEvent): async def _handle_recording_error(event: DailyWebhookEvent): - room_name = _extract_room_name(event) - error = event.payload.get("error", "Unknown error") + payload = parse_recording_error(event) + room_name = payload.room_name if room_name: meeting = await meetings_controller.get_by_room_name(room_name) @@ -360,6 +332,6 @@ async def _handle_recording_error(event: DailyWebhookEvent): "Recording error", meeting_id=meeting.id, room_name=room_name, - error=error, + error=payload.error_msg, platform="daily", ) diff --git a/server/scripts/list_daily_webhooks.py b/server/scripts/list_daily_webhooks.py index 
c3c13568..e2e3c912 100755 --- a/server/scripts/list_daily_webhooks.py +++ b/server/scripts/list_daily_webhooks.py @@ -6,53 +6,19 @@ from pathlib import Path sys.path.insert(0, str(Path(__file__).parent.parent)) -import httpx - +from reflector.dailyco_api import DailyApiClient from reflector.settings import settings async def list_webhooks(): - """ - List all Daily.co webhooks for this account. - """ + """List all Daily.co webhooks for this account using dailyco_api module.""" if not settings.DAILY_API_KEY: print("Error: DAILY_API_KEY not set") return 1 - headers = { - "Authorization": f"Bearer {settings.DAILY_API_KEY}", - "Content-Type": "application/json", - } - - async with httpx.AsyncClient() as client: + async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client: try: - """ - Daily.co webhook list response format: - [ - { - "uuid": "0b4e4c7c-5eaf-46fe-990b-a3752f5684f5", - "url": "{{webhook_url}}", - "hmac": "NQrSA5z0FkJ44QPrFerW7uCc5kdNLv3l2FDEKDanL1U=", - "basicAuth": null, - "eventTypes": [ - "recording.started", - "recording.ready-to-download" - ], - "state": "ACTVIE", - "failedCount": 0, - "lastMomentPushed": "2023-08-15T18:29:52.000Z", - "domainId": "{{domain_id}}", - "createdAt": "2023-08-15T18:28:30.000Z", - "updatedAt": "2023-08-15T18:29:52.000Z" - } - ] - """ - resp = await client.get( - "https://api.daily.co/v1/webhooks", - headers=headers, - ) - resp.raise_for_status() - webhooks = resp.json() + webhooks = await client.list_webhooks() if not webhooks: print("No webhooks found") @@ -62,12 +28,12 @@ async def list_webhooks(): for webhook in webhooks: print("=" * 80) - print(f"UUID: {webhook['uuid']}") - print(f"URL: {webhook['url']}") - print(f"State: {webhook['state']}") - print(f"Event Types: {', '.join(webhook.get('eventTypes', []))}") + print(f"UUID: {webhook.uuid}") + print(f"URL: {webhook.url}") + print(f"State: {webhook.state}") + print(f"Event Types: {', '.join(webhook.eventTypes)}") print( - f"HMAC Secret: {'✓ Configured' if webhook.get('hmac') else '✗ Not set'}" + f"HMAC Secret: {'✓ Configured' if webhook.hmac else '✗ Not set'}" ) print() @@ -78,12 +44,8 @@ async def list_webhooks(): return 0 - except httpx.HTTPStatusError as e: - print(f"Error fetching webhooks: {e}") - print(f"Response: {e.response.text}") - return 1 except Exception as e: - print(f"Unexpected error: {e}") + print(f"Error fetching webhooks: {e}") return 1 diff --git a/server/scripts/recreate_daily_webhook.py b/server/scripts/recreate_daily_webhook.py index a378baf2..e4ac9ce9 100644 --- a/server/scripts/recreate_daily_webhook.py +++ b/server/scripts/recreate_daily_webhook.py @@ -6,56 +6,60 @@ from pathlib import Path sys.path.insert(0, str(Path(__file__).parent.parent)) -import httpx - +from reflector.dailyco_api import ( + CreateWebhookRequest, + DailyApiClient, +) from reflector.settings import settings async def setup_webhook(webhook_url: str): """ - Create or update Daily.co webhook for this environment. + Create or update Daily.co webhook for this environment using dailyco_api module. Uses DAILY_WEBHOOK_UUID to identify existing webhook. 
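+
+    Expects DAILY_API_KEY and DAILY_WEBHOOK_SECRET to be configured (both are
+    checked below); the webhook endpoint URL itself is supplied by the caller.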
""" if not settings.DAILY_API_KEY: print("Error: DAILY_API_KEY not set") return 1 - headers = { - "Authorization": f"Bearer {settings.DAILY_API_KEY}", - "Content-Type": "application/json", - } + if not settings.DAILY_WEBHOOK_SECRET: + print("Error: DAILY_WEBHOOK_SECRET not set") + return 1 - webhook_data = { - "url": webhook_url, - "eventTypes": [ - "participant.joined", - "participant.left", - "recording.started", - "recording.ready-to-download", - "recording.error", - ], - "hmac": settings.DAILY_WEBHOOK_SECRET, - } + event_types = [ + "participant.joined", + "participant.left", + "recording.started", + "recording.ready-to-download", + "recording.error", + ] - async with httpx.AsyncClient() as client: + async with DailyApiClient(api_key=settings.DAILY_API_KEY) as client: webhook_uuid = settings.DAILY_WEBHOOK_UUID if webhook_uuid: - # Update existing webhook print(f"Updating existing webhook {webhook_uuid}...") try: - resp = await client.patch( - f"https://api.daily.co/v1/webhooks/{webhook_uuid}", - headers=headers, - json=webhook_data, + # Note: Daily.co doesn't support PATCH well, so we delete + recreate + await client.delete_webhook(webhook_uuid) + print(f"Deleted old webhook {webhook_uuid}") + + request = CreateWebhookRequest( + url=webhook_url, + eventTypes=event_types, + hmac=settings.DAILY_WEBHOOK_SECRET, ) - resp.raise_for_status() - result = resp.json() - print(f"✓ Updated webhook {result['uuid']} (state: {result['state']})") - print(f" URL: {result['url']}") - return 0 - except httpx.HTTPStatusError as e: - if e.response.status_code == 404: + result = await client.create_webhook(request) + + print( + f"✓ Created replacement webhook {result.uuid} (state: {result.state})" + ) + print(f" URL: {result.url}") + + webhook_uuid = result.uuid + + except Exception as e: + if hasattr(e, "response") and e.response.status_code == 404: print(f"Webhook {webhook_uuid} not found, creating new one...") webhook_uuid = None # Fall through to creation else: @@ -63,17 +67,17 @@ async def setup_webhook(webhook_url: str): return 1 if not webhook_uuid: - # Create new webhook print("Creating new webhook...") - resp = await client.post( - "https://api.daily.co/v1/webhooks", headers=headers, json=webhook_data + request = CreateWebhookRequest( + url=webhook_url, + eventTypes=event_types, + hmac=settings.DAILY_WEBHOOK_SECRET, ) - resp.raise_for_status() - result = resp.json() - webhook_uuid = result["uuid"] + result = await client.create_webhook(request) + webhook_uuid = result.uuid - print(f"✓ Created webhook {webhook_uuid} (state: {result['state']})") - print(f" URL: {result['url']}") + print(f"✓ Created webhook {webhook_uuid} (state: {result.state})") + print(f" URL: {result.url}") print() print("=" * 60) print("IMPORTANT: Add this to your environment variables:") @@ -114,7 +118,7 @@ if __name__ == "__main__": ) print() print("Behavior:") - print(" - If DAILY_WEBHOOK_UUID set: Updates existing webhook") + print(" - If DAILY_WEBHOOK_UUID set: Deletes old webhook, creates new one") print( " - If DAILY_WEBHOOK_UUID empty: Creates new webhook, saves UUID to .env" ) From 11731c9d38439b04e93b1c3afbd7090bad11a11f Mon Sep 17 00:00:00 2001 From: Igor Monadical Date: Mon, 24 Nov 2025 10:35:06 -0500 Subject: [PATCH 77/77] feat: multitrack cli (#735) * multitrack cli prd * prd/todo (no-mistakes) * multitrack cli (no-mistakes) * multitrack cli (no-mistakes) * multitrack cli (no-mistakes) * multitrack cli (no-mistakes) * remove multitrack tests most worthless * useless comments away * useless comments away --------- 
Co-authored-by: Igor Loskutov --- server/reflector/tools/cli_multitrack.py | 347 +++++++++++++++++++++++ server/reflector/tools/process.py | 179 ++++++++++-- server/tests/test_s3_url_parser.py | 136 +++++++++ 3 files changed, 643 insertions(+), 19 deletions(-) create mode 100644 server/reflector/tools/cli_multitrack.py create mode 100644 server/tests/test_s3_url_parser.py diff --git a/server/reflector/tools/cli_multitrack.py b/server/reflector/tools/cli_multitrack.py new file mode 100644 index 00000000..aad5ab2f --- /dev/null +++ b/server/reflector/tools/cli_multitrack.py @@ -0,0 +1,347 @@ +import asyncio +import sys +import time +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Protocol + +import structlog +from celery.result import AsyncResult + +from reflector.db import get_database +from reflector.db.transcripts import SourceKind, Transcript, transcripts_controller +from reflector.pipelines.main_multitrack_pipeline import ( + task_pipeline_multitrack_process, +) +from reflector.storage import get_transcripts_storage +from reflector.tools.process import ( + extract_result_from_entry, + parse_s3_url, + validate_s3_objects, +) + +logger = structlog.get_logger(__name__) + +DEFAULT_PROCESSING_TIMEOUT_SECONDS = 3600 + +MAX_ERROR_MESSAGE_LENGTH = 500 + +TASK_POLL_INTERVAL_SECONDS = 2 + + +class StatusCallback(Protocol): + def __call__(self, state: str, elapsed_seconds: int) -> None: ... + + +@dataclass +class MultitrackTaskResult: + success: bool + transcript_id: str + error: Optional[str] = None + + +async def create_multitrack_transcript( + bucket_name: str, + track_keys: List[str], + source_language: str, + target_language: str, + user_id: Optional[str] = None, +) -> Transcript: + num_tracks = len(track_keys) + track_word = "track" if num_tracks == 1 else "tracks" + transcript_name = f"Multitrack ({num_tracks} {track_word})" + + transcript = await transcripts_controller.add( + transcript_name, + source_kind=SourceKind.FILE, + source_language=source_language, + target_language=target_language, + user_id=user_id, + ) + + logger.info( + "Created multitrack transcript", + transcript_id=transcript.id, + name=transcript_name, + bucket=bucket_name, + num_tracks=len(track_keys), + ) + + return transcript + + +def submit_multitrack_task( + transcript_id: str, bucket_name: str, track_keys: List[str] +) -> AsyncResult: + result = task_pipeline_multitrack_process.delay( + transcript_id=transcript_id, + bucket_name=bucket_name, + track_keys=track_keys, + ) + + logger.info( + "Multitrack task submitted", + transcript_id=transcript_id, + task_id=result.id, + bucket=bucket_name, + num_tracks=len(track_keys), + ) + + return result + + +async def wait_for_task( + result: AsyncResult, + transcript_id: str, + timeout_seconds: int = DEFAULT_PROCESSING_TIMEOUT_SECONDS, + poll_interval: int = TASK_POLL_INTERVAL_SECONDS, + status_callback: Optional[StatusCallback] = None, +) -> MultitrackTaskResult: + start_time = time.time() + last_status = None + + while not result.ready(): + elapsed = time.time() - start_time + if elapsed > timeout_seconds: + error_msg = ( + f"Task {result.id} did not complete within {timeout_seconds}s " + f"for transcript {transcript_id}" + ) + logger.error( + "Task timeout", + task_id=result.id, + transcript_id=transcript_id, + elapsed_seconds=elapsed, + ) + raise TimeoutError(error_msg) + + if result.state != last_status: + if status_callback: + status_callback(result.state, int(elapsed)) + last_status = result.state + + await asyncio.sleep(poll_interval) + + 
if result.failed(): + error_info = result.info + traceback_info = getattr(result, "traceback", None) + + logger.error( + "Multitrack task failed", + transcript_id=transcript_id, + task_id=result.id, + error=str(error_info), + has_traceback=bool(traceback_info), + ) + + error_detail = str(error_info) + if traceback_info: + error_detail += f"\nTraceback:\n{traceback_info}" + + return MultitrackTaskResult( + success=False, transcript_id=transcript_id, error=error_detail + ) + + logger.info( + "Multitrack task completed", + transcript_id=transcript_id, + task_id=result.id, + state=result.state, + ) + + return MultitrackTaskResult(success=True, transcript_id=transcript_id) + + +async def update_transcript_status( + transcript_id: str, + status: str, + error: Optional[str] = None, + max_error_length: int = MAX_ERROR_MESSAGE_LENGTH, +) -> None: + database = get_database() + connected = False + + try: + await database.connect() + connected = True + + transcript = await transcripts_controller.get_by_id(transcript_id) + if transcript: + update_data: Dict[str, Any] = {"status": status} + + if error: + if len(error) > max_error_length: + error = error[: max_error_length - 3] + "..." + update_data["error"] = error + + await transcripts_controller.update(transcript, update_data) + + logger.info( + "Updated transcript status", + transcript_id=transcript_id, + status=status, + has_error=bool(error), + ) + except Exception as e: + logger.warning( + "Failed to update transcript status", + transcript_id=transcript_id, + error=str(e), + ) + finally: + if connected: + try: + await database.disconnect() + except Exception as e: + logger.warning(f"Database disconnect failed: {e}") + + +async def process_multitrack( + bucket_name: str, + track_keys: List[str], + source_language: str, + target_language: str, + user_id: Optional[str] = None, + timeout_seconds: int = DEFAULT_PROCESSING_TIMEOUT_SECONDS, + status_callback: Optional[StatusCallback] = None, +) -> MultitrackTaskResult: + """High-level orchestration for multitrack processing.""" + database = get_database() + transcript = None + connected = False + + try: + await database.connect() + connected = True + + transcript = await create_multitrack_transcript( + bucket_name=bucket_name, + track_keys=track_keys, + source_language=source_language, + target_language=target_language, + user_id=user_id, + ) + + result = submit_multitrack_task( + transcript_id=transcript.id, bucket_name=bucket_name, track_keys=track_keys + ) + + except Exception as e: + if transcript: + try: + await update_transcript_status( + transcript_id=transcript.id, status="failed", error=str(e) + ) + except Exception as update_error: + logger.error( + "Failed to update transcript status after error", + original_error=str(e), + update_error=str(update_error), + transcript_id=transcript.id, + ) + raise + finally: + if connected: + try: + await database.disconnect() + except Exception as e: + logger.warning(f"Database disconnect failed: {e}") + + # Poll outside database connection + task_result = await wait_for_task( + result=result, + transcript_id=transcript.id, + timeout_seconds=timeout_seconds, + poll_interval=2, + status_callback=status_callback, + ) + + if not task_result.success: + await update_transcript_status( + transcript_id=transcript.id, status="failed", error=task_result.error + ) + + return task_result + + +def print_progress(message: str) -> None: + """Print progress message to stderr for CLI visibility.""" + print(f"{message}", file=sys.stderr) + + +def create_status_callback() -> 
StatusCallback: + """Create callback for task status updates during polling.""" + + def callback(state: str, elapsed_seconds: int) -> None: + print_progress( + f"Multitrack pipeline status: {state} (elapsed: {elapsed_seconds}s)" + ) + + return callback + + +async def process_multitrack_cli( + s3_urls: List[str], + source_language: str, + target_language: str, + output_path: Optional[str] = None, +) -> None: + if not s3_urls: + raise ValueError("At least one track required for multitrack processing") + + bucket_keys = [] + for url in s3_urls: + try: + bucket, key = parse_s3_url(url) + bucket_keys.append((bucket, key)) + except ValueError as e: + raise ValueError(f"Invalid S3 URL '{url}': {e}") from e + + buckets = set(bucket for bucket, _ in bucket_keys) + if len(buckets) > 1: + raise ValueError( + f"All tracks must be in the same S3 bucket. " + f"Found {len(buckets)} different buckets: {sorted(buckets)}. " + f"Please upload all files to a single bucket." + ) + + primary_bucket = bucket_keys[0][0] + track_keys = [key for _, key in bucket_keys] + + print_progress( + f"Starting multitrack CLI processing: " + f"bucket={primary_bucket}, num_tracks={len(track_keys)}, " + f"source_language={source_language}, target_language={target_language}" + ) + + storage = get_transcripts_storage() + await validate_s3_objects(storage, bucket_keys) + print_progress(f"S3 validation complete: {len(bucket_keys)} objects verified") + + result = await process_multitrack( + bucket_name=primary_bucket, + track_keys=track_keys, + source_language=source_language, + target_language=target_language, + user_id=None, + timeout_seconds=3600, + status_callback=create_status_callback(), + ) + + if not result.success: + error_msg = ( + f"Multitrack pipeline failed for transcript {result.transcript_id}\n" + ) + if result.error: + error_msg += f"Error: {result.error}\n" + raise RuntimeError(error_msg) + + print_progress( + f"Multitrack processing complete for transcript {result.transcript_id}" + ) + + database = get_database() + await database.connect() + try: + await extract_result_from_entry(result.transcript_id, output_path) + finally: + await database.disconnect() diff --git a/server/reflector/tools/process.py b/server/reflector/tools/process.py index eb770f76..a3a74138 100644 --- a/server/reflector/tools/process.py +++ b/server/reflector/tools/process.py @@ -9,7 +9,10 @@ import shutil import sys import time from pathlib import Path -from typing import Any, Dict, List, Literal +from typing import Any, Dict, List, Literal, Tuple +from urllib.parse import unquote, urlparse + +from botocore.exceptions import BotoCoreError, ClientError, NoCredentialsError from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller from reflector.logger import logger @@ -20,10 +23,119 @@ from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipelin from reflector.pipelines.main_live_pipeline import ( pipeline_process as live_pipeline_process, ) +from reflector.storage import Storage + + +def validate_s3_bucket_name(bucket: str) -> None: + if not bucket: + raise ValueError("Bucket name cannot be empty") + if len(bucket) > 255: # Absolute max for any region + raise ValueError(f"Bucket name too long: {len(bucket)} characters (max 255)") + + +def validate_s3_key(key: str) -> None: + if not key: + raise ValueError("S3 key cannot be empty") + if len(key) > 1024: + raise ValueError(f"S3 key too long: {len(key)} characters (max 1024)") + + +def parse_s3_url(url: str) -> Tuple[str, str]: + parsed = 
diff --git a/server/reflector/tools/process.py b/server/reflector/tools/process.py
index eb770f76..a3a74138 100644
--- a/server/reflector/tools/process.py
+++ b/server/reflector/tools/process.py
@@ -9,7 +9,10 @@ import shutil
 import sys
 import time
 from pathlib import Path
-from typing import Any, Dict, List, Literal
+from typing import Any, Dict, List, Literal, Tuple
+from urllib.parse import unquote, urlparse
+
+from botocore.exceptions import BotoCoreError, ClientError, NoCredentialsError
 
 from reflector.db.transcripts import SourceKind, TranscriptTopic, transcripts_controller
 from reflector.logger import logger
@@ -20,10 +23,119 @@ from reflector.pipelines.main_live_pipeline import pipeline_post as live_pipelin
 from reflector.pipelines.main_live_pipeline import (
     pipeline_process as live_pipeline_process,
 )
+from reflector.storage import Storage
+
+
+def validate_s3_bucket_name(bucket: str) -> None:
+    if not bucket:
+        raise ValueError("Bucket name cannot be empty")
+    if len(bucket) > 255:  # Absolute max for any region
+        raise ValueError(f"Bucket name too long: {len(bucket)} characters (max 255)")
+
+
+def validate_s3_key(key: str) -> None:
+    if not key:
+        raise ValueError("S3 key cannot be empty")
+    if len(key) > 1024:
+        raise ValueError(f"S3 key too long: {len(key)} characters (max 1024)")
+
+
+def parse_s3_url(url: str) -> Tuple[str, str]:
+    parsed = urlparse(url)
+
+    if parsed.scheme == "s3":
+        bucket = parsed.netloc
+        key = parsed.path.lstrip("/")
+        if parsed.fragment:
+            logger.debug(
+                "URL fragment ignored (not part of S3 key)",
+                url=url,
+                fragment=parsed.fragment,
+            )
+        if not bucket or not key:
+            raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+        bucket = unquote(bucket)
+        key = unquote(key)
+        validate_s3_bucket_name(bucket)
+        validate_s3_key(key)
+        return bucket, key
+
+    elif parsed.scheme in ("http", "https"):
+        if ".s3." in parsed.netloc or parsed.netloc.endswith(".s3.amazonaws.com"):
+            bucket = parsed.netloc.split(".")[0]
+            key = parsed.path.lstrip("/")
+            if parsed.fragment:
+                logger.debug("URL fragment ignored", url=url, fragment=parsed.fragment)
+            if not bucket or not key:
+                raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+            bucket = unquote(bucket)
+            key = unquote(key)
+            validate_s3_bucket_name(bucket)
+            validate_s3_key(key)
+            return bucket, key
+
+        elif parsed.netloc.startswith("s3.") and "amazonaws.com" in parsed.netloc:
+            path_parts = parsed.path.lstrip("/").split("/", 1)
+            if len(path_parts) != 2:
+                raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+            bucket, key = path_parts
+            if not bucket or not key:
+                raise ValueError(f"Invalid S3 URL: {url} (missing bucket or key)")
+            if parsed.fragment:
+                logger.debug("URL fragment ignored", url=url, fragment=parsed.fragment)
+            bucket = unquote(bucket)
+            key = unquote(key)
+            validate_s3_bucket_name(bucket)
+            validate_s3_key(key)
+            return bucket, key
+
+        else:
+            raise ValueError(f"Invalid S3 URL format: {url} (not recognized as S3 URL)")
+
+    else:
+        raise ValueError(
+            f"Invalid S3 URL scheme: {url} (must be s3://, http://, or https://)"
+        )
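+
+
+# Examples of the three accepted shapes (each returns ("my-bucket", "a/b.webm")):
+#
+#     parse_s3_url("s3://my-bucket/a/b.webm")
+#     parse_s3_url("https://my-bucket.s3.amazonaws.com/a/b.webm")
+#     parse_s3_url("https://s3.amazonaws.com/my-bucket/a/b.webm")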
+
+
+async def validate_s3_objects(
+    storage: Storage, bucket_keys: List[Tuple[str, str]]
+) -> None:
+    async with storage.session.client("s3") as client:
+
+        async def check_object(bucket: str, key: str) -> None:
+            try:
+                await client.head_object(Bucket=bucket, Key=key)
+            except ClientError as e:
+                error_code = e.response["Error"]["Code"]
+                if error_code in ("404", "NoSuchKey"):
+                    raise ValueError(
+                        f"S3 object not found: s3://{bucket}/{key}"
+                    ) from e
+                elif error_code in ("403", "Forbidden", "AccessDenied"):
+                    raise ValueError(
+                        f"Access denied for S3 object: s3://{bucket}/{key}. "
+                        "Check AWS credentials and permissions"
+                    ) from e
+                else:
+                    raise ValueError(
+                        f"S3 error {error_code} for s3://{bucket}/{key}: "
+                        f"{e.response['Error'].get('Message', 'Unknown error')}"
+                    ) from e
+            except NoCredentialsError as e:
+                raise ValueError(
+                    "AWS credentials not configured. Set AWS_ACCESS_KEY_ID and "
+                    "AWS_SECRET_ACCESS_KEY environment variables"
+                ) from e
+            except BotoCoreError as e:
+                raise ValueError(
+                    f"AWS service error for s3://{bucket}/{key}: {str(e)}"
+                ) from e
+            except Exception as e:
+                raise ValueError(
+                    f"Unexpected error validating s3://{bucket}/{key}: {str(e)}"
+                ) from e
+
+        await asyncio.gather(
+            *(check_object(bucket, key) for bucket, key in bucket_keys)
+        )
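+
+
+# Usage sketch (illustrative; the bucket/key pair below is hypothetical).
+# Given a configured Storage instance, every object is checked concurrently
+# and the first failure raises ValueError:
+#
+#     await validate_s3_objects(storage, [("recordings", "room1/track-0.webm")])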
 
 
 def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:
-    """Convert TranscriptTopic objects to JSON-serializable dicts"""
     serialized = []
     for topic in topics:
         topic_dict = topic.model_dump()
@@ -32,7 +144,6 @@ def serialize_topics(topics: List[TranscriptTopic]) -> List[Dict[str, Any]]:
 
 
 def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
-    """Print debug info about speakers found in topics"""
     all_speakers = set()
     for topic_dict in serialized_topics:
         for word in topic_dict.get("words", []):
@@ -47,8 +158,6 @@ def debug_print_speakers(serialized_topics: List[Dict[str, Any]]) -> None:
 
 TranscriptId = str
 
-# common interface for every flow: it needs an Entry in db with specific ceremony (file path + status + actual file in file system)
-# ideally we want to get rid of it at some point
 async def prepare_entry(
     source_path: str,
     source_language: str,
@@ -65,9 +174,7 @@ async def prepare_entry(
         user_id=None,
     )
 
-    logger.info(
-        f"Created empty transcript {transcript.id} for file {file_path.name} because technically we need an empty transcript before we start transcript"
-    )
+    logger.info(f"Created transcript {transcript.id} for {file_path.name}")
 
     # pipelines expect files as upload.*
 
@@ -83,7 +190,6 @@ async def prepare_entry(
     return transcript.id
 
-# same reason as prepare_entry
 async def extract_result_from_entry(
     transcript_id: TranscriptId, output_path: str
 ) -> None:
@@ -193,13 +299,20 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Process audio files with speaker diarization"
     )
-    parser.add_argument("source", help="Source file (mp3, wav, mp4...)")
+    parser.add_argument(
+        "source",
+        help="Source file (mp3, wav, mp4...) or comma-separated S3 URLs with --multitrack",
+    )
     parser.add_argument(
         "--pipeline",
-        required=True,
         choices=["live", "file"],
         help="Pipeline type to use for processing (live: streaming/incremental, file: batch/parallel)",
     )
+    parser.add_argument(
+        "--multitrack",
+        action="store_true",
+        help="Process multiple audio tracks from comma-separated S3 URLs",
+    )
     parser.add_argument(
         "--source-language", default="en", help="Source language code (default: en)"
    )
@@ -209,12 +322,40 @@ if __name__ == "__main__":
     parser.add_argument("--output", "-o", help="Output file (output.jsonl)")
 
     args = parser.parse_args()
-    asyncio.run(
-        process(
-            args.source,
-            args.source_language,
-            args.target_language,
-            args.pipeline,
-            args.output,
+    if args.multitrack:
+        if not args.source:
+            parser.error("Source URLs required for multitrack processing")
+
+        s3_urls = [url.strip() for url in args.source.split(",") if url.strip()]
+
+        if not s3_urls:
+            parser.error("At least one S3 URL required for multitrack processing")
+
+        from reflector.tools.cli_multitrack import process_multitrack_cli
+
+        asyncio.run(
+            process_multitrack_cli(
+                s3_urls,
+                args.source_language,
+                args.target_language,
+                args.output,
+            )
+        )
+    else:
+        if not args.pipeline:
+            parser.error("--pipeline is required for single-track processing")
+
+        if "," in args.source:
+            parser.error(
+                "Multiple files detected. Use --multitrack flag for multitrack processing"
+            )
+
+        asyncio.run(
+            process(
+                args.source,
+                args.source_language,
+                args.target_language,
+                args.pipeline,
+                args.output,
+            )
         )
-    )
diff --git a/server/tests/test_s3_url_parser.py b/server/tests/test_s3_url_parser.py
new file mode 100644
index 00000000..638f7c29
--- /dev/null
+++ b/server/tests/test_s3_url_parser.py
@@ -0,0 +1,136 @@
+"""Tests for S3 URL parsing functionality in reflector.tools.process"""
+
+import pytest
+
+from reflector.tools.process import parse_s3_url
+
+
+class TestParseS3URL:
+    """Test cases for the parse_s3_url function"""
+
+    def test_parse_s3_protocol(self):
+        """Test parsing s3:// protocol URLs"""
+        bucket, key = parse_s3_url("s3://my-bucket/path/to/file.webm")
+        assert bucket == "my-bucket"
+        assert key == "path/to/file.webm"
+
+    def test_parse_s3_protocol_deep_path(self):
+        """Test s3:// with deeply nested paths"""
+        bucket, key = parse_s3_url("s3://bucket-name/very/deep/path/to/audio.mp4")
+        assert bucket == "bucket-name"
+        assert key == "very/deep/path/to/audio.mp4"
+
+    def test_parse_https_subdomain_format(self):
+        """Test parsing https://bucket.s3.amazonaws.com/key format"""
+        bucket, key = parse_s3_url("https://my-bucket.s3.amazonaws.com/path/file.webm")
+        assert bucket == "my-bucket"
+        assert key == "path/file.webm"
+
+    def test_parse_https_regional_subdomain(self):
+        """Test parsing regional endpoint with subdomain"""
+        bucket, key = parse_s3_url(
+            "https://my-bucket.s3.us-west-2.amazonaws.com/path/file.webm"
+        )
+        assert bucket == "my-bucket"
+        assert key == "path/file.webm"
+
+    def test_parse_https_path_style(self):
+        """Test parsing https://s3.amazonaws.com/bucket/key format"""
+        bucket, key = parse_s3_url("https://s3.amazonaws.com/my-bucket/path/file.webm")
+        assert bucket == "my-bucket"
+        assert key == "path/file.webm"
+
+    def test_parse_https_regional_path_style(self):
+        """Test parsing regional endpoint with path style"""
+        bucket, key = parse_s3_url(
+            "https://s3.us-east-1.amazonaws.com/my-bucket/path/file.webm"
+        )
+        assert bucket == "my-bucket"
+        assert key == "path/file.webm"
+
+    def test_parse_url_encoded_keys(self):
+        """Test parsing URL-encoded keys"""
+        bucket, key = parse_s3_url(
+            "s3://my-bucket/path%20with%20spaces/file%2Bname.webm"
+        )
+        assert bucket == "my-bucket"
+        assert key == "path with spaces/file+name.webm"  # Should be decoded
+
+    def test_parse_url_encoded_https(self):
+        """Test URL-encoded keys with HTTPS format"""
+        bucket, key = parse_s3_url(
+            "https://my-bucket.s3.amazonaws.com/file%20with%20spaces.webm"
+        )
+        assert bucket == "my-bucket"
+        assert key == "file with spaces.webm"
+
+    def test_invalid_url_no_scheme(self):
+        """Test that URLs without a scheme raise ValueError"""
+        with pytest.raises(ValueError, match="Invalid S3 URL scheme"):
+            parse_s3_url("my-bucket/path/file.webm")
+
+    def test_invalid_url_wrong_scheme(self):
+        """Test that non-S3 schemes raise ValueError"""
+        with pytest.raises(ValueError, match="Invalid S3 URL scheme"):
+            parse_s3_url("ftp://my-bucket/path/file.webm")
+
+    def test_invalid_s3_missing_bucket(self):
+        """Test s3:// URL without bucket raises ValueError"""
+        with pytest.raises(ValueError, match="missing bucket or key"):
+            parse_s3_url("s3:///path/file.webm")
+
+    def test_invalid_s3_missing_key(self):
+        """Test s3:// URL without key raises ValueError"""
+        with pytest.raises(ValueError, match="missing bucket or key"):
+            parse_s3_url("s3://my-bucket/")
ValueError""" + with pytest.raises(ValueError, match="missing bucket or key"): + parse_s3_url("s3://my-bucket") + + def test_invalid_https_not_s3(self): + """Test HTTPS URL that's not S3 raises ValueError""" + with pytest.raises(ValueError, match="not recognized as S3 URL"): + parse_s3_url("https://example.com/path/file.webm") + + def test_invalid_https_subdomain_missing_key(self): + """Test HTTPS subdomain format without key raises ValueError""" + with pytest.raises(ValueError, match="missing bucket or key"): + parse_s3_url("https://my-bucket.s3.amazonaws.com/") + + def test_invalid_https_path_style_missing_parts(self): + """Test HTTPS path style with missing bucket/key raises ValueError""" + with pytest.raises(ValueError, match="missing bucket or key"): + parse_s3_url("https://s3.amazonaws.com/") + + def test_bucket_with_dots(self): + """Test parsing bucket names with dots""" + bucket, key = parse_s3_url("s3://my.bucket.name/path/file.webm") + assert bucket == "my.bucket.name" + assert key == "path/file.webm" + + def test_bucket_with_hyphens(self): + """Test parsing bucket names with hyphens""" + bucket, key = parse_s3_url("s3://my-bucket-name-123/path/file.webm") + assert bucket == "my-bucket-name-123" + assert key == "path/file.webm" + + def test_key_with_special_chars(self): + """Test keys with various special characters""" + # Note: # is treated as URL fragment separator, not part of key + bucket, key = parse_s3_url("s3://bucket/2024-01-01_12:00:00/file.webm") + assert bucket == "bucket" + assert key == "2024-01-01_12:00:00/file.webm" + + def test_fragment_handling(self): + """Test that URL fragments are properly ignored""" + bucket, key = parse_s3_url("s3://bucket/path/to/file.webm#fragment123") + assert bucket == "bucket" + assert key == "path/to/file.webm" # Fragment not included + + def test_http_scheme_s3_url(self): + """Test that HTTP (not HTTPS) S3 URLs are supported""" + bucket, key = parse_s3_url("http://my-bucket.s3.amazonaws.com/path/file.webm") + assert bucket == "my-bucket" + assert key == "path/file.webm"