Compare commits

...

5 Commits

Author SHA1 Message Date
Kevin Guevara
dc428b2042 adding app v2 (#943) 2026-04-09 11:25:19 -05:00
Juan Diego García
5cefc39972 chore(main): release 0.45.0 (#953) 2026-04-08 20:57:55 -05:00
Juan Diego García
739cd51375 fix: inline imports (#955) 2026-04-08 20:32:47 -05:00
Juan Diego García
ee8db36f2c feat: make video recording optional, deleting video tracks (#954)
* feat: make video recording optional, deleting video tracks
2026-04-08 17:05:50 -05:00
Juan Diego García
5f0c5635eb fix: better topic chunking and subject extraction (#952)
* fix: better detect topics chunking depending on duration

* fix: better subject detection + prompt improvements
2026-04-08 15:38:04 -05:00
128 changed files with 25231 additions and 258 deletions

2
.gitignore vendored
View File

@@ -33,3 +33,5 @@ Caddyfile.gpu-host
.env.gpu-host
vibedocs/
server/tests/integration/logs/
node_modules

View File

@@ -1,5 +1,18 @@
# Changelog
## [0.45.0](https://github.com/GreyhavenHQ/reflector/compare/v0.44.0...v0.45.0) (2026-04-09)
### Features
* make video recording optional, deleting video tracks ([#954](https://github.com/GreyhavenHQ/reflector/issues/954)) ([ee8db36](https://github.com/GreyhavenHQ/reflector/commit/ee8db36f2cd93b8f1ff4f4318e331fe2bac219c5))
### Bug Fixes
* better topic chunking and subject extraction ([#952](https://github.com/GreyhavenHQ/reflector/issues/952)) ([5f0c563](https://github.com/GreyhavenHQ/reflector/commit/5f0c5635eb77955b70168242ad7c336a20c98dd0))
* inline imports ([#955](https://github.com/GreyhavenHQ/reflector/issues/955)) ([739cd51](https://github.com/GreyhavenHQ/reflector/commit/739cd513751cd52d8e3d6d80b64568b1cf409414))
## [0.44.0](https://github.com/GreyhavenHQ/reflector/compare/v0.43.0...v0.44.0) (2026-04-07)

View File

@@ -201,4 +201,4 @@ If you need to do any worker/pipeline related work, search for "Pipeline" classe
## Code Style
- Always put imports at the top of the file. Let ruff/pre-commit handle sorting and formatting of imports.
- Exception: In Hatchet pipeline task functions, DB controller imports (e.g., `transcripts_controller`, `meetings_controller`) stay as deferred/inline imports inside `fresh_db_connection()` blocks — this is intentional to avoid sharing DB connections across forked processes. Non-DB imports (utilities, services) should still go at the top of the file.
- The **only** imports allowed to remain inline are from `reflector.db.*` modules (e.g., `reflector.db.transcripts`, `reflector.db.meetings`, `reflector.db.recordings`, `reflector.db.rooms`). These stay as deferred/inline imports inside `fresh_db_connection()` blocks in Hatchet pipeline task functions — this is intentional to avoid sharing DB connections across forked processes. All other imports (utilities, services, processors, storage, third-party libs) **must** go at the top of the file, even in Hatchet workflows.

View File

@@ -2,7 +2,8 @@ services:
server:
build:
context: server
network_mode: host
ports:
- "1250:1250"
volumes:
- ./server/:/app/
- /app/.venv
@@ -10,12 +11,17 @@ services:
- ./server/.env
environment:
ENTRYPOINT: server
DATABASE_URL: postgresql+asyncpg://reflector:reflector@localhost:5432/reflector
REDIS_HOST: localhost
CELERY_BROKER_URL: redis://localhost:6379/1
CELERY_RESULT_BACKEND: redis://localhost:6379/1
HATCHET_CLIENT_SERVER_URL: http://localhost:8889
HATCHET_CLIENT_HOST_PORT: localhost:7078
DATABASE_URL: postgresql+asyncpg://reflector:reflector@postgres:5432/reflector
REDIS_HOST: redis
CELERY_BROKER_URL: redis://redis:6379/1
CELERY_RESULT_BACKEND: redis://redis:6379/1
HATCHET_CLIENT_SERVER_URL: http://hatchet:8888
HATCHET_CLIENT_HOST_PORT: hatchet:7077
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_started
worker:
build:

View File

@@ -0,0 +1,43 @@
"""add store_video to room and meeting
Revision ID: c1d2e3f4a5b6
Revises: b4c7e8f9a012
Create Date: 2026-04-08 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
revision: str = "c1d2e3f4a5b6"
down_revision: Union[str, None] = "b4c7e8f9a012"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
op.add_column(
"room",
sa.Column(
"store_video",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.add_column(
"meeting",
sa.Column(
"store_video",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
def downgrade() -> None:
op.drop_column("meeting", "store_video")
op.drop_column("room", "store_video")

View File

@@ -69,6 +69,7 @@ meetings = sa.Table(
sa.Column("daily_composed_video_duration", sa.Integer, nullable=True),
# Email recipients for transcript notification
sa.Column("email_recipients", JSONB, nullable=True),
sa.Column("store_video", sa.Boolean, nullable=False, server_default=sa.false()),
sa.Index("idx_meeting_room_id", "room_id"),
sa.Index("idx_meeting_calendar_event", "calendar_event_id"),
)
@@ -122,6 +123,7 @@ class Meeting(BaseModel):
# Email recipients for transcript notification
# Each entry is {"email": str, "include_link": bool} or a legacy plain str
email_recipients: list[dict | str] | None = None
store_video: bool = False
class MeetingController:
@@ -152,6 +154,7 @@ class MeetingController:
calendar_event_id=calendar_event_id,
calendar_metadata=calendar_metadata,
platform=room.platform,
store_video=room.store_video,
)
query = meetings.insert().values(**meeting.model_dump())
await get_database().execute(query)

View File

@@ -64,6 +64,9 @@ rooms = sqlalchemy.Table(
server_default=sqlalchemy.sql.false(),
),
sqlalchemy.Column("email_transcript_to", sqlalchemy.String, nullable=True),
sqlalchemy.Column(
"store_video", sqlalchemy.Boolean, nullable=False, server_default=false()
),
sqlalchemy.Index("idx_room_is_shared", "is_shared"),
sqlalchemy.Index("idx_room_ics_enabled", "ics_enabled"),
)
@@ -94,6 +97,7 @@ class Room(BaseModel):
platform: Platform = Field(default_factory=lambda: settings.DEFAULT_VIDEO_PLATFORM)
skip_consent: bool = False
email_transcript_to: str | None = None
store_video: bool = False
class RoomController:
@@ -150,6 +154,7 @@ class RoomController:
platform: Platform = settings.DEFAULT_VIDEO_PLATFORM,
skip_consent: bool = False,
email_transcript_to: str | None = None,
store_video: bool = False,
):
"""
Add a new room
@@ -176,6 +181,7 @@ class RoomController:
"platform": platform,
"skip_consent": skip_consent,
"email_transcript_to": email_transcript_to,
"store_video": store_video,
}
room = Room(**room_data)

View File

@@ -10,6 +10,7 @@ from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
daily_multitrack_pipeline,
)
from reflector.hatchet.workflows.failed_runs_monitor import failed_runs_monitor
from reflector.hatchet.workflows.file_pipeline import file_pipeline
from reflector.hatchet.workflows.live_post_pipeline import live_post_pipeline
from reflector.hatchet.workflows.subject_processing import subject_workflow
@@ -54,10 +55,6 @@ def main():
]
)
if _zulip_dag_enabled:
from reflector.hatchet.workflows.failed_runs_monitor import ( # noqa: PLC0415
failed_runs_monitor,
)
workflows.append(failed_runs_monitor)
logger.info(
"FailedRunsMonitor cron enabled",

View File

@@ -18,10 +18,11 @@ import json
import tempfile
import time
from contextlib import asynccontextmanager
from datetime import timedelta
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Callable, Coroutine, Protocol, TypeVar
import databases
import httpx
from hatchet_sdk import (
ConcurrencyExpression,
@@ -83,6 +84,7 @@ from reflector.hatchet.workflows.topic_chunk_processing import (
topic_chunk_workflow,
)
from reflector.hatchet.workflows.track_processing import TrackInput, track_workflow
from reflector.llm import LLM
from reflector.logger import logger
from reflector.pipelines import topic_processing
from reflector.processors.audio_mixdown_auto import AudioMixdownAutoProcessor
@@ -95,7 +97,9 @@ from reflector.processors.summary.prompts import (
from reflector.processors.summary.summary_builder import SummaryBuilder
from reflector.processors.types import TitleSummary, Word
from reflector.processors.types import Transcript as TranscriptType
from reflector.redis_cache import get_async_redis_client
from reflector.settings import settings
from reflector.storage import get_source_storage, get_transcripts_storage
from reflector.utils.audio_constants import (
PRESIGNED_URL_EXPIRATION_SECONDS,
WAVEFORM_SEGMENTS,
@@ -105,8 +109,16 @@ from reflector.utils.daily import (
filter_cam_audio_tracks,
parse_daily_recording_filename,
)
from reflector.utils.livekit import parse_livekit_track_filepath
from reflector.utils.string import NonEmptyString, assert_non_none_and_non_empty
from reflector.utils.transcript_constants import TOPIC_CHUNK_WORD_COUNT
from reflector.utils.transcript_constants import (
compute_max_subjects,
compute_topic_chunk_size,
)
from reflector.utils.webhook import (
fetch_transcript_webhook_payload,
send_webhook_request,
)
from reflector.zulip import post_transcript_notification
@@ -135,8 +147,6 @@ async def fresh_db_connection():
The real fix would be making the db module fork-aware instead of bypassing it.
Current pattern is acceptable given Hatchet's process model.
"""
import databases # noqa: PLC0415
from reflector.db import _database_context # noqa: PLC0415
_database_context.set(None)
@@ -173,8 +183,6 @@ async def set_workflow_error_status(transcript_id: NonEmptyString) -> bool:
def _spawn_storage():
"""Create fresh storage instance for writing to our transcript bucket."""
from reflector.storage import get_transcripts_storage # noqa: PLC0415
return get_transcripts_storage()
@@ -388,10 +396,6 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsRe
if input.source_platform == "livekit":
# LiveKit: participant identity is in the track dict or can be parsed from filepath
from reflector.utils.livekit import (
parse_livekit_track_filepath, # noqa: PLC0415
)
# Look up identity → Reflector user_id mapping from Redis
# (stored at join time in rooms.py)
identity_to_user_id: dict[str, str] = {}
@@ -399,9 +403,6 @@ async def get_participants(input: PipelineInput, ctx: Context) -> ParticipantsRe
from reflector.db.meetings import (
meetings_controller as mc, # noqa: PLC0415
)
from reflector.redis_cache import (
get_async_redis_client, # noqa: PLC0415
)
meeting = (
await mc.get_by_id(transcript.meeting_id)
@@ -543,12 +544,6 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksRes
# OGG files don't have embedded start_time metadata, so we pre-calculate.
track_padding: dict[int, float] = {}
if input.source_platform == "livekit":
from datetime import datetime # noqa: PLC0415
from reflector.utils.livekit import (
parse_livekit_track_filepath, # noqa: PLC0415
)
timestamps = []
for i, track in enumerate(input.tracks):
ts_str = track.get("timestamp")
@@ -885,7 +880,8 @@ async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
transcripts_controller,
)
chunk_size = TOPIC_CHUNK_WORD_COUNT
duration_seconds = words[-1].end - words[0].start if words else 0
chunk_size = compute_topic_chunk_size(duration_seconds, len(words))
chunks = []
for i in range(0, len(words), chunk_size):
chunk_words = words[i : i + chunk_size]
@@ -975,7 +971,7 @@ async def detect_topics(input: PipelineInput, ctx: Context) -> TopicsResult:
ctx.log(f"detect_topics complete: found {len(topics_list)} topics")
return TopicsResult(topics=topics_list)
return TopicsResult(topics=topics_list, duration_seconds=duration_seconds)
@daily_multitrack_pipeline.task(
@@ -1069,10 +1065,9 @@ async def extract_subjects(input: PipelineInput, ctx: Context) -> SubjectsResult
participant_name_to_id={},
)
# Deferred imports: Hatchet workers fork processes, fresh imports avoid
# sharing DB connections and LLM HTTP pools across forks
# Deferred DB import: Hatchet workers fork processes, fresh imports avoid
# sharing DB connections across forks
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
from reflector.llm import LLM # noqa: PLC0415
async with fresh_db_connection():
transcript = await transcripts_controller.get_by_id(input.transcript_id)
@@ -1112,8 +1107,14 @@ async def extract_subjects(input: PipelineInput, ctx: Context) -> SubjectsResult
participant_names, participant_name_to_id=participant_name_to_id
)
max_subjects = compute_max_subjects(topics_result.duration_seconds)
ctx.log(
f"extract_subjects: duration={topics_result.duration_seconds:.0f}s, "
f"max_subjects={max_subjects}"
)
ctx.log("extract_subjects: calling LLM to extract subjects")
await builder.extract_subjects()
await builder.extract_subjects(max_subjects=max_subjects)
ctx.log(f"extract_subjects complete: {len(builder.subjects)} subjects")
@@ -1196,14 +1197,13 @@ async def generate_recap(input: PipelineInput, ctx: Context) -> RecapResult:
subjects_result = ctx.task_output(extract_subjects)
process_result = ctx.task_output(process_subjects)
# Deferred imports: Hatchet workers fork processes, fresh imports avoid
# sharing DB connections and LLM HTTP pools across forks
# Deferred DB import: Hatchet workers fork processes, fresh imports avoid
# sharing DB connections across forks
from reflector.db.transcripts import ( # noqa: PLC0415
TranscriptFinalLongSummary,
TranscriptFinalShortSummary,
transcripts_controller,
)
from reflector.llm import LLM # noqa: PLC0415
subject_summaries = process_result.subject_summaries
@@ -1292,13 +1292,12 @@ async def identify_action_items(
ctx.log("identify_action_items: no transcript text, returning empty")
return ActionItemsResult(action_items=ActionItemsResponse())
# Deferred imports: Hatchet workers fork processes, fresh imports avoid
# sharing DB connections and LLM HTTP pools across forks
# Deferred DB import: Hatchet workers fork processes, fresh imports avoid
# sharing DB connections across forks
from reflector.db.transcripts import ( # noqa: PLC0415
TranscriptActionItems,
transcripts_controller,
)
from reflector.llm import LLM # noqa: PLC0415
# TODO: refactor SummaryBuilder methods into standalone functions
llm = LLM(settings=settings)
@@ -1435,10 +1434,6 @@ async def cleanup_consent(input: PipelineInput, ctx: Context) -> ConsentResult:
)
from reflector.db.recordings import recordings_controller # noqa: PLC0415
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
from reflector.storage import ( # noqa: PLC0415
get_source_storage,
get_transcripts_storage,
)
transcript = await transcripts_controller.get_by_id(input.transcript_id)
if not transcript:
@@ -1587,10 +1582,6 @@ async def send_webhook(input: PipelineInput, ctx: Context) -> WebhookResult:
async with fresh_db_connection():
from reflector.db.rooms import rooms_controller # noqa: PLC0415
from reflector.utils.webhook import ( # noqa: PLC0415
fetch_transcript_webhook_payload,
send_webhook_request,
)
room = await rooms_controller.get_by_id(input.room_id)
if not room or not room.webhook_url:

View File

@@ -15,6 +15,8 @@ import json
from datetime import timedelta
from pathlib import Path
import av
import httpx
from hatchet_sdk import Context
from pydantic import BaseModel
@@ -47,9 +49,30 @@ from reflector.hatchet.workflows.models import (
)
from reflector.logger import logger
from reflector.pipelines import topic_processing
from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
from reflector.processors import AudioFileWriterProcessor
from reflector.processors.file_diarization import FileDiarizationInput
from reflector.processors.file_diarization_auto import FileDiarizationAutoProcessor
from reflector.processors.transcript_diarization_assembler import (
TranscriptDiarizationAssemblerInput,
TranscriptDiarizationAssemblerProcessor,
)
from reflector.processors.types import (
DiarizationSegment,
Word,
)
from reflector.processors.types import (
Transcript as TranscriptType,
)
from reflector.settings import settings
from reflector.storage import get_source_storage, get_transcripts_storage
from reflector.utils.audio_constants import WAVEFORM_SEGMENTS
from reflector.utils.audio_waveform import get_audio_waveform
from reflector.utils.webhook import (
fetch_transcript_webhook_payload,
send_webhook_request,
)
from reflector.zulip import post_transcript_notification
class FilePipelineInput(BaseModel):
@@ -135,10 +158,6 @@ async def extract_audio(input: FilePipelineInput, ctx: Context) -> ExtractAudioR
ctx.log(f"extract_audio: processing {audio_file}")
# Extract audio and write as MP3
import av # noqa: PLC0415
from reflector.processors import AudioFileWriterProcessor # noqa: PLC0415
duration_ms_container = [0.0]
async def capture_duration(d):
@@ -189,8 +208,6 @@ async def upload_audio(input: FilePipelineInput, ctx: Context) -> UploadAudioRes
extract_result = ctx.task_output(extract_audio)
audio_path = extract_result.audio_path
from reflector.storage import get_transcripts_storage # noqa: PLC0415
storage = get_transcripts_storage()
if not storage:
raise ValueError(
@@ -232,10 +249,6 @@ async def transcribe(input: FilePipelineInput, ctx: Context) -> TranscribeResult
raise ValueError(f"Transcript {input.transcript_id} not found")
source_language = transcript.source_language
from reflector.pipelines.transcription_helpers import ( # noqa: PLC0415
transcribe_file_with_processor,
)
result = await transcribe_file_with_processor(audio_url, source_language)
ctx.log(f"transcribe complete: {len(result.words)} words")
@@ -264,13 +277,6 @@ async def diarize(input: FilePipelineInput, ctx: Context) -> DiarizeResult:
upload_result = ctx.task_output(upload_audio)
audio_url = upload_result.audio_url
from reflector.processors.file_diarization import ( # noqa: PLC0415
FileDiarizationInput,
)
from reflector.processors.file_diarization_auto import ( # noqa: PLC0415
FileDiarizationAutoProcessor,
)
processor = FileDiarizationAutoProcessor()
input_data = FileDiarizationInput(audio_url=audio_url)
@@ -353,18 +359,6 @@ async def assemble_transcript(
transcribe_result = ctx.task_output(transcribe)
diarize_result = ctx.task_output(diarize)
from reflector.processors.transcript_diarization_assembler import ( # noqa: PLC0415
TranscriptDiarizationAssemblerInput,
TranscriptDiarizationAssemblerProcessor,
)
from reflector.processors.types import ( # noqa: PLC0415
DiarizationSegment,
Word,
)
from reflector.processors.types import ( # noqa: PLC0415
Transcript as TranscriptType,
)
words = [Word(**w) for w in transcribe_result.words]
transcript_data = TranscriptType(
words=words, translation=transcribe_result.translation
@@ -437,17 +431,6 @@ async def detect_topics(input: FilePipelineInput, ctx: Context) -> TopicsResult:
TranscriptTopic,
transcripts_controller,
)
from reflector.processors.transcript_diarization_assembler import ( # noqa: PLC0415
TranscriptDiarizationAssemblerInput,
TranscriptDiarizationAssemblerProcessor,
)
from reflector.processors.types import ( # noqa: PLC0415
DiarizationSegment,
Word,
)
from reflector.processors.types import ( # noqa: PLC0415
Transcript as TranscriptType,
)
words = [Word(**w) for w in transcribe_result.words]
transcript_data = TranscriptType(
@@ -688,10 +671,6 @@ async def cleanup_consent(input: FilePipelineInput, ctx: Context) -> ConsentResu
)
from reflector.db.recordings import recordings_controller # noqa: PLC0415
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
from reflector.storage import ( # noqa: PLC0415
get_source_storage,
get_transcripts_storage,
)
transcript = await transcripts_controller.get_by_id(input.transcript_id)
if not transcript:
@@ -807,7 +786,6 @@ async def post_zulip(input: FilePipelineInput, ctx: Context) -> ZulipResult:
async with fresh_db_connection():
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
from reflector.zulip import post_transcript_notification # noqa: PLC0415
transcript = await transcripts_controller.get_by_id(input.transcript_id)
if transcript:
@@ -837,10 +815,6 @@ async def send_webhook(input: FilePipelineInput, ctx: Context) -> WebhookResult:
async with fresh_db_connection():
from reflector.db.rooms import rooms_controller # noqa: PLC0415
from reflector.utils.webhook import ( # noqa: PLC0415
fetch_transcript_webhook_payload,
send_webhook_request,
)
room = await rooms_controller.get_by_id(input.room_id)
if not room or not room.webhook_url:
@@ -856,8 +830,6 @@ async def send_webhook(input: FilePipelineInput, ctx: Context) -> WebhookResult:
ctx.log(f"send_webhook skipped (could not build payload): {payload}")
return WebhookResult(webhook_sent=False, skipped=True)
import httpx # noqa: PLC0415
try:
response = await send_webhook_request(
url=room.webhook_url,

View File

@@ -14,6 +14,7 @@ are not shared across forks, avoiding connection pooling issues.
from datetime import timedelta
import httpx
from hatchet_sdk import Context
from pydantic import BaseModel
@@ -40,7 +41,24 @@ from reflector.hatchet.workflows.models import (
ZulipResult,
)
from reflector.logger import logger
from reflector.pipelines.main_live_pipeline import (
PipelineMainTitle,
PipelineMainWaveform,
pipeline_convert_to_mp3,
pipeline_diarization,
pipeline_remove_upload,
pipeline_summaries,
pipeline_upload_mp3,
)
from reflector.pipelines.main_live_pipeline import (
cleanup_consent as _cleanup_consent,
)
from reflector.settings import settings
from reflector.utils.webhook import (
fetch_transcript_webhook_payload,
send_webhook_request,
)
from reflector.zulip import post_transcript_notification
class LivePostPipelineInput(BaseModel):
@@ -91,9 +109,6 @@ async def waveform(input: LivePostPipelineInput, ctx: Context) -> WaveformResult
async with fresh_db_connection():
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
PipelineMainWaveform,
)
transcript = await transcripts_controller.get_by_id(input.transcript_id)
if not transcript:
@@ -118,10 +133,6 @@ async def generate_title(input: LivePostPipelineInput, ctx: Context) -> TitleRes
ctx.log(f"generate_title: starting for transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
PipelineMainTitle,
)
runner = PipelineMainTitle(transcript_id=input.transcript_id)
await runner.run()
@@ -142,10 +153,6 @@ async def convert_mp3(input: LivePostPipelineInput, ctx: Context) -> ConvertMp3R
ctx.log(f"convert_mp3: starting for transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
pipeline_convert_to_mp3,
)
await pipeline_convert_to_mp3(transcript_id=input.transcript_id)
ctx.log("convert_mp3 complete")
@@ -165,10 +172,6 @@ async def upload_mp3(input: LivePostPipelineInput, ctx: Context) -> UploadMp3Res
ctx.log(f"upload_mp3: starting for transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
pipeline_upload_mp3,
)
await pipeline_upload_mp3(transcript_id=input.transcript_id)
ctx.log("upload_mp3 complete")
@@ -190,10 +193,6 @@ async def remove_upload(
ctx.log(f"remove_upload: starting for transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
pipeline_remove_upload,
)
await pipeline_remove_upload(transcript_id=input.transcript_id)
ctx.log("remove_upload complete")
@@ -213,10 +212,6 @@ async def diarize(input: LivePostPipelineInput, ctx: Context) -> DiarizeResult:
ctx.log(f"diarize: starting for transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
pipeline_diarization,
)
await pipeline_diarization(transcript_id=input.transcript_id)
ctx.log("diarize complete")
@@ -236,10 +231,6 @@ async def cleanup_consent(input: LivePostPipelineInput, ctx: Context) -> Consent
ctx.log(f"cleanup_consent: transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
cleanup_consent as _cleanup_consent,
)
await _cleanup_consent(transcript_id=input.transcript_id)
ctx.log("cleanup_consent complete")
@@ -261,10 +252,6 @@ async def final_summaries(
ctx.log(f"final_summaries: starting for transcript_id={input.transcript_id}")
async with fresh_db_connection():
from reflector.pipelines.main_live_pipeline import ( # noqa: PLC0415
pipeline_summaries,
)
await pipeline_summaries(transcript_id=input.transcript_id)
ctx.log("final_summaries complete")
@@ -289,7 +276,6 @@ async def post_zulip(input: LivePostPipelineInput, ctx: Context) -> ZulipResult:
async with fresh_db_connection():
from reflector.db.transcripts import transcripts_controller # noqa: PLC0415
from reflector.zulip import post_transcript_notification # noqa: PLC0415
transcript = await transcripts_controller.get_by_id(input.transcript_id)
if transcript:
@@ -319,10 +305,6 @@ async def send_webhook(input: LivePostPipelineInput, ctx: Context) -> WebhookRes
async with fresh_db_connection():
from reflector.db.rooms import rooms_controller # noqa: PLC0415
from reflector.utils.webhook import ( # noqa: PLC0415
fetch_transcript_webhook_payload,
send_webhook_request,
)
room = await rooms_controller.get_by_id(input.room_id)
if not room or not room.webhook_url:
@@ -338,8 +320,6 @@ async def send_webhook(input: LivePostPipelineInput, ctx: Context) -> WebhookRes
ctx.log(f"send_webhook skipped (could not build payload): {payload}")
return WebhookResult(webhook_sent=False, skipped=True)
import httpx # noqa: PLC0415
try:
response = await send_webhook_request(
url=room.webhook_url,

View File

@@ -102,6 +102,7 @@ class TopicsResult(BaseModel):
"""Result from detect_topics task."""
topics: list[TitleSummary]
duration_seconds: float = 0
class TitleResult(BaseModel):

View File

@@ -13,6 +13,8 @@ from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import TIMEOUT_AUDIO
from reflector.hatchet.workflows.models import PadTrackResult
from reflector.logger import logger
from reflector.processors.audio_padding_auto import AudioPaddingAutoProcessor
from reflector.storage import get_source_storage, get_transcripts_storage
from reflector.utils.audio_constants import PRESIGNED_URL_EXPIRATION_SECONDS
from reflector.utils.audio_padding import extract_stream_start_time_from_container
@@ -51,11 +53,6 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
)
try:
from reflector.storage import ( # noqa: PLC0415
get_source_storage,
get_transcripts_storage,
)
# Source reads: use platform-specific credentials
source_storage = get_source_storage(input.source_platform)
source_url = await source_storage.get_file_url(
@@ -104,10 +101,6 @@ async def pad_track(input: PaddingInput, ctx: Context) -> PadTrackResult:
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
)
from reflector.processors.audio_padding_auto import ( # noqa: PLC0415
AudioPaddingAutoProcessor,
)
processor = AudioPaddingAutoProcessor()
result = await processor.pad_track(
track_url=source_url,

View File

@@ -15,12 +15,14 @@ from pydantic import BaseModel
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, TIMEOUT_HEAVY
from reflector.hatchet.workflows.models import SubjectSummaryResult
from reflector.llm import LLM
from reflector.logger import logger
from reflector.processors.summary.prompts import (
DETAILED_SUBJECT_PROMPT_TEMPLATE,
PARAGRAPH_SUMMARY_PROMPT,
build_participant_instructions,
)
from reflector.settings import settings
class SubjectInput(BaseModel):
@@ -60,11 +62,6 @@ async def generate_detailed_summary(
subject_index=input.subject_index,
)
# Deferred imports: Hatchet workers fork processes, fresh imports ensure
# LLM HTTP connection pools aren't shared across forks
from reflector.llm import LLM # noqa: PLC0415
from reflector.settings import settings # noqa: PLC0415
llm = LLM(settings=settings)
participant_instructions = build_participant_instructions(input.participant_names)

View File

@@ -18,9 +18,13 @@ from pydantic import BaseModel
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, TIMEOUT_MEDIUM
from reflector.hatchet.workflows.models import TopicChunkResult
from reflector.llm import LLM
from reflector.logger import logger
from reflector.processors.prompts import TOPIC_PROMPT
from reflector.processors.transcript_topic_detector import TopicResponse
from reflector.processors.types import Word
from reflector.settings import settings
from reflector.utils.text import clean_title
class TopicChunkInput(BaseModel):
@@ -64,15 +68,6 @@ async def detect_chunk_topic(input: TopicChunkInput, ctx: Context) -> TopicChunk
text_length=len(input.chunk_text),
)
# Deferred imports: Hatchet workers fork processes, fresh imports avoid
# sharing LLM HTTP connection pools across forks
from reflector.llm import LLM # noqa: PLC0415
from reflector.processors.transcript_topic_detector import ( # noqa: PLC0415
TopicResponse,
)
from reflector.settings import settings # noqa: PLC0415
from reflector.utils.text import clean_title # noqa: PLC0415
llm = LLM(settings=settings, temperature=0.9)
prompt = TOPIC_PROMPT.format(text=input.chunk_text)

View File

@@ -9,9 +9,9 @@ because Hatchet workflow DAGs are defined statically, but the number of tracks v
at runtime. Child workflow spawning via `aio_run()` + `asyncio.gather()` is the
standard pattern for dynamic fan-out. See `process_tracks` in daily_multitrack_pipeline.py.
Note: This file uses deferred imports (inside tasks) intentionally.
Note: DB imports (reflector.db.*) are kept inline (deferred) intentionally.
Hatchet workers run in forked processes; fresh imports per task ensure
storage/DB connections are not shared across forks.
DB connections are not shared across forks.
"""
from datetime import timedelta
@@ -24,6 +24,9 @@ from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.constants import TIMEOUT_AUDIO, TIMEOUT_HEAVY
from reflector.hatchet.workflows.models import PadTrackResult, TranscribeTrackResult
from reflector.logger import logger
from reflector.pipelines.transcription_helpers import transcribe_file_with_processor
from reflector.processors.audio_padding_auto import AudioPaddingAutoProcessor
from reflector.storage import get_source_storage, get_transcripts_storage
from reflector.utils.audio_constants import PRESIGNED_URL_EXPIRATION_SECONDS
from reflector.utils.audio_padding import extract_stream_start_time_from_container
@@ -72,11 +75,6 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
)
try:
from reflector.storage import ( # noqa: PLC0415
get_source_storage,
get_transcripts_storage,
)
# Source reads: use platform-specific credentials
source_storage = get_source_storage(input.source_platform)
source_url = await source_storage.get_file_url(
@@ -120,10 +118,6 @@ async def pad_track(input: TrackInput, ctx: Context) -> PadTrackResult:
expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
)
from reflector.processors.audio_padding_auto import ( # noqa: PLC0415
AudioPaddingAutoProcessor,
)
processor = AudioPaddingAutoProcessor()
result = await processor.pad_track(
track_url=source_url,
@@ -179,11 +173,6 @@ async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackRe
raise ValueError("Missing padded_key from pad_track")
# Presign URL on demand (avoids stale URLs on workflow replay)
from reflector.storage import ( # noqa: PLC0415
get_source_storage,
get_transcripts_storage,
)
# If bucket_name is set, file is still in the platform's source bucket (no padding applied).
# If bucket_name is None, padded file was written to our transcript storage.
if bucket_name:
@@ -198,10 +187,6 @@ async def transcribe_track(input: TrackInput, ctx: Context) -> TranscribeTrackRe
bucket=bucket_name,
)
from reflector.pipelines.transcription_helpers import ( # noqa: PLC0415
transcribe_file_with_processor,
)
transcript = await transcribe_file_with_processor(audio_url, input.language)
# Tag all words with speaker index

View File

@@ -38,6 +38,7 @@ from reflector.db.transcripts import (
TranscriptWaveform,
transcripts_controller,
)
from reflector.hatchet.client import HatchetClientManager
from reflector.logger import logger
from reflector.pipelines.runner import PipelineMessage, PipelineRunner
from reflector.processors import (
@@ -814,8 +815,6 @@ async def pipeline_post(*, transcript_id: str, room_id: str | None = None):
"""
Run the post pipeline via Hatchet.
"""
from reflector.hatchet.client import HatchetClientManager # noqa: PLC0415
await HatchetClientManager.start_workflow(
"LivePostProcessingPipeline",
{

View File

@@ -18,7 +18,7 @@ from reflector.processors import (
)
from reflector.processors.types import TitleSummary
from reflector.processors.types import Transcript as TranscriptType
from reflector.utils.transcript_constants import TOPIC_CHUNK_WORD_COUNT
from reflector.utils.transcript_constants import compute_topic_chunk_size
class EmptyPipeline:
@@ -39,7 +39,10 @@ async def detect_topics(
on_topic_callback: Callable,
empty_pipeline: EmptyPipeline,
) -> list[TitleSummary]:
chunk_size = TOPIC_CHUNK_WORD_COUNT
duration_seconds = (
transcript.words[-1].end - transcript.words[0].start if transcript.words else 0
)
chunk_size = compute_topic_chunk_size(duration_seconds, len(transcript.words))
topics: list[TitleSummary] = []
async def on_topic(topic: TitleSummary):

View File

@@ -10,6 +10,7 @@ import os
import tempfile
import av
import requests
from reflector.logger import logger
from reflector.processors.audio_padding import AudioPaddingProcessor, PaddingResponse
@@ -65,8 +66,6 @@ class AudioPaddingPyavProcessor(AudioPaddingProcessor):
track_index: int,
) -> PaddingResponse:
"""Blocking padding work: download, pad with PyAV, upload."""
import requests
log = logger.bind(track_index=track_index, padding_seconds=start_time_seconds)
temp_dir = tempfile.mkdtemp()
input_path = None

View File

@@ -34,7 +34,8 @@ class AudioTranscriptModalProcessor(AudioTranscriptProcessor):
self.transcript_url = settings.TRANSCRIPT_URL + "/v1"
self.timeout = settings.TRANSCRIPT_TIMEOUT
self.modal_api_key = modal_api_key
print(self.timeout, self.modal_api_key)
async def _transcript(self, data: AudioFile):
async with AsyncOpenAI(
base_url=self.transcript_url,

View File

@@ -43,7 +43,8 @@ DETAILED_SUBJECT_PROMPT_TEMPLATE = dedent(
include any deadlines or timeframes discussed for completion or follow-up.
- Mention unresolved issues or topics needing further discussion, aiding in
planning future meetings or follow-up actions.
- Do not include topic unrelated to {subject}.
- Be specific and cite participant names when attributing statements or actions.
- Do not include topics unrelated to {subject}.
# OUTPUT
Your summary should be clear, concise, and structured, covering all major
@@ -58,6 +59,7 @@ PARAGRAPH_SUMMARY_PROMPT = dedent(
"""
Summarize the mentioned topic in 1 paragraph.
It will be integrated into the final summary, so just for this topic.
Preserve key decisions and action items. Do not introduce new information.
"""
).strip()

View File

@@ -48,17 +48,24 @@ TRANSCRIPTION_TYPE_PROMPT = dedent(
"""
).strip()
SUBJECTS_PROMPT = dedent(
"""
What are the main / high level topic of the meeting.
Do not include direct quotes or unnecessary details.
Be concise and focused on the main ideas.
A subject briefly mentioned should not be included.
There should be maximum 6 subjects.
Do not write complete narrative sentences for the subject,
you must write a concise subject using noun phrases.
"""
).strip()
_DEFAULT_MAX_SUBJECTS = 6


def build_subjects_prompt(max_subjects: int = _DEFAULT_MAX_SUBJECTS) -> str:
    """Build the subjects-extraction prompt with a dynamic subject cap.

    Args:
        max_subjects: upper bound on subjects the LLM should return;
            singular/plural wording is adjusted to match.

    Returns:
        The prompt text, one instruction per line, no leading/trailing
        whitespace.
    """
    # Grammatical agreement for the cap sentence ("1 subject" vs "6 subjects").
    noun = "subject" if max_subjects == 1 else "subjects"
    instructions = [
        "What are the main / high level topics of the meeting.",
        "Do not include direct quotes or unnecessary details.",
        "Be concise and focused on the main ideas.",
        "A subject briefly mentioned should not be included.",
        f"There should be maximum {max_subjects} {noun}.",
        "Do not write complete narrative sentences for the subject,",
        "you must write a concise subject using noun phrases.",
    ]
    return "\n".join(instructions)
ACTION_ITEMS_PROMPT = dedent(
"""
@@ -145,7 +152,7 @@ class SubjectsResponse(BaseModel):
"""Pydantic model for extracted subjects/topics"""
subjects: list[str] = Field(
description="List of main subjects/topics discussed, maximum 6 items",
description="List of main subjects/topics discussed",
)
@@ -345,11 +352,14 @@ class SummaryBuilder:
# Summary
# ----------------------------------------------------------------------------
async def extract_subjects(self) -> None:
async def extract_subjects(self, max_subjects: int = _DEFAULT_MAX_SUBJECTS) -> None:
"""Extract main subjects/topics from the transcript."""
self.logger.info("--- extract main subjects using TreeSummarize")
self.logger.info(
"--- extract main subjects using TreeSummarize",
max_subjects=max_subjects,
)
subjects_prompt = SUBJECTS_PROMPT
subjects_prompt = build_subjects_prompt(max_subjects)
try:
response = await self._get_structured_response(
@@ -358,7 +368,7 @@ class SummaryBuilder:
tone_name="Meeting assistant that talk only as list item",
)
self.subjects = response.subjects
self.subjects = response.subjects[:max_subjects]
self.logger.info(f"Extracted subjects: {self.subjects}")
except Exception as e:

View File

@@ -333,7 +333,9 @@ if __name__ == "__main__":
if not s3_urls:
parser.error("At least one S3 URL required for multitrack processing")
from reflector.tools.cli_multitrack import process_multitrack_cli
from reflector.tools.cli_multitrack import (
process_multitrack_cli, # circular import
)
asyncio.run(
process_multitrack_cli(

View File

@@ -5,6 +5,7 @@ This tools help to either create a pipeline from command line,
or read a yaml description of a pipeline and run it.
"""
import importlib
import json
from reflector.logger import logger
@@ -37,8 +38,6 @@ def get_jsonl(filename, filter_processor_name=None):
def get_processor(name):
import importlib
module_name = f"reflector.processors.{name}"
class_name = snake_to_camel(name) + "Processor"
module = importlib.import_module(module_name)

View File

@@ -4,5 +4,67 @@ Shared transcript processing constants.
Used by both Hatchet workflows and Celery pipelines for consistent processing.
"""
# Topic detection: number of words per chunk for topic extraction
import math

# Topic detection: legacy static chunk size, used as fallback
TOPIC_CHUNK_WORD_COUNT = 300

# Dynamic chunking curve: target_topics = coefficient * duration_minutes ** exponent.
# Scales the topic count sublinearly with duration so long recordings do not
# explode into dozens of tiny chunks (fewer LLM calls, steadier topic quality).
_TOPIC_CURVE_COEFFICIENT = 0.833
_TOPIC_CURVE_EXPONENT = 0.723
_MIN_TOPICS = 2
_MAX_TOPICS = 50
_MIN_CHUNK_WORDS = 375
_MAX_CHUNK_WORDS = 1500


def compute_topic_chunk_size(duration_seconds: float, total_words: int) -> int:
    """Return the number of words per chunk for topic detection.

    The target topic count follows a power curve of the recording duration,
    clamped to [_MIN_TOPICS, _MAX_TOPICS]; the resulting chunk size is then
    clamped to [_MIN_CHUNK_WORDS, _MAX_CHUNK_WORDS].

    Args:
        duration_seconds: recording length; non-positive means unknown/invalid.
        total_words: transcript word count; non-positive means unknown/invalid.

    Returns:
        Words per chunk, always within the chunk-size bounds.
    """
    # Unknown or invalid inputs fall back to the smallest chunk size.
    if duration_seconds <= 0 or total_words <= 0:
        return _MIN_CHUNK_WORDS

    minutes = duration_seconds / 60.0
    raw_target = _TOPIC_CURVE_COEFFICIENT * minutes**_TOPIC_CURVE_EXPONENT
    target_topics = int(round(min(_MAX_TOPICS, max(_MIN_TOPICS, raw_target))))

    words_per_chunk = total_words // target_topics
    return min(_MAX_CHUNK_WORDS, max(_MIN_CHUNK_WORDS, words_per_chunk))
# Subject extraction: scale the subject cap with recording duration.
# Short calls get fewer subjects to avoid over-analyzing trivial content.
_SUBJECT_DURATION_THRESHOLDS = [
    (5 * 60, 1),  # ≤ 5 min → 1 subject
    (15 * 60, 2),  # ≤ 15 min → 2 subjects
    (30 * 60, 3),  # ≤ 30 min → 3 subjects
    (45 * 60, 4),  # ≤ 45 min → 4 subjects
    (60 * 60, 5),  # ≤ 60 min → 5 subjects
]
_MAX_SUBJECTS = 6


def compute_max_subjects(duration_seconds: float) -> int:
    """Return the maximum number of subjects to extract for a recording.

    Step function over duration: the first threshold that the duration fits
    under decides the cap; anything longer than the last threshold (one hour)
    gets ``_MAX_SUBJECTS``.

    Args:
        duration_seconds: recording length; non-positive means unknown/invalid.

    Returns:
        Subject cap in the range [1, _MAX_SUBJECTS].
    """
    # Unknown or invalid durations get the smallest cap.
    if duration_seconds <= 0:
        return 1
    return next(
        (
            cap
            for limit, cap in _SUBJECT_DURATION_THRESHOLDS
            if duration_seconds <= limit
        ),
        _MAX_SUBJECTS,
    )

View File

@@ -15,6 +15,7 @@ from reflector.dailyco_api import (
from reflector.db.meetings import meetings_controller
from reflector.logger import logger as _logger
from reflector.settings import settings
from reflector.storage import get_source_storage
from reflector.video_platforms.factory import create_platform_client
from reflector.worker.process import (
poll_daily_room_presence_task,
@@ -219,6 +220,30 @@ async def _handle_recording_ready(event: RecordingReadyEvent):
track_keys = [t.s3Key for t in tracks if t.type == "audio"]
# Delete video tracks when store_video is disabled (same pattern as LiveKit).
# Only delete if we have a meeting AND store_video is explicitly false.
# If no meeting found, leave files alone (can't confirm user intent).
video_track_keys = [t.s3Key for t in tracks if t.type == "video"]
if video_track_keys:
meeting = await meetings_controller.get_by_room_name(room_name)
if meeting is not None and not meeting.store_video:
storage = get_source_storage("daily")
for video_key in video_track_keys:
try:
await storage.delete_file(video_key)
logger.info(
"Deleted video track from raw-tracks recording",
s3_key=video_key,
room_name=room_name,
)
except Exception as e:
# Non-critical — pipeline filters these out anyway
logger.warning(
"Failed to delete video track from raw-tracks recording",
s3_key=video_key,
error=str(e),
)
logger.info(
"Raw-tracks recording queuing processing",
recording_id=recording_id,

View File

@@ -17,6 +17,7 @@ from reflector.db.meetings import meetings_controller
from reflector.livekit_api.webhooks import create_webhook_receiver, verify_webhook
from reflector.logger import logger as _logger
from reflector.settings import settings
from reflector.storage import get_source_storage
router = APIRouter()
@@ -189,8 +190,6 @@ async def _handle_egress_ended(event):
filename = file_result.filename
if filename and filename.endswith(".webm"):
try:
from reflector.storage import get_source_storage # noqa: PLC0415
storage = get_source_storage("livekit")
await storage.delete_file(filename)
logger.info(

View File

@@ -1,4 +1,6 @@
import logging
import re
import uuid
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Annotated, Any, Literal, Optional
@@ -14,7 +16,7 @@ from reflector.db import get_database
from reflector.db.calendar_events import calendar_events_controller
from reflector.db.meetings import meetings_controller
from reflector.db.rooms import rooms_controller
from reflector.redis_cache import RedisAsyncLock
from reflector.redis_cache import RedisAsyncLock, get_async_redis_client
from reflector.schemas.platform import Platform
from reflector.services.ics_sync import ics_sync_service
from reflector.utils.url import add_query_param
@@ -45,6 +47,7 @@ class Room(BaseModel):
platform: Platform
skip_consent: bool = False
email_transcript_to: str | None = None
store_video: bool = False
class RoomDetails(Room):
@@ -75,6 +78,7 @@ class Meeting(BaseModel):
platform: Platform
daily_composed_video_s3_key: str | None = None
daily_composed_video_duration: int | None = None
store_video: bool = False
class CreateRoom(BaseModel):
@@ -95,6 +99,7 @@ class CreateRoom(BaseModel):
platform: Platform
skip_consent: bool = False
email_transcript_to: str | None = None
store_video: bool = False
class UpdateRoom(BaseModel):
@@ -115,6 +120,7 @@ class UpdateRoom(BaseModel):
platform: Optional[Platform] = None
skip_consent: Optional[bool] = None
email_transcript_to: Optional[str] = None
store_video: Optional[bool] = None
class CreateRoomMeeting(BaseModel):
@@ -257,6 +263,7 @@ async def rooms_create(
platform=room.platform,
skip_consent=room.skip_consent,
email_transcript_to=room.email_transcript_to,
store_video=room.store_video,
)
@@ -325,6 +332,7 @@ async def rooms_create_meeting(
and meeting.recording_type == room.recording_type
and meeting.recording_trigger == room.recording_trigger
and meeting.platform == room.platform
and meeting.store_video == room.store_video
)
if not settings_match:
logger.info(
@@ -600,9 +608,6 @@ async def rooms_join_meeting(
meeting.room_url = add_query_param(meeting.room_url, "t", token)
elif meeting.platform == "livekit":
import re
import uuid
client = create_platform_client(meeting.platform)
# Identity must be unique per participant to avoid S3 key collisions.
# Format: {readable_name}-{short_uuid} ensures uniqueness even for same names.
@@ -625,8 +630,6 @@ async def rooms_join_meeting(
# Store identity → Reflector user_id mapping for the pipeline
# (so TranscriptParticipant.user_id can be set correctly)
if user_id:
from reflector.redis_cache import get_async_redis_client # noqa: PLC0415
redis_client = await get_async_redis_client()
mapping_key = f"livekit:participant_map:{meeting.room_name}"
await redis_client.hset(mapping_key, participant_identity, user_id)

View File

@@ -11,6 +11,7 @@ from reflector.events import subscribers_shutdown
from reflector.logger import logger
from reflector.pipelines.runner import PipelineRunner
from reflector.settings import settings
from reflector.webrtc_ports import resolve_webrtc_host, rewrite_sdp_host
sessions = []
router = APIRouter()
@@ -128,8 +129,6 @@ async def rtc_offer_base(
# Rewrite ICE candidate IPs when running behind Docker bridge networking
if settings.WEBRTC_HOST:
from reflector.webrtc_ports import resolve_webrtc_host, rewrite_sdp_host
host_ip = resolve_webrtc_host(settings.WEBRTC_HOST)
sdp = rewrite_sdp_host(sdp, host_ip)

View File

@@ -4,6 +4,7 @@ from fastapi import APIRouter, Depends, HTTPException, Request
import reflector.auth as auth
from reflector.db.transcripts import transcripts_controller
from reflector.pipelines.main_live_pipeline import PipelineMainLive
from .rtc_offer import RtcOffer, rtc_offer_base
@@ -28,8 +29,6 @@ async def transcript_record_webrtc(
raise HTTPException(status_code=400, detail="Transcript is locked")
# create a pipeline runner
from reflector.pipelines.main_live_pipeline import PipelineMainLive # noqa: PLC0415
pipeline_runner = PipelineMainLive(transcript_id=transcript_id)
# FIXME do not allow multiple recording at the same time

View File

@@ -11,6 +11,8 @@ This allows running the server in Docker with bridge networking
import asyncio
import socket
import aioice.ice
from reflector.logger import logger
@@ -36,9 +38,7 @@ def patch_aioice_port_range(min_port: int, max_port: int) -> None:
Works by temporarily wrapping loop.create_datagram_endpoint() during
aioice's get_component_candidates() to intercept bind(addr, 0) calls.
"""
import aioice.ice as _ice
_original = _ice.Connection.get_component_candidates
_original = aioice.ice.Connection.get_component_candidates
_state = {"next_port": min_port}
async def _patched_get_component_candidates(self, component, addresses, timeout=5):
@@ -78,7 +78,7 @@ def patch_aioice_port_range(min_port: int, max_port: int) -> None:
finally:
loop.create_datagram_endpoint = _orig_create
_ice.Connection.get_component_candidates = _patched_get_component_candidates
aioice.ice.Connection.get_component_candidates = _patched_get_component_candidates
logger.info(
"aioice patched for WebRTC port range",
min_port=min_port,
@@ -102,8 +102,6 @@ def rewrite_sdp_host(sdp: str, target_ip: str) -> str:
Replace container-internal IPs in SDP with target_ip so that
ICE candidates advertise a routable address.
"""
import aioice.ice
container_ips = aioice.ice.get_host_addresses(use_ipv4=True, use_ipv6=False)
for ip in container_ips:
if ip != "127.0.0.1" and ip != target_ip:

View File

@@ -30,6 +30,8 @@ def build_beat_schedule(
whereby_api_key=None,
aws_process_recording_queue_url=None,
daily_api_key=None,
livekit_api_key=None,
livekit_url=None,
public_mode=False,
public_data_retention_days=None,
healthcheck_url=None,
@@ -83,7 +85,7 @@ def build_beat_schedule(
else:
logger.info("Daily.co beat tasks disabled (no DAILY_API_KEY)")
_livekit_enabled = bool(settings.LIVEKIT_API_KEY and settings.LIVEKIT_URL)
_livekit_enabled = bool(livekit_api_key and livekit_url)
if _livekit_enabled:
beat_schedule["process_livekit_ended_meetings"] = {
"task": "reflector.worker.process.process_livekit_ended_meetings",
@@ -175,6 +177,8 @@ else:
whereby_api_key=settings.WHEREBY_API_KEY,
aws_process_recording_queue_url=settings.AWS_PROCESS_RECORDING_QUEUE_URL,
daily_api_key=settings.DAILY_API_KEY,
livekit_api_key=settings.LIVEKIT_API_KEY,
livekit_url=settings.LIVEKIT_URL,
public_mode=settings.PUBLIC_MODE,
public_data_retention_days=settings.PUBLIC_DATA_RETENTION_DAYS,
healthcheck_url=settings.HEALTHCHECK_URL,

View File

@@ -1,3 +1,4 @@
import asyncio
import json
import os
import re
@@ -26,16 +27,26 @@ from reflector.db.transcripts import (
transcripts_controller,
)
from reflector.hatchet.client import HatchetClientManager
from reflector.pipelines.topic_processing import EmptyPipeline
from reflector.processors.audio_file_writer import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.redis_cache import RedisAsyncLock
from reflector.settings import settings
from reflector.storage import get_transcripts_storage
from reflector.storage import get_source_storage, get_transcripts_storage
from reflector.utils.daily import (
DailyRoomName,
extract_base_room_name,
filter_cam_audio_tracks,
recording_lock_key,
)
from reflector.utils.livekit import (
extract_livekit_base_room_name,
filter_audio_tracks,
parse_livekit_track_filepath,
)
from reflector.utils.livekit import (
recording_lock_key as livekit_recording_lock_key,
)
from reflector.utils.string import NonEmptyString
from reflector.video_platforms.factory import create_platform_client
from reflector.video_platforms.whereby_utils import (
@@ -562,6 +573,15 @@ async def store_cloud_recording(
)
return False
if not meeting.store_video:
logger.info(
f"Cloud recording ({source}): skipped, store_video=false",
recording_id=recording_id,
room_name=room_name,
meeting_id=meeting.id,
)
return False
success = await meetings_controller.set_cloud_recording_if_missing(
meeting_id=meeting.id,
s3_key=s3_key,
@@ -923,11 +943,6 @@ async def convert_audio_and_waveform(transcript) -> None:
transcript_id=transcript.id,
)
from reflector.pipelines.topic_processing import EmptyPipeline # noqa: PLC0415
from reflector.processors.audio_file_writer import (
AudioFileWriterProcessor, # noqa: PLC0415
)
upload_path = transcript.data_path / "upload.webm"
mp3_path = transcript.audio_mp3_filename
@@ -1206,17 +1221,13 @@ async def process_livekit_multitrack(
Tracks are discovered via S3 listing (source of truth), not webhooks.
Called from room_finished webhook (fast-path) or beat task (fallback).
"""
from reflector.utils.livekit import ( # noqa: PLC0415
recording_lock_key,
)
logger.info(
"Processing LiveKit multitrack recording",
room_name=room_name,
meeting_id=meeting_id,
)
lock_key = recording_lock_key(room_name)
lock_key = livekit_recording_lock_key(room_name)
async with RedisAsyncLock(
key=lock_key,
timeout=600,
@@ -1243,19 +1254,10 @@ async def _process_livekit_multitrack_inner(
# 1. Discover tracks by listing S3 prefix.
# Wait briefly for egress files to finish flushing to S3 — the room_finished
# webhook fires after empty_timeout, but egress finalization may still be in progress.
import asyncio as _asyncio # noqa: PLC0415
from reflector.storage import get_source_storage # noqa: PLC0415
from reflector.utils.livekit import ( # noqa: PLC0415
extract_livekit_base_room_name,
filter_audio_tracks,
parse_livekit_track_filepath,
)
EGRESS_FLUSH_DELAY = 10 # seconds — egress typically flushes within a few seconds
EGRESS_RETRY_DELAY = 30 # seconds — retry if first listing finds nothing
await _asyncio.sleep(EGRESS_FLUSH_DELAY)
await asyncio.sleep(EGRESS_FLUSH_DELAY)
storage = get_source_storage("livekit")
s3_prefix = f"livekit/{room_name}/"
@@ -1271,7 +1273,7 @@ async def _process_livekit_multitrack_inner(
room_name=room_name,
retry_delay=EGRESS_RETRY_DELAY,
)
await _asyncio.sleep(EGRESS_RETRY_DELAY)
await asyncio.sleep(EGRESS_RETRY_DELAY)
all_keys = await storage.list_objects(prefix=s3_prefix)
audio_keys = filter_audio_tracks(all_keys) if all_keys else []
@@ -1290,7 +1292,7 @@ async def _process_livekit_multitrack_inner(
expected=expected_audio,
found=len(audio_keys),
)
await _asyncio.sleep(EGRESS_RETRY_DELAY)
await asyncio.sleep(EGRESS_RETRY_DELAY)
all_keys = await storage.list_objects(prefix=s3_prefix)
audio_keys = filter_audio_tracks(all_keys) if all_keys else []

View File

@@ -32,6 +32,10 @@ DAILY_TASKS = {
"trigger_daily_reconciliation",
"reprocess_failed_daily_recordings",
}
LIVEKIT_TASKS = {
"process_livekit_ended_meetings",
"reprocess_failed_livekit_recordings",
}
PLATFORM_TASKS = {
"process_meetings",
"sync_all_ics_calendars",
@@ -47,6 +51,7 @@ class TestNoPlatformConfigured:
task_names = set(schedule.keys())
assert not task_names & WHEREBY_TASKS
assert not task_names & DAILY_TASKS
assert not task_names & LIVEKIT_TASKS
assert not task_names & PLATFORM_TASKS
def test_only_healthcheck_disabled_warning(self):
@@ -72,6 +77,7 @@ class TestWherebyOnly:
assert WHEREBY_TASKS <= task_names
assert PLATFORM_TASKS <= task_names
assert not task_names & DAILY_TASKS
assert not task_names & LIVEKIT_TASKS
def test_whereby_sqs_url(self):
schedule = build_beat_schedule(
@@ -81,6 +87,7 @@ class TestWherebyOnly:
assert WHEREBY_TASKS <= task_names
assert PLATFORM_TASKS <= task_names
assert not task_names & DAILY_TASKS
assert not task_names & LIVEKIT_TASKS
def test_whereby_task_count(self):
schedule = build_beat_schedule(whereby_api_key="test-key")
@@ -97,6 +104,7 @@ class TestDailyOnly:
assert DAILY_TASKS <= task_names
assert PLATFORM_TASKS <= task_names
assert not task_names & WHEREBY_TASKS
assert not task_names & LIVEKIT_TASKS
def test_daily_task_count(self):
schedule = build_beat_schedule(daily_api_key="test-daily-key")
@@ -104,6 +112,33 @@ class TestDailyOnly:
assert len(schedule) == 6
class TestLiveKitOnly:
    """When only LiveKit is configured."""

    def test_livekit_keys(self):
        # LiveKit tasks require BOTH an API key and a server URL; with them
        # set, only LiveKit + platform-agnostic tasks should be scheduled.
        schedule = build_beat_schedule(
            livekit_api_key="test-lk-key", livekit_url="ws://livekit:7880"
        )
        task_names = set(schedule.keys())
        assert LIVEKIT_TASKS <= task_names
        assert PLATFORM_TASKS <= task_names
        assert not task_names & WHEREBY_TASKS
        assert not task_names & DAILY_TASKS

    def test_livekit_task_count(self):
        schedule = build_beat_schedule(
            livekit_api_key="test-lk-key", livekit_url="ws://livekit:7880"
        )
        # LiveKit (2) + Platform (3) = 5
        assert len(schedule) == 5

    def test_livekit_needs_both_key_and_url(self):
        # Supplying only one of key/url must NOT enable LiveKit tasks.
        schedule_key_only = build_beat_schedule(livekit_api_key="test-lk-key")
        schedule_url_only = build_beat_schedule(livekit_url="ws://livekit:7880")
        assert not set(schedule_key_only.keys()) & LIVEKIT_TASKS
        assert not set(schedule_url_only.keys()) & LIVEKIT_TASKS
class TestBothPlatforms:
"""When both Whereby and Daily.co are configured."""

View File

@@ -0,0 +1,99 @@
import math
import pytest
from reflector.utils.transcript_constants import (
compute_max_subjects,
compute_topic_chunk_size,
)
# Word counts in the cases below assume ~150 words per minute of speech
# (e.g. 5 min -> 750 words). Ranges bracket the power-curve target so small
# tuning changes to the curve constants do not break the test.
@pytest.mark.parametrize(
    "duration_min,total_words,expected_topics_range",
    [
        (5, 750, (1, 3)),
        (10, 1500, (3, 6)),
        (30, 4500, (8, 14)),
        (60, 9000, (14, 22)),
        (120, 18000, (24, 35)),
        (180, 27000, (30, 42)),
    ],
)
def test_topic_count_in_expected_range(
    duration_min, total_words, expected_topics_range
):
    # Derive the effective topic count from the returned chunk size.
    chunk_size = compute_topic_chunk_size(duration_min * 60, total_words)
    num_topics = math.ceil(total_words / chunk_size)
    assert expected_topics_range[0] <= num_topics <= expected_topics_range[1], (
        f"For {duration_min}min/{total_words}words: got {num_topics} topics "
        f"(chunk_size={chunk_size}), expected {expected_topics_range[0]}-{expected_topics_range[1]}"
    )


def test_chunk_size_within_bounds():
    # Chunk size must stay within [375, 1500] for any realistic duration.
    for duration_min in [5, 10, 30, 60, 120, 180]:
        chunk_size = compute_topic_chunk_size(duration_min * 60, duration_min * 150)
        assert (
            375 <= chunk_size <= 1500
        ), f"For {duration_min}min: chunk_size={chunk_size} out of bounds [375, 1500]"


def test_zero_duration_falls_back():
    # Invalid duration -> minimum chunk size fallback.
    assert compute_topic_chunk_size(0, 1000) == 375


def test_zero_words_falls_back():
    # Invalid word count -> minimum chunk size fallback.
    assert compute_topic_chunk_size(600, 0) == 375


def test_negative_inputs_fall_back():
    # Negative inputs are treated the same as zero (invalid).
    assert compute_topic_chunk_size(-10, 1000) == 375
    assert compute_topic_chunk_size(600, -5) == 375


def test_very_short_transcript():
    """A 1-minute call with very few words should still produce at least 1 topic."""
    chunk_size = compute_topic_chunk_size(60, 100)
    # chunk_size is at least 375, so 100 words = 1 chunk
    assert chunk_size >= 375


def test_very_long_transcript():
    """A 4-hour call should cap at max topics."""
    chunk_size = compute_topic_chunk_size(4 * 3600, 36000)
    num_topics = math.ceil(36000 / chunk_size)
    assert num_topics <= 50


# --- compute_max_subjects tests ---


# Boundary values sit exactly on the step-function thresholds (inclusive).
@pytest.mark.parametrize(
    "duration_seconds,expected_max",
    [
        (0, 1),  # zero/invalid → 1
        (-10, 1),  # negative → 1
        (60, 1),  # 1 min → 1
        (120, 1),  # 2 min → 1
        (300, 1),  # 5 min (boundary) → 1
        (301, 2),  # just over 5 min → 2
        (900, 2),  # 15 min (boundary) → 2
        (901, 3),  # just over 15 min → 3
        (1800, 3),  # 30 min (boundary) → 3
        (1801, 4),  # just over 30 min → 4
        (2700, 4),  # 45 min (boundary) → 4
        (2701, 5),  # just over 45 min → 5
        (3600, 5),  # 60 min (boundary) → 5
        (3601, 6),  # just over 60 min → 6
        (7200, 6),  # 2 hours → 6
        (14400, 6),  # 4 hours → 6
    ],
)
def test_max_subjects_scales_with_duration(duration_seconds, expected_max):
    assert compute_max_subjects(duration_seconds) == expected_max


def test_max_subjects_never_exceeds_cap():
    """Even very long recordings should cap at 6 subjects."""
    for hours in range(1, 10):
        assert compute_max_subjects(hours * 3600) <= 6

View File

@@ -95,6 +95,7 @@ const roomInitialState = {
platform: "whereby",
skipConsent: false,
emailTranscriptTo: "",
storeVideo: false,
};
export default function RoomsList() {
@@ -185,6 +186,7 @@ export default function RoomsList() {
platform: detailedEditedRoom.platform,
skipConsent: detailedEditedRoom.skip_consent || false,
emailTranscriptTo: detailedEditedRoom.email_transcript_to || "",
storeVideo: detailedEditedRoom.store_video || false,
}
: null,
[detailedEditedRoom],
@@ -335,6 +337,7 @@ export default function RoomsList() {
platform,
skip_consent: room.skipConsent,
email_transcript_to: room.emailTranscriptTo || null,
store_video: room.storeVideo,
};
if (isEditing) {
@@ -400,6 +403,7 @@ export default function RoomsList() {
platform: roomData.platform,
skipConsent: roomData.skip_consent || false,
emailTranscriptTo: roomData.email_transcript_to || "",
storeVideo: roomData.store_video || false,
});
setEditRoomId(roomId);
setIsEditing(true);
@@ -842,6 +846,38 @@ export default function RoomsList() {
</Field.HelperText>
</Field.Root>
)}
{room.platform === "daily" &&
room.recordingType === "cloud" && (
<Field.Root mt={4}>
<Checkbox.Root
name="storeVideo"
checked={room.storeVideo}
onCheckedChange={(e) => {
const syntheticEvent = {
target: {
name: "storeVideo",
type: "checkbox",
checked: e.checked,
},
};
handleRoomChange(syntheticEvent);
}}
>
<Checkbox.HiddenInput />
<Checkbox.Control>
<Checkbox.Indicator />
</Checkbox.Control>
<Checkbox.Label>
Store video recording
</Checkbox.Label>
</Checkbox.Root>
<Field.HelperText>
When enabled, a composed video recording will be
saved alongside audio. Disabling saves significant
storage.
</Field.HelperText>
</Field.Root>
)}
</Tabs.Content>
<Tabs.Content value="share" pt={6}>

View File

@@ -267,12 +267,13 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
const handleFrameJoinMeeting = useCallback(() => {
if (meeting.recording_type === "cloud") {
console.log("Starting dual recording via REST API", {
console.log("Starting recording via REST API", {
cloudInstanceId,
rawTracksInstanceId,
storeVideo: meeting.store_video,
});
// Start both cloud and raw-tracks via backend REST API (with retry on 404)
// Start recordings via backend REST API (with retry on 404)
// Daily.co needs time to register call as "hosting" for REST API
const startRecordingWithRetry = (
type: DailyRecordingType,
@@ -320,12 +321,17 @@ export default function DailyRoom({ meeting, room }: DailyRoomProps) {
}, RECORDING_START_DELAY_MS);
};
// Start both recordings
startRecordingWithRetry("cloud", cloudInstanceId);
// Always start raw-tracks (needed for transcription pipeline)
startRecordingWithRetry("raw-tracks", rawTracksInstanceId);
// Only start cloud (composed video) if store_video is enabled
if (meeting.store_video) {
startRecordingWithRetry("cloud", cloudInstanceId);
}
}
}, [
meeting.recording_type,
meeting.store_video,
meeting.id,
startRecordingMutation,
cloudInstanceId,

View File

@@ -361,6 +361,7 @@ export function useTranscriptUploadAudio() {
});
},
onError: (error) => {
console.log(error)
setError(error as Error, "There was an error uploading the audio file");
},
},

View File

@@ -1134,6 +1134,11 @@ export interface components {
skip_consent: boolean;
/** Email Transcript To */
email_transcript_to?: string | null;
/**
* Store Video
* @default false
*/
store_video: boolean;
};
/** CreateRoomMeeting */
CreateRoomMeeting: {
@@ -1852,6 +1857,11 @@ export interface components {
daily_composed_video_s3_key?: string | null;
/** Daily Composed Video Duration */
daily_composed_video_duration?: number | null;
/**
* Store Video
* @default false
*/
store_video: boolean;
};
/** MeetingConsentRequest */
MeetingConsentRequest: {
@@ -1955,6 +1965,11 @@ export interface components {
skip_consent: boolean;
/** Email Transcript To */
email_transcript_to?: string | null;
/**
* Store Video
* @default false
*/
store_video: boolean;
};
/** RoomDetails */
RoomDetails: {
@@ -2013,6 +2028,11 @@ export interface components {
skip_consent: boolean;
/** Email Transcript To */
email_transcript_to?: string | null;
/**
* Store Video
* @default false
*/
store_video: boolean;
/** Webhook Url */
webhook_url: string | null;
/** Webhook Secret */
@@ -2389,6 +2409,8 @@ export interface components {
skip_consent?: boolean | null;
/** Email Transcript To */
email_transcript_to?: string | null;
/** Store Video */
store_video?: boolean | null;
};
/** UpdateTranscript */
UpdateTranscript: {

30
www/appv2/.env.example Normal file
View File

@@ -0,0 +1,30 @@
# ─── API ──────────────────────────────────────────────────────────────────────
VITE_API_URL=/v1
VITE_WEBSOCKET_URL=auto
# ─── Auth (server-side, used by Express proxy) ───────────────────────────────
AUTHENTIK_CLIENT_ID=
AUTHENTIK_CLIENT_SECRET=
AUTHENTIK_ISSUER=
AUTHENTIK_REFRESH_TOKEN_URL=
SERVER_API_URL=http://localhost:1250
AUTH_PROVIDER=authentik
# AUTH_PROVIDER=credentials
# ─── Auth Proxy ──────────────────────────────────────────────────────────────
AUTH_PROXY_PORT=3001
AUTH_PROXY_URL=http://localhost:3001
# ─── Features ────────────────────────────────────────────────────────────────
VITE_FEATURE_REQUIRE_LOGIN=true
VITE_FEATURE_PRIVACY=true
VITE_FEATURE_BROWSE=true
VITE_FEATURE_SEND_TO_ZULIP=true
VITE_FEATURE_ROOMS=true
VITE_FEATURE_EMAIL_TRANSCRIPT=false
# ─── Sentry ──────────────────────────────────────────────────────────────────
VITE_SENTRY_DSN=
# ─── Site ────────────────────────────────────────────────────────────────────
VITE_SITE_URL=http://localhost:3000

20
www/appv2/README.md Normal file
View File

@@ -0,0 +1,20 @@
<div align="center">
<img width="1200" height="475" alt="GHBanner" src="https://github.com/user-attachments/assets/0aa67016-6eaf-458a-adb2-6e31a0763ed6" />
</div>
# Reflector app v2 (Vite + React)
This contains everything you need to run your app locally.
View your app in AI Studio: https://ai.studio/apps/4d85d7fb-26cc-40ae-b70d-3c99d72ec5e8
## Run Locally
**Prerequisites:** Node.js
1. Install dependencies:
`npm install`
2. Copy `.env.example` to `.env.local` and fill in the values for your environment
3. Run the app:
`npm run dev`

282
www/appv2/dist/assets/index-BSIeQkMT.js vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

14
www/appv2/dist/index.html vendored Normal file
View File

@@ -0,0 +1,14 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>My Google AI Studio App</title>
<script type="module" crossorigin src="/assets/index-BSIeQkMT.js"></script>
<link rel="stylesheet" crossorigin href="/assets/index-DT0hy75l.css">
</head>
<body>
<div id="root"></div>
</body>
</html>

13
www/appv2/index.html Normal file
View File

@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Reflector</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

5
www/appv2/metadata.json Normal file
View File

@@ -0,0 +1,5 @@
{
"name": "Reflector",
"description": "An open-source AI meeting transcription and summarization platform.",
"requestFramePermissions": []
}

6888
www/appv2/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

59
www/appv2/package.json Normal file
View File

@@ -0,0 +1,59 @@
{
"name": "reflector-appv2",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "concurrently \"vite --port=3000 --host=0.0.0.0\" \"tsx watch server/index.ts\"",
"dev:client": "vite --port=3000 --host=0.0.0.0",
"dev:server": "tsx watch server/index.ts",
"build": "vite build",
"preview": "vite preview",
"clean": "rm -rf dist",
"lint": "tsc --noEmit"
},
"dependencies": {
"@daily-co/daily-js": "^0.87.0",
"@fontsource/manrope": "^5.2.8",
"@fontsource/newsreader": "^5.2.10",
"@sentry/react": "^10.40.0",
"@tailwindcss/vite": "^4.1.14",
"@tanstack/react-query": "^5.90.21",
"@types/uuid": "^11.0.0",
"@vitejs/plugin-react": "^5.0.4",
"@whereby.com/browser-sdk": "^3.18.21",
"concurrently": "^9.0.0",
"cookie-parser": "^1.4.7",
"cors": "^2.8.5",
"dotenv": "^17.2.3",
"express": "^4.21.2",
"lucide-react": "^0.546.0",
"motion": "^12.23.24",
"openapi-fetch": "^0.17.0",
"openapi-react-query": "^0.5.4",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-hook-form": "^7.72.0",
"react-router-dom": "^7.13.2",
"remeda": "^2.33.6",
"uuid": "^13.0.0",
"vite": "^6.2.0",
"wavesurfer.js": "^7.12.5",
"zustand": "^5.0.12"
},
"devDependencies": {
"@sentry/vite-plugin": "^3.0.0",
"@tailwindcss/postcss": "^4.2.2",
"@types/cookie-parser": "^1.4.8",
"@types/cors": "^2.8.17",
"@types/express": "^4.17.21",
"@types/node": "^22.14.0",
"@types/react": "^19.2.14",
"@types/react-dom": "^19.2.3",
"autoprefixer": "^10.4.21",
"tailwindcss": "^4.1.14",
"tsx": "^4.21.0",
"typescript": "~5.8.2",
"vite": "^6.2.0"
}
}

View File

@@ -0,0 +1,6 @@
// PostCSS pipeline for the appv2 build: Tailwind CSS v4's PostCSS plugin
// followed by autoprefixer for vendor-prefixed CSS output.
export default {
  plugins: {
    '@tailwindcss/postcss': {},
    autoprefixer: {},
  },
}

28
www/appv2/server/auth.ts Normal file
View File

@@ -0,0 +1,28 @@
/**
* Auth constants and helpers — ported from Next.js app/lib/auth.ts
*/
export const REFRESH_ACCESS_TOKEN_ERROR = "RefreshAccessTokenError" as const;

// 4 min is 1 min less than default authentik value.
// Assumes authentik won't be set to access tokens < 4 min
export const REFRESH_ACCESS_TOKEN_BEFORE = 4 * 60 * 1000;

/**
 * True when the access token expires within REFRESH_ACCESS_TOKEN_BEFORE
 * milliseconds (or has already expired) and should be refreshed now.
 * @param accessTokenExpires expiry time in epoch milliseconds
 */
export const shouldRefreshToken = (accessTokenExpires: number): boolean => {
  const timeLeft = accessTokenExpires - Date.now();
  return timeLeft < REFRESH_ACCESS_TOKEN_BEFORE;
};

// Regex fragments for pages that require an authenticated session.
// Transcript detail pages are protected, except the /transcripts/new page.
// Fix: the previous "/transcripts/[!new]" was a glob-style pattern; inside a
// RegExp, `[!new]` is a character class matching a single "!", "n", "e" or
// "w", so real transcript pages (e.g. /transcripts/abc123) never matched.
export const LOGIN_REQUIRED_PAGES = [
  "/transcripts/(?!new$).+",
  "/browse(.*)",
  "/rooms(.*)",
];

// Anchored alternation over LOGIN_REQUIRED_PAGES.
export const PROTECTED_PAGES = new RegExp(
  LOGIN_REQUIRED_PAGES.map((page) => `^${page}$`).join("|"),
);

/**
 * After logout, stay on (public) transcript detail pages; send every other
 * location back to the home page.
 */
export function getLogoutRedirectUrl(pathname: string): string {
  const transcriptPagePattern = /^\/transcripts\/[^/]+$/;
  return transcriptPagePattern.test(pathname) ? pathname : "/";
}

354
www/appv2/server/index.ts Normal file
View File

@@ -0,0 +1,354 @@
/**
* Minimal Express auth proxy server for Authentik SSO.
*
* Handles:
* - OAuth redirect to Authentik
* - Callback with code exchange
* - Token refresh
* - Credentials-based login (fallback)
* - Session introspection
*/
import express from "express";
import cookieParser from "cookie-parser";
import cors from "cors";
import { shouldRefreshToken, REFRESH_ACCESS_TOKEN_ERROR } from "./auth";
// Express app with JSON body parsing, cookie parsing, and CORS locked to the
// frontend origin (credentials: true is required for cookie-based sessions).
const app = express();
app.use(express.json());
app.use(cookieParser());
app.use(
  cors({
    origin: process.env.VITE_SITE_URL || "http://localhost:3000",
    credentials: true,
  }),
);

// ─── Config ──────────────────────────────────────────────────────────────────
const PORT = Number(process.env.AUTH_PROXY_PORT) || 3001;
// Base URL of the Reflector backend API (used for /v1/me and credential login).
const SERVER_API_URL =
  process.env.SERVER_API_URL || "http://localhost:1250";
// "authentik" (SSO) or "credentials" (email/password fallback).
const AUTH_PROVIDER = process.env.AUTH_PROVIDER || "authentik";
// Authentik-specific
const AUTHENTIK_CLIENT_ID = process.env.AUTHENTIK_CLIENT_ID || "";
const AUTHENTIK_CLIENT_SECRET = process.env.AUTHENTIK_CLIENT_SECRET || "";
const AUTHENTIK_ISSUER = process.env.AUTHENTIK_ISSUER || "";
// Token endpoint, used for both the authorization-code exchange and refreshes.
const AUTHENTIK_REFRESH_TOKEN_URL =
  process.env.AUTHENTIK_REFRESH_TOKEN_URL || "";
// Cookie settings
const COOKIE_NAME = "reflector_session";
const COOKIE_OPTIONS: express.CookieOptions = {
  httpOnly: true,
  secure: process.env.NODE_ENV === "production",
  sameSite: "lax",
  maxAge: 7 * 24 * 60 * 60 * 1000, // 7 days
  path: "/",
};

// ─── Types ───────────────────────────────────────────────────────────────────
// Session payload stored in the cookie as base64-encoded JSON.
// NOTE(review): the cookie is neither signed nor encrypted, so the client can
// forge its contents — confirm the backend validates accessToken on every use.
interface SessionData {
  accessToken: string;
  // Epoch milliseconds at which accessToken expires.
  accessTokenExpires: number;
  refreshToken?: string;
  user: {
    id: string;
    name?: string | null;
    email?: string | null;
  };
}
// ─── Helpers ─────────────────────────────────────────────────────────────────
/**
 * Resolve the backend user id (the "sub" claim) for an access token by
 * calling GET /v1/me. Returns null on any HTTP/network failure or when the
 * response carries no "sub".
 */
async function getUserId(accessToken: string): Promise<string | null> {
  try {
    const res = await fetch(`${SERVER_API_URL}/v1/me`, {
      headers: { Authorization: `Bearer ${accessToken}` },
    });
    if (!res.ok) {
      return null;
    }
    const me = await res.json();
    return me.sub || null;
  } catch (error) {
    console.error("Error fetching user ID from backend:", error);
    return null;
  }
}
/**
 * Build the OAuth redirect_uri for this deployment, honoring reverse-proxy
 * forwarding headers before falling back to the request's own protocol/host.
 */
function getRedirectUri(req: express.Request): string {
  const forwardedProto = req.headers["x-forwarded-proto"];
  const forwardedHost = req.headers["x-forwarded-host"];
  const proto = forwardedProto || req.protocol;
  const host = forwardedHost || req.get("host");
  return [proto, "://", host, "/auth/callback"].join("");
}
// Serialize a session as base64-encoded JSON for cookie storage.
function encodeSession(session: SessionData): string {
  const json = JSON.stringify(session);
  return Buffer.from(json, "utf-8").toString("base64");
}
// Parse a base64-encoded session cookie; null when malformed or unparsable.
function decodeSession(cookie: string): SessionData | null {
  let parsed: SessionData | null = null;
  try {
    const json = Buffer.from(cookie, "base64").toString("utf-8");
    parsed = JSON.parse(json);
  } catch {
    parsed = null;
  }
  return parsed;
}
// ─── Routes ──────────────────────────────────────────────────────────────────
/**
 * GET /auth/login
 * Redirects to Authentik authorize endpoint (SSO flow)
 * NOTE(review): no `state` (or PKCE) parameter is sent, so the callback
 * cannot detect a forged authorization response — confirm this is acceptable.
 */
app.get("/auth/login", (req, res) => {
  // Credentials deployments must POST to /auth/login instead.
  if (AUTH_PROVIDER !== "authentik") {
    return res
      .status(400)
      .json({ error: "SSO not configured. Use POST /auth/login instead." });
  }
  if (!AUTHENTIK_ISSUER || !AUTHENTIK_CLIENT_ID) {
    return res.status(500).json({ error: "Authentik not configured" });
  }
  // Must match the redirect_uri later sent to the token endpoint.
  const redirectUri = getRedirectUri(req);
  const authorizeUrl = new URL(
    `${AUTHENTIK_ISSUER}/authorize`,
  );
  authorizeUrl.searchParams.set("client_id", AUTHENTIK_CLIENT_ID);
  authorizeUrl.searchParams.set("response_type", "code");
  authorizeUrl.searchParams.set("redirect_uri", redirectUri);
  // offline_access requests a refresh token from Authentik.
  authorizeUrl.searchParams.set(
    "scope",
    "openid email profile offline_access",
  );
  return res.redirect(authorizeUrl.toString());
});
/**
 * GET /auth/callback
 * Handles OAuth callback from Authentik — exchanges code for tokens
 */
app.get("/auth/callback", async (req, res) => {
  const { code } = req.query;
  if (!code || typeof code !== "string") {
    return res.status(400).json({ error: "Missing authorization code" });
  }
  try {
    // redirect_uri must match the value sent in the authorize request.
    const redirectUri = getRedirectUri(req);
    const tokenResponse = await fetch(AUTHENTIK_REFRESH_TOKEN_URL, {
      method: "POST",
      headers: { "Content-Type": "application/x-www-form-urlencoded" },
      body: new URLSearchParams({
        grant_type: "authorization_code",
        client_id: AUTHENTIK_CLIENT_ID,
        client_secret: AUTHENTIK_CLIENT_SECRET,
        code,
        redirect_uri: redirectUri,
      }).toString(),
    });
    if (!tokenResponse.ok) {
      const errorBody = await tokenResponse.text();
      console.error("Token exchange failed:", tokenResponse.status, errorBody);
      return res.redirect("/?error=token_exchange_failed");
    }
    const tokens = await tokenResponse.json();
    const accessToken = tokens.access_token;
    const expiresIn = tokens.expires_in;
    const refreshToken = tokens.refresh_token;
    // Resolve user ID from backend
    const userId = await getUserId(accessToken);
    if (!userId) {
      return res.redirect("/?error=user_id_resolution_failed");
    }
    const session: SessionData = {
      accessToken,
      accessTokenExpires: Date.now() + expiresIn * 1000,
      refreshToken,
      user: {
        id: userId,
        // NOTE(review): token-endpoint responses don't normally include
        // email/name claims (those live in the id_token) — these likely end
        // up null; verify against the Authentik response.
        email: tokens.email || null,
        name: tokens.name || null,
      },
    };
    res.cookie(COOKIE_NAME, encodeSession(session), COOKIE_OPTIONS);
    // Redirect to the app
    const frontendUrl = process.env.VITE_SITE_URL || "http://localhost:3000";
    return res.redirect(`${frontendUrl}/welcome`);
  } catch (error) {
    console.error("OAuth callback error:", error);
    return res.redirect("/?error=callback_error");
  }
});
/**
 * POST /auth/login
 * Credentials-based login (email + password)
 */
app.post("/auth/login", async (req, res) => {
  const { email, password } = req.body;
  if (!email || !password) {
    return res.status(400).json({ error: "Email and password are required" });
  }
  try {
    // Delegate credential verification to the backend API.
    const response = await fetch(`${SERVER_API_URL}/v1/auth/login`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ email, password }),
    });
    if (!response.ok) {
      return res.status(401).json({ error: "Invalid credentials" });
    }
    const data = await response.json();
    const accessToken = data.access_token;
    const expiresIn = data.expires_in;
    // Resolve user ID from backend
    const userId = await getUserId(accessToken);
    if (!userId) {
      return res.status(500).json({ error: "Could not resolve user ID" });
    }
    // No refresh token in the credentials flow: the session simply expires.
    const session: SessionData = {
      accessToken,
      accessTokenExpires: Date.now() + expiresIn * 1000,
      user: {
        id: userId,
        email,
      },
    };
    res.cookie(COOKIE_NAME, encodeSession(session), COOKIE_OPTIONS);
    return res.json({
      accessToken: session.accessToken,
      accessTokenExpires: session.accessTokenExpires,
      user: session.user,
    });
  } catch (error) {
    console.error("Credentials login error:", error);
    return res.status(500).json({ error: "Internal server error" });
  }
});
/**
 * POST /auth/refresh
 * Refresh access token using refresh_token (Authentik only)
 */
app.post("/auth/refresh", async (req, res) => {
  const cookie = req.cookies[COOKIE_NAME];
  const session = cookie ? decodeSession(cookie) : null;
  if (!session) {
    return res.status(401).json({ error: "No active session" });
  }
  if (!session.refreshToken) {
    return res.status(400).json({ error: "No refresh token available" });
  }
  try {
    const response = await fetch(AUTHENTIK_REFRESH_TOKEN_URL, {
      method: "POST",
      headers: { "Content-Type": "application/x-www-form-urlencoded" },
      body: new URLSearchParams({
        grant_type: "refresh_token",
        client_id: AUTHENTIK_CLIENT_ID,
        client_secret: AUTHENTIK_CLIENT_SECRET,
        refresh_token: session.refreshToken,
      }).toString(),
    });
    if (!response.ok) {
      // Refresh rejected (revoked/expired) — drop the session entirely so
      // the client falls back to a fresh login.
      console.error("Token refresh failed:", response.status);
      res.clearCookie(COOKIE_NAME);
      return res.status(401).json({ error: REFRESH_ACCESS_TOKEN_ERROR });
    }
    const refreshedTokens = await response.json();
    // Authentik may rotate the refresh token; keep the old one if it doesn't.
    const updatedSession: SessionData = {
      ...session,
      accessToken: refreshedTokens.access_token,
      accessTokenExpires: Date.now() + refreshedTokens.expires_in * 1000,
      refreshToken: refreshedTokens.refresh_token || session.refreshToken,
    };
    res.cookie(COOKIE_NAME, encodeSession(updatedSession), COOKIE_OPTIONS);
    return res.json({
      accessToken: updatedSession.accessToken,
      accessTokenExpires: updatedSession.accessTokenExpires,
      user: updatedSession.user,
    });
  } catch (error) {
    console.error("Token refresh error:", error);
    return res.status(500).json({ error: "Internal server error" });
  }
});
/**
 * GET /auth/session
 * Returns current session info or 401
 * Response statuses: "authenticated" (with accessToken), "refresh_needed"
 * (token expired but a refresh token exists), or 401 "unauthenticated".
 */
app.get("/auth/session", (req, res) => {
  const cookie = req.cookies[COOKIE_NAME];
  const session = cookie ? decodeSession(cookie) : null;
  if (!session) {
    return res.status(401).json({ status: "unauthenticated" });
  }
  // Check if token is expired
  if (session.accessTokenExpires < Date.now()) {
    // If we have a refresh token, indicate refresh is needed
    if (session.refreshToken) {
      return res.json({
        status: "refresh_needed",
        user: session.user,
      });
    }
    // No refresh token → session is dead
    res.clearCookie(COOKIE_NAME);
    return res.status(401).json({ status: "unauthenticated" });
  }
  return res.json({
    status: "authenticated",
    accessToken: session.accessToken,
    accessTokenExpires: session.accessTokenExpires,
    user: session.user,
  });
});
/**
 * POST /auth/logout
 * Clears session cookie
 */
app.post("/auth/logout", (_request, response) => {
  response.clearCookie(COOKIE_NAME);
  return response.json({ status: "logged_out" });
});
// ─── Start ───────────────────────────────────────────────────────────────────
// Log the effective configuration at boot so misconfiguration is visible early.
app.listen(PORT, () => {
  console.log(`Auth proxy server running on http://localhost:${PORT}`);
  console.log(` AUTH_PROVIDER: ${AUTH_PROVIDER}`);
  console.log(` SERVER_API_URL: ${SERVER_API_URL}`);
  if (AUTH_PROVIDER === "authentik") {
    console.log(` AUTHENTIK_ISSUER: ${AUTHENTIK_ISSUER || "(not set)"}`);
  }
});

138
www/appv2/src/App.tsx Normal file
View File

@@ -0,0 +1,138 @@
import {
Routes,
Route,
Navigate,
Outlet,
useLocation,
} from "react-router-dom";
import { QueryClientProvider } from "@tanstack/react-query";
import { Sentry } from "./lib/sentry";
import { queryClient } from "./lib/queryClient";
import { AuthProvider, useAuth } from "./lib/AuthProvider";
import { ErrorProvider } from "./lib/errorContext";
import { RecordingConsentProvider } from "./lib/recordingConsentContext";
import { UserEventsProvider } from "./lib/UserEventsProvider";
import { TopNav } from "./components/layout/TopNav";
import { Footer } from "./components/layout/Footer";
import LoginPage from "./pages/LoginPage";
import WelcomePage from "./pages/WelcomePage";
import RoomsPage from "./pages/RoomsPage";
import RoomMeetingPage from "./pages/RoomMeetingPage";
import TranscriptionsPage from "./pages/TranscriptionsPage";
import SingleTranscriptionPage from "./pages/SingleTranscriptionPage";
import SettingsPage from "./pages/SettingsPage";
import WebinarLandingPage from "./pages/WebinarLandingPage";
import AboutPage from "./pages/AboutPage";
import PrivacyPage from "./pages/PrivacyPage";
// Nav items for TopNav
// NOTE(review): these hrefs use /transcriptions while server/auth.ts protects
// /transcripts(...) — confirm which path the router actually serves.
const NAV_LINKS = [
  { label: "Create", href: "/welcome" },
  { label: "Browse", href: "/transcriptions" },
  { label: "Rooms", href: "/rooms" },
  { label: "Settings", href: "/settings" },
];
// Guard: redirect to / if not authenticated
function RequireAuth() {
  const auth = useAuth();

  // Session still resolving — show a centered spinner instead of flashing
  // either the login redirect or the protected content.
  if (auth.status === "loading") {
    return (
      <div className="min-h-screen flex items-center justify-center bg-surface">
        <div className="w-8 h-8 border-2 border-primary/30 border-t-primary rounded-full animate-spin" />
      </div>
    );
  }

  if (auth.status !== "authenticated") {
    return <Navigate to="/" replace />;
  }
  return <Outlet />;
}
// Layout: TopNav only
// Full-page scrolling layout (welcome/about/privacy) with footer at bottom.
function TopNavLayout() {
  return (
    <div className="min-h-screen flex flex-col bg-surface">
      <TopNav links={NAV_LINKS} />
      <main className="flex-1 flex flex-col relative">
        <Outlet />
      </main>
      <Footer />
    </div>
  );
}
// Layout: TopNav + Content
// App-shell layout: viewport-locked (h-screen + overflow-hidden) so the
// <main> region owns the scrolling; footer scrolls with the content.
function AppShellLayout() {
  return (
    <div className="h-screen flex flex-col bg-surface overflow-hidden">
      <TopNav links={NAV_LINKS} />
      <div className="flex flex-1 overflow-hidden">
        <main className="flex-1 overflow-y-auto flex flex-col relative">
          <div className="flex-1 min-h-0 flex flex-col">
            <Outlet />
          </div>
          <Footer />
        </main>
      </div>
    </div>
  );
}
/**
 * Root component: nests the app-wide providers (react-query, auth, error,
 * recording consent, user events), a Sentry error boundary, and the route
 * table. Routes under <RequireAuth /> render only when authenticated.
 */
export default function App() {
  return (
    <QueryClientProvider client={queryClient}>
      <AuthProvider>
        <ErrorProvider>
          <RecordingConsentProvider>
            <UserEventsProvider>
              <Sentry.ErrorBoundary
                fallback={
                  <div className="min-h-screen flex items-center justify-center bg-surface text-on-surface">
                    <p>Something went wrong. Please refresh the page.</p>
                  </div>
                }
              >
                <Routes>
                  {/* Public */}
                  <Route path="/" element={<LoginPage />} />
                  <Route path="/webinars/:title" element={<WebinarLandingPage />} />
                  {/* Protected */}
                  <Route element={<RequireAuth />}>
                    <Route element={<TopNavLayout />}>
                      <Route path="/welcome" element={<WelcomePage />} />
                      <Route path="/about" element={<AboutPage />} />
                      <Route path="/privacy" element={<PrivacyPage />} />
                    </Route>
                    <Route element={<AppShellLayout />}>
                      <Route path="/rooms" element={<RoomsPage />} />
                      <Route
                        path="/transcriptions"
                        element={<TranscriptionsPage />}
                      />
                      <Route
                        path="/transcriptions/:id"
                        element={<SingleTranscriptionPage />}
                      />
                      <Route path="/settings" element={<SettingsPage />} />
                    </Route>
                    {/* Fullscreen Room Interfaces */}
                    <Route path="/rooms/:roomName" element={<RoomMeetingPage />} />
                    <Route path="/rooms/:roomName/:meetingId" element={<RoomMeetingPage />} />
                  </Route>
                  {/* Fallback */}
                  <Route path="*" element={<Navigate to="/" replace />} />
                </Routes>
              </Sentry.ErrorBoundary>
            </UserEventsProvider>
          </RecordingConsentProvider>
        </ErrorProvider>
      </AuthProvider>
    </QueryClientProvider>
  );
}

View File

@@ -0,0 +1,26 @@
import type { components } from "../lib/reflector-api";
import { isArray } from "remeda";
export type ApiError = {
  detail?: components["schemas"]["ValidationError"][];
} | null;

// The declared API error types don't always match reality: `detail` may
// arrive as a plain string rather than a ValidationError list.
export const printApiError = (error: ApiError) => {
  if (!error?.detail) {
    return null;
  }
  const rawDetail = error.detail as unknown;
  if (Array.isArray(rawDetail)) {
    return rawDetail.map((item: { msg: string }) => item.msg).join(", ");
  }
  if (typeof rawDetail === "string") {
    if (rawDetail.length > 0) {
      return rawDetail;
    }
    console.error("Error detail is empty");
    return null;
  }
  console.error("Error detail is not a string or array");
  return null;
};

View File

@@ -0,0 +1,84 @@
/**
* WherebyWebinarEmbed — ported from Next.js, restyled with Tailwind.
*
* Renders the Whereby embed web component for webinar rooms.
*/
import { useEffect, useRef, useState } from "react";
import "@whereby.com/browser-sdk/embed";
interface WherebyWebinarEmbedProps {
  roomUrl: string;
  onLeave?: () => void;
}

/**
 * Renders the Whereby embed web component for webinar rooms, with a
 * dismissible "this webinar is being recorded" banner (dismissal is
 * remembered in localStorage).
 *
 * Fixes over the previous version:
 * - The "leave" listener effect listed a handler recreated on every render
 *   in its deps, so it unsubscribed/resubscribed each render; it now depends
 *   only on onLeave.
 * - Cleanup read wherebyRef.current at cleanup time (which may have changed
 *   or become null); the element is now captured when the effect runs.
 * - Removed a no-op useEffect that only returned an empty cleanup.
 * - The dismiss button now has visible content and an aria-label.
 */
export default function WherebyWebinarEmbed({
  roomUrl,
  onLeave,
}: WherebyWebinarEmbedProps) {
  const wherebyRef = useRef<HTMLElement>(null);
  const [noticeDismissed, setNoticeDismissed] = useState(
    () => !!localStorage.getItem("recording-notice-dismissed"),
  );

  const handleDismissNotice = () => {
    localStorage.setItem("recording-notice-dismissed", "true");
    setNoticeDismissed(true);
  };

  // Forward the web component's "leave" event to the onLeave callback.
  useEffect(() => {
    const element = wherebyRef.current;
    if (!element) return;
    const handleLeave = () => {
      onLeave?.();
    };
    element.addEventListener("leave", handleLeave);
    return () => {
      element.removeEventListener("leave", handleLeave);
    };
  }, [onLeave]);

  return (
    <div className="relative w-screen h-screen">
      {/* Recording Notice Banner */}
      {roomUrl && !noticeDismissed && (
        <div className="absolute top-4 left-1/2 -translate-x-1/2 z-50 bg-white/95 backdrop-blur-sm px-5 py-3 rounded-md shadow-lg border border-outline-variant/20 flex items-center gap-4 max-w-md">
          <p className="text-sm text-on-surface flex-1">
            This webinar is being recorded. By continuing, you agree to our{" "}
            <a
              href="https://monadical.com/privacy"
              className="text-primary underline underline-offset-2"
              target="_blank"
              rel="noopener noreferrer"
            >
              Privacy Policy
            </a>
          </p>
          <button
            onClick={handleDismissNotice}
            aria-label="Dismiss recording notice"
            className="text-muted hover:text-on-surface text-lg leading-none"
          >
            ×
          </button>
        </div>
      )}
      {/* @ts-ignore — whereby-embed is a web component */}
      <whereby-embed
        ref={wherebyRef}
        room={roomUrl}
        style={{ width: "100vw", height: "100vh" }}
      />
    </div>
  );
}

View File

@@ -0,0 +1,16 @@
import React from "react";
import { Link } from "react-router-dom";
// Site footer: copyright line plus secondary navigation (about/privacy).
// NOTE(review): the year "2024" is hard-coded in the visible string — consider
// deriving it from the current date.
export function Footer() {
  return (
    <footer className="mt-auto shrink-0 bg-surface-low py-6 px-8 flex flex-col sm:flex-row justify-between items-center gap-4 border-t border-outline-variant/20 z-10 w-full">
      <span className="text-[0.6875rem] font-medium text-on-surface-variant uppercase tracking-widest">
        © 2024 Reflector Archive
      </span>
      <div className="flex flex-wrap items-center justify-center gap-6">
        <Link to="/about" className="text-sm text-on-surface-variant hover:text-primary transition-colors">Learn more</Link>
        <Link to="/privacy" className="text-sm text-on-surface-variant hover:text-primary transition-colors">Privacy policy</Link>
      </div>
    </footer>
  );
}

View File

@@ -0,0 +1,123 @@
import React, { useState, useRef, useEffect } from 'react';
import { Link, useLocation } from 'react-router-dom';
import { Bell, Menu, X } from 'lucide-react';
import { useAuth } from '../../lib/AuthProvider';
// A single top-navigation entry.
interface NavLink {
  label: string;
  href: string;
}

interface TopNavProps {
  links: NavLink[];
}

/**
 * Top navigation bar: logo, desktop link row (dot-separated), notification
 * bell, avatar dropdown with logout, and a collapsible mobile menu.
 */
export const TopNav: React.FC<TopNavProps> = ({ links }) => {
  const location = useLocation();
  const auth = useAuth();
  const user = auth.status === 'authenticated' ? auth.user : null;
  const [isDropdownOpen, setIsDropdownOpen] = useState(false);
  const [isMobileMenuOpen, setIsMobileMenuOpen] = useState(false);
  const dropdownRef = useRef<HTMLDivElement>(null);

  // Close the avatar dropdown when clicking anywhere outside of it.
  useEffect(() => {
    function handleClickOutside(event: MouseEvent) {
      if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) {
        setIsDropdownOpen(false);
      }
    }
    document.addEventListener('mousedown', handleClickOutside);
    return () => document.removeEventListener('mousedown', handleClickOutside);
  }, []);

  return (
    <header className="z-50 bg-surface/85 backdrop-blur-[12px] px-6 md:px-8 py-4 flex items-center justify-between border-b border-outline-variant/10 shrink-0 relative">
      <div className="flex items-center gap-8">
        <div className="flex items-center gap-2">
          {/* Mobile hamburger toggle (hidden on md and up). */}
          <button
            className="md:hidden mr-1 text-muted hover:text-primary transition-colors"
            onClick={() => setIsMobileMenuOpen(!isMobileMenuOpen)}
          >
            {isMobileMenuOpen ? <X className="w-5 h-5" /> : <Menu className="w-5 h-5" />}
          </button>
          <img src="https://reflector.monadical.com/reach.svg" alt="Reflector Logo" className="w-6 h-6" />
          <span className="font-serif font-bold text-xl text-on-surface">Reflector</span>
        </div>
        <nav className="hidden md:flex items-center gap-6">
          {links.map((link, index) => {
            // Active on exact match, or when the current path is nested under
            // the link's href (but "/" never matches as a prefix).
            const isActive = location.pathname === link.href || (link.href !== '/' && location.pathname.startsWith(`${link.href}/`));
            return (
              <React.Fragment key={link.href}>
                <Link
                  to={link.href}
                  className={`font-sans text-sm transition-colors ${
                    isActive ? 'text-primary font-semibold' : 'text-on-surface-variant hover:text-on-surface'
                  }`}
                >
                  {link.label}
                </Link>
                {/* Dot separator between links (not after the last). */}
                {index < links.length - 1 && (
                  <span className="text-outline-variant/60">·</span>
                )}
              </React.Fragment>
            );
          })}
        </nav>
      </div>
      <div className="flex items-center gap-5">
        <button className="text-muted hover:text-primary transition-colors"><Bell className="w-5 h-5" /></button>
        <div className="relative" ref={dropdownRef}>
          {/* Avatar: first letter of the user's name, fallback "C". */}
          <div
            className="w-8 h-8 rounded-full bg-surface-high flex items-center justify-center overflow-hidden cursor-pointer hover:ring-2 hover:ring-primary/20 transition-all select-none"
            onClick={() => setIsDropdownOpen(!isDropdownOpen)}
          >
            {user?.name ? (
              <span className="font-serif font-bold text-primary">{user.name.charAt(0)}</span>
            ) : (
              <span className="font-serif font-bold text-primary">C</span>
            )}
          </div>
          {isDropdownOpen && (
            <div className="absolute right-0 mt-2 w-48 bg-white rounded-md shadow-[0_4px_24px_rgba(27,28,20,0.1)] border border-outline-variant/10 py-1 z-50 animate-in fade-in slide-in-from-top-2 duration-200">
              <div className="px-4 py-3 border-b border-outline-variant/10 mb-1">
                <p className="text-sm font-semibold text-on-surface truncate">{user?.name || 'Curator'}</p>
                <p className="text-xs text-muted truncate mt-0.5">{user?.email || 'admin@reflector.com'}</p>
              </div>
              <button
                onClick={() => {
                  setIsDropdownOpen(false);
                  auth.signOut();
                }}
                className="w-full text-left px-4 py-2.5 text-sm text-red-600 hover:bg-red-50 hover:text-red-700 transition-colors font-medium flex items-center gap-2"
              >
                Log out
              </button>
            </div>
          )}
        </div>
      </div>
      {isMobileMenuOpen && (
        <div className="absolute top-[100%] left-0 w-full bg-surface border-b border-outline-variant/10 flex flex-col px-6 py-2 md:hidden shadow-lg z-50 animate-in fade-in slide-in-from-top-2 duration-200">
          {links.map((link) => {
            const isActive = location.pathname === link.href || (link.href !== '/' && location.pathname.startsWith(`${link.href}/`));
            return (
              <Link
                key={link.href}
                to={link.href}
                onClick={() => setIsMobileMenuOpen(false)}
                className={`py-3.5 font-sans text-[0.9375rem] transition-colors ${
                  isActive ? 'text-primary font-bold' : 'text-on-surface hover:text-primary'
                }`}
              >
                {link.label}
              </Link>
            );
          })}
        </div>
      )}
    </header>
  );
};

View File

@@ -0,0 +1,462 @@
import React, { useEffect, useState } from 'react';
import { useForm } from 'react-hook-form';
import {
useRoomCreate,
useRoomUpdate,
useRoomGet,
useRoomTestWebhook,
useConfig,
useZulipStreams,
useZulipTopics
} from '../../lib/apiHooks';
import { Button } from '../ui/Button';
import { Input } from '../ui/Input';
import { Select } from '../ui/Select';
import { Checkbox } from '../ui/Checkbox';
import { X, Info, Link as LinkIcon, CheckCircle2, AlertCircle, Hexagon, Loader2 } from 'lucide-react';
import { useQueryClient } from '@tanstack/react-query';
interface AddRoomModalProps {
  isOpen: boolean;
  onClose: () => void;
  // When set, the modal loads this room and edits it instead of creating one.
  editRoomId?: string | null;
}

// Form state for the room create/edit dialog. Field names are camelCase
// mirrors of the snake_case API payload assembled on submit.
type FormData = {
  name: string;
  platform: 'whereby' | 'daily';
  roomMode: 'normal' | 'group';
  recordingType: 'none' | 'local' | 'cloud';
  recordingTrigger: 'none' | 'prompt' | 'automatic-2nd-participant';
  isLocked: boolean;
  isShared: boolean;
  skipConsent: boolean;
  // ICS calendar polling toggle and interval (minutes).
  enableIcs: boolean;
  icsFetchInterval: number;
  emailTranscript: boolean;
  emailTranscriptTo: string;
  postToZulip: boolean;
  zulipStream: string;
  zulipTopic: string;
  webhookUrl: string;
  webhookSecret: string;
};
export function AddRoomModal({ isOpen, onClose, editRoomId }: AddRoomModalProps) {
const [activeTab, setActiveTab] = useState<'general' | 'calendar' | 'sharing' | 'webhooks'>('general');
const [testResult, setTestResult] = useState<{ status: 'success'|'error', msg: string } | null>(null);
const [isTesting, setIsTesting] = useState(false);
const queryClient = useQueryClient();
const createRoom = useRoomCreate();
const updateRoom = useRoomUpdate();
const testWebhook = useRoomTestWebhook();
const { data: config } = useConfig();
const zulipEnabled = config?.zulip_enabled ?? false;
const emailEnabled = config?.email_enabled ?? false;
const { data: streams = [] } = useZulipStreams(zulipEnabled);
const { data: editedRoom, isFetching: isFetchingRoom } = useRoomGet(editRoomId || null);
const { register, handleSubmit, watch, reset, setValue, formState: { errors } } = useForm<FormData>({
defaultValues: {
name: '',
platform: 'whereby',
roomMode: 'normal',
recordingType: 'cloud',
recordingTrigger: 'automatic-2nd-participant',
isShared: true,
isLocked: false,
skipConsent: false,
enableIcs: false,
icsFetchInterval: 5,
emailTranscript: false,
emailTranscriptTo: '',
postToZulip: false,
zulipStream: '',
zulipTopic: '',
webhookUrl: '',
webhookSecret: '',
}
});
const platform = watch('platform');
const postToZulip = watch('postToZulip');
const webhookUrl = watch('webhookUrl');
const recordingType = watch('recordingType');
const selectedZulipStream = watch('zulipStream');
const emailTranscript = watch('emailTranscript');
// Dynamically resolve zulip stream IDs to query topics
const selectedStreamId = React.useMemo(() => {
if (!selectedZulipStream || streams.length === 0) return null;
const match = streams.find(s => s.name === selectedZulipStream);
return match ? match.stream_id : null;
}, [selectedZulipStream, streams]);
const { data: topics = [] } = useZulipTopics(selectedStreamId);
useEffect(() => {
if (isOpen) {
if (editRoomId && editedRoom) {
// Load Edit Mode
reset({
name: editedRoom.name,
platform: editedRoom.platform as 'whereby' | 'daily',
roomMode: editedRoom.platform === 'daily' ? 'group' : (editedRoom.room_mode || 'normal') as 'normal'|'group',
recordingType: (editedRoom.recording_type || 'none') as 'none'|'local'|'cloud',
recordingTrigger: editedRoom.platform === 'daily'
? (editedRoom.recording_type === 'cloud' ? 'automatic-2nd-participant' : 'none')
: (editedRoom.recording_trigger || 'none') as any,
isShared: editedRoom.is_shared,
isLocked: editedRoom.is_locked,
skipConsent: editedRoom.skip_consent,
enableIcs: editedRoom.ics_enabled || false,
icsFetchInterval: editedRoom.ics_fetch_interval || 5,
emailTranscript: !!editedRoom.email_transcript_to,
emailTranscriptTo: editedRoom.email_transcript_to || '',
postToZulip: editedRoom.zulip_auto_post || false,
zulipStream: editedRoom.zulip_stream || '',
zulipTopic: editedRoom.zulip_topic || '',
webhookUrl: editedRoom.webhook_url || '',
webhookSecret: editedRoom.webhook_secret || '',
});
} else if (!editRoomId) {
// Load Create Mode with specific backend defaults
reset({
name: '',
platform: 'whereby',
roomMode: 'normal',
recordingType: 'cloud',
recordingTrigger: 'automatic-2nd-participant',
isShared: false,
isLocked: false,
skipConsent: false,
enableIcs: false,
icsFetchInterval: 5,
emailTranscript: false,
emailTranscriptTo: '',
postToZulip: false,
zulipStream: '',
zulipTopic: '',
webhookUrl: '',
webhookSecret: '',
});
}
}
}, [isOpen, editRoomId, editedRoom, reset]);
// Handle rigid Platform dependency enums
useEffect(() => {
if (platform === 'daily') {
setValue('roomMode', 'group');
if (recordingType === 'cloud') {
setValue('recordingTrigger', 'automatic-2nd-participant');
} else {
setValue('recordingTrigger', 'none');
}
} else if (platform === 'whereby') {
if (recordingType !== 'cloud') {
setValue('recordingTrigger', 'none');
}
}
}, [platform, recordingType, setValue]);
const handleClose = () => {
reset();
setActiveTab('general');
setTestResult(null);
onClose();
};
const executeWebhookTest = async () => {
if (!webhookUrl || !editRoomId) return;
setIsTesting(true);
setTestResult(null);
try {
const resp = await testWebhook.mutateAsync({
params: { path: { room_id: editRoomId } }
});
if (resp.success) {
setTestResult({ status: 'success', msg: `Test successful! Status: ${resp.status_code}` });
} else {
let err = `Failed (${resp.status_code})`;
if (resp.response_preview) {
try {
const json = JSON.parse(resp.response_preview);
err += `: ${json.message || resp.response_preview}`;
} catch {
err += `: ${resp.response_preview.substring(0, 100)}`;
}
}
setTestResult({ status: 'error', msg: err });
}
} catch {
setTestResult({ status: 'error', msg: "Network failed attempting to test URL." });
} finally {
setIsTesting(false);
}
};
const onSubmit = (data: FormData) => {
const payload = {
name: data.name.replace(/[^a-zA-Z0-9\s-]/g, "").replace(/\s+/g, "-").toLowerCase(),
platform: data.platform,
zulip_auto_post: data.postToZulip,
zulip_stream: data.zulipStream,
zulip_topic: data.zulipTopic,
is_locked: data.isLocked,
room_mode: data.platform === 'daily' ? 'group' : data.roomMode,
recording_type: data.recordingType,
recording_trigger: data.platform === 'daily' ? (data.recordingType === 'cloud' ? 'automatic-2nd-participant' : 'none') : data.recordingTrigger,
is_shared: data.isShared,
webhook_url: data.webhookUrl,
webhook_secret: data.webhookSecret,
ics_url: '',
ics_enabled: data.enableIcs,
ics_fetch_interval: data.icsFetchInterval,
skip_consent: data.skipConsent,
email_transcript_to: data.emailTranscript ? data.emailTranscriptTo : null,
};
if (editRoomId) {
updateRoom.mutate({
params: { path: { room_id: editRoomId } },
body: payload as any
}, {
onSuccess: () => {
queryClient.invalidateQueries({ queryKey: ['rooms'] });
handleClose();
}
});
} else {
createRoom.mutate({
body: payload as any
}, {
onSuccess: () => {
queryClient.invalidateQueries({ queryKey: ['rooms'] });
handleClose();
}
});
}
};
if (!isOpen) return null;
const tabs = [
{ id: 'general', label: 'General' },
{ id: 'calendar', label: 'Calendar' },
...(zulipEnabled || emailEnabled ? [{ id: 'sharing', label: 'Sharing' }] : []),
{ id: 'webhooks', label: 'WebHooks' },
] as const;
return (
<div className="fixed inset-0 bg-[#1b1c14]/45 z-50 flex items-center justify-center p-4 backdrop-blur-sm">
<div className="bg-white rounded-[12px] shadow-[0_16px_48px_rgba(27,28,20,0.12)] w-[500px] max-w-full flex flex-col overflow-hidden animate-in fade-in zoom-in-95 duration-200">
{/* Header */}
<div className="pt-6 px-6 pb-0 flex items-center justify-between">
<div className="flex items-center gap-2">
<Hexagon className="w-5 h-5 text-primary fill-primary/20" />
<h2 className="font-serif text-lg font-bold text-on-surface">
{editRoomId ? 'Edit Room' : 'New Room'}
</h2>
{isFetchingRoom && <Loader2 className="w-4 h-4 animate-spin text-muted ml-2" />}
</div>
<button onClick={handleClose} className="text-muted hover:text-primary hover:bg-primary/10 p-1.5 rounded-full transition-colors">
<X className="w-5 h-5" />
</button>
</div>
{/* Tab Bar */}
<div className="px-6 mt-4 flex items-center gap-6 relative">
<div className="absolute bottom-0 left-0 right-0 h-[2px] bg-surface-high"></div>
{tabs.map(tab => (
<button
key={tab.id}
type="button"
onClick={() => setActiveTab(tab.id as any)}
className={`pb-3 font-sans text-sm transition-colors relative z-10 ${
activeTab === tab.id
? 'text-primary font-semibold border-b-[2.5px] border-primary'
: 'text-muted font-medium hover:text-on-surface-variant'
}`}
>
{tab.label}
</button>
))}
</div>
{/* Body */}
<div className="p-5 px-6 max-h-[60vh] overflow-y-auto">
<form id="add-room-form" onSubmit={handleSubmit(onSubmit)} className="space-y-6">
{activeTab === 'general' && (
<div className="space-y-5 animate-in fade-in duration-300">
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Room Name</label>
<Input
{...register('name', { required: true })}
placeholder="e.g. editorial-sync"
className="w-full"
disabled={!!editRoomId}
/>
<p className="text-xs text-muted mt-1">No spaces allowed. E.g. my-room</p>
</div>
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Platform</label>
<Select {...register('platform')} className="w-full">
<option value="whereby">Whereby</option>
<option value="daily">Daily.co</option>
</Select>
</div>
<div className="space-y-3 pt-2">
<Checkbox {...register('isLocked')} label="Locked room (Require password)" />
</div>
{platform !== 'daily' && (
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Room Size</label>
<Select {...register('roomMode')} className="w-full">
<option value="normal">2-4 people</option>
<option value="group">2-200 people</option>
</Select>
</div>
)}
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 flex items-center gap-1.5">
Recording Type
<Info className="w-3.5 h-3.5 text-muted hover:text-primary transition-colors cursor-help" />
</label>
<Select {...register('recordingType')} className="w-full">
<option value="none">None</option>
<option value="local">Local</option>
<option value="cloud">Cloud</option>
</Select>
</div>
{recordingType === 'cloud' && platform !== 'daily' && (
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Recording Trigger</label>
<Select {...register('recordingTrigger')} className="w-full">
<option value="none">None (Manual)</option>
<option value="prompt">Prompt on Join</option>
<option value="automatic-2nd-participant">Automatic on 2nd Participant</option>
</Select>
</div>
)}
<div className="space-y-3 pt-2 border-t border-outline-variant/10">
<Checkbox {...register('isShared')} label="Shared room (Public archive)" />
<Checkbox {...register('skipConsent')} label="Skip consent checkbox" />
</div>
</div>
)}
{activeTab === 'calendar' && (
<div className="space-y-2 animate-in fade-in duration-300">
<Checkbox {...register('enableIcs')} label="Enable ICS calendar sync" />
<p className="font-sans text-sm text-muted ml-6">When enabled, a calendar feed URL will be generated.</p>
</div>
)}
{activeTab === 'sharing' && (
<div className="space-y-4 animate-in fade-in duration-300">
{emailEnabled && (
<div className="space-y-2 pb-4 border-b border-outline-variant/10">
<Checkbox {...register('emailTranscript')} label="Email transcript functionality" />
{emailTranscript && (
<div className="pl-6 animate-in slide-in-from-top-2">
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Email Address</label>
<Input type="email" {...register('emailTranscriptTo')} placeholder="editor@nyt.com" className="w-full" />
</div>
)}
</div>
)}
{zulipEnabled && (
<div className="space-y-2">
<Checkbox {...register('postToZulip')} label="Automatically post transcription to Zulip" />
<div className={`overflow-hidden transition-all duration-300 ${postToZulip ? 'max-h-48 opacity-100 mt-4' : 'max-h-0 opacity-0'}`}>
<div className="pl-6 space-y-4 border-l-2 border-surface-high ml-2 py-1">
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Zulip stream</label>
<Select {...register('zulipStream')} disabled={!postToZulip} className="w-full">
<option value="">Select stream...</option>
{streams.map(s => <option key={s.stream_id} value={s.name}>{s.name}</option>)}
</Select>
</div>
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Zulip topic</label>
<Select {...register('zulipTopic')} disabled={!postToZulip} className="w-full">
<option value="">Select topic...</option>
{topics.map(t => <option key={t.name} value={t.name}>{t.name}</option>)}
</Select>
</div>
</div>
</div>
</div>
)}
</div>
)}
{activeTab === 'webhooks' && (
<div className="space-y-4 animate-in fade-in duration-300">
<div>
<label className="font-sans text-[0.75rem] font-bold uppercase tracking-widest text-muted mb-1.5 block">Webhook URL</label>
<div className="relative">
<div className="absolute inset-y-0 left-0 pl-3 flex items-center pointer-events-none">
<LinkIcon className="w-4 h-4 text-muted" />
</div>
<Input
{...register('webhookUrl', { pattern: { value: /^https?:\/\/.+/, message: 'Must be a valid URL starting with http:// or https://' }})}
placeholder="https://example.com/webhook"
className="w-full pl-9 pr-9"
/>
</div>
{errors.webhookUrl && <p className="font-sans text-[0.75rem] text-primary mt-1.5">{errors.webhookUrl.message}</p>}
</div>
{webhookUrl && editRoomId && (
<div className="pt-2 border-t border-shell">
<Button
type="button"
variant="secondary"
onClick={executeWebhookTest}
disabled={isTesting}
className="mb-3"
>
{isTesting ? 'Testing...' : 'Test Webhook Settings'}
</Button>
{testResult && (
<div className={`p-3 rounded-lg text-sm border font-mono ${testResult.status === 'success' ? 'bg-green-50 text-green-800 border-green-200' : 'bg-red-50 text-red-800 border-red-200'}`}>
{testResult.msg}
</div>
)}
</div>
)}
</div>
)}
</form>
</div>
{/* Footer */}
<div className="px-6 py-4 bg-surface-low rounded-b-[12px] flex items-center justify-between border-t border-outline-variant/10">
<Button variant="secondary" onClick={handleClose} className="border-[1.5px] border-[#C8C8BE] text-on-surface-variant hover:bg-surface-high">
Cancel
</Button>
<Button variant="primary" type="submit" form="add-room-form" disabled={createRoom.isPending || updateRoom.isPending}>
{createRoom.isPending || updateRoom.isPending ? 'Saving...' : 'Save Room'}
</Button>
</div>
</div>
</div>
);
}

View File

@@ -0,0 +1,284 @@
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useNavigate, useParams } from 'react-router-dom';
import { Loader2 } from 'lucide-react';
import DailyIframe, {
DailyCall,
DailyCallOptions,
DailyCustomTrayButton,
DailyCustomTrayButtons,
DailyEventObjectCustomButtonClick,
DailyFactoryOptions,
DailyParticipantsObject,
} from '@daily-co/daily-js';
import type { components } from '../../lib/reflector-api';
import { useAuth } from '../../lib/AuthProvider';
import { useConsentDialog } from '../../lib/consent';
import { featureEnabled } from '../../lib/features';
import { useRoomJoinMeeting, useMeetingStartRecording } from '../../lib/apiHooks';
import { omit } from 'remeda';
import { NonEmptyString } from '../../lib/utils';
import { assertMeetingId, DailyRecordingType } from '../../lib/types';
import { v5 as uuidv5 } from 'uuid';
// Ids for the custom buttons injected into the Daily prebuilt tray.
const CONSENT_BUTTON_ID = 'recording-consent';
const RECORDING_INDICATOR_ID = 'recording-indicator';
// Fixed UUIDv5 namespace used to derive a deterministic instanceId for the
// raw-tracks recording from the meeting id (see DailyRoom below).
const RAW_TRACKS_NAMESPACE = 'a1b2c3d4-e5f6-7890-abcd-ef1234567890';
// Delay before each recording-start attempt, and how many attempts to make
// when the backend reports the room is not yet hosting a call.
const RECORDING_START_DELAY_MS = 2000;
const RECORDING_START_MAX_RETRIES = 5;
// Convenience aliases over the generated API schema types.
type Meeting = components['schemas']['Meeting'];
type Room = components['schemas']['RoomDetails'];
// NOTE(review): declared but not referenced in the visible code of this file.
type MeetingId = string;
type DailyRoomProps = {
  meeting: Meeting;
  room: Room;
};
const useCustomTrayButtons = (
frame: { updateCustomTrayButtons: (buttons: DailyCustomTrayButtons) => void; joined: boolean } | null
) => {
const [, setCustomTrayButtons] = useState<DailyCustomTrayButtons>({});
return useCallback(
(id: string, button: DailyCustomTrayButton | null) => {
setCustomTrayButtons((prev) => {
const state = button === null ? omit(prev, [id]) : { ...prev, [id]: button };
if (frame !== null && frame.joined) frame.updateCustomTrayButtons(state);
return state;
});
},
[frame]
);
};
const USE_FRAME_INIT_STATE = { frame: null as DailyCall | null, joined: false as boolean } as const;

/**
 * Creates and owns a Daily prebuilt iframe mounted into `container`.
 *
 * Returns a narrowed frame facade ({ join, updateCustomTrayButtons }) plus a
 * setCustomTrayButton helper. Event handlers in `cbs` are (re)attached
 * whenever the frame or the callback object changes.
 */
const useFrame = (
  container: HTMLDivElement | null,
  cbs: {
    onLeftMeeting: () => void;
    onCustomButtonClick: (ev: DailyEventObjectCustomButtonClick) => void;
    onJoinMeeting: () => void;
  }
) => {
  const [{ frame, joined }, setState] = useState(USE_FRAME_INIT_STATE);
  const setJoined = useCallback((j: boolean) => setState((prev) => ({ ...prev, joined: j })), [setState]);
  const setFrame = useCallback((f: DailyCall | null) => setState((prev) => ({ ...prev, frame: f })), [setState]);
  useEffect(() => {
    if (!container) return;
    let isActive = true;
    // Track the frame created by THIS effect run. The previous version
    // destroyed the `frame` state captured in the closure — still null when
    // the effect ran (deps are [container] only) — so the newly created
    // frame was never destroyed on unmount and leaked.
    let createdFrame: DailyCall | null = null;
    const init = async () => {
      // daily-js allows at most one call instance; tear down any leftover
      // instance from a previous mount before creating a new one.
      const existingFrame = DailyIframe.getCallInstance();
      if (existingFrame) {
        await existingFrame.destroy();
      }
      if (!isActive) return;
      const frameOptions: DailyFactoryOptions = {
        iframeStyle: {
          width: '100vw',
          height: '100vh',
          border: 'none',
        },
        showLeaveButton: true,
        showFullscreenButton: true,
      };
      createdFrame = DailyIframe.createFrame(container, frameOptions);
      setFrame(createdFrame);
    };
    init().catch(console.error);
    return () => {
      isActive = false;
      createdFrame?.destroy().catch(console.error);
      setState(USE_FRAME_INIT_STATE);
    };
  }, [container, setFrame]);
  useEffect(() => {
    if (!frame) return;
    frame.on('left-meeting', cbs.onLeftMeeting);
    frame.on('custom-button-click', cbs.onCustomButtonClick);
    const joinCb = () => {
      if (!frame) return;
      cbs.onJoinMeeting();
    };
    frame.on('joined-meeting', joinCb);
    return () => {
      frame.off('left-meeting', cbs.onLeftMeeting);
      frame.off('custom-button-click', cbs.onCustomButtonClick);
      frame.off('joined-meeting', joinCb);
    };
  }, [frame, cbs]);
  // Expose only the operations callers need; join() also records joined-ness
  // so tray-button updates are deferred until the frame can accept them.
  const frame_ = useMemo(() => {
    if (frame === null) return frame;
    return {
      join: async (properties?: DailyCallOptions): Promise<DailyParticipantsObject | void> => {
        await frame.join(properties);
        setJoined(!frame.isDestroyed());
      },
      updateCustomTrayButtons: (buttons: DailyCustomTrayButtons): DailyCall => frame.updateCustomTrayButtons(buttons),
    };
  }, [frame, setJoined]);
  const setCustomTrayButton = useCustomTrayButtons(
    useMemo(() => (frame_ === null ? null : { updateCustomTrayButtons: frame_.updateCustomTrayButtons, joined }), [
      frame_,
      joined,
    ])
  );
  return [frame_, { setCustomTrayButton }] as const;
};
/**
 * Daily.co meeting view: embeds the Daily prebuilt iframe, registers the
 * authenticated user as joined on the backend, and — for cloud rooms — kicks
 * off both a composited "cloud" recording and a "raw-tracks" recording once
 * the local participant has joined the call.
 */
export default function DailyRoom({ meeting, room }: DailyRoomProps) {
  const navigate = useNavigate();
  const { roomName } = useParams();
  const auth = useAuth();
  const authLastUserId = auth.status === 'authenticated' ? auth.user.id : undefined;
  const [container, setContainer] = useState<HTMLDivElement | null>(null);
  const joinMutation = useRoomJoinMeeting();
  const startRecordingMutation = useMeetingStartRecording();
  const [joinedMeeting, setJoinedMeeting] = useState<Meeting | null>(null);
  // Daily needs a distinct instanceId per concurrent recording; derive a
  // stable, deterministic one for raw-tracks from the meeting id via uuidv5.
  const cloudInstanceId = meeting.id;
  const rawTracksInstanceId = uuidv5(meeting.id, RAW_TRACKS_NAMESPACE);
  const { showConsentModal, showRecordingIndicator, showConsentButton } = useConsentDialog({
    meetingId: assertMeetingId(meeting.id),
    recordingType: meeting.recording_type,
    skipConsent: room.skip_consent,
  });
  // Ref keeps the latest modal fn reachable from the stable callback below.
  const showConsentModalRef = useRef(showConsentModal);
  showConsentModalRef.current = showConsentModal;
  // Register this user as joined on the backend once auth + params are known;
  // the response carries the room_url we need to join the Daily call.
  useEffect(() => {
    if (authLastUserId === undefined || !meeting?.id || !roomName) return;
    let isMounted = true;
    const join = async () => {
      try {
        const result = await joinMutation.mutateAsync({
          params: { path: { room_name: roomName, meeting_id: meeting.id } },
        });
        if (isMounted) setJoinedMeeting(result);
      } catch (error) {
        console.error('Failed to join meeting:', error);
      }
    };
    join().catch(console.error);
    return () => { isMounted = false; };
  }, [meeting?.id, roomName, authLastUserId]);
  const roomUrl = joinedMeeting?.room_url;
  const handleLeave = useCallback(() => {
    navigate('/transcriptions');
  }, [navigate]);
  const handleCustomButtonClick = useCallback((ev: DailyEventObjectCustomButtonClick) => {
    if (ev.button_id === CONSENT_BUTTON_ID) {
      showConsentModalRef.current();
    }
  }, []);
  // After joining, start both recordings. The backend may briefly report the
  // room as not hosting a call, so each start is retried a bounded number of
  // times with a fixed delay between attempts.
  const handleFrameJoinMeeting = useCallback(() => {
    if (meeting.recording_type === 'cloud') {
      const startRecordingWithRetry = (type: DailyRecordingType, instanceId: string, attempt: number = 1) => {
        setTimeout(() => {
          startRecordingMutation.mutate(
            {
              params: { path: { meeting_id: meeting.id as any } },
              body: { type: type as any, instanceId }
            },
            {
              onError: (error: any) => {
                const errorText = error?.detail || error?.message || '';
                const is404NotHosting = errorText.includes('does not seem to be hosting a call');
                const isActiveStream = errorText.includes('has an active stream');
                if (is404NotHosting && attempt < RECORDING_START_MAX_RETRIES) {
                  startRecordingWithRetry(type, instanceId, attempt + 1);
                } else if (!isActiveStream) {
                  // "active stream" means a recording is already running, so
                  // it is deliberately not logged as a failure.
                  console.error(`Failed to start ${type} recording:`, error);
                }
              },
            }
          );
        }, RECORDING_START_DELAY_MS);
      };
      startRecordingWithRetry('cloud', cloudInstanceId);
      startRecordingWithRetry('raw-tracks', rawTracksInstanceId);
    }
  }, [meeting.recording_type, meeting.id, cloudInstanceId, rawTracksInstanceId, startRecordingMutation]);
  const recordingIconUrl = useMemo(() => new URL('/recording-icon.svg', window.location.origin), []);
  const [frame, { setCustomTrayButton }] = useFrame(container, {
    onLeftMeeting: handleLeave,
    onCustomButtonClick: handleCustomButtonClick,
    onJoinMeeting: handleFrameJoinMeeting,
  });
  // Join the Daily call once both the iframe and the backend room URL exist.
  useEffect(() => {
    if (!frame || !roomUrl) return;
    frame.join({
      url: roomUrl,
      sendSettings: {
        video: { allowAdaptiveLayers: true, maxQuality: 'medium' },
      },
    }).catch(console.error);
  }, [frame, roomUrl]);
  // Tray button: passive "recording in progress" indicator.
  useEffect(() => {
    setCustomTrayButton(
      RECORDING_INDICATOR_ID,
      showRecordingIndicator
        ? { iconPath: recordingIconUrl.href, label: 'Recording', tooltip: 'Recording in progress' }
        : null
    );
  }, [showRecordingIndicator, recordingIconUrl, setCustomTrayButton]);
  // Tray button: clickable consent prompt (handled in handleCustomButtonClick).
  useEffect(() => {
    setCustomTrayButton(
      CONSENT_BUTTON_ID,
      showConsentButton
        ? { iconPath: recordingIconUrl.href, label: 'Recording (click to consent)', tooltip: 'Recording (click to consent)' }
        : null
    );
  }, [showConsentButton, recordingIconUrl, setCustomTrayButton]);
  if (authLastUserId === undefined || joinMutation.isPending) {
    return (
      <div className="w-screen h-screen flex justify-center items-center bg-surface">
        <Loader2 className="w-10 h-10 text-primary animate-spin" />
      </div>
    );
  }
  if (joinMutation.isError) {
    return (
      <div className="w-screen h-screen flex justify-center items-center bg-surface">
        <p className="text-red-500 font-medium">Failed to join meeting. Please try again.</p>
      </div>
    );
  }
  if (!roomUrl) return null;
  return (
    <div className="relative w-screen h-screen">
      <div ref={setContainer} style={{ width: '100%', height: '100%' }} />
    </div>
  );
}

View File

@@ -0,0 +1,75 @@
import React from 'react';
import { useNavigate, Link } from 'react-router-dom';
import { Button } from '../ui/Button';
import { Hexagon } from 'lucide-react';
interface MeetingMinimalHeaderProps {
  roomName: string;                 // room slug; used as title fallback
  displayName?: string | null;      // human-friendly name used to build "<name>'s Room"
  showLeaveButton?: boolean;        // defaults to true
  onLeave?: () => void;             // custom leave handler; defaults to navigating to /rooms
  showCreateButton?: boolean;       // only rendered when onCreateMeeting is also provided
  onCreateMeeting?: () => void;
  isCreatingMeeting?: boolean;      // disables both buttons and shows "Creating..." label
}
/**
 * Slim sticky header for meeting screens: brand mark, a possessive room
 * title, and optional "Create Meeting" / "Leave Room" actions.
 */
export default function MeetingMinimalHeader({
  roomName,
  displayName,
  showLeaveButton = true,
  onLeave,
  showCreateButton = false,
  onCreateMeeting,
  isCreatingMeeting = false,
}: MeetingMinimalHeaderProps) {
  const navigate = useNavigate();

  // Prefer the caller-supplied handler; otherwise fall back to the room list.
  const leaveRoom = () => {
    if (onLeave) {
      onLeave();
      return;
    }
    navigate('/rooms');
  };

  // "Ada's Room", but no extra apostrophe when the name already ends in "s"
  // (or "'s"); plain "<slug> Room" when no display name is available.
  let roomTitle = `${roomName} Room`;
  if (displayName) {
    const alreadyPossessive = displayName.endsWith("'s") || displayName.endsWith("s");
    roomTitle = alreadyPossessive ? `${displayName} Room` : `${displayName}'s Room`;
  }

  const canCreate = Boolean(showCreateButton && onCreateMeeting);

  return (
    <header className="flex justify-between items-center w-full py-4 px-6 bg-surface sticky top-0 z-10 border-b border-surface-high">
      <div className="flex items-center gap-3">
        <Link to="/" className="flex items-center">
          <Hexagon className="w-6 h-6 text-primary fill-primary/20" />
        </Link>
        <span className="font-serif text-lg font-semibold text-on-surface">
          {roomTitle}
        </span>
      </div>
      <div className="flex items-center gap-3">
        {canCreate && (
          <Button
            variant="primary"
            onClick={onCreateMeeting}
            disabled={isCreatingMeeting}
          >
            {isCreatingMeeting ? 'Creating...' : 'Create Meeting'}
          </Button>
        )}
        {showLeaveButton && (
          <Button
            variant="secondary"
            onClick={leaveRoom}
            disabled={isCreatingMeeting}
            className="border-[1.5px] border-[#C8C8BE] text-on-surface-variant hover:bg-surface-high"
          >
            Leave Room
          </Button>
        )}
      </div>
    </header>
  );
}

View File

@@ -0,0 +1,363 @@
import React from 'react';
import { partition } from 'remeda';
import { useNavigate } from 'react-router-dom';
import type { components } from '../../lib/reflector-api';
import {
useRoomActiveMeetings,
useRoomJoinMeeting,
useMeetingDeactivate,
useRoomGetByName,
} from '../../lib/apiHooks';
import MeetingMinimalHeader from './MeetingMinimalHeader';
import { Button } from '../ui/Button';
import { Card } from '../ui/Card';
import { ConfirmModal } from '../ui/ConfirmModal';
import { Users, Clock, Calendar, X as XIcon, Loader2 } from 'lucide-react';
import { formatDateTime, formatStartedAgo } from '../../lib/timeUtils';
// Convenience aliases over the generated API schema types.
type Meeting = components['schemas']['Meeting'];
type MeetingId = string;
interface MeetingSelectionProps {
  roomName: string;                       // room slug used for queries and joins
  isOwner: boolean;                       // owners see descriptions/attendees and can end meetings
  isSharedRoom: boolean;                  // shared rooms let non-owners create meetings
  authLoading: boolean;                   // NOTE(review): accepted but not destructured by the component
  onMeetingSelect: (meeting: Meeting) => void;
  onCreateUnscheduled: () => void;
  isCreatingMeeting?: boolean;
}
/**
 * Lobby screen for a room: shows ongoing meetings (joinable immediately) and
 * upcoming calendar meetings (joinable early via a server-side join), plus
 * owner-only controls to end/cancel a meeting with confirmation.
 */
export default function MeetingSelection({
  roomName,
  isOwner,
  isSharedRoom,
  onMeetingSelect,
  onCreateUnscheduled,
  isCreatingMeeting = false,
}: MeetingSelectionProps) {
  const navigate = useNavigate();
  const roomQuery = useRoomGetByName(roomName);
  const activeMeetingsQuery = useRoomActiveMeetings(roomName);
  const joinMeetingMutation = useRoomJoinMeeting();
  const deactivateMeetingMutation = useMeetingDeactivate();
  const room = roomQuery.data;
  const allMeetings = activeMeetingsQuery.data || [];
  const now = new Date();
  // Split into meetings happening right now vs. everything else.
  const [currentMeetings, nonCurrentMeetings] = partition(
    allMeetings,
    (meeting) => {
      const startTime = new Date(meeting.start_date);
      const endTime = new Date(meeting.end_date);
      return now >= startTime && now <= endTime;
    }
  );
  // Of the non-current meetings, keep only those not yet started.
  const upcomingMeetings = nonCurrentMeetings.filter((meeting) => {
    const startTime = new Date(meeting.start_date);
    return now < startTime;
  });
  const loading = roomQuery.isLoading || activeMeetingsQuery.isLoading;
  const error = roomQuery.error || activeMeetingsQuery.error;
  // Upcoming meetings must be joined on the backend first (the live room may
  // not exist yet); only then hand the joined meeting to the parent.
  const handleJoinUpcoming = async (meeting: Meeting) => {
    try {
      const joinedMeeting = await joinMeetingMutation.mutateAsync({
        params: {
          path: {
            room_name: roomName,
            meeting_id: meeting.id,
          },
        },
      });
      onMeetingSelect(joinedMeeting);
    } catch (err) {
      console.error('Failed to join upcoming meeting:', err);
    }
  };
  // Ongoing meetings are handed to the parent directly.
  const handleJoinDirect = (meeting: Meeting) => {
    onMeetingSelect(meeting);
  };
  // Id of the meeting pending "End Meeting" confirmation, if any.
  const [meetingIdToEnd, setMeetingIdToEnd] = React.useState<MeetingId | null>(null);
  const handleEndMeeting = async (meetingId: MeetingId) => {
    try {
      await deactivateMeetingMutation.mutateAsync({
        params: {
          path: {
            meeting_id: meetingId,
          },
        },
      });
      setMeetingIdToEnd(null);
    } catch (err) {
      console.error('Failed to end meeting:', err);
    }
  };
  const handleLeaveMeeting = () => {
    navigate('/rooms');
  };
  if (loading) {
    return (
      <div className="p-8 text-center flex flex-col justify-center items-center h-screen bg-surface">
        <Loader2 className="w-10 h-10 text-primary animate-spin mb-4" />
        <p className="font-serif italic text-muted">Retrieving meetings...</p>
      </div>
    );
  }
  if (error) {
    return (
      <div className="p-4 rounded-md bg-red-50 border-l-4 border-red-400 max-w-lg mx-auto mt-20">
        <p className="font-semibold text-red-800">Error</p>
        <p className="text-red-700">Failed to load meetings</p>
      </div>
    );
  }
  return (
    <div className="flex flex-col min-h-screen relative bg-surface selection:bg-primary-fixed">
      {isCreatingMeeting && (
        <div className="fixed inset-0 bg-[#1b1c14]/45 z-50 flex items-center justify-center backdrop-blur-sm">
          <div className="bg-white p-8 rounded-xl shadow-xl flex flex-col gap-4 items-center">
            <Loader2 className="w-10 h-10 text-primary animate-spin" />
            <p className="text-lg font-medium text-on-surface">Creating meeting...</p>
          </div>
        </div>
      )}
      <MeetingMinimalHeader
        roomName={roomName}
        displayName={room?.name}
        showLeaveButton={true}
        onLeave={handleLeaveMeeting}
        showCreateButton={isOwner || isSharedRoom}
        onCreateMeeting={onCreateUnscheduled}
        isCreatingMeeting={isCreatingMeeting}
      />
      <div className="flex flex-col w-full max-w-4xl mx-auto px-4 py-8 md:py-12 flex-1 gap-6 md:gap-8">
        {/* Current Ongoing Meetings */}
        {currentMeetings.length > 0 ? (
          <div className="flex flex-col gap-6 mb-8">
            {currentMeetings.map((meeting) => (
              <Card key={meeting.id} className="w-full bg-surface-low p-6 md:p-8 rounded-xl">
                <div className="flex flex-col md:flex-row justify-between items-stretch md:items-start gap-6">
                  <div className="flex flex-col items-start gap-4 flex-1">
                    <div className="flex items-center gap-2">
                      <Calendar className="w-6 h-6 text-primary" />
                      <h2 className="text-xl md:text-2xl font-bold font-serif text-on-surface">
                        {(meeting.calendar_metadata as any)?.title || 'Live Meeting'}
                      </h2>
                    </div>
                    {/* Description and attendees are owner-only details. */}
                    {isOwner && (meeting.calendar_metadata as any)?.description && (
                      <p className="text-md md:text-lg text-on-surface-variant font-sans">
                        {(meeting.calendar_metadata as any).description}
                      </p>
                    )}
                    <div className="flex gap-4 md:gap-8 text-sm md:text-base text-muted flex-wrap font-sans">
                      <div className="flex items-center gap-1.5">
                        <Users className="w-4 h-4" />
                        <span className="font-medium">
                          {meeting.num_clients || 0} participant{meeting.num_clients !== 1 ? 's' : ''}
                        </span>
                      </div>
                      <div className="flex items-center gap-1.5">
                        <Clock className="w-4 h-4" />
                        <span>Started {formatStartedAgo(new Date(meeting.start_date))}</span>
                      </div>
                    </div>
                    {isOwner && (meeting.calendar_metadata as any)?.attendees && (
                      <div className="flex gap-2 flex-wrap mt-2">
                        {/* Show at most 4 attendee chips, then a "+N more" chip. */}
                        {(meeting.calendar_metadata as any).attendees.slice(0, 4).map((att: any, idx: number) => (
                          <span key={idx} className="bg-primary/10 text-primary text-xs px-2.5 py-1 rounded-full font-semibold">
                            {att.name || att.email}
                          </span>
                        ))}
                        {(meeting.calendar_metadata as any).attendees.length > 4 && (
                          <span className="bg-surface-high text-muted text-xs px-2.5 py-1 rounded-full font-semibold">
                            + {(meeting.calendar_metadata as any).attendees.length - 4} more
                          </span>
                        )}
                      </div>
                    )}
                  </div>
                  <div className="flex flex-col gap-3 w-full md:w-auto mt-4 md:mt-0">
                    <Button
                      variant="primary"
                      className="py-3 px-6 text-base"
                      onClick={() => handleJoinDirect(meeting)}
                    >
                      <Users className="w-5 h-5 mr-2" />
                      Join Now
                    </Button>
                    {isOwner && (
                      <Button
                        variant="secondary"
                        className="py-2.5 border-red-200 text-red-600 hover:bg-red-50 hover:border-red-300"
                        onClick={() => setMeetingIdToEnd(meeting.id as string)}
                        disabled={deactivateMeetingMutation.isPending}
                      >
                        {deactivateMeetingMutation.isPending ? <Loader2 className="w-4 h-4 mr-2 animate-spin" /> : <XIcon className="w-4 h-4 mr-2" />}
                        End Meeting
                      </Button>
                    )}
                  </div>
                </div>
              </Card>
            ))}
          </div>
        ) : upcomingMeetings.length > 0 ? (
          /* Upcoming Meetings - Big Display */
          <div className="flex flex-col gap-6 mb-8">
            <h3 className="text-xl font-bold font-serif text-on-surface">
              Upcoming Meeting{upcomingMeetings.length > 1 ? 's' : ''}
            </h3>
            {upcomingMeetings.map((meeting) => {
              const now = new Date();
              const startTime = new Date(meeting.start_date);
              const minutesUntilStart = Math.floor((startTime.getTime() - now.getTime()) / (1000 * 60));
              return (
                <Card key={meeting.id} className="w-full bg-[#E5ECE5]/40 border-primary/20 p-6 md:p-8 rounded-xl">
                  <div className="flex flex-col md:flex-row justify-between items-stretch md:items-start gap-6">
                    <div className="flex flex-col items-start gap-4 flex-1">
                      <div className="flex items-center gap-2">
                        <Calendar className="w-6 h-6 text-primary" />
                        <h2 className="text-xl md:text-2xl font-bold font-serif text-primary">
                          {(meeting.calendar_metadata as any)?.title || 'Upcoming Meeting'}
                        </h2>
                      </div>
                      {isOwner && (meeting.calendar_metadata as any)?.description && (
                        <p className="text-md md:text-lg text-on-surface-variant">
                          {(meeting.calendar_metadata as any).description}
                        </p>
                      )}
                      <div className="flex gap-4 md:gap-6 text-sm md:text-base text-muted flex-wrap items-center">
                        <span className="bg-primary/10 text-primary font-semibold text-sm px-3 py-1 rounded-full">
                          Starts in {minutesUntilStart} minute{minutesUntilStart !== 1 ? 's' : ''}
                        </span>
                        <span className="text-muted font-sans">
                          {formatDateTime(new Date(meeting.start_date))}
                        </span>
                      </div>
                      {isOwner && (meeting.calendar_metadata as any)?.attendees && (
                        <div className="flex gap-2 flex-wrap">
                          {(meeting.calendar_metadata as any).attendees.slice(0, 4).map((att: any, idx: number) => (
                            <span key={idx} className="bg-white/50 border border-primary/10 text-primary text-xs px-2.5 py-1 rounded-full font-semibold">
                              {att.name || att.email}
                            </span>
                          ))}
                          {(meeting.calendar_metadata as any).attendees.length > 4 && (
                            <span className="bg-surface-high text-muted text-xs px-2.5 py-1 rounded-full font-semibold">
                              + {(meeting.calendar_metadata as any).attendees.length - 4} more
                            </span>
                          )}
                        </div>
                      )}
                    </div>
                    <div className="flex flex-col gap-3 w-full md:w-auto mt-4 md:mt-0">
                      <Button
                        variant="primary"
                        onClick={() => handleJoinUpcoming(meeting)}
                        className="bg-primary hover:bg-primary-hover shadow-sm"
                      >
                        <Clock className="w-4 h-4 mr-2" />
                        Join Early
                      </Button>
                      {isOwner && (
                        <Button
                          variant="secondary"
                          onClick={() => setMeetingIdToEnd(meeting.id as string)}
                          disabled={deactivateMeetingMutation.isPending}
                          className="border-surface-highest text-muted hover:text-red-600 hover:border-red-200"
                        >
                          Cancel Meeting
                        </Button>
                      )}
                    </div>
                  </div>
                </Card>
              );
            })}
          </div>
        ) : null}
        {/* Small Upcoming Display if Ongoing EXISTS */}
        {currentMeetings.length > 0 && upcomingMeetings.length > 0 && (
          <div className="flex flex-col gap-4 mb-6 pt-4 border-t border-surface-high">
            <h3 className="text-lg font-semibold font-serif text-on-surface-variant">Starting Soon</h3>
            <div className="flex gap-4 flex-wrap flex-col sm:flex-row">
              {upcomingMeetings.map((meeting) => {
                const now = new Date();
                const startTime = new Date(meeting.start_date);
                const minutesUntilStart = Math.floor((startTime.getTime() - now.getTime()) / (1000 * 60));
                return (
                  <div key={meeting.id} className="bg-surface border border-primary/20 rounded-lg p-5 min-w-[280px] hover:border-primary/40 transition-colors">
                    <div className="flex flex-col items-start gap-3">
                      <div className="flex items-center gap-2">
                        <Calendar className="w-4 h-4 text-primary" />
                        <span className="font-semibold text-md text-on-surface">
                          {(meeting.calendar_metadata as any)?.title || 'Upcoming'}
                        </span>
                      </div>
                      <span className="bg-primary/10 text-primary font-semibold text-xs px-2 py-1 rounded-md">
                        in {minutesUntilStart} minute{minutesUntilStart !== 1 ? 's' : ''}
                      </span>
                      <span className="text-xs text-muted font-sans">
                        Starts: {formatDateTime(new Date(meeting.start_date))}
                      </span>
                      <Button variant="primary" className="w-full mt-1 text-sm py-1.5" onClick={() => handleJoinUpcoming(meeting)}>
                        Join Early
                      </Button>
                    </div>
                  </div>
                );
              })}
            </div>
          </div>
        )}
        {/* No Meetings Fallback */}
        {currentMeetings.length === 0 && upcomingMeetings.length === 0 && (
          <div className="flex flex-col w-full flex-1 justify-center items-center text-center pb-20 mt-10">
            <div className="w-20 h-20 rounded-full bg-surface-high flex items-center justify-center mb-6">
              <Calendar className="w-10 h-10 text-on-surface-variant opacity-40" />
            </div>
            <h2 className="text-2xl font-semibold font-serif text-on-surface mb-2">No meetings active</h2>
            <p className="text-muted max-w-sm font-sans text-[0.9375rem] leading-relaxed">
              There are no ongoing or upcoming calendar meetings parsed for this room currently.
            </p>
          </div>
        )}
      </div>
      <ConfirmModal
        isOpen={meetingIdToEnd !== null}
        onClose={() => setMeetingIdToEnd(null)}
        onConfirm={() => meetingIdToEnd && handleEndMeeting(meetingIdToEnd)}
        title="End Meeting"
        description="Are you sure you want to end this calendar event's recording context? This will deactivate the session for all participants and cannot be undone."
        confirmText="End Meeting"
        isDestructive={true}
        isLoading={deactivateMeetingMutation.isPending}
      />
    </div>
  );
}

View File

@@ -0,0 +1,127 @@
import React, { useCallback, useEffect, useRef } from 'react';
import { useNavigate } from 'react-router-dom';
import type { components } from '../../lib/reflector-api';
import { useAuth } from '../../lib/AuthProvider';
import { getWherebyUrl, useWhereby } from '../../lib/wherebyClient';
import { assertMeetingId } from '../../lib/types';
import { ConsentDialogButton as BaseConsentDialogButton, useConsentDialog } from '../../lib/consent';
// Convenience aliases over the generated API schema types.
type Meeting = components['schemas']['Meeting'];
type Room = components['schemas']['RoomDetails'];
// NOTE(review): declared but not referenced in the visible code of this file.
type MeetingId = string;
interface WherebyRoomProps {
  meeting: Meeting;
  room: Room;
}
/**
 * Consent button wrapper for the Whereby embed.
 *
 * Records which element had focus when the embed fires its 'ready' event and,
 * on unmount, restores focus to that element — but only if the embed itself
 * still holds focus, so we never steal focus the user moved elsewhere.
 */
function WherebyConsentDialogButton({
  onClick,
  wherebyRef,
}: {
  onClick: () => void;
  wherebyRef: React.RefObject<HTMLElement | null>;
}) {
  const previousFocusRef = useRef<HTMLElement | null>(null);
  useEffect(() => {
    // wherebyRef.current is read once per effect run; if the embed is not
    // mounted yet, this effect is a no-op.
    const element = wherebyRef.current;
    if (!element) return;
    const handleWherebyReady = () => {
      previousFocusRef.current = document.activeElement as HTMLElement;
    };
    element.addEventListener('ready', handleWherebyReady);
    return () => {
      element.removeEventListener('ready', handleWherebyReady);
      // Restore focus only when it would otherwise be lost with the iframe.
      if (previousFocusRef.current && document.activeElement === element) {
        previousFocusRef.current.focus();
      }
    };
  }, [wherebyRef]);
  return (
    <BaseConsentDialogButton onClick={onClick} />
  );
}
/**
 * Full-screen Whereby call embed for a meeting.
 *
 * Once the Whereby SDK has loaded and the user is authenticated, renders the
 * <whereby-embed> web component, wires its "leave" event to navigation, and
 * overlays the recording-consent button when required.
 */
export default function WherebyRoom({ meeting, room }: WherebyRoomProps) {
  const sdkLoaded = useWhereby();
  const wherebyRef = useRef<HTMLElement>(null);
  const navigate = useNavigate();
  const auth = useAuth();
  const status = auth.status;
  const isAuthenticated = status === 'authenticated';
  const roomUrl = getWherebyUrl(meeting);
  const meetingId = meeting.id;

  const { showConsentButton, showConsentModal } = useConsentDialog({
    meetingId: assertMeetingId(meetingId),
    recordingType: meeting.recording_type,
    skipConsent: room.skip_consent,
  });

  // Keep a stable reference to the latest modal opener so the button's
  // onClick never goes stale across renders.
  const showConsentModalRef = useRef(showConsentModal);
  showConsentModalRef.current = showConsentModal;

  const isLoading = status === 'loading';

  const handleLeave = useCallback(() => {
    navigate('/transcriptions');
  }, [navigate]);

  // Attach the embed's "leave" listener only once everything is ready.
  useEffect(() => {
    if (isLoading || !isAuthenticated || !roomUrl || !sdkLoaded) return;
    const el = wherebyRef.current;
    if (el) {
      el.addEventListener('leave', handleLeave as EventListener);
    }
    return () => {
      if (el) {
        el.removeEventListener('leave', handleLeave as EventListener);
      }
    };
  }, [handleLeave, roomUrl, isLoading, isAuthenticated, sdkLoaded]);

  if (!roomUrl || !sdkLoaded) {
    return null;
  }

  // Inject Web Component tag for whereby native support
  return (
    <>
      <whereby-embed
        ref={wherebyRef as any}
        room={roomUrl}
        style={{ width: '100vw', height: '100vh', border: 'none' }}
      />
      {showConsentButton && (
        <WherebyConsentDialogButton
          onClick={() => showConsentModalRef.current()}
          wherebyRef={wherebyRef}
        />
      )}
    </>
  );
}
// Teach React's JSX type checker about the <whereby-embed> custom element so
// it can appear directly in TSX without casts.
declare global {
  namespace JSX {
    interface IntrinsicElements {
      'whereby-embed': React.DetailedHTMLProps<
        React.HTMLAttributes<HTMLElement> & {
          room: string; // Whereby room URL to join
          style?: React.CSSProperties;
          ref?: React.Ref<any>;
        },
        HTMLElement
      >;
    }
  }
}

View File

@@ -0,0 +1,28 @@
import React from 'react';
import { Bot, Sparkles } from 'lucide-react';
/**
 * Interstitial shown while the backend pipeline processes a recording.
 * Purely presentational: an animated bot icon, explanatory copy, and a
 * staggered bouncing-dots loader. Takes no props and has no state.
 */
export function ProcessingView() {
  return (
    <div className="flex-1 min-h-[500px] w-full max-w-2xl mx-auto px-6 flex flex-col items-center justify-center">
      {/* Glowing icon badge */}
      <div className="relative mb-12">
        <div className="absolute inset-0 w-32 h-32 bg-primary/20 rounded-full blur-2xl animate-pulse" />
        <div className="relative bg-surface p-6 rounded-3xl border border-primary/20 shadow-xl shadow-primary/5 flex items-center justify-center">
          <Bot className="w-12 h-12 text-primary animate-bounce" />
        </div>
      </div>
      <div className="text-center space-y-4 max-w-md">
        <h2 className="font-serif font-bold text-3xl text-on-surface">Curating your archive...</h2>
        <p className="text-on-surface-variant text-[0.9375rem] leading-relaxed">
          The Reflector extraction engine is analyzing the audio. This typically takes a few moments depending on the recording length.
        </p>
      </div>
      {/* Staggered bouncing dots */}
      <div className="mt-12 flex space-x-2">
        <div className="w-2 h-2 bg-primary rounded-full animate-bounce [animation-delay:-0.3s]" />
        <div className="w-2 h-2 bg-primary rounded-full animate-bounce [animation-delay:-0.15s]" />
        <div className="w-2 h-2 bg-primary rounded-full animate-bounce" />
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,255 @@
import React, { useEffect, useRef, useState } from 'react';
import WaveSurfer from 'wavesurfer.js';
import RecordPlugin from 'wavesurfer.js/dist/plugins/record.js';
import { useAudioDevice } from '../../hooks/useAudioDevice';
import { useWebSockets } from '../../hooks/transcripts/useWebSockets';
import { useWebRTC } from '../../hooks/transcripts/useWebRTC';
import { Button } from '../ui/Button';
import { ConfirmModal } from '../ui/ConfirmModal';
import { Mic, Play, Square, StopCircle } from 'lucide-react';
import { useNavigate } from 'react-router-dom';
interface RecordViewProps {
  /** Transcript the captured audio is streamed into. */
  transcriptId: string;
}

/**
 * Live microphone recording view.
 *
 * Captures audio from the selected input device, draws a scrolling waveform
 * (WaveSurfer + Record plugin), streams the audio to the backend over WebRTC,
 * and mirrors the live transcript/duration pushed back over WebSockets. When
 * the server reports the transcript as "ended" or "error", the user is
 * redirected to the transcript detail page.
 *
 * Fixes vs. previous revision:
 * - formatDuration(null) now renders "00:00" (was "00:00:00", which flickered
 *   to MM:SS once the first duration arrived).
 * - Microphone tracks are stopped on unmount, so navigating away mid-recording
 *   no longer leaves the mic live.
 * - Removed dead `wavesurfer` state (set but never read) and typed the record
 *   plugin properly instead of `any`.
 */
export function RecordView({ transcriptId }: RecordViewProps) {
  const navigate = useNavigate();
  const waveformRef = useRef<HTMLDivElement>(null);
  // Record plugin instance created on mount; null until WaveSurfer is ready.
  const [recordPlugin, setRecordPlugin] = useState<RecordPlugin | null>(null);
  const [isRecording, setIsRecording] = useState(false);
  const [isPaused, setIsPaused] = useState(false);
  const [currentStream, setCurrentStream] = useState<MediaStream | null>(null);
  const [isConfirmEndOpen, setIsConfirmEndOpen] = useState(false);
  const { permissionOk, requestPermission, audioDevices } = useAudioDevice();
  const [selectedDevice, setSelectedDevice] = useState<string>('');
  // WebSockets carry transcription data and the exact API-side duration.
  const wsData = useWebSockets(transcriptId);
  // WebRTC only transmits while a recording is active (transcriptId gates it).
  const _rtcPeerConnection = useWebRTC(currentStream, isRecording ? transcriptId : null);

  // Default to the first available input device once enumerated.
  useEffect(() => {
    if (audioDevices.length > 0) {
      setSelectedDevice(audioDevices[0].value);
    }
  }, [audioDevices]);

  // Handle server redirection upon stream termination & successful inference processing
  useEffect(() => {
    if (wsData.status?.value === "ended" || wsData.status?.value === "error") {
      navigate(`/transcriptions/${transcriptId}`);
    }
  }, [wsData.status?.value, navigate, transcriptId]);

  // Create the waveform renderer and record plugin once on mount.
  useEffect(() => {
    if (!waveformRef.current) return;
    const ws = WaveSurfer.create({
      container: waveformRef.current,
      waveColor: 'rgba(160, 154, 142, 0.5)',
      progressColor: '#DC5A28',
      height: 100,
      barWidth: 3,
      barGap: 2,
      barRadius: 3,
      normalize: true,
      cursorWidth: 0,
    });
    const rec = ws.registerPlugin(RecordPlugin.create({
      scrollingWaveform: true,
      renderRecordedAudio: false,
    }));
    setRecordPlugin(rec);
    return () => {
      rec.destroy();
      ws.destroy();
    };
  }, []);

  // Safety net: release the microphone if the component unmounts (or the
  // stream is swapped) without going through stopRecording(). Stopping an
  // already-stopped track is a no-op, so a double stop is harmless.
  useEffect(() => {
    return () => {
      currentStream?.getTracks().forEach((t) => t.stop());
    };
  }, [currentStream]);

  /** Acquire a mic stream for the selected device and begin recording. */
  const startRecording = async () => {
    if (!permissionOk) {
      requestPermission();
      return;
    }
    if (recordPlugin) {
      try {
        // Native browser constraints specifically isolated for the elected input device
        const freshStream = await navigator.mediaDevices.getUserMedia({
          audio: selectedDevice ? { deviceId: { exact: selectedDevice } } : true
        });
        setCurrentStream(freshStream);
        // Push duplicate explicit stream into Wavesurfer record plugin
        await recordPlugin.startRecording(freshStream);
        setIsRecording(true);
        setIsPaused(false);
      } catch (err) {
        console.error("Failed to inject stream into local constraints", err);
      }
    }
  };

  /** Toggle pause/resume of the local waveform rendering. */
  const pauseRecording = () => {
    if (recordPlugin && isRecording) {
      if (isPaused) {
        recordPlugin.resumeRecording();
        setIsPaused(false);
      } else {
        recordPlugin.pauseRecording();
        setIsPaused(true);
      }
    }
  };

  /** Stop the record plugin and release the microphone tracks. */
  const stopRecording = () => {
    setIsConfirmEndOpen(false);
    if (recordPlugin && isRecording) {
      recordPlugin.stopRecording();
      setIsRecording(false);
      setIsPaused(false);
      if (currentStream) {
        currentStream.getTracks().forEach(t => t.stop());
        setCurrentStream(null);
      }
    }
  };

  /**
   * Format a duration in seconds as MM:SS, or HH:MM:SS once it reaches an
   * hour. Unknown durations render as "00:00" to match the sub-hour format.
   */
  const formatDuration = (seconds: number | null) => {
    if (seconds == null) return "00:00";
    const hrs = Math.floor(seconds / 3600);
    const mins = Math.floor((seconds % 3600) / 60);
    const secs = Math.floor(seconds % 60);
    if (hrs > 0) {
      return `${hrs.toString().padStart(2, '0')}:${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
    }
    return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
  };

  return (
    <div className="flex-1 flex flex-col w-full max-w-4xl mx-auto px-6 py-12 h-screen max-h-screen">
      <div className="flex items-center justify-between mb-8">
        <div>
          <h2 className="font-serif font-bold text-3xl text-on-surface">Live Recording</h2>
          <p className="text-on-surface-variant mt-1 text-sm">
            Capturing audio for transcript ID: {transcriptId.substring(0, 8)}...
          </p>
        </div>
        <div className="w-64">
          <select
            className="w-full bg-surface-low border border-outline-variant/30 text-on-surface text-sm rounded-lg px-3 py-2 outline-none focus:border-primary transition-colors cursor-pointer"
            value={selectedDevice}
            onChange={(e) => setSelectedDevice(e.target.value)}
            disabled={isRecording}
          >
            {audioDevices.map(device => (
              <option key={device.value} value={device.value}>{device.label}</option>
            ))}
          </select>
        </div>
      </div>
      <div className="bg-surface-low p-8 rounded-2xl border border-outline-variant/20 shadow-sm flex flex-col mb-8 relative">
        {/* Dynamic websocket ping duration display mapped off python API output */}
        {isRecording && (
          <div className="absolute top-4 left-6 flex items-center gap-3 bg-red-500/10 text-red-600 px-3 py-1.5 rounded-full z-20">
            <span className="w-2 h-2 rounded-full bg-red-500 animate-pulse" />
            <span className="font-mono text-sm font-bold tracking-wider">{formatDuration(wsData.duration)}</span>
          </div>
        )}
        {/* Visualization Area */}
        <div className="bg-surface-mid rounded-xl overflow-hidden p-6 mb-8 border border-outline-variant/30 relative h-[160px] flex items-center justify-center">
          {!permissionOk && !isRecording && (
            <div className="absolute inset-0 flex flex-col items-center justify-center bg-black/60 z-10">
              <Mic className="w-8 h-8 text-white/50 mb-2" />
              <Button variant="secondary" onClick={requestPermission} className="bg-white/10 border-white/20 text-white hover:bg-white/20">
                Allow Microphone
              </Button>
            </div>
          )}
          <div ref={waveformRef} className="w-full relative z-0" />
        </div>
        {/* Play/Pause/Stop Global Controls */}
        <div className="flex items-center justify-center gap-6">
          {!isRecording ? (
            <Button
              onClick={startRecording}
              disabled={!permissionOk}
              className="bg-red-500 hover:bg-red-600 text-white rounded-full w-16 h-16 flex items-center justify-center shadow-[0_0_15px_rgba(239,68,68,0.4)] hover:shadow-[0_0_20px_rgba(239,68,68,0.6)] transition-all"
            >
              <div className="w-5 h-5 bg-white rounded-full" />
            </Button>
          ) : (
            <>
              {/* Note: WebRTC streams are natively active until tracks are stripped.
                  Therefore 'pause' locally suspends WaveSurfer drawing logic,
                  but active WebRTC pipe persists until standard "stop" terminates it.
              */}
              <Button
                onClick={pauseRecording}
                className="bg-surface-high hover:bg-outline-variant/20 text-on-surface rounded-full w-14 h-14 flex items-center justify-center transition-colors"
                title={isPaused ? "Resume visualization" : "Pause visualization"}
              >
                {isPaused ? <Play className="w-5 h-5 fill-current" /> : <Square className="w-5 h-5 fill-current" />}
              </Button>
              <Button
                onClick={() => setIsConfirmEndOpen(true)}
                className="bg-red-500 hover:bg-red-600 text-white rounded-full w-16 h-16 flex items-center justify-center shadow-lg shadow-red-500/20"
                title="Conclude Recording & Proceed"
              >
                <StopCircle className="w-8 h-8" />
              </Button>
            </>
          )}
        </div>
      </div>
      {/* Live Transcript Pane tracking wsData real-time ingestion */}
      <div className="flex-1 bg-surface-low rounded-2xl border border-outline-variant/20 p-6 flex flex-col overflow-hidden">
        <h3 className="text-xs font-bold uppercase tracking-widest text-muted mb-4 flex items-center gap-2">
          {isRecording && <span className="w-1.5 h-1.5 rounded-full bg-primary animate-pulse" />}
          Live Transcript Pipeline
        </h3>
        <div className="flex-1 overflow-y-auto w-full max-w-full">
          {wsData.transcriptTextLive || wsData.accumulatedText ? (
            <div className="text-on-surface font-sans text-lg leading-relaxed flex flex-col gap-2">
              <span className="opacity-60">{wsData.accumulatedText.replace(wsData.transcriptTextLive, '').trim()}</span>
              <span className="font-semibold">{wsData.transcriptTextLive}</span>
            </div>
          ) : (
            <div className="h-full flex items-center justify-center text-on-surface-variant font-mono text-sm opacity-50">
              {isRecording ? "Transmitting audio and calculating text..." : "Connect WebRTC to preview transcript pipeline."}
            </div>
          )}
        </div>
      </div>
      <ConfirmModal
        isOpen={isConfirmEndOpen}
        onClose={() => setIsConfirmEndOpen(false)}
        onConfirm={stopRecording}
        title="End Live Recording"
        description="Are you sure you want to stop recording? This will finalize the transcript and begin generating summaries. You will not be able to resume this session."
        confirmText="Yes, End Recording"
        isDestructive={false}
      />
    </div>
  );
}

View File

@@ -0,0 +1,134 @@
import React, { useState, useRef } from 'react';
import { useTranscriptUploadAudio } from '../../lib/apiHooks';
import { useNavigate } from 'react-router-dom';
import { Button } from '../ui/Button';
import { UploadCloud, CheckCircle2 } from 'lucide-react';
interface UploadViewProps {
  /** Transcript the uploaded audio is attached to. */
  transcriptId: string;
}

/**
 * Audio/video file upload view.
 *
 * Splits the chosen file into 50 MB chunks and uploads them sequentially,
 * reporting aggregate progress.
 *
 * Fixes vs. previous revision:
 * - The chunk loop is iterative instead of recursive (the old recursion grew
 *   one async frame per chunk on large files).
 * - The file input's value is cleared after each attempt; previously,
 *   re-selecting the same file after a failure never fired onChange, so the
 *   user could not retry without picking a different file.
 */
export function UploadView({ transcriptId }: UploadViewProps) {
  const fileInputRef = useRef<HTMLInputElement>(null);
  const uploadMutation = useTranscriptUploadAudio();
  const [progress, setProgress] = useState(0);
  const [error, setError] = useState<string | null>(null);

  const triggerFileUpload = () => {
    fileInputRef.current?.click();
  };

  /** Upload the selected file in sequential 50 MB chunks. */
  const handleFileUpload = async (event: React.ChangeEvent<HTMLInputElement>) => {
    const input = event.target;
    const file = input.files?.[0];
    if (!file) return;
    setError(null);
    setProgress(0);
    const maxChunkSize = 50 * 1024 * 1024; // 50 MB
    const totalChunks = Math.ceil(file.size / maxChunkSize);
    let uploadedSize = 0;
    try {
      for (let chunkNumber = 0; chunkNumber < totalChunks; chunkNumber++) {
        const start = chunkNumber * maxChunkSize;
        const end = Math.min(start + maxChunkSize, file.size);
        const chunk = file.slice(start, end);
        const formData = new FormData();
        formData.append("chunk", chunk, file.name);
        await uploadMutation.mutateAsync({
          params: {
            path: {
              transcript_id: transcriptId as any,
            },
            query: {
              chunk_number: chunkNumber,
              total_chunks: totalChunks,
            },
          },
          body: formData as any,
        });
        uploadedSize += chunk.size;
        setProgress(Math.floor((uploadedSize / file.size) * 100));
      }
      // All chunks sent (also covers a zero-chunk file).
      setProgress(100);
    } catch (err) {
      console.error(err);
      setError("Failed to upload file. Please try again.");
      setProgress(0);
    } finally {
      // Clear the input so selecting the same file again re-fires onChange.
      input.value = "";
    }
  };

  return (
    <div className="flex-1 flex flex-col items-center justify-center min-h-[500px] w-full max-w-2xl mx-auto px-6">
      <div className="w-full text-center space-y-4 mb-8">
        <h2 className="font-serif font-bold text-3xl text-on-surface">Upload Meeting Audio</h2>
        <p className="text-on-surface-variant text-[0.9375rem]">
          Select an audio or video file to generate an editorial transcript.
        </p>
      </div>
      <div className="w-full bg-surface-low border-2 border-dashed border-outline-variant/30 rounded-xl p-12 flex flex-col items-center justify-center text-center space-y-6 transition-colors hover:border-primary/50 hover:bg-surface-low/80">
        <div className="w-16 h-16 bg-primary/10 rounded-full flex items-center justify-center text-primary mb-2">
          {progress === 100 ? <CheckCircle2 className="w-8 h-8 text-green-600" /> : <UploadCloud className="w-8 h-8" />}
        </div>
        {progress > 0 && progress < 100 ? (
          <div className="w-full max-w-xs space-y-3">
            <div className="flex justify-between text-sm font-medium text-on-surface-variant">
              <span>Uploading...</span>
              <span>{progress}%</span>
            </div>
            <div className="h-2 w-full bg-surface-high rounded-full overflow-hidden">
              <div
                className="h-full bg-primary transition-all duration-300 rounded-full"
                style={{ width: `${progress}%` }}
              />
            </div>
          </div>
        ) : progress === 100 ? (
          <div className="text-green-700 font-medium text-sm">Upload complete! Processing will begin momentarily.</div>
        ) : (
          <>
            <div>
              <p className="font-semibold text-on-surface mb-1">Click to select a file</p>
              <p className="text-xs text-muted">Supported formats: .mp3, .m4a, .wav, .mp4, .mov, .webm</p>
            </div>
            <Button
              onClick={triggerFileUpload}
              variant="primary"
              className="px-8"
              disabled={progress > 0}
            >
              Select File
            </Button>
            {error && <p className="text-red-500 text-sm mt-2">{error}</p>}
          </>
        )}
        <input
          type="file"
          ref={fileInputRef}
          className="hidden"
          onChange={handleFileUpload}
          accept="audio/*,video/mp4,video/webm,video/quicktime"
        />
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,139 @@
import { useState, useEffect } from "react";
import { TopicWordsEditor } from "./TopicWordsEditor";
import { ParticipantSidebar } from "./ParticipantSidebar";
import { SelectedText } from "./types";
import { useTranscriptParticipants } from "../../../lib/apiHooks";
import { ChevronLeft, ChevronRight, XIcon } from "lucide-react";
type CorrectionEditorProps = {
  transcriptId: string;
  topics: any[]; // List of topic objects [{id, title, ...}]
  onClose: () => void;
};

/**
 * Full-screen transcript correction workspace.
 *
 * Lets the user step through a transcript's topics (toolbar buttons or
 * left/right arrow keys), edit word/speaker attribution in TopicWordsEditor,
 * and manage participants in a side panel. The current topic id is persisted
 * to localStorage so a correction session can be resumed later.
 *
 * Fix vs. previous revision: the arrow-key handler now ignores key events
 * while focus is in ANY editable control (textarea, select, contentEditable),
 * not just <input>, so typing no longer accidentally switches topics.
 */
export function CorrectionEditor({ transcriptId, topics, onClose }: CorrectionEditorProps) {
  const [currentTopicId, setCurrentTopicId] = useState<string | null>(null);
  // Selection state (speaker number or time slice) shared between the word
  // editor and the participant sidebar.
  const stateSelectedText = useState<SelectedText>(undefined);
  const { data: participantsData, isLoading: isParticipantsLoading, refetch: refetchParticipants } = useTranscriptParticipants(transcriptId as any);

  // Initialize with first topic or restored session topic
  useEffect(() => {
    if (topics && topics.length > 0 && !currentTopicId) {
      const sessionTopic = window.localStorage.getItem(`${transcriptId}_correct_topic`);
      if (sessionTopic && topics.find((t: any) => t.id === sessionTopic)) {
        setCurrentTopicId(sessionTopic);
      } else {
        setCurrentTopicId(topics[0].id);
      }
    }
  }, [topics, currentTopicId, transcriptId]);

  // Persist current topic to local storage tracking
  useEffect(() => {
    if (currentTopicId) {
      window.localStorage.setItem(`${transcriptId}_correct_topic`, currentTopicId);
    }
  }, [currentTopicId, transcriptId]);

  const currentIndex = topics.findIndex((t: any) => t.id === currentTopicId);
  const currentTopic = topics[currentIndex];
  const canGoPrev = currentIndex > 0;
  const canGoNext = currentIndex < topics.length - 1;
  const onPrev = () => { if (canGoPrev) setCurrentTopicId(topics[currentIndex - 1].id); };
  const onNext = () => { if (canGoNext) setCurrentTopicId(topics[currentIndex + 1].id); };

  // Keyboard navigation between topics.
  useEffect(() => {
    const keyHandler = (e: KeyboardEvent) => {
      // Don't intercept arrow keys while the user is typing in any form
      // field or editable region.
      const active = document.activeElement as HTMLElement | null;
      if (
        active &&
        (active.tagName === 'INPUT' ||
          active.tagName === 'TEXTAREA' ||
          active.tagName === 'SELECT' ||
          active.isContentEditable)
      ) {
        return;
      }
      if (e.key === "ArrowLeft") onPrev();
      else if (e.key === "ArrowRight") onNext();
    };
    document.addEventListener("keyup", keyHandler);
    return () => document.removeEventListener("keyup", keyHandler);
  }, [currentIndex, topics]);

  return (
    <div className="flex flex-col h-[calc(100vh-140px)] w-full relative">
      <div className="flex items-center justify-between p-4 bg-surface-low border-b border-outline-variant/10 rounded-t-xl shrink-0">
        <div className="flex items-center gap-4">
          <div className="flex items-center gap-1 bg-surface-high p-1 rounded-md border border-outline-variant/20">
            <button
              onClick={onPrev}
              disabled={!canGoPrev}
              className="p-1.5 rounded hover:bg-surface disabled:opacity-30 disabled:hover:bg-transparent transition-colors"
              title="Previous Topic (Left Arrow)"
            >
              <ChevronLeft className="w-4 h-4" />
            </button>
            <button
              onClick={onNext}
              disabled={!canGoNext}
              className="p-1.5 rounded hover:bg-surface disabled:opacity-30 disabled:hover:bg-transparent transition-colors"
              title="Next Topic (Right Arrow)"
            >
              <ChevronRight className="w-4 h-4" />
            </button>
          </div>
          <div className="flex items-center gap-3">
            <span className="text-xs font-bold text-muted bg-surface-high px-2 py-1 rounded">
              {currentIndex >= 0 ? currentIndex + 1 : 0} / {topics.length}
            </span>
            <h3 className="font-serif text-lg font-bold truncate max-w-[300px]">
              {currentTopic?.title || "Loading..."}
            </h3>
          </div>
        </div>
        <button
          onClick={onClose}
          className="p-2 text-muted hover:text-red-500 hover:bg-red-50 transition-colors rounded-full"
          title="Exit Correction Mode"
        >
          <XIcon className="w-5 h-5" />
        </button>
      </div>
      <div className="flex flex-1 overflow-hidden">
        {/* Editor Central Area */}
        <div className="flex-1 p-6 overflow-y-auto bg-surface relative min-h-0">
          <div className="max-w-3xl mx-auto h-full pr-4">
            <h4 className="text-xs font-bold tracking-widest text-primary uppercase mb-6 flex items-center">
              <span className="w-2 h-2 rounded-full bg-primary mr-2 animate-pulse"></span>
              Correction Mode
            </h4>
            {currentTopicId ? (
              <TopicWordsEditor
                transcriptId={transcriptId}
                topicId={currentTopicId}
                stateSelectedText={stateSelectedText}
                participants={participantsData || []}
              />
            ) : (
              <div className="text-muted text-sm text-center py-20">No topic selected</div>
            )}
          </div>
        </div>
        {/* Participant Assignment Sidebar */}
        <div className="w-80 shrink-0 border-l border-outline-variant/10 bg-surface-low p-4 overflow-y-auto hidden md:block min-h-0">
          {currentTopicId && (
            <ParticipantSidebar
              transcriptId={transcriptId}
              topicId={currentTopicId}
              participants={participantsData || []}
              isParticipantsLoading={isParticipantsLoading}
              refetchParticipants={refetchParticipants}
              stateSelectedText={stateSelectedText}
            />
          )}
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,331 @@
import { ChangeEvent, useEffect, useRef, useState } from "react";
import {
useTranscriptSpeakerAssign,
useTranscriptSpeakerMerge,
useTranscriptParticipantUpdate,
useTranscriptParticipantCreate,
useTranscriptParticipantDelete,
} from "../../../lib/apiHooks";
import { selectedTextIsSpeaker, selectedTextIsTimeSlice } from "./types";
import { Button } from "../../ui/Button";
import { CornerDownRight, Loader2 } from "lucide-react";
type ParticipantSidebarProps = {
  transcriptId: string;
  topicId: string; // NOTE(review): declared but never destructured or used below
  participants: any[];
  isParticipantsLoading: boolean;
  refetchParticipants: () => void;
  stateSelectedText: any; // [selectedText, setSelectedText] tuple shared with the editor
};

/**
 * Sidebar for managing transcript participants during correction.
 *
 * Reacts to the shared selection state: selecting a speaker label in the
 * editor primes a "Rename" (or "Create to rename" when no participant maps
 * to that speaker), while selecting a time slice primes "Create and assign".
 * Typing with no selection primes a plain "Create". Buttons on each list row
 * allow merging speakers, assigning a time slice, or deleting a participant.
 */
export function ParticipantSidebar({
  transcriptId,
  participants,
  isParticipantsLoading,
  refetchParticipants,
  stateSelectedText,
}: ParticipantSidebarProps) {
  const speakerAssignMutation = useTranscriptSpeakerAssign();
  const speakerMergeMutation = useTranscriptSpeakerMerge();
  const participantUpdateMutation = useTranscriptParticipantUpdate();
  const participantCreateMutation = useTranscriptParticipantCreate();
  const participantDeleteMutation = useTranscriptParticipantDelete();
  // True while any participant/speaker mutation is in flight.
  const loading =
    speakerAssignMutation.isPending ||
    speakerMergeMutation.isPending ||
    participantUpdateMutation.isPending ||
    participantCreateMutation.isPending ||
    participantDeleteMutation.isPending;
  const [participantInput, setParticipantInput] = useState("");
  const inputRef = useRef<HTMLInputElement>(null);
  // Shared selection: a speaker number, a {start, end} time slice, or undefined.
  const [selectedText, setSelectedText] = stateSelectedText;
  const [selectedParticipant, setSelectedParticipant] = useState<any>();
  // Pending operation the action button will perform.
  const [action, setAction] = useState<"Create" | "Create to rename" | "Create and assign" | "Rename" | null>(null);
  // The single participant whose name prefix-matches the input, if exactly one.
  const [oneMatch, setOneMatch] = useState<any>();
  // Map the current selection to an action and pre-fill the name input.
  useEffect(() => {
    if (participants && participants.length > 0) {
      if (selectedTextIsSpeaker(selectedText)) {
        inputRef.current?.focus();
        const participant = participants.find((p) => p.speaker === selectedText);
        if (participant) {
          setParticipantInput(participant.name);
          setOneMatch(undefined);
          setSelectedParticipant(participant);
          setAction("Rename");
        } else {
          // Speaker has no participant yet: creating one will name it.
          setSelectedParticipant(undefined);
          setParticipantInput("");
          setOneMatch(undefined);
          setAction("Create to rename");
        }
      }
      if (selectedTextIsTimeSlice(selectedText)) {
        inputRef.current?.focus();
        setParticipantInput("");
        setOneMatch(undefined);
        setAction("Create and assign");
        setSelectedParticipant(undefined);
      }
      if (typeof selectedText === "undefined") {
        inputRef.current?.blur();
        setSelectedParticipant(undefined);
        setAction(null);
      }
    }
  }, [selectedText, participants]);
  // Reset all transient state after a successful mutation.
  const onSuccess = () => {
    refetchParticipants();
    setAction(null);
    setSelectedText(undefined);
    setSelectedParticipant(undefined);
    setParticipantInput("");
    setOneMatch(undefined);
    inputRef?.current?.blur();
  };
  // Assign the currently selected time slice to the given participant.
  const assignTo = (participant: any) => async (e?: React.MouseEvent<HTMLButtonElement>) => {
    e?.preventDefault();
    e?.stopPropagation();
    if (loading || isParticipantsLoading) return;
    if (!selectedTextIsTimeSlice(selectedText)) return;
    try {
      await speakerAssignMutation.mutateAsync({
        params: { path: { transcript_id: transcriptId as any } },
        body: {
          participant: participant.id,
          timestamp_from: selectedText.start,
          timestamp_to: selectedText.end,
        },
      });
      onSuccess();
    } catch (error) {
      console.error(error);
    }
  };
  // Merge speakerFrom into participantTo: either a server-side speaker merge
  // (when the target already has a speaker) or attaching the speaker number
  // to the target participant.
  const mergeSpeaker = (speakerFrom: number, participantTo: any) => async () => {
    if (loading || isParticipantsLoading) return;
    // NOTE(review): truthiness check — a speaker numbered 0 would be treated
    // as "no speaker" and fall into the update branch; confirm speaker
    // numbering starts at 1.
    if (participantTo.speaker) {
      try {
        await speakerMergeMutation.mutateAsync({
          params: { path: { transcript_id: transcriptId as any } },
          body: {
            speaker_from: speakerFrom,
            speaker_to: participantTo.speaker,
          },
        });
        onSuccess();
      } catch (error) {
        console.error(error);
      }
    } else {
      try {
        await participantUpdateMutation.mutateAsync({
          params: {
            path: {
              transcript_id: transcriptId as any,
              participant_id: participantTo.id,
            },
          },
          body: { speaker: speakerFrom },
        });
        onSuccess();
      } catch (error) {
        console.error(error);
      }
    }
  };
  // Perform whichever action is currently primed (button or Enter path).
  const doAction = async (e?: any) => {
    e?.preventDefault();
    e?.stopPropagation();
    if (loading || isParticipantsLoading || !participants) return;
    if (action === "Rename" && selectedTextIsSpeaker(selectedText)) {
      const participant = participants.find((p) => p.speaker === selectedText);
      // Only issue an update when the name actually changed.
      if (participant && participant.name !== participantInput) {
        try {
          await participantUpdateMutation.mutateAsync({
            params: {
              path: {
                transcript_id: transcriptId as any,
                participant_id: participant.id,
              },
            },
            body: { name: participantInput },
          });
          refetchParticipants();
          setAction(null);
        } catch (e) {
          console.error(e);
        }
      }
    } else if (action === "Create to rename" && selectedTextIsSpeaker(selectedText)) {
      // Create a participant bound to the selected (unnamed) speaker.
      try {
        await participantCreateMutation.mutateAsync({
          params: { path: { transcript_id: transcriptId as any } },
          body: { name: participantInput, speaker: selectedText },
        });
        refetchParticipants();
        setParticipantInput("");
        setOneMatch(undefined);
      } catch (e) {
        console.error(e);
      }
    } else if (action === "Create and assign" && selectedTextIsTimeSlice(selectedText)) {
      // Create first, then assign the selected time slice to the new participant.
      try {
        const participant = await participantCreateMutation.mutateAsync({
          params: { path: { transcript_id: transcriptId as any } },
          body: { name: participantInput },
        });
        assignTo(participant)();
      } catch (error) {
        console.error(error);
      }
    } else if (action === "Create") {
      // Plain creation with no selection in play.
      try {
        await participantCreateMutation.mutateAsync({
          params: { path: { transcript_id: transcriptId as any } },
          body: { name: participantInput },
        });
        refetchParticipants();
        setParticipantInput("");
        inputRef.current?.focus();
      } catch (e) {
        console.error(e);
      }
    }
  };
  // Delete a participant; stops propagation so the row click doesn't select it.
  const deleteParticipant = (participantId: string) => async (e: any) => {
    e.stopPropagation();
    if (loading || isParticipantsLoading) return;
    try {
      await participantDeleteMutation.mutateAsync({
        params: {
          path: {
            transcript_id: transcriptId as any,
            participant_id: participantId,
          },
        },
      });
      refetchParticipants();
    } catch (e) {
      console.error(e);
    }
  };
  // Select a row: primes a Rename and mirrors the speaker into the selection.
  const selectParticipant = (participant: any) => (e: any) => {
    e.stopPropagation();
    setSelectedParticipant(participant);
    setSelectedText(participant.speaker);
    setAction("Rename");
    setParticipantInput(participant.name);
    oneMatch && setOneMatch(undefined);
  };
  // Clicking the sidebar background clears all selection state.
  const clearSelection = () => {
    setSelectedParticipant(undefined);
    setSelectedText(undefined);
    setAction(null);
    setParticipantInput("");
    oneMatch && setOneMatch(undefined);
  };
  // Track input changes; strips commas/periods/spaces, keeps the prefix-match
  // hint up to date, and primes "Create" once there is text with no selection.
  const changeParticipantInput = (e: ChangeEvent<HTMLInputElement>) => {
    const value = e.target.value.replaceAll(/,|\.| /g, "");
    setParticipantInput(value);
    if (value.length > 0 && participants && (action === "Create and assign" || action === "Create to rename")) {
      const matches = participants.filter((p) => p.name.toLowerCase().startsWith(value.toLowerCase()));
      if (matches.length === 1) {
        setOneMatch(matches[0]);
      } else {
        setOneMatch(undefined);
      }
    }
    if (value.length > 0 && !action) {
      setAction("Create");
    }
  };
  const anyLoading = loading || isParticipantsLoading;
  return (
    <div className="h-full flex flex-col w-full bg-surface-low border border-outline-variant/20 rounded-xl overflow-hidden shadow-sm" onClick={clearSelection}>
      {/* Name input + primed-action button */}
      <div className="p-4 border-b border-outline-variant/10 bg-surface/50" onClick={(e) => e.stopPropagation()}>
        <input
          ref={inputRef}
          type="text"
          onChange={changeParticipantInput}
          value={participantInput}
          placeholder="Participant Name"
          className="w-full bg-surface border border-outline-variant/20 rounded-lg px-3 py-2 text-sm text-on-surface placeholder:text-muted focus:outline-none focus:ring-2 focus:ring-primary/20 mb-3"
        />
        <Button
          onClick={doAction}
          disabled={!action || anyLoading}
          className="w-full py-2 bg-primary text-white flex items-center justify-center font-medium rounded-lg disabled:opacity-50 disabled:cursor-not-allowed hover:bg-primary/90 transition-colors"
        >
          {!anyLoading ? (
            <>
              <CornerDownRight className="w-3 h-3 mr-2 opacity-70" />
              {action || "Create"}
            </>
          ) : (
            <Loader2 className="w-4 h-4 animate-spin" />
          )}
        </Button>
      </div>
      {/* Participant list with per-row Merge / Assign / Delete actions */}
      <div className="flex-1 overflow-y-auto p-2 space-y-1" onClick={(e) => e.stopPropagation()}>
        {participants?.map((participant) => (
          <div
            key={participant.id}
            onClick={selectParticipant(participant)}
            className={`flex items-center justify-between p-2 rounded-md cursor-pointer transition-colors group ${
              (participantInput.length > 0 && selectedText && participant.name.toLowerCase().startsWith(participantInput.toLowerCase())
                ? "bg-primary/10 border-primary/20"
                : "border-transparent") +
              (participant.id === selectedParticipant?.id ? " bg-primary/10 border border-primary text-primary" : " hover:bg-surface border")
            }`}
          >
            <span className="text-sm font-medium">{participant.name}</span>
            <div className="flex items-center gap-1 opacity-0 group-hover:opacity-100 transition-opacity">
              {/* In "Create to rename", selectedText is a speaker number (see
                  the selection effect above), hence the direct pass-through. */}
              {action === "Create to rename" && !selectedParticipant && !loading && (
                <button
                  onClick={mergeSpeaker(selectedText, participant)}
                  className="text-[10px] uppercase font-bold tracking-wider px-2 py-1 bg-surface-high rounded hover:bg-primary hover:text-white transition-colors"
                >
                  Merge
                </button>
              )}
              {selectedTextIsTimeSlice(selectedText) && !loading && (
                <button
                  onClick={assignTo(participant)}
                  className="text-[10px] uppercase font-bold tracking-wider px-2 py-1 bg-surface-high rounded hover:bg-primary hover:text-white transition-colors"
                >
                  Assign
                </button>
              )}
              <button
                onClick={deleteParticipant(participant.id)}
                className="text-[10px] uppercase font-bold tracking-wider px-2 py-1 bg-red-500/10 text-red-600 rounded hover:bg-red-500 hover:text-white transition-colors"
              >
                Delete
              </button>
            </div>
          </div>
        ))}
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,170 @@
import { Dispatch, SetStateAction, useEffect } from "react";
import { TimeSlice, selectedTextIsTimeSlice } from "./types";
import { useTranscriptTopicsWithWordsPerSpeaker } from "../../../lib/apiHooks";
import { Loader2 } from "lucide-react";
type TopicWordsEditorProps = {
transcriptId: string;
topicId: string;
stateSelectedText: [
number | TimeSlice | undefined,
Dispatch<SetStateAction<number | TimeSlice | undefined>>,
];
participants: any[]; // List of resolved participants
};
/**
 * Word-level transcript editor for a single topic.
 *
 * Renders each speaker's words as spans carrying data-start/data-end timing
 * attributes and converts the browser text selection (on mouse up) into:
 *   - a speaker number, when the speaker label itself is selected, or
 *   - a {start, end} time slice covering the selected words.
 * The result is written into the parent-owned `stateSelectedText` tuple.
 */
export function TopicWordsEditor({
  transcriptId,
  topicId,
  stateSelectedText,
  participants,
}: TopicWordsEditorProps) {
  // Selection state is owned by the parent so sibling views can react to it.
  const [selectedText, setSelectedText] = stateSelectedText;
  const { data: topicWithWords, isLoading } = useTranscriptTopicsWithWordsPerSpeaker(
    transcriptId as any,
    topicId,
  );
  // Drop any time-slice selection while words are (re)loading so the
  // highlight cannot reference stale word timings.
  // NOTE(review): deps intentionally omit selectedText/setSelectedText so
  // this only fires on loading transitions — confirm that is the intent.
  useEffect(() => {
    if (isLoading && selectedTextIsTimeSlice(selectedText)) {
      setSelectedText(undefined);
    }
  }, [isLoading]);
  // Resolve the start timestamp (a data-* string) for the first selected
  // node. `offset` is the caret offset inside the text node: when the caret
  // sits at the very end of a word, the selection effectively begins at the
  // next word (or the first word of the next paragraph). Falls back to the
  // numeric sentinels 0 / 9999999999999 when no following word exists, so
  // the parseFloat ordering comparison below still resolves.
  const getStartTimeFromFirstNode = (node: any, offset: number, reverse: boolean) => {
    if (node.parentElement?.dataset["start"]) {
      if (node.textContent?.length === offset) {
        const nextWordStartTime = node.parentElement.nextElementSibling?.dataset["start"];
        if (nextWordStartTime) return nextWordStartTime;
        const nextParaFirstWordStartTime = node.parentElement.parentElement.nextElementSibling?.childNodes[1]?.dataset["start"];
        if (nextParaFirstWordStartTime) return nextParaFirstWordStartTime;
        return reverse ? 0 : 9999999999999;
      } else {
        return node.parentElement.dataset["start"];
      }
    } else {
      // Node is not a word span (e.g. the speaker label): start at the
      // first word that follows it.
      return node.parentElement.nextElementSibling?.dataset["start"];
    }
  };
  // Interpret the finished mouse selection and store the result.
  const onMouseUp = () => {
    const selection = window.getSelection();
    // A collapsed selection (plain click) clears the current selection.
    if (
      selection &&
      selection.anchorNode &&
      selection.focusNode &&
      selection.anchorNode === selection.focusNode &&
      selection.anchorOffset === selection.focusOffset
    ) {
      setSelectedText(undefined);
      selection.empty();
      return;
    }
    if (
      selection &&
      selection.anchorNode &&
      selection.focusNode &&
      (selection.anchorNode !== selection.focusNode ||
        selection.anchorOffset !== selection.focusOffset)
    ) {
      const anchorNode = selection.anchorNode;
      // Word spans carry data-start/data-end; speaker labels do not.
      const anchorIsWord = !!selection.anchorNode.parentElement?.dataset["start"];
      const focusNode = selection.focusNode;
      const focusIsWord = !!selection.focusNode.parentElement?.dataset["end"];
      // If selected a speaker:
      if (!anchorIsWord && !focusIsWord && anchorNode.parentElement === focusNode.parentElement) {
        const speaker = focusNode.parentElement?.dataset["speaker"];
        setSelectedText(speaker ? parseInt(speaker, 10) : undefined);
        return;
      }
      const anchorStart = getStartTimeFromFirstNode(anchorNode, selection.anchorOffset, false);
      // End timestamp: the focused word's end, or — when the focus sits at
      // offset 0 or outside a word — the end of the previous word/paragraph.
      const focusEnd =
        selection.focusOffset !== 0
          ? selection.focusNode.parentElement?.dataset["end"] ||
            (selection.focusNode.parentElement?.parentElement?.previousElementSibling?.lastElementChild as any)?.dataset["end"]
          : (selection.focusNode.parentElement?.previousElementSibling as any)?.dataset["end"] || 0;
      // The user may have dragged right-to-left; detect that and swap the
      // anchor/focus roles so start <= end in the stored slice.
      const reverse = parseFloat(anchorStart) >= parseFloat(focusEnd);
      if (!reverse) {
        if (anchorStart && focusEnd) {
          setSelectedText({
            start: parseFloat(anchorStart),
            end: parseFloat(focusEnd),
          });
        }
      } else {
        const anchorEnd =
          anchorNode.parentElement?.dataset["end"] ||
          (selection.anchorNode.parentElement?.parentElement?.previousElementSibling?.lastElementChild as any)?.dataset["end"];
        const focusStart = getStartTimeFromFirstNode(focusNode, selection.focusOffset, true);
        setSelectedText({
          start: parseFloat(focusStart),
          end: parseFloat(anchorEnd),
        });
      }
    }
    // Always clear the native browser selection; the highlight is
    // re-rendered from state instead.
    selection && selection.empty();
  };
  // Map a diarization speaker number to a participant name, with a
  // generic "Speaker N" fallback.
  const getSpeakerName = (speakerNumber: number) => {
    if (!participants) return `Speaker ${speakerNumber}`;
    return (
      participants.find((p: any) => p.speaker === speakerNumber)?.name ||
      `Speaker ${speakerNumber}`
    );
  };
  if (isLoading) {
    return (
      <div className="flex justify-center items-center py-20">
        <Loader2 className="w-8 h-8 animate-spin text-primary/50" />
      </div>
    );
  }
  if (topicWithWords && participants) {
    return (
      <div
        onMouseUp={onMouseUp}
        className="max-h-full w-full overflow-y-auto pr-4 text-[0.9375rem] leading-relaxed selection:bg-primary/20"
      >
        {topicWithWords.words_per_speaker?.map((speakerWithWords: any, index: number) => (
          <p key={index} className="mb-4 last:mb-0">
            <span
              data-speaker={speakerWithWords.speaker}
              className={`font-semibold mr-2 cursor-pointer transition-colors ${
                selectedText === speakerWithWords.speaker ? "bg-amber-200 text-amber-900 rounded px-1" : "text-on-surface hover:text-primary"
              }`}
            >
              {getSpeakerName(speakerWithWords.speaker)}:
            </span>
            {speakerWithWords.words.map((word: any, wIndex: number) => {
              // Highlight words fully covered by the current time slice.
              const isActive =
                selectedTextIsTimeSlice(selectedText) &&
                selectedText.start <= word.start &&
                selectedText.end >= word.end;
              return (
                <span
                  data-start={word.start}
                  data-end={word.end}
                  key={wIndex}
                  className={`transition-colors cursor-text ${
                    isActive ? "bg-amber-200 text-amber-900 rounded px-0.5" : "text-on-surface-variant hover:text-on-surface"
                  }`}
                >
                  {word.text}{" "}
                </span>
              );
            })}
          </p>
        ))}
      </div>
    );
  }
  return null;
}

View File

@@ -0,0 +1,21 @@
/** A start/end time range over transcript words (same units as the word
 *  start/end timestamps). */
export type TimeSlice = {
  start: number;
  end: number;
};
/**
 * Current transcript selection: a speaker number, a time slice of words,
 * or undefined when nothing is selected.
 */
export type SelectedText = number | TimeSlice | undefined;
/** Type guard: true when the selection is a speaker (stored as a bare number). */
export function selectedTextIsSpeaker(
  selectedText: SelectedText,
): selectedText is number {
  const kind = typeof selectedText;
  return kind === "number";
}
/**
 * Type guard: true when the selection is a TimeSlice.
 * Narrows with typeof checks instead of unchecked `as any` casts, so the
 * compiler verifies the property accesses.
 */
export function selectedTextIsTimeSlice(
  selectedText: SelectedText,
): selectedText is TimeSlice {
  return (
    typeof selectedText === "object" &&
    selectedText !== null &&
    typeof selectedText.start === "number" &&
    typeof selectedText.end === "number"
  );
}

View File

@@ -0,0 +1,29 @@
import React from 'react';
interface ButtonProps extends React.ButtonHTMLAttributes<HTMLButtonElement> {
variant?: 'primary' | 'secondary' | 'tertiary';
}
export const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
({ variant = 'primary', className = '', children, ...props }, ref) => {
const baseStyles = 'rounded-sm px-5 py-2.5 font-sans font-semibold text-sm transition-all duration-200';
const variants = {
primary: 'bg-gradient-primary text-on-primary border-none hover:brightness-110 active:brightness-95',
secondary: 'bg-transparent border-[1.5px] border-primary text-primary hover:bg-primary/5',
tertiary: 'bg-transparent border-none text-primary hover:bg-surface-mid',
};
return (
<button
ref={ref}
className={`${baseStyles} ${variants[variant]} ${className}`}
{...props}
>
{children}
</button>
);
}
);
Button.displayName = 'Button';

View File

@@ -0,0 +1,19 @@
import React from 'react';
interface CardProps extends React.HTMLAttributes<HTMLDivElement> {}
export const Card = React.forwardRef<HTMLDivElement, CardProps>(
({ className = '', children, ...props }, ref) => {
return (
<div
ref={ref}
className={`bg-surface-highest rounded-md p-6 shadow-card ${className}`}
{...props}
>
{children}
</div>
);
}
);
Card.displayName = 'Card';

View File

@@ -0,0 +1,25 @@
import React from 'react';
interface CheckboxProps extends React.InputHTMLAttributes<HTMLInputElement> {
  // Optional text rendered next to the box; clicking it toggles the input.
  label?: string;
}
/**
 * Custom-styled checkbox with an optional label. The checkmark is drawn
 * purely in CSS via the ::after pseudo-element, so no icon asset is needed.
 */
export const Checkbox = React.forwardRef<HTMLInputElement, CheckboxProps>(
  ({ className = '', label, ...props }, ref) => {
    return (
      <label className="flex items-center gap-2 cursor-pointer">
        <input
          type="checkbox"
          ref={ref}
          className={`appearance-none w-4 h-4 rounded-[4px] border-[1.5px] border-outline-variant/60 checked:bg-primary checked:border-primary transition-colors relative
checked:after:content-[''] checked:after:absolute checked:after:left-[4px] checked:after:top-[1px] checked:after:w-[5px] checked:after:h-[9px] checked:after:border-r-2 checked:after:border-b-2 checked:after:border-white checked:after:rotate-45
${className}`}
          {...props}
        />
        {label && <span className="font-sans text-sm text-on-surface">{label}</span>}
      </label>
    );
  }
);
Checkbox.displayName = 'Checkbox';

View File

@@ -0,0 +1,96 @@
import React, { useEffect } from 'react';
import { Button } from './Button';
import { AlertTriangle, X, Trash2 } from 'lucide-react';
/** Props for ConfirmModal. */
interface ConfirmModalProps {
  isOpen: boolean;
  title: string;
  description: string;
  confirmText?: string;
  cancelText?: string;
  onConfirm: () => void;
  onClose: () => void;
  // When true, the confirm action is rendered in a red "danger" treatment.
  isDestructive?: boolean;
  // Disables dismissal (Escape, backdrop, X) and both buttons while set.
  isLoading?: boolean;
}
/**
 * Confirmation dialog with a blurred backdrop, Escape-to-close, and an
 * optional destructive styling for the confirm button. Renders nothing
 * while `isOpen` is false.
 */
export function ConfirmModal({
  isOpen,
  title,
  description,
  confirmText = 'Confirm',
  cancelText = 'Cancel',
  onConfirm,
  onClose,
  isDestructive = true,
  isLoading = false,
}: ConfirmModalProps) {
  // Close on Escape, but never while the confirm action is in flight.
  useEffect(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      if (e.key === 'Escape' && !isLoading) onClose();
    };
    if (isOpen) window.addEventListener('keydown', handleKeyDown);
    return () => window.removeEventListener('keydown', handleKeyDown);
  }, [isOpen, onClose, isLoading]);
  if (!isOpen) return null;
  return (
    <div className="fixed inset-0 z-[100] flex items-center justify-center p-4">
      {/* Backdrop */}
      <div
        className="absolute inset-0 bg-[#1b1c14]/40 backdrop-blur-sm transition-opacity animate-in fade-in duration-200"
        onClick={() => !isLoading && onClose()}
      />
      {/* Modal Box */}
      <div className="relative w-full max-w-md bg-surface shadow-2xl rounded-2xl overflow-hidden animate-in zoom-in-95 fade-in duration-200 border border-outline-variant/20">
        <button
          onClick={onClose}
          className="absolute right-4 top-4 p-2 text-muted hover:text-on-surface hover:bg-surface-high rounded-full transition-colors"
          disabled={isLoading}
        >
          <X className="w-5 h-5" />
        </button>
        <div className="p-6 pt-8">
          <div className="flex gap-4 items-start">
            <div className={`p-3 rounded-full shrink-0 ${isDestructive ? 'bg-red-50 text-red-500' : 'bg-primary/10 text-primary'}`}>
              {isDestructive ? <Trash2 className="w-6 h-6" /> : <AlertTriangle className="w-6 h-6" />}
            </div>
            <div className="space-y-2 mt-1 pr-6">
              <h2 className="text-xl font-serif font-bold text-on-surface">{title}</h2>
              <p className="text-[0.9375rem] font-sans text-on-surface-variant leading-relaxed">
                {description}
              </p>
            </div>
          </div>
        </div>
        <div className="p-5 bg-surface-low border-t border-outline-variant/10 flex flex-col-reverse sm:flex-row items-center justify-end gap-3 rounded-b-2xl">
          <Button
            variant="secondary"
            className="w-full sm:w-auto px-5 py-2 hover:bg-surface-highest transition-colors"
            onClick={onClose}
            disabled={isLoading}
          >
            {cancelText}
          </Button>
          <Button
            variant={isDestructive ? "secondary" : "primary"}
            className={
              isDestructive
                ? "w-full sm:w-auto px-5 py-2 !bg-red-50 !text-red-600 border border-red-200 hover:!bg-red-500 hover:!text-white hover:border-red-600 transition-colors shadow-sm"
                : "w-full sm:w-auto px-5 py-2"
            }
            onClick={onConfirm}
            disabled={isLoading}
          >
            {isLoading ? 'Processing...' : confirmText}
          </Button>
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,15 @@
import React from 'react';
interface FieldErrorProps {
message?: string;
}
export const FieldError: React.FC<FieldErrorProps> = ({ message }) => {
if (!message) return null;
return (
<span className="font-sans text-[0.8125rem] text-primary mt-1 block">
{message}
</span>
);
};

View File

@@ -0,0 +1,17 @@
import React from 'react';
interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {}
export const Input = React.forwardRef<HTMLInputElement, InputProps>(
({ className = '', ...props }, ref) => {
return (
<input
ref={ref}
className={`bg-surface-mid border border-outline-variant/40 rounded-sm px-3.5 py-2.5 font-sans text-on-surface placeholder:text-muted focus:outline-none focus:border-primary focus:ring-4 focus:ring-primary/15 transition-all ${className}`}
{...props}
/>
);
}
);
Input.displayName = 'Input';

View File

@@ -0,0 +1,19 @@
import React from 'react';
interface SelectProps extends React.SelectHTMLAttributes<HTMLSelectElement> {}
export const Select = React.forwardRef<HTMLSelectElement, SelectProps>(
({ className = '', children, ...props }, ref) => {
return (
<select
ref={ref}
className={`bg-surface-mid border border-outline-variant/40 rounded-sm px-3.5 py-2.5 font-sans text-on-surface focus:outline-none focus:border-primary focus:ring-4 focus:ring-primary/15 transition-all appearance-none ${className}`}
{...props}
>
{children}
</select>
);
}
);
Select.displayName = 'Select';

View File

@@ -0,0 +1,88 @@
import { useEffect, useState, useRef } from "react";
import { useError } from "../../lib/errorContext";
import type { components } from "../../lib/reflector-api";
import { shouldShowError } from "../../lib/errorUtils";
import { useRoomsCreateMeeting } from "../../lib/apiHooks";
import { ApiError } from "../../api/_error";
// Meeting payload shape from the generated OpenAPI schema.
type Meeting = components["schemas"]["Meeting"];
// The three mutually exclusive result states returned by
// useRoomDefaultMeeting; `reload` is available in every state to retry.
type ErrorMeeting = {
  error: ApiError;
  loading: false;
  response: null;
  reload: () => void;
};
type LoadingMeeting = {
  error: null;
  response: null;
  loading: true;
  reload: () => void;
};
type SuccessMeeting = {
  error: null;
  response: Meeting;
  loading: false;
  reload: () => void;
};
/**
 * Creates (or fetches) the default meeting for a room and exposes it as a
 * three-state result (loading / error / success). Call `reload()` to retry.
 *
 * Fix: the effect previously listed the whole mutation result object in its
 * dependency array; that object gets a new identity on every render, so the
 * effect re-ran each render and could fire duplicate meeting-creation
 * requests once the in-flight guard cleared. We now depend on the stable
 * `mutateAsync` function instead.
 */
const useRoomDefaultMeeting = (
  roomName: string | null,
): ErrorMeeting | LoadingMeeting | SuccessMeeting => {
  const [response, setResponse] = useState<Meeting | null>(null);
  const [reload, setReload] = useState(0);
  const { setError } = useError();
  const createMeetingMutation = useRoomsCreateMeeting();
  // React Query keeps `mutateAsync` referentially stable across renders,
  // making it safe to use as an effect dependency.
  const { mutateAsync: createMeetingAsync } = createMeetingMutation;
  const reloadHandler = () => setReload((prev) => prev + 1);
  // this is to undupe dev mode room creation
  const creatingRef = useRef(false);
  useEffect(() => {
    if (!roomName) return;
    if (creatingRef.current) return;
    const createMeeting = async () => {
      creatingRef.current = true;
      try {
        const result = await createMeetingAsync({
          params: {
            path: {
              room_name: roomName,
            },
          },
          body: {
            allow_duplicated: false,
          },
        });
        setResponse(result);
      } catch (error: any) {
        // 404s and non-human-readable errors are stored without the
        // user-facing message; everything else shows a retry hint.
        const shouldShowHuman = shouldShowError(error);
        if (shouldShowHuman && error.status !== 404) {
          setError(
            error,
            "There was an error loading the meeting. Please try again by refreshing the page.",
          );
        } else {
          setError(error);
        }
      } finally {
        creatingRef.current = false;
      }
    };
    createMeeting().catch(console.error);
  }, [roomName, reload, createMeetingAsync, setError]);
  // Loading until the first successful response lands.
  const loading = createMeetingMutation.isPending && !response;
  const error = createMeetingMutation.error;
  return { response, loading, error, reload: reloadHandler } as
    | ErrorMeeting
    | LoadingMeeting
    | SuccessMeeting;
};
export default useRoomDefaultMeeting;

View File

@@ -0,0 +1,85 @@
import { useEffect, useState } from "react";
import { useTranscriptWebRTC } from "../../lib/apiHooks";
/**
 * Establishes a WebRTC peer connection streaming the given MediaStream's
 * tracks to the backend, using the transcript WebRTC endpoint for HTTP
 * offer/answer signaling. Returns the live RTCPeerConnection (or null
 * until connected); the connection is torn down whenever stream or
 * transcript change, and on unmount.
 */
export const useWebRTC = (stream: MediaStream | null, transcriptId: string | null): RTCPeerConnection | null => {
  const [peer, setPeer] = useState<RTCPeerConnection | null>(null);
  const { mutateAsync: mutateWebRtcTranscriptAsync } = useTranscriptWebRTC();
  useEffect(() => {
    if (!stream || !transcriptId) {
      return;
    }
    let pc: RTCPeerConnection;
    const setupConnection = async () => {
      pc = new RTCPeerConnection({
        iceServers: [{ urls: "stun:stun.l.google.com:19302" }],
      });
      // Add local audio tracks to the peer connection
      stream.getTracks().forEach(track => {
        pc.addTrack(track, stream);
      });
      try {
        // Create an offer. Since HTTP signaling doesn't stream ICE candidates,
        // we can wait for ICE gathering to complete before sending SDP.
        const offer = await pc.createOffer();
        await pc.setLocalDescription(offer);
        // Wait for ICE gathering to complete so SDP has all local candidates
        await new Promise<void>((resolve) => {
          if (pc.iceGatheringState === "complete") {
            resolve();
          } else {
            const checkState = () => {
              if (pc.iceGatheringState === "complete") {
                pc.removeEventListener("icegatheringstatechange", checkState);
                resolve();
              }
            };
            pc.addEventListener("icegatheringstatechange", checkState);
            // Fallback timeout just in case ICE STUN gathering hangs
            setTimeout(() => {
              pc.removeEventListener("icegatheringstatechange", checkState);
              resolve();
            }, 2000);
          }
        });
        // Send the finalized local SDP to the server and apply its answer.
        const rtcOffer = {
          sdp: pc.localDescription!.sdp,
          type: pc.localDescription!.type,
        };
        const answer = await mutateWebRtcTranscriptAsync({
          params: {
            path: {
              transcript_id: transcriptId as any,
            },
          },
          body: rtcOffer as any,
        });
        await pc.setRemoteDescription(new RTCSessionDescription(answer as RTCSessionDescriptionInit));
        setPeer(pc);
      } catch (err) {
        // Signaling failure is logged, leaving `peer` null for callers.
        console.error("Failed to establish WebRTC connection", err);
      }
    };
    setupConnection();
    // Cleanup: close whatever connection was created and reset the peer.
    return () => {
      if (pc) {
        pc.close();
      }
      setPeer(null);
    };
  }, [stream, transcriptId, mutateWebRtcTranscriptAsync]);
  return peer;
};

View File

@@ -0,0 +1,173 @@
import { useEffect, useState } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { WEBSOCKET_URL } from "../../lib/apiClient";
import { useAuth } from "../../lib/AuthProvider";
import { parseNonEmptyString } from "../../lib/utils";
import { getReconnectDelayMs, MAX_RETRIES } from "./webSocketReconnect";
import { Topic, FinalSummary, Status } from "./webSocketTypes";
import type { components, operations } from "../../lib/reflector-api";
// Schema/operation types derived from the generated OpenAPI client.
type AudioWaveform = components["schemas"]["AudioWaveform"];
// Union of all payloads the transcript events WebSocket can emit.
type TranscriptWsEvent = operations["v1_transcript_get_websocket_events"]["responses"][200]["content"]["application/json"];
/** Aggregated live-transcript state exposed by useWebSockets. */
export type UseWebSockets = {
  // Most recent transcript chunk, paced by the reading-speed queue.
  transcriptTextLive: string;
  // All transcript chunks concatenated since the socket connected.
  accumulatedText: string;
  title: string;
  topics: Topic[];
  finalSummary: FinalSummary;
  status: Status | null;
  waveform: AudioWaveform | null;
  duration: number | null;
};
/**
 * Subscribes to the transcript events WebSocket and exposes live transcript
 * state (text, topics, summaries, title, status, waveform, duration).
 * Reconnects with exponential backoff on abnormal closure, and closes for
 * good once the transcript reaches a terminal status ("ended"/"error").
 */
export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
  const auth = useAuth();
  const queryClient = useQueryClient();
  const [transcriptTextLive, setTranscriptTextLive] = useState<string>("");
  const [accumulatedText, setAccumulatedText] = useState<string>("");
  const [title, setTitle] = useState<string>("");
  const [topics, setTopics] = useState<Topic[]>([]);
  const [waveform, setWaveForm] = useState<AudioWaveform | null>(null);
  const [duration, setDuration] = useState<number | null>(null);
  const [finalSummary, setFinalSummary] = useState<FinalSummary>({ summary: "" });
  const [status, setStatus] = useState<Status | null>(null);
  // FIFO of incoming text chunks, drained at reading speed below.
  const [textQueue, setTextQueue] = useState<string[]>([]);
  const [isProcessing, setIsProcessing] = useState(false);
  // Smooth out rapid text pushes
  // Each chunk is displayed for a duration proportional to its word count;
  // the effective WPM rises with queue length so the display catches up.
  useEffect(() => {
    if (isProcessing || textQueue.length === 0) return;
    setIsProcessing(true);
    const text = textQueue[0];
    setTranscriptTextLive(text);
    const WPM_READING = 200 + textQueue.length * 10;
    const wordCount = text.split(/\s+/).length;
    const delay = (wordCount / WPM_READING) * 60 * 1000;
    // NOTE(review): this timeout is never cleared on unmount — confirm a
    // late setState here is acceptable.
    setTimeout(() => {
      setIsProcessing(false);
      setTextQueue((prevQueue) => prevQueue.slice(1));
    }, delay);
  }, [textQueue, isProcessing]);
  useEffect(() => {
    if (!transcriptId) return;
    // NOTE(review): tsId is currently unused (the cache invalidation that
    // would use it is commented out below).
    const tsId = parseNonEmptyString(transcriptId);
    const url = `${WEBSOCKET_URL}/v1/transcripts/${transcriptId}/events`;
    let ws: WebSocket | null = null;
    let retryCount = 0;
    let retryTimeout: ReturnType<typeof setTimeout> | null = null;
    // Set when WE close the socket (terminal status or effect cleanup) so
    // onclose does not schedule a reconnect.
    let intentionalClose = false;
    const connect = () => {
      // Pass the bearer token via WebSocket subprotocols when logged in.
      const subprotocols =
        auth.status === "authenticated" && (auth as any).accessToken
          ? ["bearer", (auth as any).accessToken]
          : undefined;
      ws = new WebSocket(url, subprotocols);
      ws.onopen = () => {
        console.debug("Transcript WebSocket connected");
        retryCount = 0;
      };
      ws.onmessage = (event) => {
        try {
          const message: TranscriptWsEvent = JSON.parse(event.data);
          switch (message.event) {
            case "TRANSCRIPT": {
              const newText = (message.data.text ?? "").trim();
              if (!newText) break;
              setTextQueue((prev) => [...prev, newText]);
              setAccumulatedText((prev) => prev + " " + newText);
              break;
            }
            case "TOPIC":
              // Upsert the topic by id.
              setTopics((prevTopics) => {
                const topic = message.data;
                const index = prevTopics.findIndex((prev) => prev.id === topic.id);
                if (index >= 0) {
                  prevTopics[index] = topic;
                  return [...prevTopics];
                }
                return [...prevTopics, topic];
              });
              break;
            case "FINAL_LONG_SUMMARY":
              setFinalSummary({ summary: message.data.long_summary });
              break;
            case "FINAL_TITLE":
              setTitle(message.data.title);
              break;
            case "WAVEFORM":
              setWaveForm({ data: message.data.waveform });
              break;
            case "DURATION":
              setDuration(message.data.duration);
              break;
            case "STATUS":
              setStatus(message.data as any);
              // Terminal states: stop listening and suppress reconnects.
              if (message.data.value === "ended" || message.data.value === "error") {
                intentionalClose = true;
                ws?.close();
                // We should invalidate standard hooks here theoretically...
                // queryClient.invalidateQueries({ queryKey: ["transcript", tsId] });
              }
              break;
            case "ACTION_ITEMS":
            case "FINAL_SHORT_SUMMARY":
              // Known events we deliberately ignore here.
              break;
            default:
              console.warn(`Unknown WebSocket event: ${(message as any).event}`);
          }
        } catch (error) {
          console.error("Payload parse error", error);
        }
      };
      ws.onerror = (error) => {
        console.error("Transcript WebSocket error:", error);
      };
      ws.onclose = (event) => {
        if (intentionalClose) return;
        // Don't retry on normal closure codes.
        const normalCodes = [1000, 1001, 1005];
        if (normalCodes.includes(event.code)) return;
        if (retryCount < MAX_RETRIES) {
          const delay = getReconnectDelayMs(retryCount);
          retryCount++;
          retryTimeout = setTimeout(connect, delay);
        }
      };
    };
    connect();
    // Cleanup: mark the close intentional and cancel pending reconnects.
    return () => {
      intentionalClose = true;
      if (retryTimeout) clearTimeout(retryTimeout);
      ws?.close();
    };
  }, [transcriptId, auth.status, (auth as any).accessToken, queryClient]);
  return {
    transcriptTextLive,
    accumulatedText,
    topics,
    finalSummary,
    title,
    status,
    waveform,
    duration,
  };
};

View File

@@ -0,0 +1,5 @@
// Give up reconnecting after this many consecutive failed attempts.
export const MAX_RETRIES = 10;
/**
 * Exponential backoff delay for WebSocket reconnects:
 * 1s, 2s, 4s, ... doubling per attempt, capped at 30s.
 */
export function getReconnectDelayMs(retryIndex: number): number {
  const baseDelayMs = 1000;
  const capMs = 30000;
  return Math.min(baseDelayMs * 2 ** retryIndex, capMs);
}

View File

@@ -0,0 +1,24 @@
import type { components } from "../../lib/reflector-api";
// Topic shape from the generated OpenAPI schema, re-exported as `Topic`.
type GetTranscriptTopic = components["schemas"]["GetTranscriptTopic"];
export type Topic = GetTranscriptTopic;
// Lifecycle states carried by the transcript STATUS websocket event.
export type TranscriptStatus = "idle" | "recording" | "uploaded" | "processing" | "ended" | "error";
export type Transcript = {
  text: string;
};
export type FinalSummary = {
  summary: string;
};
export type Status = {
  value: TranscriptStatus;
};
export type TranslatedTopic = {
  text: string;
  translation: string;
};

View File

@@ -0,0 +1,134 @@
import { useEffect, useState } from "react";
// Permission descriptor for navigator.permissions.query.
const MIC_QUERY = { name: "microphone" as PermissionName };
/** Option entry for an audio-input device picker. */
export type AudioDeviceOption = {
  value: string;
  label: string;
};
/**
 * Tracks microphone permission state and enumerates audio input devices.
 * Exposes helpers to request permission and to open a stream for a given
 * device id, honouring ?noiseSuppression / ?echoCancellation URL overrides.
 */
export const useAudioDevice = () => {
  const [permissionOk, setPermissionOk] = useState<boolean>(false);
  const [permissionDenied, setPermissionDenied] = useState<boolean>(false);
  const [audioDevices, setAudioDevices] = useState<AudioDeviceOption[]>([]);
  const [loading, setLoading] = useState(true);
  useEffect(() => {
    // skips on SSR
    // NOTE(review): no explicit SSR guard here — presumably this effect
    // only runs client-side because effects never run on the server.
    checkPermission();
  }, []);
  // Refresh the device list once permission is granted (labels are only
  // populated after the user has granted mic access).
  useEffect(() => {
    if (permissionOk) {
      updateDevices();
    }
  }, [permissionOk]);
  const checkPermission = (): void => {
    // Firefox path: probe with getUserMedia directly.
    // NOTE(review): the catch sets permissionDenied=false even on a denial,
    // presumably so the prompt can be retried — confirm.
    if (navigator.userAgent.includes("Firefox")) {
      navigator.mediaDevices
        .getUserMedia({ audio: true, video: false })
        .then((stream) => {
          setPermissionOk(true);
          setPermissionDenied(false);
          // Release the probe stream immediately.
          stream.getTracks().forEach((track) => track.stop());
        })
        .catch((e) => {
          setPermissionOk(false);
          setPermissionDenied(false);
        })
        .finally(() => setLoading(false));
      return;
    }
    // Other browsers: use the Permissions API and track state changes.
    navigator.permissions
      .query(MIC_QUERY)
      .then((permissionStatus) => {
        setPermissionOk(permissionStatus.state === "granted");
        setPermissionDenied(permissionStatus.state === "denied");
        permissionStatus.onchange = () => {
          setPermissionOk(permissionStatus.state === "granted");
          setPermissionDenied(permissionStatus.state === "denied");
        };
      })
      .catch(() => {
        setPermissionOk(false);
        setPermissionDenied(false);
      })
      .finally(() => {
        setLoading(false);
      });
  };
  // Prompt the user for microphone access.
  const requestPermission = () => {
    navigator.mediaDevices
      .getUserMedia({
        audio: true,
      })
      .then((stream) => {
        if (!navigator.userAgent.includes("Firefox"))
          stream.getTracks().forEach((track) => track.stop());
        setPermissionOk(true);
        setPermissionDenied(false);
      })
      .catch(() => {
        setPermissionDenied(true);
        setPermissionOk(false);
      })
      .finally(() => {
        setLoading(false);
      });
  };
  // Open a MediaStream for the given input device, or null on failure.
  const getAudioStream = async (
    deviceId: string,
  ): Promise<MediaStream | null> => {
    try {
      // Debug overrides via query string, e.g. ?noiseSuppression=true.
      const urlParams = new URLSearchParams(window.location.search);
      const noiseSuppression = urlParams.get("noiseSuppression") === "true";
      const echoCancellation = urlParams.get("echoCancellation") === "true";
      console.debug(
        "noiseSuppression",
        noiseSuppression,
        "echoCancellation",
        echoCancellation,
      );
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: {
          deviceId,
          noiseSuppression,
          echoCancellation,
        },
      });
      return stream;
    } catch (e) {
      setPermissionOk(false);
      setAudioDevices([]);
      return null;
    }
  };
  // Enumerate audio inputs; devices with empty ids/labels (no permission)
  // are filtered out, so a non-empty list implies permission is granted.
  const updateDevices = async (): Promise<void> => {
    const devices = await navigator.mediaDevices.enumerateDevices();
    const _audioDevices = devices
      .filter(
        (d) => d.kind === "audioinput" && d.deviceId != "" && d.label != "",
      )
      .map((d) => ({ value: d.deviceId, label: d.label }));
    setPermissionOk(_audioDevices.length > 0);
    setAudioDevices(_audioDevices);
  };
  return {
    loading,
    permissionOk,
    permissionDenied,
    audioDevices,
    getAudioStream,
    requestPermission,
  };
};

View File

@@ -0,0 +1,271 @@
/**
* AuthProvider — Vite-compatible replacement for next-auth.
*
* Communicates with the Express auth proxy server for:
* - Session checking (GET /auth/session)
* - Login (POST /auth/login for credentials, GET /auth/login for SSO)
* - Token refresh (POST /auth/refresh)
* - Logout (POST /auth/logout)
*/
import React, {
createContext,
useContext,
useEffect,
useRef,
useState,
useCallback,
} from "react";
import { configureApiAuth } from "./apiClient";
// ─── Types ───────────────────────────────────────────────────────────────────
// Minimal user identity surfaced to the UI.
interface AuthUser {
  id: string;
  name?: string | null;
  email?: string | null;
}
// Context value: a discriminated union on `status` (loading /
// unauthenticated / authenticated) intersected with the auth actions,
// which are available in every state.
type AuthContextType = (
  | { status: "loading" }
  | { status: "unauthenticated"; error?: string }
  | {
      status: "authenticated";
      accessToken: string;
      accessTokenExpires: number;
      user: AuthUser;
    }
) & {
  signIn: (
    method: "credentials" | "sso",
    credentials?: { email: string; password: string },
  ) => Promise<{ ok: boolean; error?: string }>;
  signOut: () => Promise<void>;
  update: () => Promise<void>;
};
const AuthContext = createContext<AuthContextType | undefined>(undefined);
// ─── Constants ───────────────────────────────────────────────────────────────
// Base URL of the Express auth proxy; overridable via VITE_AUTH_PROXY_URL.
const AUTH_PROXY_BASE =
  import.meta.env.VITE_AUTH_PROXY_URL || "/auth";
// 4 minutes — must refresh before token expires
const REFRESH_BEFORE_MS = 4 * 60 * 1000;
// Poll every 5 seconds for refresh check
const REFRESH_INTERVAL_MS = 5000;
// ─── Provider ────────────────────────────────────────────────────────────────
/**
 * Provides authentication state and actions (signIn / signOut / update) via
 * context. Sessions come from the Express auth proxy; the access token is
 * mirrored into the API client through configureApiAuth and refreshed
 * automatically shortly before it expires.
 *
 * Fix: removed a stray `console.log(res)` left in the credentials login
 * path, which leaked the raw auth response object to the console.
 */
export function AuthProvider({ children }: { children: React.ReactNode }) {
  const [state, setState] = useState<
    | { status: "loading" }
    | { status: "unauthenticated"; error?: string }
    | {
        status: "authenticated";
        accessToken: string;
        accessTokenExpires: number;
        user: AuthUser;
      }
  >({ status: "loading" });
  const refreshTimerRef = useRef<number | null>(null);
  // ── Check session on mount ────────────────────────────────────────────────
  const checkSession = useCallback(async () => {
    try {
      const res = await fetch(`${AUTH_PROXY_BASE}/session`, {
        credentials: "include",
      });
      if (!res.ok) {
        setState({ status: "unauthenticated" });
        configureApiAuth(null);
        return;
      }
      const data = await res.json();
      if (data.status === "authenticated") {
        setState({
          status: "authenticated",
          accessToken: data.accessToken,
          accessTokenExpires: data.accessTokenExpires,
          user: data.user,
        });
        configureApiAuth(data.accessToken);
      } else if (data.status === "refresh_needed") {
        // Try to refresh
        await refreshToken();
      } else {
        setState({ status: "unauthenticated" });
        configureApiAuth(null);
      }
    } catch (error) {
      console.error("Session check failed:", error);
      setState({ status: "unauthenticated" });
      configureApiAuth(null);
    }
  }, []);
  // ── Token refresh ─────────────────────────────────────────────────────────
  const refreshToken = useCallback(async () => {
    try {
      const res = await fetch(`${AUTH_PROXY_BASE}/refresh`, {
        method: "POST",
        credentials: "include",
      });
      if (!res.ok) {
        setState({ status: "unauthenticated" });
        configureApiAuth(null);
        return;
      }
      const data = await res.json();
      setState({
        status: "authenticated",
        accessToken: data.accessToken,
        accessTokenExpires: data.accessTokenExpires,
        user: data.user,
      });
      configureApiAuth(data.accessToken);
    } catch (error) {
      console.error("Token refresh failed:", error);
      setState({ status: "unauthenticated" });
      configureApiAuth(null);
    }
  }, []);
  // ── Auto-refresh polling ─────────────────────────────────────────────────
  useEffect(() => {
    checkSession();
  }, [checkSession]);
  useEffect(() => {
    if (state.status !== "authenticated") {
      if (refreshTimerRef.current) {
        clearInterval(refreshTimerRef.current);
        refreshTimerRef.current = null;
      }
      return;
    }
    // Poll rather than setting one long timer so sleep/resume and clock
    // drift still trigger a timely refresh.
    const interval = window.setInterval(() => {
      if (state.status !== "authenticated") return;
      const timeLeft = state.accessTokenExpires - Date.now();
      if (timeLeft < REFRESH_BEFORE_MS) {
        refreshToken();
      }
    }, REFRESH_INTERVAL_MS);
    refreshTimerRef.current = interval;
    return () => clearInterval(interval);
  }, [state.status, state.status === "authenticated" ? state.accessTokenExpires : null, refreshToken]);
  // ── Sign in ───────────────────────────────────────────────────────────────
  const signIn = useCallback(
    async (
      method: "credentials" | "sso",
      credentials?: { email: string; password: string },
    ): Promise<{ ok: boolean; error?: string }> => {
      if (method === "sso") {
        // Redirect to Authentik SSO via the auth proxy
        window.location.href = `${AUTH_PROXY_BASE}/login`;
        return { ok: true };
      }
      // Credentials login
      if (!credentials) {
        return { ok: false, error: "Email and password are required" };
      }
      try {
        const res = await fetch(`${AUTH_PROXY_BASE}/login`, {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          credentials: "include",
          body: JSON.stringify(credentials),
        });
        if (!res.ok) {
          const data = await res.json().catch(() => ({}));
          return { ok: false, error: data.error || "Invalid credentials" };
        }
        const data = await res.json();
        setState({
          status: "authenticated",
          accessToken: data.accessToken,
          accessTokenExpires: data.accessTokenExpires,
          user: data.user,
        });
        configureApiAuth(data.accessToken);
        return { ok: true };
      } catch (error) {
        console.error("Login error:", error);
        return { ok: false, error: "An unexpected error occurred" };
      }
    },
    [],
  );
  // ── Sign out ──────────────────────────────────────────────────────────────
  const signOut = useCallback(async () => {
    try {
      await fetch(`${AUTH_PROXY_BASE}/logout`, {
        method: "POST",
        credentials: "include",
      });
    } catch (error) {
      console.error("Logout error:", error);
    }
    // Clear local state even if the server-side logout request failed.
    setState({ status: "unauthenticated" });
    configureApiAuth(null);
  }, []);
  // ── Update (re-check session) ─────────────────────────────────────────────
  const update = useCallback(async () => {
    await checkSession();
  }, [checkSession]);
  // ── Sync configureApiAuth ────────────────────────────────────────────────
  // Not useEffect — we need the token set ASAP, not on next render
  configureApiAuth(
    state.status === "authenticated"
      ? state.accessToken
      : state.status === "loading"
        ? undefined
        : null,
  );
  const contextValue: AuthContextType = {
    ...state,
    signIn,
    signOut,
    update,
  };
  return (
    <AuthContext.Provider value={contextValue}>{children}</AuthContext.Provider>
  );
}
// ─── Hook ────────────────────────────────────────────────────────────────────
export function useAuth() {
const context = useContext(AuthContext);
if (context === undefined) {
throw new Error("useAuth must be used within an AuthProvider");
}
return context;
}

View File

@@ -0,0 +1,186 @@
/**
* UserEventsProvider — ported from Next.js app/lib/UserEventsProvider.tsx
*
* Connects to the backend WebSocket for real-time transcript updates.
* Invalidates React Query caches when events are received.
*/
import React, { useEffect, useRef } from "react";
import { useQueryClient } from "@tanstack/react-query";
import { WEBSOCKET_URL } from "./apiClient";
import { useAuth } from "./AuthProvider";
import { invalidateTranscript, invalidateTranscriptLists } from "./apiHooks";
import { parseNonEmptyString } from "./utils";
import type { operations } from "./reflector-api";
type UserWsEvent =
operations["v1_user_get_websocket_events"]["responses"][200]["content"]["application/json"];
/**
 * Module-level store maintaining ONE shared WebSocket connection for user
 * events. Listeners subscribe/unsubscribe; once the last listener detaches,
 * the socket is closed after a 1s grace period so quick remounts (e.g.
 * React StrictMode) can reuse the connection instead of reconnecting.
 */
class UserEventsStore {
  private socket: WebSocket | null = null;
  private listeners: Set<(event: MessageEvent) => void> = new Set();
  // Timer id for the deferred close; cancelled when a listener (re)attaches.
  private closeTimeoutId: number | null = null;
  private isConnecting = false;
  // Open a connection if none is live or already in progress. Idempotent.
  ensureConnection(url: string, subprotocols?: string[]) {
    if (typeof window === "undefined") return;
    // A consumer arrived: cancel any pending deferred close.
    if (this.closeTimeoutId !== null) {
      clearTimeout(this.closeTimeoutId);
      this.closeTimeoutId = null;
    }
    if (this.isConnecting) return;
    if (
      this.socket &&
      (this.socket.readyState === WebSocket.OPEN ||
        this.socket.readyState === WebSocket.CONNECTING)
    ) {
      return;
    }
    this.isConnecting = true;
    const ws = new WebSocket(url, subprotocols || []);
    this.socket = ws;
    // Fan incoming messages out to every listener; one throwing listener
    // must not break the others.
    ws.onmessage = (event: MessageEvent) => {
      this.listeners.forEach((listener) => {
        try {
          listener(event);
        } catch (err) {
          console.error("UserEvents listener error", err);
        }
      });
    };
    // The `this.socket === ws` guards ignore events from a stale socket
    // that has since been replaced by a newer connection.
    ws.onopen = () => {
      if (this.socket === ws) this.isConnecting = false;
    };
    ws.onclose = () => {
      if (this.socket === ws) {
        this.socket = null;
        this.isConnecting = false;
      }
    };
    ws.onerror = () => {
      if (this.socket === ws) this.isConnecting = false;
    };
  }
  // Register a message listener; returns an unsubscribe function.
  subscribe(listener: (event: MessageEvent) => void): () => void {
    this.listeners.add(listener);
    if (this.closeTimeoutId !== null) {
      clearTimeout(this.closeTimeoutId);
      this.closeTimeoutId = null;
    }
    return () => {
      this.listeners.delete(listener);
      // Last listener gone: close lazily so a quick re-subscribe can keep
      // the existing socket alive.
      if (this.listeners.size === 0) {
        this.closeTimeoutId = window.setTimeout(() => {
          if (this.socket) {
            try {
              this.socket.close();
            } catch (err) {
              console.warn("Error closing user events socket", err);
            }
          }
          this.socket = null;
          this.closeTimeoutId = null;
        }, 1000);
      }
    };
  }
}
// Single store instance shared by every UserEventsProvider mount.
const sharedStore = new UserEventsStore();
/**
 * Subscribes to the shared user-events WebSocket while the user is
 * authenticated and invalidates the relevant React Query caches when
 * transcript events arrive. The bearer token is "pinned" at connection
 * time and kept for the lifetime of the WS connection.
 */
export function UserEventsProvider({
  children,
}: {
  children: React.ReactNode;
}) {
  const auth = useAuth();
  const queryClient = useQueryClient();
  // Token used when the connection was opened (not refreshed mid-connection).
  const tokenRef = useRef<string | null>(null);
  // Unsubscribe callback for the shared store, if currently subscribed.
  const detachRef = useRef<(() => void) | null>(null);
  useEffect(() => {
    // Only tear down when the user is truly unauthenticated
    if (auth.status === "unauthenticated") {
      if (detachRef.current) {
        try {
          detachRef.current();
        } catch (err) {
          console.warn("Error detaching UserEvents listener", err);
        }
        detachRef.current = null;
      }
      tokenRef.current = null;
      return;
    }
    // During loading, keep the existing connection intact
    if (auth.status !== "authenticated") {
      return;
    }
    // Authenticated: pin the initial token for the lifetime of this WS connection
    if (!tokenRef.current && auth.accessToken) {
      tokenRef.current = auth.accessToken;
    }
    const pinnedToken = tokenRef.current;
    const url = `${WEBSOCKET_URL}/v1/events`;
    // Ensure a single shared connection
    sharedStore.ensureConnection(
      url,
      pinnedToken ? ["bearer", pinnedToken] : undefined,
    );
    // Subscribe once; avoid re-subscribing during transient status changes
    if (!detachRef.current) {
      const onMessage = (event: MessageEvent) => {
        try {
          const msg: UserWsEvent = JSON.parse(event.data);
          switch (msg.event) {
            case "TRANSCRIPT_CREATED":
            case "TRANSCRIPT_DELETED":
            case "TRANSCRIPT_STATUS":
            case "TRANSCRIPT_FINAL_TITLE":
            case "TRANSCRIPT_DURATION":
              // Fire-and-forget cache invalidation for both the list views
              // and the individual transcript.
              invalidateTranscriptLists(queryClient).then(() => {});
              invalidateTranscript(
                queryClient,
                parseNonEmptyString(msg.data.id),
              ).then(() => {});
              break;
            default: {
              // Exhaustiveness check: fails to compile if a known event
              // type is missing a case above.
              const _exhaustive: never = msg;
              console.warn(
                `Unknown user event: ${(_exhaustive as UserWsEvent).event}`,
              );
            }
          }
        } catch (err) {
          console.warn("Invalid user event message", event.data);
        }
      };
      const unsubscribe = sharedStore.subscribe(onMessage);
      detachRef.current = unsubscribe;
    }
  }, [auth.status, queryClient]);
  // On unmount, detach the listener and clear the pinned token
  useEffect(() => {
    return () => {
      if (detachRef.current) {
        try {
          detachRef.current();
        } catch (err) {
          console.warn("Error detaching UserEvents listener on unmount", err);
        }
        detachRef.current = null;
      }
      tokenRef.current = null;
    };
  }, []);
  return <>{children}</>;
}

View File

@@ -0,0 +1,94 @@
/**
* API Client — ported from Next.js app/lib/apiClient.tsx
*
* Uses openapi-fetch + openapi-react-query for type-safe API calls.
* Token management delegated to configureApiAuth().
*/
import createClient from "openapi-fetch";
import type { paths } from "./reflector-api";
import createFetchClient from "openapi-react-query";
import { parseNonEmptyString, parseMaybeNonEmptyString } from "./utils";
// ─── URL Resolution ──────────────────────────────────────────────────────────
/** Resolve the REST base URL: env override first, else same-origin proxy. */
const resolveApiUrl = (): string => {
  // Explicit override from the Vite environment wins.
  const fromEnv = import.meta.env.VITE_API_URL;
  if (fromEnv) return fromEnv;
  // Default: assume API is accessed via proxy on same origin.
  // OpenAPI spec paths already include /v1 prefix, so base is just "/".
  return "/";
};
export const API_URL = resolveApiUrl();
/**
* Derive a WebSocket URL from the API_URL.
* Handles full URLs (http://host/api, https://host/api) and relative paths (/api).
*/
/**
 * Derive a WebSocket URL from the API base URL.
 * Accepts full URLs (http://host/api, https://host/api) and relative
 * paths (/api); https maps to wss, everything else to ws.
 */
const deriveWebSocketUrl = (apiUrl: string): string => {
  // Non-browser contexts (SSR, tests) have no origin to resolve against.
  if (typeof window === "undefined") {
    return "ws://localhost";
  }
  const resolved = new URL(apiUrl, window.location.origin);
  const scheme = resolved.protocol === "https:" ? "wss:" : "ws:";
  // Drop any trailing slashes so path joining stays predictable.
  const path = resolved.pathname.replace(/\/+$/, "");
  return `${scheme}//${resolved.host}${path}`;
};
/** Resolve the WS base URL; "auto" (or unset) derives it from API_URL. */
const resolveWebSocketUrl = (): string => {
  const configured = import.meta.env.VITE_WEBSOCKET_URL;
  const useDerived = !configured || configured === "auto";
  return useDerived ? deriveWebSocketUrl(API_URL) : configured;
};
export const WEBSOCKET_URL = resolveWebSocketUrl();
// ─── Client Setup ────────────────────────────────────────────────────────────
// Shared openapi-fetch client; every request passes through the auth
// middleware registered below.
export const client = createClient<paths>({
  baseUrl: API_URL,
});
// Module-level token cache: undefined = auth state unknown/loading,
// null = definitely logged out, string = access token.
let currentAuthToken: string | null | undefined = undefined;
// Auth middleware — attaches Bearer token to every request
client.use({
  /**
   * Attach the Bearer token (when known) and default the Content-Type to
   * JSON — except for FormData bodies, which must keep their own boundary.
   */
  async onRequest({ request }) {
    const token = currentAuthToken;
    if (token) {
      const checked = parseNonEmptyString(
        token,
        true,
        "panic! token is required",
      );
      request.headers.set("Authorization", `Bearer ${checked}`);
    }
    const isFormUpload = request.body instanceof FormData;
    if (!request.headers.has("Content-Type") && !isFormUpload) {
      request.headers.set("Content-Type", "application/json");
    }
    return request;
  },
});
export const $api = createFetchClient<paths>(client);
/**
* Set the auth token used for API requests.
* Called by the AuthProvider whenever auth state changes.
*
* Contract: lightweight, idempotent
* - undefined = "still loading / unknown"
* - null = "definitely logged out"
* - string = "access token"
*/
/**
 * Set the auth token used for API requests; called by the AuthProvider on
 * every auth-state change. Contract: undefined = still loading/unknown,
 * null = definitely logged out, string = access token.
 */
export const configureApiAuth = (token: string | null | undefined) => {
  // Ignore a late "still loading" signal once the auth state is already
  // known — only the very first load may leave the token undefined.
  const isLoadingSignal = token === undefined;
  const stateAlreadyKnown = currentAuthToken !== undefined;
  if (isLoadingSignal && stateAlreadyKnown) return;
  currentAuthToken = token;
};

View File

@@ -0,0 +1,967 @@
/**
* API Hooks — ported from Next.js app/lib/apiHooks.ts
*
* ~40 hooks covering Rooms, Transcripts, Meetings, Participants,
* Topics, Zulip, Config, API Keys, WebRTC, etc.
*
* Adaptations from Next.js version:
* - Removed "use client" directives
* - Replaced useError from Next.js ErrorProvider with our errorContext
* - useAuth comes from our AuthProvider (not next-auth)
*/
import { $api } from "./apiClient";
import { useError } from "./errorContext";
import { QueryClient, useQueryClient } from "@tanstack/react-query";
import type { components } from "./reflector-api";
import { useAuth } from "./AuthProvider";
import { MeetingId } from "./types";
import { NonEmptyString } from "./utils";
// ─── Transcript status types ─────────────────────────────────────────────────
type TranscriptStatus = "processing" | "uploaded" | "recording" | "processed" | "error";
// ─── Auth readiness ──────────────────────────────────────────────────────────
/** Derived auth flags used to gate queries until the session is known. */
export const useAuthReady = () => {
  const { status } = useAuth();
  return {
    isAuthenticated: status === "authenticated",
    isLoading: status === "loading",
  };
};
// ─── Rooms ───────────────────────────────────────────────────────────────────
/** Paginated room list; only runs once the user is authenticated. */
export function useRoomsList(page: number = 1) {
  const { isAuthenticated } = useAuthReady();
  return $api.useQuery(
    "get",
    "/v1/rooms",
    { params: { query: { page } } },
    { enabled: isAuthenticated },
  );
}
/** Single room by id; disabled until both an id and auth are available. */
export function useRoomGet(roomId: string | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!roomId && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_id}",
    { params: { path: { room_id: roomId! } } },
    { enabled },
  );
}
/** Room lookup by name; no auth gate (rooms are joinable by name). */
export function useRoomGetByName(roomName: string | null) {
  return $api.useQuery(
    "get",
    "/v1/rooms/name/{room_name}",
    { params: { path: { room_name: roomName! } } },
    { enabled: !!roomName },
  );
}
/** Create a room and refresh the cached room list on success. */
export function useRoomCreate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("post", "/v1/rooms", {
    onError: (error) =>
      setError(error as Error, "There was an error creating the room"),
    onSuccess: () =>
      queryClient.invalidateQueries({
        queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
      }),
  });
}
/** Patch a room, then refresh both the room list and that room's detail. */
export function useRoomUpdate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("patch", "/v1/rooms/{room_id}", {
    onError: (error) =>
      setError(error as Error, "There was an error updating the room"),
    onSuccess: async (room) => {
      const listKey = $api.queryOptions("get", "/v1/rooms").queryKey;
      const detailKey = $api.queryOptions("get", "/v1/rooms/{room_id}", {
        params: { path: { room_id: room.id } },
      }).queryKey;
      await Promise.all([
        queryClient.invalidateQueries({ queryKey: listKey }),
        queryClient.invalidateQueries({ queryKey: detailKey }),
      ]);
    },
  });
}
/** Delete a room and refresh the cached room list on success. */
export function useRoomDelete() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("delete", "/v1/rooms/{room_id}", {
    onError: (error) =>
      setError(error as Error, "There was an error deleting the room"),
    onSuccess: () =>
      queryClient.invalidateQueries({
        queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
      }),
  });
}
/** Fire a test request at the room's configured webhook. */
export function useRoomTestWebhook() {
  const { setError } = useError();
  return $api.useMutation("post", "/v1/rooms/{room_id}/webhook/test", {
    onError: (error) =>
      setError(error as Error, "There was an error testing the webhook"),
  });
}
// ─── Transcripts ─────────────────────────────────────────────────────────────
type SourceKind = components["schemas"]["SourceKind"];
// Search endpoint path; doubles as the stable query-key prefix used by
// invalidateTranscriptLists to refresh every transcript list at once.
export const TRANSCRIPT_SEARCH_URL = "/v1/transcripts/search" as const;
/** Invalidate every cached transcript search/list result. */
export const invalidateTranscriptLists = (queryClient: QueryClient) =>
  queryClient.invalidateQueries({ queryKey: ["get", TRANSCRIPT_SEARCH_URL] });
/** Full-text transcript search with optional paging and filters. */
export function useTranscriptsSearch(
  q: string = "",
  options: {
    limit?: number;
    offset?: number;
    room_id?: string;
    source_kind?: SourceKind;
  } = {},
) {
  const { limit, offset, room_id, source_kind } = options;
  return $api.useQuery(
    "get",
    TRANSCRIPT_SEARCH_URL,
    { params: { query: { q, limit, offset, room_id, source_kind } } },
    { enabled: true },
  );
}
// Non-terminal statuses: while a transcript is in one of these, the
// backend is still working on it and we keep polling.
// Hoisted to module scope so the Set is not rebuilt on every render
// and every refetchInterval evaluation.
const ACTIVE_TRANSCRIPT_STATUSES: ReadonlySet<TranscriptStatus> = new Set([
  "processing",
  "uploaded",
  "recording",
]);

/**
 * Fetch a single transcript. While it is in an active (non-terminal)
 * status the query refetches every 5 seconds; polling stops once the
 * transcript is processed or errored.
 */
export function useTranscriptGet(transcriptId: NonEmptyString | null) {
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}",
    { params: { path: { transcript_id: transcriptId! } } },
    {
      enabled: !!transcriptId,
      refetchInterval: (query) => {
        const status = query.state.data?.status;
        return status &&
          ACTIVE_TRANSCRIPT_STATUSES.has(status as TranscriptStatus)
          ? 5000
          : false;
      },
    },
  );
}
/** Invalidate the cached detail query for one transcript. */
export const invalidateTranscript = (
  queryClient: QueryClient,
  transcriptId: NonEmptyString,
) => {
  const { queryKey } = $api.queryOptions(
    "get",
    "/v1/transcripts/{transcript_id}",
    { params: { path: { transcript_id: transcriptId } } },
  );
  return queryClient.invalidateQueries({ queryKey });
};
export function useTranscriptCreate() {
const { setError } = useError();
const queryClient = useQueryClient();
return $api.useMutation("post", "/v1/transcripts", {
onSuccess: () => {
return queryClient.invalidateQueries({
queryKey: ["get", TRANSCRIPT_SEARCH_URL],
});
},
onError: (error) => {
setError(error as Error, "There was an error creating the transcript");
},
});
}
export function useTranscriptDelete() {
const { setError } = useError();
const queryClient = useQueryClient();
return $api.useMutation("delete", "/v1/transcripts/{transcript_id}", {
onSuccess: () => {
return queryClient.invalidateQueries({
queryKey: ["get", TRANSCRIPT_SEARCH_URL],
});
},
onError: (error) => {
setError(error as Error, "There was an error deleting the transcript");
},
});
}
/** Patch a transcript, then invalidate its cached detail query. */
export function useTranscriptUpdate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("patch", "/v1/transcripts/{transcript_id}", {
    onError: (error) =>
      setError(error as Error, "There was an error updating the transcript"),
    onSuccess: (_data, variables) => {
      const { transcript_id } = variables.params.path;
      return queryClient.invalidateQueries({
        queryKey: $api.queryOptions("get", "/v1/transcripts/{transcript_id}", {
          params: { path: { transcript_id } },
        }).queryKey,
      });
    },
  });
}
/** Trigger (re)processing of a transcript on the server. */
export function useTranscriptProcess() {
  const { setError } = useError();
  return $api.useMutation("post", "/v1/transcripts/{transcript_id}/process", {
    onError: (error) =>
      setError(error as Error, "There was an error processing the transcript"),
  });
}
// ─── Transcript Topics ───────────────────────────────────────────────────────
/** Topic list for a transcript; waits until an id is provided. */
export function useTranscriptTopics(transcriptId: NonEmptyString | null) {
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}/topics",
    { params: { path: { transcript_id: transcriptId! } } },
    { enabled: !!transcriptId },
  );
}
/** Invalidate the cached topic list for one transcript. */
export const invalidateTranscriptTopics = (
  queryClient: QueryClient,
  transcriptId: NonEmptyString,
) => {
  const { queryKey } = $api.queryOptions(
    "get",
    "/v1/transcripts/{transcript_id}/topics",
    { params: { path: { transcript_id: transcriptId } } },
  );
  return queryClient.invalidateQueries({ queryKey });
};
/** Topics with word-level detail; requires an id and authentication. */
export function useTranscriptTopicsWithWords(
  transcriptId: NonEmptyString | null,
) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!transcriptId && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}/topics/with-words",
    { params: { path: { transcript_id: transcriptId! } } },
    { enabled },
  );
}
/** Per-speaker word breakdown for one topic; needs ids and authentication. */
export function useTranscriptTopicsWithWordsPerSpeaker(
  transcriptId: NonEmptyString | null,
  topicId: string | null,
) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!transcriptId && !!topicId && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}/topics/{topic_id}/words-per-speaker",
    {
      params: {
        path: { transcript_id: transcriptId!, topic_id: topicId! },
      },
    },
    { enabled },
  );
}
// ─── Transcript Audio ────────────────────────────────────────────────────────
/** Waveform data for the transcript's audio; retries intentionally disabled. */
export function useTranscriptWaveform(transcriptId: NonEmptyString | null) {
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}/audio/waveform",
    { params: { path: { transcript_id: transcriptId! } } },
    { enabled: !!transcriptId, retry: false },
  );
}
/** Invalidate the cached waveform query for one transcript. */
export const invalidateTranscriptWaveform = (
  queryClient: QueryClient,
  transcriptId: NonEmptyString,
) => {
  const { queryKey } = $api.queryOptions(
    "get",
    "/v1/transcripts/{transcript_id}/audio/waveform",
    { params: { path: { transcript_id: transcriptId } } },
  );
  return queryClient.invalidateQueries({ queryKey });
};
/** MP3 audio resource for a transcript; requires an id and authentication. */
export function useTranscriptMP3(transcriptId: NonEmptyString | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!transcriptId && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}/audio/mp3",
    { params: { path: { transcript_id: transcriptId! } } },
    { enabled },
  );
}
/** Upload an audio file for a transcript, then refresh its detail query. */
export function useTranscriptUploadAudio() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation(
    "post",
    "/v1/transcripts/{transcript_id}/record/upload",
    {
      onError: (error) =>
        setError(error as Error, "There was an error uploading the audio file"),
      onSuccess: (_data, variables) => {
        const { transcript_id } = variables.params.path;
        return queryClient.invalidateQueries({
          queryKey: $api.queryOptions(
            "get",
            "/v1/transcripts/{transcript_id}",
            { params: { path: { transcript_id } } },
          ).queryKey,
        });
      },
    },
  );
}
// ─── Transcript Participants ─────────────────────────────────────────────────
/** Participant list for a transcript; requires an id and authentication. */
export function useTranscriptParticipants(transcriptId: NonEmptyString | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!transcriptId && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/transcripts/{transcript_id}/participants",
    { params: { path: { transcript_id: transcriptId! } } },
    { enabled },
  );
}
/** Update a participant, then refresh the transcript's participant list. */
export function useTranscriptParticipantUpdate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation(
    "patch",
    "/v1/transcripts/{transcript_id}/participants/{participant_id}",
    {
      onError: (error) =>
        setError(error as Error, "There was an error updating the participant"),
      onSuccess: (_data, variables) => {
        const { transcript_id } = variables.params.path;
        return queryClient.invalidateQueries({
          queryKey: $api.queryOptions(
            "get",
            "/v1/transcripts/{transcript_id}/participants",
            { params: { path: { transcript_id } } },
          ).queryKey,
        });
      },
    },
  );
}
/** Add a participant, then refresh the transcript's participant list. */
export function useTranscriptParticipantCreate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation(
    "post",
    "/v1/transcripts/{transcript_id}/participants",
    {
      onError: (error) =>
        setError(error as Error, "There was an error creating the participant"),
      onSuccess: (_data, variables) => {
        const { transcript_id } = variables.params.path;
        return queryClient.invalidateQueries({
          queryKey: $api.queryOptions(
            "get",
            "/v1/transcripts/{transcript_id}/participants",
            { params: { path: { transcript_id } } },
          ).queryKey,
        });
      },
    },
  );
}
/** Remove a participant, then refresh the transcript's participant list. */
export function useTranscriptParticipantDelete() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation(
    "delete",
    "/v1/transcripts/{transcript_id}/participants/{participant_id}",
    {
      onError: (error) =>
        setError(error as Error, "There was an error deleting the participant"),
      onSuccess: (_data, variables) => {
        const { transcript_id } = variables.params.path;
        return queryClient.invalidateQueries({
          queryKey: $api.queryOptions(
            "get",
            "/v1/transcripts/{transcript_id}/participants",
            { params: { path: { transcript_id } } },
          ).queryKey,
        });
      },
    },
  );
}
// ─── Transcript Speaker Management ──────────────────────────────────────────
/**
 * Patch a speaker assignment, then refresh both the transcript detail and
 * its participant list (both caches embed speaker information).
 */
export function useTranscriptSpeakerAssign() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation(
    "patch",
    "/v1/transcripts/{transcript_id}/speaker/assign",
    {
      onError: (error) =>
        setError(error as Error, "There was an error assigning the speaker"),
      onSuccess: (_data, variables) => {
        const { transcript_id } = variables.params.path;
        const detailKey = $api.queryOptions(
          "get",
          "/v1/transcripts/{transcript_id}",
          { params: { path: { transcript_id } } },
        ).queryKey;
        const participantsKey = $api.queryOptions(
          "get",
          "/v1/transcripts/{transcript_id}/participants",
          { params: { path: { transcript_id } } },
        ).queryKey;
        return Promise.all([
          queryClient.invalidateQueries({ queryKey: detailKey }),
          queryClient.invalidateQueries({ queryKey: participantsKey }),
        ]);
      },
    },
  );
}
/**
 * Merge two speakers, then refresh both the transcript detail and its
 * participant list (both caches embed speaker information).
 */
export function useTranscriptSpeakerMerge() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation(
    "patch",
    "/v1/transcripts/{transcript_id}/speaker/merge",
    {
      onError: (error) =>
        setError(error as Error, "There was an error merging speakers"),
      onSuccess: (_data, variables) => {
        const { transcript_id } = variables.params.path;
        const detailKey = $api.queryOptions(
          "get",
          "/v1/transcripts/{transcript_id}",
          { params: { path: { transcript_id } } },
        ).queryKey;
        const participantsKey = $api.queryOptions(
          "get",
          "/v1/transcripts/{transcript_id}/participants",
          { params: { path: { transcript_id } } },
        ).queryKey;
        return Promise.all([
          queryClient.invalidateQueries({ queryKey: detailKey }),
          queryClient.invalidateQueries({ queryKey: participantsKey }),
        ]);
      },
    },
  );
}
// ─── Transcript Sharing ──────────────────────────────────────────────────────
/** Post a transcript to Zulip. */
export function useTranscriptPostToZulip() {
  const { setError } = useError();
  // @ts-ignore - Zulip endpoint not in OpenAPI spec
  return $api.useMutation("post", "/v1/transcripts/{transcript_id}/zulip", {
    onError: (error) =>
      setError(error as Error, "There was an error posting to Zulip"),
  });
}
/** Email a transcript via the server-side mailer. */
export function useTranscriptSendEmail() {
  const { setError } = useError();
  return $api.useMutation("post", "/v1/transcripts/{transcript_id}/email", {
    onError: (error) =>
      setError(error as Error, "There was an error sending the email"),
  });
}
// ─── Transcript WebRTC ───────────────────────────────────────────────────────
/** Negotiate a WebRTC recording session for a transcript. */
export function useTranscriptWebRTC() {
  const { setError } = useError();
  return $api.useMutation(
    "post",
    "/v1/transcripts/{transcript_id}/record/webrtc",
    {
      onError: (error) =>
        setError(error as Error, "There was an error with WebRTC connection"),
    },
  );
}
// ─── Meetings ────────────────────────────────────────────────────────────────
// Path fragments shared by the meeting-list endpoints. They are used both
// to build typed query keys and to pattern-match keys during invalidation
// (see useMeetingDeactivate's predicate).
const MEETINGS_PATH_PARTIAL = "meetings" as const;
const MEETINGS_ACTIVE_PATH_PARTIAL = `${MEETINGS_PATH_PARTIAL}/active` as const;
const MEETINGS_UPCOMING_PATH_PARTIAL =
  `${MEETINGS_PATH_PARTIAL}/upcoming` as const;
// Every meeting-list query that must refresh when a meeting's lifecycle changes.
const MEETING_LIST_PATH_PARTIALS = [
  MEETINGS_ACTIVE_PATH_PARTIAL,
  MEETINGS_UPCOMING_PATH_PARTIAL,
];
/** Create a meeting in a room, then refresh room and active-meeting caches. */
export function useRoomsCreateMeeting() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("post", "/v1/rooms/{room_name}/meeting", {
    onError: (error) =>
      setError(error as Error, "There was an error creating the meeting"),
    onSuccess: async (_data, variables) => {
      const roomName = variables.params.path.room_name;
      const activeKey = $api.queryOptions(
        "get",
        "/v1/rooms/{room_name}/meetings/active" as `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
        { params: { path: { room_name: roomName } } },
      ).queryKey;
      await Promise.all([
        queryClient.invalidateQueries({
          queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
        }),
        queryClient.invalidateQueries({ queryKey: activeKey }),
      ]);
    },
  });
}
/** Active meetings for a room; only needs a room name (no auth gate). */
export function useRoomActiveMeetings(roomName: string | null) {
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/meetings/active" as `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
    { params: { path: { room_name: roomName! } } },
    { enabled: !!roomName },
  );
}
/** Upcoming meetings for a room; needs a room name and authentication. */
export function useRoomUpcomingMeetings(roomName: string | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!roomName && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/meetings/upcoming" as `/v1/rooms/{room_name}/${typeof MEETINGS_UPCOMING_PATH_PARTIAL}`,
    { params: { path: { room_name: roomName! } } },
    { enabled },
  );
}
/** Single meeting within a room; disabled until both identifiers exist. */
export function useRoomGetMeeting(
  roomName: string | null,
  meetingId: MeetingId | null,
) {
  const enabled = !!roomName && !!meetingId;
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/meetings/{meeting_id}",
    {
      params: {
        path: { room_name: roomName!, meeting_id: meetingId! },
      },
    },
    { enabled },
  );
}
/** Join an existing meeting in a room. */
export function useRoomJoinMeeting() {
  const { setError } = useError();
  return $api.useMutation(
    "post",
    "/v1/rooms/{room_name}/meetings/{meeting_id}/join",
    {
      onError: (error) =>
        setError(error as Error, "There was an error joining the meeting"),
    },
  );
}
/** Start recording an in-progress meeting. */
export function useMeetingStartRecording() {
  const { setError } = useError();
  return $api.useMutation(
    "post",
    "/v1/meetings/{meeting_id}/recordings/start",
    {
      onError: (error) =>
        setError(error as Error, "Failed to start recording"),
    },
  );
}
/** Record a participant's audio-storage consent decision for a meeting. */
export function useMeetingAudioConsent() {
  const { setError } = useError();
  return $api.useMutation("post", "/v1/meetings/{meeting_id}/consent", {
    onError: (error) =>
      setError(error as Error, "There was an error recording consent"),
  });
}
/** Add an email recipient for a meeting. */
export function useMeetingAddEmailRecipient() {
  const { setError } = useError();
  return $api.useMutation("post", "/v1/meetings/{meeting_id}/email-recipient", {
    onError: (error) =>
      setError(error as Error, "There was an error adding the email"),
  });
}
/**
 * Deactivate (end) a meeting, then invalidate every cached meeting-list
 * query — active and upcoming — regardless of which room it belongs to.
 */
export function useMeetingDeactivate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("patch", `/v1/meetings/{meeting_id}/deactivate`, {
    onError: (error) => {
      setError(error as Error, "Failed to end meeting");
    },
    onSuccess: () =>
      queryClient.invalidateQueries({
        // Match any query whose key mentions one of the meeting-list paths.
        predicate: (query) =>
          query.queryKey.some(
            (part) =>
              typeof part === "string" &&
              MEETING_LIST_PATH_PARTIALS.some((fragment) =>
                part.includes(fragment),
              ),
          ),
      }),
  });
}
// ─── API Keys ──────────────────────────────────────────────────────────────────
/** The current user's API keys; requires authentication. */
export function useApiKeysList() {
  const { isAuthenticated } = useAuthReady();
  return $api.useQuery(
    "get",
    "/v1/user/api-keys",
    {},
    { enabled: isAuthenticated },
  );
}
/** Create an API key and refresh the cached key list on success. */
export function useApiKeyCreate() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("post", "/v1/user/api-keys", {
    onError: (error) =>
      setError(error as Error, "There was an error creating the API key"),
    onSuccess: () =>
      queryClient.invalidateQueries({
        queryKey: $api.queryOptions("get", "/v1/user/api-keys").queryKey,
      }),
  });
}
/** Revoke an API key and refresh the cached key list on success. */
export function useApiKeyRevoke() {
  const { setError } = useError();
  const queryClient = useQueryClient();
  return $api.useMutation("delete", "/v1/user/api-keys/{key_id}", {
    onSuccess: () => {
      return queryClient.invalidateQueries({
        queryKey: $api.queryOptions("get", "/v1/user/api-keys").queryKey,
      });
    },
    onError: (error) => {
      // Fixed typo in the user-facing message ("rewoking" -> "revoking").
      setError(error as Error, "There was an error revoking the API key");
    },
  });
}
// ─── Config ──────────────────────────────────────────────────────────────────
/** Application configuration served by the backend. */
export function useConfig() {
  return $api.useQuery("get", "/v1/config", {});
}
// ─── Zulip ───────────────────────────────────────────────────────────────────
/** Zulip streams; callers may add their own `enabled` gate on top of auth. */
export function useZulipStreams(enabled: boolean = true) {
  const { isAuthenticated } = useAuthReady();
  return $api.useQuery(
    "get",
    "/v1/zulip/streams",
    {},
    { enabled: enabled && isAuthenticated },
  );
}
/** Topics in a Zulip stream; stream id 0 is a placeholder while disabled. */
export function useZulipTopics(streamId: number | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!streamId && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/zulip/streams/{stream_id}/topics",
    {
      params: {
        path: { stream_id: enabled ? streamId : 0 },
      },
    },
    { enabled },
  );
}
// ─── Calendar / ICS ──────────────────────────────────────────────────────────
/** Trigger an ICS calendar sync for a room. */
export function useRoomIcsSync() {
  const { setError } = useError();
  return $api.useMutation("post", "/v1/rooms/{room_name}/ics/sync", {
    onError: (error) =>
      setError(error as Error, "There was an error syncing the calendar"),
  });
}
/** ICS sync status for a room; requires a name and authentication. */
export function useRoomIcsStatus(roomName: string | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!roomName && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/ics/status",
    { params: { path: { room_name: roomName! } } },
    { enabled },
  );
}
/** Calendar meetings for a room; requires a name and authentication. */
export function useRoomCalendarEvents(roomName: string | null) {
  const { isAuthenticated } = useAuthReady();
  const enabled = !!roomName && isAuthenticated;
  return $api.useQuery(
    "get",
    "/v1/rooms/{room_name}/meetings",
    { params: { path: { room_name: roomName! } } },
    { enabled },
  );
}

View File

@@ -0,0 +1,12 @@
// Tuple type guaranteeing at least one element.
export type NonEmptyArray<T> = [T, ...T[]];

/** Type guard: true when the array contains at least one element. */
export const isNonEmptyArray = <T>(arr: T[]): arr is NonEmptyArray<T> =>
  arr.length > 0;

/**
 * Narrow an array to NonEmptyArray, throwing when it is empty.
 * @param err optional custom error message for the failure case
 */
export const assertNonEmptyArray = <T>(
  arr: T[],
  err?: string,
): NonEmptyArray<T> => {
  if (!isNonEmptyArray(arr)) {
    throw new Error(err ?? "Expected non-empty array");
  }
  return arr;
};

View File

@@ -0,0 +1,45 @@
/**
* ConsentDialog — ported from Next.js, restyled with Tailwind.
*/
import { useState, useEffect, useRef } from "react";
import { CONSENT_DIALOG_TEXT } from "./constants";
interface ConsentDialogProps {
onAccept: () => void;
onReject: () => void;
}
/**
 * Modal card asking the participant whether the meeting's audio recording
 * may be stored; invokes onAccept / onReject with the choice.
 */
export function ConsentDialog({ onAccept, onReject }: ConsentDialogProps) {
  const acceptButtonRef = useRef<HTMLButtonElement>(null);
  useEffect(() => {
    // Move initial focus to the accept button on mount.
    // NOTE(review): no Escape handler is visible in this component — confirm
    // whether Escape dismissal is wired up by a parent before relying on it.
    acceptButtonRef.current?.focus();
  }, []);
  return (
    <div className="p-6 bg-white/90 backdrop-blur-sm rounded-lg shadow-lg max-w-md mx-auto">
      <div className="flex flex-col items-center gap-4">
        <p className="text-base text-center font-medium text-on-surface">
          {CONSENT_DIALOG_TEXT.question}
        </p>
        <div className="flex items-center gap-4 justify-center">
          <button
            onClick={onReject}
            className="px-4 py-2 text-sm text-on-surface-variant hover:bg-surface-mid rounded-sm transition-colors"
          >
            {CONSENT_DIALOG_TEXT.rejectButton}
          </button>
          <button
            ref={acceptButtonRef}
            onClick={onAccept}
            className="px-4 py-2 text-sm font-semibold text-white bg-gradient-primary rounded-sm hover:brightness-110 active:brightness-95 transition-all"
          >
            {CONSENT_DIALOG_TEXT.acceptButton}
          </button>
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,27 @@
/**
* ConsentDialogButton — floating "Meeting is being recorded" button.
* Restyled from Chakra to Tailwind.
*/
import { CONSENT_DIALOG_TEXT, CONSENT_BUTTON_TOP_OFFSET, CONSENT_BUTTON_LEFT_OFFSET, CONSENT_BUTTON_Z_INDEX } from "./constants";
interface ConsentDialogButtonProps {
onClick: () => void;
}
/**
 * Floating "Meeting is being recorded" button; clicking it reopens the
 * consent dialog. Position and stacking come from shared layout constants.
 */
export function ConsentDialogButton({ onClick }: ConsentDialogButtonProps) {
  return (
    <button
      onClick={onClick}
      className="fixed flex items-center gap-2 px-3 py-2 bg-red-500 text-white text-xs font-semibold rounded-sm shadow-md hover:bg-red-600 active:bg-red-700 transition-colors animate-pulse"
      style={{
        top: CONSENT_BUTTON_TOP_OFFSET,
        left: CONSENT_BUTTON_LEFT_OFFSET,
        zIndex: CONSENT_BUTTON_Z_INDEX,
      }}
    >
      {/* Pulsing dot visually marks the live-recording state. */}
      <span className="w-2 h-2 rounded-full bg-white animate-ping" />
      {CONSENT_DIALOG_TEXT.triggerButton}
    </button>
  );
}

View File

@@ -0,0 +1,15 @@
/**
* RecordingIndicator — visual indicator that a meeting is being recorded.
*/
/**
 * Small pulsing red dot with a "Recording" label; purely presentational.
 */
export function RecordingIndicator() {
  return (
    <div className="flex items-center gap-1.5 text-red-500 text-xs font-medium">
      <span className="relative flex h-2 w-2">
        <span className="animate-ping absolute inline-flex h-full w-full rounded-full bg-red-400 opacity-75" />
        <span className="relative inline-flex rounded-full h-2 w-2 bg-red-500" />
      </span>
      Recording
    </div>
  );
}

View File

@@ -0,0 +1,12 @@
// Layout for the floating consent button (see ConsentDialogButton).
export const CONSENT_BUTTON_TOP_OFFSET = "56px";
export const CONSENT_BUTTON_LEFT_OFFSET = "8px";
export const CONSENT_BUTTON_Z_INDEX = 1000;
// Interval in milliseconds.
// NOTE(review): no consumer is visible in this file — confirm it is still used.
export const TOAST_CHECK_INTERVAL_MS = 100;
// User-facing copy for the consent flow.
export const CONSENT_DIALOG_TEXT = {
  question:
    "Can we have your permission to store this meeting's audio recording on our servers?",
  acceptButton: "Yes, store the audio",
  rejectButton: "No, delete after transcription",
  triggerButton: "Meeting is being recorded",
} as const;

View File

@@ -0,0 +1,7 @@
export { ConsentDialogButton } from "./ConsentDialogButton";
export { ConsentDialog } from "./ConsentDialog";
export { RecordingIndicator } from "./RecordingIndicator";
export { useConsentDialog } from "./useConsentDialog";
export { recordingTypeRequiresConsent } from "./utils";
export * from "./constants";
export * from "./types";

View File

@@ -0,0 +1,14 @@
import { MeetingId } from "../types";
export type ConsentDialogResult = {
showConsentModal: () => void;
consentState: {
ready: boolean;
consentForMeetings?: Map<MeetingId, boolean>;
};
hasAnswered: (meetingId: MeetingId) => boolean;
hasAccepted: (meetingId: MeetingId) => boolean;
consentLoading: boolean;
showRecordingIndicator: boolean;
showConsentButton: boolean;
};

View File

@@ -0,0 +1,82 @@
/**
* useConsentDialog — ported from Next.js, adapted for Tailwind-based UI.
*
* Shows consent dialog as a modal overlay instead of Chakra toast.
*/
import { useCallback, useState } from "react";
import { useRecordingConsent } from "../recordingConsentContext";
import { useMeetingAudioConsent } from "../apiHooks";
import { recordingTypeRequiresConsent } from "./utils";
import type { ConsentDialogResult } from "./types";
import { MeetingId } from "../types";
import type { components } from "../reflector-api";
type Meeting = components["schemas"]["Meeting"];
type UseConsentDialogParams = {
meetingId: MeetingId;
recordingType: Meeting["recording_type"];
skipConsent: boolean;
};
/**
 * Consent-flow state for a meeting: whether to show the recording
 * indicator or the consent prompt, plus an action to open the modal.
 *
 * @param meetingId      meeting the consent answer is recorded against
 * @param recordingType  meeting recording mode; only some types need consent
 * @param skipConsent    when true, the consent prompt is suppressed
 */
export function useConsentDialog({
  meetingId,
  recordingType,
  skipConsent,
}: UseConsentDialogParams): ConsentDialogResult {
  const {
    state: consentState,
    touch,
    hasAnswered,
    hasAccepted,
  } = useRecordingConsent();
  const [modalOpen, setModalOpen] = useState(false);
  const audioConsentMutation = useMeetingAudioConsent();
  // Persist the answer server-side, then mirror it into local consent state.
  // NOTE(review): handleConsent is neither returned nor referenced below —
  // confirm whether the modal wiring was lost in the port from Next.js.
  const handleConsent = useCallback(
    async (given: boolean) => {
      try {
        await audioConsentMutation.mutateAsync({
          params: {
            path: { meeting_id: meetingId },
          },
          body: {
            consent_given: given,
          },
        });
        touch(meetingId, given);
      } catch (error) {
        // Swallow: the modal still closes even if the server call failed.
        console.error("Error submitting consent:", error);
      }
      setModalOpen(false);
    },
    [audioConsentMutation, touch, meetingId],
  );
  const showConsentModal = useCallback(() => {
    // Idempotent: re-invoking while already open is a no-op.
    if (modalOpen) return;
    setModalOpen(true);
  }, [modalOpen]);
  const requiresConsent = Boolean(
    recordingType && recordingTypeRequiresConsent(recordingType),
  );
  // Indicator shows once consent is settled (accepted, or not asked for).
  const showRecordingIndicator =
    requiresConsent && (skipConsent || hasAccepted(meetingId));
  // Prompt shows while an answer is still outstanding.
  const showConsentButton =
    requiresConsent && !skipConsent && !hasAnswered(meetingId);
  return {
    showConsentModal,
    consentState,
    hasAnswered,
    hasAccepted,
    consentLoading: audioConsentMutation.isPending,
    showRecordingIndicator,
    showConsentButton,
  };
}

View File

@@ -0,0 +1,10 @@
import type { components } from "../reflector-api";
type RecordingType = components["schemas"]["Meeting"]["recording_type"];
/**
 * Whether a meeting's recording mode requires participant consent.
 * Compared as plain strings since the server may send values beyond
 * the generated union.
 */
export const recordingTypeRequiresConsent = (
  recordingType: RecordingType,
): boolean => {
  switch (recordingType as string) {
    case "cloud":
    case "raw-tracks":
      return true;
    default:
      return false;
  }
};

View File

@@ -0,0 +1,91 @@
/**
* Error context — Vite-compatible replacement for the Next.js ErrorProvider.
* Provides a setError(error, message) function used by API mutation hooks.
*/
import React, { createContext, useContext, useState, useCallback } from "react";
interface ErrorState {
error: Error | null;
message: string | null;
}
interface ErrorContextValue {
errorState: ErrorState;
setError: (error: Error, message?: string) => void;
clearError: () => void;
}
const ErrorContext = createContext<ErrorContextValue | undefined>(undefined);
/**
 * Provides setError/clearError to the tree and renders a dismissible
 * toast for the most recent error; errors auto-dismiss after 8 seconds.
 * NOTE(review): built with React.createElement rather than JSX —
 * presumably so the file can stay plain .ts; confirm before converting.
 */
export function ErrorProvider({ children }: { children: React.ReactNode }) {
  const [errorState, setErrorState] = useState<ErrorState>({
    error: null,
    message: null,
  });
  const setError = useCallback((error: Error, message?: string) => {
    console.error(message || "An error occurred:", error);
    setErrorState({ error, message: message || error.message });
    // Auto-dismiss after 8 seconds
    setTimeout(() => {
      // Only clear if this error is still the one on screen; a newer error
      // owns its own timer and must not be cleared by this one.
      setErrorState((prev) =>
        prev.error === error ? { error: null, message: null } : prev,
      );
    }, 8000);
  }, []);
  const clearError = useCallback(() => {
    setErrorState({ error: null, message: null });
  }, []);
  return React.createElement(
    ErrorContext.Provider,
    { value: { errorState, setError, clearError } },
    children,
    // Render error toast if there's an active error
    errorState.message
      ? React.createElement(
          "div",
          {
            className:
              "fixed bottom-6 right-6 z-[9999] max-w-md bg-red-50 border border-red-200 text-red-800 px-5 py-4 rounded-md shadow-lg animate-in slide-in-from-bottom-4 flex items-start gap-3",
            role: "alert",
          },
          React.createElement(
            "div",
            { className: "flex-1" },
            React.createElement(
              "p",
              { className: "text-sm font-semibold" },
              "Error",
            ),
            React.createElement(
              "p",
              { className: "text-sm mt-0.5 text-red-700" },
              errorState.message,
            ),
          ),
          // Manual dismiss control.
          React.createElement(
            "button",
            {
              onClick: clearError,
              className:
                "text-red-400 hover:text-red-600 text-lg leading-none mt-0.5",
              "aria-label": "Dismiss error",
            },
            "×",
          ),
        )
      : null,
  );
}
export function useError() {
const context = useContext(ErrorContext);
if (context === undefined) {
throw new Error("useError must be used within an ErrorProvider");
}
return context;
}

View File

@@ -0,0 +1,49 @@
import { isNonEmptyArray, NonEmptyArray } from "./array";
/**
 * Decide whether an error should surface in the UI.
 *
 * 404/403 API responses ("ResponseError") and network-level failures
 * ("FetchError") are expected conditions handled elsewhere, so they are
 * suppressed; everything else is shown.
 *
 * Fix: the original dereferenced `error["response"].status` without a
 * guard, so a ResponseError missing its `response` crashed here. We now
 * use optional chaining (and strict equality) — such an error is shown.
 */
export function shouldShowError(error: Error | null | undefined) {
  if (error?.name === "ResponseError") {
    // ResponseError carries the fetch Response; guard in case it is absent.
    const status = (error as { response?: { status?: number } }).response
      ?.status;
    if (status === 404 || status === 403) return false;
  }
  if (error?.name === "FetchError") return false;
  return true;
}
/**
 * Collapse several thrown values into one Error whose message joins their
 * string forms, one per line. If the merge itself throws, log and fall
 * back to the first collected value.
 */
const defaultMergeErrors = (ex: NonEmptyArray<unknown>): unknown => {
  try {
    const parts = ex.map((e) =>
      e ? (e.toString ? e.toString() : JSON.stringify(e)) : `${e}`,
    );
    return new Error(parts.join("\n"));
  } catch (e) {
    console.error("Error merging errors:", e);
    return ex[0];
  }
};
// Maps a tuple of thunks to the tuple of their return types.
type ReturnTypes<T extends readonly (() => any)[]> = {
  [K in keyof T]: T[K] extends () => infer R ? R : never;
};

/**
 * "Sequence" semantics for throws: run every thunk, collecting results
 * and caught exceptions separately. If anything threw, merge all errors
 * into one and rethrow it; otherwise return the results tuple.
 */
export function sequenceThrows<Fns extends readonly (() => any)[]>(
  ...fs: Fns
): ReturnTypes<Fns> {
  const collected: unknown[] = [];
  const failures: unknown[] = [];
  fs.forEach((thunk) => {
    try {
      collected.push(thunk());
    } catch (err) {
      failures.push(err);
    }
  });
  if (failures.length > 0) {
    throw defaultMergeErrors(failures as NonEmptyArray<unknown>);
  }
  return collected as ReturnTypes<Fns>;
}

View File

@@ -0,0 +1,44 @@
/**
* Feature flag system — ported from Next.js app/lib/features.ts
* Uses Vite env vars instead of server-side data-env injection.
*/
/** All known feature-flag names. */
export const FEATURES = [
  "requireLogin",
  "privacy",
  "browse",
  "sendToZulip",
  "rooms",
  "emailTranscript",
] as const;
export type FeatureName = (typeof FEATURES)[number];
export type Features = Readonly<Record<FeatureName, boolean>>;
/** Values used when the corresponding env var is unset or empty. */
export const DEFAULT_FEATURES: Features = {
  requireLogin: true,
  privacy: true,
  browse: true,
  sendToZulip: true,
  rooms: true,
  emailTranscript: false,
} as const;
/** Vite env var backing each flag. */
const FEATURE_TO_ENV: Record<FeatureName, string> = {
  requireLogin: "VITE_FEATURE_REQUIRE_LOGIN",
  privacy: "VITE_FEATURE_PRIVACY",
  browse: "VITE_FEATURE_BROWSE",
  sendToZulip: "VITE_FEATURE_SEND_TO_ZULIP",
  rooms: "VITE_FEATURE_ROOMS",
  emailTranscript: "VITE_FEATURE_EMAIL_TRANSCRIPT",
};
/**
 * Resolve a feature flag: an explicit env value wins (only the literal
 * string "true" enables); unset/empty falls back to DEFAULT_FEATURES.
 */
export const featureEnabled = (featureName: FeatureName): boolean => {
  const raw = import.meta.env[FEATURE_TO_ENV[featureName]];
  const unset = raw === undefined || raw === null || raw === "";
  return unset ? DEFAULT_FEATURES[featureName] : raw === "true";
};

View File

@@ -0,0 +1,15 @@
import { QueryClient } from "@tanstack/react-query";
// Shared TanStack Query client with conservative defaults: data is fresh
// for a minute, kept in cache for five, retried once on failure, and not
// refetched just because the window regains focus. Mutations never retry.
const STALE_TIME_MS = 60 * 1000; // 1 minute
const GC_TIME_MS = 5 * 60 * 1000; // 5 minutes (formerly cacheTime)

export const queryClient = new QueryClient({
  defaultOptions: {
    queries: {
      staleTime: STALE_TIME_MS,
      gcTime: GC_TIME_MS,
      retry: 1,
      refetchOnWindowFocus: false,
    },
    mutations: {
      retry: 0,
    },
  },
});

View File

@@ -0,0 +1,153 @@
/**
* RecordingConsentProvider — ported from Next.js app/recordingConsentContext.tsx
*
* Manages per-meeting audio recording consent state in localStorage.
*/
import React, { createContext, useContext, useEffect, useState } from "react";
import { MeetingId } from "./types";
// meeting id -> whether the participant accepted recording
type ConsentMap = Map<MeetingId, boolean>;
// Not ready until the stored answers have been read from localStorage on
// mount; consumers get "unanswered" results until then.
type ConsentContextState =
  | { ready: false }
  | {
      ready: true;
      consentForMeetings: ConsentMap;
    };
interface RecordingConsentContextValue {
  state: ConsentContextState;
  // Record (or overwrite) a meeting's consent answer.
  touch: (meetingId: MeetingId, accepted: boolean) => void;
  // Whether any answer (accept or reject) was recorded for the meeting.
  hasAnswered: (meetingId: MeetingId) => boolean;
  // Whether the recorded answer was an acceptance.
  hasAccepted: (meetingId: MeetingId) => boolean;
}
// undefined until the provider mounts; useRecordingConsent guards this.
const RecordingConsentContext = createContext<
  RecordingConsentContextValue | undefined
>(undefined);
export const useRecordingConsent = () => {
const context = useContext(RecordingConsentContext);
if (!context) {
throw new Error(
"useRecordingConsent must be used within RecordingConsentProvider",
);
}
return context;
};
const LOCAL_STORAGE_KEY = "recording_consent_meetings";
// Consent decisions are persisted as single characters to keep entries
// compact.
const ACCEPTED = "T" as const;
const REJECTED = "F" as const;
type Consent = typeof ACCEPTED | typeof REJECTED;
const SEPARATOR = "|" as const;
// Serialized entry: "<meetingId>|<T|F>". A bare meeting id (no separator)
// is the legacy format and is treated as accepted.
type Entry = `${MeetingId}${typeof SEPARATOR}${Consent}`;
type EntryAndDefault = Entry | MeetingId;
/**
 * Serialize one consent decision for localStorage.
 *
 * Fix (consistency): use the declared SEPARATOR constant instead of a
 * hard-coded "|" so encode and decode cannot drift apart. Output is
 * byte-identical to before.
 */
const encodeEntry = (meetingId: MeetingId, accepted: boolean): Entry =>
  `${meetingId}${SEPARATOR}${accepted ? ACCEPTED : REJECTED}`;
/**
 * Parse a stored entry back into its meeting id and decision. Splits on
 * the LAST separator so a meeting id containing "|" still round-trips.
 */
const decodeEntry = (
  entry: EntryAndDefault,
): { meetingId: MeetingId; accepted: boolean } | null => {
  const pipeIndex = entry.lastIndexOf(SEPARATOR);
  if (pipeIndex === -1) {
    // Legacy format: no pipe means accepted (backward compat)
    return { meetingId: entry as MeetingId, accepted: true };
  }
  const suffix = entry.slice(pipeIndex + 1);
  const meetingId = entry.slice(0, pipeIndex) as MeetingId;
  // Any suffix other than the explicit rejection marker counts as accepted.
  const accepted = suffix !== REJECTED;
  return { meetingId, accepted };
};
/**
 * Provider that tracks per-meeting recording-consent answers, persisted in
 * localStorage.
 *
 * State starts as { ready: false } and flips to ready once storage has
 * been read in the mount effect; until then touch() is a no-op and the
 * has* queries report false.
 */
export const RecordingConsentProvider: React.FC<{
  children: React.ReactNode;
}> = ({ children }) => {
  const [state, setState] = useState<ConsentContextState>({ ready: false });
  // Persist the map, keeping only the 5 most recent entries. Storage
  // failures (quota, private browsing, SSR) are logged and ignored so
  // consent still works in-memory for the session.
  const safeWriteToStorage = (consentMap: ConsentMap): void => {
    try {
      if (typeof window !== "undefined" && window.localStorage) {
        const entries = Array.from(consentMap.entries())
          .slice(-5)
          .map(([id, accepted]) => encodeEntry(id, accepted));
        localStorage.setItem(LOCAL_STORAGE_KEY, JSON.stringify(entries));
      }
    } catch (error) {
      console.error("Failed to save consent data to localStorage:", error);
    }
  };
  // Record (or overwrite) a meeting's answer and persist it. Copies the
  // map so React sees a new state reference.
  const touch = (meetingId: MeetingId, accepted: boolean): void => {
    if (!state.ready) {
      console.warn("Attempted to touch consent before context is ready");
      return;
    }
    const newMap = new Map<MeetingId, boolean>(state.consentForMeetings);
    newMap.set(meetingId, accepted);
    safeWriteToStorage(newMap);
    setState({ ready: true, consentForMeetings: newMap });
  };
  // True if any answer (accept or reject) was recorded for the meeting.
  const hasAnswered = (meetingId: MeetingId): boolean => {
    if (!state.ready) return false;
    return state.consentForMeetings.has(meetingId);
  };
  // True only for an explicit acceptance.
  const hasAccepted = (meetingId: MeetingId): boolean => {
    if (!state.ready) return false;
    return state.consentForMeetings.get(meetingId) === true;
  };
  // Initialize on mount: read and decode stored entries; any parse or
  // format problem resets to an empty (but ready) map rather than failing.
  useEffect(() => {
    try {
      if (typeof window === "undefined" || !window.localStorage) {
        setState({ ready: true, consentForMeetings: new Map() });
        return;
      }
      const stored = localStorage.getItem(LOCAL_STORAGE_KEY);
      if (!stored) {
        setState({ ready: true, consentForMeetings: new Map() });
        return;
      }
      const parsed = JSON.parse(stored);
      if (!Array.isArray(parsed)) {
        console.warn("Invalid consent data format in localStorage, resetting");
        setState({ ready: true, consentForMeetings: new Map() });
        return;
      }
      const consentForMeetings = new Map<MeetingId, boolean>();
      for (const entry of parsed) {
        const decoded = decodeEntry(entry);
        if (decoded) {
          consentForMeetings.set(decoded.meetingId, decoded.accepted);
        }
      }
      setState({ ready: true, consentForMeetings });
    } catch (error) {
      console.error("Failed to parse consent data from localStorage:", error);
      setState({ ready: true, consentForMeetings: new Map() });
    }
  }, []);
  const value: RecordingConsentContextValue = {
    state,
    touch,
    hasAnswered,
    hasAccepted,
  };
  return (
    <RecordingConsentContext.Provider value={value}>
      {children}
    </RecordingConsentContext.Provider>
  );
};

4421
www/appv2/src/lib/reflector-api.d.ts vendored Normal file

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More