Compare commits


5 Commits

Author SHA1 Message Date
Igor Loskutov
e1b790c5a8 Add Modal backend for audio mixdown 2026-01-21 17:06:17 -05:00
c8743fdf1c fix webhook tests (#826)
Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-01-21 14:31:20 -05:00
8a293882ad timeout to untighten ws python loop (#821)
Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-01-20 16:29:09 -05:00
d83c4a30b4 chore(main): release 0.28.0 (#820) 2026-01-20 12:35:26 -05:00
3b6540eae5 feat: worker affinity (#819)
* worker affinity

* worker affinity

* worker affinity

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2026-01-20 12:27:16 -05:00
17 changed files with 856 additions and 478 deletions

View File

@@ -1,5 +1,12 @@
 # Changelog
 
+## [0.28.0](https://github.com/Monadical-SAS/reflector/compare/v0.27.0...v0.28.0) (2026-01-20)
+
+### Features
+
+* worker affinity ([#819](https://github.com/Monadical-SAS/reflector/issues/819)) ([3b6540e](https://github.com/Monadical-SAS/reflector/commit/3b6540eae5b597449f98661bdf15483b77be3268))
+
 ## [0.27.0](https://github.com/Monadical-SAS/reflector/compare/v0.26.0...v0.27.0) (2025-12-26)

View File

@@ -34,7 +34,7 @@ services:
     environment:
       ENTRYPOINT: beat
-  hatchet-worker:
+  hatchet-worker-cpu:
     build:
       context: server
     volumes:
@@ -43,7 +43,20 @@ services:
     env_file:
       - ./server/.env
     environment:
-      ENTRYPOINT: hatchet-worker
+      ENTRYPOINT: hatchet-worker-cpu
+    depends_on:
+      hatchet:
+        condition: service_healthy
+  hatchet-worker-llm:
+    build:
+      context: server
+    volumes:
+      - ./server/:/app/
+      - /app/.venv
+    env_file:
+      - ./server/.env
+    environment:
+      ENTRYPOINT: hatchet-worker-llm
     depends_on:
       hatchet:
        condition: service_healthy

View File

@@ -131,6 +131,15 @@ if [ -z "$DIARIZER_URL" ]; then
 fi
 echo " -> $DIARIZER_URL"
 
+echo ""
+echo "Deploying mixdown (CPU audio processing)..."
+MIXDOWN_URL=$(modal deploy reflector_mixdown.py 2>&1 | grep -o 'https://[^ ]*web.modal.run' | head -1)
+if [ -z "$MIXDOWN_URL" ]; then
+    echo "Error: Failed to deploy mixdown. Check Modal dashboard for details."
+    exit 1
+fi
+echo " -> $MIXDOWN_URL"
+
 # --- Output Configuration ---
 echo ""
 echo "=========================================="
@@ -147,4 +156,8 @@ echo ""
 echo "DIARIZATION_BACKEND=modal"
 echo "DIARIZATION_URL=$DIARIZER_URL"
 echo "DIARIZATION_MODAL_API_KEY=$API_KEY"
+echo ""
+echo "MIXDOWN_BACKEND=modal"
+echo "MIXDOWN_URL=$MIXDOWN_URL"
+echo "MIXDOWN_MODAL_API_KEY=$API_KEY"
 echo "# --- End Modal Configuration ---"

View File

@@ -0,0 +1,379 @@
"""
Reflector GPU backend - audio mixdown
======================================
CPU-intensive audio mixdown service for combining multiple audio tracks.
Uses PyAV filter graph (amix) for high-quality audio mixing.
"""
import os
import tempfile
import time
from fractions import Fraction
import modal
MIXDOWN_TIMEOUT = 900 # 15 minutes
SCALEDOWN_WINDOW = 60 # 1 minute idle before shutdown
app = modal.App("reflector-mixdown")
# CPU-based image (no GPU needed for audio processing)
image = (
modal.Image.debian_slim(python_version="3.12")
.apt_install("ffmpeg") # Required by PyAV
.pip_install(
"av==13.1.0", # PyAV for audio processing
"requests==2.32.3", # HTTP for presigned URL downloads/uploads
"fastapi==0.115.12", # API framework
)
)
@app.function(
cpu=4.0, # 4 CPU cores for audio processing
timeout=MIXDOWN_TIMEOUT,
scaledown_window=SCALEDOWN_WINDOW,
secrets=[modal.Secret.from_name("reflector-gpu")],
image=image,
)
@modal.concurrent(max_inputs=10)
@modal.asgi_app()
def web():
import logging
import secrets
import shutil
import av
import requests
from av.audio.resampler import AudioResampler
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from pydantic import BaseModel
# Setup logging
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
app = FastAPI()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
# Validate API key exists at startup
API_KEY = os.environ.get("REFLECTOR_GPU_APIKEY")
if not API_KEY:
raise RuntimeError("REFLECTOR_GPU_APIKEY not configured in Modal secrets")
def apikey_auth(apikey: str = Depends(oauth2_scheme)):
# Use constant-time comparison to prevent timing attacks
if secrets.compare_digest(apikey, API_KEY):
return
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid API key",
headers={"WWW-Authenticate": "Bearer"},
)
class MixdownRequest(BaseModel):
track_urls: list[str]
output_url: str
target_sample_rate: int = 48000
expected_duration_sec: float | None = None
class MixdownResponse(BaseModel):
duration_ms: float
tracks_mixed: int
audio_uploaded: bool
def download_track(url: str, temp_dir: str, index: int) -> str:
"""Download track from presigned URL to temp file using streaming."""
logger.info(f"Downloading track {index + 1}")
response = requests.get(url, stream=True, timeout=300)
if response.status_code == 404:
raise HTTPException(status_code=404, detail=f"Track {index} not found")
if response.status_code == 403:
raise HTTPException(
status_code=403, detail=f"Track {index} presigned URL expired"
)
response.raise_for_status()
temp_path = os.path.join(temp_dir, f"track_{index}.webm")
total_bytes = 0
with open(temp_path, "wb") as f:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
total_bytes += len(chunk)
logger.info(f"Track {index + 1} downloaded: {total_bytes} bytes")
return temp_path
def mixdown_tracks_modal(
track_paths: list[str],
output_path: str,
target_sample_rate: int,
expected_duration_sec: float | None,
logger,
) -> float:
"""Mix multiple audio tracks using PyAV filter graph.
Args:
track_paths: List of local file paths to audio tracks
output_path: Local path for output MP3 file
target_sample_rate: Sample rate for output (Hz)
expected_duration_sec: Optional fallback duration if container metadata unavailable
logger: Logger instance for progress tracking
Returns:
Duration in milliseconds
"""
logger.info(f"Starting mixdown of {len(track_paths)} tracks")
# Build PyAV filter graph: N abuffer -> amix -> aformat -> sink
graph = av.filter.Graph()
inputs = []
for idx in range(len(track_paths)):
args = (
f"time_base=1/{target_sample_rate}:"
f"sample_rate={target_sample_rate}:"
f"sample_fmt=s32:"
f"channel_layout=stereo"
)
in_ctx = graph.add("abuffer", args=args, name=f"in{idx}")
inputs.append(in_ctx)
mixer = graph.add("amix", args=f"inputs={len(inputs)}:normalize=0", name="mix")
fmt = graph.add(
"aformat",
args=f"sample_fmts=s32:channel_layouts=stereo:sample_rates={target_sample_rate}",
name="fmt",
)
sink = graph.add("abuffersink", name="out")
# Connect inputs to mixer (no delays for Modal implementation)
for idx, in_ctx in enumerate(inputs):
in_ctx.link_to(mixer, 0, idx)
mixer.link_to(fmt)
fmt.link_to(sink)
graph.configure()
# Open all containers
containers = []
try:
for i, path in enumerate(track_paths):
try:
c = av.open(path)
containers.append(c)
except Exception as e:
logger.warning(
f"Failed to open container {i}: {e}",
)
if not containers:
raise ValueError("Could not open any track containers")
# Calculate total duration for progress reporting
max_duration_sec = 0.0
for c in containers:
if c.duration is not None:
dur_sec = c.duration / av.time_base
max_duration_sec = max(max_duration_sec, dur_sec)
if max_duration_sec == 0.0 and expected_duration_sec:
max_duration_sec = expected_duration_sec
# Setup output container
out_container = av.open(output_path, "w", format="mp3")
out_stream = out_container.add_stream("libmp3lame", rate=target_sample_rate)
decoders = [c.decode(audio=0) for c in containers]
active = [True] * len(decoders)
resamplers = [
AudioResampler(format="s32", layout="stereo", rate=target_sample_rate)
for _ in decoders
]
current_max_time = 0.0
last_log_time = time.monotonic()
start_time = time.monotonic()
total_duration = 0
while any(active):
for i, (dec, is_active) in enumerate(zip(decoders, active)):
if not is_active:
continue
try:
frame = next(dec)
except StopIteration:
active[i] = False
inputs[i].push(None) # Signal end of stream
continue
if frame.sample_rate != target_sample_rate:
continue
# Progress logging (every 5 seconds)
if frame.time is not None:
current_max_time = max(current_max_time, frame.time)
now = time.monotonic()
if now - last_log_time >= 5.0:
elapsed = now - start_time
if max_duration_sec > 0:
progress_pct = min(
100.0, (current_max_time / max_duration_sec) * 100
)
logger.info(
f"Mixdown progress: {progress_pct:.1f}% @ {current_max_time:.1f}s (elapsed: {elapsed:.1f}s)"
)
else:
logger.info(
f"Mixdown progress: @ {current_max_time:.1f}s (elapsed: {elapsed:.1f}s)"
)
last_log_time = now
out_frames = resamplers[i].resample(frame) or []
for rf in out_frames:
rf.sample_rate = target_sample_rate
rf.time_base = Fraction(1, target_sample_rate)
inputs[i].push(rf)
# Pull mixed frames from sink and encode
while True:
try:
mixed = sink.pull()
except Exception:
break
mixed.sample_rate = target_sample_rate
mixed.time_base = Fraction(1, target_sample_rate)
# Encode and mux
for packet in out_stream.encode(mixed):
out_container.mux(packet)
total_duration += packet.duration
# Flush remaining frames from filter graph
while True:
try:
mixed = sink.pull()
except Exception:
break
mixed.sample_rate = target_sample_rate
mixed.time_base = Fraction(1, target_sample_rate)
for packet in out_stream.encode(mixed):
out_container.mux(packet)
total_duration += packet.duration
# Flush encoder
for packet in out_stream.encode():
out_container.mux(packet)
total_duration += packet.duration
# Calculate duration in milliseconds
if total_duration > 0:
# Use the same calculation as AudioFileWriterProcessor
duration_ms = round(
float(total_duration * out_stream.time_base * 1000), 2
)
else:
duration_ms = 0.0
out_container.close()
logger.info(f"Mixdown complete: duration={duration_ms}ms")
finally:
# Cleanup all containers
for c in containers:
if c is not None:
try:
c.close()
except Exception:
pass
return duration_ms
@app.post("/v1/audio/mixdown", dependencies=[Depends(apikey_auth)])
def mixdown(request: MixdownRequest) -> MixdownResponse:
"""Mix multiple audio tracks into a single MP3 file.
Tracks are downloaded from presigned S3 URLs, mixed using PyAV,
and uploaded to a presigned S3 PUT URL.
"""
if not request.track_urls:
raise HTTPException(status_code=400, detail="No track URLs provided")
logger.info(f"Mixdown request: {len(request.track_urls)} tracks")
temp_dir = tempfile.mkdtemp()
temp_files = []
output_mp3_path = None
try:
# Download all tracks
for i, url in enumerate(request.track_urls):
temp_path = download_track(url, temp_dir, i)
temp_files.append(temp_path)
# Mix tracks
output_mp3_path = os.path.join(temp_dir, "mixed.mp3")
duration_ms = mixdown_tracks_modal(
temp_files,
output_mp3_path,
request.target_sample_rate,
request.expected_duration_sec,
logger,
)
# Upload result to S3
logger.info("Uploading result to S3")
file_size = os.path.getsize(output_mp3_path)
with open(output_mp3_path, "rb") as f:
upload_response = requests.put(
request.output_url, data=f, timeout=300
)
if upload_response.status_code == 403:
raise HTTPException(
status_code=403, detail="Output presigned URL expired"
)
upload_response.raise_for_status()
logger.info(f"Upload complete: {file_size} bytes")
return MixdownResponse(
duration_ms=duration_ms,
tracks_mixed=len(request.track_urls),
audio_uploaded=True,
)
except HTTPException:
raise
except Exception as e:
logger.error(f"Mixdown failed: {e}", exc_info=True)
raise HTTPException(status_code=500, detail=f"Mixdown failed: {str(e)}")
finally:
# Cleanup temp files
for temp_path in temp_files:
try:
os.unlink(temp_path)
except Exception as e:
logger.warning(f"Failed to cleanup temp file {temp_path}: {e}")
if output_mp3_path and os.path.exists(output_mp3_path):
try:
os.unlink(output_mp3_path)
except Exception as e:
logger.warning(f"Failed to cleanup output file {output_mp3_path}: {e}")
try:
shutil.rmtree(temp_dir)
except Exception as e:
logger.warning(f"Failed to cleanup temp directory {temp_dir}: {e}")
return app
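
For reference, a minimal client call against this endpoint might look like the following. This is a sketch: the web.modal.run URL is a placeholder for the one printed by modal deploy, and the presigned GET/PUT URLs must be generated against your own bucket.

import os

import httpx

MIXDOWN_URL = "https://example--reflector-mixdown-web.modal.run"  # hypothetical deploy URL
API_KEY = os.environ["REFLECTOR_GPU_APIKEY"]  # value of the "reflector-gpu" Modal secret

response = httpx.post(
    f"{MIXDOWN_URL}/v1/audio/mixdown",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "track_urls": ["https://bucket.example/track_0.webm?X-Amz-Signature=..."],  # presigned GET
        "output_url": "https://bucket.example/audio.mp3?X-Amz-Signature=...",  # presigned PUT
        "target_sample_rate": 48000,
        "expected_duration_sec": None,
    },
    timeout=900.0,  # mixdown of long recordings can take minutes
)
response.raise_for_status()
print(response.json())  # {"duration_ms": ..., "tracks_mixed": 1, "audio_uploaded": true}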

View File

@@ -1,77 +0,0 @@
"""
Run Hatchet workers for the multitrack pipeline.
Runs as a separate process, just like Celery workers.
Usage:
uv run -m reflector.hatchet.run_workers
# Or via docker:
docker compose exec server uv run -m reflector.hatchet.run_workers
"""
import signal
import sys
from hatchet_sdk.rate_limit import RateLimitDuration
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, LLM_RATE_LIMIT_PER_SECOND
from reflector.logger import logger
from reflector.settings import settings
def main() -> None:
"""Start Hatchet worker polling."""
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting workers")
sys.exit(1)
if not settings.HATCHET_CLIENT_TOKEN:
logger.error("HATCHET_CLIENT_TOKEN is not set")
sys.exit(1)
logger.info(
"Starting Hatchet workers",
debug=settings.HATCHET_DEBUG,
)
# Import here (not top-level) - workflow modules call HatchetClientManager.get_client()
# at module level because Hatchet SDK decorators (@workflow.task) bind at import time.
# Can't use lazy init: decorators need the client object when function is defined.
from reflector.hatchet.client import HatchetClientManager # noqa: PLC0415
from reflector.hatchet.workflows import ( # noqa: PLC0415
daily_multitrack_pipeline,
subject_workflow,
topic_chunk_workflow,
track_workflow,
)
hatchet = HatchetClientManager.get_client()
hatchet.rate_limits.put(
LLM_RATE_LIMIT_KEY, LLM_RATE_LIMIT_PER_SECOND, RateLimitDuration.SECOND
)
worker = hatchet.worker(
"reflector-pipeline-worker",
workflows=[
daily_multitrack_pipeline,
subject_workflow,
topic_chunk_workflow,
track_workflow,
],
)
def shutdown_handler(signum: int, frame) -> None:
logger.info("Received shutdown signal, stopping workers...")
# Worker cleanup happens automatically on exit
sys.exit(0)
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
logger.info("Starting Hatchet worker polling...")
worker.start()
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,48 @@
"""
CPU-heavy worker pool for audio processing tasks.
Handles ONLY: mixdown_tracks
Configuration:
- slots=1: Only mixdown (already serialized globally with max_runs=1)
- Worker affinity: pool=cpu-heavy
"""
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
daily_multitrack_pipeline,
)
from reflector.logger import logger
from reflector.settings import settings
def main():
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting CPU workers")
return
hatchet = HatchetClientManager.get_client()
logger.info(
"Starting Hatchet CPU worker pool (mixdown only)",
worker_name="cpu-worker-pool",
slots=1,
labels={"pool": "cpu-heavy"},
)
cpu_worker = hatchet.worker(
"cpu-worker-pool",
slots=1, # Only 1 mixdown at a time (already serialized globally)
labels={
"pool": "cpu-heavy",
},
workflows=[daily_multitrack_pipeline],
)
try:
cpu_worker.start()
except KeyboardInterrupt:
logger.info("Received shutdown signal, stopping CPU workers...")
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,56 @@
"""
LLM/I/O worker pool for all non-CPU tasks.
Handles: all tasks except mixdown_tracks (transcription, LLM inference, orchestration)
"""
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
daily_multitrack_pipeline,
)
from reflector.hatchet.workflows.subject_processing import subject_workflow
from reflector.hatchet.workflows.topic_chunk_processing import topic_chunk_workflow
from reflector.hatchet.workflows.track_processing import track_workflow
from reflector.logger import logger
from reflector.settings import settings
SLOTS = 10
WORKER_NAME = "llm-worker-pool"
POOL = "llm-io"
def main():
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting LLM workers")
return
hatchet = HatchetClientManager.get_client()
logger.info(
"Starting Hatchet LLM worker pool (all tasks except mixdown)",
worker_name=WORKER_NAME,
slots=SLOTS,
labels={"pool": POOL},
)
llm_worker = hatchet.worker(
WORKER_NAME,
slots=SLOTS, # not all slots are probably used
labels={
"pool": POOL,
},
workflows=[
daily_multitrack_pipeline,
topic_chunk_workflow,
subject_workflow,
track_workflow,
],
)
try:
llm_worker.start()
except KeyboardInterrupt:
logger.info("Received shutdown signal, stopping LLM workers...")
if __name__ == "__main__":
main()

View File

@@ -23,7 +23,12 @@ from pathlib import Path
 from typing import Any, Callable, Coroutine, Protocol, TypeVar
 
 import httpx
-from hatchet_sdk import Context
+from hatchet_sdk import (
+    ConcurrencyExpression,
+    ConcurrencyLimitStrategy,
+    Context,
+)
+from hatchet_sdk.labels import DesiredWorkerLabel
 from pydantic import BaseModel
 
 from reflector.dailyco_api.client import DailyApiClient
@@ -467,10 +472,24 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksRes
     parents=[process_tracks],
     execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
     retries=3,
+    desired_worker_labels={
+        "pool": DesiredWorkerLabel(
+            value="cpu-heavy",
+            required=True,
+            weight=100,
+        ),
+    },
+    concurrency=[
+        ConcurrencyExpression(
+            expression="'mixdown-global'",
+            max_runs=1,  # serialize mixdown to prevent resource contention
+            limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,  # Queue
+        )
+    ],
 )
 @with_error_handling(TaskName.MIXDOWN_TRACKS)
 async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
-    """Mix all padded tracks into single audio file using PyAV (same as Celery)."""
+    """Mix all padded tracks into single audio file using PyAV or Modal backend."""
     ctx.log("mixdown_tracks: mixing padded tracks into single audio file")
 
     track_result = ctx.task_output(process_tracks)
@@ -494,7 +513,7 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
     storage = _spawn_storage()
 
-    # Presign URLs on demand (avoids stale URLs on workflow replay)
+    # Presign URLs for padded tracks (same expiration for both backends)
     padded_urls = []
     for track_info in padded_tracks:
         if track_info.key:
@@ -515,13 +534,79 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
         logger.error("Mixdown failed - no decodable audio frames found")
         raise ValueError("No decodable audio frames in any track")
 
+    output_key = f"{input.transcript_id}/audio.mp3"
+
+    # Conditional: Modal or local backend
+    if settings.MIXDOWN_BACKEND == "modal":
+        ctx.log("mixdown_tracks: using Modal backend")
+
+        # Presign PUT URL for output (Modal will upload directly)
+        output_url = await storage.get_file_url(
+            output_key,
+            operation="put_object",
+            expires_in=PRESIGNED_URL_EXPIRATION_SECONDS,
+        )
+
+        from reflector.processors.audio_mixdown_modal import (  # noqa: PLC0415
+            AudioMixdownModalProcessor,
+        )
+
+        try:
+            processor = AudioMixdownModalProcessor()
+            result = await processor.mixdown(
+                track_urls=valid_urls,
+                output_url=output_url,
+                target_sample_rate=target_sample_rate,
+                expected_duration_sec=recording_duration
+                if recording_duration > 0
+                else None,
+            )
+            duration_ms = result.duration_ms
+            tracks_mixed = result.tracks_mixed
+            ctx.log(
+                f"mixdown_tracks: Modal returned duration={duration_ms}ms, tracks={tracks_mixed}"
+            )
+        except httpx.HTTPStatusError as e:
+            error_detail = e.response.text if hasattr(e.response, "text") else str(e)
+            logger.error(
+                "[Hatchet] Modal mixdown HTTP error",
+                transcript_id=input.transcript_id,
+                status_code=e.response.status_code if hasattr(e, "response") else None,
+                error=error_detail,
+            )
+            raise RuntimeError(
+                f"Modal mixdown failed with HTTP {e.response.status_code}: {error_detail}"
+            )
+        except httpx.TimeoutException:
+            logger.error(
+                "[Hatchet] Modal mixdown timeout",
+                transcript_id=input.transcript_id,
+                timeout=settings.MIXDOWN_TIMEOUT,
+            )
+            raise RuntimeError(
+                f"Modal mixdown timeout after {settings.MIXDOWN_TIMEOUT}s"
+            )
+        except ValueError as e:
+            logger.error(
+                "[Hatchet] Modal mixdown validation error",
+                transcript_id=input.transcript_id,
+                error=str(e),
+            )
+            raise
+    else:
+        ctx.log("mixdown_tracks: using local backend")
+        # Existing local implementation
-    output_path = tempfile.mktemp(suffix=".mp3")
-    duration_ms_callback_capture_container = [0.0]
+        output_path = tempfile.mktemp(suffix=".mp3")
+        duration_ms_callback_capture_container = [0.0]
 
-    async def capture_duration(d):
-        duration_ms_callback_capture_container[0] = d
+        async def capture_duration(d):
+            duration_ms_callback_capture_container[0] = d
 
-    writer = AudioFileWriterProcessor(path=output_path, on_duration=capture_duration)
+        writer = AudioFileWriterProcessor(
+            path=output_path, on_duration=capture_duration
+        )
 
-    await mixdown_tracks_pyav(
-        valid_urls,
+        await mixdown_tracks_pyav(
+            valid_urls,
@@ -530,18 +615,23 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
-        offsets_seconds=None,
-        logger=logger,
-        progress_callback=make_audio_progress_logger(ctx, TaskName.MIXDOWN_TRACKS),
-        expected_duration_sec=recording_duration if recording_duration > 0 else None,
-    )
-    await writer.flush()
+            offsets_seconds=None,
+            logger=logger,
+            progress_callback=make_audio_progress_logger(ctx, TaskName.MIXDOWN_TRACKS),
+            expected_duration_sec=recording_duration
+            if recording_duration > 0
+            else None,
+        )
+        await writer.flush()
 
-    file_size = Path(output_path).stat().st_size
-    storage_path = f"{input.transcript_id}/audio.mp3"
-    with open(output_path, "rb") as mixed_file:
-        await storage.put_file(storage_path, mixed_file)
-    Path(output_path).unlink(missing_ok=True)
+        file_size = Path(output_path).stat().st_size
+        with open(output_path, "rb") as mixed_file:
+            await storage.put_file(output_key, mixed_file)
+        Path(output_path).unlink(missing_ok=True)
+        duration_ms = duration_ms_callback_capture_container[0]
+        tracks_mixed = len(valid_urls)
+        ctx.log(f"mixdown_tracks: local mixdown uploaded {file_size} bytes")
 
+    # Update DB (same for both backends)
     async with fresh_db_connection():
         from reflector.db.transcripts import transcripts_controller  # noqa: PLC0415
@@ -551,12 +641,12 @@ async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:
             transcript, {"audio_location": "storage"}
         )
 
-    ctx.log(f"mixdown_tracks complete: uploaded {file_size} bytes to {storage_path}")
+    ctx.log(f"mixdown_tracks complete: uploaded to {output_key}")
 
     return MixdownResult(
-        audio_key=storage_path,
-        duration=duration_ms_callback_capture_container[0],
-        tracks_mixed=len(valid_urls),
+        audio_key=output_key,
+        duration=duration_ms,
+        tracks_mixed=tracks_mixed,
     )
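
Taken together with the worker files above, the routing works like this: the cpu pool advertises the label pool=cpu-heavy, the mixdown task requires it, and a constant concurrency key serializes runs globally. A condensed sketch of that pairing, using the same hatchet_sdk imports as this file:

from hatchet_sdk import ConcurrencyExpression, ConcurrencyLimitStrategy
from hatchet_sdk.labels import DesiredWorkerLabel

# Task side: a required label means the task only schedules onto workers that
# advertise a matching label (run_workers_cpu.py passes
# labels={"pool": "cpu-heavy"} to hatchet.worker()).
mixdown_affinity = {
    "pool": DesiredWorkerLabel(value="cpu-heavy", required=True, weight=100),
}

# "'mixdown-global'" is a CEL string literal, so every run evaluates to the
# same group key; with max_runs=1 this serializes mixdown across the cluster.
mixdown_concurrency = [
    ConcurrencyExpression(
        expression="'mixdown-global'",
        max_runs=1,
        limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
    )
]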

View File

@@ -7,7 +7,11 @@ Spawned dynamically by detect_topics via aio_run_many() for parallel processing.
 from datetime import timedelta
 
-from hatchet_sdk import ConcurrencyExpression, ConcurrencyLimitStrategy, Context
+from hatchet_sdk import (
+    ConcurrencyExpression,
+    ConcurrencyLimitStrategy,
+    Context,
+)
 from hatchet_sdk.rate_limit import RateLimit
 from pydantic import BaseModel
@@ -34,11 +38,13 @@ hatchet = HatchetClientManager.get_client()
 topic_chunk_workflow = hatchet.workflow(
     name="TopicChunkProcessing",
     input_validator=TopicChunkInput,
-    concurrency=ConcurrencyExpression(
-        expression="'global'",  # constant string = global limit across all runs
-        max_runs=20,
-        limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
-    ),
+    concurrency=[
+        ConcurrencyExpression(
+            expression="'global'",  # constant string = global limit across all runs
+            max_runs=20,
+            limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
+        )
+    ],
 )

View File

@@ -0,0 +1,89 @@
"""
Modal.com backend for audio mixdown.
Uses Modal's CPU containers to offload audio mixing from Hatchet workers.
Communicates via presigned S3 URLs for both input and output.
"""
import httpx
from pydantic import BaseModel
from reflector.settings import settings
class MixdownResponse(BaseModel):
"""Response from Modal mixdown endpoint."""
duration_ms: float
tracks_mixed: int
audio_uploaded: bool
class AudioMixdownModalProcessor:
"""Audio mixdown processor using Modal.com CPU backend.
Sends track URLs (presigned GET) and output URL (presigned PUT) to Modal.
Modal handles download, mixdown via PyAV, and upload.
"""
def __init__(self, modal_api_key: str | None = None):
if not settings.MIXDOWN_URL:
raise ValueError("MIXDOWN_URL required to use AudioMixdownModalProcessor")
self.mixdown_url = settings.MIXDOWN_URL + "/v1"
self.timeout = settings.MIXDOWN_TIMEOUT
self.modal_api_key = modal_api_key or settings.MIXDOWN_MODAL_API_KEY
if not self.modal_api_key:
raise ValueError(
"MIXDOWN_MODAL_API_KEY required to use AudioMixdownModalProcessor"
)
async def mixdown(
self,
track_urls: list[str],
output_url: str,
target_sample_rate: int,
expected_duration_sec: float | None = None,
) -> MixdownResponse:
"""Mix multiple audio tracks via Modal backend.
Args:
track_urls: List of presigned GET URLs for audio tracks (non-empty)
output_url: Presigned PUT URL for output MP3
target_sample_rate: Sample rate for output (Hz, must be positive)
expected_duration_sec: Optional fallback duration if container metadata unavailable
Returns:
MixdownResponse with duration_ms, tracks_mixed, audio_uploaded
Raises:
ValueError: If track_urls is empty or target_sample_rate invalid
httpx.HTTPStatusError: On HTTP errors (404, 403, 500, etc.)
httpx.TimeoutException: On timeout
"""
# Validate inputs
if not track_urls:
raise ValueError("track_urls cannot be empty")
if target_sample_rate <= 0:
raise ValueError(
f"target_sample_rate must be positive, got {target_sample_rate}"
)
if expected_duration_sec is not None and expected_duration_sec < 0:
raise ValueError(
f"expected_duration_sec cannot be negative, got {expected_duration_sec}"
)
async with httpx.AsyncClient(timeout=self.timeout) as client:
response = await client.post(
f"{self.mixdown_url}/audio/mixdown",
headers={"Authorization": f"Bearer {self.modal_api_key}"},
json={
"track_urls": track_urls,
"output_url": output_url,
"target_sample_rate": target_sample_rate,
"expected_duration_sec": expected_duration_sec,
},
)
response.raise_for_status()
return MixdownResponse(**response.json())
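
Typical use from pipeline code, sketched under the assumption that MIXDOWN_URL and MIXDOWN_MODAL_API_KEY are configured and that the presigned URLs were obtained from storage beforehand (the URLs below are hypothetical):

import asyncio

from reflector.processors.audio_mixdown_modal import AudioMixdownModalProcessor

track_urls = ["https://bucket.example/track_0.webm?X-Amz-Signature=..."]  # presigned GETs
output_url = "https://bucket.example/audio.mp3?X-Amz-Signature=..."  # presigned PUT


async def main() -> None:
    processor = AudioMixdownModalProcessor()  # raises ValueError if settings are missing
    result = await processor.mixdown(
        track_urls=track_urls,
        output_url=output_url,
        target_sample_rate=48000,
    )
    print(result.duration_ms, result.tracks_mixed, result.audio_uploaded)


asyncio.run(main())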

View File

@@ -98,6 +98,17 @@ class Settings(BaseSettings):
     # Diarization: local pyannote.audio
     DIARIZATION_PYANNOTE_AUTH_TOKEN: str | None = None
 
+    # Audio Mixdown
+    # backends:
+    #   - local: in-process PyAV mixdown (runs in same process as Hatchet worker)
+    #   - modal: HTTP API client to Modal.com CPU container
+    MIXDOWN_BACKEND: str = "local"
+    MIXDOWN_URL: str | None = None
+    MIXDOWN_TIMEOUT: int = 900  # 15 minutes
+
+    # Mixdown: modal backend
+    MIXDOWN_MODAL_API_KEY: str | None = None
+
     # Sentry
     SENTRY_DSN: str | None = None
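
A startup guard along these lines (a sketch, not part of this diff) would fail fast when the modal backend is selected without its URL or API key:

from reflector.settings import settings


def assert_mixdown_configured() -> None:
    """Fail fast if MIXDOWN_BACKEND=modal is selected without credentials."""
    if settings.MIXDOWN_BACKEND != "modal":
        return
    missing = [
        name
        for name, value in (
            ("MIXDOWN_URL", settings.MIXDOWN_URL),
            ("MIXDOWN_MODAL_API_KEY", settings.MIXDOWN_MODAL_API_KEY),
        )
        if not value
    ]
    if missing:
        raise ValueError(f"MIXDOWN_BACKEND=modal requires: {', '.join(missing)}")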

View File

@@ -7,8 +7,10 @@ elif [ "${ENTRYPOINT}" = "worker" ]; then
     uv run celery -A reflector.worker.app worker --loglevel=info
 elif [ "${ENTRYPOINT}" = "beat" ]; then
     uv run celery -A reflector.worker.app beat --loglevel=info
-elif [ "${ENTRYPOINT}" = "hatchet-worker" ]; then
-    uv run python -m reflector.hatchet.run_workers
+elif [ "${ENTRYPOINT}" = "hatchet-worker-cpu" ]; then
+    uv run python -m reflector.hatchet.run_workers_cpu
+elif [ "${ENTRYPOINT}" = "hatchet-worker-llm" ]; then
+    uv run python -m reflector.hatchet.run_workers_llm
 else
     echo "Unknown command"
 fi

View File

@@ -1,12 +1,12 @@
 import React, { useState, useEffect } from "react";
 import ScrollToBottom from "../../scrollToBottom";
 import { Topic } from "../../webSocketTypes";
-import { Box, Flex, Text } from "@chakra-ui/react";
-import { formatTime } from "../../../../lib/time";
-import { getTopicColor } from "../../../../lib/topicColors";
+import useParticipants from "../../useParticipants";
+import { Box, Flex, Text, Accordion } from "@chakra-ui/react";
+import { TopicItem } from "./TopicItem";
 import { TranscriptStatus } from "../../../../lib/transcript";
 import { featureEnabled } from "../../../../lib/features";
-import { TOPICS_SCROLL_DIV_ID } from "./constants";
 
 type TopicListProps = {
   topics: Topic[];
@@ -18,7 +18,6 @@ type TopicListProps = {
   transcriptId: string;
   status: TranscriptStatus | null;
   currentTranscriptText: any;
-  onTopicClick?: (topicId: string) => void;
 };
 
 export function TopicList({
@@ -28,13 +27,30 @@ export function TopicList({
   transcriptId,
   status,
   currentTranscriptText,
-  onTopicClick,
 }: TopicListProps) {
   const [activeTopic, setActiveTopic] = useActiveTopic;
-  const [hoveredTopicId, setHoveredTopicId] = useState<string | null>(null);
   const [autoscrollEnabled, setAutoscrollEnabled] = useState<boolean>(true);
+  const participants = useParticipants(transcriptId);
 
-  const toggleScroll = (element: HTMLElement) => {
+  const scrollToTopic = () => {
+    const topicDiv = document.getElementById(`topic-${activeTopic?.id}`);
+    setTimeout(() => {
+      topicDiv?.scrollIntoView({
+        behavior: "smooth",
+        block: "start",
+        inline: "nearest",
+      });
+    }, 200);
+  };
+
+  useEffect(() => {
+    if (activeTopic && autoscroll) scrollToTopic();
+  }, [activeTopic, autoscroll]);
+
+  // scroll top is not rounded, heights are, so exact match won't work.
+  // https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollHeight#determine_if_an_element_has_been_totally_scrolled
+  const toggleScroll = (element) => {
     const bottom =
       Math.abs(
         element.scrollHeight - element.clientHeight - element.scrollTop,
@@ -45,19 +61,14 @@ export function TopicList({
       setAutoscrollEnabled(true);
     }
   };
 
-  const handleScroll = (e: React.UIEvent<HTMLDivElement>) => {
-    toggleScroll(e.target as HTMLElement);
-  };
-
-  const scrollToBottom = () => {
-    const topicsDiv = document.getElementById(TOPICS_SCROLL_DIV_ID);
-    if (topicsDiv) topicsDiv.scrollTop = topicsDiv.scrollHeight;
+  const handleScroll = (e) => {
+    toggleScroll(e.target);
   };
 
   useEffect(() => {
     if (autoscroll) {
-      const topicsDiv = document.getElementById(TOPICS_SCROLL_DIV_ID);
+      const topicsDiv = document.getElementById("scroll-div");
       topicsDiv && toggleScroll(topicsDiv);
     }
   }, [activeTopic, autoscroll]);
@@ -66,41 +77,37 @@ export function TopicList({
     if (autoscroll && autoscrollEnabled) scrollToBottom();
   }, [topics.length, currentTranscriptText]);
 
+  const scrollToBottom = () => {
+    const topicsDiv = document.getElementById("scroll-div");
+    if (topicsDiv) topicsDiv.scrollTop = topicsDiv.scrollHeight;
+  };
+
+  const getSpeakerName = (speakerNumber: number) => {
+    if (!participants.response) return;
+    return (
+      participants.response.find(
+        (participant) => participant.speaker == speakerNumber,
+      )?.name || `Speaker ${speakerNumber}`
+    );
+  };
+
+  const requireLogin = featureEnabled("requireLogin");
+
   useEffect(() => {
     if (autoscroll) {
       setActiveTopic(topics[topics.length - 1]);
     }
   }, [topics, autoscroll]);
 
-  const handleTopicClick = (topic: Topic) => {
-    setActiveTopic(topic);
-    if (onTopicClick) {
-      onTopicClick(topic.id);
-    }
-  };
-
-  const handleTopicMouseEnter = (topic: Topic) => {
-    setHoveredTopicId(topic.id);
-    // If already active, toggle off when mousing over
-    if (activeTopic?.id === topic.id) {
-      setActiveTopic(null);
-    } else {
-      setActiveTopic(topic);
-    }
-  };
-
-  const handleTopicMouseLeave = () => {
-    setHoveredTopicId(null);
-  };
-
-  const requireLogin = featureEnabled("requireLogin");
-
   return (
     <Flex
-      position="relative"
-      w="full"
-      h="200px"
-      flexDirection="column"
+      position={"relative"}
+      w={"100%"}
+      h={"95%"}
+      flexDirection={"column"}
+      justify={"center"}
+      align={"center"}
       flexShrink={0}
    >
      {autoscroll && (
@@ -111,71 +118,45 @@ export function TopicList({
       )}
       <Box
-        id={TOPICS_SCROLL_DIV_ID}
-        overflowY="auto"
-        h="full"
+        id="scroll-div"
+        overflowY={"auto"}
+        h={"100%"}
         onScroll={handleScroll}
         width="full"
       >
         {topics.length > 0 && (
-          <Flex direction="column" gap={1} p={2}>
-            {topics.map((topic, index) => {
-              const color = getTopicColor(index);
-              const isActive = activeTopic?.id === topic.id;
-              const isHovered = hoveredTopicId === topic.id;
-
-              return (
-                <Flex
-                  key={topic.id}
-                  id={`topic-${topic.id}`}
-                  gap={2}
-                  align="center"
-                  py={1}
-                  px={2}
-                  cursor="pointer"
-                  bg={isActive || isHovered ? "gray.100" : "transparent"}
-                  _hover={{ bg: "gray.50" }}
-                  onClick={() => handleTopicClick(topic)}
-                  onMouseEnter={() => handleTopicMouseEnter(topic)}
-                  onMouseLeave={handleTopicMouseLeave}
-                >
-                  {/* Color indicator */}
-                  <Box
-                    w="12px"
-                    h="12px"
-                    borderRadius="full"
-                    bg={color}
-                    flexShrink={0}
-                  />
-
-                  {/* Topic title */}
-                  <Text
-                    flex={1}
-                    fontSize="sm"
-                    fontWeight={isActive ? "semibold" : "normal"}
-                  >
-                    {topic.title}
-                  </Text>
-
-                  {/* Timestamp */}
-                  <Text as="span" color="gray.500" fontSize="xs" flexShrink={0}>
-                    {formatTime(topic.timestamp)}
-                  </Text>
-                </Flex>
-              );
-            })}
-          </Flex>
+          <Accordion.Root
+            multiple={false}
+            collapsible={true}
+            value={activeTopic ? [activeTopic.id] : []}
+            onValueChange={(details) => {
+              const selectedTopicId = details.value[0];
+              const selectedTopic = selectedTopicId
+                ? topics.find((t) => t.id === selectedTopicId)
+                : null;
+              setActiveTopic(selectedTopic || null);
+            }}
+          >
+            {topics.map((topic) => (
+              <TopicItem
+                key={topic.id}
+                topic={topic}
+                isActive={activeTopic?.id === topic.id}
+                getSpeakerName={getSpeakerName}
+              />
+            ))}
+          </Accordion.Root>
         )}
         {status == "recording" && (
-          <Box textAlign="center">
+          <Box textAlign={"center"}>
            <Text>{currentTranscriptText}</Text>
          </Box>
        )}
        {(status == "recording" || status == "idle") &&
          currentTranscriptText.length == 0 &&
          topics.length == 0 && (
-            <Box textAlign="center" color="gray">
+            <Box textAlign={"center"} color="gray">
              <Text>
                Full discussion transcript will appear here after you start
                recording.
@@ -186,7 +167,7 @@ export function TopicList({
         </Box>
       )}
       {status == "processing" && (
-        <Box textAlign="center" color="gray">
+        <Box textAlign={"center"} color="gray">
           <Text>We are processing the recording, please wait.</Text>
           {!requireLogin && (
             <span>
@@ -196,12 +177,12 @@ export function TopicList({
         </Box>
       )}
       {status == "ended" && topics.length == 0 && (
-        <Box textAlign="center" color="gray">
+        <Box textAlign={"center"} color="gray">
           <Text>Recording has ended without topics being found.</Text>
         </Box>
       )}
       {status == "error" && (
-        <Box textAlign="center" color="gray">
+        <Box textAlign={"center"} color="gray">
           <Text>There was an error processing your recording</Text>
         </Box>
       )}

View File

@@ -1,106 +0,0 @@
import { Box, Text, IconButton } from "@chakra-ui/react";
import { ChevronUp } from "lucide-react";
import { Topic } from "../../webSocketTypes";
import { getTopicColor } from "../../../../lib/topicColors";
import { TOPICS_SCROLL_DIV_ID } from "./constants";

interface TranscriptWithGutterProps {
  topics: Topic[];
  getSpeakerName: (speakerNumber: number) => string | undefined;
  onGutterClick: (topicId: string) => void;
}

export function TranscriptWithGutter({
  topics,
  getSpeakerName,
  onGutterClick,
}: TranscriptWithGutterProps) {
  const scrollToTopics = () => {
    // Scroll to the topic list at the top
    const topicList = document.getElementById(TOPICS_SCROLL_DIV_ID);
    if (topicList) {
      topicList.scrollIntoView({
        behavior: "smooth",
        block: "start",
      });
    }
  };

  return (
    <Box>
      {topics.map((topic, topicIndex) => {
        const color = getTopicColor(topicIndex);

        return (
          <Box key={topic.id} position="relative">
            {/* Topic Header with Up Button */}
            <Box
              py={3}
              px={4}
              fontWeight="semibold"
              fontSize="lg"
              display="flex"
              alignItems="center"
              justifyContent="space-between"
            >
              <Text>{topic.title}</Text>
              <IconButton
                aria-label="Scroll to topics"
                size="sm"
                variant="ghost"
                onClick={scrollToTopics}
              >
                <ChevronUp size={16} />
              </IconButton>
            </Box>

            {/* Segments container with single gutter */}
            <Box position="relative">
              {/* Single continuous gutter for entire topic */}
              <Box
                className="topic-gutter"
                position="absolute"
                left={0}
                top={0}
                bottom={0}
                width="4px"
                bg={color}
                cursor="pointer"
                transition="all 0.2s"
                _hover={{
                  filter: "brightness(1.2)",
                  width: "6px",
                }}
                onClick={() => onGutterClick(topic.id)}
              />

              {/* Segments */}
              {topic.segments?.map((segment, segmentIndex) => (
                <Box
                  key={segmentIndex}
                  id={`segment-${topic.id}-${segmentIndex}`}
                  py={2}
                  px={4}
                  pl={12}
                  _hover={{
                    bg: "gray.50",
                  }}
                >
                  {/* Segment Content */}
                  <Text fontSize="sm">
                    <Text as="span" fontWeight="semibold" color="gray.700">
                      {getSpeakerName(segment.speaker) ||
                        `Speaker ${segment.speaker}`}
                      :
                    </Text>{" "}
                    {segment.text}
                  </Text>
                </Box>
              ))}
            </Box>
          </Box>
        );
      })}
    </Box>
  );
}

View File

@@ -1 +0,0 @@
export const TOPICS_SCROLL_DIV_ID = "topics-scroll-div";

View File

@@ -3,9 +3,7 @@ import Modal from "../modal";
 import useTopics from "../useTopics";
 import useWaveform from "../useWaveform";
 import useMp3 from "../useMp3";
-import useParticipants from "../useParticipants";
 import { TopicList } from "./_components/TopicList";
-import { TranscriptWithGutter } from "./_components/TranscriptWithGutter";
 import { Topic } from "../webSocketTypes";
 import React, { useEffect, useState, use } from "react";
 import FinalSummary from "./finalSummary";
@@ -47,91 +45,14 @@ export default function TranscriptDetails(details: TranscriptDetails) {
   const mp3 = useMp3(transcriptId, waiting);
   const topics = useTopics(transcriptId);
-  const participants = useParticipants(transcriptId);
   const waveform = useWaveform(
     transcriptId,
     waiting || mp3.audioDeleted === true,
   );
   const useActiveTopic = useState<Topic | null>(null);
-  const [activeTopic, setActiveTopic] = useActiveTopic;
   const [finalSummaryElement, setFinalSummaryElement] =
     useState<HTMLDivElement | null>(null);
 
-  // IntersectionObserver for active topic detection based on scroll position
-  useEffect(() => {
-    if (!topics.topics || topics.topics.length === 0) return;
-
-    const observer = new IntersectionObserver(
-      (entries) => {
-        // Find the most visible segment
-        let mostVisibleEntry: IntersectionObserverEntry | null = null;
-        let maxRatio = 0;
-
-        entries.forEach((entry) => {
-          if (entry.isIntersecting && entry.intersectionRatio > maxRatio) {
-            maxRatio = entry.intersectionRatio;
-            mostVisibleEntry = entry;
-          }
-        });
-
-        if (mostVisibleEntry) {
-          // Extract topicId from segment id (format: "segment-{topicId}-{idx}")
-          const segmentId = mostVisibleEntry.target.id;
-          const match = segmentId.match(/^segment-([^-]+)-/);
-          if (match) {
-            const topicId = match[1];
-            const topic = topics.topics?.find((t) => t.id === topicId);
-            if (topic && activeTopic?.id !== topic.id) {
-              setActiveTopic(topic);
-            }
-          }
-        }
-      },
-      {
-        threshold: [0, 0.25, 0.5, 0.75, 1],
-        rootMargin: "-20% 0px -20% 0px",
-      },
-    );
-
-    // Observe all segment elements
-    const segments = document.querySelectorAll('[id^="segment-"]');
-    segments.forEach((segment) => observer.observe(segment));
-
-    return () => observer.disconnect();
-  }, [topics.topics, activeTopic?.id, setActiveTopic]);
-
-  // Scroll handlers for bidirectional navigation
-  const handleTopicClick = (topicId: string) => {
-    // Scroll to first segment of this topic in transcript
-    const firstSegment = document.querySelector(`[id^="segment-${topicId}-"]`);
-    if (firstSegment) {
-      firstSegment.scrollIntoView({
-        behavior: "smooth",
-        block: "center",
-      });
-    }
-  };
-
-  const handleGutterClick = (topicId: string) => {
-    // Scroll to topic in list
-    const topicChip = document.getElementById(`topic-${topicId}`);
-    if (topicChip) {
-      topicChip.scrollIntoView({
-        behavior: "smooth",
-        block: "center",
-      });
-    }
-  };
-
-  const getSpeakerName = (speakerNumber: number) => {
-    if (!participants.response) return `Speaker ${speakerNumber}`;
-    return (
-      participants.response.find(
-        (participant) => participant.speaker == speakerNumber,
-      )?.name || `Speaker ${speakerNumber}`
-    );
-  };
-
   useEffect(() => {
     if (!waiting || !transcript.data) return;
@@ -200,7 +121,7 @@ export default function TranscriptDetails(details: TranscriptDetails) {
     <>
       <Grid
         templateColumns="1fr"
-        templateRows="auto auto"
+        templateRows="auto minmax(0, 1fr)"
         gap={4}
         mt={4}
         mb={4}
@@ -232,18 +153,18 @@ export default function TranscriptDetails(details: TranscriptDetails) {
       <Grid
         templateColumns={{ base: "minmax(0, 1fr)", md: "repeat(2, 1fr)" }}
         templateRows={{
-          base: "auto auto auto",
-          md: "auto auto",
+          base: "auto minmax(0, 1fr) minmax(0, 1fr)",
+          md: "auto minmax(0, 1fr)",
         }}
         gap={4}
         gridRowGap={2}
         padding={4}
+        paddingBottom={0}
         background="gray.bg"
         border={"2px solid"}
         borderColor={"gray.bg"}
         borderRadius={8}
       >
-        {/* Title */}
         <GridItem colSpan={{ base: 1, md: 2 }}>
           <Flex direction="column" gap={0}>
             <Flex alignItems="center" gap={2}>
@@ -266,9 +187,6 @@ export default function TranscriptDetails(details: TranscriptDetails) {
           )}
         </Flex>
       </GridItem>
-      {/* Left column: Topics List */}
-      <GridItem display="flex" flexDirection="column" gap={4} h="100%">
       <TopicList
         topics={topics.topics || []}
         useActiveTopic={useActiveTopic}
@@ -276,43 +194,9 @@ export default function TranscriptDetails(details: TranscriptDetails) {
         transcriptId={transcriptId}
         status={transcript.data?.status || null}
         currentTranscriptText=""
-        onTopicClick={handleTopicClick}
       />
-      {/* Transcript with colored gutter (scrollable) */}
-      {topics.topics && topics.topics.length > 0 && (
-        <Box
-          overflowY="auto"
-          flex={1}
-          minH="0"
-          pr={2}
-          css={{
-            "&::-webkit-scrollbar": {
-              width: "8px",
-            },
-            "&::-webkit-scrollbar-track": {
-              background: "transparent",
-            },
-            "&::-webkit-scrollbar-thumb": {
-              background: "#CBD5E0",
-              borderRadius: "4px",
-            },
-            "&::-webkit-scrollbar-thumb:hover": {
-              background: "#A0AEC0",
-            },
-          }}
-        >
-          <TranscriptWithGutter
-            topics={topics.topics}
-            getSpeakerName={getSpeakerName}
-            onGutterClick={handleGutterClick}
-          />
-        </Box>
-      )}
-      </GridItem>
-      {/* Right column: Final Summary */}
       {transcript.data && topics.topics ? (
-        <>
         <FinalSummary
           transcript={transcript.data}
           topics={topics.topics}
@@ -321,9 +205,10 @@ export default function TranscriptDetails(details: TranscriptDetails) {
           }}
           finalSummaryRef={setFinalSummaryElement}
         />
-        </>
       ) : (
-        <Flex justify="center" alignItems="center" h="100%">
-          <Flex direction="column" h="full" justify="center" align="center">
+        <Flex justify={"center"} alignItems={"center"} h={"100%"}>
+          <div className="flex flex-col h-full justify-center content-center">
            {transcript?.data?.status == "processing" ? (
              <Text>Loading Transcript</Text>
            ) : (
@@ -332,7 +217,7 @@ export default function TranscriptDetails(details: TranscriptDetails) {
                back later
              </Text>
            )}
-          </Flex>
+          </div>
        </Flex>
      )}
    </Grid>

View File

@@ -1,18 +0,0 @@
// Predefined color palette for topics
// Colors chosen for good contrast and visual distinction
export const TOPIC_COLORS = [
"#3B82F6", // blue
"#10B981", // green
"#F59E0B", // amber
"#EF4444", // red
"#8B5CF6", // violet
"#EC4899", // pink
"#14B8A6", // teal
"#F97316", // orange
"#6366F1", // indigo
"#84CC16", // lime
] as const;
export function getTopicColor(topicIndex: number): string {
return TOPIC_COLORS[topicIndex % TOPIC_COLORS.length];
}