Compare commits

4 Commits

Author | SHA1 | Message | Date
Igor Loskutov | 23b6f5f5a1 | transcript gutter | 2026-01-19 19:18:54 -05:00
Igor Loskutov | 95e58b943f | transcript gutter | 2026-01-16 12:28:38 -05:00
Igor Loskutov | 13a3ec6148 | transcription UI | 2026-01-13 18:36:28 -05:00
Igor Loskutov | 807b954340 | transcription UI | 2026-01-13 17:29:38 -05:00
13 changed files with 452 additions and 267 deletions

View File

@@ -1,12 +1,5 @@
# Changelog
## [0.28.0](https://github.com/Monadical-SAS/reflector/compare/v0.27.0...v0.28.0) (2026-01-20)
### Features
* worker affinity ([#819](https://github.com/Monadical-SAS/reflector/issues/819)) ([3b6540e](https://github.com/Monadical-SAS/reflector/commit/3b6540eae5b597449f98661bdf15483b77be3268))
## [0.27.0](https://github.com/Monadical-SAS/reflector/compare/v0.26.0...v0.27.0) (2025-12-26)

View File

@@ -34,7 +34,7 @@ services:
environment:
ENTRYPOINT: beat
hatchet-worker-cpu:
hatchet-worker:
build:
context: server
volumes:
@@ -43,20 +43,7 @@ services:
env_file:
- ./server/.env
environment:
ENTRYPOINT: hatchet-worker-cpu
depends_on:
hatchet:
condition: service_healthy
hatchet-worker-llm:
build:
context: server
volumes:
- ./server/:/app/
- /app/.venv
env_file:
- ./server/.env
environment:
ENTRYPOINT: hatchet-worker-llm
ENTRYPOINT: hatchet-worker
depends_on:
hatchet:
condition: service_healthy

View File

@@ -0,0 +1,77 @@
"""
Run Hatchet workers for the multitrack pipeline.
Runs as a separate process, just like Celery workers.
Usage:
uv run -m reflector.hatchet.run_workers
# Or via docker:
docker compose exec server uv run -m reflector.hatchet.run_workers
"""
import signal
import sys
from hatchet_sdk.rate_limit import RateLimitDuration
from reflector.hatchet.constants import LLM_RATE_LIMIT_KEY, LLM_RATE_LIMIT_PER_SECOND
from reflector.logger import logger
from reflector.settings import settings
def main() -> None:
"""Start Hatchet worker polling."""
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting workers")
sys.exit(1)
if not settings.HATCHET_CLIENT_TOKEN:
logger.error("HATCHET_CLIENT_TOKEN is not set")
sys.exit(1)
logger.info(
"Starting Hatchet workers",
debug=settings.HATCHET_DEBUG,
)
# Import here (not top-level) - workflow modules call HatchetClientManager.get_client()
# at module level because Hatchet SDK decorators (@workflow.task) bind at import time.
# Can't use lazy init: decorators need the client object when function is defined.
from reflector.hatchet.client import HatchetClientManager # noqa: PLC0415
from reflector.hatchet.workflows import ( # noqa: PLC0415
daily_multitrack_pipeline,
subject_workflow,
topic_chunk_workflow,
track_workflow,
)
hatchet = HatchetClientManager.get_client()
hatchet.rate_limits.put(
LLM_RATE_LIMIT_KEY, LLM_RATE_LIMIT_PER_SECOND, RateLimitDuration.SECOND
)
worker = hatchet.worker(
"reflector-pipeline-worker",
workflows=[
daily_multitrack_pipeline,
subject_workflow,
topic_chunk_workflow,
track_workflow,
],
)
def shutdown_handler(signum: int, frame) -> None:
logger.info("Received shutdown signal, stopping workers...")
# Worker cleanup happens automatically on exit
sys.exit(0)
signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)
logger.info("Starting Hatchet worker polling...")
worker.start()
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
"""
CPU-heavy worker pool for audio processing tasks.
Handles ONLY: mixdown_tracks
Configuration:
- slots=1: Only mixdown (already serialized globally with max_runs=1)
- Worker affinity: pool=cpu-heavy
"""
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
daily_multitrack_pipeline,
)
from reflector.logger import logger
from reflector.settings import settings
def main():
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting CPU workers")
return
hatchet = HatchetClientManager.get_client()
logger.info(
"Starting Hatchet CPU worker pool (mixdown only)",
worker_name="cpu-worker-pool",
slots=1,
labels={"pool": "cpu-heavy"},
)
cpu_worker = hatchet.worker(
"cpu-worker-pool",
slots=1, # Only 1 mixdown at a time (already serialized globally)
labels={
"pool": "cpu-heavy",
},
workflows=[daily_multitrack_pipeline],
)
try:
cpu_worker.start()
except KeyboardInterrupt:
logger.info("Received shutdown signal, stopping CPU workers...")
if __name__ == "__main__":
main()

View File

@@ -1,56 +0,0 @@
"""
LLM/I/O worker pool for all non-CPU tasks.
Handles: all tasks except mixdown_tracks (transcription, LLM inference, orchestration)
"""
from reflector.hatchet.client import HatchetClientManager
from reflector.hatchet.workflows.daily_multitrack_pipeline import (
daily_multitrack_pipeline,
)
from reflector.hatchet.workflows.subject_processing import subject_workflow
from reflector.hatchet.workflows.topic_chunk_processing import topic_chunk_workflow
from reflector.hatchet.workflows.track_processing import track_workflow
from reflector.logger import logger
from reflector.settings import settings
SLOTS = 10
WORKER_NAME = "llm-worker-pool"
POOL = "llm-io"
def main():
if not settings.HATCHET_ENABLED:
logger.error("HATCHET_ENABLED is False, not starting LLM workers")
return
hatchet = HatchetClientManager.get_client()
logger.info(
"Starting Hatchet LLM worker pool (all tasks except mixdown)",
worker_name=WORKER_NAME,
slots=SLOTS,
labels={"pool": POOL},
)
llm_worker = hatchet.worker(
WORKER_NAME,
slots=SLOTS,  # not every slot will necessarily be used
labels={
"pool": POOL,
},
workflows=[
daily_multitrack_pipeline,
topic_chunk_workflow,
subject_workflow,
track_workflow,
],
)
try:
llm_worker.start()
except KeyboardInterrupt:
logger.info("Received shutdown signal, stopping LLM workers...")
if __name__ == "__main__":
main()

View File

@@ -23,12 +23,7 @@ from pathlib import Path
from typing import Any, Callable, Coroutine, Protocol, TypeVar
import httpx
from hatchet_sdk import (
ConcurrencyExpression,
ConcurrencyLimitStrategy,
Context,
)
from hatchet_sdk.labels import DesiredWorkerLabel
from hatchet_sdk import Context
from pydantic import BaseModel
from reflector.dailyco_api.client import DailyApiClient
@@ -472,20 +467,6 @@ async def process_tracks(input: PipelineInput, ctx: Context) -> ProcessTracksRes
parents=[process_tracks],
execution_timeout=timedelta(seconds=TIMEOUT_AUDIO),
retries=3,
desired_worker_labels={
"pool": DesiredWorkerLabel(
value="cpu-heavy",
required=True,
weight=100,
),
},
concurrency=[
ConcurrencyExpression(
expression="'mixdown-global'",
max_runs=1, # serialize mixdown to prevent resource contention
limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN, # Queue
)
],
)
@with_error_handling(TaskName.MIXDOWN_TRACKS)
async def mixdown_tracks(input: PipelineInput, ctx: Context) -> MixdownResult:

View File

@@ -7,11 +7,7 @@ Spawned dynamically by detect_topics via aio_run_many() for parallel processing.
from datetime import timedelta
from hatchet_sdk import (
ConcurrencyExpression,
ConcurrencyLimitStrategy,
Context,
)
from hatchet_sdk import ConcurrencyExpression, ConcurrencyLimitStrategy, Context
from hatchet_sdk.rate_limit import RateLimit
from pydantic import BaseModel
@@ -38,13 +34,11 @@ hatchet = HatchetClientManager.get_client()
topic_chunk_workflow = hatchet.workflow(
name="TopicChunkProcessing",
input_validator=TopicChunkInput,
concurrency=[
ConcurrencyExpression(
expression="'global'", # constant string = global limit across all runs
max_runs=20,
limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
)
],
concurrency=ConcurrencyExpression(
expression="'global'", # constant string = global limit across all runs
max_runs=20,
limit_strategy=ConcurrencyLimitStrategy.GROUP_ROUND_ROBIN,
),
)

View File

@@ -7,10 +7,8 @@ elif [ "${ENTRYPOINT}" = "worker" ]; then
uv run celery -A reflector.worker.app worker --loglevel=info
elif [ "${ENTRYPOINT}" = "beat" ]; then
uv run celery -A reflector.worker.app beat --loglevel=info
elif [ "${ENTRYPOINT}" = "hatchet-worker-cpu" ]; then
uv run python -m reflector.hatchet.run_workers_cpu
elif [ "${ENTRYPOINT}" = "hatchet-worker-llm" ]; then
uv run python -m reflector.hatchet.run_workers_llm
elif [ "${ENTRYPOINT}" = "hatchet-worker" ]; then
uv run python -m reflector.hatchet.run_workers
else
echo "Unknown command"
fi

View File

@@ -1,12 +1,12 @@
import React, { useState, useEffect } from "react";
import ScrollToBottom from "../../scrollToBottom";
import { Topic } from "../../webSocketTypes";
import useParticipants from "../../useParticipants";
import { Box, Flex, Text, Accordion } from "@chakra-ui/react";
import { TopicItem } from "./TopicItem";
import { Box, Flex, Text } from "@chakra-ui/react";
import { formatTime } from "../../../../lib/time";
import { getTopicColor } from "../../../../lib/topicColors";
import { TranscriptStatus } from "../../../../lib/transcript";
import { featureEnabled } from "../../../../lib/features";
import { TOPICS_SCROLL_DIV_ID } from "./constants";
type TopicListProps = {
topics: Topic[];
@@ -18,6 +18,7 @@ type TopicListProps = {
transcriptId: string;
status: TranscriptStatus | null;
currentTranscriptText: any;
onTopicClick?: (topicId: string) => void;
};
export function TopicList({
@@ -27,30 +28,13 @@ export function TopicList({
transcriptId,
status,
currentTranscriptText,
onTopicClick,
}: TopicListProps) {
const [activeTopic, setActiveTopic] = useActiveTopic;
const [hoveredTopicId, setHoveredTopicId] = useState<string | null>(null);
const [autoscrollEnabled, setAutoscrollEnabled] = useState<boolean>(true);
const participants = useParticipants(transcriptId);
const scrollToTopic = () => {
const topicDiv = document.getElementById(`topic-${activeTopic?.id}`);
setTimeout(() => {
topicDiv?.scrollIntoView({
behavior: "smooth",
block: "start",
inline: "nearest",
});
}, 200);
};
useEffect(() => {
if (activeTopic && autoscroll) scrollToTopic();
}, [activeTopic, autoscroll]);
// scroll top is not rounded, heights are, so exact match won't work.
// https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollHeight#determine_if_an_element_has_been_totally_scrolled
const toggleScroll = (element) => {
const toggleScroll = (element: HTMLElement) => {
const bottom =
Math.abs(
element.scrollHeight - element.clientHeight - element.scrollTop,
@@ -61,14 +45,19 @@ export function TopicList({
setAutoscrollEnabled(true);
}
};
const handleScroll = (e) => {
toggleScroll(e.target);
const handleScroll = (e: React.UIEvent<HTMLDivElement>) => {
toggleScroll(e.target as HTMLElement);
};
const scrollToBottom = () => {
const topicsDiv = document.getElementById(TOPICS_SCROLL_DIV_ID);
if (topicsDiv) topicsDiv.scrollTop = topicsDiv.scrollHeight;
};
useEffect(() => {
if (autoscroll) {
const topicsDiv = document.getElementById("scroll-div");
const topicsDiv = document.getElementById(TOPICS_SCROLL_DIV_ID);
topicsDiv && toggleScroll(topicsDiv);
}
}, [activeTopic, autoscroll]);
@@ -77,37 +66,41 @@ export function TopicList({
if (autoscroll && autoscrollEnabled) scrollToBottom();
}, [topics.length, currentTranscriptText]);
const scrollToBottom = () => {
const topicsDiv = document.getElementById("scroll-div");
if (topicsDiv) topicsDiv.scrollTop = topicsDiv.scrollHeight;
};
const getSpeakerName = (speakerNumber: number) => {
if (!participants.response) return;
return (
participants.response.find(
(participant) => participant.speaker == speakerNumber,
)?.name || `Speaker ${speakerNumber}`
);
};
const requireLogin = featureEnabled("requireLogin");
useEffect(() => {
if (autoscroll) {
setActiveTopic(topics[topics.length - 1]);
}
}, [topics, autoscroll]);
const handleTopicClick = (topic: Topic) => {
setActiveTopic(topic);
if (onTopicClick) {
onTopicClick(topic.id);
}
};
const handleTopicMouseEnter = (topic: Topic) => {
setHoveredTopicId(topic.id);
// If already active, toggle off when mousing over
if (activeTopic?.id === topic.id) {
setActiveTopic(null);
} else {
setActiveTopic(topic);
}
};
const handleTopicMouseLeave = () => {
setHoveredTopicId(null);
};
const requireLogin = featureEnabled("requireLogin");
return (
<Flex
position={"relative"}
w={"100%"}
h={"95%"}
flexDirection={"column"}
justify={"center"}
align={"center"}
position="relative"
w="full"
h="200px"
flexDirection="column"
flexShrink={0}
>
{autoscroll && (
@@ -118,45 +111,71 @@ export function TopicList({
)}
<Box
id="scroll-div"
overflowY={"auto"}
h={"100%"}
id={TOPICS_SCROLL_DIV_ID}
overflowY="auto"
h="full"
onScroll={handleScroll}
width="full"
>
{topics.length > 0 && (
<Accordion.Root
multiple={false}
collapsible={true}
value={activeTopic ? [activeTopic.id] : []}
onValueChange={(details) => {
const selectedTopicId = details.value[0];
const selectedTopic = selectedTopicId
? topics.find((t) => t.id === selectedTopicId)
: null;
setActiveTopic(selectedTopic || null);
}}
>
{topics.map((topic) => (
<TopicItem
key={topic.id}
topic={topic}
isActive={activeTopic?.id === topic.id}
getSpeakerName={getSpeakerName}
/>
))}
</Accordion.Root>
<Flex direction="column" gap={1} p={2}>
{topics.map((topic, index) => {
const color = getTopicColor(index);
const isActive = activeTopic?.id === topic.id;
const isHovered = hoveredTopicId === topic.id;
return (
<Flex
key={topic.id}
id={`topic-${topic.id}`}
gap={2}
align="center"
py={1}
px={2}
cursor="pointer"
bg={isActive || isHovered ? "gray.100" : "transparent"}
_hover={{ bg: "gray.50" }}
onClick={() => handleTopicClick(topic)}
onMouseEnter={() => handleTopicMouseEnter(topic)}
onMouseLeave={handleTopicMouseLeave}
>
{/* Color indicator */}
<Box
w="12px"
h="12px"
borderRadius="full"
bg={color}
flexShrink={0}
/>
{/* Topic title */}
<Text
flex={1}
fontSize="sm"
fontWeight={isActive ? "semibold" : "normal"}
>
{topic.title}
</Text>
{/* Timestamp */}
<Text as="span" color="gray.500" fontSize="xs" flexShrink={0}>
{formatTime(topic.timestamp)}
</Text>
</Flex>
);
})}
</Flex>
)}
{status == "recording" && (
<Box textAlign={"center"}>
<Box textAlign="center">
<Text>{currentTranscriptText}</Text>
</Box>
)}
{(status == "recording" || status == "idle") &&
currentTranscriptText.length == 0 &&
topics.length == 0 && (
<Box textAlign={"center"} color="gray">
<Box textAlign="center" color="gray">
<Text>
Full discussion transcript will appear here after you start
recording.
@@ -167,7 +186,7 @@ export function TopicList({
</Box>
)}
{status == "processing" && (
<Box textAlign={"center"} color="gray">
<Box textAlign="center" color="gray">
<Text>We are processing the recording, please wait.</Text>
{!requireLogin && (
<span>
@@ -177,12 +196,12 @@ export function TopicList({
</Box>
)}
{status == "ended" && topics.length == 0 && (
<Box textAlign={"center"} color="gray">
<Box textAlign="center" color="gray">
<Text>Recording has ended without topics being found.</Text>
</Box>
)}
{status == "error" && (
<Box textAlign={"center"} color="gray">
<Box textAlign="center" color="gray">
<Text>There was an error processing your recording</Text>
</Box>
)}

View File

@@ -0,0 +1,106 @@
import { Box, Text, IconButton } from "@chakra-ui/react";
import { ChevronUp } from "lucide-react";
import { Topic } from "../../webSocketTypes";
import { getTopicColor } from "../../../../lib/topicColors";
import { TOPICS_SCROLL_DIV_ID } from "./constants";
interface TranscriptWithGutterProps {
topics: Topic[];
getSpeakerName: (speakerNumber: number) => string | undefined;
onGutterClick: (topicId: string) => void;
}
export function TranscriptWithGutter({
topics,
getSpeakerName,
onGutterClick,
}: TranscriptWithGutterProps) {
const scrollToTopics = () => {
// Scroll to the topic list at the top
const topicList = document.getElementById(TOPICS_SCROLL_DIV_ID);
if (topicList) {
topicList.scrollIntoView({
behavior: "smooth",
block: "start",
});
}
};
return (
<Box>
{topics.map((topic, topicIndex) => {
const color = getTopicColor(topicIndex);
return (
<Box key={topic.id} position="relative">
{/* Topic Header with Up Button */}
<Box
py={3}
px={4}
fontWeight="semibold"
fontSize="lg"
display="flex"
alignItems="center"
justifyContent="space-between"
>
<Text>{topic.title}</Text>
<IconButton
aria-label="Scroll to topics"
size="sm"
variant="ghost"
onClick={scrollToTopics}
>
<ChevronUp size={16} />
</IconButton>
</Box>
{/* Segments container with single gutter */}
<Box position="relative">
{/* Single continuous gutter for entire topic */}
<Box
className="topic-gutter"
position="absolute"
left={0}
top={0}
bottom={0}
width="4px"
bg={color}
cursor="pointer"
transition="all 0.2s"
_hover={{
filter: "brightness(1.2)",
width: "6px",
}}
onClick={() => onGutterClick(topic.id)}
/>
{/* Segments */}
{topic.segments?.map((segment, segmentIndex) => (
<Box
key={segmentIndex}
id={`segment-${topic.id}-${segmentIndex}`}
py={2}
px={4}
pl={12}
_hover={{
bg: "gray.50",
}}
>
{/* Segment Content */}
<Text fontSize="sm">
<Text as="span" fontWeight="semibold" color="gray.700">
{getSpeakerName(segment.speaker) ||
`Speaker ${segment.speaker}`}
:
</Text>{" "}
{segment.text}
</Text>
</Box>
))}
</Box>
</Box>
);
})}
</Box>
);
}
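
The `Topic` type used by these components is imported from `../../webSocketTypes`, which is not part of this diff. Below is a minimal sketch of the shape the new UI relies on, inferred only from the fields accessed in `TopicList` and `TranscriptWithGutter`; the exact field types and any additional fields on the real type are assumptions.

```typescript
// Hypothetical sketch of the Topic shape, inferred from usage in this diff;
// the real definition lives in webSocketTypes and may differ.
interface TopicSegment {
  speaker: number; // speaker index, resolved to a display name via getSpeakerName()
  text: string;    // transcribed text for this segment
}

interface Topic {
  id: string;        // used to build DOM ids like `topic-${id}` and `segment-${id}-${idx}`
  title: string;     // shown in the topic chip and the transcript section header
  timestamp: number; // assumed numeric; rendered with formatTime() in the topic list
  segments?: TopicSegment[]; // optional: TranscriptWithGutter maps over these
}
```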

View File

@@ -0,0 +1 @@
export const TOPICS_SCROLL_DIV_ID = "topics-scroll-div";

View File

@@ -3,7 +3,9 @@ import Modal from "../modal";
import useTopics from "../useTopics";
import useWaveform from "../useWaveform";
import useMp3 from "../useMp3";
import useParticipants from "../useParticipants";
import { TopicList } from "./_components/TopicList";
import { TranscriptWithGutter } from "./_components/TranscriptWithGutter";
import { Topic } from "../webSocketTypes";
import React, { useEffect, useState, use } from "react";
import FinalSummary from "./finalSummary";
@@ -45,14 +47,91 @@ export default function TranscriptDetails(details: TranscriptDetails) {
const mp3 = useMp3(transcriptId, waiting);
const topics = useTopics(transcriptId);
const participants = useParticipants(transcriptId);
const waveform = useWaveform(
transcriptId,
waiting || mp3.audioDeleted === true,
);
const useActiveTopic = useState<Topic | null>(null);
const [activeTopic, setActiveTopic] = useActiveTopic;
const [finalSummaryElement, setFinalSummaryElement] =
useState<HTMLDivElement | null>(null);
// IntersectionObserver for active topic detection based on scroll position
useEffect(() => {
if (!topics.topics || topics.topics.length === 0) return;
const observer = new IntersectionObserver(
(entries) => {
// Find the most visible segment
let mostVisibleEntry: IntersectionObserverEntry | null = null;
let maxRatio = 0;
entries.forEach((entry) => {
if (entry.isIntersecting && entry.intersectionRatio > maxRatio) {
maxRatio = entry.intersectionRatio;
mostVisibleEntry = entry;
}
});
if (mostVisibleEntry) {
// Extract topicId from segment id (format: "segment-{topicId}-{idx}")
const segmentId = mostVisibleEntry.target.id;
const match = segmentId.match(/^segment-([^-]+)-/);
if (match) {
const topicId = match[1];
const topic = topics.topics?.find((t) => t.id === topicId);
if (topic && activeTopic?.id !== topic.id) {
setActiveTopic(topic);
}
}
}
},
{
threshold: [0, 0.25, 0.5, 0.75, 1],
rootMargin: "-20% 0px -20% 0px",
},
);
// Observe all segment elements
const segments = document.querySelectorAll('[id^="segment-"]');
segments.forEach((segment) => observer.observe(segment));
return () => observer.disconnect();
}, [topics.topics, activeTopic?.id, setActiveTopic]);
// Scroll handlers for bidirectional navigation
const handleTopicClick = (topicId: string) => {
// Scroll to first segment of this topic in transcript
const firstSegment = document.querySelector(`[id^="segment-${topicId}-"]`);
if (firstSegment) {
firstSegment.scrollIntoView({
behavior: "smooth",
block: "center",
});
}
};
const handleGutterClick = (topicId: string) => {
// Scroll to topic in list
const topicChip = document.getElementById(`topic-${topicId}`);
if (topicChip) {
topicChip.scrollIntoView({
behavior: "smooth",
block: "center",
});
}
};
const getSpeakerName = (speakerNumber: number) => {
if (!participants.response) return `Speaker ${speakerNumber}`;
return (
participants.response.find(
(participant) => participant.speaker == speakerNumber,
)?.name || `Speaker ${speakerNumber}`
);
};
useEffect(() => {
if (!waiting || !transcript.data) return;
@@ -121,7 +200,7 @@ export default function TranscriptDetails(details: TranscriptDetails) {
<>
<Grid
templateColumns="1fr"
templateRows="auto minmax(0, 1fr)"
templateRows="auto auto"
gap={4}
mt={4}
mb={4}
@@ -153,18 +232,18 @@ export default function TranscriptDetails(details: TranscriptDetails) {
<Grid
templateColumns={{ base: "minmax(0, 1fr)", md: "repeat(2, 1fr)" }}
templateRows={{
base: "auto minmax(0, 1fr) minmax(0, 1fr)",
md: "auto minmax(0, 1fr)",
base: "auto auto auto",
md: "auto auto",
}}
gap={4}
gridRowGap={2}
padding={4}
paddingBottom={0}
background="gray.bg"
border={"2px solid"}
borderColor={"gray.bg"}
borderRadius={8}
>
{/* Title */}
<GridItem colSpan={{ base: 1, md: 2 }}>
<Flex direction="column" gap={0}>
<Flex alignItems="center" gap={2}>
@@ -187,28 +266,64 @@ export default function TranscriptDetails(details: TranscriptDetails) {
)}
</Flex>
</GridItem>
<TopicList
topics={topics.topics || []}
useActiveTopic={useActiveTopic}
autoscroll={false}
transcriptId={transcriptId}
status={transcript.data?.status || null}
currentTranscriptText=""
/>
{transcript.data && topics.topics ? (
<>
<FinalSummary
transcript={transcript.data}
topics={topics.topics}
onUpdate={() => {
transcript.refetch().then(() => {});
{/* Left column: Topics List */}
<GridItem display="flex" flexDirection="column" gap={4} h="100%">
<TopicList
topics={topics.topics || []}
useActiveTopic={useActiveTopic}
autoscroll={false}
transcriptId={transcriptId}
status={transcript.data?.status || null}
currentTranscriptText=""
onTopicClick={handleTopicClick}
/>
{/* Transcript with colored gutter (scrollable) */}
{topics.topics && topics.topics.length > 0 && (
<Box
overflowY="auto"
flex={1}
minH="0"
pr={2}
css={{
"&::-webkit-scrollbar": {
width: "8px",
},
"&::-webkit-scrollbar-track": {
background: "transparent",
},
"&::-webkit-scrollbar-thumb": {
background: "#CBD5E0",
borderRadius: "4px",
},
"&::-webkit-scrollbar-thumb:hover": {
background: "#A0AEC0",
},
}}
finalSummaryRef={setFinalSummaryElement}
/>
</>
>
<TranscriptWithGutter
topics={topics.topics}
getSpeakerName={getSpeakerName}
onGutterClick={handleGutterClick}
/>
</Box>
)}
</GridItem>
{/* Right column: Final Summary */}
{transcript.data && topics.topics ? (
<FinalSummary
transcript={transcript.data}
topics={topics.topics}
onUpdate={() => {
transcript.refetch().then(() => {});
}}
finalSummaryRef={setFinalSummaryElement}
/>
) : (
<Flex justify={"center"} alignItems={"center"} h={"100%"}>
<div className="flex flex-col h-full justify-center content-center">
<Flex justify="center" alignItems="center" h="100%">
<Flex direction="column" h="full" justify="center" align="center">
{transcript?.data?.status == "processing" ? (
<Text>Loading Transcript</Text>
) : (
@@ -217,7 +332,7 @@ export default function TranscriptDetails(details: TranscriptDetails) {
back later
</Text>
)}
</div>
</Flex>
</Flex>
)}
</Grid>

View File

@@ -0,0 +1,18 @@
// Predefined color palette for topics
// Colors chosen for good contrast and visual distinction
export const TOPIC_COLORS = [
"#3B82F6", // blue
"#10B981", // green
"#F59E0B", // amber
"#EF4444", // red
"#8B5CF6", // violet
"#EC4899", // pink
"#14B8A6", // teal
"#F97316", // orange
"#6366F1", // indigo
"#84CC16", // lime
] as const;
export function getTopicColor(topicIndex: number): string {
return TOPIC_COLORS[topicIndex % TOPIC_COLORS.length];
}
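
A quick usage note on the helper above: colors are assigned by topic index and wrap once the 10-entry palette is exhausted, so the eleventh topic reuses the first color. A small illustration follows; the import path is illustrative, only `getTopicColor` and `TOPIC_COLORS` come from the file above.

```typescript
import { getTopicColor, TOPIC_COLORS } from "./topicColors";

// Index 0 and index 10 map to the same color because the palette has 10 entries.
const first = getTopicColor(0);    // "#3B82F6"
const wrapped = getTopicColor(10); // "#3B82F6" again (10 % 10 === 0)

// TopicList and TranscriptWithGutter both call getTopicColor(index) with the
// topic's position in the array, which keeps chip and gutter colors in sync.
console.log(first === wrapped, TOPIC_COLORS.length); // true 10
```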