render transcript

Sara
2024-02-08 20:15:13 +01:00
parent d09b0ae76a
commit 6acb2f6088
7 changed files with 264 additions and 290 deletions

View File

@@ -2,21 +2,16 @@
import React, { useEffect, useState } from "react";
import Recorder from "../../recorder";
import { TopicList } from "../../topicList";
import useWebRTC from "../../useWebRTC";
import useTranscript from "../../useTranscript";
import { useWebSockets } from "../../useWebSockets";
import useAudioDevice from "../../useAudioDevice";
import "../../../../styles/button.css";
import { Topic } from "../../webSocketTypes";
import LiveTrancription from "../../liveTranscription";
import DisconnectedIndicator from "../../disconnectedIndicator";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faGear } from "@fortawesome/free-solid-svg-icons";
import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock";
import { useRouter } from "next/navigation";
import Player from "../../player";
import useMp3 from "../../useMp3";
import WaveformLoading from "../../waveformLoading";
import { Box, Grid } from "@chakra-ui/react";
type TranscriptDetails = {
params: {
@@ -25,59 +20,36 @@ type TranscriptDetails = {
};
const TranscriptRecord = (details: TranscriptDetails) => {
const [stream, setStream] = useState<MediaStream | null>(null);
const [disconnected, setDisconnected] = useState<boolean>(false);
const transcript = useTranscript(details.params.transcriptId);
const useActiveTopic = useState<Topic | null>(null);
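// Dev-only shortcut: pressing "d" toggles the DisconnectedIndicator for manual testing.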
useEffect(() => {
if (process.env.NEXT_PUBLIC_ENV === "development") {
document.onkeyup = (e) => {
if (e.key === "d") {
setDisconnected((prev) => !prev);
}
};
}
}, []);
const transcript = useTranscript(details.params.transcriptId);
const webRTC = useWebRTC(stream, details.params.transcriptId);
const webSockets = useWebSockets(details.params.transcriptId);
const { audioDevices, getAudioStream } = useAudioDevice();
const [recordedTime, setRecordedTime] = useState(0);
const [startTime, setStartTime] = useState(0);
const [transcriptStarted, setTranscriptStarted] = useState(false);
const mp3 = useMp3(details.params.transcriptId, true);
const router = useRouter();
useEffect(() => {
if (!transcriptStarted && webSockets.transcriptText.length !== 0)
setTranscriptStarted(true);
}, [webSockets.transcriptText]);
const [status, setStatus] = useState(
webSockets.status.value || transcript.response?.status || "idle",
);
useEffect(() => {
const statusToRedirect = ["ended", "error"];
// TODO: handle error status better
const newStatus =
webSockets.status.value || transcript.response?.status || "idle";
setStatus(newStatus);
if (newStatus && (newStatus == "ended" || newStatus == "error")) {
console.log(newStatus, "redirecting");
// TODO: if there are no topics and the status is "error", go back to new
if (
transcript.response?.status &&
(statusToRedirect.includes(transcript.response?.status) ||
statusToRedirect.includes(webSockets.status.value))
) {
const newUrl = "/transcripts/" + details.params.transcriptId;
// Shallow redirection does not work on NextJS 13
// https://github.com/vercel/next.js/discussions/48110
// https://github.com/vercel/next.js/discussions/49540
router.replace(newUrl);
// history.replaceState({}, "", newUrl);
}
}
}, [webSockets.status.value, transcript.response?.status]);
useEffect(() => {
if (transcript.response?.status === "ended") mp3.getNow();
}, [transcript.response]);
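// Fetch the mp3 as soon as the waveform and duration have arrived over the socket.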
if (webSockets.waveform && webSockets.duration) mp3.getNow();
}, [webSockets.waveform, webSockets.duration]);
useEffect(() => {
lockWakeState();
@@ -87,8 +59,31 @@ const TranscriptRecord = (details: TranscriptDetails) => {
}, []);
return (
<div className="grid grid-rows-layout-topbar gap-2 lg:gap-4 max-h-full h-full">
{webSockets.waveform && webSockets.duration && mp3?.media ? (
<Grid
templateColumns="1fr"
templateRows="minmax(0, 1fr) auto"
gap={4}
mb={4}
>
<Box
padding={4}
background="gray.bg"
borderColor={"gray.bg"}
borderRadius={8}
>
<TopicList
topics={webSockets.topics}
useActiveTopic={useActiveTopic}
autoscroll={true}
transcriptId={details.params.transcriptId}
status={status}
currentTranscriptText={webSockets.accumulatedText}
/>
</Box>
{status == "processing" && // todo send an event when the mp3 is ready
webSockets.waveform &&
webSockets.duration &&
mp3?.media ? (
<Player
topics={webSockets.topics || []}
useActiveTopic={useActiveTopic}
@@ -96,78 +91,13 @@ const TranscriptRecord = (details: TranscriptDetails) => {
media={mp3.media}
mediaDuration={webSockets.duration}
/>
) : recordedTime ? (
) : status == "processing" ? (
<WaveformLoading />
) : (
<Recorder
setStream={setStream}
onStop={() => {
setStream(null);
setRecordedTime(Date.now() - startTime);
webRTC?.send(JSON.stringify({ cmd: "STOP" }));
}}
onRecord={() => {
setStartTime(Date.now());
}}
getAudioStream={getAudioStream}
audioDevices={audioDevices}
transcriptId={details.params.transcriptId}
/>
// TODO: only start the recording animation once the "recording" status arrives
<Recorder transcriptId={details.params.transcriptId} status={status} />
)}
<div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-mobile-inner lg:grid-rows-1 gap-2 lg:gap-4 h-full">
<TopicList
topics={webSockets.topics}
useActiveTopic={useActiveTopic}
autoscroll={true}
transcriptId={details.params.transcriptId}
/>
<section
className={`w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4`}
>
{!recordedTime ? (
<>
{transcriptStarted && (
<h2 className="md:text-lg font-bold">Transcription</h2>
)}
<div className="flex flex-col justify-center align center text-center h-full">
<div className="py-2 h-auto">
{!transcriptStarted ? (
<div className="text-center text-gray-500">
The conversation transcript will appear here shortly after
you start recording.
</div>
) : (
<LiveTrancription
text={webSockets.transcriptText}
translateText={webSockets.translateText}
/>
)}
</div>
</div>
</>
) : (
<div className="flex flex-col justify-center align center text-center h-full text-gray-500">
<div className="p-2 md:p-4">
<FontAwesomeIcon
icon={faGear}
className="animate-spin-slow h-14 w-14 md:h-20 md:w-20"
/>
</div>
<p>
We are generating the final summary for you. This may take a
couple of minutes. Please do not navigate away from the page
during this time.
</p>
{/* NTH If login required remove last sentence */}
</div>
)}
</section>
</div>
{disconnected && <DisconnectedIndicator />}
</div>
</Grid>
);
};

View File

@@ -1,4 +1,4 @@
import React, { useEffect, useState } from "react";
import React from "react";
import Dropdown, { Option } from "react-dropdown";
import "react-dropdown/style.css";

View File

@@ -1,9 +1,10 @@
import React from "react";
import useApi from "../../lib/useApi";
import { Body_transcript_record_upload_v1_transcripts__transcript_id__record_upload_post } from "../../api";
import { Button } from "@chakra-ui/react";
type FileUploadButton = {
transcriptId: string;
disabled?: boolean;
};
export default function FileUploadButton(props: FileUploadButton) {
@@ -32,12 +33,14 @@ export default function FileUploadButton(props: FileUploadButton) {
return (
<>
<button
className="bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white ml-2 md:ml:4 md:h-[78px] md:min-w-[100px] text-lg"
<Button
onClick={triggerFileUpload}
colorScheme="blue"
mr={2}
disabled={props.disabled}
>
Upload File
</button>
</Button>
<input
type="file"

View File

@@ -3,39 +3,50 @@ import React, { useRef, useEffect, useState } from "react";
import WaveSurfer from "wavesurfer.js";
import RecordPlugin from "../../lib/custom-plugins/record";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faMicrophone } from "@fortawesome/free-solid-svg-icons";
import { formatTime } from "../../lib/time";
import AudioInputsDropdown from "./audioInputsDropdown";
import { Option } from "react-dropdown";
import { waveSurferStyles } from "../../styles/recorder";
import { useError } from "../../(errors)/errorContext";
import FileUploadButton from "./fileUploadButton";
import useWebRTC from "./useWebRTC";
import useAudioDevice from "./useAudioDevice";
import {
Box,
Flex,
IconButton,
Menu,
MenuButton,
MenuItemOption,
MenuList,
MenuOptionGroup,
} from "@chakra-ui/react";
import StopRecordIcon from "../../styles/icons/stopRecord";
import PlayIcon from "../../styles/icons/play";
import { LuScreenShare } from "react-icons/lu";
import { FaMicrophone } from "react-icons/fa";
type RecorderProps = {
setStream: React.Dispatch<React.SetStateAction<MediaStream | null>>;
onStop: () => void;
onRecord?: () => void;
getAudioStream: (deviceId) => Promise<MediaStream | null>;
audioDevices: Option[];
transcriptId: string;
status: string;
};
export default function Recorder(props: RecorderProps) {
const waveformRef = useRef<HTMLDivElement>(null);
const [wavesurfer, setWavesurfer] = useState<WaveSurfer | null>(null);
const [record, setRecord] = useState<RecordPlugin | null>(null);
const [isRecording, setIsRecording] = useState<boolean>(false);
const [hasRecorded, setHasRecorded] = useState<boolean>(false);
const [isPlaying, setIsPlaying] = useState<boolean>(false);
const [currentTime, setCurrentTime] = useState<number>(0);
const [timeInterval, setTimeInterval] = useState<number | null>(null);
const [duration, setDuration] = useState<number>(0);
const [deviceId, setDeviceId] = useState<string | null>(null);
const [recordStarted, setRecordStarted] = useState(false);
const [showDevices, setShowDevices] = useState(false);
const { setError } = useError();
const [stream, setStream] = useState<MediaStream | null>(null);
// Time tracking: IIRC the displayed time was drifting without this. To be re-tested.
const [startTime, setStartTime] = useState(0);
const [currentTime, setCurrentTime] = useState<number>(0);
const [timeInterval, setTimeInterval] = useState<number | null>(null);
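// The WebRTC connection follows whichever stream is currently active (mic or tab mix).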
const webRTC = useWebRTC(stream, props.transcriptId);
const { audioDevices, getAudioStream } = useAudioDevice();
// Function used to set up keyboard shortcuts for the Stream Deck
const setupProjectorKeys = (): (() => void) => {
@@ -106,22 +117,13 @@ export default function Recorder(props: RecorderProps) {
waveSurferStyles.playerStyle.backgroundColor;
wsWrapper.style.borderRadius = waveSurferStyles.playerStyle.borderRadius;
_wavesurfer.on("play", () => {
setIsPlaying(true);
});
_wavesurfer.on("pause", () => {
setIsPlaying(false);
});
_wavesurfer.on("timeupdate", setCurrentTime);
setRecord(_wavesurfer.registerPlugin(RecordPlugin.create()));
setWavesurfer(_wavesurfer);
return () => {
_wavesurfer.destroy();
setIsRecording(false);
setIsPlaying(false);
setCurrentTime(0);
};
}
@@ -130,7 +132,7 @@ export default function Recorder(props: RecorderProps) {
useEffect(() => {
if (isRecording) {
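// Derive elapsed time from the wall clock rather than incrementing a counter,
// since interval ticks drift (see the time-tracking note above). Note that
// Date.now() - startTime is in milliseconds, unlike the old once-per-second
// counter, so formatTime is presumably expected to take ms here.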
const interval = window.setInterval(() => {
setCurrentTime((prev) => prev + 1);
setCurrentTime(Date.now() - startTime);
}, 1000);
setTimeInterval(interval);
return () => clearInterval(interval);
@@ -147,20 +149,20 @@ export default function Recorder(props: RecorderProps) {
if (!record) return console.log("no record");
if (record.isRecording()) {
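// Stopping: notify the backend over WebRTC, stop the recorder plugin, and release any tab-capture tracks.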
if (props.onStop) props.onStop();
setStream(null);
webRTC?.send(JSON.stringify({ cmd: "STOP" }));
record.stopRecording();
if (screenMediaStream) {
screenMediaStream.getTracks().forEach((t) => t.stop());
}
setIsRecording(false);
setHasRecorded(true);
setScreenMediaStream(null);
setDestinationStream(null);
} else {
if (props.onRecord) props.onRecord();
const stream = await getCurrentStream();
const stream = await getMicrophoneStream();
setStartTime(Date.now());
if (props.setStream) props.setStream(stream);
setStream(stream);
if (stream) {
await record.startRecording(stream);
setIsRecording(true);
@@ -198,7 +200,7 @@ export default function Recorder(props: RecorderProps) {
if (destinationStream !== null) return console.log("already recording");
// connect mic audio (microphone)
const micStream = await getCurrentStream();
const micStream = await getMicrophoneStream();
if (!micStream) {
console.log("no microphone audio");
return;
@@ -227,7 +229,7 @@ export default function Recorder(props: RecorderProps) {
useEffect(() => {
if (!record) return;
if (!destinationStream) return;
if (props.setStream) props.setStream(destinationStream);
setStream(destinationStream);
if (destinationStream) {
record.startRecording(destinationStream);
setIsRecording(true);
@@ -238,115 +240,87 @@ export default function Recorder(props: RecorderProps) {
startTabRecording();
}, [record, screenMediaStream]);
const handlePlayClick = () => {
wavesurfer?.playPause();
};
const timeLabel = () => {
if (isRecording) return formatTime(currentTime);
if (duration) return `${formatTime(currentTime)}/${formatTime(duration)}`;
return "";
};
const getCurrentStream = async () => {
setRecordStarted(true);
return deviceId && props.getAudioStream
? await props.getAudioStream(deviceId)
: null;
const getMicrophoneStream = async () => {
return deviceId && getAudioStream ? await getAudioStream(deviceId) : null;
};
useEffect(() => {
if (props.audioDevices && props.audioDevices.length > 0) {
setDeviceId(props.audioDevices[0].value);
if (audioDevices && audioDevices.length > 0) {
setDeviceId(audioDevices[0].value);
}
}, [props.audioDevices]);
}, [audioDevices]);
return (
<div className="flex items-center w-full relative">
<div className="flex-grow items-end relative">
<div
ref={waveformRef}
className="flex-grow rounded-lg md:rounded-xl h-20"
></div>
<div className="absolute right-2 bottom-0">
{isRecording && (
<div className="inline-block bg-red-500 rounded-full w-2 h-2 my-auto mr-1 animate-ping"></div>
)}
{timeLabel()}
</div>
</div>
{hasRecorded && (
<>
<button
className={`${
isPlaying
? "bg-orange-400 hover:bg-orange-500 focus-visible:bg-orange-500"
: "bg-green-400 hover:bg-green-500 focus-visible:bg-green-500"
} text-white ml-2 md:ml:4 md:h-[78px] md:min-w-[100px] text-lg`}
id="play-btn"
onClick={handlePlayClick}
>
{isPlaying ? "Pause" : "Play"}
</button>
</>
)}
{!hasRecorded && (
<>
<button
className={`${
isRecording
? "bg-red-400 hover:bg-red-500 focus-visible:bg-red-500"
: "bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500"
} text-white ml-2 md:ml:4 md:h-[78px] md:min-w-[100px] text-lg`}
<Flex className="flex items-center w-full relative">
<IconButton
aria-label={isRecording ? "Stop" : "Record"}
icon={isRecording ? <StopRecordIcon /> : <PlayIcon />}
variant={"ghost"}
colorScheme={"blue"}
mr={2}
onClick={handleRecClick}
disabled={isPlaying}
>
{isRecording ? "Stop" : "Record"}
</button>
/>
<FileUploadButton
transcriptId={props.transcriptId}
disabled={isRecording}
></FileUploadButton>
{!isRecording && (
<button
className={`${
isRecording
? "bg-red-400 hover:bg-red-500 focus-visible:bg-red-500"
: "bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500"
} text-white ml-2 md:ml:4 md:h-[78px] md:min-w-[100px] text-lg`}
{!isRecording && (window as any).chrome && (
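// Tab recording relies on capture APIs that in practice only Chromium supports; window.chrome is a rough heuristic for that.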
<IconButton
aria-label={"Record Tab"}
icon={<LuScreenShare />}
variant={"ghost"}
colorScheme={"blue"}
disabled={isRecording}
mr={2}
onClick={handleRecordTabClick}
>
Record
<br />a tab
</button>
)}
{props.audioDevices && props.audioDevices?.length > 0 && deviceId && (
<>
<button
className="text-center text-blue-400 hover:text-blue-700 ml-2 md:ml:4 p-2 rounded-lg focus-visible:outline outline-blue-400"
onClick={() => setShowDevices((prev) => !prev)}
>
<FontAwesomeIcon icon={faMicrophone} className="h-5 w-auto" />
</button>
<div
className={`absolute z-20 bottom-[-1rem] right-0 bg-white rounded ${
showDevices ? "visible" : "invisible"
}`}
>
<AudioInputsDropdown
setDeviceId={setDeviceId}
audioDevices={props.audioDevices}
disabled={recordStarted}
hide={() => setShowDevices(false)}
deviceId={deviceId}
/>
</div>
</>
)}
</>
{audioDevices && audioDevices?.length > 0 && deviceId && !isRecording && (
<Menu>
<MenuButton
as={IconButton}
aria-label={"Switch microphone"}
icon={<FaMicrophone />}
variant={"ghost"}
disabled={isRecording}
colorScheme={"blue"}
mr={2}
/>
<MenuList>
<MenuOptionGroup defaultValue={audioDevices[0].value} type="radio">
{audioDevices.map((device) => (
<MenuItemOption
key={device.value}
value={device.value}
onClick={() => setDeviceId(device.value)}
>
{device.label}
</MenuItemOption>
))}
</MenuOptionGroup>
</MenuList>
</Menu>
)}
</div>
<Box position="relative" flex={1}>
<Box ref={waveformRef} height={14}></Box>
<Box
zIndex={50}
backgroundColor="rgba(255, 255, 255, 0.5)"
fontSize={"sm"}
shadow={"0px 0px 4px 0px white"}
position={"absolute"}
right={0}
bottom={0}
>
{timeLabel()}
</Box>
</Box>
</Flex>
);
}

View File

@@ -14,6 +14,7 @@ import {
Flex,
Text,
} from "@chakra-ui/react";
import { featureEnabled } from "../domainContext";
type TopicListProps = {
topics: Topic[];
@@ -23,6 +24,8 @@ type TopicListProps = {
];
autoscroll: boolean;
transcriptId: string;
status: string;
currentTranscriptText: string;
};
export function TopicList({
@@ -30,6 +33,8 @@ export function TopicList({
useActiveTopic,
autoscroll,
transcriptId,
status,
currentTranscriptText,
}: TopicListProps) {
const [activeTopic, setActiveTopic] = useActiveTopic;
const [autoscrollEnabled, setAutoscrollEnabled] = useState<boolean>(true);
@@ -72,7 +77,7 @@ export function TopicList({
useEffect(() => {
if (autoscroll) {
const topicsDiv = document.getElementById("topics-div");
const topicsDiv = document.getElementById("scroll-div");
topicsDiv && toggleScroll(topicsDiv);
}
@@ -80,10 +85,10 @@ export function TopicList({
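// Keep the list pinned to the bottom as new topics or live text stream in.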
useEffect(() => {
if (autoscroll && autoscrollEnabled) scrollToBottom();
}, [topics.length]);
}, [topics.length, currentTranscriptText]);
const scrollToBottom = () => {
const topicsDiv = document.getElementById("topics-div");
const topicsDiv = document.getElementById("scroll-div");
if (topicsDiv) topicsDiv.scrollTop = topicsDiv.scrollHeight;
};
@@ -97,18 +102,26 @@ export function TopicList({
);
};
const requireLogin = featureEnabled("requireLogin");
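// Open the newest topic as it arrives, then collapse it again once fresh live text starts streaming below it.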
useEffect(() => {
setActiveTopic(topics[topics.length - 1]);
}, [topics]);
useEffect(() => {
if (activeTopic && currentTranscriptText) setActiveTopic(null);
}, [activeTopic, currentTranscriptText]);
return (
<Flex
position={"relative"}
w={"100%"}
h={"100%"}
dir="column"
flexDirection={"column"}
justify={"center"}
align={"center"}
flexShrink={0}
>
{topics.length > 0 ? (
<>
{autoscroll && (
<ScrollToBottom
visible={!autoscrollEnabled}
@@ -116,11 +129,14 @@ export function TopicList({
/>
)}
<Accordion
id="topics-div"
<Box
id="scroll-div"
overflowY={"auto"}
h={"100%"}
onScroll={handleScroll}
>
{topics.length > 0 && (
<Accordion
index={topics.findIndex((topic) => topic.id == activeTopic?.id)}
variant="custom"
allowToggle
@@ -200,18 +216,47 @@ export function TopicList({
</AccordionItem>
))}
</Accordion>
</>
) : (
)}
{status == "recording" && (
<Box textAlign={"center"}>
<Text>{currentTranscriptText}</Text>
</Box>
)}
{(status == "recording" || status == "idle") &&
currentTranscriptText.length == 0 &&
topics.length == 0 && (
<Box textAlign={"center"} textColor="gray">
<Text>
Discussion topics will appear here after you start recording.
The discussion transcript will appear here after you start
recording.
</Text>
<Text>
It may take up to 5 minutes of conversation for the first topic to
appear.
It may take up to 5 minutes of conversation before it first appears.
</Text>
</Box>
)}
{status == "processing" && (
<Box textAlign={"center"} textColor="gray">
<Text>We are processing the recording. Please wait.</Text>
{!requireLogin && (
<span>
Please do not navigate away from the page during this time.
</span>
)}
</Box>
)}
{status == "ended" && topics.length == 0 && (
<Box textAlign={"center"} textColor="gray">
<Text>The recording ended without any topics being found.</Text>
</Box>
)}
{status == "error" && (
<Box textAlign={"center"} textColor="gray">
<Text>There was an error processing your recording.</Text>
</Box>
)}
</Box>
</Flex>
);
}

View File

@@ -6,8 +6,9 @@ import { AudioWaveform, GetTranscriptSegmentTopic } from "../../api";
import useApi from "../../lib/useApi";
export type UseWebSockets = {
transcriptText: string;
transcriptTextLive: string;
translateText: string;
accumulatedText: string;
title: string;
topics: Topic[];
finalSummary: FinalSummary;
@@ -17,7 +18,7 @@ export type UseWebSockets = {
};
export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
const [transcriptText, setTranscriptText] = useState<string>("");
const [transcriptTextLive, setTranscriptTextLive] = useState<string>("");
const [translateText, setTranslateText] = useState<string>("");
const [title, setTitle] = useState<string>("");
const [textQueue, setTextQueue] = useState<string[]>([]);
@@ -29,12 +30,14 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
const [finalSummary, setFinalSummary] = useState<FinalSummary>({
summary: "",
});
const [status, setStatus] = useState<Status>({ value: "initial" });
const [status, setStatus] = useState<Status>({ value: "" });
const { setError } = useError();
const { websocket_url } = useContext(DomainContext);
const api = useApi();
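// accumulatedText is (roughly) the transcript text not yet folded into a topic; transcriptTextLive is the segment currently on screen.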
const [accumulatedText, setAccumulatedText] = useState<string>("");
useEffect(() => {
if (isProcessing || textQueue.length === 0) {
return;
@@ -42,13 +45,12 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
setIsProcessing(true);
const text = textQueue[0];
setTranscriptText(text);
setTranscriptTextLive(text);
setTranslateText(translationQueue[0]);
const WPM_READING = 200 + textQueue.length * 10; // words per minute to read
const wordCount = text.split(/\s+/).length;
const delay = (wordCount / WPM_READING) * 60 * 1000;
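// e.g. a 50-word segment with an empty queue is shown for (50 / 200) * 60 * 1000
// = 15000 ms; each queued segment adds 10 WPM, so a backlog drains faster.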
console.log(`displaying "${text}" for ${delay}ms`);
setTimeout(() => {
setIsProcessing(false);
setTextQueue((prevQueue) => prevQueue.slice(1));
@@ -92,7 +94,7 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
},
];
setTranscriptText("Lorem Ipsum");
setTranscriptTextLive("Lorem Ipsum");
setTopics([
{
id: "1",
@@ -190,9 +192,13 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
setFinalSummary({ summary: "This is the final summary" });
}
if (e.key === "z" && process.env.NEXT_PUBLIC_ENV === "development") {
setTranscriptText(
setTranscriptTextLive(
"This text is in English, and it is a pretty long sentence to test the limits",
);
setAccumulatedText(
"This text is in English, and it is a pretty long sentence to test the limits. This text is in English, and it is a pretty long sentence to test the limits",
);
setStatus({ value: "recording" });
setTopics([
{
id: "1",
@@ -333,6 +339,8 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
console.debug("TRANSCRIPT event:", newText);
setTextQueue((prevQueue) => [...prevQueue, newText]);
setTranslationQueue((prevQueue) => [...prevQueue, newTranslation]);
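// Grow the running buffer of text that no topic has absorbed yet.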
setAccumulatedText((prevText) => prevText + " " + newText);
break;
case "TOPIC":
@@ -345,6 +353,10 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
prevTopics[index] = topic;
return prevTopics;
}
setAccumulatedText((prevText) =>
prevText.slice(topic.transcript.length),
);
return [...prevTopics, topic];
});
console.debug("TOPIC event:", message.data);
@@ -419,18 +431,18 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
break;
case 1005: // Closed by the client (Firefox)
break;
case 1001: // Navigate away
break;
default:
setError(
new Error(`WebSocket closed unexpectedly with code: ${event.code}`),
"Disconnected",
"Disconnected from the server. Please refresh the page.",
);
console.log(
"Socket is closed. Reconnect will be attempted in 1 second.",
event.reason,
);
setTimeout(function () {
ws = new WebSocket(url);
}, 1000);
// TODO: handle reconnect with socket.io
}
};
@@ -440,8 +452,9 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
}, [transcriptId, !api]);
return {
transcriptText,
transcriptTextLive,
translateText,
accumulatedText,
topics,
finalSummary,
title,

View File

@@ -0,0 +1,9 @@
import { Icon, IconProps } from "@chakra-ui/react";
export default function StopRecordIcon(props: IconProps) {
return (
<Icon viewBox="0 0 20 20" {...props}>
<rect width="20" height="20" rx="1" fill="currentColor" />
</Icon>
);
}