Implement multi-tenancy

This commit is contained in:
Sara
2023-10-27 11:26:00 +02:00
committed by Mathieu Virbel
parent 6bd5247bab
commit 4a69bffc9c
37 changed files with 409 additions and 236 deletions

View File

@@ -0,0 +1,109 @@
"use client";
import Modal from "../modal";
import getApi from "../../../lib/getApi";
import useTranscript from "../useTranscript";
import useTopics from "../useTopics";
import useWaveform from "../useWaveform";
import { TopicList } from "../topicList";
import Recorder from "../recorder";
import { Topic } from "../webSocketTypes";
import React, { useEffect, useState } from "react";
import "../../../styles/button.css";
import FinalSummary from "../finalSummary";
import ShareLink from "../shareLink";
import QRCode from "react-qr-code";
import TranscriptTitle from "../transcriptTitle";
import { useFiefIsAuthenticated } from "@fief/fief/nextjs/react";
import { featureEnabled } from "../../domainContext";
type TranscriptDetails = {
params: {
transcriptId: string;
};
};
export default function TranscriptDetails(details: TranscriptDetails) {
const isAuthenticated = useFiefIsAuthenticated();
const api = getApi();
const [transcriptId, setTranscriptId] = useState<string>("");
const transcript = useTranscript(api, transcriptId);
const topics = useTopics(api, transcriptId);
const waveform = useWaveform(api, transcriptId);
const useActiveTopic = useState<Topic | null>(null);
  useEffect(() => {
    if (featureEnabled("requireLogin") && !isAuthenticated) return;
    setTranscriptId(details.params.transcriptId);
  }, [api, isAuthenticated, details.params.transcriptId]);
  if (transcript?.error /* || topics?.error || waveform?.error */) {
return (
<Modal
title="Transcription Not Found"
text="A trascription with this ID does not exist."
/>
);
}
const fullTranscript =
topics.topics
?.map((topic) => topic.transcript)
.join("\n\n")
.replace(/ +/g, " ")
.trim() || "";
return (
<>
{transcript?.loading === true || topics?.loading === true ? (
<Modal title="Loading" text={"Loading transcript..."} />
) : (
<>
<div className="flex flex-col">
{transcript?.response?.title && (
<TranscriptTitle title={transcript.response.title} />
)}
{waveform?.loading === false && (
<Recorder
topics={topics?.topics || []}
useActiveTopic={useActiveTopic}
waveform={waveform?.waveform}
isPastMeeting={true}
transcriptId={transcript?.response?.id}
/>
)}
</div>
<div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-2 lg:grid-rows-1 gap-2 lg:gap-4 h-full">
<TopicList
topics={topics?.topics || []}
useActiveTopic={useActiveTopic}
autoscroll={false}
/>
<div className="w-full h-full grid grid-rows-layout-one grid-cols-1 gap-2 lg:gap-4">
<section className=" bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4 h-full">
{transcript?.response?.longSummary && (
<FinalSummary
fullTranscript={fullTranscript}
summary={transcript?.response?.longSummary}
/>
)}
</section>
<section className="flex items-center">
<div className="mr-4 hidden md:block h-auto">
<QRCode
value={`${process.env.NEXT_PUBLIC_SITE_URL}transcripts/${details.params.transcriptId}`}
level="L"
size={98}
/>
</div>
<div className="flex-grow max-w-full">
<ShareLink />
</div>
</section>
</div>
</div>
</>
)}
</>
);
}

View File

@@ -0,0 +1,142 @@
"use client";
import React, { useEffect, useState } from "react";
import Recorder from "../../recorder";
import { TopicList } from "../../topicList";
import useWebRTC from "../../useWebRTC";
import useTranscript from "../../useTranscript";
import { useWebSockets } from "../../useWebSockets";
import useAudioDevice from "../../useAudioDevice";
import "../../../../styles/button.css";
import { Topic } from "../../webSocketTypes";
import getApi from "../../../../lib/getApi";
import LiveTranscription from "../../liveTranscription";
import DisconnectedIndicator from "../../disconnectedIndicator";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faGear } from "@fortawesome/free-solid-svg-icons";
import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock";
type TranscriptDetails = {
params: {
transcriptId: string;
};
};
const TranscriptRecord = (details: TranscriptDetails) => {
const [stream, setStream] = useState<MediaStream | null>(null);
const [disconnected, setDisconnected] = useState<boolean>(false);
const useActiveTopic = useState<Topic | null>(null);
  useEffect(() => {
    if (process.env.NEXT_PUBLIC_ENV === "development") {
      document.onkeyup = (e) => {
        if (e.key === "d") {
          setDisconnected((prev) => !prev);
        }
      };
      return () => {
        document.onkeyup = null;
      };
    }
  }, []);
const api = getApi();
const transcript = useTranscript(api, details.params.transcriptId);
const webRTC = useWebRTC(stream, details.params.transcriptId, api);
const webSockets = useWebSockets(details.params.transcriptId);
const { audioDevices, getAudioStream } = useAudioDevice();
const [hasRecorded, setHasRecorded] = useState(false);
const [transcriptStarted, setTranscriptStarted] = useState(false);
useEffect(() => {
if (!transcriptStarted && webSockets.transcriptText.length !== 0)
setTranscriptStarted(true);
}, [webSockets.transcriptText]);
  useEffect(() => {
    if (transcript?.response?.longSummary) {
      const newUrl = `/transcripts/${transcript.response.id}`;
      // Shallow redirection does not work on NextJS 13
      // https://github.com/vercel/next.js/discussions/48110
      // https://github.com/vercel/next.js/discussions/49540
      // router.push(newUrl, undefined, { shallow: true });
      history.replaceState({}, "", newUrl);
    }
  }, [transcript?.response]);
useEffect(() => {
lockWakeState();
return () => {
releaseWakeState();
};
}, []);
return (
<>
<Recorder
setStream={setStream}
onStop={() => {
setStream(null);
setHasRecorded(true);
webRTC?.send(JSON.stringify({ cmd: "STOP" }));
}}
topics={webSockets.topics}
getAudioStream={getAudioStream}
useActiveTopic={useActiveTopic}
isPastMeeting={false}
audioDevices={audioDevices}
/>
<div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-mobile-inner lg:grid-rows-1 gap-2 lg:gap-4 h-full">
<TopicList
topics={webSockets.topics}
useActiveTopic={useActiveTopic}
autoscroll={true}
/>
        <section className="w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4">
{!hasRecorded ? (
<>
{transcriptStarted && (
<h2 className="md:text-lg font-bold">Transcription</h2>
)}
<div className="flex flex-col justify-center align center text-center h-full">
<div className="py-2 h-auto">
{!transcriptStarted ? (
<div className="text-center text-gray-500">
The conversation transcript will appear here shortly after
you start recording.
</div>
) : (
                  <LiveTranscription
text={webSockets.transcriptText}
translateText={webSockets.translateText}
/>
)}
</div>
</div>
</>
) : (
<div className="flex flex-col justify-center align center text-center h-full text-gray-500">
<div className="p-2 md:p-4">
<FontAwesomeIcon
icon={faGear}
className="animate-spin-slow h-14 w-14 md:h-20 md:w-20"
/>
</div>
<p>
We are generating the final summary for you. This may take a
couple of minutes. Please do not navigate away from the page
during this time.
</p>
</div>
)}
</section>
</div>
{disconnected && <DisconnectedIndicator />}
</>
);
};
export default TranscriptRecord;

View File

@@ -0,0 +1,28 @@
import React from "react";
import Dropdown, { Option } from "react-dropdown";
import "react-dropdown/style.css";
const AudioInputsDropdown: React.FC<{
audioDevices: Option[];
disabled: boolean;
hide: () => void;
deviceId: string;
setDeviceId: React.Dispatch<React.SetStateAction<string | null>>;
}> = (props) => {
const handleDropdownChange = (option: Option) => {
props.setDeviceId(option.value);
props.hide();
};
return (
<Dropdown
options={props.audioDevices}
onChange={handleDropdownChange}
value={props.deviceId}
className="flex-grow w-full"
disabled={props.disabled}
/>
);
};
export default AudioInputsDropdown;

View File

@@ -0,0 +1,57 @@
import { useState } from "react";
import { V1TranscriptsCreateRequest } from "../../api/apis/DefaultApi";
import { GetTranscript } from "../../api";
import { useError } from "../../(errors)/errorContext";
import getApi from "../../lib/getApi";
type CreateTranscript = {
response: GetTranscript | null;
loading: boolean;
error: Error | null;
create: (params: V1TranscriptsCreateRequest["createTranscript"]) => void;
};
const useCreateTranscript = (): CreateTranscript => {
const [response, setResponse] = useState<GetTranscript | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
const api = getApi();
const create = (params: V1TranscriptsCreateRequest["createTranscript"]) => {
if (loading) return;
setLoading(true);
const requestParameters: V1TranscriptsCreateRequest = {
createTranscript: {
name: params.name || "Unnamed Transcript", // Default
targetLanguage: params.targetLanguage || "en", // Default
},
};
console.debug(
"POST - /v1/transcripts/ - Requesting new transcription creation",
requestParameters,
);
api
.v1TranscriptsCreate(requestParameters)
.then((result) => {
setResponse(result);
setLoading(false);
console.debug("New transcript created:", result);
})
.catch((err) => {
setError(err);
setErrorState(err);
setLoading(false);
});
};
return { response, loading, error, create };
};
export default useCreateTranscript;
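
A minimal consumer sketch for this hook (hypothetical component and import paths, shown only to illustrate the hook's contract; the real call site is the TranscriptCreate page in this commit):

"use client";
import { useEffect } from "react";
import { useRouter } from "next/navigation";
import useCreateTranscript from "./createTranscript"; // path assumed

export default function NewTranscriptButton() {
  const router = useRouter();
  const { response, loading, error, create } = useCreateTranscript();

  // Once the API returns the new transcript, move to its recording page.
  useEffect(() => {
    if (response) router.push(`/transcripts/${response.id}/record`);
  }, [response]);

  return (
    <button
      disabled={loading}
      onClick={() => create({ name: "Weekly sync", targetLanguage: "en" })}
    >
      {loading ? "Creating..." : error ? "Retry" : "New transcript"}
    </button>
  );
}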

View File

@@ -0,0 +1,13 @@
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faLinkSlash } from "@fortawesome/free-solid-svg-icons";
export default function DisconnectedIndicator() {
return (
<div className="absolute top-0 left-0 w-full h-full bg-black opacity-50 flex justify-center items-center">
<div className="text-white text-2xl">
<FontAwesomeIcon icon={faLinkSlash} className="mr-2" />
Disconnected
</div>
</div>
);
}

View File

@@ -0,0 +1,74 @@
import React, { useRef, useState } from "react";
import Markdown from "react-markdown";
import "../../styles/markdown.css";
type FinalSummaryProps = {
summary: string;
fullTranscript: string;
};
export default function FinalSummary(props: FinalSummaryProps) {
  const finalSummaryRef = useRef<HTMLDivElement>(null);
const [isCopiedSummary, setIsCopiedSummary] = useState(false);
const [isCopiedTranscript, setIsCopiedTranscript] = useState(false);
  const handleCopySummaryClick = () => {
    const textToCopy = finalSummaryRef.current?.innerText;
    textToCopy &&
      navigator.clipboard.writeText(textToCopy).then(() => {
        setIsCopiedSummary(true);
        // Reset the copied state after 2 seconds
        setTimeout(() => setIsCopiedSummary(false), 2000);
      });
  };
  const handleCopyTranscriptClick = () => {
    const textToCopy = props.fullTranscript;
    textToCopy &&
      navigator.clipboard.writeText(textToCopy).then(() => {
        setIsCopiedTranscript(true);
        // Reset the copied state after 2 seconds
        setTimeout(() => setIsCopiedTranscript(false), 2000);
      });
  };
return (
<div className="overflow-y-auto h-auto max-h-full">
<div className="flex flex-row flex-wrap-reverse justify-between items-center">
<h2 className="text-lg sm:text-xl md:text-2xl font-bold">
Final Summary
</h2>
<div className="ml-auto flex space-x-2 mb-2">
<button
onClick={handleCopyTranscriptClick}
className={
(isCopiedTranscript ? "bg-blue-500" : "bg-blue-400") +
" hover:bg-blue-500 focus-visible:bg-blue-500 text-white rounded p-2"
}
style={{ minHeight: "30px" }}
>
{isCopiedTranscript ? "Copied!" : "Copy Full Transcript"}
</button>
<button
onClick={handleCopySummaryClick}
className={
(isCopiedSummary ? "bg-blue-500" : "bg-blue-400") +
" hover:bg-blue-500 focus-visible:bg-blue-500 text-white rounded p-2"
}
style={{ minHeight: "30px" }}
>
{isCopiedSummary ? "Copied!" : "Copy Summary"}
</button>
</div>
</div>
      <div ref={finalSummaryRef} className="markdown">
        <Markdown>{props.summary}</Markdown>
      </div>
</div>
);
}

View File

@@ -0,0 +1,23 @@
type LiveTranscriptionProps = {
text: string;
translateText: string;
};
export default function LiveTranscription(props: LiveTranscriptionProps) {
return (
<div className="text-center p-4">
<p
className={`text-lg md:text-xl lg:text-2xl font-bold ${
props.translateText ? "line-clamp-2 lg:line-clamp-5" : "line-clamp-4"
}`}
>
{props.text}
</p>
{props.translateText && (
<p className="text-base md:text-lg lg:text-xl font-bold line-clamp-2 lg:line-clamp-4 mt-4">
{props.translateText}
</p>
)}
</div>
);
}

View File

@@ -0,0 +1,15 @@
type ModalProps = {
title: string;
text: string;
};
export default function Modal(props: ModalProps) {
  return (
    <div className="w-full flex flex-col items-center justify-center bg-white px-6 py-8 mt-8 rounded-xl">
      <h1 className="text-2xl font-bold text-blue-500">{props.title}</h1>
      <p className="text-gray-500 text-center mt-5">{props.text}</p>
    </div>
  );
}

View File

@@ -0,0 +1,149 @@
"use client";
import React, { useEffect, useState } from "react";
import useAudioDevice from "../useAudioDevice";
import "react-select-search/style.css";
import "../../../styles/button.css";
import "../../../styles/form.scss";
import About from "../../../(aboutAndPrivacy)/about";
import Privacy from "../../../(aboutAndPrivacy)/privacy";
import { useRouter } from "next/navigation";
import useCreateTranscript from "../createTranscript";
import SelectSearch from "react-select-search";
import { supportedLatinLanguages } from "../../../supportedLanguages";
import { useFiefIsAuthenticated } from "@fief/fief/nextjs/react";
import { featureEnabled } from "../../domainContext";
const TranscriptCreate = () => {
const router = useRouter();
const isAuthenticated = useFiefIsAuthenticated();
const requireLogin = featureEnabled("requireLogin");
const [name, setName] = useState<string>();
const nameChange = (event: React.ChangeEvent<HTMLInputElement>) => {
setName(event.target.value);
};
const [targetLanguage, setTargetLanguage] = useState<string>();
const onLanguageChange = (newval) => {
(!newval || typeof newval === "string") && setTargetLanguage(newval);
};
const createTranscript = useCreateTranscript();
const [loadingSend, setLoadingSend] = useState(false);
const send = () => {
if (loadingSend || createTranscript.loading || permissionDenied) return;
setLoadingSend(true);
createTranscript.create({ name, targetLanguage });
};
useEffect(() => {
createTranscript.response &&
router.push(`/transcripts/${createTranscript.response.id}/record`);
}, [createTranscript.response]);
useEffect(() => {
if (createTranscript.error) setLoadingSend(false);
}, [createTranscript.error]);
const { loading, permissionOk, permissionDenied, requestPermission } =
useAudioDevice();
return (
<>
<div className="hidden lg:block"></div>
<div className="lg:grid lg:grid-cols-2 lg:grid-rows-1 lg:gap-4 lg:h-full h-auto flex flex-col">
<section className="flex flex-col w-full lg:h-full items-center justify-evenly p-4 md:px-6 md:py-8">
<div className="flex flex-col max-w-xl items-center justify-center">
<h1 className="text-2xl font-bold mb-2">
Welcome to reflector.media
</h1>
<p>
Reflector is a transcription and summarization pipeline that
transforms audio into knowledge.
<span className="hidden md:block">
The output is meeting minutes and topic summaries enabling
topic-specific analyses stored in your systems of record. This
is accomplished on your infrastructure without 3rd parties
keeping your data private, secure, and organized.
</span>
</p>
<About buttonText="Learn more" />
<p className="mt-6">
              To use Reflector, we kindly request permission to access your
              microphone during meetings and events.
</p>
{featureEnabled("privacy") && (
<Privacy buttonText="Privacy policy" />
)}
</div>
</section>
<section className="flex flex-col justify-center items-center w-full h-full">
{requireLogin && !isAuthenticated ? (
<button
className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded"
onClick={() => router.push("/login")}
>
Log in
</button>
) : (
<div className="rounded-xl md:bg-blue-200 md:w-96 p-4 lg:p-6 flex flex-col mb-4 md:mb-10">
<h2 className="text-2xl font-bold mt-2 mb-2">Try Reflector</h2>
<label className="mb-3">
<p>Recording name</p>
<div className="select-search-container">
<input
className="select-search-input"
type="text"
onChange={nameChange}
placeholder="Optional"
/>
</div>
</label>
<label className="mb-3">
<p>Do you want to enable live translation?</p>
<SelectSearch
search
options={supportedLatinLanguages}
value={targetLanguage}
onChange={onLanguageChange}
placeholder="Choose your language"
/>
</label>
            {loading ? (
              <p>Checking permissions...</p>
            ) : permissionOk ? (
              <p>Microphone permission granted</p>
            ) : permissionDenied ? (
              <p>
                Permission to use your microphone was denied; please change
                the permission setting in your browser and refresh this page.
              </p>
) : (
<button
className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded"
onClick={requestPermission}
disabled={permissionDenied}
>
Request Microphone Permission
</button>
)}
<button
className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded"
onClick={send}
disabled={!permissionOk || loadingSend}
>
{loadingSend ? "Loading..." : "Confirm"}
</button>
</div>
)}
</section>
</div>
</>
);
};
export default TranscriptCreate;

View File

@@ -0,0 +1,354 @@
import React, { useRef, useEffect, useState } from "react";
import WaveSurfer from "wavesurfer.js";
import RecordPlugin from "../../lib/custom-plugins/record";
import CustomRegionsPlugin from "../../lib/custom-plugins/regions";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faMicrophone, faDownload } from "@fortawesome/free-solid-svg-icons";
import { formatTime } from "../../lib/time";
import { Topic } from "./webSocketTypes";
import { AudioWaveform } from "../../api";
import AudioInputsDropdown from "./audioInputsDropdown";
import { Option } from "react-dropdown";
import { useError } from "../../(errors)/errorContext";
import { waveSurferStyles } from "../../styles/recorder";
type RecorderProps = {
setStream?: React.Dispatch<React.SetStateAction<MediaStream | null>>;
onStop?: () => void;
topics: Topic[];
  getAudioStream?: (deviceId: string) => Promise<MediaStream | null>;
audioDevices?: Option[];
useActiveTopic: [
Topic | null,
React.Dispatch<React.SetStateAction<Topic | null>>,
];
waveform?: AudioWaveform | null;
isPastMeeting: boolean;
transcriptId?: string | null;
};
export default function Recorder(props: RecorderProps) {
const waveformRef = useRef<HTMLDivElement>(null);
const [wavesurfer, setWavesurfer] = useState<WaveSurfer | null>(null);
const [record, setRecord] = useState<RecordPlugin | null>(null);
const [isRecording, setIsRecording] = useState<boolean>(false);
const [hasRecorded, setHasRecorded] = useState<boolean>(props.isPastMeeting);
const [isPlaying, setIsPlaying] = useState<boolean>(false);
const [currentTime, setCurrentTime] = useState<number>(0);
const [timeInterval, setTimeInterval] = useState<number | null>(null);
const [duration, setDuration] = useState<number>(0);
const [waveRegions, setWaveRegions] = useState<CustomRegionsPlugin | null>(
null,
);
const [deviceId, setDeviceId] = useState<string | null>(null);
const [recordStarted, setRecordStarted] = useState(false);
const [activeTopic, setActiveTopic] = props.useActiveTopic;
const topicsRef = useRef(props.topics);
const [showDevices, setShowDevices] = useState(false);
const { setError } = useError();
  // Sets up keyboard shortcuts for the streamdeck.
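  // Keys sent by the deck: "~" home, "," new transcript, "!" start recording,
  // "@" stop recording, "%" trigger a handled error, "^" throw an unhandled
  // exception, "(" login, ")" logout.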
const setupProjectorKeys = (): (() => void) => {
if (!record) return () => {};
const handleKeyPress = (event: KeyboardEvent) => {
switch (event.key) {
case "~":
location.href = "";
break;
case ",":
location.href = "/transcripts/new";
break;
case "!":
if (record.isRecording()) return;
handleRecClick();
break;
case "@":
if (!record.isRecording()) return;
handleRecClick();
break;
case "%":
setError(new Error("Error triggered by '%' shortcut"));
break;
case "^":
throw new Error("Unhandled Exception thrown by '^' shortcut");
case "(":
location.href = "/login";
break;
case ")":
location.href = "/logout";
break;
default:
break;
}
};
document.addEventListener("keydown", handleKeyPress);
// Return the cleanup function
return () => {
document.removeEventListener("keydown", handleKeyPress);
};
};
// Setup Shortcuts
useEffect(() => {
if (!record) return;
return setupProjectorKeys();
}, [record, deviceId]);
// Waveform setup
useEffect(() => {
if (waveformRef.current) {
const _wavesurfer = WaveSurfer.create({
container: waveformRef.current,
url: props.transcriptId
? `${process.env.NEXT_PUBLIC_API_URL}/v1/transcripts/${props.transcriptId}/audio/mp3`
: undefined,
peaks: props.waveform?.data,
hideScrollbar: true,
autoCenter: true,
barWidth: 2,
height: "auto",
...waveSurferStyles.player,
});
if (!props.transcriptId) {
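        // Live recording has no preloaded audio to draw, so stub out the
        // single-canvas renderer (presumably so the Record plugin's live
        // waveform is the only thing painted).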
const _wshack: any = _wavesurfer;
_wshack.renderer.renderSingleCanvas = () => {};
}
// styling
const wsWrapper = _wavesurfer.getWrapper();
wsWrapper.style.cursor = waveSurferStyles.playerStyle.cursor;
wsWrapper.style.backgroundColor =
waveSurferStyles.playerStyle.backgroundColor;
wsWrapper.style.borderRadius = waveSurferStyles.playerStyle.borderRadius;
_wavesurfer.on("play", () => {
setIsPlaying(true);
});
_wavesurfer.on("pause", () => {
setIsPlaying(false);
});
_wavesurfer.on("timeupdate", setCurrentTime);
setRecord(_wavesurfer.registerPlugin(RecordPlugin.create()));
setWaveRegions(_wavesurfer.registerPlugin(CustomRegionsPlugin.create()));
if (props.isPastMeeting) _wavesurfer.toggleInteraction(true);
setWavesurfer(_wavesurfer);
return () => {
_wavesurfer.destroy();
setIsRecording(false);
setIsPlaying(false);
setCurrentTime(0);
};
}
}, []);
useEffect(() => {
topicsRef.current = props.topics;
if (!isRecording) renderMarkers();
}, [props.topics, waveRegions]);
const renderMarkers = () => {
if (!waveRegions) return;
waveRegions.clearRegions();
for (let topic of topicsRef.current) {
const content = document.createElement("div");
content.setAttribute("style", waveSurferStyles.marker);
content.onmouseover = () => {
content.style.backgroundColor =
waveSurferStyles.markerHover.backgroundColor;
content.style.zIndex = "999";
content.style.width = "300px";
};
content.onmouseout = () => {
content.setAttribute("style", waveSurferStyles.marker);
};
content.textContent = topic.title;
const region = waveRegions.addRegion({
start: topic.timestamp,
content,
color: "f00",
drag: false,
});
region.on("click", (e) => {
e.stopPropagation();
setActiveTopic(topic);
wavesurfer?.setTime(region.start);
});
}
};
useEffect(() => {
if (!record) return;
return record.on("stopRecording", () => {
renderMarkers();
});
}, [record]);
useEffect(() => {
if (isRecording) {
const interval = window.setInterval(() => {
setCurrentTime((prev) => prev + 1);
}, 1000);
setTimeInterval(interval);
return () => clearInterval(interval);
} else {
clearInterval(timeInterval as number);
setCurrentTime((prev) => {
setDuration(prev);
return 0;
});
}
}, [isRecording]);
useEffect(() => {
if (activeTopic) {
wavesurfer?.setTime(activeTopic.timestamp);
}
}, [activeTopic]);
  const handleRecClick = async () => {
    if (!record) {
      console.log("no record");
      return;
    }
if (record.isRecording()) {
if (props.onStop) props.onStop();
record.stopRecording();
setIsRecording(false);
setHasRecorded(true);
} else {
const stream = await getCurrentStream();
if (props.setStream) props.setStream(stream);
waveRegions?.clearRegions();
if (stream) {
await record.startRecording(stream);
setIsRecording(true);
}
}
};
const handlePlayClick = () => {
wavesurfer?.playPause();
};
const timeLabel = () => {
if (isRecording) return formatTime(currentTime);
if (duration) return `${formatTime(currentTime)}/${formatTime(duration)}`;
return "";
};
const getCurrentStream = async () => {
setRecordStarted(true);
return deviceId && props.getAudioStream
? await props.getAudioStream(deviceId)
: null;
};
useEffect(() => {
if (props.audioDevices && props.audioDevices.length > 0) {
setDeviceId(props.audioDevices[0].value);
}
}, [props.audioDevices]);
return (
<div className="flex items-center w-full relative">
<div className="flex-grow items-end relative">
<div
ref={waveformRef}
className="flex-grow rounded-lg md:rounded-xl h-20"
></div>
<div className="absolute right-2 bottom-0">
{isRecording && (
<div className="inline-block bg-red-500 rounded-full w-2 h-2 my-auto mr-1 animate-ping"></div>
)}
{timeLabel()}
</div>
</div>
{hasRecorded && (
<>
<button
className={`${
isPlaying
? "bg-orange-400 hover:bg-orange-500 focus-visible:bg-orange-500"
: "bg-green-400 hover:bg-green-500 focus-visible:bg-green-500"
            } text-white ml-2 md:ml-4 md:h-[78px] md:min-w-[100px] text-lg`}
id="play-btn"
onClick={handlePlayClick}
disabled={isRecording}
>
{isPlaying ? "Pause" : "Play"}
</button>
{props.transcriptId && (
<a
title="Download recording"
className="text-center cursor-pointer text-blue-400 hover:text-blue-700 ml-2 md:ml:4 p-2 rounded-lg outline-blue-400"
download={`recording-${
props.transcriptId?.split("-")[0] || "0000"
}`}
href={`${process.env.NEXT_PUBLIC_API_URL}/v1/transcripts/${props.transcriptId}/audio/mp3`}
>
<FontAwesomeIcon icon={faDownload} className="h-5 w-auto" />
</a>
)}
</>
)}
{!hasRecorded && (
<>
<button
className={`${
isRecording
? "bg-red-400 hover:bg-red-500 focus-visible:bg-red-500"
: "bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500"
            } text-white ml-2 md:ml-4 md:h-[78px] md:min-w-[100px] text-lg`}
onClick={handleRecClick}
disabled={isPlaying}
>
{isRecording ? "Stop" : "Record"}
</button>
{props.audioDevices && props.audioDevices?.length > 0 && deviceId && (
<>
<button
className="text-center text-blue-400 hover:text-blue-700 ml-2 md:ml:4 p-2 rounded-lg focus-visible:outline outline-blue-400"
onClick={() => setShowDevices((prev) => !prev)}
>
<FontAwesomeIcon icon={faMicrophone} className="h-5 w-auto" />
</button>
<div
className={`absolute z-20 bottom-[-1rem] right-0 bg-white rounded ${
showDevices ? "visible" : "invisible"
}`}
>
<AudioInputsDropdown
setDeviceId={setDeviceId}
audioDevices={props.audioDevices}
disabled={recordStarted}
hide={() => setShowDevices(false)}
deviceId={deviceId}
/>
</div>
</>
)}
</>
)}
</div>
);
}

View File

@@ -0,0 +1,23 @@
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faArrowDown } from "@fortawesome/free-solid-svg-icons";
type ScrollToBottomProps = {
visible: boolean;
handleScrollBottom: () => void;
};
export default function ScrollToBottom(props: ScrollToBottomProps) {
return (
<div
className={`absolute bottom-0 right-[0.15rem] md:right-[0.65rem] ${
props.visible ? "flex" : "hidden"
} text-2xl cursor-pointer opacity-70 hover:opacity-100 transition-opacity duration-200 text-blue-400`}
      onClick={props.handleScrollBottom}
>
<FontAwesomeIcon icon={faArrowDown} className="animate-bounce" />
</div>
);
}

View File

@@ -0,0 +1,69 @@
import React, { useState, useRef, useEffect } from "react";
import { featureEnabled } from "../domainContext";
const ShareLink = () => {
const [isCopied, setIsCopied] = useState(false);
const inputRef = useRef<HTMLInputElement>(null);
const [currentUrl, setCurrentUrl] = useState<string>("");
useEffect(() => {
setCurrentUrl(window.location.href);
}, []);
  const handleCopyClick = () => {
    if (inputRef.current) {
      const textToCopy = inputRef.current.value;
      textToCopy &&
        navigator.clipboard.writeText(textToCopy).then(() => {
          setIsCopied(true);
          // Reset the copied state after 2 seconds
          setTimeout(() => setIsCopied(false), 2000);
        });
    }
  };
const privacyEnabled = featureEnabled("privacy");
return (
<div
className="p-2 md:p-4 rounded"
style={{ background: "rgba(96, 165, 250, 0.2)" }}
>
{privacyEnabled ? (
<p className="text-sm mb-2">
You can share this link with others. Anyone with the link will have
access to the page, including the full audio recording, for the next 7
days.
</p>
) : (
<p className="text-sm mb-2">
You can share this link with others. Anyone with the link will have
access to the page, including the full audio recording.
</p>
)}
<div className="flex items-center">
        <input
          type="text"
          readOnly
          value={currentUrl}
          ref={inputRef}
          className="border rounded-lg md:rounded-xl p-2 flex-grow flex-shrink overflow-auto mr-2 text-sm bg-slate-100 outline-slate-400"
        />
<button
onClick={handleCopyClick}
className={
(isCopied ? "bg-blue-500" : "bg-blue-400") +
" hover:bg-blue-500 focus-visible:bg-blue-500 text-white rounded p-2"
}
style={{ minHeight: "38px" }}
>
{isCopied ? "Copied!" : "Copy"}
</button>
</div>
</div>
);
};
export default ShareLink;

View File

@@ -0,0 +1,122 @@
import React, { useState, useEffect } from "react";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import {
faChevronRight,
faChevronDown,
} from "@fortawesome/free-solid-svg-icons";
import { formatTime } from "../../lib/time";
import ScrollToBottom from "./scrollToBottom";
import { Topic } from "./webSocketTypes";
type TopicListProps = {
topics: Topic[];
useActiveTopic: [
Topic | null,
React.Dispatch<React.SetStateAction<Topic | null>>,
];
autoscroll: boolean;
};
export function TopicList({
topics,
useActiveTopic,
autoscroll,
}: TopicListProps) {
const [activeTopic, setActiveTopic] = useActiveTopic;
const [autoscrollEnabled, setAutoscrollEnabled] = useState<boolean>(true);
useEffect(() => {
if (autoscroll && autoscrollEnabled) scrollToBottom();
}, [topics.length]);
const scrollToBottom = () => {
const topicsDiv = document.getElementById("topics-div");
if (topicsDiv) topicsDiv.scrollTop = topicsDiv.scrollHeight;
};
// scroll top is not rounded, heights are, so exact match won't work.
// https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollHeight#determine_if_an_element_has_been_totally_scrolled
  const toggleScroll = (element: HTMLElement) => {
    const bottom =
      Math.abs(
        element.scrollHeight - element.clientHeight - element.scrollTop,
      ) < 2 || element.scrollHeight === element.clientHeight;
    if (!bottom && autoscrollEnabled) {
      setAutoscrollEnabled(false);
    } else if (bottom && !autoscrollEnabled) {
      setAutoscrollEnabled(true);
    }
  };
  const handleScroll = (e: React.UIEvent<HTMLDivElement>) => {
    toggleScroll(e.currentTarget);
  };
useEffect(() => {
if (autoscroll) {
const topicsDiv = document.getElementById("topics-div");
topicsDiv && toggleScroll(topicsDiv);
}
}, [activeTopic, autoscroll]);
return (
<section className="relative w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-1 sm:p-2 md:px-4 flex flex-col justify-center align-center">
{topics.length > 0 ? (
<>
<h2 className="ml-2 md:text-lg font-bold mb-2">Topics</h2>
{autoscroll && (
<ScrollToBottom
visible={!autoscrollEnabled}
handleScrollBottom={scrollToBottom}
/>
)}
<div
id="topics-div"
className="overflow-y-auto h-full"
onScroll={handleScroll}
>
            {topics.map((topic) => (
              <button
                key={topic.id}
className="rounded-none border-solid border-0 border-bluegrey border-b last:border-none last:rounded-b-lg p-2 hover:bg-blue-400/20 focus-visible:bg-blue-400/20 text-left block w-full"
onClick={() =>
setActiveTopic(activeTopic?.id == topic.id ? null : topic)
}
>
<div className="w-full flex justify-between items-center rounded-lg md:rounded-xl xs:text-base sm:text-lg md:text-xl font-bold leading-tight">
<p>
<span className="font-light font-mono text-slate-500 text-base md:text-lg">
[{formatTime(topic.timestamp)}]&nbsp;
</span>
<span>{topic.title}</span>
</p>
<FontAwesomeIcon
className="transform transition-transform duration-200 ml-2"
icon={
activeTopic?.id == topic.id
? faChevronDown
: faChevronRight
}
/>
</div>
{activeTopic?.id == topic.id && (
<div className="p-2">{topic.transcript}</div>
)}
</button>
))}
</div>
</>
) : (
<div className="text-center text-gray-500">
Discussion topics will appear here after you start recording.
<br />
It may take up to 5 minutes of conversation for the first topic to
appear.
</div>
)}
</section>
);
}

View File

@@ -0,0 +1,13 @@
type TranscriptTitleProps = {
  title: string;
};
const TranscriptTitle = (props: TranscriptTitleProps) => {
return (
<h2 className="text-2xl lg:text-4xl font-extrabold text-center mb-4">
{props.title}
</h2>
);
};
export default TranscriptTitle;

View File

@@ -0,0 +1,130 @@
import { useEffect, useState } from "react";
import { Option } from "react-dropdown";
const MIC_QUERY = { name: "microphone" as PermissionName };
const useAudioDevice = () => {
const [permissionOk, setPermissionOk] = useState<boolean>(false);
const [permissionDenied, setPermissionDenied] = useState<boolean>(false);
const [audioDevices, setAudioDevices] = useState<Option[]>([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
checkPermission();
}, []);
useEffect(() => {
if (permissionOk) {
updateDevices();
}
}, [permissionOk]);
const checkPermission = (): void => {
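    // Firefox does not support querying the "microphone" permission through
    // the Permissions API, so probe with getUserMedia instead.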
if (navigator.userAgent.includes("Firefox")) {
navigator.mediaDevices
.getUserMedia({ audio: true, video: false })
        .then(() => {
setPermissionOk(true);
setPermissionDenied(false);
})
        .catch(() => {
setPermissionOk(false);
setPermissionDenied(false);
})
.finally(() => setLoading(false));
return;
}
navigator.permissions
.query(MIC_QUERY)
.then((permissionStatus) => {
setPermissionOk(permissionStatus.state === "granted");
setPermissionDenied(permissionStatus.state === "denied");
permissionStatus.onchange = () => {
setPermissionOk(permissionStatus.state === "granted");
setPermissionDenied(permissionStatus.state === "denied");
};
})
.catch(() => {
setPermissionOk(false);
setPermissionDenied(false);
})
.finally(() => {
setLoading(false);
});
};
const requestPermission = () => {
navigator.mediaDevices
.getUserMedia({
audio: true,
})
.then((stream) => {
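        // Keep the Firefox stream alive: stopping the tracks there appears to
        // drop the session grant and device labels again (assumption based on
        // Firefox's non-persistent microphone permissions).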
if (!navigator.userAgent.includes("Firefox"))
stream.getTracks().forEach((track) => track.stop());
setPermissionOk(true);
})
.catch(() => {
setPermissionDenied(true);
setPermissionOk(false);
})
.finally(() => {
setLoading(false);
});
};
const getAudioStream = async (
deviceId: string,
): Promise<MediaStream | null> => {
try {
const urlParams = new URLSearchParams(window.location.search);
const noiseSuppression = urlParams.get("noiseSuppression") === "true";
const echoCancellation = urlParams.get("echoCancellation") === "true";
console.debug(
"noiseSuppression",
noiseSuppression,
"echoCancellation",
echoCancellation,
);
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
deviceId,
noiseSuppression,
echoCancellation,
},
});
return stream;
} catch (e) {
setPermissionOk(false);
setAudioDevices([]);
return null;
}
};
const updateDevices = async (): Promise<void> => {
const devices = await navigator.mediaDevices.enumerateDevices();
const _audioDevices = devices
.filter(
(d) => d.kind === "audioinput" && d.deviceId != "" && d.label != "",
)
.map((d) => ({ value: d.deviceId, label: d.label }));
setPermissionOk(_audioDevices.length > 0);
setAudioDevices(_audioDevices);
};
return {
loading,
permissionOk,
permissionDenied,
audioDevices,
getAudioStream,
requestPermission,
};
};
export default useAudioDevice;

View File

@@ -0,0 +1,48 @@
import { useEffect, useState } from "react";
import {
DefaultApi,
V1TranscriptGetAudioMp3Request,
} from "../../api/apis/DefaultApi";
import { useError } from "../../(errors)/errorContext";
type Mp3Response = {
url: string | null;
loading: boolean;
error: Error | null;
};
const useMp3 = (api: DefaultApi, id: string): Mp3Response => {
const [url, setUrl] = useState<string | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
const getMp3 = (id: string) => {
if (!id) throw new Error("Transcript ID is required to get transcript Mp3");
setLoading(true);
const requestParameters: V1TranscriptGetAudioMp3Request = {
transcriptId: id,
};
api
.v1TranscriptGetAudioMp3(requestParameters)
.then((result) => {
setUrl(result);
setLoading(false);
console.debug("Transcript Mp3 loaded:", result);
})
      .catch((err) => {
        setError(err);
        setErrorState(err);
        setLoading(false);
      });
};
useEffect(() => {
getMp3(id);
}, [id]);
return { url, loading, error };
};
export default useMp3;

View File

@@ -0,0 +1,48 @@
import { useEffect, useState } from "react";
import {
DefaultApi,
V1TranscriptGetTopicsRequest,
} from "../../api/apis/DefaultApi";
import { useError } from "../../(errors)/errorContext";
import { Topic } from "./webSocketTypes";
type TranscriptTopics = {
topics: Topic[] | null;
loading: boolean;
error: Error | null;
};
const useTopics = (api: DefaultApi, id: string): TranscriptTopics => {
const [topics, setTopics] = useState<Topic[] | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
const getTopics = (id: string) => {
if (!id) return;
setLoading(true);
const requestParameters: V1TranscriptGetTopicsRequest = {
transcriptId: id,
};
api
.v1TranscriptGetTopics(requestParameters)
.then((result) => {
setTopics(result);
setLoading(false);
console.debug("Transcript topics loaded:", result);
})
      .catch((err) => {
        setError(err);
        setErrorState(err);
        setLoading(false);
      });
};
useEffect(() => {
getTopics(id);
}, [id]);
return { topics, loading, error };
};
export default useTopics;

View File

@@ -0,0 +1,45 @@
import { useEffect, useState } from "react";
import { DefaultApi, V1TranscriptGetRequest } from "../../api/apis/DefaultApi";
import { GetTranscript } from "../../api";
import { useError } from "../../(errors)/errorContext";
type Transcript = {
response: GetTranscript | null;
loading: boolean;
error: Error | null;
};
const useTranscript = (api: DefaultApi, id: string | null): Transcript => {
const [response, setResponse] = useState<GetTranscript | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
const getTranscript = (id: string | null) => {
if (!id) return;
setLoading(true);
const requestParameters: V1TranscriptGetRequest = {
transcriptId: id,
};
api
.v1TranscriptGet(requestParameters)
.then((result) => {
setResponse(result);
setLoading(false);
console.debug("Transcript Loaded:", result);
})
      .catch((err) => {
        setError(err);
        setErrorState(err);
        setLoading(false);
      });
};
useEffect(() => {
getTranscript(id);
}, [id]);
return { response, loading, error };
};
export default useTranscript;
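
useTranscript, useTopics, useWaveform, and useMp3 all share this shape: fetch once per id and expose loading/error alongside the payload. A minimal consumer sketch (hypothetical component; import paths assumed):

import getApi from "../../lib/getApi"; // path assumed
import useTranscript from "./useTranscript"; // path assumed

export function TranscriptSummary({ id }: { id: string }) {
  const api = getApi();
  const { response, loading, error } = useTranscript(api, id);

  if (error) return <p>Could not load transcript.</p>;
  if (loading || !response) return <p>Loading...</p>;
  return <p>{response.longSummary}</p>;
}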

View File

@@ -0,0 +1,48 @@
import { useEffect, useState } from "react";
import {
DefaultApi,
V1TranscriptGetAudioWaveformRequest,
} from "../../api/apis/DefaultApi";
import { AudioWaveform } from "../../api";
import { useError } from "../../(errors)/errorContext";
type AudioWaveFormResponse = {
waveform: AudioWaveform | null;
loading: boolean;
error: Error | null;
};
const useWaveform = (api: DefaultApi, id: string): AudioWaveFormResponse => {
const [waveform, setWaveform] = useState<AudioWaveform | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
const getWaveform = (id: string) => {
if (!id) return;
setLoading(true);
const requestParameters: V1TranscriptGetAudioWaveformRequest = {
transcriptId: id,
};
api
.v1TranscriptGetAudioWaveform(requestParameters)
.then((result) => {
setWaveform(result);
setLoading(false);
console.debug("Transcript waveform loaded:", result);
})
      .catch((err) => {
        setError(err);
        setErrorState(err);
        setLoading(false);
      });
};
useEffect(() => {
getWaveform(id);
}, [id]);
return { waveform, loading, error };
};
export default useWaveform;

View File

@@ -0,0 +1,75 @@
import { useEffect, useState } from "react";
import Peer from "simple-peer";
import {
DefaultApi,
V1TranscriptRecordWebrtcRequest,
} from "../../api/apis/DefaultApi";
import { useError } from "../../(errors)/errorContext";
const useWebRTC = (
  stream: MediaStream | null,
  transcriptId: string | null,
  api: DefaultApi,
): Peer.Instance | null => {
  const [peer, setPeer] = useState<Peer.Instance | null>(null);
const { setError } = useError();
useEffect(() => {
if (!stream || !transcriptId) {
return;
}
console.debug("Using WebRTC", stream, transcriptId);
    let p: Peer.Instance;
try {
p = new Peer({ initiator: true, stream: stream });
} catch (error) {
setError(error);
return;
}
p.on("error", (err) => {
setError(new Error(`WebRTC error: ${err}`));
});
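    // As the initiator, simple-peer emits a "signal" event with the local SDP
    // offer; POST it to the backend and feed the SDP answer back into the peer.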
p.on("signal", (data: any) => {
if ("sdp" in data) {
const requestParameters: V1TranscriptRecordWebrtcRequest = {
transcriptId: transcriptId,
rtcOffer: {
sdp: data.sdp,
type: data.type,
},
};
api
.v1TranscriptRecordWebrtc(requestParameters)
.then((answer) => {
try {
p.signal(answer);
} catch (error) {
setError(error);
}
})
.catch((error) => {
setError(error);
});
}
});
p.on("connect", () => {
console.log("WebRTC connected");
setPeer(p);
});
return () => {
p.destroy();
};
}, [stream, transcriptId]);
return peer;
};
export default useWebRTC;

View File

@@ -0,0 +1,244 @@
import { useEffect, useState } from "react";
import { Topic, FinalSummary, Status } from "./webSocketTypes";
import { useError } from "../../(errors)/errorContext";
import { useRouter } from "next/navigation";
type UseWebSockets = {
transcriptText: string;
translateText: string;
topics: Topic[];
finalSummary: FinalSummary;
status: Status;
};
export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
const [transcriptText, setTranscriptText] = useState<string>("");
const [translateText, setTranslateText] = useState<string>("");
const [textQueue, setTextQueue] = useState<string[]>([]);
const [translationQueue, setTranslationQueue] = useState<string[]>([]);
const [isProcessing, setIsProcessing] = useState(false);
const [topics, setTopics] = useState<Topic[]>([]);
const [finalSummary, setFinalSummary] = useState<FinalSummary>({
summary: "",
});
const [status, setStatus] = useState<Status>({ value: "disconnected" });
const { setError } = useError();
const router = useRouter();
useEffect(() => {
if (isProcessing || textQueue.length === 0) {
return;
}
setIsProcessing(true);
const text = textQueue[0];
setTranscriptText(text);
setTranslateText(translationQueue[0]);
const WPM_READING = 200 + textQueue.length * 10; // words per minute to read
const wordCount = text.split(/\s+/).length;
const delay = (wordCount / WPM_READING) * 60 * 1000;
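    // e.g. with an empty queue (WPM_READING = 200), a 10-word caption stays up
    // for (10 / 200) * 60 * 1000 = 3000 ms; a backlog raises the reading rate.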
console.log(`displaying "${text}" for ${delay}ms`);
setTimeout(() => {
setIsProcessing(false);
setTextQueue((prevQueue) => prevQueue.slice(1));
setTranslationQueue((prevQueue) => prevQueue.slice(1));
}, delay);
}, [textQueue, isProcessing]);
useEffect(() => {
document.onkeyup = (e) => {
if (e.key === "a" && process.env.NEXT_PUBLIC_ENV === "development") {
setTranscriptText("Lorem Ipsum");
setTopics([
{
id: "1",
timestamp: 10,
summary: "This is test topic 1",
title: "Topic 1: Introduction to Quantum Mechanics",
transcript:
"A brief overview of quantum mechanics and its principles.",
},
{
id: "2",
timestamp: 20,
summary: "This is test topic 2",
title: "Topic 2: Machine Learning Algorithms",
transcript:
"Understanding the different types of machine learning algorithms.",
},
{
id: "3",
timestamp: 30,
summary: "This is test topic 3",
title: "Topic 3: Mental Health Awareness",
transcript: "Ways to improve mental health and reduce stigma.",
},
{
id: "4",
timestamp: 40,
summary: "This is test topic 4",
title: "Topic 4: Basics of Productivity",
transcript: "Tips and tricks to increase daily productivity.",
},
{
id: "5",
timestamp: 50,
summary: "This is test topic 5",
title: "Topic 5: Future of Aviation",
transcript:
"Exploring the advancements and possibilities in aviation.",
},
]);
setFinalSummary({ summary: "This is the final summary" });
}
if (e.key === "z" && process.env.NEXT_PUBLIC_ENV === "development") {
setTranscriptText(
"This text is in English, and it is a pretty long sentence to test the limits",
);
setTopics([
{
id: "1",
timestamp: 10,
summary: "This is test topic 1",
title:
"Topic 1: Introduction to Quantum Mechanics, a brief overview of quantum mechanics and its principles.",
transcript:
"A brief overview of quantum mechanics and its principles.",
},
{
id: "2",
timestamp: 20,
summary: "This is test topic 2",
title:
"Topic 2: Machine Learning Algorithms, understanding the different types of machine learning algorithms.",
transcript:
"Understanding the different types of machine learning algorithms.",
},
{
id: "3",
timestamp: 30,
summary: "This is test topic 3",
title:
"Topic 3: Mental Health Awareness, ways to improve mental health and reduce stigma.",
transcript: "Ways to improve mental health and reduce stigma.",
},
{
id: "4",
timestamp: 40,
summary: "This is test topic 4",
title:
"Topic 4: Basics of Productivity, tips and tricks to increase daily productivity.",
transcript: "Tips and tricks to increase daily productivity.",
},
{
id: "5",
timestamp: 50,
summary: "This is test topic 5",
title:
"Topic 5: Future of Aviation, exploring the advancements and possibilities in aviation.",
transcript:
"Exploring the advancements and possibilities in aviation.",
},
]);
setFinalSummary({ summary: "This is the final summary" });
}
};
if (!transcriptId) return;
const url = `${process.env.NEXT_PUBLIC_WEBSOCKET_URL}/v1/transcripts/${transcriptId}/events`;
const ws = new WebSocket(url);
ws.onopen = () => {
console.debug("WebSocket connection opened");
};
    ws.onmessage = (event) => {
      try {
        const message = JSON.parse(event.data);
switch (message.event) {
case "TRANSCRIPT":
const newText = (message.data.text ?? "").trim();
const newTranslation = (message.data.translation ?? "").trim();
if (!newText) break;
console.debug("TRANSCRIPT event:", newText);
setTextQueue((prevQueue) => [...prevQueue, newText]);
setTranslationQueue((prevQueue) => [...prevQueue, newTranslation]);
break;
case "TOPIC":
setTopics((prevTopics) => [...prevTopics, message.data]);
console.debug("TOPIC event:", message.data);
break;
case "FINAL_SHORT_SUMMARY":
console.debug("FINAL_SHORT_SUMMARY event:", message.data);
break;
case "FINAL_LONG_SUMMARY":
if (message.data) {
setFinalSummary(message.data);
}
break;
case "FINAL_TITLE":
console.debug("FINAL_TITLE event:", message.data);
break;
case "STATUS":
console.log("STATUS event:", message.data);
if (message.data.value === "ended") {
const newUrl = "/transcripts/" + transcriptId;
router.push(newUrl);
            console.debug("STATUS event ended:", message.data, "newUrl", newUrl);
}
setStatus(message.data);
break;
default:
setError(
new Error(`Received unknown WebSocket event: ${message.event}`),
);
}
} catch (error) {
setError(error);
}
};
ws.onerror = (error) => {
console.error("WebSocket error:", error);
setError(new Error("A WebSocket error occurred."));
};
ws.onclose = (event) => {
console.debug("WebSocket connection closed");
switch (event.code) {
case 1000: // Normal Closure:
case 1001: // Going Away:
        case 1005: // No Status Received:
break;
default:
setError(
new Error(`WebSocket closed unexpectedly with code: ${event.code}`),
);
}
};
return () => {
ws.close();
};
}, [transcriptId]);
return { transcriptText, translateText, topics, finalSummary, status };
};
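
The switch above implies a small event protocol. Sketched as a discriminated union, using the types imported from webSocketTypes (event and payload names inferred from the handler, not from generated API types):

// Inferred from the handler above; illustrative only.
type TranscriptEvent =
  | { event: "TRANSCRIPT"; data: { text?: string; translation?: string } }
  | { event: "TOPIC"; data: Topic }
  | { event: "FINAL_SHORT_SUMMARY"; data: unknown }
  | { event: "FINAL_LONG_SUMMARY"; data: FinalSummary }
  | { event: "FINAL_TITLE"; data: unknown }
  | { event: "STATUS"; data: Status };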

View File

@@ -0,0 +1,24 @@
export type Topic = {
timestamp: number;
title: string;
transcript: string;
summary: string;
id: string;
};
export type Transcript = {
text: string;
};
export type FinalSummary = {
summary: string;
};
export type Status = {
value: string;
};
export type TranslatedTopic = {
text: string;
translation: string;
};