From 86b3b3c0e4f61d98d1b4a0dbbb343286db503371 Mon Sep 17 00:00:00 2001 From: Sara Date: Mon, 13 Nov 2023 17:23:27 +0100 Subject: [PATCH 1/8] split recorder player --- .../transcripts/[transcriptId]/page.tsx | 10 +- www/app/[domain]/transcripts/player.tsx | 167 ++++++++++++++++++ www/app/[domain]/transcripts/recorder.tsx | 109 +----------- 3 files changed, 174 insertions(+), 112 deletions(-) create mode 100644 www/app/[domain]/transcripts/player.tsx diff --git a/www/app/[domain]/transcripts/[transcriptId]/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/page.tsx index 56201c3c..bebfe261 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/page.tsx @@ -5,7 +5,6 @@ import useTopics from "../useTopics"; import useWaveform from "../useWaveform"; import useMp3 from "../useMp3"; import { TopicList } from "../topicList"; -import Recorder from "../recorder"; import { Topic } from "../webSocketTypes"; import React, { useState } from "react"; import "../../../styles/button.css"; @@ -13,6 +12,7 @@ import FinalSummary from "../finalSummary"; import ShareLink from "../shareLink"; import QRCode from "react-qr-code"; import TranscriptTitle from "../transcriptTitle"; +import Player from "../player"; type TranscriptDetails = { params: { @@ -62,14 +62,12 @@ export default function TranscriptDetails(details: TranscriptDetails) { /> )} {!waveform?.loading && ( - )} diff --git a/www/app/[domain]/transcripts/player.tsx b/www/app/[domain]/transcripts/player.tsx new file mode 100644 index 00000000..6143836e --- /dev/null +++ b/www/app/[domain]/transcripts/player.tsx @@ -0,0 +1,167 @@ +import React, { useRef, useEffect, useState } from "react"; + +import WaveSurfer from "wavesurfer.js"; +import CustomRegionsPlugin from "../../lib/custom-plugins/regions"; + +import { formatTime } from "../../lib/time"; +import { Topic } from "./webSocketTypes"; +import { AudioWaveform } from "../../api"; +import { waveSurferStyles } from "../../styles/recorder"; + +type PlayerProps = { + topics: Topic[]; + useActiveTopic: [ + Topic | null, + React.Dispatch>, + ]; + waveform: AudioWaveform; + media: HTMLMediaElement; + mediaDuration: number; +}; + +export default function Player(props: PlayerProps) { + const waveformRef = useRef(null); + const [wavesurfer, setWavesurfer] = useState(null); + const [isPlaying, setIsPlaying] = useState(false); + const [currentTime, setCurrentTime] = useState(0); + const [waveRegions, setWaveRegions] = useState( + null, + ); + const [activeTopic, setActiveTopic] = props.useActiveTopic; + const topicsRef = useRef(props.topics); + + // Waveform setup + useEffect(() => { + if (waveformRef.current) { + // XXX duration is required to prevent recomputing peaks from audio + // However, the current waveform returns only the peaks, and no duration + // And the backend does not save duration properly. + // So at the moment, we deduct the duration from the topics. + // This is not ideal, but it works for now. 
+ const _wavesurfer = WaveSurfer.create({ + container: waveformRef.current, + peaks: props.waveform.data, + hideScrollbar: true, + autoCenter: true, + barWidth: 2, + height: "auto", + duration: props.mediaDuration, + + ...waveSurferStyles.player, + }); + + // styling + const wsWrapper = _wavesurfer.getWrapper(); + wsWrapper.style.cursor = waveSurferStyles.playerStyle.cursor; + wsWrapper.style.backgroundColor = + waveSurferStyles.playerStyle.backgroundColor; + wsWrapper.style.borderRadius = waveSurferStyles.playerStyle.borderRadius; + + _wavesurfer.on("play", () => { + setIsPlaying(true); + }); + _wavesurfer.on("pause", () => { + setIsPlaying(false); + }); + _wavesurfer.on("timeupdate", setCurrentTime); + + setWaveRegions(_wavesurfer.registerPlugin(CustomRegionsPlugin.create())); + + _wavesurfer.toggleInteraction(true); + + _wavesurfer.setMediaElement(props.media); + + setWavesurfer(_wavesurfer); + + return () => { + _wavesurfer.destroy(); + setIsPlaying(false); + setCurrentTime(0); + }; + } + }, []); + + useEffect(() => { + if (!wavesurfer) return; + if (!props.media) return; + wavesurfer.setMediaElement(props.media); + }, [props.media, wavesurfer]); + + useEffect(() => { + topicsRef.current = props.topics; + renderMarkers(); + }, [props.topics, waveRegions]); + + const renderMarkers = () => { + if (!waveRegions) return; + + waveRegions.clearRegions(); + + for (let topic of topicsRef.current) { + const content = document.createElement("div"); + content.setAttribute("style", waveSurferStyles.marker); + content.onmouseover = () => { + content.style.backgroundColor = + waveSurferStyles.markerHover.backgroundColor; + content.style.zIndex = "999"; + content.style.width = "300px"; + }; + content.onmouseout = () => { + content.setAttribute("style", waveSurferStyles.marker); + }; + content.textContent = topic.title; + + const region = waveRegions.addRegion({ + start: topic.timestamp, + content, + color: "f00", + drag: false, + }); + region.on("click", (e) => { + e.stopPropagation(); + setActiveTopic(topic); + wavesurfer?.setTime(region.start); + }); + } + }; + + useEffect(() => { + if (activeTopic) { + wavesurfer?.setTime(activeTopic.timestamp); + } + }, [activeTopic]); + + const handlePlayClick = () => { + wavesurfer?.playPause(); + }; + + const timeLabel = () => { + if (props.mediaDuration) + return `${formatTime(currentTime)}/${formatTime(props.mediaDuration)}`; + return ""; + }; + + return ( +
+    >
+      <div ref={waveformRef}>
+        <div>{timeLabel()}</div>
+      </div>
+      <button onClick={handlePlayClick}>
+        {isPlaying ? "Pause" : "Play"}
+      </button>
+    </div>
+ ); +} diff --git a/www/app/[domain]/transcripts/recorder.tsx b/www/app/[domain]/transcripts/recorder.tsx index 8db32ff7..5b4420c4 100644 --- a/www/app/[domain]/transcripts/recorder.tsx +++ b/www/app/[domain]/transcripts/recorder.tsx @@ -6,11 +6,8 @@ import CustomRegionsPlugin from "../../lib/custom-plugins/regions"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { faMicrophone } from "@fortawesome/free-solid-svg-icons"; -import { faDownload } from "@fortawesome/free-solid-svg-icons"; import { formatTime } from "../../lib/time"; -import { Topic } from "./webSocketTypes"; -import { AudioWaveform } from "../../api"; import AudioInputsDropdown from "./audioInputsDropdown"; import { Option } from "react-dropdown"; import { waveSurferStyles } from "../../styles/recorder"; @@ -19,17 +16,8 @@ import { useError } from "../../(errors)/errorContext"; type RecorderProps = { setStream?: React.Dispatch>; onStop?: () => void; - topics: Topic[]; getAudioStream?: (deviceId) => Promise; audioDevices?: Option[]; - useActiveTopic: [ - Topic | null, - React.Dispatch>, - ]; - waveform?: AudioWaveform | null; - isPastMeeting: boolean; - transcriptId?: string | null; - media?: HTMLMediaElement | null; mediaDuration?: number | null; }; @@ -38,7 +26,7 @@ export default function Recorder(props: RecorderProps) { const [wavesurfer, setWavesurfer] = useState(null); const [record, setRecord] = useState(null); const [isRecording, setIsRecording] = useState(false); - const [hasRecorded, setHasRecorded] = useState(props.isPastMeeting); + const [hasRecorded, setHasRecorded] = useState(false); const [isPlaying, setIsPlaying] = useState(false); const [currentTime, setCurrentTime] = useState(0); const [timeInterval, setTimeInterval] = useState(null); @@ -48,8 +36,6 @@ export default function Recorder(props: RecorderProps) { ); const [deviceId, setDeviceId] = useState(null); const [recordStarted, setRecordStarted] = useState(false); - const [activeTopic, setActiveTopic] = props.useActiveTopic; - const topicsRef = useRef(props.topics); const [showDevices, setShowDevices] = useState(false); const { setError } = useError(); @@ -73,8 +59,6 @@ export default function Recorder(props: RecorderProps) { if (!record.isRecording()) return; handleRecClick(); break; - case "^": - throw new Error("Unhandled Exception thrown by '^' shortcut"); case "(": location.href = "/login"; break; @@ -104,14 +88,8 @@ export default function Recorder(props: RecorderProps) { // Waveform setup useEffect(() => { if (waveformRef.current) { - // XXX duration is required to prevent recomputing peaks from audio - // However, the current waveform returns only the peaks, and no duration - // And the backend does not save duration properly. - // So at the moment, we deduct the duration from the topics. - // This is not ideal, but it works for now. 
const _wavesurfer = WaveSurfer.create({ container: waveformRef.current, - peaks: props.waveform?.data, hideScrollbar: true, autoCenter: true, barWidth: 2, @@ -121,10 +99,8 @@ export default function Recorder(props: RecorderProps) { ...waveSurferStyles.player, }); - if (!props.transcriptId) { - const _wshack: any = _wavesurfer; - _wshack.renderer.renderSingleCanvas = () => {}; - } + const _wshack: any = _wavesurfer; + _wshack.renderer.renderSingleCanvas = () => {}; // styling const wsWrapper = _wavesurfer.getWrapper(); @@ -144,12 +120,6 @@ export default function Recorder(props: RecorderProps) { setRecord(_wavesurfer.registerPlugin(RecordPlugin.create())); setWaveRegions(_wavesurfer.registerPlugin(CustomRegionsPlugin.create())); - if (props.isPastMeeting) _wavesurfer.toggleInteraction(true); - - if (props.media) { - _wavesurfer.setMediaElement(props.media); - } - setWavesurfer(_wavesurfer); return () => { @@ -161,58 +131,6 @@ export default function Recorder(props: RecorderProps) { } }, []); - useEffect(() => { - if (!wavesurfer) return; - if (!props.media) return; - wavesurfer.setMediaElement(props.media); - }, [props.media, wavesurfer]); - - useEffect(() => { - topicsRef.current = props.topics; - if (!isRecording) renderMarkers(); - }, [props.topics, waveRegions]); - - const renderMarkers = () => { - if (!waveRegions) return; - - waveRegions.clearRegions(); - - for (let topic of topicsRef.current) { - const content = document.createElement("div"); - content.setAttribute("style", waveSurferStyles.marker); - content.onmouseover = () => { - content.style.backgroundColor = - waveSurferStyles.markerHover.backgroundColor; - content.style.zIndex = "999"; - content.style.width = "300px"; - }; - content.onmouseout = () => { - content.setAttribute("style", waveSurferStyles.marker); - }; - content.textContent = topic.title; - - const region = waveRegions.addRegion({ - start: topic.timestamp, - content, - color: "f00", - drag: false, - }); - region.on("click", (e) => { - e.stopPropagation(); - setActiveTopic(topic); - wavesurfer?.setTime(region.start); - }); - } - }; - - useEffect(() => { - if (!record) return; - - return record.on("stopRecording", () => { - renderMarkers(); - }); - }, [record]); - useEffect(() => { if (isRecording) { const interval = window.setInterval(() => { @@ -229,12 +147,6 @@ export default function Recorder(props: RecorderProps) { } }, [isRecording]); - useEffect(() => { - if (activeTopic) { - wavesurfer?.setTime(activeTopic.timestamp); - } - }, [activeTopic]); - const handleRecClick = async () => { if (!record) return console.log("no record"); @@ -320,7 +232,6 @@ export default function Recorder(props: RecorderProps) { if (!record) return; if (!destinationStream) return; if (props.setStream) props.setStream(destinationStream); - waveRegions?.clearRegions(); if (destinationStream) { record.startRecording(destinationStream); setIsRecording(true); @@ -379,23 +290,9 @@ export default function Recorder(props: RecorderProps) { } text-white ml-2 md:ml:4 md:h-[78px] md:min-w-[100px] text-lg`} id="play-btn" onClick={handlePlayClick} - disabled={isRecording} > {isPlaying ? 
"Pause" : "Play"} - - {props.transcriptId && ( - - - - )} )} {!hasRecorded && ( From 14ebfa53a8c2f9abad2342ebe7d3ab4a018e52f5 Mon Sep 17 00:00:00 2001 From: Sara Date: Mon, 13 Nov 2023 17:33:12 +0100 Subject: [PATCH 2/8] wip loading and redirects --- .../transcripts/[transcriptId]/page.tsx | 38 +++++++++++++++---- .../[transcriptId]/record/page.tsx | 26 ++++++++----- www/app/[domain]/transcripts/useTranscript.ts | 27 ++++++++++--- www/app/[domain]/transcripts/useWebSockets.ts | 30 +++++++++------ 4 files changed, 87 insertions(+), 34 deletions(-) diff --git a/www/app/[domain]/transcripts/[transcriptId]/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/page.tsx index bebfe261..add5a824 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/page.tsx @@ -6,7 +6,7 @@ import useWaveform from "../useWaveform"; import useMp3 from "../useMp3"; import { TopicList } from "../topicList"; import { Topic } from "../webSocketTypes"; -import React, { useState } from "react"; +import React, { useEffect, useState } from "react"; import "../../../styles/button.css"; import FinalSummary from "../finalSummary"; import ShareLink from "../shareLink"; @@ -31,7 +31,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { const useActiveTopic = useState(null); const mp3 = useMp3(protectedPath, transcriptId); - if (transcript?.error /** || topics?.error || waveform?.error **/) { + if (transcript?.error || topics?.error) { return ( { + const statusToRedirect = ["idle", "recording", "processing"]; + if (statusToRedirect.includes(transcript.response?.status)) { + const newUrl = "/transcripts/" + details.params.transcriptId + "/record"; + // Shallow redirection does not work on NextJS 13 + // https://github.com/vercel/next.js/discussions/48110 + // https://github.com/vercel/next.js/discussions/49540 + // router.push(newUrl, undefined, { shallow: true }); + history.replaceState({}, "", newUrl); + } + }, [transcript.response?.status]); + const fullTranscript = topics.topics ?.map((topic) => topic.transcript) .join("\n\n") .replace(/ +/g, " ") .trim() || ""; + console.log("calf full transcript"); return ( <> - {!transcriptId || transcript?.loading || topics?.loading ? ( + {transcript?.loading || topics?.loading ? ( ) : ( <> @@ -61,7 +74,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { transcriptId={transcript.response.id} /> )} - {!waveform?.loading && ( + {waveform.waveform && mp3.media ? ( + ) : mp3.error || waveform.error ? ( + "error loading this recording" + ) : ( + "Loading Recording" )}
+
- {transcript?.response?.longSummary && ( + {transcript.response.longSummary ? ( + ) : transcript.response.status == "processing" ? ( + "Loading Transcript" + ) : ( + "error final summary" )}
diff --git a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx index 41a2d053..10297f5c 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx @@ -8,12 +8,12 @@ import { useWebSockets } from "../../useWebSockets"; import useAudioDevice from "../../useAudioDevice"; import "../../../../styles/button.css"; import { Topic } from "../../webSocketTypes"; -import getApi from "../../../../lib/getApi"; import LiveTrancription from "../../liveTranscription"; import DisconnectedIndicator from "../../disconnectedIndicator"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { faGear } from "@fortawesome/free-solid-svg-icons"; import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock"; +import { useRouter } from "next/navigation"; type TranscriptDetails = { params: { @@ -45,21 +45,31 @@ const TranscriptRecord = (details: TranscriptDetails) => { const [hasRecorded, setHasRecorded] = useState(false); const [transcriptStarted, setTranscriptStarted] = useState(false); + const router = useRouter(); + useEffect(() => { if (!transcriptStarted && webSockets.transcriptText.length !== 0) setTranscriptStarted(true); }, [webSockets.transcriptText]); useEffect(() => { - if (transcript?.response?.longSummary) { - const newUrl = `/transcripts/${transcript.response.id}`; + const statusToRedirect = ["ended", "error"]; + console.log(webSockets.status, "hey"); + console.log(transcript.response, "ho"); + + //TODO if has no topic and is error, get back to new + if ( + statusToRedirect.includes(transcript.response?.status) || + statusToRedirect.includes(webSockets.status.value) + ) { + const newUrl = "/transcripts/" + details.params.transcriptId; // Shallow redirection does not work on NextJS 13 // https://github.com/vercel/next.js/discussions/48110 // https://github.com/vercel/next.js/discussions/49540 - // router.push(newUrl, undefined, { shallow: true }); - history.replaceState({}, "", newUrl); + router.push(newUrl, undefined); + // history.replaceState({}, "", newUrl); } - }); + }, [webSockets.status.value, transcript.response?.status]); useEffect(() => { lockWakeState(); @@ -77,10 +87,7 @@ const TranscriptRecord = (details: TranscriptDetails) => { setHasRecorded(true); webRTC?.send(JSON.stringify({ cmd: "STOP" })); }} - topics={webSockets.topics} getAudioStream={getAudioStream} - useActiveTopic={useActiveTopic} - isPastMeeting={false} audioDevices={audioDevices} /> @@ -128,6 +135,7 @@ const TranscriptRecord = (details: TranscriptDetails) => { couple of minutes. Please do not navigate away from the page during this time.

+ {/* TODO If login required remove last sentence */}
)} diff --git a/www/app/[domain]/transcripts/useTranscript.ts b/www/app/[domain]/transcripts/useTranscript.ts index af60cd3b..987e57f3 100644 --- a/www/app/[domain]/transcripts/useTranscript.ts +++ b/www/app/[domain]/transcripts/useTranscript.ts @@ -5,16 +5,28 @@ import { useError } from "../../(errors)/errorContext"; import getApi from "../../lib/getApi"; import { shouldShowError } from "../../lib/errorUtils"; -type Transcript = { - response: GetTranscript | null; - loading: boolean; - error: Error | null; +type ErrorTranscript = { + error: Error; + loading: false; + response: any; +}; + +type LoadingTranscript = { + response: any; + loading: true; + error: false; +}; + +type SuccessTranscript = { + response: GetTranscript; + loading: false; + error: null; }; const useTranscript = ( protectedPath: boolean, id: string | null, -): Transcript => { +): ErrorTranscript | LoadingTranscript | SuccessTranscript => { const [response, setResponse] = useState(null); const [loading, setLoading] = useState(true); const [error, setErrorState] = useState(null); @@ -46,7 +58,10 @@ const useTranscript = ( }); }, [id, !api]); - return { response, loading, error }; + return { response, loading, error } as + | ErrorTranscript + | LoadingTranscript + | SuccessTranscript; }; export default useTranscript; diff --git a/www/app/[domain]/transcripts/useWebSockets.ts b/www/app/[domain]/transcripts/useWebSockets.ts index bcf6b163..cb74f86d 100644 --- a/www/app/[domain]/transcripts/useWebSockets.ts +++ b/www/app/[domain]/transcripts/useWebSockets.ts @@ -4,9 +4,10 @@ import { useError } from "../../(errors)/errorContext"; import { useRouter } from "next/navigation"; import { DomainContext } from "../domainContext"; -type UseWebSockets = { +export type UseWebSockets = { transcriptText: string; translateText: string; + title: string; topics: Topic[]; finalSummary: FinalSummary; status: Status; @@ -15,6 +16,7 @@ type UseWebSockets = { export const useWebSockets = (transcriptId: string | null): UseWebSockets => { const [transcriptText, setTranscriptText] = useState(""); const [translateText, setTranslateText] = useState(""); + const [title, setTitle] = useState(""); const [textQueue, setTextQueue] = useState([]); const [translationQueue, setTranslationQueue] = useState([]); const [isProcessing, setIsProcessing] = useState(false); @@ -24,7 +26,6 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { }); const [status, setStatus] = useState({ value: "initial" }); const { setError } = useError(); - const router = useRouter(); const { websocket_url } = useContext(DomainContext); @@ -294,7 +295,7 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { if (!transcriptId) return; const url = `${websocket_url}/v1/transcripts/${transcriptId}/events`; - const ws = new WebSocket(url); + let ws = new WebSocket(url); ws.onopen = () => { console.debug("WebSocket connection opened"); @@ -343,24 +344,23 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { case "FINAL_TITLE": console.debug("FINAL_TITLE event:", message.data); + if (message.data) { + setTitle(message.data.title); + } break; case "STATUS": console.log("STATUS event:", message.data); - if (message.data.value === "ended") { - const newUrl = "/transcripts/" + transcriptId; - router.push(newUrl); - console.debug("FINAL_LONG_SUMMARY event:", message.data); - } if (message.data.value === "error") { - const newUrl = "/transcripts/" + transcriptId; - router.push(newUrl); setError( Error("Websocket 
error status"), "There was an error processing this meeting.", ); } setStatus(message.data); + if (message.data.value === "ended") { + ws.close(); + } break; default: @@ -388,8 +388,16 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { default: setError( new Error(`WebSocket closed unexpectedly with code: ${event.code}`), + "Disconnected", ); } + console.log( + "Socket is closed. Reconnect will be attempted in 1 second.", + event.reason, + ); + setTimeout(function () { + ws = new WebSocket(url); + }, 1000); }; return () => { @@ -397,5 +405,5 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { }; }, [transcriptId]); - return { transcriptText, translateText, topics, finalSummary, status }; + return { transcriptText, translateText, topics, finalSummary, title, status }; }; From e98f1bf4bc7bed06fddecd22537ea819761b83b7 Mon Sep 17 00:00:00 2001 From: Sara Date: Mon, 13 Nov 2023 18:33:24 +0100 Subject: [PATCH 3/8] loading and redirecting front-end --- .../transcripts/[transcriptId]/page.tsx | 18 +++++-- .../[transcriptId]/record/page.tsx | 50 +++++++++++++------ www/app/[domain]/transcripts/finalSummary.tsx | 2 +- www/app/[domain]/transcripts/recorder.tsx | 13 +++-- www/app/[domain]/transcripts/useWebSockets.ts | 14 +++++- .../[domain]/transcripts/waveformLoading.tsx | 11 ++++ 6 files changed, 77 insertions(+), 31 deletions(-) create mode 100644 www/app/[domain]/transcripts/waveformLoading.tsx diff --git a/www/app/[domain]/transcripts/[transcriptId]/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/page.tsx index add5a824..079b41e8 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/page.tsx @@ -13,6 +13,7 @@ import ShareLink from "../shareLink"; import QRCode from "react-qr-code"; import TranscriptTitle from "../transcriptTitle"; import Player from "../player"; +import WaveformLoading from "../waveformLoading"; type TranscriptDetails = { params: { @@ -83,9 +84,9 @@ export default function TranscriptDetails(details: TranscriptDetails) { mediaDuration={transcript.response.duration} /> ) : mp3.error || waveform.error ? ( - "error loading this recording" +
"error loading this recording"
) : ( - "Loading Recording" + )}
@@ -104,10 +105,17 @@ export default function TranscriptDetails(details: TranscriptDetails) { summary={transcript.response.longSummary} transcriptId={transcript.response.id} /> - ) : transcript.response.status == "processing" ? ( - "Loading Transcript" ) : ( - "error final summary" +
+            >
+              {transcript.response.status == "processing" ? (
+                <div>
+                  Loading Transcript
+                </div>
+              ) : (
+                <div>
+                  There was an error generating the final summary, please
+                  come back later
+                </div>
+              )}
)} diff --git a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx index 10297f5c..36f4bbe9 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx @@ -11,9 +11,12 @@ import { Topic } from "../../webSocketTypes"; import LiveTrancription from "../../liveTranscription"; import DisconnectedIndicator from "../../disconnectedIndicator"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; -import { faGear } from "@fortawesome/free-solid-svg-icons"; +import { faGear, faSpinner } from "@fortawesome/free-solid-svg-icons"; import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock"; import { useRouter } from "next/navigation"; +import Player from "../../player"; +import useMp3 from "../../useMp3"; +import WaveformLoading from "../../waveformLoading"; type TranscriptDetails = { params: { @@ -42,8 +45,10 @@ const TranscriptRecord = (details: TranscriptDetails) => { const { audioDevices, getAudioStream } = useAudioDevice(); - const [hasRecorded, setHasRecorded] = useState(false); + const [recordedTime, setRecordedTime] = useState(0); + const [startTime, setStartTime] = useState(0); const [transcriptStarted, setTranscriptStarted] = useState(false); + const mp3 = useMp3(true, ""); const router = useRouter(); @@ -54,8 +59,6 @@ const TranscriptRecord = (details: TranscriptDetails) => { useEffect(() => { const statusToRedirect = ["ended", "error"]; - console.log(webSockets.status, "hey"); - console.log(transcript.response, "ho"); //TODO if has no topic and is error, get back to new if ( @@ -80,16 +83,31 @@ const TranscriptRecord = (details: TranscriptDetails) => { return ( <> - { - setStream(null); - setHasRecorded(true); - webRTC?.send(JSON.stringify({ cmd: "STOP" })); - }} - getAudioStream={getAudioStream} - audioDevices={audioDevices} - /> + {webSockets.waveform && mp3.media ? ( + + ) : recordedTime ? ( + + ) : ( + { + setStream(null); + setRecordedTime(Date.now() - startTime); + webRTC?.send(JSON.stringify({ cmd: "STOP" })); + }} + onRecord={() => { + setStartTime(Date.now()); + }} + getAudioStream={getAudioStream} + audioDevices={audioDevices} + /> + )}
{
- {!hasRecorded ? ( + {!recordedTime ? ( <> {transcriptStarted && (

Transcription

@@ -135,7 +153,7 @@ const TranscriptRecord = (details: TranscriptDetails) => { couple of minutes. Please do not navigate away from the page during this time.

- {/* TODO If login required remove last sentence */} + {/* NTH If login required remove last sentence */}
)} diff --git a/www/app/[domain]/transcripts/finalSummary.tsx b/www/app/[domain]/transcripts/finalSummary.tsx index 463f6100..e0d0f1c9 100644 --- a/www/app/[domain]/transcripts/finalSummary.tsx +++ b/www/app/[domain]/transcripts/finalSummary.tsx @@ -87,7 +87,7 @@ export default function FinalSummary(props: FinalSummaryProps) {
diff --git a/www/app/[domain]/transcripts/recorder.tsx b/www/app/[domain]/transcripts/recorder.tsx index 5b4420c4..e7c016a7 100644 --- a/www/app/[domain]/transcripts/recorder.tsx +++ b/www/app/[domain]/transcripts/recorder.tsx @@ -14,11 +14,11 @@ import { waveSurferStyles } from "../../styles/recorder"; import { useError } from "../../(errors)/errorContext"; type RecorderProps = { - setStream?: React.Dispatch>; - onStop?: () => void; - getAudioStream?: (deviceId) => Promise; - audioDevices?: Option[]; - mediaDuration?: number | null; + setStream: React.Dispatch>; + onStop: () => void; + onRecord?: () => void; + getAudioStream: (deviceId) => Promise; + audioDevices: Option[]; }; export default function Recorder(props: RecorderProps) { @@ -94,7 +94,6 @@ export default function Recorder(props: RecorderProps) { autoCenter: true, barWidth: 2, height: "auto", - duration: props.mediaDuration || 1, ...waveSurferStyles.player, }); @@ -161,10 +160,10 @@ export default function Recorder(props: RecorderProps) { setScreenMediaStream(null); setDestinationStream(null); } else { + if (props.onRecord) props.onRecord(); const stream = await getCurrentStream(); if (props.setStream) props.setStream(stream); - waveRegions?.clearRegions(); if (stream) { await record.startRecording(stream); setIsRecording(true); diff --git a/www/app/[domain]/transcripts/useWebSockets.ts b/www/app/[domain]/transcripts/useWebSockets.ts index cb74f86d..3f3d20fc 100644 --- a/www/app/[domain]/transcripts/useWebSockets.ts +++ b/www/app/[domain]/transcripts/useWebSockets.ts @@ -1,8 +1,8 @@ import { useContext, useEffect, useState } from "react"; import { Topic, FinalSummary, Status } from "./webSocketTypes"; import { useError } from "../../(errors)/errorContext"; -import { useRouter } from "next/navigation"; import { DomainContext } from "../domainContext"; +import { AudioWaveform } from "../../api"; export type UseWebSockets = { transcriptText: string; @@ -11,6 +11,7 @@ export type UseWebSockets = { topics: Topic[]; finalSummary: FinalSummary; status: Status; + waveform: AudioWaveform | null; }; export const useWebSockets = (transcriptId: string | null): UseWebSockets => { @@ -21,6 +22,7 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { const [translationQueue, setTranslationQueue] = useState([]); const [isProcessing, setIsProcessing] = useState(false); const [topics, setTopics] = useState([]); + const [waveform, setWaveForm] = useState(null); const [finalSummary, setFinalSummary] = useState({ summary: "", }); @@ -405,5 +407,13 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { }; }, [transcriptId]); - return { transcriptText, translateText, topics, finalSummary, title, status }; + return { + transcriptText, + translateText, + topics, + finalSummary, + title, + status, + waveform, + }; }; diff --git a/www/app/[domain]/transcripts/waveformLoading.tsx b/www/app/[domain]/transcripts/waveformLoading.tsx new file mode 100644 index 00000000..68e0c80f --- /dev/null +++ b/www/app/[domain]/transcripts/waveformLoading.tsx @@ -0,0 +1,11 @@ +import { faSpinner } from "@fortawesome/free-solid-svg-icons"; +import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; + +export default () => ( +
+    <FontAwesomeIcon icon={faSpinner} spin />
+); From 1fc261a66999df014820f07836d6300977977fe4 Mon Sep 17 00:00:00 2001 From: Sara Date: Wed, 15 Nov 2023 20:30:00 +0100 Subject: [PATCH 4/8] try to move waveform to pipeline --- server/reflector/db/transcripts.py | 25 +++++--------- .../reflector/pipelines/main_live_pipeline.py | 26 +++++++++++++-- .../processors/audio_waveform_processor.py | 33 +++++++++++++++++++ server/reflector/views/transcripts.py | 3 +- server/tests/test_transcripts_rtc_ws.py | 4 +++ www/app/lib/edgeConfig.ts | 4 +-- 6 files changed, 72 insertions(+), 23 deletions(-) create mode 100644 server/reflector/processors/audio_waveform_processor.py diff --git a/server/reflector/db/transcripts.py b/server/reflector/db/transcripts.py index 6ac2e32a..f0dbc277 100644 --- a/server/reflector/db/transcripts.py +++ b/server/reflector/db/transcripts.py @@ -10,7 +10,6 @@ from pydantic import BaseModel, Field from reflector.db import database, metadata from reflector.processors.types import Word as ProcessorWord from reflector.settings import settings -from reflector.utils.audio_waveform import get_audio_waveform transcripts = sqlalchemy.Table( "transcript", @@ -79,6 +78,14 @@ class TranscriptFinalTitle(BaseModel): title: str +class TranscriptDuration(BaseModel): + duration: float + + +class TranscriptWaveform(BaseModel): + waveform: list[float] + + class TranscriptEvent(BaseModel): event: str data: dict @@ -118,22 +125,6 @@ class Transcript(BaseModel): def topics_dump(self, mode="json"): return [topic.model_dump(mode=mode) for topic in self.topics] - def convert_audio_to_waveform(self, segments_count=256): - fn = self.audio_waveform_filename - if fn.exists(): - return - waveform = get_audio_waveform( - path=self.audio_mp3_filename, segments_count=segments_count - ) - try: - with open(fn, "w") as fd: - json.dump(waveform, fd) - except Exception: - # remove file if anything happen during the write - fn.unlink(missing_ok=True) - raise - return waveform - def unlink(self): self.data_path.unlink(missing_ok=True) diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index 8c78c48f..b0576b92 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -21,11 +21,13 @@ from pydantic import BaseModel from reflector.app import app from reflector.db.transcripts import ( Transcript, + TranscriptDuration, TranscriptFinalLongSummary, TranscriptFinalShortSummary, TranscriptFinalTitle, TranscriptText, TranscriptTopic, + TranscriptWaveform, transcripts_controller, ) from reflector.logger import logger @@ -45,6 +47,7 @@ from reflector.processors import ( TranscriptTopicDetectorProcessor, TranscriptTranslatorProcessor, ) +from reflector.processors.audio_waveform_processor import AudioWaveformProcessor from reflector.processors.types import AudioDiarizationInput from reflector.processors.types import ( TitleSummaryWithId as TitleSummaryWithIdProcessorType, @@ -230,15 +233,29 @@ class PipelineMainBase(PipelineRunner): data=final_short_summary, ) - async def on_duration(self, duration: float): + async def on_duration(self, data): async with self.transaction(): + duration = TranscriptDuration(duration=data) + transcript = await self.get_transcript() await transcripts_controller.update( transcript, { - "duration": duration, + "duration": duration.duration, }, ) + return await transcripts_controller.append_event( + transcript=transcript, event="DURATION", data=duration + ) + + async def on_waveform(self, data): + waveform = 
TranscriptWaveform(waveform=data) + + transcript = await self.get_transcript() + + return await transcripts_controller.append_event( + transcript=transcript, event="WAVEFORM", data=waveform + ) class PipelineMainLive(PipelineMainBase): @@ -266,6 +283,11 @@ class PipelineMainLive(PipelineMainBase): BroadcastProcessor( processors=[ TranscriptFinalTitleProcessor.as_threaded(callback=self.on_title), + AudioWaveformProcessor( + audio_path=transcript.audio_mp3_filename, + waveform_path=transcript.audio_waveform_filename, + on_waveform=self.on_waveform, + ), ] ), ] diff --git a/server/reflector/processors/audio_waveform_processor.py b/server/reflector/processors/audio_waveform_processor.py new file mode 100644 index 00000000..acce904a --- /dev/null +++ b/server/reflector/processors/audio_waveform_processor.py @@ -0,0 +1,33 @@ +import json +from pathlib import Path + +from reflector.processors.base import Processor +from reflector.processors.types import TitleSummary +from reflector.utils.audio_waveform import get_audio_waveform + + +class AudioWaveformProcessor(Processor): + """ + Write the waveform for the final audio + """ + + INPUT_TYPE = TitleSummary + + def __init__(self, audio_path: Path | str, waveform_path: str, **kwargs): + super().__init__(**kwargs) + if isinstance(audio_path, str): + audio_path = Path(audio_path) + if audio_path.suffix not in (".mp3", ".wav"): + raise ValueError("Only mp3 and wav files are supported") + self.audio_path = audio_path + self.waveform_path = waveform_path + + async def _push(self, _data): + self.waveform_path.parent.mkdir(parents=True, exist_ok=True) + self.logger.info("Waveform Processing Started") + waveform = get_audio_waveform(path=self.audio_path, segments_count=255) + + with open(self.waveform_path, "w") as fd: + json.dump(waveform, fd) + self.logger.info("Waveform Processing Finished") + await self.emit(waveform, name="waveform") diff --git a/server/reflector/views/transcripts.py b/server/reflector/views/transcripts.py index 5de9ced3..77e4b0b7 100644 --- a/server/reflector/views/transcripts.py +++ b/server/reflector/views/transcripts.py @@ -22,7 +22,6 @@ from reflector.db.transcripts import ( from reflector.processors.types import Transcript as ProcessorTranscript from reflector.settings import settings from reflector.ws_manager import get_ws_manager -from starlette.concurrency import run_in_threadpool from ._range_requests_response import range_requests_response from .rtc_offer import RtcOffer, rtc_offer_base @@ -261,7 +260,7 @@ async def transcript_get_audio_waveform( if not transcript.audio_mp3_filename.exists(): raise HTTPException(status_code=404, detail="Audio not found") - await run_in_threadpool(transcript.convert_audio_to_waveform) + # await run_in_threadpool(transcript.convert_audio_to_waveform) return transcript.audio_waveform diff --git a/server/tests/test_transcripts_rtc_ws.py b/server/tests/test_transcripts_rtc_ws.py index cf2ea304..65660a5e 100644 --- a/server/tests/test_transcripts_rtc_ws.py +++ b/server/tests/test_transcripts_rtc_ws.py @@ -182,6 +182,10 @@ async def test_transcript_rtc_and_websocket( ev = events[eventnames.index("FINAL_TITLE")] assert ev["data"]["title"] == "LLM TITLE" + assert "WAVEFORM" in eventnames + ev = events[eventnames.index("FINAL_TITLE")] + assert ev["data"]["title"] == "LLM TITLE" + # check status order statuses = [e["data"]["value"] for e in events if e["event"] == "STATUS"] assert statuses.index("recording") < statuses.index("processing") diff --git a/www/app/lib/edgeConfig.ts 
b/www/app/lib/edgeConfig.ts index 5527121a..1140e555 100644 --- a/www/app/lib/edgeConfig.ts +++ b/www/app/lib/edgeConfig.ts @@ -3,9 +3,9 @@ import { isDevelopment } from "./utils"; const localConfig = { features: { - requireLogin: true, + requireLogin: false, privacy: true, - browse: true, + browse: false, }, api_url: "http://127.0.0.1:1250", websocket_url: "ws://127.0.0.1:1250", From 8b8e92ceac7a121bd16909ea2d8401a889ae44cb Mon Sep 17 00:00:00 2001 From: Sara Date: Wed, 15 Nov 2023 20:34:45 +0100 Subject: [PATCH 5/8] replace history instead of pushing --- www/app/[domain]/transcripts/[transcriptId]/record/page.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx index 36f4bbe9..147f8827 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx @@ -69,9 +69,9 @@ const TranscriptRecord = (details: TranscriptDetails) => { // Shallow redirection does not work on NextJS 13 // https://github.com/vercel/next.js/discussions/48110 // https://github.com/vercel/next.js/discussions/49540 - router.push(newUrl, undefined); + router.replace(newUrl); // history.replaceState({}, "", newUrl); - } + } // history.replaceState({}, "", newUrl); }, [webSockets.status.value, transcript.response?.status]); useEffect(() => { From a846e38fbdeb77aad23282a8068b08e4cd6b3921 Mon Sep 17 00:00:00 2001 From: Sara Date: Fri, 17 Nov 2023 13:38:32 +0100 Subject: [PATCH 6/8] fix waveform in pipeline --- .../reflector/pipelines/main_live_pipeline.py | 15 +++++++++------ .../processors/audio_waveform_processor.py | 5 ++++- server/tests/test_transcripts_audio_download.py | 12 ------------ server/tests/test_transcripts_rtc_ws.py | 17 ++++++++++++----- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index b0576b92..fece6da5 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -233,6 +233,7 @@ class PipelineMainBase(PipelineRunner): data=final_short_summary, ) + @broadcast_to_sockets async def on_duration(self, data): async with self.transaction(): duration = TranscriptDuration(duration=data) @@ -248,14 +249,16 @@ class PipelineMainBase(PipelineRunner): transcript=transcript, event="DURATION", data=duration ) + @broadcast_to_sockets async def on_waveform(self, data): - waveform = TranscriptWaveform(waveform=data) + async with self.transaction(): + waveform = TranscriptWaveform(waveform=data) - transcript = await self.get_transcript() + transcript = await self.get_transcript() - return await transcripts_controller.append_event( - transcript=transcript, event="WAVEFORM", data=waveform - ) + return await transcripts_controller.append_event( + transcript=transcript, event="WAVEFORM", data=waveform + ) class PipelineMainLive(PipelineMainBase): @@ -283,7 +286,7 @@ class PipelineMainLive(PipelineMainBase): BroadcastProcessor( processors=[ TranscriptFinalTitleProcessor.as_threaded(callback=self.on_title), - AudioWaveformProcessor( + AudioWaveformProcessor.as_threaded( audio_path=transcript.audio_mp3_filename, waveform_path=transcript.audio_waveform_filename, on_waveform=self.on_waveform, diff --git a/server/reflector/processors/audio_waveform_processor.py b/server/reflector/processors/audio_waveform_processor.py index acce904a..f1a24ffd 
100644 --- a/server/reflector/processors/audio_waveform_processor.py +++ b/server/reflector/processors/audio_waveform_processor.py @@ -22,7 +22,7 @@ class AudioWaveformProcessor(Processor): self.audio_path = audio_path self.waveform_path = waveform_path - async def _push(self, _data): + async def _flush(self): self.waveform_path.parent.mkdir(parents=True, exist_ok=True) self.logger.info("Waveform Processing Started") waveform = get_audio_waveform(path=self.audio_path, segments_count=255) @@ -31,3 +31,6 @@ class AudioWaveformProcessor(Processor): json.dump(waveform, fd) self.logger.info("Waveform Processing Finished") await self.emit(waveform, name="waveform") + + async def _push(_self, _data): + return diff --git a/server/tests/test_transcripts_audio_download.py b/server/tests/test_transcripts_audio_download.py index 69ae5f65..28f83fff 100644 --- a/server/tests/test_transcripts_audio_download.py +++ b/server/tests/test_transcripts_audio_download.py @@ -118,15 +118,3 @@ async def test_transcript_audio_download_range_with_seek( assert response.status_code == 206 assert response.headers["content-type"] == content_type assert response.headers["content-range"].startswith("bytes 100-") - - -@pytest.mark.asyncio -async def test_transcript_audio_download_waveform(fake_transcript): - from reflector.app import app - - ac = AsyncClient(app=app, base_url="http://test/v1") - response = await ac.get(f"/transcripts/{fake_transcript.id}/audio/waveform") - assert response.status_code == 200 - assert response.headers["content-type"] == "application/json" - assert isinstance(response.json()["data"], list) - assert len(response.json()["data"]) >= 255 diff --git a/server/tests/test_transcripts_rtc_ws.py b/server/tests/test_transcripts_rtc_ws.py index 65660a5e..b33b1db5 100644 --- a/server/tests/test_transcripts_rtc_ws.py +++ b/server/tests/test_transcripts_rtc_ws.py @@ -183,8 +183,14 @@ async def test_transcript_rtc_and_websocket( assert ev["data"]["title"] == "LLM TITLE" assert "WAVEFORM" in eventnames - ev = events[eventnames.index("FINAL_TITLE")] - assert ev["data"]["title"] == "LLM TITLE" + ev = events[eventnames.index("WAVEFORM")] + assert isinstance(ev["data"]["waveform"], list) + assert len(ev["data"]["waveform"]) >= 250 + waveform_resp = await ac.get(f"/transcripts/{tid}/audio/waveform") + assert waveform_resp.status_code == 200 + assert waveform_resp.headers["content-type"] == "application/json" + assert isinstance(waveform_resp.json()["data"], list) + assert len(waveform_resp.json()["data"]) >= 250 # check status order statuses = [e["data"]["value"] for e in events if e["event"] == "STATUS"] @@ -197,11 +203,12 @@ async def test_transcript_rtc_and_websocket( # check on the latest response that the audio duration is > 0 assert resp.json()["duration"] > 0 + assert "DURATION" in eventnames # check that audio/mp3 is available - resp = await ac.get(f"/transcripts/{tid}/audio/mp3") - assert resp.status_code == 200 - assert resp.headers["Content-Type"] == "audio/mpeg" + audio_resp = await ac.get(f"/transcripts/{tid}/audio/mp3") + assert audio_resp.status_code == 200 + assert audio_resp.headers["Content-Type"] == "audio/mpeg" @pytest.mark.usefixtures("celery_session_app") From a816e00769faba40098acd36085469a23089f614 Mon Sep 17 00:00:00 2001 From: Sara Date: Fri, 17 Nov 2023 15:16:53 +0100 Subject: [PATCH 7/8] get waveform from socket --- .../transcripts/[transcriptId]/page.tsx | 7 ++--- .../[transcriptId]/record/page.tsx | 16 ++++++---- www/app/[domain]/transcripts/player.tsx | 5 ++-- 
www/app/[domain]/transcripts/useMp3.ts | 29 ++++++++----------- www/app/[domain]/transcripts/useWebSockets.ts | 21 +++++++++++++- 5 files changed, 48 insertions(+), 30 deletions(-) diff --git a/www/app/[domain]/transcripts/[transcriptId]/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/page.tsx index 079b41e8..b0986e94 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/page.tsx @@ -30,7 +30,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { const topics = useTopics(protectedPath, transcriptId); const waveform = useWaveform(protectedPath, transcriptId); const useActiveTopic = useState(null); - const mp3 = useMp3(protectedPath, transcriptId); + const mp3 = useMp3(transcriptId); if (transcript?.error || topics?.error) { return ( @@ -59,7 +59,6 @@ export default function TranscriptDetails(details: TranscriptDetails) { .join("\n\n") .replace(/ +/g, " ") .trim() || ""; - console.log("calf full transcript"); return ( <> @@ -79,11 +78,11 @@ export default function TranscriptDetails(details: TranscriptDetails) { - ) : mp3.error || waveform.error ? ( + ) : waveform.error ? (
"error loading this recording"
) : ( diff --git a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx index 147f8827..2c5b73e0 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx @@ -11,11 +11,11 @@ import { Topic } from "../../webSocketTypes"; import LiveTrancription from "../../liveTranscription"; import DisconnectedIndicator from "../../disconnectedIndicator"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; -import { faGear, faSpinner } from "@fortawesome/free-solid-svg-icons"; +import { faGear } from "@fortawesome/free-solid-svg-icons"; import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock"; import { useRouter } from "next/navigation"; import Player from "../../player"; -import useMp3 from "../../useMp3"; +import useMp3, { Mp3Response } from "../../useMp3"; import WaveformLoading from "../../waveformLoading"; type TranscriptDetails = { @@ -48,7 +48,7 @@ const TranscriptRecord = (details: TranscriptDetails) => { const [recordedTime, setRecordedTime] = useState(0); const [startTime, setStartTime] = useState(0); const [transcriptStarted, setTranscriptStarted] = useState(false); - const mp3 = useMp3(true, ""); + let mp3 = useMp3(details.params.transcriptId, true); const router = useRouter(); @@ -74,6 +74,12 @@ const TranscriptRecord = (details: TranscriptDetails) => { } // history.replaceState({}, "", newUrl); }, [webSockets.status.value, transcript.response?.status]); + useEffect(() => { + if (webSockets.duration) { + mp3.getNow(); + } + }, [webSockets.duration]); + useEffect(() => { lockWakeState(); return () => { @@ -83,13 +89,13 @@ const TranscriptRecord = (details: TranscriptDetails) => { return ( <> - {webSockets.waveform && mp3.media ? ( + {webSockets.waveform && webSockets.duration && mp3?.media ? ( ) : recordedTime ? ( diff --git a/www/app/[domain]/transcripts/player.tsx b/www/app/[domain]/transcripts/player.tsx index 6143836e..02151a68 100644 --- a/www/app/[domain]/transcripts/player.tsx +++ b/www/app/[domain]/transcripts/player.tsx @@ -14,7 +14,7 @@ type PlayerProps = { Topic | null, React.Dispatch>, ]; - waveform: AudioWaveform; + waveform: AudioWaveform["data"]; media: HTMLMediaElement; mediaDuration: number; }; @@ -29,7 +29,6 @@ export default function Player(props: PlayerProps) { ); const [activeTopic, setActiveTopic] = props.useActiveTopic; const topicsRef = useRef(props.topics); - // Waveform setup useEffect(() => { if (waveformRef.current) { @@ -40,7 +39,7 @@ export default function Player(props: PlayerProps) { // This is not ideal, but it works for now. 
const _wavesurfer = WaveSurfer.create({ container: waveformRef.current, - peaks: props.waveform.data, + peaks: props.waveform, hideScrollbar: true, autoCenter: true, barWidth: 2, diff --git a/www/app/[domain]/transcripts/useMp3.ts b/www/app/[domain]/transcripts/useMp3.ts index 570a6a25..23249f94 100644 --- a/www/app/[domain]/transcripts/useMp3.ts +++ b/www/app/[domain]/transcripts/useMp3.ts @@ -1,24 +1,19 @@ import { useContext, useEffect, useState } from "react"; -import { useError } from "../../(errors)/errorContext"; import { DomainContext } from "../domainContext"; import getApi from "../../lib/getApi"; import { useFiefAccessTokenInfo } from "@fief/fief/build/esm/nextjs/react"; -import { shouldShowError } from "../../lib/errorUtils"; -type Mp3Response = { - url: string | null; +export type Mp3Response = { media: HTMLMediaElement | null; loading: boolean; - error: Error | null; + getNow: () => void; }; -const useMp3 = (protectedPath: boolean, id: string): Mp3Response => { - const [url, setUrl] = useState(null); +const useMp3 = (id: string, waiting?: boolean): Mp3Response => { const [media, setMedia] = useState(null); + const [later, setLater] = useState(waiting); const [loading, setLoading] = useState(false); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = getApi(protectedPath); + const api = getApi(true); const { api_url } = useContext(DomainContext); const accessTokenInfo = useFiefAccessTokenInfo(); const [serviceWorkerReady, setServiceWorkerReady] = useState(false); @@ -42,8 +37,8 @@ const useMp3 = (protectedPath: boolean, id: string): Mp3Response => { }); }, [navigator.serviceWorker, serviceWorkerReady, accessTokenInfo]); - const getMp3 = (id: string) => { - if (!id || !api) return; + useEffect(() => { + if (!id || !api || later) return; // createa a audio element and set the source setLoading(true); @@ -53,13 +48,13 @@ const useMp3 = (protectedPath: boolean, id: string): Mp3Response => { audioElement.preload = "auto"; setMedia(audioElement); setLoading(false); + }, [id, api, later]); + + const getNow = () => { + setLater(false); }; - useEffect(() => { - getMp3(id); - }, [id, api]); - - return { url, media, loading, error }; + return { media, loading, getNow }; }; export default useMp3; diff --git a/www/app/[domain]/transcripts/useWebSockets.ts b/www/app/[domain]/transcripts/useWebSockets.ts index 3f3d20fc..f33a1347 100644 --- a/www/app/[domain]/transcripts/useWebSockets.ts +++ b/www/app/[domain]/transcripts/useWebSockets.ts @@ -11,7 +11,8 @@ export type UseWebSockets = { topics: Topic[]; finalSummary: FinalSummary; status: Status; - waveform: AudioWaveform | null; + waveform: AudioWaveform["data"] | null; + duration: number | null; }; export const useWebSockets = (transcriptId: string | null): UseWebSockets => { @@ -23,6 +24,7 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { const [isProcessing, setIsProcessing] = useState(false); const [topics, setTopics] = useState([]); const [waveform, setWaveForm] = useState(null); + const [duration, setDuration] = useState(null); const [finalSummary, setFinalSummary] = useState({ summary: "", }); @@ -351,6 +353,22 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { } break; + case "WAVEFORM": + console.debug( + "WAVEFORM event length:", + message.data.waveform.length, + ); + if (message.data) { + setWaveForm(message.data.waveform); + } + break; + case "DURATION": + console.debug("DURATION event:", message.data); + if (message.data) 
{ + setDuration(message.data.duration); + } + break; + case "STATUS": console.log("STATUS event:", message.data); if (message.data.value === "error") { @@ -415,5 +433,6 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { title, status, waveform, + duration, }; }; From c40e0970b7d9dfe4d3df2cb159b7ffbecfece077 Mon Sep 17 00:00:00 2001 From: Sara Date: Mon, 20 Nov 2023 20:13:18 +0100 Subject: [PATCH 8/8] review fixes --- server/reflector/views/transcripts.py | 2 -- www/app/[domain]/transcripts/useWebSockets.ts | 17 +++++++---------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/server/reflector/views/transcripts.py b/server/reflector/views/transcripts.py index 77e4b0b7..6909b8ae 100644 --- a/server/reflector/views/transcripts.py +++ b/server/reflector/views/transcripts.py @@ -260,8 +260,6 @@ async def transcript_get_audio_waveform( if not transcript.audio_mp3_filename.exists(): raise HTTPException(status_code=404, detail="Audio not found") - # await run_in_threadpool(transcript.convert_audio_to_waveform) - return transcript.audio_waveform diff --git a/www/app/[domain]/transcripts/useWebSockets.ts b/www/app/[domain]/transcripts/useWebSockets.ts index f33a1347..f289adbb 100644 --- a/www/app/[domain]/transcripts/useWebSockets.ts +++ b/www/app/[domain]/transcripts/useWebSockets.ts @@ -402,22 +402,19 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => { console.debug("WebSocket connection closed"); switch (event.code) { case 1000: // Normal Closure: - case 1001: // Going Away: - case 1005: - break; default: setError( new Error(`WebSocket closed unexpectedly with code: ${event.code}`), "Disconnected", ); + console.log( + "Socket is closed. Reconnect will be attempted in 1 second.", + event.reason, + ); + setTimeout(function () { + ws = new WebSocket(url); + }, 1000); } - console.log( - "Socket is closed. Reconnect will be attempted in 1 second.", - event.reason, - ); - setTimeout(function () { - ws = new WebSocket(url); - }, 1000); }; return () => {