diff --git a/server/reflector/db/transcripts.py b/server/reflector/db/transcripts.py index 6ac2e32a..f0dbc277 100644 --- a/server/reflector/db/transcripts.py +++ b/server/reflector/db/transcripts.py @@ -10,7 +10,6 @@ from pydantic import BaseModel, Field from reflector.db import database, metadata from reflector.processors.types import Word as ProcessorWord from reflector.settings import settings -from reflector.utils.audio_waveform import get_audio_waveform transcripts = sqlalchemy.Table( "transcript", @@ -79,6 +78,14 @@ class TranscriptFinalTitle(BaseModel): title: str +class TranscriptDuration(BaseModel): + duration: float + + +class TranscriptWaveform(BaseModel): + waveform: list[float] + + class TranscriptEvent(BaseModel): event: str data: dict @@ -118,22 +125,6 @@ class Transcript(BaseModel): def topics_dump(self, mode="json"): return [topic.model_dump(mode=mode) for topic in self.topics] - def convert_audio_to_waveform(self, segments_count=256): - fn = self.audio_waveform_filename - if fn.exists(): - return - waveform = get_audio_waveform( - path=self.audio_mp3_filename, segments_count=segments_count - ) - try: - with open(fn, "w") as fd: - json.dump(waveform, fd) - except Exception: - # remove file if anything happen during the write - fn.unlink(missing_ok=True) - raise - return waveform - def unlink(self): self.data_path.unlink(missing_ok=True) diff --git a/server/reflector/pipelines/main_live_pipeline.py b/server/reflector/pipelines/main_live_pipeline.py index 316ecbcc..3a9d1868 100644 --- a/server/reflector/pipelines/main_live_pipeline.py +++ b/server/reflector/pipelines/main_live_pipeline.py @@ -21,11 +21,13 @@ from pydantic import BaseModel from reflector.app import app from reflector.db.transcripts import ( Transcript, + TranscriptDuration, TranscriptFinalLongSummary, TranscriptFinalShortSummary, TranscriptFinalTitle, TranscriptText, TranscriptTopic, + TranscriptWaveform, transcripts_controller, ) from reflector.logger import logger @@ -45,6 +47,7 @@ from reflector.processors import ( TranscriptTopicDetectorProcessor, TranscriptTranslatorProcessor, ) +from reflector.processors.audio_waveform_processor import AudioWaveformProcessor from reflector.processors.types import AudioDiarizationInput from reflector.processors.types import ( TitleSummaryWithId as TitleSummaryWithIdProcessorType, @@ -230,15 +233,32 @@ class PipelineMainBase(PipelineRunner): data=final_short_summary, ) - async def on_duration(self, duration: float): + @broadcast_to_sockets + async def on_duration(self, data): async with self.transaction(): + duration = TranscriptDuration(duration=data) + transcript = await self.get_transcript() await transcripts_controller.update( transcript, { - "duration": duration, + "duration": duration.duration, }, ) + return await transcripts_controller.append_event( + transcript=transcript, event="DURATION", data=duration + ) + + @broadcast_to_sockets + async def on_waveform(self, data): + async with self.transaction(): + waveform = TranscriptWaveform(waveform=data) + + transcript = await self.get_transcript() + + return await transcripts_controller.append_event( + transcript=transcript, event="WAVEFORM", data=waveform + ) class PipelineMainLive(PipelineMainBase): @@ -263,7 +283,16 @@ class PipelineMainLive(PipelineMainBase): TranscriptLinerProcessor(), TranscriptTranslatorProcessor.as_threaded(callback=self.on_transcript), TranscriptTopicDetectorProcessor.as_threaded(callback=self.on_topic), - TranscriptFinalTitleProcessor.as_threaded(callback=self.on_title), + BroadcastProcessor( 
+                processors=[
+                    TranscriptFinalTitleProcessor.as_threaded(callback=self.on_title),
+                    AudioWaveformProcessor.as_threaded(
+                        audio_path=transcript.audio_mp3_filename,
+                        waveform_path=transcript.audio_waveform_filename,
+                        on_waveform=self.on_waveform,
+                    ),
+                ]
+            ),
         ]
         pipeline = Pipeline(*processors)
         pipeline.options = self
diff --git a/server/reflector/processors/audio_waveform_processor.py b/server/reflector/processors/audio_waveform_processor.py
new file mode 100644
index 00000000..f1a24ffd
--- /dev/null
+++ b/server/reflector/processors/audio_waveform_processor.py
@@ -0,0 +1,36 @@
+import json
+from pathlib import Path
+
+from reflector.processors.base import Processor
+from reflector.processors.types import TitleSummary
+from reflector.utils.audio_waveform import get_audio_waveform
+
+
+class AudioWaveformProcessor(Processor):
+    """
+    Write the waveform for the final audio
+    """
+
+    INPUT_TYPE = TitleSummary
+
+    def __init__(self, audio_path: Path | str, waveform_path: Path, **kwargs):
+        super().__init__(**kwargs)
+        if isinstance(audio_path, str):
+            audio_path = Path(audio_path)
+        if audio_path.suffix not in (".mp3", ".wav"):
+            raise ValueError("Only mp3 and wav files are supported")
+        self.audio_path = audio_path
+        self.waveform_path = waveform_path
+
+    async def _flush(self):
+        self.waveform_path.parent.mkdir(parents=True, exist_ok=True)
+        self.logger.info("Waveform Processing Started")
+        waveform = get_audio_waveform(path=self.audio_path, segments_count=255)
+
+        with open(self.waveform_path, "w") as fd:
+            json.dump(waveform, fd)
+        self.logger.info("Waveform Processing Finished")
+        await self.emit(waveform, name="waveform")
+
+    async def _push(self, data):
+        return
diff --git a/server/reflector/views/transcripts.py b/server/reflector/views/transcripts.py
index 5de9ced3..6909b8ae 100644
--- a/server/reflector/views/transcripts.py
+++ b/server/reflector/views/transcripts.py
@@ -22,7 +22,6 @@ from reflector.db.transcripts import (
 from reflector.processors.types import Transcript as ProcessorTranscript
 from reflector.settings import settings
 from reflector.ws_manager import get_ws_manager
-from starlette.concurrency import run_in_threadpool
 
 from ._range_requests_response import range_requests_response
 from .rtc_offer import RtcOffer, rtc_offer_base
@@ -261,8 +260,6 @@ async def transcript_get_audio_waveform(
     if not transcript.audio_mp3_filename.exists():
         raise HTTPException(status_code=404, detail="Audio not found")
 
-    await run_in_threadpool(transcript.convert_audio_to_waveform)
-
     return transcript.audio_waveform
diff --git a/server/tests/test_transcripts_audio_download.py b/server/tests/test_transcripts_audio_download.py
index 69ae5f65..28f83fff 100644
--- a/server/tests/test_transcripts_audio_download.py
+++ b/server/tests/test_transcripts_audio_download.py
@@ -118,15 +118,3 @@ async def test_transcript_audio_download_range_with_seek(
     assert response.status_code == 206
     assert response.headers["content-type"] == content_type
     assert response.headers["content-range"].startswith("bytes 100-")
-
-
-@pytest.mark.asyncio
-async def test_transcript_audio_download_waveform(fake_transcript):
-    from reflector.app import app
-
-    ac = AsyncClient(app=app, base_url="http://test/v1")
-    response = await ac.get(f"/transcripts/{fake_transcript.id}/audio/waveform")
-    assert response.status_code == 200
-    assert response.headers["content-type"] == "application/json"
-    assert isinstance(response.json()["data"], list)
-    assert len(response.json()["data"]) >= 255
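Note: with the on-demand `convert_audio_to_waveform` path removed, duration and waveform now reach the client as websocket events. A minimal sketch of the payload shapes, derived from the `TranscriptDuration`/`TranscriptWaveform` models and the `event`/`data` envelope used by `append_event` (the type aliases below are illustrative, not part of the diff):

```ts
// Envelope per TranscriptEvent: { event, data }; field names come from
// the new Pydantic models. Type aliases here are illustrative only.
type DurationEvent = { event: "DURATION"; data: { duration: number } };
type WaveformEvent = { event: "WAVEFORM"; data: { waveform: number[] } };

function onTranscriptEvent(msg: DurationEvent | WaveformEvent) {
  if (msg.event === "DURATION") {
    console.debug("audio duration (s):", msg.data.duration);
  } else {
    // AudioWaveformProcessor calls get_audio_waveform with segments_count=255
    console.debug("waveform peaks:", msg.data.waveform.length);
  }
}
```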
diff --git a/server/tests/test_transcripts_rtc_ws.py b/server/tests/test_transcripts_rtc_ws.py
index cf2ea304..b33b1db5 100644
--- a/server/tests/test_transcripts_rtc_ws.py
+++ b/server/tests/test_transcripts_rtc_ws.py
@@ -182,6 +182,16 @@ async def test_transcript_rtc_and_websocket(
     ev = events[eventnames.index("FINAL_TITLE")]
     assert ev["data"]["title"] == "LLM TITLE"
 
+    assert "WAVEFORM" in eventnames
+    ev = events[eventnames.index("WAVEFORM")]
+    assert isinstance(ev["data"]["waveform"], list)
+    assert len(ev["data"]["waveform"]) >= 250
+    waveform_resp = await ac.get(f"/transcripts/{tid}/audio/waveform")
+    assert waveform_resp.status_code == 200
+    assert waveform_resp.headers["content-type"] == "application/json"
+    assert isinstance(waveform_resp.json()["data"], list)
+    assert len(waveform_resp.json()["data"]) >= 250
+
     # check status order
     statuses = [e["data"]["value"] for e in events if e["event"] == "STATUS"]
     assert statuses.index("recording") < statuses.index("processing")
@@ -193,11 +203,12 @@
 
     # check on the latest response that the audio duration is > 0
     assert resp.json()["duration"] > 0
+    assert "DURATION" in eventnames
 
     # check that audio/mp3 is available
-    resp = await ac.get(f"/transcripts/{tid}/audio/mp3")
-    assert resp.status_code == 200
-    assert resp.headers["Content-Type"] == "audio/mpeg"
+    audio_resp = await ac.get(f"/transcripts/{tid}/audio/mp3")
+    assert audio_resp.status_code == 200
+    assert audio_resp.headers["Content-Type"] == "audio/mpeg"
 
 
 @pytest.mark.usefixtures("celery_session_app")
diff --git a/www/app/[domain]/transcripts/[transcriptId]/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/page.tsx
index 56201c3c..b0986e94 100644
--- a/www/app/[domain]/transcripts/[transcriptId]/page.tsx
+++ b/www/app/[domain]/transcripts/[transcriptId]/page.tsx
@@ -5,14 +5,15 @@ import useTopics from "../useTopics";
 import useWaveform from "../useWaveform";
 import useMp3 from "../useMp3";
 import { TopicList } from "../topicList";
-import Recorder from "../recorder";
 import { Topic } from "../webSocketTypes";
-import React, { useState } from "react";
+import React, { useEffect, useState } from "react";
 import "../../../styles/button.css";
 import FinalSummary from "../finalSummary";
 import ShareLink from "../shareLink";
 import QRCode from "react-qr-code";
 import TranscriptTitle from "../transcriptTitle";
+import Player from "../player";
+import WaveformLoading from "../waveformLoading";
 
 type TranscriptDetails = {
   params: {
@@ -29,9 +30,9 @@ export default function TranscriptDetails(details: TranscriptDetails) {
   const topics = useTopics(protectedPath, transcriptId);
   const waveform = useWaveform(protectedPath, transcriptId);
   const useActiveTopic = useState(null);
-  const mp3 = useMp3(protectedPath, transcriptId);
+  const mp3 = useMp3(transcriptId);
 
-  if (transcript?.error /** || topics?.error || waveform?.error **/) {
+  if (transcript?.error || topics?.error) {
     return (
 {
+  useEffect(() => {
+    const statusToRedirect = ["idle", "recording", "processing"];
+    if (statusToRedirect.includes(transcript.response?.status)) {
+      const newUrl = "/transcripts/" + details.params.transcriptId + "/record";
+      // Shallow redirection does not work on NextJS 13
+      // https://github.com/vercel/next.js/discussions/48110
+      // https://github.com/vercel/next.js/discussions/49540
+      // router.push(newUrl, undefined, { shallow: true });
+      history.replaceState({}, "", newUrl);
+    }
+  }, [transcript.response?.status]);
+
   const fullTranscript = topics.topics
     ?.map((topic) => topic.transcript)
@@ -49,7
+62,7 @@ export default function TranscriptDetails(details: TranscriptDetails) { return ( <> - {!transcriptId || transcript?.loading || topics?.loading ? ( + {transcript?.loading || topics?.loading ? ( ) : ( <> @@ -61,33 +74,47 @@ export default function TranscriptDetails(details: TranscriptDetails) { transcriptId={transcript.response.id} /> )} - {!waveform?.loading && ( - + ) : waveform.error ? ( +
"error loading this recording"
+ ) : ( + )}
+
- {transcript?.response?.longSummary && ( + {transcript.response.longSummary ? ( + ) : ( +
+ {transcript.response.status == "processing" ? ( +

Loading Transcript

+ ) : ( +

+                      There was an error generating the final summary; please
+                      come back later.

+ )} +
)}
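Note: the waveform and summary hunks above lost most of their JSX markup in transit. For orientation, a hedged sketch of the branching the waveform block appears to implement — component names match the imports added in this file, but the props and accessors are assumptions, not the committed markup:

```tsx
{/* Assumed reconstruction; the accessors on `waveform`/`mp3` and the
    duration source are guesses based on the hooks used in this file. */}
{waveform?.loading || mp3?.loading ? (
  <WaveformLoading />
) : waveform?.error ? (
  "error loading this recording"
) : (
  <Player
    topics={topics.topics}
    useActiveTopic={useActiveTopic}
    waveform={waveform.data}
    media={mp3.media}
    mediaDuration={transcript.response.duration}
  />
)}
```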
diff --git a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx index 41a2d053..2c5b73e0 100644 --- a/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx +++ b/www/app/[domain]/transcripts/[transcriptId]/record/page.tsx @@ -8,12 +8,15 @@ import { useWebSockets } from "../../useWebSockets"; import useAudioDevice from "../../useAudioDevice"; import "../../../../styles/button.css"; import { Topic } from "../../webSocketTypes"; -import getApi from "../../../../lib/getApi"; import LiveTrancription from "../../liveTranscription"; import DisconnectedIndicator from "../../disconnectedIndicator"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { faGear } from "@fortawesome/free-solid-svg-icons"; import { lockWakeState, releaseWakeState } from "../../../../lib/wakeLock"; +import { useRouter } from "next/navigation"; +import Player from "../../player"; +import useMp3, { Mp3Response } from "../../useMp3"; +import WaveformLoading from "../../waveformLoading"; type TranscriptDetails = { params: { @@ -42,8 +45,12 @@ const TranscriptRecord = (details: TranscriptDetails) => { const { audioDevices, getAudioStream } = useAudioDevice(); - const [hasRecorded, setHasRecorded] = useState(false); + const [recordedTime, setRecordedTime] = useState(0); + const [startTime, setStartTime] = useState(0); const [transcriptStarted, setTranscriptStarted] = useState(false); + let mp3 = useMp3(details.params.transcriptId, true); + + const router = useRouter(); useEffect(() => { if (!transcriptStarted && webSockets.transcriptText.length !== 0) @@ -51,15 +58,27 @@ const TranscriptRecord = (details: TranscriptDetails) => { }, [webSockets.transcriptText]); useEffect(() => { - if (transcript?.response?.longSummary) { - const newUrl = `/transcripts/${transcript.response.id}`; + const statusToRedirect = ["ended", "error"]; + + //TODO if has no topic and is error, get back to new + if ( + statusToRedirect.includes(transcript.response?.status) || + statusToRedirect.includes(webSockets.status.value) + ) { + const newUrl = "/transcripts/" + details.params.transcriptId; // Shallow redirection does not work on NextJS 13 // https://github.com/vercel/next.js/discussions/48110 // https://github.com/vercel/next.js/discussions/49540 - // router.push(newUrl, undefined, { shallow: true }); - history.replaceState({}, "", newUrl); + router.replace(newUrl); + // history.replaceState({}, "", newUrl); + } // history.replaceState({}, "", newUrl); + }, [webSockets.status.value, transcript.response?.status]); + + useEffect(() => { + if (webSockets.duration) { + mp3.getNow(); } - }); + }, [webSockets.duration]); useEffect(() => { lockWakeState(); @@ -70,19 +89,31 @@ const TranscriptRecord = (details: TranscriptDetails) => { return ( <> - { - setStream(null); - setHasRecorded(true); - webRTC?.send(JSON.stringify({ cmd: "STOP" })); - }} - topics={webSockets.topics} - getAudioStream={getAudioStream} - useActiveTopic={useActiveTopic} - isPastMeeting={false} - audioDevices={audioDevices} - /> + {webSockets.waveform && webSockets.duration && mp3?.media ? ( + + ) : recordedTime ? ( + + ) : ( + { + setStream(null); + setRecordedTime(Date.now() - startTime); + webRTC?.send(JSON.stringify({ cmd: "STOP" })); + }} + onRecord={() => { + setStartTime(Date.now()); + }} + getAudioStream={getAudioStream} + audioDevices={audioDevices} + /> + )}
{
- {!hasRecorded ? ( + {!recordedTime ? ( <> {transcriptStarted && (

Transcription

@@ -128,6 +159,7 @@ const TranscriptRecord = (details: TranscriptDetails) => { couple of minutes. Please do not navigate away from the page during this time.

+ {/* NTH If login required remove last sentence */}
)} diff --git a/www/app/[domain]/transcripts/finalSummary.tsx b/www/app/[domain]/transcripts/finalSummary.tsx index 463f6100..e0d0f1c9 100644 --- a/www/app/[domain]/transcripts/finalSummary.tsx +++ b/www/app/[domain]/transcripts/finalSummary.tsx @@ -87,7 +87,7 @@ export default function FinalSummary(props: FinalSummaryProps) {
diff --git a/www/app/[domain]/transcripts/player.tsx b/www/app/[domain]/transcripts/player.tsx
new file mode 100644
index 00000000..02151a68
--- /dev/null
+++ b/www/app/[domain]/transcripts/player.tsx
@@ -0,0 +1,166 @@
+import React, { useRef, useEffect, useState } from "react";
+
+import WaveSurfer from "wavesurfer.js";
+import CustomRegionsPlugin from "../../lib/custom-plugins/regions";
+
+import { formatTime } from "../../lib/time";
+import { Topic } from "./webSocketTypes";
+import { AudioWaveform } from "../../api";
+import { waveSurferStyles } from "../../styles/recorder";
+
+type PlayerProps = {
+  topics: Topic[];
+  useActiveTopic: [
+    Topic | null,
+    React.Dispatch<React.SetStateAction<Topic | null>>,
+  ];
+  waveform: AudioWaveform["data"];
+  media: HTMLMediaElement;
+  mediaDuration: number;
+};
+
+export default function Player(props: PlayerProps) {
+  const waveformRef = useRef<HTMLDivElement | null>(null);
+  const [wavesurfer, setWavesurfer] = useState<WaveSurfer | null>(null);
+  const [isPlaying, setIsPlaying] = useState(false);
+  const [currentTime, setCurrentTime] = useState(0);
+  const [waveRegions, setWaveRegions] = useState<CustomRegionsPlugin | null>(
+    null,
+  );
+  const [activeTopic, setActiveTopic] = props.useActiveTopic;
+  const topicsRef = useRef(props.topics);
+
+  // Waveform setup
+  useEffect(() => {
+    if (waveformRef.current) {
+      // XXX duration is required to prevent recomputing peaks from audio.
+      // However, the current waveform returns only the peaks, and no duration,
+      // and the backend does not save duration properly.
+      // So at the moment, we deduce the duration from the topics.
+      // This is not ideal, but it works for now.
+      const _wavesurfer = WaveSurfer.create({
+        container: waveformRef.current,
+        peaks: props.waveform,
+        hideScrollbar: true,
+        autoCenter: true,
+        barWidth: 2,
+        height: "auto",
+        duration: props.mediaDuration,
+
+        ...waveSurferStyles.player,
+      });
+
+      // styling
+      const wsWrapper = _wavesurfer.getWrapper();
+      wsWrapper.style.cursor = waveSurferStyles.playerStyle.cursor;
+      wsWrapper.style.backgroundColor =
+        waveSurferStyles.playerStyle.backgroundColor;
+      wsWrapper.style.borderRadius = waveSurferStyles.playerStyle.borderRadius;
+
+      _wavesurfer.on("play", () => {
+        setIsPlaying(true);
+      });
+      _wavesurfer.on("pause", () => {
+        setIsPlaying(false);
+      });
+      _wavesurfer.on("timeupdate", setCurrentTime);
+
+      setWaveRegions(_wavesurfer.registerPlugin(CustomRegionsPlugin.create()));
+
+      _wavesurfer.toggleInteraction(true);
+
+      _wavesurfer.setMediaElement(props.media);
+
+      setWavesurfer(_wavesurfer);
+
+      return () => {
+        _wavesurfer.destroy();
+        setIsPlaying(false);
+        setCurrentTime(0);
+      };
+    }
+  }, []);
+
+  useEffect(() => {
+    if (!wavesurfer) return;
+    if (!props.media) return;
+    wavesurfer.setMediaElement(props.media);
+  }, [props.media, wavesurfer]);
+
+  useEffect(() => {
+    topicsRef.current = props.topics;
+    renderMarkers();
+  }, [props.topics, waveRegions]);
+
+  const renderMarkers = () => {
+    if (!waveRegions) return;
+
+    waveRegions.clearRegions();
+
+    for (let topic of topicsRef.current) {
+      const content = document.createElement("div");
+      content.setAttribute("style", waveSurferStyles.marker);
+      content.onmouseover = () => {
+        content.style.backgroundColor =
+          waveSurferStyles.markerHover.backgroundColor;
+        content.style.zIndex = "999";
+        content.style.width = "300px";
+      };
+      content.onmouseout = () => {
+        content.setAttribute("style", waveSurferStyles.marker);
+      };
+      content.textContent = topic.title;
+
+      const region = waveRegions.addRegion({
+        start: topic.timestamp,
+        content,
+        color: "f00",
+        drag: false,
+      });
+      region.on("click", (e)
=> { + e.stopPropagation(); + setActiveTopic(topic); + wavesurfer?.setTime(region.start); + }); + } + }; + + useEffect(() => { + if (activeTopic) { + wavesurfer?.setTime(activeTopic.timestamp); + } + }, [activeTopic]); + + const handlePlayClick = () => { + wavesurfer?.playPause(); + }; + + const timeLabel = () => { + if (props.mediaDuration) + return `${formatTime(currentTime)}/${formatTime(props.mediaDuration)}`; + return ""; + }; + + return ( +
+
+
+
{timeLabel()}
+
+ + +
+ ); +} diff --git a/www/app/[domain]/transcripts/recorder.tsx b/www/app/[domain]/transcripts/recorder.tsx index 8db32ff7..e7c016a7 100644 --- a/www/app/[domain]/transcripts/recorder.tsx +++ b/www/app/[domain]/transcripts/recorder.tsx @@ -6,31 +6,19 @@ import CustomRegionsPlugin from "../../lib/custom-plugins/regions"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { faMicrophone } from "@fortawesome/free-solid-svg-icons"; -import { faDownload } from "@fortawesome/free-solid-svg-icons"; import { formatTime } from "../../lib/time"; -import { Topic } from "./webSocketTypes"; -import { AudioWaveform } from "../../api"; import AudioInputsDropdown from "./audioInputsDropdown"; import { Option } from "react-dropdown"; import { waveSurferStyles } from "../../styles/recorder"; import { useError } from "../../(errors)/errorContext"; type RecorderProps = { - setStream?: React.Dispatch>; - onStop?: () => void; - topics: Topic[]; - getAudioStream?: (deviceId) => Promise; - audioDevices?: Option[]; - useActiveTopic: [ - Topic | null, - React.Dispatch>, - ]; - waveform?: AudioWaveform | null; - isPastMeeting: boolean; - transcriptId?: string | null; - media?: HTMLMediaElement | null; - mediaDuration?: number | null; + setStream: React.Dispatch>; + onStop: () => void; + onRecord?: () => void; + getAudioStream: (deviceId) => Promise; + audioDevices: Option[]; }; export default function Recorder(props: RecorderProps) { @@ -38,7 +26,7 @@ export default function Recorder(props: RecorderProps) { const [wavesurfer, setWavesurfer] = useState(null); const [record, setRecord] = useState(null); const [isRecording, setIsRecording] = useState(false); - const [hasRecorded, setHasRecorded] = useState(props.isPastMeeting); + const [hasRecorded, setHasRecorded] = useState(false); const [isPlaying, setIsPlaying] = useState(false); const [currentTime, setCurrentTime] = useState(0); const [timeInterval, setTimeInterval] = useState(null); @@ -48,8 +36,6 @@ export default function Recorder(props: RecorderProps) { ); const [deviceId, setDeviceId] = useState(null); const [recordStarted, setRecordStarted] = useState(false); - const [activeTopic, setActiveTopic] = props.useActiveTopic; - const topicsRef = useRef(props.topics); const [showDevices, setShowDevices] = useState(false); const { setError } = useError(); @@ -73,8 +59,6 @@ export default function Recorder(props: RecorderProps) { if (!record.isRecording()) return; handleRecClick(); break; - case "^": - throw new Error("Unhandled Exception thrown by '^' shortcut"); case "(": location.href = "/login"; break; @@ -104,27 +88,18 @@ export default function Recorder(props: RecorderProps) { // Waveform setup useEffect(() => { if (waveformRef.current) { - // XXX duration is required to prevent recomputing peaks from audio - // However, the current waveform returns only the peaks, and no duration - // And the backend does not save duration properly. - // So at the moment, we deduct the duration from the topics. - // This is not ideal, but it works for now. 
const _wavesurfer = WaveSurfer.create({ container: waveformRef.current, - peaks: props.waveform?.data, hideScrollbar: true, autoCenter: true, barWidth: 2, height: "auto", - duration: props.mediaDuration || 1, ...waveSurferStyles.player, }); - if (!props.transcriptId) { - const _wshack: any = _wavesurfer; - _wshack.renderer.renderSingleCanvas = () => {}; - } + const _wshack: any = _wavesurfer; + _wshack.renderer.renderSingleCanvas = () => {}; // styling const wsWrapper = _wavesurfer.getWrapper(); @@ -144,12 +119,6 @@ export default function Recorder(props: RecorderProps) { setRecord(_wavesurfer.registerPlugin(RecordPlugin.create())); setWaveRegions(_wavesurfer.registerPlugin(CustomRegionsPlugin.create())); - if (props.isPastMeeting) _wavesurfer.toggleInteraction(true); - - if (props.media) { - _wavesurfer.setMediaElement(props.media); - } - setWavesurfer(_wavesurfer); return () => { @@ -161,58 +130,6 @@ export default function Recorder(props: RecorderProps) { } }, []); - useEffect(() => { - if (!wavesurfer) return; - if (!props.media) return; - wavesurfer.setMediaElement(props.media); - }, [props.media, wavesurfer]); - - useEffect(() => { - topicsRef.current = props.topics; - if (!isRecording) renderMarkers(); - }, [props.topics, waveRegions]); - - const renderMarkers = () => { - if (!waveRegions) return; - - waveRegions.clearRegions(); - - for (let topic of topicsRef.current) { - const content = document.createElement("div"); - content.setAttribute("style", waveSurferStyles.marker); - content.onmouseover = () => { - content.style.backgroundColor = - waveSurferStyles.markerHover.backgroundColor; - content.style.zIndex = "999"; - content.style.width = "300px"; - }; - content.onmouseout = () => { - content.setAttribute("style", waveSurferStyles.marker); - }; - content.textContent = topic.title; - - const region = waveRegions.addRegion({ - start: topic.timestamp, - content, - color: "f00", - drag: false, - }); - region.on("click", (e) => { - e.stopPropagation(); - setActiveTopic(topic); - wavesurfer?.setTime(region.start); - }); - } - }; - - useEffect(() => { - if (!record) return; - - return record.on("stopRecording", () => { - renderMarkers(); - }); - }, [record]); - useEffect(() => { if (isRecording) { const interval = window.setInterval(() => { @@ -229,12 +146,6 @@ export default function Recorder(props: RecorderProps) { } }, [isRecording]); - useEffect(() => { - if (activeTopic) { - wavesurfer?.setTime(activeTopic.timestamp); - } - }, [activeTopic]); - const handleRecClick = async () => { if (!record) return console.log("no record"); @@ -249,10 +160,10 @@ export default function Recorder(props: RecorderProps) { setScreenMediaStream(null); setDestinationStream(null); } else { + if (props.onRecord) props.onRecord(); const stream = await getCurrentStream(); if (props.setStream) props.setStream(stream); - waveRegions?.clearRegions(); if (stream) { await record.startRecording(stream); setIsRecording(true); @@ -320,7 +231,6 @@ export default function Recorder(props: RecorderProps) { if (!record) return; if (!destinationStream) return; if (props.setStream) props.setStream(destinationStream); - waveRegions?.clearRegions(); if (destinationStream) { record.startRecording(destinationStream); setIsRecording(true); @@ -379,23 +289,9 @@ export default function Recorder(props: RecorderProps) { } text-white ml-2 md:ml:4 md:h-[78px] md:min-w-[100px] text-lg`} id="play-btn" onClick={handlePlayClick} - disabled={isRecording} > {isPlaying ? 
"Pause" : "Play"} - - {props.transcriptId && ( - - - - )} )} {!hasRecorded && ( diff --git a/www/app/[domain]/transcripts/useMp3.ts b/www/app/[domain]/transcripts/useMp3.ts index 570a6a25..23249f94 100644 --- a/www/app/[domain]/transcripts/useMp3.ts +++ b/www/app/[domain]/transcripts/useMp3.ts @@ -1,24 +1,19 @@ import { useContext, useEffect, useState } from "react"; -import { useError } from "../../(errors)/errorContext"; import { DomainContext } from "../domainContext"; import getApi from "../../lib/getApi"; import { useFiefAccessTokenInfo } from "@fief/fief/build/esm/nextjs/react"; -import { shouldShowError } from "../../lib/errorUtils"; -type Mp3Response = { - url: string | null; +export type Mp3Response = { media: HTMLMediaElement | null; loading: boolean; - error: Error | null; + getNow: () => void; }; -const useMp3 = (protectedPath: boolean, id: string): Mp3Response => { - const [url, setUrl] = useState(null); +const useMp3 = (id: string, waiting?: boolean): Mp3Response => { const [media, setMedia] = useState(null); + const [later, setLater] = useState(waiting); const [loading, setLoading] = useState(false); - const [error, setErrorState] = useState(null); - const { setError } = useError(); - const api = getApi(protectedPath); + const api = getApi(true); const { api_url } = useContext(DomainContext); const accessTokenInfo = useFiefAccessTokenInfo(); const [serviceWorkerReady, setServiceWorkerReady] = useState(false); @@ -42,8 +37,8 @@ const useMp3 = (protectedPath: boolean, id: string): Mp3Response => { }); }, [navigator.serviceWorker, serviceWorkerReady, accessTokenInfo]); - const getMp3 = (id: string) => { - if (!id || !api) return; + useEffect(() => { + if (!id || !api || later) return; // createa a audio element and set the source setLoading(true); @@ -53,13 +48,13 @@ const useMp3 = (protectedPath: boolean, id: string): Mp3Response => { audioElement.preload = "auto"; setMedia(audioElement); setLoading(false); + }, [id, api, later]); + + const getNow = () => { + setLater(false); }; - useEffect(() => { - getMp3(id); - }, [id, api]); - - return { url, media, loading, error }; + return { media, loading, getNow }; }; export default useMp3; diff --git a/www/app/[domain]/transcripts/useTranscript.ts b/www/app/[domain]/transcripts/useTranscript.ts index af60cd3b..987e57f3 100644 --- a/www/app/[domain]/transcripts/useTranscript.ts +++ b/www/app/[domain]/transcripts/useTranscript.ts @@ -5,16 +5,28 @@ import { useError } from "../../(errors)/errorContext"; import getApi from "../../lib/getApi"; import { shouldShowError } from "../../lib/errorUtils"; -type Transcript = { - response: GetTranscript | null; - loading: boolean; - error: Error | null; +type ErrorTranscript = { + error: Error; + loading: false; + response: any; +}; + +type LoadingTranscript = { + response: any; + loading: true; + error: false; +}; + +type SuccessTranscript = { + response: GetTranscript; + loading: false; + error: null; }; const useTranscript = ( protectedPath: boolean, id: string | null, -): Transcript => { +): ErrorTranscript | LoadingTranscript | SuccessTranscript => { const [response, setResponse] = useState(null); const [loading, setLoading] = useState(true); const [error, setErrorState] = useState(null); @@ -46,7 +58,10 @@ const useTranscript = ( }); }, [id, !api]); - return { response, loading, error }; + return { response, loading, error } as + | ErrorTranscript + | LoadingTranscript + | SuccessTranscript; }; export default useTranscript; diff --git a/www/app/[domain]/transcripts/useWebSockets.ts 
diff --git a/www/app/[domain]/transcripts/useWebSockets.ts b/www/app/[domain]/transcripts/useWebSockets.ts
index bcf6b163..f289adbb 100644
--- a/www/app/[domain]/transcripts/useWebSockets.ts
+++ b/www/app/[domain]/transcripts/useWebSockets.ts
@@ -1,30 +1,35 @@
 import { useContext, useEffect, useState } from "react";
 import { Topic, FinalSummary, Status } from "./webSocketTypes";
 import { useError } from "../../(errors)/errorContext";
-import { useRouter } from "next/navigation";
 import { DomainContext } from "../domainContext";
+import { AudioWaveform } from "../../api";
 
-type UseWebSockets = {
+export type UseWebSockets = {
   transcriptText: string;
   translateText: string;
+  title: string;
   topics: Topic[];
   finalSummary: FinalSummary;
   status: Status;
+  waveform: AudioWaveform["data"] | null;
+  duration: number | null;
 };
 
 export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
   const [transcriptText, setTranscriptText] = useState("");
   const [translateText, setTranslateText] = useState("");
+  const [title, setTitle] = useState("");
   const [textQueue, setTextQueue] = useState([]);
   const [translationQueue, setTranslationQueue] = useState([]);
   const [isProcessing, setIsProcessing] = useState(false);
   const [topics, setTopics] = useState([]);
+  const [waveform, setWaveForm] = useState<AudioWaveform["data"] | null>(null);
+  const [duration, setDuration] = useState<number | null>(null);
   const [finalSummary, setFinalSummary] = useState({
     summary: "",
   });
   const [status, setStatus] = useState({ value: "initial" });
   const { setError } = useError();
-  const router = useRouter();
 
   const { websocket_url } = useContext(DomainContext);
 
@@ -294,7 +299,7 @@
     if (!transcriptId) return;
 
     const url = `${websocket_url}/v1/transcripts/${transcriptId}/events`;
-    const ws = new WebSocket(url);
+    let ws = new WebSocket(url);
 
     ws.onopen = () => {
       console.debug("WebSocket connection opened");
@@ -343,24 +348,39 @@
 
         case "FINAL_TITLE":
           console.debug("FINAL_TITLE event:", message.data);
+          if (message.data) {
+            setTitle(message.data.title);
+          }
+          break;
+
+        case "WAVEFORM":
+          console.debug(
+            "WAVEFORM event length:",
+            message.data.waveform.length,
+          );
+          if (message.data) {
+            setWaveForm(message.data.waveform);
+          }
+          break;
+
+        case "DURATION":
+          console.debug("DURATION event:", message.data);
+          if (message.data) {
+            setDuration(message.data.duration);
+          }
           break;
 
         case "STATUS":
           console.log("STATUS event:", message.data);
-          if (message.data.value === "ended") {
-            const newUrl = "/transcripts/" + transcriptId;
-            router.push(newUrl);
-            console.debug("FINAL_LONG_SUMMARY event:", message.data);
-          }
           if (message.data.value === "error") {
-            const newUrl = "/transcripts/" + transcriptId;
-            router.push(newUrl);
             setError(
               Error("Websocket error status"),
               "There was an error processing this meeting.",
             );
           }
           setStatus(message.data);
+          if (message.data.value === "ended") {
+            ws.close();
+          }
           break;
 
         default:
@@ -382,13 +402,18 @@
       console.debug("WebSocket connection closed");
       switch (event.code) {
         case 1000: // Normal Closure:
-        case 1001: // Going Away:
-        case 1005:
-          break;
         default:
           setError(
             new Error(`WebSocket closed unexpectedly with code: ${event.code}`),
+            "Disconnected",
           );
+          console.log(
+            "Socket is closed. Reconnect will be attempted in 1 second.",
+            event.reason,
+          );
+          setTimeout(function () {
+            ws = new WebSocket(url);
+          }, 1000);
       }
     };
 
@@ -397,5 +422,14 @@ export const useWebSockets = (transcriptId: string | null): UseWebSockets => {
     };
   }, [transcriptId]);
 
-  return { transcriptText, translateText, topics, finalSummary, status };
+  return {
+    transcriptText,
+    translateText,
+    topics,
+    finalSummary,
+    title,
+    status,
+    waveform,
+    duration,
+  };
 };
diff --git a/www/app/[domain]/transcripts/waveformLoading.tsx b/www/app/[domain]/transcripts/waveformLoading.tsx
new file mode 100644
index 00000000..68e0c80f
--- /dev/null
+++ b/www/app/[domain]/transcripts/waveformLoading.tsx
@@ -0,0 +1,11 @@
+import { faSpinner } from "@fortawesome/free-solid-svg-icons";
+import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
+
+export default () => (
+  <div>
+    <FontAwesomeIcon icon={faSpinner} spin />
+  </div>
+);
diff --git a/www/app/lib/edgeConfig.ts b/www/app/lib/edgeConfig.ts
index 5527121a..1140e555 100644
--- a/www/app/lib/edgeConfig.ts
+++ b/www/app/lib/edgeConfig.ts
@@ -3,9 +3,9 @@ import { isDevelopment } from "./utils";
 
 const localConfig = {
   features: {
-    requireLogin: true,
+    requireLogin: false,
     privacy: true,
-    browse: true,
+    browse: false,
   },
   api_url: "http://127.0.0.1:1250",
   websocket_url: "ws://127.0.0.1:1250",
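One caveat worth noting on the reconnect path in useWebSockets above: assigning `ws = new WebSocket(url)` inside `setTimeout` produces a socket with no `onopen`/`onmessage`/`onclose` handlers attached. A hedged sketch of a variant that re-runs the whole setup instead (the `connect` wrapper is hypothetical, not in the diff):

```ts
// Hypothetical refactor: wrapping setup in a function lets a reconnect
// re-attach all handlers instead of creating a bare socket.
function connect(url: string, onMessage: (ev: MessageEvent) => void): WebSocket {
  const ws = new WebSocket(url);
  ws.onmessage = onMessage;
  ws.onclose = (event) => {
    if (event.code === 1000) return; // normal closure: do not reconnect
    console.log("Socket closed, retrying in 1 second.", event.reason);
    setTimeout(() => connect(url, onMessage), 1000);
  };
  return ws;
}
```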