mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-21 12:49:06 +00:00)
adds meeting setup
@@ -21,7 +21,7 @@ type TranscriptDetails = {
 export default function TranscriptDetails(details: TranscriptDetails) {
   const api = getApi();
-  const transcript = useTranscript(null, api, details.params.transcriptId);
+  const transcript = useTranscript(details.params.transcriptId);
   const topics = useTopics(api, details.params.transcriptId);
   const waveform = useWaveform(api, details.params.transcriptId);
   const useActiveTopic = useState<Topic | null>(null);
www/app/transcripts/[transcriptId]/record/page.tsx (new file, 146 lines)
@@ -0,0 +1,146 @@
+"use client";
+import React, { useEffect, useState } from "react";
+import Recorder from "../../recorder";
+import { TopicList } from "../../topicList";
+import useWebRTC from "../../useWebRTC";
+import useTranscript from "../../useTranscript";
+import { useWebSockets } from "../../useWebSockets";
+import useAudioDevice from "../../useAudioDevice";
+import "../../../styles/button.css";
+import { Topic } from "../../webSocketTypes";
+import getApi from "../../../lib/getApi";
+import LiveTrancription from "../../liveTranscription";
+import DisconnectedIndicator from "../../disconnectedIndicator";
+import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
+import { faGear } from "@fortawesome/free-solid-svg-icons";
+import { lockWakeState, releaseWakeState } from "../../../lib/wakeLock";
+
+type TranscriptDetails = {
+  params: {
+    transcriptId: string;
+  };
+};
+
+const TranscriptRecord = (details: TranscriptDetails) => {
+  const [stream, setStream] = useState<MediaStream | null>(null);
+  const [disconnected, setDisconnected] = useState<boolean>(false);
+  const useActiveTopic = useState<Topic | null>(null);
+
+  useEffect(() => {
+    if (process.env.NEXT_PUBLIC_ENV === "development") {
+      document.onkeyup = (e) => {
+        if (e.key === "d") {
+          setDisconnected((prev) => !prev);
+        }
+      };
+    }
+  }, []);
+
+  const transcript = useTranscript(details.params.transcriptId);
+  const api = getApi();
+  const webRTC = useWebRTC(stream, details.params.transcriptId, api);
+  const webSockets = useWebSockets(details.params.transcriptId);
+
+  const {
+    loading,
+    permissionOk,
+    permissionDenied,
+    audioDevices,
+    requestPermission,
+    getAudioStream,
+  } = useAudioDevice();
+
+  const [hasRecorded, setHasRecorded] = useState(false);
+  const [transcriptStarted, setTranscriptStarted] = useState(false);
+
+  useEffect(() => {
+    if (!transcriptStarted && webSockets.transcriptText.length !== 0)
+      setTranscriptStarted(true);
+  }, [webSockets.transcriptText]);
+
+  useEffect(() => {
+    if (transcript?.response?.longSummary) {
+      const newUrl = `/transcripts/${transcript.response.id}`;
+      // Shallow redirection does not work on NextJS 13
+      // https://github.com/vercel/next.js/discussions/48110
+      // https://github.com/vercel/next.js/discussions/49540
+      // router.push(newUrl, undefined, { shallow: true });
+      history.replaceState({}, "", newUrl);
+    }
+  });
+
+  useEffect(() => {
+    lockWakeState();
+    return () => {
+      releaseWakeState();
+    };
+  }, []);
+
+  return (
+    <>
+      <Recorder
+        setStream={setStream}
+        onStop={() => {
+          setStream(null);
+          setHasRecorded(true);
+          webRTC?.send(JSON.stringify({ cmd: "STOP" }));
+        }}
+        topics={webSockets.topics}
+        getAudioStream={getAudioStream}
+        useActiveTopic={useActiveTopic}
+        isPastMeeting={false}
+        audioDevices={audioDevices}
+      />
+
+      <div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-mobile-inner lg:grid-rows-1 gap-2 lg:gap-4 h-full">
+        <TopicList
+          topics={webSockets.topics}
+          useActiveTopic={useActiveTopic}
+          autoscroll={true}
+        />
+
+        <section
+          className={`w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4`}
+        >
+          {!hasRecorded ? (
+            <>
+              {transcriptStarted && (
+                <h2 className="md:text-lg font-bold">Transcription</h2>
+              )}
+              <div className="flex flex-col justify-center align center text-center h-full">
+                <div className="py-2 h-auto">
+                  {!transcriptStarted ? (
+                    <div className="text-center text-gray-500">
+                      The conversation transcript will appear here shortly after
+                      you start recording.
+                    </div>
+                  ) : (
+                    <LiveTrancription text={webSockets.transcriptText} />
+                  )}
+                </div>
+              </div>
+            </>
+          ) : (
+            <div className="flex flex-col justify-center align center text-center h-full text-gray-500">
+              <div className="p-2 md:p-4">
+                <FontAwesomeIcon
+                  icon={faGear}
+                  className="animate-spin-slow h-14 w-14 md:h-20 md:w-20"
+                />
+              </div>
+              <p>
+                We are generating the final summary for you. This may take a
+                couple of minutes. Please do not navigate away from the page
+                during this time.
+              </p>
+            </div>
+          )}
+        </section>
+      </div>
+
+      {disconnected && <DisconnectedIndicator />}
+    </>
+  );
+};
+
+export default TranscriptRecord;
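The record page keeps the screen awake for the duration of a meeting via lockWakeState and releaseWakeState from ../../../lib/wakeLock. That helper is not part of this commit, so the following is only a minimal sketch of how it could be implemented with the browser's Screen Wake Lock API; the details are assumptions, not the repository's actual file.

// lib/wakeLock.ts: hypothetical sketch, not the file shipped in this commit.
// Requests a screen wake lock where the API is available and no-ops elsewhere.
let sentinel: WakeLockSentinel | null = null;

export const lockWakeState = async (): Promise<void> => {
  if (!("wakeLock" in navigator)) return; // unsupported browser
  try {
    sentinel = await navigator.wakeLock.request("screen");
  } catch (err) {
    console.debug("Wake lock request failed:", err);
  }
};

export const releaseWakeState = async (): Promise<void> => {
  await sentinel?.release();
  sentinel = null;
};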
www/app/transcripts/createTranscript.ts (new file, 54 lines)
@@ -0,0 +1,54 @@
+import { useEffect, useState } from "react";
+import { DefaultApi, V1TranscriptsCreateRequest } from "../api/apis/DefaultApi";
+import { GetTranscript } from "../api";
+import { useError } from "../(errors)/errorContext";
+import getApi from "../lib/getApi";
+
+type CreateTranscript = {
+  response: GetTranscript | null;
+  loading: boolean;
+  error: Error | null;
+  create: (params: V1TranscriptsCreateRequest["createTranscript"]) => void;
+};
+
+const useCreateTranscript = (): CreateTranscript => {
+  const [response, setResponse] = useState<GetTranscript | null>(null);
+  const [loading, setLoading] = useState<boolean>(false);
+  const [error, setErrorState] = useState<Error | null>(null);
+  const { setError } = useError();
+  const api = getApi();
+
+  const create = (params: V1TranscriptsCreateRequest["createTranscript"]) => {
+    if (loading) return;
+
+    setLoading(true);
+    const requestParameters: V1TranscriptsCreateRequest = {
+      createTranscript: {
+        name: params.name || "Weekly All-Hands", // Default
+        targetLanguage: params.targetLanguage || "en", // Default
+      },
+    };
+
+    console.debug(
+      "POST - /v1/transcripts/ - Requesting new transcription creation",
+      requestParameters,
+    );
+
+    api
+      .v1TranscriptsCreate(requestParameters)
+      .then((result) => {
+        setResponse(result);
+        setLoading(false);
+        console.debug("New transcript created:", result);
+      })
+      .catch((err) => {
+        setError(err);
+        setErrorState(err);
+        setLoading(false);
+      });
+  };
+
+  return { response, loading, error, create };
+};
+
+export default useCreateTranscript;
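For context, the hook is meant to be driven from a component: call create() with the form values, watch loading, and navigate once response arrives. Below is a minimal, hypothetical call site; the real consumer is the meeting-setup page further down in this diff, and the component and element names here are illustrative only.

// Hypothetical consumer of useCreateTranscript; the actual page is shown below.
import { useEffect } from "react";
import { useRouter } from "next/navigation";
import useCreateTranscript from "./createTranscript";

const NewMeetingButton = () => {
  const router = useRouter();
  const createTranscript = useCreateTranscript();

  // Navigate to the record page once the API has returned the new transcript.
  useEffect(() => {
    if (createTranscript.response)
      router.push(`/transcripts/${createTranscript.response.id}/record`);
  }, [createTranscript.response]);

  return (
    <button
      disabled={createTranscript.loading}
      onClick={() =>
        createTranscript.create({ name: "Weekly All-Hands", targetLanguage: "en" })
      }
    >
      {createTranscript.loading ? "Creating..." : "New meeting"}
    </button>
  );
};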
@@ -1,7 +1,5 @@
 "use client";
 import React, { useEffect, useState } from "react";
-import Recorder from "../recorder";
-import { TopicList } from "../topicList";
 import useWebRTC from "../useWebRTC";
 import useTranscript from "../useTranscript";
 import { useWebSockets } from "../useWebSockets";
@@ -9,35 +7,37 @@ import useAudioDevice from "../useAudioDevice";
 import "../../styles/button.css";
 import { Topic } from "../webSocketTypes";
 import getApi from "../../lib/getApi";
-import LiveTrancription from "../liveTranscription";
-import DisconnectedIndicator from "../disconnectedIndicator";
-import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
-import { faGear } from "@fortawesome/free-solid-svg-icons";
 import About from "../../(aboutAndPrivacy)/about";
 import Privacy from "../../(aboutAndPrivacy)/privacy";
 import { lockWakeState, releaseWakeState } from "../../lib/wakeLock";
 import { useRouter } from "next/navigation";
+import createTranscript from "../createTranscript";
+import { GetTranscript } from "../../api";
+import { Router } from "next/router";
+import useCreateTranscript from "../createTranscript";

 const TranscriptCreate = () => {
-  const [stream, setStream] = useState<MediaStream | null>(null);
-  const [disconnected, setDisconnected] = useState<boolean>(false);
-  const useActiveTopic = useState<Topic | null>(null);
-
-  useEffect(() => {
-    if (process.env.NEXT_PUBLIC_ENV === "development") {
-      document.onkeyup = (e) => {
-        if (e.key === "d") {
-          setDisconnected((prev) => !prev);
-        }
-      };
-    }
-  }, []);
-
-  const api = getApi();
-  const transcript = useTranscript(stream, api);
-  const webRTC = useWebRTC(stream, transcript?.response?.id, api);
-  const webSockets = useWebSockets(transcript?.response?.id);
+  // const transcript = useTranscript(stream, api);
   const router = useRouter();
+  const api = getApi();
+
+  const [name, setName] = useState<string>();
+  const nameChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+    setName(event.target.value);
+  };
+  const [targetLanguage, setTargetLanguage] = useState<string>();
+
+  const createTranscript = useCreateTranscript();
+
+  const send = () => {
+    if (createTranscript.loading || permissionDenied) return;
+    createTranscript.create({ name, targetLanguage });
+  };
+  useEffect(() => {
+    createTranscript.response &&
+      router.push(`/transcripts/${createTranscript.response.id}/record`);
+  }, [createTranscript.response]);

   const {
     loading,
     permissionOk,
@@ -47,151 +47,62 @@ const TranscriptCreate = () => {
     getAudioStream,
   } = useAudioDevice();

-  const [hasRecorded, setHasRecorded] = useState(false);
-  const [transcriptStarted, setTranscriptStarted] = useState(false);
-
-  useEffect(() => {
-    if (!transcriptStarted && webSockets.transcriptText.length !== 0)
-      setTranscriptStarted(true);
-  }, [webSockets.transcriptText]);
-
-  useEffect(() => {
-    if (transcript?.response?.id) {
-      const newUrl = `/transcripts/${transcript.response.id}`;
-      // Shallow redirection does not work on NextJS 13
-      // https://github.com/vercel/next.js/discussions/48110
-      // https://github.com/vercel/next.js/discussions/49540
-      // router.push(newUrl, undefined, { shallow: true });
-      history.replaceState({}, "", newUrl);
-    }
-  });
-
-  useEffect(() => {
-    lockWakeState();
-    return () => {
-      releaseWakeState();
-    };
-  }, []);
-
   return (
     <>
-      {permissionOk ? (
-        <>
-          <Recorder
-            setStream={setStream}
-            onStop={() => {
-              webRTC?.send(JSON.stringify({ cmd: "STOP" }));
-              setStream(null);
-              setHasRecorded(true);
-            }}
-            topics={webSockets.topics}
-            getAudioStream={getAudioStream}
-            useActiveTopic={useActiveTopic}
-            isPastMeeting={false}
-            audioDevices={audioDevices}
-          />
-
-          <div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-mobile-inner lg:grid-rows-1 gap-2 lg:gap-4 h-full">
-            <TopicList
-              topics={webSockets.topics}
-              useActiveTopic={useActiveTopic}
-              autoscroll={true}
-            />
-
-            <section
-              className={`w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4`}
-            >
-              {!hasRecorded ? (
-                <>
-                  {transcriptStarted && (
-                    <h2 className="md:text-lg font-bold">Transcription</h2>
-                  )}
-                  <div className="flex flex-col justify-center align center text-center h-full">
-                    <div className="py-2 h-auto">
-                      {!transcriptStarted ? (
-                        <div className="text-center text-gray-500">
-                          The conversation transcript will appear here shortly
-                          after you start recording.
-                        </div>
-                      ) : (
-                        <LiveTrancription text={webSockets.transcriptText} />
-                      )}
-                    </div>
-                  </div>
-                </>
-              ) : (
-                <div className="flex flex-col justify-center align center text-center h-full text-gray-500">
-                  <div className="p-2 md:p-4">
-                    <FontAwesomeIcon
-                      icon={faGear}
-                      className="animate-spin-slow h-14 w-14 md:h-20 md:w-20"
-                    />
-                  </div>
-                  <p>
-                    We are generating the final summary for you. This may take a
-                    couple of minutes. Please do not navigate away from the page
-                    during this time.
-                  </p>
-                </div>
-              )}
-            </section>
-          </div>
-
-          {disconnected && <DisconnectedIndicator />}
-        </>
-      ) : (
-        <>
-          <div></div>
-          <div className="max-h-full overflow-auto">
-            <section className="flex flex-col w-full h-full items-center justify-evenly p-4 md:px-6 md:py-8">
-              <div>
-                <div className="flex flex-col max-w-xl items-center justify-center">
-                  <h1 className="text-2xl font-bold mb-2">
-                    Welcome to reflector.media
-                  </h1>
-                  <p>
-                    Reflector is a transcription and summarization pipeline that
-                    transforms audio into knowledge. The output is meeting
-                    minutes and topic summaries enabling topic-specific analyses
-                    stored in your systems of record. This is accomplished on
-                    your infrastructure – without 3rd parties – keeping your
-                    data private, secure, and organized.
-                  </p>
-                  <About buttonText="Learn more" />
-                  <h2 className="text-2xl font-bold mt-4 mb-2">
-                    Audio Permissions
-                  </h2>
-                  {loading ? (
-                    <p className="text-center">Checking permission...</p>
-                  ) : (
-                    <>
-                      <p className="text-center">
-                        In order to use Reflector, we kindly request permission
-                        to access your microphone during meetings and events.
-                        <br />
-                        <Privacy buttonText="Privacy policy" />
-                        <br />
-                        {permissionDenied
-                          ? "Permission to use your microphone was denied, please change the permission setting in your browser and refresh this page."
-                          : "Please grant permission to continue."}
-                      </p>
-                      <button
-                        className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded m-auto"
-                        onClick={requestPermission}
-                        disabled={permissionDenied}
-                      >
-                        {permissionDenied
-                          ? "Access denied"
-                          : "Grant Permission"}
-                      </button>
-                    </>
-                  )}
-                </div>
-              </div>
-            </section>
-          </div>
-        </>
-      )}
+      <div></div>
+      <div className="max-h-full overflow-auto">
+        <section className="flex flex-col w-full h-full items-center justify-evenly p-4 md:px-6 md:py-8">
+          <div>
+            <div className="flex flex-col max-w-xl items-center justify-center">
+              <h1 className="text-2xl font-bold mb-2">
+                Welcome to reflector.media
+              </h1>
+              <p>
+                Reflector is a transcription and summarization pipeline that
+                transforms audio into knowledge. The output is meeting minutes
+                and topic summaries enabling topic-specific analyses stored in
+                your systems of record. This is accomplished on your
+                infrastructure – without 3rd parties – keeping your data
+                private, secure, and organized.
+              </p>
+              <About buttonText="Learn more" />
+              <input type="text" onChange={nameChange} />
+              <button onClick={() => setTargetLanguage("fr")}>Language</button>
+              <h2 className="text-2xl font-bold mt-4 mb-2">
+                Audio Permissions
+              </h2>
+              {loading ? (
+                <p className="text-center">Checking permission...</p>
+              ) : permissionOk ? (
+                <> Microphone permission granted </>
+              ) : (
+                <>
+                  <p className="text-center">
+                    In order to use Reflector, we kindly request permission to
+                    access your microphone during meetings and events.
+                    <br />
+                    <Privacy buttonText="Privacy policy" />
+                    <br />
+                    {permissionDenied
+                      ? "Permission to use your microphone was denied, please change the permission setting in your browser and refresh this page."
+                      : "Please grant permission to continue."}
+                  </p>
+                  <button
+                    className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded m-auto"
+                    onClick={requestPermission}
+                    disabled={permissionDenied}
+                  >
+                    {permissionDenied ? "Access denied" : "Grant Permission"}
+                  </button>
+                </>
+              )}
+            </div>
+            <button onClick={send} disabled={!permissionOk}>
+              {createTranscript.loading ? "loading" : "Send"}
+            </button>
+          </div>
+        </section>
+      </div>
     </>
   );
 };
@@ -1,11 +1,11 @@
 import { useEffect, useState } from "react";
 import {
-  DefaultApi,
   V1TranscriptGetRequest,
   V1TranscriptsCreateRequest,
 } from "../api/apis/DefaultApi";
 import { GetTranscript } from "../api";
 import { useError } from "../(errors)/errorContext";
+import getApi from "../lib/getApi";

 type Transcript = {
   response: GetTranscript | null;
@@ -13,23 +13,12 @@ type Transcript = {
   error: Error | null;
 };

-const useTranscript = (
-  stream: MediaStream | null,
-  api: DefaultApi,
-  id: string | null = null,
-): Transcript => {
+const useTranscript = (id: string | null): Transcript => {
   const [response, setResponse] = useState<GetTranscript | null>(null);
   const [loading, setLoading] = useState<boolean>(false);
   const [error, setErrorState] = useState<Error | null>(null);
   const { setError } = useError();
+  const api = getApi();

-  const getOrCreateTranscript = (id: string | null) => {
-    if (id) {
-      getTranscript(id);
-    } else if (stream) {
-      createTranscript();
-    }
-  };
-
   const getTranscript = (id: string | null) => {
     if (!id) throw new Error("Transcript ID is required to get transcript");
@@ -43,34 +32,7 @@ const useTranscript = (
       .then((result) => {
         setResponse(result);
         setLoading(false);
-        console.debug("New transcript created:", result);
-      })
-      .catch((err) => {
-        setError(err);
-        setErrorState(err);
-      });
-  };
-
-  const createTranscript = () => {
-    setLoading(true);
-    const requestParameters: V1TranscriptsCreateRequest = {
-      createTranscript: {
-        name: "Weekly All-Hands", // Hardcoded for now
-        targetLanguage: "en", // Hardcoded for now
-      },
-    };
-
-    console.debug(
-      "POST - /v1/transcripts/ - Requesting new transcription creation",
-      requestParameters,
-    );
-
-    api
-      .v1TranscriptsCreate(requestParameters)
-      .then((result) => {
-        setResponse(result);
-        setLoading(false);
-        console.debug("New transcript created:", result);
+        console.debug("Transcript Loaded:", result);
       })
       .catch((err) => {
         setError(err);
@@ -79,8 +41,8 @@ const useTranscript = (
   };

   useEffect(() => {
-    getOrCreateTranscript(id);
-  }, [id, stream]);
+    getTranscript(id);
+  }, [id]);

   return { response, loading, error };
 };
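The net effect of this change: useTranscript no longer creates transcripts from a stream (that responsibility moved to useCreateTranscript above) and no longer takes an API client, so call sites shrink accordingly. A before/after sketch based on the call sites in this diff:

// Before this commit: the hook needed a stream and an API client, and could create.
const transcript = useTranscript(null, api, details.params.transcriptId);

// After: it only fetches an existing transcript by id.
const transcript = useTranscript(details.params.transcriptId);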