adds meeting setup

Sara
2023-10-13 19:10:15 +02:00
parent c5297be924
commit eca5330f44
5 changed files with 281 additions and 208 deletions

View File

@@ -21,7 +21,7 @@ type TranscriptDetails = {
export default function TranscriptDetails(details: TranscriptDetails) {
const api = getApi();
-const transcript = useTranscript(null, api, details.params.transcriptId);
+const transcript = useTranscript(details.params.transcriptId);
const topics = useTopics(api, details.params.transcriptId);
const waveform = useWaveform(api, details.params.transcriptId);
const useActiveTopic = useState<Topic | null>(null);

View File

@@ -0,0 +1,146 @@
"use client";
import React, { useEffect, useState } from "react";
import Recorder from "../../recorder";
import { TopicList } from "../../topicList";
import useWebRTC from "../../useWebRTC";
import useTranscript from "../../useTranscript";
import { useWebSockets } from "../../useWebSockets";
import useAudioDevice from "../../useAudioDevice";
import "../../../styles/button.css";
import { Topic } from "../../webSocketTypes";
import getApi from "../../../lib/getApi";
import LiveTrancription from "../../liveTranscription";
import DisconnectedIndicator from "../../disconnectedIndicator";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faGear } from "@fortawesome/free-solid-svg-icons";
import { lockWakeState, releaseWakeState } from "../../../lib/wakeLock";
type TranscriptDetails = {
params: {
transcriptId: string;
};
};
const TranscriptRecord = (details: TranscriptDetails) => {
const [stream, setStream] = useState<MediaStream | null>(null);
const [disconnected, setDisconnected] = useState<boolean>(false);
const useActiveTopic = useState<Topic | null>(null);
useEffect(() => {
if (process.env.NEXT_PUBLIC_ENV === "development") {
document.onkeyup = (e) => {
if (e.key === "d") {
setDisconnected((prev) => !prev);
}
};
}
}, []);
const transcript = useTranscript(details.params.transcriptId);
const api = getApi();
const webRTC = useWebRTC(stream, details.params.transcriptId, api);
const webSockets = useWebSockets(details.params.transcriptId);
const {
loading,
permissionOk,
permissionDenied,
audioDevices,
requestPermission,
getAudioStream,
} = useAudioDevice();
const [hasRecorded, setHasRecorded] = useState(false);
const [transcriptStarted, setTranscriptStarted] = useState(false);
useEffect(() => {
if (!transcriptStarted && webSockets.transcriptText.length !== 0)
setTranscriptStarted(true);
}, [webSockets.transcriptText]);
useEffect(() => {
if (transcript?.response?.longSummary) {
const newUrl = `/transcripts/${transcript.response.id}`;
// Shallow redirection does not work on NextJS 13
// https://github.com/vercel/next.js/discussions/48110
// https://github.com/vercel/next.js/discussions/49540
// router.push(newUrl, undefined, { shallow: true });
history.replaceState({}, "", newUrl);
}
});
useEffect(() => {
lockWakeState();
return () => {
releaseWakeState();
};
}, []);
return (
<>
<Recorder
setStream={setStream}
onStop={() => {
setStream(null);
setHasRecorded(true);
webRTC?.send(JSON.stringify({ cmd: "STOP" }));
}}
topics={webSockets.topics}
getAudioStream={getAudioStream}
useActiveTopic={useActiveTopic}
isPastMeeting={false}
audioDevices={audioDevices}
/>
<div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-mobile-inner lg:grid-rows-1 gap-2 lg:gap-4 h-full">
<TopicList
topics={webSockets.topics}
useActiveTopic={useActiveTopic}
autoscroll={true}
/>
<section
className={`w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4`}
>
{!hasRecorded ? (
<>
{transcriptStarted && (
<h2 className="md:text-lg font-bold">Transcription</h2>
)}
<div className="flex flex-col justify-center align center text-center h-full">
<div className="py-2 h-auto">
{!transcriptStarted ? (
<div className="text-center text-gray-500">
The conversation transcript will appear here shortly after
you start recording.
</div>
) : (
<LiveTrancription text={webSockets.transcriptText} />
)}
</div>
</div>
</>
) : (
<div className="flex flex-col justify-center align center text-center h-full text-gray-500">
<div className="p-2 md:p-4">
<FontAwesomeIcon
icon={faGear}
className="animate-spin-slow h-14 w-14 md:h-20 md:w-20"
/>
</div>
<p>
We are generating the final summary for you. This may take a
couple of minutes. Please do not navigate away from the page
during this time.
</p>
</div>
)}
</section>
</div>
{disconnected && <DisconnectedIndicator />}
</>
);
};
export default TranscriptRecord;
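
Note: this page holds a screen wake lock for the duration of recording (lockWakeState on mount, releaseWakeState on unmount) so the device does not sleep mid-meeting. The lib/wakeLock module itself is not part of this commit; the following is only a plausible sketch of such a helper, assuming the browser Screen Wake Lock API — the real module may differ.

  // Hypothetical sketch of lib/wakeLock.ts (not in this diff).
  let sentinel: WakeLockSentinel | null = null;

  export async function lockWakeState(): Promise<void> {
    // No-op on browsers without the Screen Wake Lock API.
    if (!("wakeLock" in navigator)) return;
    try {
      sentinel = await navigator.wakeLock.request("screen");
    } catch (err) {
      // Requests can fail, e.g. while the tab is not visible.
      console.debug("Wake lock request failed:", err);
    }
  }

  export async function releaseWakeState(): Promise<void> {
    await sentinel?.release();
    sentinel = null;
  }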

View File

@@ -0,0 +1,54 @@
import { useEffect, useState } from "react";
import { DefaultApi, V1TranscriptsCreateRequest } from "../api/apis/DefaultApi";
import { GetTranscript } from "../api";
import { useError } from "../(errors)/errorContext";
import getApi from "../lib/getApi";
type CreateTranscript = {
response: GetTranscript | null;
loading: boolean;
error: Error | null;
create: (params: V1TranscriptsCreateRequest["createTranscript"]) => void;
};
const useCreateTranscript = (): CreateTranscript => {
const [response, setResponse] = useState<GetTranscript | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
const api = getApi();
const create = (params: V1TranscriptsCreateRequest["createTranscript"]) => {
if (loading) return;
setLoading(true);
const requestParameters: V1TranscriptsCreateRequest = {
createTranscript: {
name: params.name || "Weekly All-Hands", // Default
targetLanguage: params.targetLanguage || "en", // Default
},
};
console.debug(
"POST - /v1/transcripts/ - Requesting new transcription creation",
requestParameters,
);
api
.v1TranscriptsCreate(requestParameters)
.then((result) => {
setResponse(result);
setLoading(false);
console.debug("New transcript created:", result);
})
.catch((err) => {
setError(err);
setErrorState(err);
setLoading(false);
});
};
return { response, loading, error, create };
};
export default useCreateTranscript;
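
Two details in this new hook: create() refuses re-entrant calls while a request is in flight (the early return on loading), and failures are surfaced both to the shared error context (setError) and to the hook's own error state. The updated TranscriptCreate page wires it up roughly like this — condensed from the diff that follows, not additional code:

  const createTranscript = useCreateTranscript();

  const send = () => {
    if (createTranscript.loading || permissionDenied) return;
    createTranscript.create({ name, targetLanguage });
  };

  // Once the POST resolves, move to the new record route.
  useEffect(() => {
    if (createTranscript.response)
      router.push(`/transcripts/${createTranscript.response.id}/record`);
  }, [createTranscript.response]);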

View File

@@ -1,7 +1,5 @@
"use client";
import React, { useEffect, useState } from "react";
-import Recorder from "../recorder";
-import { TopicList } from "../topicList";
import useWebRTC from "../useWebRTC";
import useTranscript from "../useTranscript";
import { useWebSockets } from "../useWebSockets";
@@ -9,35 +7,37 @@ import useAudioDevice from "../useAudioDevice";
import "../../styles/button.css";
import { Topic } from "../webSocketTypes";
import getApi from "../../lib/getApi";
import LiveTrancription from "../liveTranscription";
import DisconnectedIndicator from "../disconnectedIndicator";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faGear } from "@fortawesome/free-solid-svg-icons";
import About from "../../(aboutAndPrivacy)/about";
import Privacy from "../../(aboutAndPrivacy)/privacy";
import { lockWakeState, releaseWakeState } from "../../lib/wakeLock";
+import { useRouter } from "next/navigation";
-import createTranscript from "../createTranscript";
-import { GetTranscript } from "../../api";
-import { Router } from "next/router";
+import useCreateTranscript from "../createTranscript";
const TranscriptCreate = () => {
const [stream, setStream] = useState<MediaStream | null>(null);
const [disconnected, setDisconnected] = useState<boolean>(false);
const useActiveTopic = useState<Topic | null>(null);
useEffect(() => {
if (process.env.NEXT_PUBLIC_ENV === "development") {
document.onkeyup = (e) => {
if (e.key === "d") {
setDisconnected((prev) => !prev);
}
};
}
}, []);
-const api = getApi();
-const transcript = useTranscript(stream, api);
-const webRTC = useWebRTC(stream, transcript?.response?.id, api);
-const webSockets = useWebSockets(transcript?.response?.id);
+// const transcript = useTranscript(stream, api);
+const router = useRouter();
+const api = getApi();
+const [name, setName] = useState<string>();
+const nameChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+  setName(event.target.value);
+};
+const [targetLanguage, setTargetLanguage] = useState<string>();
+const createTranscript = useCreateTranscript();
+const send = () => {
+  if (createTranscript.loading || permissionDenied) return;
+  createTranscript.create({ name, targetLanguage });
+};
+useEffect(() => {
+  createTranscript.response &&
+    router.push(`/transcripts/${createTranscript.response.id}/record`);
+}, [createTranscript.response]);
const {
loading,
permissionOk,
@@ -47,151 +47,62 @@ const TranscriptCreate = () => {
getAudioStream,
} = useAudioDevice();
-const [hasRecorded, setHasRecorded] = useState(false);
-const [transcriptStarted, setTranscriptStarted] = useState(false);
-useEffect(() => {
-  if (!transcriptStarted && webSockets.transcriptText.length !== 0)
-    setTranscriptStarted(true);
-}, [webSockets.transcriptText]);
-useEffect(() => {
-  if (transcript?.response?.id) {
-    const newUrl = `/transcripts/${transcript.response.id}`;
-    // Shallow redirection does not work on NextJS 13
-    // https://github.com/vercel/next.js/discussions/48110
-    // https://github.com/vercel/next.js/discussions/49540
-    // router.push(newUrl, undefined, { shallow: true });
-    history.replaceState({}, "", newUrl);
-  }
-});
useEffect(() => {
lockWakeState();
return () => {
releaseWakeState();
};
}, []);
return (
<>
-{permissionOk ? (
-  <>
-    <Recorder
-      setStream={setStream}
-      onStop={() => {
-        webRTC?.send(JSON.stringify({ cmd: "STOP" }));
-        setStream(null);
-        setHasRecorded(true);
-      }}
-      topics={webSockets.topics}
-      getAudioStream={getAudioStream}
-      useActiveTopic={useActiveTopic}
-      isPastMeeting={false}
-      audioDevices={audioDevices}
-    />
-    <div className="grid grid-cols-1 lg:grid-cols-2 grid-rows-mobile-inner lg:grid-rows-1 gap-2 lg:gap-4 h-full">
-      <TopicList
-        topics={webSockets.topics}
-        useActiveTopic={useActiveTopic}
-        autoscroll={true}
-      />
-      <section
-        className={`w-full h-full bg-blue-400/20 rounded-lg md:rounded-xl p-2 md:px-4`}
-      >
-        {!hasRecorded ? (
-          <>
-            {transcriptStarted && (
-              <h2 className="md:text-lg font-bold">Transcription</h2>
-            )}
-            <div className="flex flex-col justify-center align center text-center h-full">
-              <div className="py-2 h-auto">
-                {!transcriptStarted ? (
-                  <div className="text-center text-gray-500">
-                    The conversation transcript will appear here shortly
-                    after you start recording.
-                  </div>
-                ) : (
-                  <LiveTrancription text={webSockets.transcriptText} />
-                )}
-              </div>
-            </div>
-          </>
+<div></div>
+<div className="max-h-full overflow-auto">
+  <section className="flex flex-col w-full h-full items-center justify-evenly p-4 md:px-6 md:py-8">
+    <div>
+      <div className="flex flex-col max-w-xl items-center justify-center">
+        <h1 className="text-2xl font-bold mb-2">
+          Welcome to reflector.media
+        </h1>
+        <p>
+          Reflector is a transcription and summarization pipeline that
+          transforms audio into knowledge. The output is meeting minutes
+          and topic summaries, enabling topic-specific analyses stored in
+          your systems of record. This is accomplished on your
+          infrastructure, without third parties, keeping your data
+          private, secure, and organized.
+        </p>
+        <About buttonText="Learn more" />
+        <input type="text" onChange={nameChange} />
+        <button onClick={() => setTargetLanguage("fr")}>Language</button>
+        <h2 className="text-2xl font-bold mt-4 mb-2">
+          Audio Permissions
+        </h2>
+        {loading ? (
+          <p className="text-center">Checking permission...</p>
+        ) : permissionOk ? (
+          <> Microphone permission granted </>
+        ) : (
) : (
<div className="flex flex-col justify-center align center text-center h-full text-gray-500">
<div className="p-2 md:p-4">
<FontAwesomeIcon
icon={faGear}
className="animate-spin-slow h-14 w-14 md:h-20 md:w-20"
/>
</div>
<p>
We are generating the final summary for you. This may take a
couple of minutes. Please do not navigate away from the page
during this time.
+          <>
+            <p className="text-center">
+              In order to use Reflector, we kindly request permission to
+              access your microphone during meetings and events.
+              <br />
+              <Privacy buttonText="Privacy policy" />
+              <br />
+              {permissionDenied
+                ? "Permission to use your microphone was denied; please change the permission setting in your browser and refresh this page."
+                : "Please grant permission to continue."}
+            </p>
+          </div>
+          <button
+            className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded m-auto"
+            onClick={requestPermission}
+            disabled={permissionDenied}
+          >
+            {permissionDenied ? "Access denied" : "Grant Permission"}
+          </button>
+        </>
+      )}
</section>
</div>
+    <button onClick={send} disabled={!permissionOk}>
+      {createTranscript.loading ? "loading" : "Send"}
+    </button>
</div>
{disconnected && <DisconnectedIndicator />}
</>
-) : (
-  <>
-    <div></div>
-    <div className="max-h-full overflow-auto">
-      <section className="flex flex-col w-full h-full items-center justify-evenly p-4 md:px-6 md:py-8">
-        <div>
-          <div className="flex flex-col max-w-xl items-center justify-center">
-            <h1 className="text-2xl font-bold mb-2">
-              Welcome to reflector.media
-            </h1>
-            <p>
-              Reflector is a transcription and summarization pipeline that
-              transforms audio into knowledge. The output is meeting
-              minutes and topic summaries enabling topic-specific analyses
-              stored in your systems of record. This is accomplished on
-              your infrastructure without 3rd parties keeping your
-              data private, secure, and organized.
-            </p>
-            <About buttonText="Learn more" />
-            <h2 className="text-2xl font-bold mt-4 mb-2">
-              Audio Permissions
-            </h2>
-            {loading ? (
-              <p className="text-center">Checking permission...</p>
-            ) : (
-              <>
-                <p className="text-center">
-                  In order to use Reflector, we kindly request permission
-                  to access your microphone during meetings and events.
-                  <br />
-                  <Privacy buttonText="Privacy policy" />
-                  <br />
-                  {permissionDenied
-                    ? "Permission to use your microphone was denied, please change the permission setting in your browser and refresh this page."
-                    : "Please grant permission to continue."}
-                </p>
-                <button
-                  className="mt-4 bg-blue-400 hover:bg-blue-500 focus-visible:bg-blue-500 text-white font-bold py-2 px-4 rounded m-auto"
-                  onClick={requestPermission}
-                  disabled={permissionDenied}
-                >
-                  {permissionDenied
-                    ? "Access denied"
-                    : "Grant Permission"}
-                </button>
-              </>
-            )}
-          </div>
-        </div>
-      </section>
-    </div>
-  </>
-)}
</section>
</div>
</>
);
};

View File

@@ -1,11 +1,11 @@
import { useEffect, useState } from "react";
import {
-DefaultApi,
V1TranscriptGetRequest,
-V1TranscriptsCreateRequest,
} from "../api/apis/DefaultApi";
import { GetTranscript } from "../api";
import { useError } from "../(errors)/errorContext";
+import getApi from "../lib/getApi";
type Transcript = {
response: GetTranscript | null;
@@ -13,23 +13,12 @@ type Transcript = {
error: Error | null;
};
-const useTranscript = (
-  stream: MediaStream | null,
-  api: DefaultApi,
-  id: string | null = null,
-): Transcript => {
+const useTranscript = (id: string | null): Transcript => {
const [response, setResponse] = useState<GetTranscript | null>(null);
const [loading, setLoading] = useState<boolean>(false);
const [error, setErrorState] = useState<Error | null>(null);
const { setError } = useError();
-const getOrCreateTranscript = (id: string | null) => {
-  if (id) {
-    getTranscript(id);
-  } else if (stream) {
-    createTranscript();
-  }
-};
+const api = getApi();
const getTranscript = (id: string | null) => {
if (!id) throw new Error("Transcript ID is required to get transcript");
@@ -43,34 +32,7 @@ const useTranscript = (
.then((result) => {
setResponse(result);
setLoading(false);
console.debug("New transcript created:", result);
})
.catch((err) => {
setError(err);
setErrorState(err);
});
};
-const createTranscript = () => {
-  setLoading(true);
-  const requestParameters: V1TranscriptsCreateRequest = {
-    createTranscript: {
-      name: "Weekly All-Hands", // Hardcoded for now
-      targetLanguage: "en", // Hardcoded for now
-    },
-  };
-  console.debug(
-    "POST - /v1/transcripts/ - Requesting new transcription creation",
-    requestParameters,
-  );
-  api
-    .v1TranscriptsCreate(requestParameters)
-    .then((result) => {
-      setResponse(result);
-      setLoading(false);
-      console.debug("New transcript created:", result);
+      console.debug("Transcript Loaded:", result);
})
.catch((err) => {
setError(err);
@@ -79,8 +41,8 @@ const useTranscript = (
};
useEffect(() => {
-  getOrCreateTranscript(id);
-}, [id, stream]);
+  getTranscript(id);
+}, [id]);
return { response, loading, error };
};
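
Net effect of this last change: useTranscript no longer creates transcripts or watches a MediaStream; it is a plain fetch-by-id hook, with creation moved wholly into useCreateTranscript. A minimal consumer matching the updated call sites above — TranscriptStatus is a hypothetical component, not part of this commit:

  // Hypothetical consumer of the slimmed-down hook.
  function TranscriptStatus({ transcriptId }: { transcriptId: string }) {
    const { response, loading, error } = useTranscript(transcriptId);
    if (loading) return <p>Loading transcript…</p>;
    if (error) return <p>Could not load transcript.</p>;
    return <p>Loaded transcript {response?.id}</p>;
  }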