"use client"; import React, { useEffect, useState } from "react"; import Recorder from "../recorder"; import { TopicList } from "../topicList"; import useWebRTC from "../useWebRTC"; import useTranscript from "../useTranscript"; import { useWebSockets } from "../useWebSockets"; import useAudioDevice from "../useAudioDevice"; import "../../styles/button.css"; import { Topic } from "../webSocketTypes"; import getApi from "../../lib/getApi"; import LiveTrancription from "../liveTranscription"; import DisconnectedIndicator from "../disconnectedIndicator"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import { faGear } from "@fortawesome/free-solid-svg-icons"; import About from "../../(aboutAndPrivacy)/about"; import Privacy from "../../(aboutAndPrivacy)/privacy"; const TranscriptCreate = () => { const [stream, setStream] = useState(null); const [disconnected, setDisconnected] = useState(false); const useActiveTopic = useState(null); useEffect(() => { if (process.env.NEXT_PUBLIC_ENV === "development") { document.onkeyup = (e) => { if (e.key === "d") { setDisconnected((prev) => !prev); } }; } }, []); const api = getApi(); const transcript = useTranscript(stream, api); const webRTC = useWebRTC(stream, transcript?.response?.id, api); const webSockets = useWebSockets(transcript?.response?.id); const { loading, permissionOk, permissionDenied, audioDevices, requestPermission, getAudioStream, } = useAudioDevice(); const [hasRecorded, setHasRecorded] = useState(false); const [transcriptStarted, setTranscriptStarted] = useState(false); useEffect(() => { if (!transcriptStarted && webSockets.transcriptText.length !== 0) setTranscriptStarted(true); }, [webSockets.transcriptText]); return ( <> {permissionOk ? ( <> { webRTC?.peer?.send(JSON.stringify({ cmd: "STOP" })); setStream(null); setHasRecorded(true); }} topics={webSockets.topics} getAudioStream={getAudioStream} useActiveTopic={useActiveTopic} isPastMeeting={false} audioDevices={audioDevices} />
          {!hasRecorded ? (
            <>
              {transcriptStarted && <h2>Transcription</h2>}
              {!transcriptStarted ? (
                <p>
                  The conversation transcript will appear here after you start
                  recording.
                </p>
              ) : (
                /* The live transcript view lost its tags; LiveTrancription is
                   imported above and is assumed to render here, and its prop
                   name is an assumption. TopicList is also imported, but its
                   original render site could not be recovered. */
                <LiveTrancription text={webSockets.transcriptText} />
              )}
            </>
          ) : (
            <p>
              We are generating the final summary for you. This may take a
              couple of minutes. Please do not navigate away from the page
              during this time.
            </p>
          )}
          {disconnected && <DisconnectedIndicator />}
        </>
      ) : (
        <>

          <h1>Welcome to reflector.media</h1>
          <p>
            Reflector is a transcription and summarization pipeline that
            transforms audio into knowledge. The output is meeting minutes and
            topic summaries, enabling topic-specific analyses stored in your
            systems of record. This is accomplished on your infrastructure,
            without 3rd parties, keeping your data private, secure, and
            organized.
          </p>
          <h2>Audio Permissions</h2>
          {loading ? (
            <p>
              {/* The icon markup was stripped; a spinning gear is assumed here
                  because faGear is imported above. */}
              <FontAwesomeIcon icon={faGear} spin /> Checking permission...
            </p>
          ) : (
            <>

              <p>
                To enable Reflector, we kindly request permission to access
                your microphone during meetings and events.
              </p>
              <p>
                {permissionDenied
                  ? "Permission to use your microphone was denied. Please change the permission setting in your browser and refresh this page."
                  : "Please grant permission to continue."}
              </p>
              {/* The original button markup was lost; wiring a button to
                  requestPermission is an assumption, as is the "button" class
                  from the imported button.css. */}
              <button className="button" onClick={requestPermission}>
                Grant permission
              </button>
            </>
          )}
          {/* About and Privacy are imported above; their original placement
              was lost, so rendering them at the end of the welcome screen is
              an assumption. */}
          <About />
          <Privacy />
        </>
      )}
    </>
  );
};

export default TranscriptCreate;