From 2576a6e4e20cd0865f1b86c746b8d8fb81c730a1 Mon Sep 17 00:00:00 2001 From: Sara Date: Wed, 20 Sep 2023 17:08:47 +0200 Subject: [PATCH] microphone switch and design improvements --- www/app/(errors)/errorMessage.tsx | 2 +- www/app/transcripts/audioInputsDropdown.tsx | 42 ++++---- www/app/transcripts/new/page.tsx | 109 ++++++++++---------- www/app/transcripts/recorder.tsx | 78 ++++++++++---- 4 files changed, 134 insertions(+), 97 deletions(-) diff --git a/www/app/(errors)/errorMessage.tsx b/www/app/(errors)/errorMessage.tsx index d5109733..f048cac5 100644 --- a/www/app/(errors)/errorMessage.tsx +++ b/www/app/(errors)/errorMessage.tsx @@ -23,7 +23,7 @@ const ErrorMessage: React.FC = () => { setIsVisible(false); setError(null); }} - className="max-w-xs z-50 fixed top-16 right-10 bg-red-100 border border-red-400 text-red-700 px-4 py-3 rounded transition-opacity duration-300 ease-out opacity-100 hover:opacity-75 cursor-pointer transform hover:scale-105" + className="max-w-xs z-50 fixed bottom-5 right-5 md:bottom-10 md:right-10 bg-red-100 border border-red-400 text-red-700 px-4 py-3 rounded transition-opacity duration-300 ease-out opacity-100 hover:opacity-80 cursor-pointer transform hover:scale-105" role="alert" > {error?.message} diff --git a/www/app/transcripts/audioInputsDropdown.tsx b/www/app/transcripts/audioInputsDropdown.tsx index 4b7b3cdc..7772358b 100644 --- a/www/app/transcripts/audioInputsDropdown.tsx +++ b/www/app/transcripts/audioInputsDropdown.tsx @@ -1,5 +1,3 @@ -import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; -import { faMicrophone } from "@fortawesome/free-solid-svg-icons"; import React, { useEffect, useState } from "react"; import Dropdown, { Option } from "react-dropdown"; import "react-dropdown/style.css"; @@ -7,36 +5,34 @@ import "react-dropdown/style.css"; const AudioInputsDropdown: React.FC<{ audioDevices: Option[]; disabled: boolean; + hide: () => void; setDeviceId: React.Dispatch>; -}> = ({ audioDevices, disabled, setDeviceId 
}) => { +}> = (props) => { const [ddOptions, setDdOptions] = useState([]); useEffect(() => { - if (audioDevices) { - setDdOptions(audioDevices); - setDeviceId(audioDevices.length > 0 ? audioDevices[0].value : null); + if (props.audioDevices) { + setDdOptions(props.audioDevices); + props.setDeviceId( + props.audioDevices.length > 0 ? props.audioDevices[0].value : null, + ); } - }, [audioDevices]); + }, [props.audioDevices]); const handleDropdownChange = (option: Option) => { - setDeviceId(option.value); + props.setDeviceId(option.value); + props.hide(); }; - if (audioDevices?.length > 0) { - return ( -
- - -
- ); - } - return null; + return ( + + ); }; export default AudioInputsDropdown; diff --git a/www/app/transcripts/new/page.tsx b/www/app/transcripts/new/page.tsx index eddf4fcc..4bbba050 100644 --- a/www/app/transcripts/new/page.tsx +++ b/www/app/transcripts/new/page.tsx @@ -17,8 +17,6 @@ const TranscriptCreate = () => { const [stream, setStream] = useState(null); const [disconnected, setDisconnected] = useState(false); const useActiveTopic = useState(null); - const [deviceId, setDeviceId] = useState(null); - const [recordStarted, setRecordStarted] = useState(false); useEffect(() => { if (process.env.NEXT_PUBLIC_ENV === "development") { @@ -43,17 +41,6 @@ const TranscriptCreate = () => { getAudioStream, } = useAudioDevice(); - const getCurrentStream = async () => { - setRecordStarted(true); - return deviceId ? await getAudioStream(deviceId) : null; - }; - - useEffect(() => { - if (audioDevices.length > 0) { - setDeviceId[audioDevices[0].value]; - } - }, [audioDevices]); - return ( <> {permissionOk ? ( @@ -65,60 +52,74 @@ const TranscriptCreate = () => { setStream(null); }} topics={webSockets.topics} - getAudioStream={getCurrentStream} + getAudioStream={getAudioStream} useActiveTopic={useActiveTopic} isPastMeeting={false} + audioDevices={audioDevices} /> +
-
-
- -
-
-
- -
-
-
- - {disconnected && } +
+
+ +
+
+ + {disconnected && } ) : ( <> -
-

Audio Permissions

- {loading ? ( -

- Checking permission... +

+
+
+

Reflector

+

+ Meet Monadical's own Reflector, your audio ally for hassle-free + insights.

- ) : ( - <> -

- Reflector needs access to your microphone to work. -
- {permissionDenied - ? "Please reset microphone permissions to continue." - : "Please grant permission to continue."} -

- - - )} -
+

+ With real-time transcriptions, translations, and summaries, + Reflector captures and categorizes the details of your meetings + and events, all while keeping your data locked down tight on + your own infrastructure. Forget the scribbled notes, endless + recordings, or third-party apps. Discover Reflector, a powerful + new way to elevate knowledge management and accessibility for + all. +

+
+
+
+

Audio Permissions

+ {loading ? ( +

+ Checking permission... +

+ ) : ( + <> +

+ Reflector needs access to your microphone to work. +
+ {permissionDenied + ? "Please reset microphone permissions to continue." + : "Please grant permission to continue."} +

+ + + )} +
+
+ )} diff --git a/www/app/transcripts/recorder.tsx b/www/app/transcripts/recorder.tsx index 87e9955a..91cdcfac 100644 --- a/www/app/transcripts/recorder.tsx +++ b/www/app/transcripts/recorder.tsx @@ -5,17 +5,21 @@ import RecordPlugin from "../lib/custom-plugins/record"; import CustomRegionsPlugin from "../lib/custom-plugins/regions"; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; +import { faMicrophone } from "@fortawesome/free-solid-svg-icons"; import { faDownload } from "@fortawesome/free-solid-svg-icons"; import { formatTime } from "../lib/time"; import { Topic } from "./webSocketTypes"; import { AudioWaveform } from "../api"; +import AudioInputsDropdown from "./audioInputsDropdown"; +import { Option } from "react-dropdown"; type RecorderProps = { setStream?: React.Dispatch>; onStop?: () => void; topics: Topic[]; - getAudioStream?: () => Promise; + getAudioStream?: (deviceId) => Promise; + audioDevices?: Option[]; useActiveTopic: [ Topic | null, React.Dispatch>, @@ -38,10 +42,11 @@ export default function Recorder(props: RecorderProps) { const [waveRegions, setWaveRegions] = useState( null, ); - + const [deviceId, setDeviceId] = useState(null); + const [recordStarted, setRecordStarted] = useState(false); const [activeTopic, setActiveTopic] = props.useActiveTopic; - const topicsRef = useRef(props.topics); + const [showDevices, setShowDevices] = useState(false); useEffect(() => { if (waveformRef.current) { @@ -186,8 +191,8 @@ export default function Recorder(props: RecorderProps) { record.stopRecording(); setIsRecording(false); setHasRecorded(true); - } else if (props.getAudioStream) { - const stream = await props.getAudioStream(); + } else { + const stream = await getCurrentStream(); if (props.setStream) props.setStream(stream); waveRegions?.clearRegions(); @@ -195,8 +200,6 @@ export default function Recorder(props: RecorderProps) { await record.startRecording(stream); setIsRecording(true); } - } else { - throw new Error("No getAudioStream 
function provided"); } }; @@ -210,8 +213,21 @@ export default function Recorder(props: RecorderProps) { return ""; }; + const getCurrentStream = async () => { + setRecordStarted(true); + return deviceId && props.getAudioStream + ? await props.getAudioStream(deviceId) + : null; + }; + + useEffect(() => { + if (props.audioDevices && props.audioDevices.length > 0) { + setDeviceId(props.audioDevices[0].value); + } + }, [props.audioDevices]); + return ( -
+
@@ -259,17 +275,41 @@ export default function Recorder(props: RecorderProps) { )} {!hasRecorded && ( - + <> + + {props.audioDevices && props.audioDevices?.length > 0 && ( + <> + +
+ setShowDevices(false)} + /> +
+ + )} + )}
);