Merge pull request #43 from Monadical-SAS/www/jose-waveform

Improve recording waveform speed + query permissions (by Jose)
This commit is contained in:
Jose
2023-07-28 11:22:49 -05:00
committed by GitHub
4 changed files with 127 additions and 47 deletions

View File

@@ -17,6 +17,83 @@ class CustomRecordPlugin extends RecordPlugin {
static create(options) { static create(options) {
return new CustomRecordPlugin(options || {}); return new CustomRecordPlugin(options || {});
} }
render(stream) {
if (!this.wavesurfer) return () => undefined
const container = this.wavesurfer.getWrapper()
const canvas = document.createElement('canvas')
canvas.width = container.clientWidth
canvas.height = container.clientHeight
canvas.style.zIndex = '10'
container.appendChild(canvas)
const canvasCtx = canvas.getContext('2d')
const audioContext = new AudioContext()
const source = audioContext.createMediaStreamSource(stream)
const analyser = audioContext.createAnalyser()
analyser.fftSize = 2 ** 5
source.connect(analyser)
const bufferLength = analyser.frequencyBinCount
const dataArray = new Uint8Array(bufferLength)
let animationId, previousTimeStamp;
const BUFFER_SIZE = 2 ** 8
const dataBuffer = new Array(BUFFER_SIZE).fill(canvas.height)
const drawWaveform = (timeStamp) => {
if (!canvasCtx) return
analyser.getByteTimeDomainData(dataArray)
canvasCtx.clearRect(0, 0, canvas.width, canvas.height)
canvasCtx.fillStyle = '#cc3347'
if (previousTimeStamp === undefined) {
previousTimeStamp = timeStamp
dataBuffer.push(Math.min(...dataArray))
dataBuffer.splice(0, 1)
}
const elapsed = timeStamp - previousTimeStamp;
if (elapsed > 10) {
previousTimeStamp = timeStamp
dataBuffer.push(Math.min(...dataArray))
dataBuffer.splice(0, 1)
}
// Drawing
const sliceWidth = canvas.width / dataBuffer.length
let x = 0
for (let i = 0; i < dataBuffer.length; i++) {
const valueNormalized = dataBuffer[i] / canvas.height
const y = valueNormalized * canvas.height / 2
const sliceHeight = canvas.height + 1 - y * 2
canvasCtx.fillRect(x, y, sliceWidth * 2 / 3, sliceHeight)
x += sliceWidth
}
animationId = requestAnimationFrame(drawWaveform)
}
drawWaveform()
return () => {
if (animationId) {
cancelAnimationFrame(animationId)
}
if (source) {
source.disconnect()
source.mediaStream.getTracks().forEach((track) => track.stop())
}
if (audioContext) {
audioContext.close()
}
canvas?.remove()
}
}
startRecording(stream) { startRecording(stream) {
this.preventInteraction(); this.preventInteraction();
this.cleanUp(); this.cleanUp();

View File

@@ -7,6 +7,42 @@ import "react-dropdown/style.css";
import CustomRecordPlugin from "./CustomRecordPlugin"; import CustomRecordPlugin from "./CustomRecordPlugin";
const AudioInputsDropdown = (props) => {
const [ddOptions, setDdOptions] = useState([]);
useEffect(() => {
const init = async () => {
// Request permission to use audio inputs
await navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => stream.getTracks().forEach((t) => t.stop()))
const devices = await navigator.mediaDevices.enumerateDevices()
const audioDevices = devices
.filter((d) => d.kind === "audioinput" && d.deviceId != "")
.map((d) => ({ value: d.deviceId, label: d.label }))
if (audioDevices.length < 1) return console.log("no audio input devices")
setDdOptions(audioDevices)
props.setDeviceId(audioDevices[0].value)
}
init()
}, [])
const handleDropdownChange = (e) => {
props.setDeviceId(e.value);
};
return (
<Dropdown
options={ddOptions}
onChange={handleDropdownChange}
value={ddOptions[0]}
disabled={props.disabled}
/>
)
}
export default function Recorder(props) { export default function Recorder(props) {
const waveformRef = useRef(); const waveformRef = useRef();
const [wavesurfer, setWavesurfer] = useState(null); const [wavesurfer, setWavesurfer] = useState(null);
@@ -14,27 +50,15 @@ export default function Recorder(props) {
const [isRecording, setIsRecording] = useState(false); const [isRecording, setIsRecording] = useState(false);
const [isPlaying, setIsPlaying] = useState(false); const [isPlaying, setIsPlaying] = useState(false);
const [deviceId, setDeviceId] = useState(null); const [deviceId, setDeviceId] = useState(null);
const [ddOptions, setDdOptions] = useState([]);
useEffect(() => { useEffect(() => {
document.getElementById("play-btn").disabled = true; document.getElementById("play-btn").disabled = true;
navigator.mediaDevices.enumerateDevices().then((devices) => {
const audioDevices = devices
.filter((d) => d.kind === "audioinput")
.map((d) => ({ value: d.deviceId, label: d.label }));
if (audioDevices.length < 1) return console.log("no audio input devices");
setDdOptions(audioDevices);
setDeviceId(audioDevices[0].value);
});
if (waveformRef.current) { if (waveformRef.current) {
const _wavesurfer = WaveSurfer.create({ const _wavesurfer = WaveSurfer.create({
container: waveformRef.current, container: waveformRef.current,
waveColor: "#cc3347", waveColor: "#777",
progressColor: "#0178FF", progressColor: "#222",
cursorColor: "OrangeRed", cursorColor: "OrangeRed",
hideScrollbar: true, hideScrollbar: true,
autoCenter: true, autoCenter: true,
@@ -65,9 +89,8 @@ export default function Recorder(props) {
const handleRecClick = async () => { const handleRecClick = async () => {
if (!record) return console.log("no record"); if (!record) return console.log("no record");
if (record?.isRecording()) { if (record.isRecording()) {
props.onStop();
props.serverData.peer.send(JSON.stringify({ cmd: "STOP" }));
record.stopRecording(); record.stopRecording();
setIsRecording(false); setIsRecording(false);
document.getElementById("play-btn").disabled = false; document.getElementById("play-btn").disabled = false;
@@ -85,22 +108,15 @@ export default function Recorder(props) {
wavesurfer?.playPause(); wavesurfer?.playPause();
}; };
const handleDropdownChange = (e) => {
setDeviceId(e.value);
};
return ( return (
<div className="flex flex-col items-center justify-center max-w-[75vw] w-full"> <div className="flex flex-col items-center justify-center max-w-[75vw] w-full">
<div className="flex my-2 mx-auto"> <div className="flex my-2 mx-auto">
<Dropdown <AudioInputsDropdown setDeviceId={setDeviceId} disabled={isRecording} />
options={ddOptions}
onChange={handleDropdownChange}
value={ddOptions[0]}
/>
&nbsp; &nbsp;
<button <button
onClick={handleRecClick} onClick={handleRecClick}
data-color={isRecording ? "red" : "blue"} data-color={isRecording ? "red" : "blue"}
disabled={!deviceId}
> >
{isRecording ? "Stop" : "Record"} {isRecording ? "Stop" : "Record"}
</button> </button>

View File

@@ -3,7 +3,7 @@ import Peer from "simple-peer";
const WebRTC_SERVER_URL = "http://127.0.0.1:1250/offer"; const WebRTC_SERVER_URL = "http://127.0.0.1:1250/offer";
const useWebRTC = (stream, setIsRecording) => { const useWebRTC = (stream) => {
const [data, setData] = useState({ const [data, setData] = useState({
peer: null, peer: null,
}); });
@@ -30,7 +30,7 @@ const useWebRTC = (stream, setIsRecording) => {
.then((response) => response.json()) .then((response) => response.json())
.then((answer) => peer.signal(answer)) .then((answer) => peer.signal(answer))
.catch((e) => { .catch((e) => {
alert(e); console.log("Error signaling:", e);
}); });
} }
}); });
@@ -66,7 +66,6 @@ const useWebRTC = (stream, setIsRecording) => {
}, },
text: '' text: ''
})); }));
setIsRecording(false);
break; break;
default: default:
console.error(`Unknown command ${serverData.cmd}`); console.error(`Unknown command ${serverData.cmd}`);
@@ -76,7 +75,7 @@ const useWebRTC = (stream, setIsRecording) => {
return () => { return () => {
peer.destroy(); peer.destroy();
}; };
}, [stream, setIsRecording]); }, [stream]);
return data; return data;
}; };

View File

@@ -6,23 +6,13 @@ import useWebRTC from "./components/webrtc.js";
import "../public/button.css"; import "../public/button.css";
const App = () => { const App = () => {
const [isRecording, setIsRecording] = useState(false);
const [stream, setStream] = useState(null); const [stream, setStream] = useState(null);
const handleRecord = (recording) => { // This is where you'd send the stream and receive the data from the server.
setIsRecording(recording); // transcription, summary, etc
const serverData = useWebRTC(stream);
if (recording) { const sendStopCmd = () => serverData?.peer?.send(JSON.stringify({ cmd: "STOP" }))
navigator.mediaDevices
.getUserMedia({ audio: true })
.then(setStream)
.catch((err) => console.error(err));
} else if (!recording && serverData.peer) {
serverData.peer.send(JSON.stringify({ cmd: "STOP" }));
}
};
const serverData = useWebRTC(stream, setIsRecording);
return ( return (
<div className="flex flex-col items-center h-[100svh]"> <div className="flex flex-col items-center h-[100svh]">
@@ -31,10 +21,8 @@ const App = () => {
<p className="text-gray-500">Capture The Signal, Not The Noise</p> <p className="text-gray-500">Capture The Signal, Not The Noise</p>
</div> </div>
<Recorder setStream={setStream} serverData={serverData} /> <Recorder setStream={setStream} onStop={sendStopCmd} />
<Dashboard <Dashboard
isRecording={isRecording}
onRecord={(recording) => handleRecord(recording)}
transcriptionText={serverData.text ?? "(No transcription yet)"} transcriptionText={serverData.text ?? "(No transcription yet)"}
finalSummary={serverData.finalSummary} finalSummary={serverData.finalSummary}
topics={serverData.topics ?? []} topics={serverData.topics ?? []}