Merge pull request #43 from Monadical-SAS/www/jose-waveform

Improve recording waveform speed + query permissions (by Jose)
Jose
2023-07-28 11:22:49 -05:00
committed by GitHub
4 changed files with 127 additions and 47 deletions


@@ -17,6 +17,83 @@ class CustomRecordPlugin extends RecordPlugin {
static create(options) {
return new CustomRecordPlugin(options || {});
}
render(stream) {
if (!this.wavesurfer) return () => undefined
const container = this.wavesurfer.getWrapper()
const canvas = document.createElement('canvas')
canvas.width = container.clientWidth
canvas.height = container.clientHeight
canvas.style.zIndex = '10'
container.appendChild(canvas)
const canvasCtx = canvas.getContext('2d')
const audioContext = new AudioContext()
const source = audioContext.createMediaStreamSource(stream)
const analyser = audioContext.createAnalyser()
analyser.fftSize = 2 ** 5
source.connect(analyser)
const bufferLength = analyser.frequencyBinCount
const dataArray = new Uint8Array(bufferLength)
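// dataArray receives 16 time-domain samples per frame (fftSize / 2): enough to read each frame's peak amplitude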
let animationId, previousTimeStamp;
const BUFFER_SIZE = 2 ** 8
const dataBuffer = new Array(BUFFER_SIZE).fill(canvas.height)
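// rolling window of the last 256 frame minima; seeded with canvas.height so the trace starts as a thin center line and scrolls left as new values are pushed in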
const drawWaveform = (timeStamp) => {
if (!canvasCtx) return
analyser.getByteTimeDomainData(dataArray)
canvasCtx.clearRect(0, 0, canvas.width, canvas.height)
canvasCtx.fillStyle = '#cc3347'
if (previousTimeStamp === undefined) {
previousTimeStamp = timeStamp
dataBuffer.push(Math.min(...dataArray))
dataBuffer.splice(0, 1)
}
const elapsed = timeStamp - previousTimeStamp;
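// only push a new sample every ~10 ms, so the scroll speed does not depend on the display's refresh rate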
if (elapsed > 10) {
previousTimeStamp = timeStamp
dataBuffer.push(Math.min(...dataArray))
dataBuffer.splice(0, 1)
}
// Drawing
const sliceWidth = canvas.width / dataBuffer.length
let x = 0
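// each buffered minimum becomes a centered vertical bar: a lower minimum (louder input) yields a taller bar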
for (let i = 0; i < dataBuffer.length; i++) {
const valueNormalized = dataBuffer[i] / canvas.height
const y = valueNormalized * canvas.height / 2
const sliceHeight = canvas.height + 1 - y * 2
canvasCtx.fillRect(x, y, sliceWidth * 2 / 3, sliceHeight)
x += sliceWidth
}
animationId = requestAnimationFrame(drawWaveform)
}
animationId = requestAnimationFrame(drawWaveform) // start with a real frame timestamp
return () => {
if (animationId) {
cancelAnimationFrame(animationId)
}
if (source) {
source.disconnect()
source.mediaStream.getTracks().forEach((track) => track.stop())
}
if (audioContext) {
audioContext.close()
}
canvas?.remove()
}
}
startRecording(stream) {
this.preventInteraction();
this.cleanUp();
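For orientation, and not part of the diff: a minimal sketch of how this plugin gets wired up, assuming wavesurfer.js v7's registerPlugin API; the container selector and option values here are illustrative.

// Usage sketch (assumptions noted above; not from this PR)
import WaveSurfer from "wavesurfer.js"
import CustomRecordPlugin from "./CustomRecordPlugin"

const ws = WaveSurfer.create({ container: "#waveform", waveColor: "#777" })
const record = ws.registerPlugin(CustomRecordPlugin.create())

// The overridden startRecording(stream) takes the MediaStream directly;
// render(stream) is presumably invoked from it (truncated in this view)
// and draws the scrolling amplitude trace onto its own canvas.
navigator.mediaDevices
  .getUserMedia({ audio: true })
  .then((stream) => record.startRecording(stream))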


@@ -7,6 +7,42 @@ import "react-dropdown/style.css";
import CustomRecordPlugin from "./CustomRecordPlugin";
const AudioInputsDropdown = (props) => {
const [ddOptions, setDdOptions] = useState([]);
useEffect(() => {
const init = async () => {
// Request permission to use audio inputs
await navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => stream.getTracks().forEach((t) => t.stop()))
const devices = await navigator.mediaDevices.enumerateDevices()
const audioDevices = devices
.filter((d) => d.kind === "audioinput" && d.deviceId !== "")
.map((d) => ({ value: d.deviceId, label: d.label }))
if (audioDevices.length < 1) return console.log("no audio input devices")
setDdOptions(audioDevices)
props.setDeviceId(audioDevices[0].value)
}
init()
}, [])
const handleDropdownChange = (e) => {
props.setDeviceId(e.value);
};
return (
<Dropdown
options={ddOptions}
onChange={handleDropdownChange}
value={ddOptions[0]}
disabled={props.disabled}
/>
)
}
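A note on the effect above: enumerateDevices() returns blank labels (and, in some browsers, blank deviceIds) until the page holds microphone permission, which is why the component requests a throwaway stream and stops it immediately before enumerating. The same pattern in isolation, as a sketch:

// Permission-priming sketch (standard Web APIs, mirrors the effect above)
const tmp = await navigator.mediaDevices.getUserMedia({ audio: true })
tmp.getTracks().forEach((t) => t.stop()) // release the mic right away
const mics = (await navigator.mediaDevices.enumerateDevices())
  .filter((d) => d.kind === "audioinput" && d.deviceId !== "")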
export default function Recorder(props) {
const waveformRef = useRef();
const [wavesurfer, setWavesurfer] = useState(null);
@@ -14,27 +50,15 @@ export default function Recorder(props) {
const [isRecording, setIsRecording] = useState(false);
const [isPlaying, setIsPlaying] = useState(false);
const [deviceId, setDeviceId] = useState(null);
- const [ddOptions, setDdOptions] = useState([]);
useEffect(() => {
document.getElementById("play-btn").disabled = true;
- navigator.mediaDevices.enumerateDevices().then((devices) => {
- const audioDevices = devices
- .filter((d) => d.kind === "audioinput")
- .map((d) => ({ value: d.deviceId, label: d.label }));
- if (audioDevices.length < 1) return console.log("no audio input devices");
- setDdOptions(audioDevices);
- setDeviceId(audioDevices[0].value);
- });
if (waveformRef.current) {
const _wavesurfer = WaveSurfer.create({
container: waveformRef.current,
waveColor: "#cc3347",
progressColor: "#0178FFπ",
waveColor: "#777",
progressColor: "#222",
cursorColor: "OrangeRed",
hideScrollbar: true,
autoCenter: true,
@@ -65,9 +89,8 @@ export default function Recorder(props) {
const handleRecClick = async () => {
if (!record) return console.log("no record");
- if (record?.isRecording()) {
- props.serverData.peer.send(JSON.stringify({ cmd: "STOP" }));
+ if (record.isRecording()) {
+ props.onStop();
record.stopRecording();
setIsRecording(false);
document.getElementById("play-btn").disabled = false;
@@ -85,22 +108,15 @@ export default function Recorder(props) {
wavesurfer?.playPause();
};
- const handleDropdownChange = (e) => {
- setDeviceId(e.value);
- };
return (
<div className="flex flex-col items-center justify-center max-w-[75vw] w-full">
<div className="flex my-2 mx-auto">
- <Dropdown
- options={ddOptions}
- onChange={handleDropdownChange}
- value={ddOptions[0]}
- />
+ <AudioInputsDropdown setDeviceId={setDeviceId} disabled={isRecording} />
&nbsp;
<button
onClick={handleRecClick}
data-color={isRecording ? "red" : "blue"}
disabled={!deviceId}
>
{isRecording ? "Stop" : "Record"}
</button>


@@ -3,7 +3,7 @@ import Peer from "simple-peer";
const WebRTC_SERVER_URL = "http://127.0.0.1:1250/offer";
- const useWebRTC = (stream, setIsRecording) => {
+ const useWebRTC = (stream) => {
const [data, setData] = useState({
peer: null,
});
@@ -30,7 +30,7 @@ const useWebRTC = (stream, setIsRecording) => {
.then((response) => response.json())
.then((answer) => peer.signal(answer))
.catch((e) => {
- alert(e);
+ console.log("Error signaling:", e);
});
}
});
@@ -66,7 +66,6 @@ const useWebRTC = (stream, setIsRecording) => {
},
text: ''
}));
- setIsRecording(false);
break;
default:
console.error(`Unknown command ${serverData.cmd}`);
@@ -76,7 +75,7 @@ const useWebRTC = (stream, setIsRecording) => {
return () => {
peer.destroy();
};
- }, [stream, setIsRecording]);
+ }, [stream]);
return data;
};
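For reference, the hook's offer/answer round-trip (partially visible in the hunk above) is simple-peer's standard signaling flow. A self-contained sketch; the fetch method and headers are assumptions, since they fall outside the shown diff:

import Peer from "simple-peer"

const WebRTC_SERVER_URL = "http://127.0.0.1:1250/offer"

navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
  // trickle: false emits one complete offer, suiting a single POST round-trip
  const peer = new Peer({ initiator: true, trickle: false, stream })
  peer.on("signal", (offer) => {
    fetch(WebRTC_SERVER_URL, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(offer),
    })
      .then((response) => response.json())
      .then((answer) => peer.signal(answer))
      .catch((e) => console.log("Error signaling:", e))
  })
  peer.on("data", (raw) => console.log("server says:", raw.toString()))
})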


@@ -6,23 +6,13 @@ import useWebRTC from "./components/webrtc.js";
import "../public/button.css";
const App = () => {
- const [isRecording, setIsRecording] = useState(false);
const [stream, setStream] = useState(null);
- const handleRecord = (recording) => {
- setIsRecording(recording);
- if (recording) {
- navigator.mediaDevices
- .getUserMedia({ audio: true })
- .then(setStream)
- .catch((err) => console.error(err));
- } else if (!recording && serverData.peer) {
- serverData.peer.send(JSON.stringify({ cmd: "STOP" }));
- }
- };
- const serverData = useWebRTC(stream, setIsRecording);
+ // This is where you'd send the stream and receive the data from the server.
+ // transcription, summary, etc
+ const serverData = useWebRTC(stream);
+ const sendStopCmd = () => serverData?.peer?.send(JSON.stringify({ cmd: "STOP" }))
return (
<div className="flex flex-col items-center h-[100svh]">
@@ -31,10 +21,8 @@ const App = () => {
<p className="text-gray-500">Capture The Signal, Not The Noise</p>
</div>
- <Recorder setStream={setStream} serverData={serverData} />
+ <Recorder setStream={setStream} onStop={sendStopCmd} />
<Dashboard
- isRecording={isRecording}
- onRecord={(recording) => handleRecord(recording)}
transcriptionText={serverData.text ?? "(No transcription yet)"}
finalSummary={serverData.finalSummary}
topics={serverData.topics ?? []}