diff --git a/www/app/components/CustomRecordPlugin.js b/www/app/components/CustomRecordPlugin.js
index bbe21195..b6f4fd88 100644
--- a/www/app/components/CustomRecordPlugin.js
+++ b/www/app/components/CustomRecordPlugin.js
@@ -17,6 +17,83 @@ class CustomRecordPlugin extends RecordPlugin {
   static create(options) {
     return new CustomRecordPlugin(options || {});
   }
+  // Draw a live waveform for the given MediaStream on a canvas appended to the
+  // wavesurfer wrapper. Returns a cleanup function that stops the animation,
+  // tears down the audio graph, and removes the canvas.
+  render(stream) {
+    if (!this.wavesurfer) return () => undefined;
+
+    const container = this.wavesurfer.getWrapper();
+    const canvas = document.createElement('canvas');
+    canvas.width = container.clientWidth;
+    canvas.height = container.clientHeight;
+    canvas.style.zIndex = '10';
+    container.appendChild(canvas);
+
+    const canvasCtx = canvas.getContext('2d');
+    const audioContext = new AudioContext();
+    const source = audioContext.createMediaStreamSource(stream);
+    const analyser = audioContext.createAnalyser();
+    analyser.fftSize = 2 ** 5; // the minimum FFT size; we only need a coarse amplitude reading
+    source.connect(analyser);
+    const bufferLength = analyser.frequencyBinCount;
+    const dataArray = new Uint8Array(bufferLength);
+
+    let animationId, previousTimeStamp;
+    const BUFFER_SIZE = 2 ** 8;
+    // Byte time-domain samples range from 0 to 255, with 128 meaning silence,
+    // so seed the rolling buffer with 128 to start from a flat line.
+    const dataBuffer = new Array(BUFFER_SIZE).fill(128);
+
+    const drawWaveform = (timeStamp) => {
+      if (!canvasCtx) return;
+
+      analyser.getByteTimeDomainData(dataArray);
+      canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
+      canvasCtx.fillStyle = 'black';
+
+      // Record the loudest excursion (the smallest byte value) at most once every 10 ms.
+      if (previousTimeStamp === undefined || timeStamp - previousTimeStamp > 10) {
+        previousTimeStamp = timeStamp;
+        dataBuffer.push(Math.min(...dataArray));
+        dataBuffer.shift();
+      }
+
+      // Drawing: one vertical bar per buffered sample, mirrored around the center line.
+      const sliceWidth = canvas.width / dataBuffer.length;
+      let x = 0;
+
+      for (let i = 0; i < dataBuffer.length; i++) {
+        const valueNormalized = dataBuffer[i] / 128; // 1 = silence, 0 = full amplitude
+        const y = valueNormalized * canvas.height / 2;
+        const sliceHeight = canvas.height + 1 - y * 2;
+
+        canvasCtx.fillRect(x, y, sliceWidth * 2 / 3, sliceHeight);
+        x += sliceWidth;
+      }
+
+      animationId = requestAnimationFrame(drawWaveform);
+    };
+
+    // Start via requestAnimationFrame so drawWaveform always receives a timestamp.
+    animationId = requestAnimationFrame(drawWaveform);
+
+    return () => {
+      if (animationId) {
+        cancelAnimationFrame(animationId);
+      }
+
+      if (source) {
+        source.disconnect();
+        source.mediaStream.getTracks().forEach((track) => track.stop());
+      }
+
+      if (audioContext) {
+        audioContext.close();
+      }
+
+      canvas.remove();
+    };
+  }
   startRecording(stream) {
     this.preventInteraction();
     this.cleanUp();
diff --git a/www/app/components/record.js b/www/app/components/record.js
index 70336e5d..3b697ad4 100644
--- a/www/app/components/record.js
+++ b/www/app/components/record.js
@@ -7,6 +7,47 @@ import "react-dropdown/style.css";
 
 import CustomRecordPlugin from "./CustomRecordPlugin";
 
+// If microphone permission is still undecided, prompt for it once so that
+// enumerateDevices() returns labeled devices, then release the temporary stream.
+const queryAndPromptAudio = async () => {
+  const permissionStatus = await navigator.permissions.query({ name: "microphone" });
+  if (permissionStatus.state === "prompt") {
+    const tempStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+    tempStream.getTracks().forEach((track) => track.stop());
+  }
+};
+
+const AudioInputsDropdown = (props) => {
+  const [ddOptions, setDdOptions] = useState([]);
+
+  useEffect(() => {
+    const init = async () => {
+      await queryAndPromptAudio();
+
+      const devices = await navigator.mediaDevices.enumerateDevices();
+      const audioDevices = devices
+        .filter((d) => d.kind === "audioinput" && d.deviceId !== "")
+        .map((d) => ({ value: d.deviceId, label: d.label }));
+
+      if (audioDevices.length < 1) return console.log("no audio input devices");
+
+      setDdOptions(audioDevices);
+      props.setDeviceId(audioDevices[0].value);
+    };
+    init();
+  }, []);
+
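+  // react-dropdown passes the selected option object ({ value, label }) to
+  // onChange, so hand the chosen deviceId up to the parent via the setDeviceId prop.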
+  const handleDropdownChange = (e) => {
+    props.setDeviceId(e.value);
+  };
+
+  return (
+    <Dropdown options={ddOptions} onChange={handleDropdownChange} />
+  );
+};
+
 export default function Recorder(props) {
   const waveformRef = useRef();
   const [wavesurfer, setWavesurfer] = useState(null);
@@ -14,22 +55,10 @@ export default function Recorder(props) {
   const [isRecording, setIsRecording] = useState(false);
   const [isPlaying, setIsPlaying] = useState(false);
   const [deviceId, setDeviceId] = useState(null);
-  const [ddOptions, setDdOptions] = useState([]);
 
   useEffect(() => {
     document.getElementById("play-btn").disabled = true;
 
-    navigator.mediaDevices.enumerateDevices().then((devices) => {
-      const audioDevices = devices
-        .filter((d) => d.kind === "audioinput")
-        .map((d) => ({ value: d.deviceId, label: d.label }));
-
-      if (audioDevices.length < 1) return console.log("no audio input devices");
-
-      setDdOptions(audioDevices);
-      setDeviceId(audioDevices[0].value);
-    });
-
     if (waveformRef.current) {
       const _wavesurfer = WaveSurfer.create({
         container: waveformRef.current,
@@ -85,22 +114,15 @@ export default function Recorder(props) {
     wavesurfer?.playPause();
   };
 
-  const handleDropdownChange = (e) => {
-    setDeviceId(e.value);
-  };
-
   return (
     <div>
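+      {/* The device picker (permission prompt + enumeration) now lives in AudioInputsDropdown */}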
-      <Dropdown
-        options={ddOptions}
-        onChange={handleDropdownChange}
-      />
+      <AudioInputsDropdown setDeviceId={setDeviceId} />
diff --git a/www/app/page.js b/www/app/page.js
index 2e4bfc93..635caa5e 100644
--- a/www/app/page.js
+++ b/www/app/page.js
@@ -6,23 +6,11 @@ import useWebRTC from "./components/webrtc.js";
 import "../public/button.css";
 
 const App = () => {
-  const [isRecording, setIsRecording] = useState(false);
   const [stream, setStream] = useState(null);
 
-  const handleRecord = (recording) => {
-    setIsRecording(recording);
-
-    if (recording) {
-      navigator.mediaDevices
-        .getUserMedia({ audio: true })
-        .then(setStream)
-        .catch((err) => console.error(err));
-    } else if (!recording && serverData.peer) {
-      serverData.peer.send(JSON.stringify({ cmd: "STOP" }));
-    }
-  };
-
-  const serverData = useWebRTC(stream, setIsRecording);
+  // This is where the stream is sent to the server and its responses
+  // (transcription, summary, etc.) come back.
+  const serverData = useWebRTC(stream);
 
   return (
     <div>
@@ -33,8 +21,6 @@ const App = () => {
       <Recorder
-        onRecord={(recording) => handleRecord(recording)}
         transcriptionText={serverData.text ?? "(No transcription yet)"}
         finalSummary={serverData.finalSummary}
         topics={serverData.topics ?? []}