Mirror of https://github.com/Monadical-SAS/reflector.git, synced 2025-12-20 20:29:06 +00:00
Revert "Merge pull request #9 from Monadical-SAS/jose/vertical-waveform"
This reverts commit 521777744f, reversing changes made to 6e3fe756c5.
@@ -17,83 +17,6 @@ class CustomRecordPlugin extends RecordPlugin {
   static create(options) {
     return new CustomRecordPlugin(options || {});
   }
-  render(stream) {
-    if (!this.wavesurfer) return () => undefined
-
-    const container = this.wavesurfer.getWrapper()
-    const canvas = document.createElement('canvas')
-    canvas.width = container.clientWidth
-    canvas.height = container.clientHeight
-    canvas.style.zIndex = '10'
-    container.appendChild(canvas)
-
-    const canvasCtx = canvas.getContext('2d')
-    const audioContext = new AudioContext()
-    const source = audioContext.createMediaStreamSource(stream)
-    const analyser = audioContext.createAnalyser()
-    analyser.fftSize = 2 ** 5
-    source.connect(analyser)
-    const bufferLength = analyser.frequencyBinCount
-    const dataArray = new Uint8Array(bufferLength)
-
-    let animationId, previousTimeStamp;
-    const BUFFER_SIZE = 2 ** 8
-    const dataBuffer = new Array(BUFFER_SIZE).fill(canvas.height)
-
-    const drawWaveform = (timeStamp) => {
-      if (!canvasCtx) return
-
-      analyser.getByteTimeDomainData(dataArray)
-      canvasCtx.clearRect(0, 0, canvas.width, canvas.height)
-      canvasCtx.fillStyle = 'black'
-
-      if (previousTimeStamp === undefined) {
-        previousTimeStamp = timeStamp
-        dataBuffer.push(Math.min(...dataArray))
-        dataBuffer.splice(0, 1)
-      }
-      const elapsed = timeStamp - previousTimeStamp;
-      if (elapsed > 10) {
-        previousTimeStamp = timeStamp
-        dataBuffer.push(Math.min(...dataArray))
-        dataBuffer.splice(0, 1)
-      }
-
-      // Drawing
-      const sliceWidth = canvas.width / dataBuffer.length
-      let x = 0
-
-      for (let i = 0; i < dataBuffer.length; i++) {
-        const valueNormalized = dataBuffer[i] / canvas.height
-        const y = valueNormalized * canvas.height / 2
-        const sliceHeight = canvas.height + 1 - y * 2
-
-        canvasCtx.fillRect(x, y, sliceWidth * 2 / 3, sliceHeight)
-        x += sliceWidth
-      }
-
-      animationId = requestAnimationFrame(drawWaveform)
-    }
-
-    drawWaveform()
-
-    return () => {
-      if (animationId) {
-        cancelAnimationFrame(animationId)
-      }
-
-      if (source) {
-        source.disconnect()
-        source.mediaStream.getTracks().forEach((track) => track.stop())
-      }
-
-      if (audioContext) {
-        audioContext.close()
-      }
-
-      canvas?.remove()
-    }
-  }
   startRecording(stream) {
     this.preventInteraction();
     this.cleanUp();
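The reverted render(stream) method above drives a live waveform purely from the Web Audio API: an AnalyserNode samples the microphone stream, each animation frame's minimum byte value is pushed into a fixed-size rolling buffer, and one bar per buffered sample is drawn onto an overlay canvas. Below is a minimal standalone sketch of the same technique, decoupled from wavesurfer.js; the drawLiveWaveform name and its stream/canvas parameters are illustrative, not part of the repository.

// Sketch of the reverted live-waveform technique. `stream` is a MediaStream
// (e.g. from getUserMedia) and `canvas` an existing <canvas> element.
function drawLiveWaveform(stream, canvas) {
  const ctx = canvas.getContext('2d')
  const audioContext = new AudioContext()
  const source = audioContext.createMediaStreamSource(stream)
  const analyser = audioContext.createAnalyser()
  analyser.fftSize = 32 // coarse time-domain frames are enough for bar heights
  source.connect(analyser)

  const frame = new Uint8Array(analyser.frequencyBinCount)
  // Rolling history of per-frame minima; 128 is the zero-signal midpoint.
  const history = new Array(256).fill(128)
  let animationId

  const draw = () => {
    analyser.getByteTimeDomainData(frame) // unsigned bytes centered on 128
    history.push(Math.min(...frame))
    history.shift()

    ctx.clearRect(0, 0, canvas.width, canvas.height)
    ctx.fillStyle = 'black'
    const sliceWidth = canvas.width / history.length
    history.forEach((value, i) => {
      // Deviation from the midpoint becomes a vertically centered bar.
      const amplitude = Math.abs(value - 128) / 128
      const barHeight = Math.max(1, amplitude * canvas.height)
      ctx.fillRect(
        i * sliceWidth,
        (canvas.height - barHeight) / 2,
        (sliceWidth * 2) / 3,
        barHeight
      )
    })
    animationId = requestAnimationFrame(draw)
  }
  draw()

  // Cleanup mirrors the reverted code: stop the loop, release the audio graph.
  return () => {
    cancelAnimationFrame(animationId)
    source.disconnect()
    audioContext.close()
  }
}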
@@ -7,47 +7,6 @@ import "react-dropdown/style.css";
 
 import CustomRecordPlugin from "./CustomRecordPlugin";
-
-const queryAndPromptAudio = async () => {
-  const permissionStatus = await navigator.permissions.query({name: 'microphone'})
-  if (permissionStatus.state == 'prompt') {
-    await navigator.mediaDevices.getUserMedia({ audio: true })
-  }
-}
-
-const AudioInputsDropdown = (props) => {
-  const [ddOptions, setDdOptions] = useState([]);
-
-  useEffect(() => {
-    const init = async () => {
-      await queryAndPromptAudio()
-
-      const devices = await navigator.mediaDevices.enumerateDevices()
-      const audioDevices = devices
-        .filter((d) => d.kind === "audioinput" && d.deviceId != "")
-        .map((d) => ({ value: d.deviceId, label: d.label }))
-
-      if (audioDevices.length < 1) return console.log("no audio input devices")
-
-      setDdOptions(audioDevices)
-      props.setDeviceId(audioDevices[0].value)
-    }
-    init()
-  }, [])
-
-  const handleDropdownChange = (e) => {
-    props.setDeviceId(e.value);
-  };
-
-  return (
-    <Dropdown
-      options={ddOptions}
-      onChange={handleDropdownChange}
-      value={ddOptions[0]}
-      disabled={props.disabled}
-    />
-  )
-}
 
 export default function Recorder(props) {
   const waveformRef = useRef();
   const [wavesurfer, setWavesurfer] = useState(null);
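The queryAndPromptAudio helper removed above exists because enumerateDevices() reports audio inputs with empty deviceId and label fields until microphone permission has been granted, which is also why the removed filter drops entries with an empty deviceId. Probing the Permissions API and calling getUserMedia only while the state is still 'prompt' surfaces the permission dialog once, so the subsequent enumeration returns usable entries. A sketch of that flow follows; the try/catch fallback is an assumption added here for browsers where permissions.query({ name: 'microphone' }) throws (it is not universally supported), and the listAudioInputs name is illustrative.

// Prime microphone permission, then enumerate labeled audio inputs.
async function listAudioInputs() {
  try {
    const status = await navigator.permissions.query({ name: 'microphone' })
    if (status.state === 'prompt') {
      await navigator.mediaDevices.getUserMedia({ audio: true })
    }
  } catch {
    // 'microphone' not queryable in this browser: prompt unconditionally.
    await navigator.mediaDevices.getUserMedia({ audio: true })
  }
  const devices = await navigator.mediaDevices.enumerateDevices()
  return devices
    .filter((d) => d.kind === 'audioinput' && d.deviceId !== '')
    .map((d) => ({ value: d.deviceId, label: d.label }))
}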
@@ -55,10 +14,22 @@ export default function Recorder(props) {
   const [isRecording, setIsRecording] = useState(false);
   const [isPlaying, setIsPlaying] = useState(false);
   const [deviceId, setDeviceId] = useState(null);
+  const [ddOptions, setDdOptions] = useState([]);
 
   useEffect(() => {
     document.getElementById("play-btn").disabled = true;
 
+    navigator.mediaDevices.enumerateDevices().then((devices) => {
+      const audioDevices = devices
+        .filter((d) => d.kind === "audioinput")
+        .map((d) => ({ value: d.deviceId, label: d.label }));
+
+      if (audioDevices.length < 1) return console.log("no audio input devices");
+
+      setDdOptions(audioDevices);
+      setDeviceId(audioDevices[0].value);
+    });
+
     if (waveformRef.current) {
       const _wavesurfer = WaveSurfer.create({
         container: waveformRef.current,
@@ -114,15 +85,22 @@ export default function Recorder(props) {
     wavesurfer?.playPause();
   };
 
+  const handleDropdownChange = (e) => {
+    setDeviceId(e.value);
+  };
+
   return (
     <div className="flex flex-col items-center justify-center max-w-[75vw] w-full">
       <div className="flex my-2 mx-auto">
-        <AudioInputsDropdown setDeviceId={setDeviceId} disabled={isRecording} />
+        <Dropdown
+          options={ddOptions}
+          onChange={handleDropdownChange}
+          value={ddOptions[0]}
+        />
 
         <button
           onClick={handleRecClick}
           data-color={isRecording ? "red" : "blue"}
-          disabled={!deviceId}
         >
           {isRecording ? "Stop" : "Record"}
         </button>
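For context on how the plugin connects to this component: the Recorder creates a WaveSurfer instance over waveformRef (visible in the hunk above) and records through CustomRecordPlugin, whose startRecording receives the MediaStream directly. A sketch of the wiring, assuming wavesurfer.js v7-style plugin registration; the exact version and registration call used by the repository are not shown in this diff.

// Sketch only: registerPlugin is the wavesurfer.js v7 API; container and
// deviceId come from the surrounding component state.
const ws = WaveSurfer.create({ container: waveformRef.current })
const record = ws.registerPlugin(CustomRecordPlugin.create())

navigator.mediaDevices
  .getUserMedia({ audio: { deviceId } })
  .then((stream) => record.startRecording(stream))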
@@ -6,11 +6,23 @@ import useWebRTC from "./components/webrtc.js";
 import "../public/button.css";
 
 const App = () => {
+  const [isRecording, setIsRecording] = useState(false);
   const [stream, setStream] = useState(null);
 
-  // This is where you'd send the stream and receive the data from the server.
-  // transcription, summary, etc
-  const serverData = useWebRTC(stream, () => {});
+  const handleRecord = (recording) => {
+    setIsRecording(recording);
+
+    if (recording) {
+      navigator.mediaDevices
+        .getUserMedia({ audio: true })
+        .then(setStream)
+        .catch((err) => console.error(err));
+    } else if (!recording && serverData.peer) {
+      serverData.peer.send(JSON.stringify({ cmd: "STOP" }));
+    }
+  };
+
+  const serverData = useWebRTC(stream, setIsRecording);
 
   return (
     <div className="flex flex-col items-center h-[100svh]">
@@ -21,6 +33,8 @@ const App = () => {
 
       <Recorder setStream={setStream} serverData={serverData} />
       <Dashboard
+        isRecording={isRecording}
+        onRecord={(recording) => handleRecord(recording)}
         transcriptionText={serverData.text ?? "..."}
         finalSummary={serverData.finalSummary}
         topics={serverData.topics ?? []}
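Taken together, the restored App flow is: Dashboard invokes handleRecord; on start it captures a microphone MediaStream and stores it in state, which feeds the useWebRTC hook; on stop it sends a JSON STOP command over the already-open peer connection. A condensed, commented restatement follows (the hook's return shape of peer/text/topics/finalSummary is inferred from the props used in this diff, not from its source):

// Condensed control flow of the restored App.
const serverData = useWebRTC(stream, setIsRecording)

const handleRecord = (recording) => {
  setIsRecording(recording)
  if (recording) {
    // Start: grab the microphone; useWebRTC reacts to the `stream` state.
    navigator.mediaDevices.getUserMedia({ audio: true }).then(setStream)
  } else if (serverData.peer) {
    // Stop: tell the server over the WebRTC data channel to finalize.
    serverData.peer.send(JSON.stringify({ cmd: "STOP" }))
  }
}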