diff --git a/www/app/components/CustomRecordPlugin.js b/www/app/components/CustomRecordPlugin.js
index b6f4fd88..bbe21195 100644
--- a/www/app/components/CustomRecordPlugin.js
+++ b/www/app/components/CustomRecordPlugin.js
@@ -17,83 +17,6 @@ class CustomRecordPlugin extends RecordPlugin {
   static create(options) {
     return new CustomRecordPlugin(options || {});
   }
-  render(stream) {
-    if (!this.wavesurfer) return () => undefined
-
-    const container = this.wavesurfer.getWrapper()
-    const canvas = document.createElement('canvas')
-    canvas.width = container.clientWidth
-    canvas.height = container.clientHeight
-    canvas.style.zIndex = '10'
-    container.appendChild(canvas)
-
-    const canvasCtx = canvas.getContext('2d')
-    const audioContext = new AudioContext()
-    const source = audioContext.createMediaStreamSource(stream)
-    const analyser = audioContext.createAnalyser()
-    analyser.fftSize = 2 ** 5
-    source.connect(analyser)
-    const bufferLength = analyser.frequencyBinCount
-    const dataArray = new Uint8Array(bufferLength)
-
-    let animationId, previousTimeStamp;
-    const BUFFER_SIZE = 2 ** 8
-    const dataBuffer = new Array(BUFFER_SIZE).fill(canvas.height)
-
-    const drawWaveform = (timeStamp) => {
-      if (!canvasCtx) return
-
-      analyser.getByteTimeDomainData(dataArray)
-      canvasCtx.clearRect(0, 0, canvas.width, canvas.height)
-      canvasCtx.fillStyle = 'black'
-
-      if (previousTimeStamp === undefined) {
-        previousTimeStamp = timeStamp
-        dataBuffer.push(Math.min(...dataArray))
-        dataBuffer.splice(0, 1)
-      }
-      const elapsed = timeStamp - previousTimeStamp;
-      if (elapsed > 10) {
-        previousTimeStamp = timeStamp
-        dataBuffer.push(Math.min(...dataArray))
-        dataBuffer.splice(0, 1)
-      }
-
-      // Drawing
-      const sliceWidth = canvas.width / dataBuffer.length
-      let x = 0
-
-      for (let i = 0; i < dataBuffer.length; i++) {
-        const valueNormalized = dataBuffer[i] / canvas.height
-        const y = valueNormalized * canvas.height / 2
-        const sliceHeight = canvas.height + 1 - y * 2
-
-        canvasCtx.fillRect(x, y, sliceWidth * 2 / 3, sliceHeight)
-        x += sliceWidth
-      }
-
-      animationId = requestAnimationFrame(drawWaveform)
-    }
-
-    drawWaveform()
-
-    return () => {
-      if (animationId) {
-        cancelAnimationFrame(animationId)
-      }
-
-      if (source) {
-        source.disconnect()
-        source.mediaStream.getTracks().forEach((track) => track.stop())
-      }
-
-      if (audioContext) {
-        audioContext.close()
-      }
-
-      canvas?.remove()
-    }
-  }
   startRecording(stream) {
     this.preventInteraction();
     this.cleanUp();
diff --git a/www/app/components/record.js b/www/app/components/record.js
index 900851dd..70336e5d 100644
--- a/www/app/components/record.js
+++ b/www/app/components/record.js
@@ -7,47 +7,6 @@ import "react-dropdown/style.css";
 import CustomRecordPlugin from "./CustomRecordPlugin";
-const queryAndPromptAudio = async () => {
-  const permissionStatus = await navigator.permissions.query({name: 'microphone'})
-  if (permissionStatus.state == 'prompt') {
-    await navigator.mediaDevices.getUserMedia({ audio: true })
-  }
-}
-
-const AudioInputsDropdown = (props) => {
-  const [ddOptions, setDdOptions] = useState([]);
-
-  useEffect(() => {
-    const init = async () => {
-      await queryAndPromptAudio()
-
-      const devices = await navigator.mediaDevices.enumerateDevices()
-      const audioDevices = devices
-        .filter((d) => d.kind === "audioinput" && d.deviceId != "")
-        .map((d) => ({ value: d.deviceId, label: d.label }))
-
-      if (audioDevices.length < 1) return console.log("no audio input devices")
-
-      setDdOptions(audioDevices)
-      props.setDeviceId(audioDevices[0].value)
-    }
-    init()
-  }, [])
-
-  const handleDropdownChange = (e) => {
-    props.setDeviceId(e.value);
-  };
-
-  return (
-