Mirror of https://github.com/Monadical-SAS/reflector.git
Merge pull request #265 from Monadical-SAS/sara/recorder-memory
Sara/recorder memory
@@ -9,11 +9,12 @@ def get_audio_waveform(path: Path | str, segments_count: int = 256) -> list[int]
 path = path.as_posix()

 container = av.open(path)
-stream = container.streams.get(audio=0)[0]
+stream = container.streams.audio[0]
 duration = container.duration / av.time_base

 chunk_size_secs = duration / segments_count
 chunk_size = int(chunk_size_secs * stream.rate * stream.channels)

 if chunk_size == 0:
     # there is not enough data to fill the chunks
     # so basically we use chunk_size of 1.
@@ -22,7 +23,7 @@ def get_audio_waveform(path: Path | str, segments_count: int = 256) -> list[int]
 # 1.1 is a safety margin as it seems that pyav decode
 # does not always return the exact number of chunks
 # that we expect.
-volumes = np.zeros(int(segments_count * 1.1), dtype=int)
+volumes = np.zeros(int(segments_count * 1.1), dtype=float)
 current_chunk_idx = 0
 current_chunk_size = 0
 current_chunk_volume = 0
@@ -35,7 +36,6 @@ def get_audio_waveform(path: Path | str, segments_count: int = 256) -> list[int]
 count += len(data)
 frames += 1
 samples += frame.samples

 while len(data) > 0:
     datalen = len(data)

@@ -53,13 +53,13 @@ def get_audio_waveform(path: Path | str, segments_count: int = 256) -> list[int]
 current_chunk_idx += 1
 current_chunk_size = 0
 current_chunk_volume = 0

 volumes = volumes[:current_chunk_idx]

-# normalize the volumes 0-128
-volumes = volumes * 128 / volumes.max()
+# number of decimals to use when rounding the peak value
+digits = 2
+volumes = np.round(volumes / volumes.max(), digits)

-return volumes.astype("uint8").tolist()
+return volumes.tolist()


 if __name__ == "__main__":
@@ -41,7 +41,7 @@ def generate_transcript_name():


 class AudioWaveform(BaseModel):
-    data: list[int]
+    data: list[float]


 class TranscriptText(BaseModel):
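Taken together, the backend hunks above change the waveform contract: volumes are accumulated as floats (presumably so fractional per-chunk values are not truncated), divided by their own peak and rounded to two decimals, so the endpoint now returns values in the 0-1 range instead of integers scaled to 0-128, and the AudioWaveform schema switches to list[float] to match. A hedged TypeScript sketch of the shape change as seen from a client (the helper, names and sample numbers are illustrative, not part of the PR):

    // Illustrative only: old data was integers scaled to 0..128,
    // new data is floats in 0..1 rounded to two decimals.
    type AudioWaveform = { data: number[] };

    const legacy: AudioWaveform = { data: [16, 64, 128] };

    const migrated: AudioWaveform = {
      // divide by the old maximum (128) and keep two decimals, mirroring
      // np.round(volumes / volumes.max(), 2) on the server
      data: legacy.data.map((v) => Math.round((v / 128) * 100) / 100),
    };
    // migrated.data -> [0.13, 0.5, 1]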
www/app/styles/recorder.js (new file, 30 lines)
@@ -0,0 +1,30 @@
+export const waveSurferStyles = {
+  playerSettings: {
+    waveColor: "#777",
+    progressColor: "#222",
+    cursorColor: "OrangeRed",
+  },
+  playerStyle: {
+    cursor: "pointer",
+    backgroundColor: "RGB(240 240 240)",
+    borderRadius: "15px",
+  },
+  marker: `
+    border-left: solid 1px orange;
+    padding: 0 2px 0 5px;
+    font-size: 0.7rem;
+    border-radius: 0 3px 3px 0;
+
+    position: absolute;
+    width: 100px;
+    max-width: fit-content;
+    cursor: pointer;
+    background-color: white;
+    white-space: nowrap;
+    overflow: hidden;
+    text-overflow: ellipsis;
+    transition: width 100ms linear;
+    z-index: 0;
+  `,
+  markerHover: { backgroundColor: "orange" },
+};
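This new module centralizes the wavesurfer look and feel that was previously hardcoded in the Recorder component: playerSettings feeds WaveSurfer.create, playerStyle is copied onto the wrapper element, and marker/markerHover style the topic regions. A minimal sketch, assuming wavesurfer.js v7, of how a consumer wires the shared styles up (the createStyledPlayer helper is hypothetical; the real usage is in the Recorder hunks below):

    import WaveSurfer from "wavesurfer.js";
    import { waveSurferStyles } from "../styles/recorder";

    // Hypothetical helper: create a player that picks up the shared colors,
    // then copy the wrapper styles (cursor, background, border radius) onto
    // the element wavesurfer renders into.
    export function createStyledPlayer(container: HTMLElement): WaveSurfer {
      const ws = WaveSurfer.create({
        container,
        ...waveSurferStyles.playerSettings, // waveColor, progressColor, cursorColor
      });
      Object.assign(ws.getWrapper().style, waveSurferStyles.playerStyle);
      return ws;
    }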
@@ -14,6 +14,7 @@ import { AudioWaveform } from "../api";
 import AudioInputsDropdown from "./audioInputsDropdown";
 import { Option } from "react-dropdown";
 import { useError } from "../(errors)/errorContext";
+import { waveSurferStyles } from "../styles/recorder";

 type RecorderProps = {
   setStream?: React.Dispatch<React.SetStateAction<MediaStream | null>>;
@@ -94,20 +95,29 @@ export default function Recorder(props: RecorderProps) {
    };
  };

+  // Setup Shortcuts
+  useEffect(() => {
+    if (!record) return;
+
+    return setupProjectorKeys();
+  }, [record, deviceId]);
+
+  // Waveform setup
  useEffect(() => {
    if (waveformRef.current) {
      const _wavesurfer = WaveSurfer.create({
        container: waveformRef.current,
-        waveColor: "#777",
-        progressColor: "#222",
-        cursorColor: "OrangeRed",
-        url: props.transcriptId
-          ? `${process.env.NEXT_PUBLIC_API_URL}/v1/transcripts/${props.transcriptId}/audio/mp3`
-          : undefined,
+        peaks: props.waveform?.data,
+
        hideScrollbar: true,
        autoCenter: true,
        barWidth: 2,
        height: "auto",
+        url: props.transcriptId
+          ? `${process.env.NEXT_PUBLIC_API_URL}/v1/transcripts/${props.transcriptId}/audio/mp3`
+          : undefined,
+
+        ...waveSurferStyles.player,
      });

      if (!props.transcriptId) {
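The functional change in this hunk is peaks: props.waveform?.data — the waveform is drawn from peaks precomputed on the server instead of from audio the browser would otherwise have to fetch and decode, which is presumably where the memory saving behind this PR comes from; the url is kept so playback still works. A stripped-down sketch of the same pattern, assuming wavesurfer.js v7 (waveformData and audioUrl are placeholders):

    import WaveSurfer from "wavesurfer.js";

    // Placeholders for illustration: peaks as returned by the API (floats in 0..1)
    // and the mp3 URL used only for playback.
    declare const waveformData: number[];
    declare const audioUrl: string;

    const ws = WaveSurfer.create({
      container: document.getElementById("waveform")!,
      peaks: [waveformData], // precomputed peaks per channel; avoids decoding just to render
      url: audioUrl,
      barWidth: 2,
      height: "auto",
    });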
@@ -115,10 +125,12 @@ export default function Recorder(props: RecorderProps) {
        _wshack.renderer.renderSingleCanvas = () => {};
      }

+      // styling
      const wsWrapper = _wavesurfer.getWrapper();
-      wsWrapper.style.cursor = "pointer";
-      wsWrapper.style.backgroundColor = "RGB(240 240 240)";
-      wsWrapper.style.borderRadius = "15px";
+      wsWrapper.style.cursor = waveSurferStyles.playerStyle.cursor;
+      wsWrapper.style.backgroundColor =
+        waveSurferStyles.playerStyle.backgroundColor;
+      wsWrapper.style.borderRadius = waveSurferStyles.playerStyle.borderRadius;

      _wavesurfer.on("play", () => {
        setIsPlaying(true);
@@ -131,9 +143,10 @@ export default function Recorder(props: RecorderProps) {
      setRecord(_wavesurfer.registerPlugin(RecordPlugin.create()));
      setWaveRegions(_wavesurfer.registerPlugin(CustomRegionsPlugin.create()));

-      if (props.transcriptId) _wavesurfer.toggleInteraction(true);
+      if (props.isPastMeeting) _wavesurfer.toggleInteraction(true);

      setWavesurfer(_wavesurfer);

      return () => {
        _wavesurfer.destroy();
        setIsRecording(false);
@@ -152,35 +165,18 @@ export default function Recorder(props: RecorderProps) {
    if (!waveRegions) return;

    waveRegions.clearRegions();

    for (let topic of topicsRef.current) {
      const content = document.createElement("div");
-      content.setAttribute(
-        "style",
-        `
-        position: absolute;
-        border-left: solid 1px orange;
-        padding: 0 2px 0 5px;
-        font-size: 0.7rem;
-        width: 100px;
-        max-width: fit-content;
-        cursor: pointer;
-        background-color: white;
-        border-radius: 0 3px 3px 0;
-        white-space: nowrap;
-        overflow: hidden;
-        text-overflow: ellipsis;
-        transition: width 100ms linear;
-        `,
-      );
+      content.setAttribute("style", waveSurferStyles.marker);
      content.onmouseover = () => {
-        content.style.backgroundColor = "orange";
+        content.style.backgroundColor =
+          waveSurferStyles.markerHover.backgroundColor;
        content.style.zIndex = "999";
        content.style.width = "300px";
      };
      content.onmouseout = () => {
-        content.style.backgroundColor = "white";
-        content.style.zIndex = "0";
-        content.style.width = "100px";
+        content.setAttribute("style", waveSurferStyles.marker);
      };
      content.textContent = topic.title;

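The per-region inline CSS is gone: markers now take their base style from waveSurferStyles.marker, and the mouseout handler simply re-applies that base string instead of resetting background, z-index and width one by one. A self-contained sketch of the same pattern (makeTopicMarker is a hypothetical helper; the component builds the element inline inside the loop):

    import { waveSurferStyles } from "../styles/recorder";

    // Hypothetical helper mirroring the hunk above: a topic marker that expands
    // on hover and snaps back to the shared base style on mouseout.
    function makeTopicMarker(title: string): HTMLDivElement {
      const content = document.createElement("div");
      content.setAttribute("style", waveSurferStyles.marker);
      content.textContent = title;
      content.onmouseover = () => {
        content.style.backgroundColor = waveSurferStyles.markerHover.backgroundColor;
        content.style.zIndex = "999";
        content.style.width = "300px";
      };
      content.onmouseout = () => {
        // re-applying the full style string resets background, z-index and width at once
        content.setAttribute("style", waveSurferStyles.marker);
      };
      return content;
    }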
@@ -198,12 +194,6 @@ export default function Recorder(props: RecorderProps) {
    }
  };

-  useEffect(() => {
-    if (!record) return;
-
-    return setupProjectorKeys();
-  }, [record, deviceId]);
-
  useEffect(() => {
    if (!record) return;
