mirror of
https://github.com/Monadical-SAS/reflector.git
synced 2025-12-20 20:29:06 +00:00
This commit restores the original behavior with frame cutting. While silero is used on our GPU for files, it looks like it's not working well on the live pipeline. To be investigated, but at the moment, what we keep is: - refactored to extract the downscale for further processing in the pipeline - removed any downscale implementation from audio_chunker and audio_merge - removed batching from audio_merge too for now
35 lines
981 B
Python
35 lines
981 B
Python
from typing import Optional
|
|
|
|
import av
|
|
|
|
from reflector.processors.audio_chunker import AudioChunkerProcessor
|
|
from reflector.processors.audio_chunker_auto import AudioChunkerAutoProcessor
|
|
|
|
|
|
class AudioChunkerFramesProcessor(AudioChunkerProcessor):
    """Frame-count based audio chunker.

    Buffers incoming ``av.AudioFrame`` objects and emits the accumulated
    batch once a fixed number of frames has been collected. Registered
    under the ``"frames"`` strategy name with ``AudioChunkerAutoProcessor``.
    """

    def __init__(self, max_frames=256, **kwargs):
        """Create the chunker.

        :param max_frames: number of buffered frames that triggers emission.
        :param kwargs: forwarded unchanged to ``AudioChunkerProcessor``.
        """
        super().__init__(**kwargs)
        self.max_frames = max_frames

    async def _chunk(self, data: av.AudioFrame) -> Optional[list[av.AudioFrame]]:
        """Buffer one frame; return the whole batch when the threshold is hit.

        Returns ``None`` while still accumulating.
        """
        # NOTE(review): self.frames is assumed to be initialized by the
        # parent AudioChunkerProcessor — confirm against the base class.
        self.frames.append(data)
        if len(self.frames) < self.max_frames:
            return None
        # Hand the accumulated batch to the caller and start a fresh buffer.
        batch, self.frames = self.frames, []
        return batch

    async def _flush(self):
        """Emit any frames still buffered (e.g. at end of stream)."""
        pending, self.frames = self.frames, []
        if pending:
            await self.emit(pending)
# Make this chunker selectable via AudioChunkerAutoProcessor under the
# "frames" strategy name.
AudioChunkerAutoProcessor.register("frames", AudioChunkerFramesProcessor)