docs: docs website + installation (#778)

* feat: WIP doc (vibe started and iterated)

* install from scratch docs

* caddyfile.example

* gitignore

* authentik script

* authentik script

* authentik script

* llm doc

* authentik ongoing

* more daily setup logs

* doc website

* gpu self hosted setup guide (no-mistakes)

* doc review round

* doc review round

* doc review round

* update doc site sidebars

* feat(docs): add mermaid diagram support

* docs polishing

* live pipeline doc

* move pipeline dev docs to dev docs location

* doc pr review iteration

* dockerfile healthcheck

* docs/pr-comments

* remove jwt comment

* llm suggestion

* pr comments

* pr comments

* document auto migrations

* cleanup docs

---------

Co-authored-by: Mathieu Virbel <mat@meltingrocks.com>
Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
committed by GitHub on 2026-01-06 17:25:02 -05:00
parent e644d6497b
commit 407c15299f
61 changed files with 32653 additions and 26 deletions

gpu/modal_deployments/reflector_transcriber_parakeet.py

@@ -90,6 +90,12 @@ image = (
 )
 
 
+# IMPORTANT: This function is duplicated in multiple files for deployment isolation.
+# If you modify the audio format detection logic, you MUST update all copies:
+# - gpu/self_hosted/app/utils.py
+# - gpu/modal_deployments/reflector_transcriber.py (2 copies)
+# - gpu/modal_deployments/reflector_transcriber_parakeet.py (this file)
+# - gpu/modal_deployments/reflector_diarizer.py
 def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtension:
     parsed_url = urlparse(url)
     url_path = parsed_url.path
@@ -105,6 +111,8 @@ def detect_audio_format(url: str, headers: Mapping[str, str]) -> AudioFileExtens
         return AudioFileExtension("wav")
     if "audio/mp4" in content_type:
         return AudioFileExtension("mp4")
+    if "audio/webm" in content_type or "video/webm" in content_type:
+        return AudioFileExtension("webm")
     raise ValueError(
         f"Unsupported audio format for URL: {url}. "
@@ -301,6 +309,11 @@ class TranscriberParakeetFile:
         audio_array, sample_rate = librosa.load(file_path, sr=SAMPLERATE, mono=True)
         return audio_array
 
+    # IMPORTANT: This VAD segment logic is duplicated in multiple files for deployment isolation.
+    # If you modify this function, you MUST update all copies:
+    # - gpu/modal_deployments/reflector_transcriber.py
+    # - gpu/modal_deployments/reflector_transcriber_parakeet.py (this file)
+    # - gpu/self_hosted/app/services/transcriber.py
     def vad_segment_generator(
         audio_array,
     ) -> Generator[TimeSegment, None, None]:
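
For context, vad_segment_generator turns a mono audio array, loaded at the model sample rate as in the hunk above, into a stream of speech TimeSegments that the transcriber consumes one by one. A hedged consumer sketch follows; the transcribe_segments name, the 16 kHz SAMPLERATE value, and the assumption that TimeSegment exposes start/end times in seconds are illustrative and not taken from this diff:

import librosa

SAMPLERATE = 16000  # assumed value; the deployment defines its own constant

def transcribe_segments(file_path: str, vad_segment_generator) -> None:
    # Same load call as in the hunk: mono audio resampled to SAMPLERATE.
    audio_array, _sr = librosa.load(file_path, sr=SAMPLERATE, mono=True)
    for segment in vad_segment_generator(audio_array):
        # Assumption: TimeSegment carries start/end in seconds.
        start = int(segment.start * SAMPLERATE)
        end = int(segment.end * SAMPLERATE)
        speech = audio_array[start:end]
        print(f"{segment.start:.2f}s-{segment.end:.2f}s -> {len(speech)} samples")
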