Compare commits

...

11 Commits

Author SHA1 Message Date
32a049c134 chore(main): release 0.23.0 (#770) 2025-12-10 13:42:28 +01:00
91650ec65f fix: deploy frontend to coolify (#779)
* Ignore act secrets

* Deploy frontend container to ECR

* Use published image

* Remove ecr workflows

* Trigger coolify deployment

* Deploy on release-please PR merge

* Upgrade nextjs

* Update secrets example
2025-12-10 13:35:53 +01:00
Igor Monadical
61f0e29d4c feat: llm retries (#739)
* llm retries no-mistakes

* self-review (no-mistakes)

* self-review (no-mistakes)

* bigger retry intervals by default

* tests and DRY

* restore to main state

* parse retries

* json retries (no-mistakes)

* json retries (no-mistakes)

* json retries (no-mistakes)

* json retries (no-mistakes) self-review

* additional network retry test

* more lint

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2025-12-05 12:08:21 -05:00
Igor Monadical
ec17ed7b58 fix: celery inspect bug sidestep in restart script (#766)
* celery bug sidestep

* Update server/reflector/services/transcript_process.py

Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com>

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com>
2025-12-04 09:22:51 -05:00
Igor Monadical
00549f153a feat: dockerhub ci (#772)
* dockerhub ci

* ci test

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2025-12-03 13:26:08 -05:00
3ad78be762 fix: hide rooms settings instead of disabling (#763)
* Hide rooms settings instead of disabling

* Reset recording trigger
2025-12-03 16:49:17 +01:00
d3a5cd12d2 fix: return participant emails from transcript endpoint (#769)
* Return participant emails from transcript endpoint

* Fix broken test
2025-12-03 16:47:56 +01:00
af921ce927 chore(main): release 0.22.4 (#765) 2025-12-02 17:11:48 -05:00
Igor Monadical
bd5df1ce2e fix: Multitrack mixdown optimisation 2 (#764)
* Revert "fix: Skip mixdown for multitrack (#760)"

This reverts commit b51b7aa917.

* multitrack mixdown optimisation

* return the "good" ui part of "skip mixdown"

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2025-12-02 17:10:06 -05:00
c8024484b3 chore(main): release 0.22.3 (#761) 2025-12-02 09:08:22 +01:00
28f87c09dc fix: align daily room settings (#759)
* Switch platform ui

* Update room settings based on platform

* Add local and none recording options to daily

* Don't create tokens for unauthenticated users

* Enable knocking for private rooms

* Create new meeting on room settings change

* Always use 2-200 option for daily

* Show recording start trigger for daily

* Fix broken test
2025-12-02 09:06:36 +01:00
25 changed files with 1128 additions and 363 deletions

View File

@@ -1,90 +0,0 @@
name: Build container/push to container registry
on: [workflow_dispatch]
env:
# 950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector
AWS_REGION: us-east-1
ECR_REPOSITORY: reflector
jobs:
build:
strategy:
matrix:
include:
- platform: linux/amd64
runner: linux-amd64
arch: amd64
- platform: linux/arm64
runner: linux-arm64
arch: arm64
runs-on: ${{ matrix.runner }}
permissions:
contents: read
outputs:
registry: ${{ steps.login-ecr.outputs.registry }}
steps:
- uses: actions/checkout@v4
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build and push ${{ matrix.arch }}
uses: docker/build-push-action@v5
with:
context: server
platforms: ${{ matrix.platform }}
push: true
tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
cache-from: type=gha,scope=${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
github-token: ${{ secrets.GHA_CACHE_TOKEN }}
provenance: false
create-manifest:
runs-on: ubuntu-latest
needs: [build]
permissions:
deployments: write
contents: read
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
uses: aws-actions/amazon-ecr-login@v2
- name: Create and push multi-arch manifest
run: |
# Get the registry URL (since we can't easily access job outputs in matrix)
ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com
docker manifest create \
$ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
$ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
$ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64
docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest
echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"

View File

@@ -1,35 +1,39 @@
name: Build and Push Frontend Docker Image
name: Build and Push Backend Docker Image (Docker Hub)
on:
push:
branches:
- main
pull_request:
types:
- closed
paths:
- 'www/**'
- '.github/workflows/docker-frontend.yml'
- "server/**"
- ".github/workflows/dockerhub-backend.yml"
workflow_dispatch:
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}-frontend
REGISTRY: docker.io
IMAGE_NAME: monadicalsas/reflector-backend
jobs:
build-and-push:
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event.pull_request.merged == true &&
startsWith(github.event.pull_request.head.ref, 'release-please--branches--'))
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to GitHub Container Registry
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
username: monadicalsas
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata
id: meta
@@ -47,8 +51,8 @@ jobs:
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: ./www
file: ./www/Dockerfile
context: ./server
file: ./server/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}

View File

@@ -0,0 +1,70 @@
name: Build and Push Frontend Docker Image
on:
pull_request:
types:
- closed
paths:
- "www/**"
- ".github/workflows/dockerhub-frontend.yml"
workflow_dispatch:
env:
REGISTRY: docker.io
IMAGE_NAME: monadicalsas/reflector-frontend
jobs:
build-and-push:
runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event.pull_request.merged == true &&
startsWith(github.event.pull_request.head.ref, 'release-please--branches--'))
permissions:
contents: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: monadicalsas
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=sha,prefix={{branch}}-
type=raw,value=latest,enable={{is_default_branch}}
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: ./www
file: ./www/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64
- name: Trigger Coolify deployment
if: success()
run: |
curl -X POST "${{ secrets.COOLIFY_WEBHOOK_URL }}" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer ${{ secrets.COOLIFY_WEBHOOK_TOKEN }}" \
-f || (echo "Failed to trigger Coolify deployment" && exit 1)

.gitignore
View File

@@ -18,3 +18,4 @@ CLAUDE.local.md
www/.env.development
www/.env.production
.playwright-mcp
.secrets

.secrets.example (new file)
View File

@@ -0,0 +1,22 @@
# Example secrets file for GitHub Actions workflows
# Copy this to .secrets and fill in your values
# These secrets should be configured in GitHub repository settings:
# Settings > Secrets and variables > Actions
# DockerHub Configuration (required for frontend and backend deployment)
# Create a Docker Hub access token at https://hub.docker.com/settings/security
# Username: monadicalsas
DOCKERHUB_TOKEN=your-dockerhub-access-token
# GitHub Token (required for frontend and backend deployment)
# Used by docker/metadata-action for extracting image metadata
# Can use the default GITHUB_TOKEN or create a personal access token
GITHUB_TOKEN=your-github-token-or-use-default-GITHUB_TOKEN
# Coolify Deployment Webhook (required for frontend deployment)
# Used to trigger automatic deployment after image push
COOLIFY_WEBHOOK_URL=https://app.monadical.io/api/v1/deploy?uuid=your-uuid&force=false
COOLIFY_WEBHOOK_TOKEN=your-coolify-webhook-token
# Optional: GitHub Actions Cache Token (for local testing with act)
GHA_CACHE_TOKEN=your-github-token-or-empty
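
Note: the act CLI consumes exactly this kind of file; by default it picks up a file named .secrets in the working directory, and it can be pointed at one explicitly with act --secret-file .secrets (act's documented behaviour, not something configured in this repo).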

View File

@@ -1,5 +1,35 @@
# Changelog
## [0.23.0](https://github.com/Monadical-SAS/reflector/compare/v0.22.4...v0.23.0) (2025-12-10)
### Features
* dockerhub ci ([#772](https://github.com/Monadical-SAS/reflector/issues/772)) ([00549f1](https://github.com/Monadical-SAS/reflector/commit/00549f153ade922cf4cb6c5358a7d11a39c426d2))
* llm retries ([#739](https://github.com/Monadical-SAS/reflector/issues/739)) ([61f0e29](https://github.com/Monadical-SAS/reflector/commit/61f0e29d4c51eab54ee67af92141fbb171e8ccaa))
### Bug Fixes
* celery inspect bug sidestep in restart script ([#766](https://github.com/Monadical-SAS/reflector/issues/766)) ([ec17ed7](https://github.com/Monadical-SAS/reflector/commit/ec17ed7b587cf6ee143646baaee67a7c017044d4))
* deploy frontend to coolify ([#779](https://github.com/Monadical-SAS/reflector/issues/779)) ([91650ec](https://github.com/Monadical-SAS/reflector/commit/91650ec65f65713faa7ee0dcfb75af427b7c4ba0))
* hide rooms settings instead of disabling ([#763](https://github.com/Monadical-SAS/reflector/issues/763)) ([3ad78be](https://github.com/Monadical-SAS/reflector/commit/3ad78be7628c0d029296b301a0e87236c76b7598))
* return participant emails from transcript endpoint ([#769](https://github.com/Monadical-SAS/reflector/issues/769)) ([d3a5cd1](https://github.com/Monadical-SAS/reflector/commit/d3a5cd12d2d0d9c32af2d5bd9322e030ef69b85d))
## [0.22.4](https://github.com/Monadical-SAS/reflector/compare/v0.22.3...v0.22.4) (2025-12-02)
### Bug Fixes
* Multitrack mixdown optimisation 2 ([#764](https://github.com/Monadical-SAS/reflector/issues/764)) ([bd5df1c](https://github.com/Monadical-SAS/reflector/commit/bd5df1ce2ebf35d7f3413b295e56937a9a28ef7b))
## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02)
### Bug Fixes
* align daily room settings ([#759](https://github.com/Monadical-SAS/reflector/issues/759)) ([28f87c0](https://github.com/Monadical-SAS/reflector/commit/28f87c09dc459846873d0dde65b03e3d7b2b9399))
## [0.22.2](https://github.com/Monadical-SAS/reflector/compare/v0.22.1...v0.22.2) (2025-12-02)

View File

@@ -3,10 +3,7 @@
services:
web:
build:
context: ./www
dockerfile: Dockerfile
image: reflector-frontend:latest
image: monadicalsas/reflector-frontend:latest
environment:
- KV_URL=${KV_URL:-redis://redis:6379}
- SITE_URL=${SITE_URL}

View File

@@ -126,6 +126,7 @@ markers = [
select = [
"I", # isort - import sorting
"F401", # unused imports
"E402", # module level import not at top of file
"PLC0415", # import-outside-top-level - detect inline imports
]

View File

@@ -1,13 +1,19 @@
import asyncio
import functools
from uuid import uuid4
from celery import current_task
from reflector.db import get_database
from reflector.llm import llm_session_id
def asynctask(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
async def run_with_db():
task_id = current_task.request.id if current_task else None
llm_session_id.set(task_id or f"random-{uuid4().hex}")
database = get_database()
await database.connect()
try:
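
A minimal sketch of the session-id plumbing this hunk introduces, using the same ContextVar pattern; begin_session and current_session are illustrative names, not functions from the repo:

from contextvars import ContextVar
from uuid import uuid4

llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)

def begin_session(task_id: str | None) -> None:
    # Celery supplies current_task.request.id inside a worker; outside one,
    # fall back to a random id, matching the f"random-{uuid4().hex}" above.
    llm_session_id.set(task_id or f"random-{uuid4().hex}")

def current_session() -> str:
    # Readers (e.g. the LLM client configuration) pick up whatever was set
    # earlier in the same async context.
    return llm_session_id.get() or f"fallback-{uuid4().hex}"

begin_session("celery-task-123")
assert current_session() == "celery-task-123"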

View File

@@ -40,6 +40,10 @@ class RoomProperties(BaseModel):
)
enable_chat: bool = Field(default=True, description="Enable in-meeting chat")
enable_screenshare: bool = Field(default=True, description="Enable screen sharing")
enable_knocking: bool = Field(
default=False,
description="Enable knocking for private rooms (allows participants to request access)",
)
start_video_off: bool = Field(
default=False, description="Start with video off for all participants"
)

View File

@@ -88,5 +88,11 @@ class UserController:
results = await get_database().fetch_all(query)
return [User(**r) for r in results]
@staticmethod
async def get_by_ids(user_ids: list[NonEmptyString]) -> dict[str, User]:
query = users.select().where(users.c.id.in_(user_ids))
results = await get_database().fetch_all(query)
return {user.id: User(**user) for user in results}
user_controller = UserController()
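
A hedged sketch of the batched-lookup pattern get_by_ids enables (one IN query, then an in-memory join), with hypothetical stand-ins for the real models and database:

import asyncio
from dataclasses import dataclass

@dataclass
class User:
    id: str
    email: str

@dataclass
class Participant:
    user_id: str | None

async def get_by_ids(user_ids: list[str]) -> dict[str, User]:
    # Stand-in for the single users.id IN (...) query above.
    db = {"u1": User("u1", "a@example.com")}
    return {uid: db[uid] for uid in user_ids if uid in db}

async def emails_for(participants: list[Participant]) -> list[str | None]:
    # Collect ids once, fetch once, join in memory -- no per-row queries.
    ids = [p.user_id for p in participants if p.user_id is not None]
    users = await get_by_ids(ids) if ids else {}
    return [
        users[p.user_id].email if p.user_id in users else None
        for p in participants
    ]

print(asyncio.run(emails_for([Participant("u1"), Participant(None)])))
# ['a@example.com', None]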

View File

@@ -1,14 +1,29 @@
import logging
from typing import Type, TypeVar
from contextvars import ContextVar
from typing import Generic, Type, TypeVar
from uuid import uuid4
from llama_index.core import Settings
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.workflow import (
Context,
Event,
StartEvent,
StopEvent,
Workflow,
step,
)
from llama_index.llms.openai_like import OpenAILike
from pydantic import BaseModel, ValidationError
T = TypeVar("T", bound=BaseModel)
OutputT = TypeVar("OutputT", bound=BaseModel)
# Session ID for LiteLLM request grouping - set per processing run
llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)
logger = logging.getLogger(__name__)
STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
Based on the following analysis, provide the information in the requested JSON format:
@@ -20,6 +35,158 @@ Analysis:
"""
class LLMParseError(Exception):
"""Raised when LLM output cannot be parsed after retries."""
def __init__(self, output_cls: Type[BaseModel], error_msg: str, attempts: int):
self.output_cls = output_cls
self.error_msg = error_msg
self.attempts = attempts
super().__init__(
f"Failed to parse {output_cls.__name__} after {attempts} attempts: {error_msg}"
)
class ExtractionDone(Event):
"""Event emitted when LLM JSON formatting completes."""
output: str
class ValidationErrorEvent(Event):
"""Event emitted when validation fails."""
error: str
wrong_output: str
class StructuredOutputWorkflow(Workflow, Generic[OutputT]):
"""Workflow for structured output extraction with validation retry.
This workflow handles parse/validation retries only. Network error retries
are handled internally by Settings.llm (OpenAILike max_retries=3).
The caller should NOT wrap this workflow in additional retry logic.
"""
def __init__(
self,
output_cls: Type[OutputT],
max_retries: int = 3,
**kwargs,
):
super().__init__(**kwargs)
self.output_cls: Type[OutputT] = output_cls
self.max_retries = max_retries
self.output_parser = PydanticOutputParser(output_cls)
@step
async def extract(
self, ctx: Context, ev: StartEvent | ValidationErrorEvent
) -> StopEvent | ExtractionDone:
"""Extract structured data from text using two-step LLM process.
Step 1 (first call only): TreeSummarize generates text analysis
Step 2 (every call): Settings.llm.acomplete formats analysis as JSON
"""
current_retries = await ctx.store.get("retries", default=0)
await ctx.store.set("retries", current_retries + 1)
if current_retries >= self.max_retries:
last_error = await ctx.store.get("last_error", default=None)
logger.error(
f"Max retries ({self.max_retries}) reached for {self.output_cls.__name__}"
)
return StopEvent(result={"error": last_error, "attempts": current_retries})
if isinstance(ev, StartEvent):
# First call: run TreeSummarize to get analysis, store in context
prompt = ev.get("prompt")
texts = ev.get("texts")
tone_name = ev.get("tone_name")
if not prompt or not isinstance(texts, list):
raise ValueError(
"StartEvent must contain 'prompt' (str) and 'texts' (list)"
)
summarizer = TreeSummarize(verbose=False)
analysis = await summarizer.aget_response(
prompt, texts, tone_name=tone_name
)
await ctx.store.set("analysis", str(analysis))
reflection = ""
else:
# Retry: reuse analysis from context
analysis = await ctx.store.get("analysis")
if not analysis:
raise RuntimeError("Internal error: analysis not found in context")
wrong_output = ev.wrong_output
if len(wrong_output) > 2000:
wrong_output = wrong_output[:2000] + "... [truncated]"
reflection = (
f"\n\nYour previous response could not be parsed:\n{wrong_output}\n\n"
f"Error:\n{ev.error}\n\n"
"Please try again. Return ONLY valid JSON matching the schema above, "
"with no markdown formatting or extra text."
)
# Step 2: Format analysis as JSON using LLM completion
format_instructions = self.output_parser.format(
"Please structure the above information in the following JSON format:"
)
json_prompt = STRUCTURED_RESPONSE_PROMPT_TEMPLATE.format(
analysis=analysis,
format_instructions=format_instructions + reflection,
)
# Network retries handled by OpenAILike (max_retries=3)
response = await Settings.llm.acomplete(json_prompt)
return ExtractionDone(output=response.text)
@step
async def validate(
self, ctx: Context, ev: ExtractionDone
) -> StopEvent | ValidationErrorEvent:
"""Validate extracted output against Pydantic schema."""
raw_output = ev.output
retries = await ctx.store.get("retries", default=0)
try:
parsed = self.output_parser.parse(raw_output)
if retries > 1:
logger.info(
f"LLM parse succeeded on attempt {retries}/{self.max_retries} "
f"for {self.output_cls.__name__}"
)
return StopEvent(result={"success": parsed})
except (ValidationError, ValueError) as e:
error_msg = self._format_error(e, raw_output)
await ctx.store.set("last_error", error_msg)
logger.error(
f"LLM parse error (attempt {retries}/{self.max_retries}): "
f"{type(e).__name__}: {e}\nRaw response: {raw_output[:500]}"
)
return ValidationErrorEvent(
error=error_msg,
wrong_output=raw_output,
)
def _format_error(self, error: Exception, raw_output: str) -> str:
"""Format error for LLM feedback."""
if isinstance(error, ValidationError):
error_messages = []
for err in error.errors():
field = ".".join(str(loc) for loc in err["loc"])
error_messages.append(f"- {err['msg']} in field '{field}'")
return "Schema validation errors:\n" + "\n".join(error_messages)
else:
return f"Parse error: {str(error)}"
class LLM:
def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048):
self.settings_obj = settings
@@ -30,11 +197,12 @@ class LLM:
self.temperature = temperature
self.max_tokens = max_tokens
# Configure llamaindex Settings
self._configure_llamaindex()
def _configure_llamaindex(self):
"""Configure llamaindex Settings with OpenAILike LLM"""
session_id = llm_session_id.get() or f"fallback-{uuid4().hex}"
Settings.llm = OpenAILike(
model=self.model_name,
api_base=self.url,
@@ -44,6 +212,7 @@ class LLM:
is_function_calling_model=False,
temperature=self.temperature,
max_tokens=self.max_tokens,
additional_kwargs={"extra_body": {"litellm_session_id": session_id}},
)
async def get_response(
@@ -61,43 +230,25 @@ class LLM:
output_cls: Type[T],
tone_name: str | None = None,
) -> T:
"""Get structured output from LLM for non-function-calling models"""
logger = logging.getLogger(__name__)
summarizer = TreeSummarize(verbose=True)
response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
output_parser = PydanticOutputParser(output_cls)
program = LLMTextCompletionProgram.from_defaults(
output_parser=output_parser,
prompt_template_str=STRUCTURED_RESPONSE_PROMPT_TEMPLATE,
verbose=False,
"""Get structured output from LLM with validation retry via Workflow."""
workflow = StructuredOutputWorkflow(
output_cls=output_cls,
max_retries=self.settings_obj.LLM_PARSE_MAX_RETRIES + 1,
timeout=120,
)
format_instructions = output_parser.format(
"Please structure the above information in the following JSON format:"
result = await workflow.run(
prompt=prompt,
texts=texts,
tone_name=tone_name,
)
try:
output = await program.acall(
analysis=str(response), format_instructions=format_instructions
if "error" in result:
error_msg = result["error"] or "Max retries exceeded"
raise LLMParseError(
output_cls=output_cls,
error_msg=error_msg,
attempts=result.get("attempts", 0),
)
except ValidationError as e:
# Extract the raw JSON from the error details
errors = e.errors()
if errors and "input" in errors[0]:
raw_json = errors[0]["input"]
logger.error(
f"JSON validation failed for {output_cls.__name__}. "
f"Full raw JSON output:\n{raw_json}\n"
f"Validation errors: {errors}"
)
else:
logger.error(
f"JSON validation failed for {output_cls.__name__}. "
f"Validation errors: {errors}"
)
raise
return output
return result["success"]
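
A sketch of the caller-side contract after this change; the TitleSummary fields here are invented for illustration, and llm stands for an LLM(settings) instance:

from pydantic import BaseModel, Field

from reflector.llm import LLMParseError  # raised by get_structured_response

class TitleSummary(BaseModel):
    title: str = Field(description="Short title")
    summary: str = Field(description="One-paragraph summary")

async def summarize(llm, texts: list[str]) -> TitleSummary:
    try:
        return await llm.get_structured_response(
            prompt="Summarize the meeting.",
            texts=texts,
            output_cls=TitleSummary,
        )
    except LLMParseError:
        # Raised only after all parse/validation attempts are exhausted;
        # network errors are retried inside OpenAILike and surface separately
        # (wrapped in WorkflowRuntimeError, per the tests below).
        raise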

View File

@@ -340,7 +340,6 @@ async def task_send_webhook_if_needed(*, transcript_id: str):
@asynctask
async def task_pipeline_file_process(*, transcript_id: str):
"""Celery task for file pipeline processing"""
transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript:
raise Exception(f"Transcript {transcript_id} not found")

View File

@@ -31,7 +31,6 @@ from reflector.processors import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.processors.types import TitleSummary
from reflector.processors.types import Transcript as TranscriptType
from reflector.settings import settings
from reflector.storage import Storage, get_transcripts_storage
from reflector.utils.daily import (
filter_cam_audio_tracks,
@@ -423,7 +422,15 @@ class PipelineMainMultitrack(PipelineMainBase):
# Open all containers with cleanup guaranteed
for i, url in enumerate(valid_track_urls):
try:
c = av.open(url)
c = av.open(
url,
options={
# it's trying to stream from s3 by default
"reconnect": "1",
"reconnect_streamed": "1",
"reconnect_delay_max": "5",
},
)
containers.append(c)
except Exception as e:
self.logger.warning(
@@ -452,6 +459,8 @@ class PipelineMainMultitrack(PipelineMainBase):
frame = next(dec)
except StopIteration:
active[i] = False
# causes stream to move on / unclogs memory
inputs[i].push(None)
continue
if frame.sample_rate != target_sample_rate:
@@ -471,8 +480,6 @@ class PipelineMainMultitrack(PipelineMainBase):
mixed.time_base = Fraction(1, target_sample_rate)
await writer.push(mixed)
for in_ctx in inputs:
in_ctx.push(None)
while True:
try:
mixed = sink.pull()
@@ -632,21 +639,11 @@ class PipelineMainMultitrack(PipelineMainBase):
transcript.data_path.mkdir(parents=True, exist_ok=True)
if settings.SKIP_MIXDOWN:
self.logger.warning(
"SKIP_MIXDOWN enabled: Skipping mixdown and waveform generation. "
"UI will have no audio playback or waveform.",
num_tracks=len(padded_track_urls),
transcript_id=transcript.id,
)
else:
mp3_writer = AudioFileWriterProcessor(
path=str(transcript.audio_mp3_filename),
on_duration=self.on_duration,
)
await self.mixdown_tracks(
padded_track_urls, mp3_writer, offsets_seconds=None
)
await self.mixdown_tracks(padded_track_urls, mp3_writer, offsets_seconds=None)
await mp3_writer.flush()
if not transcript.audio_mp3_filename.exists():
@@ -661,9 +658,7 @@ class PipelineMainMultitrack(PipelineMainBase):
await transcript_storage.put_file(storage_path, mp3_file)
mp3_url = await transcript_storage.get_file_url(storage_path)
await transcripts_controller.update(
transcript, {"audio_location": "storage"}
)
await transcripts_controller.update(transcript, {"audio_location": "storage"})
self.logger.info(
f"Uploaded mixed audio to storage",

View File

@@ -160,7 +160,10 @@ def dispatch_transcript_processing(config: ProcessingConfig) -> AsyncResult:
def task_is_scheduled_or_active(task_name: str, **kwargs):
inspect = celery.current_app.control.inspect()
for worker, tasks in (inspect.scheduled() | inspect.active()).items():
scheduled = inspect.scheduled() or {}
active = inspect.active() or {}
all = scheduled | active
for worker, tasks in all.items():
for task in tasks:
if task["name"] == task_name and task["kwargs"] == kwargs:
return True
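
The bug being sidestepped, in miniature: Celery's inspect methods return None when no worker replies, and dict-merge with None raises TypeError. A self-contained demonstration:

scheduled = None                 # what inspect.scheduled() returns with no reachable worker
active = {"worker@host": []}     # what inspect.active() returns with one idle worker

try:
    merged = scheduled | active  # the pre-fix expression
except TypeError as exc:
    print(f"old behaviour: {exc}")

merged = (scheduled or {}) | (active or {})  # the sidestepped version
print(merged)                                # {'worker@host': []}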

View File

@@ -74,6 +74,10 @@ class Settings(BaseSettings):
LLM_API_KEY: str | None = None
LLM_CONTEXT_WINDOW: int = 16000
LLM_PARSE_MAX_RETRIES: int = (
3 # Max retries for JSON/validation errors (total attempts = retries + 1)
)
# Diarization
DIARIZATION_ENABLED: bool = True
DIARIZATION_BACKEND: str = "modal"
@@ -138,14 +142,6 @@ class Settings(BaseSettings):
DAILY_WEBHOOK_UUID: str | None = (
None # Webhook UUID for this environment. Not used by production code
)
# Multitrack processing
# SKIP_MIXDOWN: When True, skips audio mixdown and waveform generation.
# Transcription still works using individual tracks. Useful for:
# - Diagnosing OOM issues in mixdown
# - Fast processing when audio playback is not needed
# Note: UI will have no audio playback or waveform when enabled.
SKIP_MIXDOWN: bool = True
# Platform Configuration
DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM
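
The retry arithmetic implied by the comment above, spelled out: StructuredOutputWorkflow counts total attempts, so llm.py passes LLM_PARSE_MAX_RETRIES + 1 as its max_retries (the tests below assert the same expected_attempts):

LLM_PARSE_MAX_RETRIES = 3                         # the setting's default
workflow_max_retries = LLM_PARSE_MAX_RETRIES + 1  # value handed to the workflow
total_json_attempts = workflow_max_retries        # one initial try + three retries
assert total_json_attempts == 4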

View File

@@ -31,6 +31,7 @@ class DailyClient(VideoPlatformClient):
PLATFORM_NAME: Platform = "daily"
TIMESTAMP_FORMAT = "%Y%m%d%H%M%S"
RECORDING_NONE: RecordingType = "none"
RECORDING_LOCAL: RecordingType = "local"
RECORDING_CLOUD: RecordingType = "cloud"
def __init__(self, config: VideoPlatformConfig):
@@ -54,19 +55,23 @@ class DailyClient(VideoPlatformClient):
timestamp = datetime.now().strftime(self.TIMESTAMP_FORMAT)
room_name = f"{room_name_prefix}{ROOM_PREFIX_SEPARATOR}{timestamp}"
enable_recording = None
if room.recording_type == self.RECORDING_LOCAL:
enable_recording = "local"
elif room.recording_type == self.RECORDING_CLOUD:
enable_recording = "raw-tracks"
properties = RoomProperties(
enable_recording="raw-tracks"
if room.recording_type != self.RECORDING_NONE
else False,
enable_recording=enable_recording,
enable_chat=True,
enable_screenshare=True,
enable_knocking=room.is_locked,
start_video_off=False,
start_audio_off=False,
exp=int(end_date.timestamp()),
)
# Only configure recordings_bucket if recording is enabled
if room.recording_type != self.RECORDING_NONE:
if room.recording_type == self.RECORDING_CLOUD:
daily_storage = get_dailyco_storage()
assert daily_storage.bucket_name, "S3 bucket must be configured"
properties.recordings_bucket = RecordingsBucketConfig(
@@ -172,15 +177,16 @@ class DailyClient(VideoPlatformClient):
async def create_meeting_token(
self,
room_name: DailyRoomName,
enable_recording: bool,
start_cloud_recording: bool,
enable_recording_ui: bool,
user_id: NonEmptyString | None = None,
is_owner: bool = False,
) -> NonEmptyString:
properties = MeetingTokenProperties(
room_name=room_name,
user_id=user_id,
start_cloud_recording=enable_recording,
enable_recording_ui=False,
start_cloud_recording=start_cloud_recording,
enable_recording_ui=enable_recording_ui,
is_owner=is_owner,
)
request = CreateMeetingTokenRequest(properties=properties)

View File

@@ -89,7 +89,7 @@ class CreateRoom(BaseModel):
ics_url: Optional[str] = None
ics_fetch_interval: int = 300
ics_enabled: bool = False
platform: Optional[Platform] = None
platform: Platform
class UpdateRoom(BaseModel):
@@ -248,7 +248,7 @@ async def rooms_create(
ics_url=room.ics_url,
ics_fetch_interval=room.ics_fetch_interval,
ics_enabled=room.ics_enabled,
platform=room.platform or settings.DEFAULT_VIDEO_PLATFORM,
platform=room.platform,
)
@@ -310,6 +310,22 @@ async def rooms_create_meeting(
room=room, current_time=current_time
)
if meeting is not None:
settings_match = (
meeting.is_locked == room.is_locked
and meeting.room_mode == room.room_mode
and meeting.recording_type == room.recording_type
and meeting.recording_trigger == room.recording_trigger
and meeting.platform == room.platform
)
if not settings_match:
logger.info(
f"Room settings changed for {room_name}, creating new meeting",
room_id=room.id,
old_meeting_id=meeting.id,
)
meeting = None
if meeting is None:
end_date = current_time + timedelta(hours=8)
@@ -549,21 +565,16 @@ async def rooms_join_meeting(
if meeting.end_date <= current_time:
raise HTTPException(status_code=400, detail="Meeting has ended")
if meeting.platform == "daily":
if meeting.platform == "daily" and user_id is not None:
client = create_platform_client(meeting.platform)
enable_recording = room.recording_trigger != "none"
token = await client.create_meeting_token(
meeting.room_name,
enable_recording=enable_recording,
start_cloud_recording=meeting.recording_type == "cloud",
enable_recording_ui=meeting.recording_type == "local",
user_id=user_id,
is_owner=user_id == room.user_id,
)
meeting = meeting.model_copy()
meeting.room_url = add_query_param(meeting.room_url, "t", token)
if meeting.host_room_url:
meeting.host_room_url = add_query_param(meeting.host_room_url, "t", token)
if user_id != room.user_id and meeting.platform == "whereby":
meeting.host_room_url = ""
return meeting

View File

@@ -37,6 +37,7 @@ from reflector.db.transcripts import (
TranscriptTopic,
transcripts_controller,
)
from reflector.db.users import user_controller
from reflector.processors.types import Transcript as ProcessorTranscript
from reflector.processors.types import Word
from reflector.schemas.transcript_formats import TranscriptFormat, TranscriptSegment
@@ -111,8 +112,12 @@ class GetTranscriptMinimal(BaseModel):
audio_deleted: bool | None = None
class TranscriptParticipantWithEmail(TranscriptParticipant):
email: str | None = None
class GetTranscriptWithParticipants(GetTranscriptMinimal):
participants: list[TranscriptParticipant] | None
participants: list[TranscriptParticipantWithEmail] | None
class GetTranscriptWithText(GetTranscriptWithParticipants):
@@ -468,6 +473,18 @@ async def transcript_get(
is_multitrack = await _get_is_multitrack(transcript)
participants = []
if transcript.participants:
user_ids = [p.user_id for p in transcript.participants if p.user_id is not None]
users_dict = await user_controller.get_by_ids(user_ids) if user_ids else {}
for p in transcript.participants:
user = users_dict.get(p.user_id) if p.user_id else None
participants.append(
TranscriptParticipantWithEmail(
**p.model_dump(), email=user.email if user else None
)
)
base_data = {
"id": transcript.id,
"user_id": transcript.user_id,
@@ -487,7 +504,7 @@ async def transcript_get(
"source_kind": transcript.source_kind,
"room_id": transcript.room_id,
"audio_deleted": transcript.audio_deleted,
"participants": transcript.participants,
"participants": participants,
}
if transcript_format == "text":

View File

@@ -318,6 +318,14 @@ async def dummy_storage():
yield
@pytest.fixture
def test_settings():
"""Provide isolated settings for tests to avoid modifying global settings"""
from reflector.settings import Settings
return Settings()
@pytest.fixture(scope="session")
def celery_enable_logging():
return True

View File

@@ -0,0 +1,357 @@
"""Tests for LLM parse error recovery using llama-index Workflow"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from pydantic import BaseModel, Field
from workflows.errors import WorkflowRuntimeError
from reflector.llm import LLM, LLMParseError, StructuredOutputWorkflow
class TestResponse(BaseModel):
"""Test response model for structured output"""
title: str = Field(description="A title")
summary: str = Field(description="A summary")
confidence: float = Field(description="Confidence score", ge=0, le=1)
def make_completion_response(text: str):
"""Create a mock CompletionResponse with .text attribute"""
response = MagicMock()
response.text = text
return response
class TestLLMParseErrorRecovery:
"""Test parse error recovery with Workflow feedback loop"""
@pytest.mark.asyncio
async def test_parse_error_recovery_with_feedback(self, test_settings):
"""Test that parse errors trigger retry with error feedback"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
# TreeSummarize returns plain text analysis (step 1)
mock_summarizer.aget_response = AsyncMock(
return_value="The analysis shows a test with summary and high confidence."
)
call_count = {"count": 0}
async def acomplete_handler(prompt, *args, **kwargs):
call_count["count"] += 1
if call_count["count"] == 1:
# First JSON formatting call returns invalid JSON
return make_completion_response('{"title": "Test"}')
else:
# Second call should have error feedback in prompt
assert "Your previous response could not be parsed:" in prompt
assert '{"title": "Test"}' in prompt
assert "Error:" in prompt
assert "Please try again" in prompt
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
assert result.summary == "Summary"
assert result.confidence == 0.95
# TreeSummarize called once, Settings.llm.acomplete called twice
assert mock_summarizer.aget_response.call_count == 1
assert call_count["count"] == 2
@pytest.mark.asyncio
async def test_max_parse_retry_attempts(self, test_settings):
"""Test that parse error retry stops after max attempts"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Always return invalid JSON from acomplete
mock_settings.llm.acomplete = AsyncMock(
return_value=make_completion_response(
'{"invalid": "missing required fields"}'
)
)
with pytest.raises(LLMParseError, match="Failed to parse"):
await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
expected_attempts = test_settings.LLM_PARSE_MAX_RETRIES + 1
# TreeSummarize called once, acomplete called max_retries times
assert mock_summarizer.aget_response.call_count == 1
assert mock_settings.llm.acomplete.call_count == expected_attempts
@pytest.mark.asyncio
async def test_raw_response_logging_on_parse_error(self, test_settings, caplog):
"""Test that raw response is logged when parse error occurs"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
caplog.at_level("ERROR"),
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
call_count = {"count": 0}
async def acomplete_handler(*args, **kwargs):
call_count["count"] += 1
if call_count["count"] == 1:
return make_completion_response('{"title": "Test"}') # Invalid
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
error_logs = [r for r in caplog.records if r.levelname == "ERROR"]
raw_response_logged = any("Raw response:" in r.message for r in error_logs)
assert raw_response_logged, "Raw response should be logged on parse error"
@pytest.mark.asyncio
async def test_multiple_validation_errors_in_feedback(self, test_settings):
"""Test that validation errors are included in feedback"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
call_count = {"count": 0}
async def acomplete_handler(prompt, *args, **kwargs):
call_count["count"] += 1
if call_count["count"] == 1:
# Missing title and summary
return make_completion_response('{"confidence": 0.5}')
else:
# Should have schema validation errors in prompt
assert (
"Schema validation errors" in prompt
or "error" in prompt.lower()
)
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
assert call_count["count"] == 2
@pytest.mark.asyncio
async def test_success_on_first_attempt(self, test_settings):
"""Test that no retry happens when first attempt succeeds"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
mock_settings.llm.acomplete = AsyncMock(
return_value=make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
assert result.summary == "Summary"
assert result.confidence == 0.95
assert mock_summarizer.aget_response.call_count == 1
assert mock_settings.llm.acomplete.call_count == 1
class TestStructuredOutputWorkflow:
"""Direct tests for the StructuredOutputWorkflow"""
@pytest.mark.asyncio
async def test_workflow_retries_on_validation_error(self):
"""Test workflow retries when validation fails"""
workflow = StructuredOutputWorkflow(
output_cls=TestResponse,
max_retries=3,
timeout=30,
)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
call_count = {"count": 0}
async def acomplete_handler(*args, **kwargs):
call_count["count"] += 1
if call_count["count"] < 2:
return make_completion_response('{"title": "Only title"}')
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.9}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await workflow.run(
prompt="Extract data",
texts=["Some text"],
tone_name=None,
)
assert "success" in result
assert result["success"].title == "Test"
assert call_count["count"] == 2
@pytest.mark.asyncio
async def test_workflow_returns_error_after_max_retries(self):
"""Test workflow returns error after exhausting retries"""
workflow = StructuredOutputWorkflow(
output_cls=TestResponse,
max_retries=2,
timeout=30,
)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Always return invalid JSON
mock_settings.llm.acomplete = AsyncMock(
return_value=make_completion_response('{"invalid": true}')
)
result = await workflow.run(
prompt="Extract data",
texts=["Some text"],
tone_name=None,
)
assert "error" in result
# TreeSummarize called once, acomplete called max_retries times
assert mock_summarizer.aget_response.call_count == 1
assert mock_settings.llm.acomplete.call_count == 2
class TestNetworkErrorRetries:
"""Test that network error retries are handled by OpenAILike, not Workflow"""
@pytest.mark.asyncio
async def test_network_error_propagates_after_openai_retries(self, test_settings):
"""Test that network errors are retried by OpenAILike and then propagate.
Network retries are handled by OpenAILike (max_retries=3), not by our
StructuredOutputWorkflow. This test verifies that network errors propagate
up after OpenAILike exhausts its retries.
"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Simulate network error from acomplete (after OpenAILike retries exhausted)
network_error = ConnectionError("Connection refused")
mock_settings.llm.acomplete = AsyncMock(side_effect=network_error)
# Network error wrapped in WorkflowRuntimeError
with pytest.raises(WorkflowRuntimeError, match="Connection refused"):
await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
# acomplete called only once - network error propagates, not retried by Workflow
assert mock_settings.llm.acomplete.call_count == 1
@pytest.mark.asyncio
async def test_network_error_not_retried_by_workflow(self, test_settings):
"""Test that Workflow does NOT retry network errors (OpenAILike handles those).
This verifies the separation of concerns:
- StructuredOutputWorkflow: retries parse/validation errors
- OpenAILike: retries network errors (internally, max_retries=3)
"""
workflow = StructuredOutputWorkflow(
output_cls=TestResponse,
max_retries=3,
timeout=30,
)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Network error should propagate immediately, not trigger Workflow retry
mock_settings.llm.acomplete = AsyncMock(
side_effect=TimeoutError("Request timed out")
)
# Network error wrapped in WorkflowRuntimeError
with pytest.raises(WorkflowRuntimeError, match="Request timed out"):
await workflow.run(
prompt="Extract data",
texts=["Some text"],
tone_name=None,
)
# Only called once - Workflow doesn't retry network errors
assert mock_settings.llm.acomplete.call_count == 1

View File

@@ -15,9 +15,12 @@ import {
createListCollection,
useDisclosure,
Tabs,
Popover,
Text,
HStack,
} from "@chakra-ui/react";
import { useEffect, useMemo, useState } from "react";
import { LuEye, LuEyeOff } from "react-icons/lu";
import { LuEye, LuEyeOff, LuInfo } from "react-icons/lu";
import useRoomList from "./useRoomList";
import type { components } from "../../reflector-api";
import {
@@ -67,6 +70,11 @@ const recordingTypeOptions: SelectOption[] = [
{ label: "Cloud", value: "cloud" },
];
const platformOptions: SelectOption[] = [
{ label: "Whereby", value: "whereby" },
{ label: "Daily", value: "daily" },
];
const roomInitialState = {
name: "",
zulipAutoPost: false,
@@ -82,6 +90,7 @@ const roomInitialState = {
icsUrl: "",
icsEnabled: false,
icsFetchInterval: 5,
platform: "whereby",
};
export default function RoomsList() {
@@ -99,6 +108,11 @@ export default function RoomsList() {
const recordingTypeCollection = createListCollection({
items: recordingTypeOptions,
});
const platformCollection = createListCollection({
items: platformOptions,
});
const [roomInput, setRoomInput] = useState<null | typeof roomInitialState>(
null,
);
@@ -143,15 +157,24 @@ export default function RoomsList() {
zulipStream: detailedEditedRoom.zulip_stream,
zulipTopic: detailedEditedRoom.zulip_topic,
isLocked: detailedEditedRoom.is_locked,
roomMode: detailedEditedRoom.room_mode,
roomMode:
detailedEditedRoom.platform === "daily"
? "group"
: detailedEditedRoom.room_mode,
recordingType: detailedEditedRoom.recording_type,
recordingTrigger: detailedEditedRoom.recording_trigger,
recordingTrigger:
detailedEditedRoom.platform === "daily"
? detailedEditedRoom.recording_type === "cloud"
? "automatic-2nd-participant"
: "none"
: detailedEditedRoom.recording_trigger,
isShared: detailedEditedRoom.is_shared,
webhookUrl: detailedEditedRoom.webhook_url || "",
webhookSecret: detailedEditedRoom.webhook_secret || "",
icsUrl: detailedEditedRoom.ics_url || "",
icsEnabled: detailedEditedRoom.ics_enabled || false,
icsFetchInterval: detailedEditedRoom.ics_fetch_interval || 5,
platform: detailedEditedRoom.platform,
}
: null,
[detailedEditedRoom],
@@ -277,21 +300,32 @@ export default function RoomsList() {
return;
}
const platform: "whereby" | "daily" | null =
room.platform === "whereby" || room.platform === "daily"
? room.platform
: null;
const roomData = {
name: room.name,
zulip_auto_post: room.zulipAutoPost,
zulip_stream: room.zulipStream,
zulip_topic: room.zulipTopic,
is_locked: room.isLocked,
room_mode: room.roomMode,
room_mode: platform === "daily" ? "group" : room.roomMode,
recording_type: room.recordingType,
recording_trigger: room.recordingTrigger,
recording_trigger:
platform === "daily"
? room.recordingType === "cloud"
? "automatic-2nd-participant"
: "none"
: room.recordingTrigger,
is_shared: room.isShared,
webhook_url: room.webhookUrl,
webhook_secret: room.webhookSecret,
ics_url: room.icsUrl,
ics_enabled: room.icsEnabled,
ics_fetch_interval: room.icsFetchInterval,
platform,
};
if (isEditing) {
@@ -339,15 +373,21 @@ export default function RoomsList() {
zulipStream: roomData.zulip_stream,
zulipTopic: roomData.zulip_topic,
isLocked: roomData.is_locked,
roomMode: roomData.room_mode,
roomMode: roomData.platform === "daily" ? "group" : roomData.room_mode, // Daily always uses 2-200
recordingType: roomData.recording_type,
recordingTrigger: roomData.recording_trigger,
recordingTrigger:
roomData.platform === "daily"
? roomData.recording_type === "cloud"
? "automatic-2nd-participant"
: "none"
: roomData.recording_trigger,
isShared: roomData.is_shared,
webhookUrl: roomData.webhook_url || "",
webhookSecret: roomData.webhook_secret || "",
icsUrl: roomData.ics_url || "",
icsEnabled: roomData.ics_enabled || false,
icsFetchInterval: roomData.ics_fetch_interval || 5,
platform: roomData.platform,
});
setEditRoomId(roomId);
setIsEditing(true);
@@ -482,6 +522,52 @@ export default function RoomsList() {
)}
</Field.Root>
<Field.Root mt={4}>
<Field.Label>Platform</Field.Label>
<Select.Root
value={[room.platform]}
onValueChange={(e) => {
const newPlatform = e.value[0] as "whereby" | "daily";
const updates: Partial<typeof room> = {
platform: newPlatform,
};
if (newPlatform === "daily") {
updates.roomMode = "group";
updates.recordingTrigger =
room.recordingType === "cloud"
? "automatic-2nd-participant"
: "none";
} else {
if (room.recordingType !== "cloud") {
updates.recordingTrigger = "none";
}
}
setRoomInput({ ...room, ...updates });
}}
collection={platformCollection}
>
<Select.HiddenSelect />
<Select.Control>
<Select.Trigger>
<Select.ValueText placeholder="Select platform" />
</Select.Trigger>
<Select.IndicatorGroup>
<Select.Indicator />
</Select.IndicatorGroup>
</Select.Control>
<Select.Positioner>
<Select.Content>
{platformOptions.map((option) => (
<Select.Item key={option.value} item={option}>
{option.label}
<Select.ItemIndicator />
</Select.Item>
))}
</Select.Content>
</Select.Positioner>
</Select.Root>
</Field.Root>
<Field.Root mt={4}>
<Checkbox.Root
name="isLocked"
@@ -504,6 +590,7 @@ export default function RoomsList() {
<Checkbox.Label>Locked room</Checkbox.Label>
</Checkbox.Root>
</Field.Root>
{room.platform !== "daily" && (
<Field.Root mt={4}>
<Field.Label>Room size</Field.Label>
<Select.Root
@@ -534,20 +621,64 @@ export default function RoomsList() {
</Select.Positioner>
</Select.Root>
</Field.Root>
)}
<Field.Root mt={4}>
<HStack gap={2} alignItems="center">
<Field.Label>Recording type</Field.Label>
<Popover.Root>
<Popover.Trigger asChild>
<IconButton
aria-label="Recording type help"
variant="ghost"
size="xs"
colorPalette="gray"
>
<LuInfo />
</IconButton>
</Popover.Trigger>
<Popover.Positioner>
<Popover.Content>
<Popover.Arrow />
<Popover.Body>
<Text fontSize="sm" lineHeight="1.6">
<strong>None:</strong> No recording will be
created.
<br />
<br />
<strong>Local:</strong> Recording happens on
each participant's device. Files are saved
locally.
<br />
<br />
<strong>Cloud:</strong> Recording happens on
the platform's servers and is available after
the meeting ends.
</Text>
</Popover.Body>
</Popover.Content>
</Popover.Positioner>
</Popover.Root>
</HStack>
<Select.Root
value={[room.recordingType]}
onValueChange={(e) =>
setRoomInput({
...room,
recordingType: e.value[0],
recordingTrigger:
e.value[0] !== "cloud"
onValueChange={(e) => {
const newRecordingType = e.value[0];
const updates: Partial<typeof room> = {
recordingType: newRecordingType,
};
if (room.platform === "daily") {
updates.recordingTrigger =
newRecordingType === "cloud"
? "automatic-2nd-participant"
: "none";
} else {
updates.recordingTrigger =
newRecordingType !== "cloud"
? "none"
: room.recordingTrigger,
})
: room.recordingTrigger;
}
setRoomInput({ ...room, ...updates });
}}
collection={recordingTypeCollection}
>
<Select.HiddenSelect />
@@ -571,8 +702,45 @@ export default function RoomsList() {
</Select.Positioner>
</Select.Root>
</Field.Root>
{room.recordingType === "cloud" &&
room.platform !== "daily" && (
<Field.Root mt={4}>
<Field.Label>Cloud recording start trigger</Field.Label>
<HStack gap={2} alignItems="center">
<Field.Label>Recording start trigger</Field.Label>
<Popover.Root>
<Popover.Trigger asChild>
<IconButton
aria-label="Recording start trigger help"
variant="ghost"
size="xs"
colorPalette="gray"
>
<LuInfo />
</IconButton>
</Popover.Trigger>
<Popover.Positioner>
<Popover.Content>
<Popover.Arrow />
<Popover.Body>
<Text fontSize="sm" lineHeight="1.6">
<strong>None:</strong> Recording must be
started manually by a participant.
<br />
<br />
<strong>Prompt:</strong> Participants will
be prompted to start recording when they
join.
<br />
<br />
<strong>Automatic:</strong> Recording
starts automatically when a second
participant joins.
</Text>
</Popover.Body>
</Popover.Content>
</Popover.Positioner>
</Popover.Root>
</HStack>
<Select.Root
value={[room.recordingTrigger]}
onValueChange={(e) =>
@@ -582,7 +750,6 @@ export default function RoomsList() {
})
}
collection={recordingTriggerCollection}
disabled={room.recordingType !== "cloud"}
>
<Select.HiddenSelect />
<Select.Control>
@@ -605,6 +772,7 @@ export default function RoomsList() {
</Select.Positioner>
</Select.Root>
</Field.Root>
)}
<Field.Root mt={4}>
<Checkbox.Root

View File

@@ -52,7 +52,7 @@ export default function DailyRoom({ meeting }: DailyRoomProps) {
join();
}, [meeting?.id, roomName, authLastUserId]);
const roomUrl = joinedMeeting?.host_room_url || joinedMeeting?.room_url;
const roomUrl = joinedMeeting?.room_url;
const handleLeave = useCallback(() => {
router.push("/browse");
@@ -89,14 +89,17 @@ export default function DailyRoom({ meeting }: DailyRoomProps) {
frame.on("left-meeting", handleLeave);
// TODO this method must not ignore no-recording option
// TODO this method is here to make dev environments work in some cases (not examined which)
frame.on("joined-meeting", async () => {
try {
assertExists(
const frameInstance = assertExists(
frame,
"frame object got lost somewhere after frame.on was called",
).startRecording({ type: "raw-tracks" });
);
if (meeting.recording_type === "cloud") {
console.log("Starting cloud recording");
await frameInstance.startRecording({ type: "raw-tracks" });
}
} catch (error) {
console.error("Failed to start recording:", error);
}

View File

@@ -31,7 +31,7 @@
"ioredis": "^5.7.0",
"jest-worker": "^29.6.2",
"lucide-react": "^0.525.0",
"next": "^15.5.3",
"next": "^15.5.7",
"next-auth": "^4.24.7",
"next-themes": "^0.4.6",
"nuqs": "^2.4.3",

www/pnpm-lock.yaml (generated)
View File

@@ -27,7 +27,7 @@ importers:
version: 0.2.3(@fortawesome/fontawesome-svg-core@6.7.2)(react@18.3.1)
"@sentry/nextjs":
specifier: ^10.11.0
version: 10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)
version: 10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)
"@tanstack/react-query":
specifier: ^5.85.9
version: 5.85.9(react@18.3.1)
@@ -62,17 +62,17 @@ importers:
specifier: ^0.525.0
version: 0.525.0(react@18.3.1)
next:
specifier: ^15.5.3
version: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
specifier: ^15.5.7
version: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
next-auth:
specifier: ^4.24.7
version: 4.24.11(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
version: 4.24.11(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
next-themes:
specifier: ^0.4.6
version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
nuqs:
specifier: ^2.4.3
version: 2.4.3(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)
version: 2.4.3(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)
openapi-fetch:
specifier: ^0.14.0
version: 0.14.0
@@ -1184,10 +1184,10 @@ packages:
integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==,
}
"@next/env@15.5.3":
"@next/env@15.5.7":
resolution:
{
integrity: sha512-RSEDTRqyihYXygx/OJXwvVupfr9m04+0vH8vyy0HfZ7keRto6VX9BbEk0J2PUk0VGy6YhklJUSrgForov5F9pw==,
integrity: sha512-4h6Y2NyEkIEN7Z8YxkA27pq6zTkS09bUSYC0xjd0NpwFxjnIKeZEeH591o5WECSmjpUhLn3H2QLJcDye3Uzcvg==,
}
"@next/eslint-plugin-next@15.5.3":
@@ -1196,73 +1196,73 @@ packages:
integrity: sha512-SdhaKdko6dpsSr0DldkESItVrnPYB1NS2NpShCSX5lc7SSQmLZt5Mug6t2xbiuVWEVDLZSuIAoQyYVBYp0dR5g==,
}
"@next/swc-darwin-arm64@15.5.3":
"@next/swc-darwin-arm64@15.5.7":
resolution:
{
integrity: sha512-nzbHQo69+au9wJkGKTU9lP7PXv0d1J5ljFpvb+LnEomLtSbJkbZyEs6sbF3plQmiOB2l9OBtN2tNSvCH1nQ9Jg==,
integrity: sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw==,
}
engines: { node: ">= 10" }
cpu: [arm64]
os: [darwin]
"@next/swc-darwin-x64@15.5.3":
"@next/swc-darwin-x64@15.5.7":
resolution:
{
integrity: sha512-w83w4SkOOhekJOcA5HBvHyGzgV1W/XvOfpkrxIse4uPWhYTTRwtGEM4v/jiXwNSJvfRvah0H8/uTLBKRXlef8g==,
integrity: sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg==,
}
engines: { node: ">= 10" }
cpu: [x64]
os: [darwin]
"@next/swc-linux-arm64-gnu@15.5.3":
"@next/swc-linux-arm64-gnu@15.5.7":
resolution:
{
integrity: sha512-+m7pfIs0/yvgVu26ieaKrifV8C8yiLe7jVp9SpcIzg7XmyyNE7toC1fy5IOQozmr6kWl/JONC51osih2RyoXRw==,
integrity: sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA==,
}
engines: { node: ">= 10" }
cpu: [arm64]
os: [linux]
"@next/swc-linux-arm64-musl@15.5.3":
"@next/swc-linux-arm64-musl@15.5.7":
resolution:
{
integrity: sha512-u3PEIzuguSenoZviZJahNLgCexGFhso5mxWCrrIMdvpZn6lkME5vc/ADZG8UUk5K1uWRy4hqSFECrON6UKQBbQ==,
integrity: sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==,
}
engines: { node: ">= 10" }
cpu: [arm64]
os: [linux]
"@next/swc-linux-x64-gnu@15.5.3":
"@next/swc-linux-x64-gnu@15.5.7":
resolution:
{
integrity: sha512-lDtOOScYDZxI2BENN9m0pfVPJDSuUkAD1YXSvlJF0DKwZt0WlA7T7o3wrcEr4Q+iHYGzEaVuZcsIbCps4K27sA==,
integrity: sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==,
}
engines: { node: ">= 10" }
cpu: [x64]
os: [linux]
"@next/swc-linux-x64-musl@15.5.3":
"@next/swc-linux-x64-musl@15.5.7":
resolution:
{
integrity: sha512-9vWVUnsx9PrY2NwdVRJ4dUURAQ8Su0sLRPqcCCxtX5zIQUBES12eRVHq6b70bbfaVaxIDGJN2afHui0eDm+cLg==,
integrity: sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==,
}
engines: { node: ">= 10" }
cpu: [x64]
os: [linux]
"@next/swc-win32-arm64-msvc@15.5.3":
"@next/swc-win32-arm64-msvc@15.5.7":
resolution:
{
integrity: sha512-1CU20FZzY9LFQigRi6jM45oJMU3KziA5/sSG+dXeVaTm661snQP6xu3ykGxxwU5sLG3sh14teO/IOEPVsQMRfA==,
integrity: sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==,
}
engines: { node: ">= 10" }
cpu: [arm64]
os: [win32]
"@next/swc-win32-x64-msvc@15.5.3":
"@next/swc-win32-x64-msvc@15.5.7":
resolution:
{
integrity: sha512-JMoLAq3n3y5tKXPQwCK5c+6tmwkuFDa2XAxz8Wm4+IVthdBZdZGh+lmiLUHg9f9IDwIQpUjp+ysd6OkYTyZRZw==,
integrity: sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw==,
}
engines: { node: ">= 10" }
cpu: [x64]
@@ -6863,10 +6863,10 @@ packages:
react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
next@15.5.3:
next@15.5.7:
resolution:
{
integrity: sha512-r/liNAx16SQj4D+XH/oI1dlpv9tdKJ6cONYPwwcCC46f2NjpaRWY+EKCzULfgQYV6YKXjHBchff2IZBSlZmJNw==,
integrity: sha512-+t2/0jIJ48kUpGKkdlhgkv+zPTEOoXyr60qXe68eB/pl3CMJaLeIGjzp5D6Oqt25hCBiBTt8wEeeAzfJvUKnPQ==,
}
engines: { node: ^18.18.0 || ^19.8.0 || >= 20.0.0 }
hasBin: true
@@ -9877,34 +9877,34 @@ snapshots:
"@tybys/wasm-util": 0.10.0
optional: true
"@next/env@15.5.3": {}
"@next/env@15.5.7": {}
"@next/eslint-plugin-next@15.5.3":
dependencies:
fast-glob: 3.3.1
"@next/swc-darwin-arm64@15.5.3":
"@next/swc-darwin-arm64@15.5.7":
optional: true
"@next/swc-darwin-x64@15.5.3":
"@next/swc-darwin-x64@15.5.7":
optional: true
"@next/swc-linux-arm64-gnu@15.5.3":
"@next/swc-linux-arm64-gnu@15.5.7":
optional: true
"@next/swc-linux-arm64-musl@15.5.3":
"@next/swc-linux-arm64-musl@15.5.7":
optional: true
"@next/swc-linux-x64-gnu@15.5.3":
"@next/swc-linux-x64-gnu@15.5.7":
optional: true
"@next/swc-linux-x64-musl@15.5.3":
"@next/swc-linux-x64-musl@15.5.7":
optional: true
"@next/swc-win32-arm64-msvc@15.5.3":
"@next/swc-win32-arm64-msvc@15.5.7":
optional: true
"@next/swc-win32-x64-msvc@15.5.3":
"@next/swc-win32-x64-msvc@15.5.7":
optional: true
"@nodelib/fs.scandir@2.1.5":
@@ -10684,7 +10684,7 @@ snapshots:
"@sentry/core@8.55.0": {}
"@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)":
"@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)":
dependencies:
"@opentelemetry/api": 1.9.0
"@opentelemetry/semantic-conventions": 1.37.0
@@ -10698,7 +10698,7 @@ snapshots:
"@sentry/vercel-edge": 10.11.0
"@sentry/webpack-plugin": 4.3.0(webpack@5.101.3)
chalk: 3.0.0
next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
resolve: 1.22.8
rollup: 4.50.1
stacktrace-parser: 0.1.11
@@ -14093,13 +14093,13 @@ snapshots:
neo-async@2.6.2: {}
next-auth@4.24.11(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
next-auth@4.24.11(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
"@babel/runtime": 7.28.2
"@panva/hkdf": 1.2.1
cookie: 0.7.2
jose: 4.15.9
next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
oauth: 0.9.15
openid-client: 5.7.1
preact: 10.27.0
@@ -14113,9 +14113,9 @@ snapshots:
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0):
next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0):
dependencies:
"@next/env": 15.5.3
"@next/env": 15.5.7
"@swc/helpers": 0.5.15
caniuse-lite: 1.0.30001734
postcss: 8.4.31
@@ -14123,14 +14123,14 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
styled-jsx: 5.1.6(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react@18.3.1)
optionalDependencies:
"@next/swc-darwin-arm64": 15.5.3
"@next/swc-darwin-x64": 15.5.3
"@next/swc-linux-arm64-gnu": 15.5.3
"@next/swc-linux-arm64-musl": 15.5.3
"@next/swc-linux-x64-gnu": 15.5.3
"@next/swc-linux-x64-musl": 15.5.3
"@next/swc-win32-arm64-msvc": 15.5.3
"@next/swc-win32-x64-msvc": 15.5.3
"@next/swc-darwin-arm64": 15.5.7
"@next/swc-darwin-x64": 15.5.7
"@next/swc-linux-arm64-gnu": 15.5.7
"@next/swc-linux-arm64-musl": 15.5.7
"@next/swc-linux-x64-gnu": 15.5.7
"@next/swc-linux-x64-musl": 15.5.7
"@next/swc-win32-arm64-msvc": 15.5.7
"@next/swc-win32-x64-msvc": 15.5.7
"@opentelemetry/api": 1.9.0
sass: 1.90.0
sharp: 0.34.3
@@ -14159,12 +14159,12 @@ snapshots:
dependencies:
path-key: 3.1.1
nuqs@2.4.3(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1):
nuqs@2.4.3(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1):
dependencies:
mitt: 3.0.1
react: 18.3.1
optionalDependencies:
next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
oauth@0.9.15: {}