Compare commits

..

12 Commits

Author SHA1 Message Date
a47a5f5781 chore(main): release 0.23.1 (#784) 2025-12-11 12:43:25 +01:00
0eba147018 fix: populate room_name in transcript GET endpoint (#783)
Fixes monadical/internalai#14
2025-12-11 12:37:59 +01:00
18a27f7b45 Fix image tags (#781) 2025-12-10 13:57:13 -05:00
32a049c134 chore(main): release 0.23.0 (#770) 2025-12-10 13:42:28 +01:00
91650ec65f fix: deploy frontend to coolify (#779)
* Ignore act secrets

* Deploy frontend container to ECR

* Use published image

* Remove ecr workflows

* Trigger coolify deployment

* Deploy on release please pr merge

* Upgrade nextjs

* Update secrets example
2025-12-10 13:35:53 +01:00
Igor Monadical
61f0e29d4c feat: llm retries (#739)
* llm retries no-mistakes

* self-review (no-mistakes)

* self-review (no-mistakes)

* bigger retry intervals by default

* tests and dry

* restore to main state

* parse retries

* json retries (no-mistakes)

* json retries (no-mistakes)

* json retries (no-mistakes)

* json retries (no-mistakes) self-review

* additional network retry test

* more lindt

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2025-12-05 12:08:21 -05:00
Igor Monadical
ec17ed7b58 fix: celery inspect bug sidestep in restart script (#766)
* celery bug sidestep

* Update server/reflector/services/transcript_process.py

Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com>

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
Co-authored-by: pr-agent-monadical[bot] <198624643+pr-agent-monadical[bot]@users.noreply.github.com>
2025-12-04 09:22:51 -05:00
Igor Monadical
00549f153a feat: dockerhub ci (#772)
* dockerhub ci

* ci test

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2025-12-03 13:26:08 -05:00
3ad78be762 fix: hide rooms settings instead of disabling (#763)
* Hide rooms settings instead of disabling

* Reset recording trigger
2025-12-03 16:49:17 +01:00
d3a5cd12d2 fix: return participant emails from transcript endpoint (#769)
* Return participant emails from transcript endpoint

* Fix broken test
2025-12-03 16:47:56 +01:00
af921ce927 chore(main): release 0.22.4 (#765) 2025-12-02 17:11:48 -05:00
Igor Monadical
bd5df1ce2e fix: Multitrack mixdown optimisation 2 (#764)
* Revert "fix: Skip mixdown for multitrack (#760)"

This reverts commit b51b7aa917.

* multitrack mixdown optimisation

* return the "good" ui part of "skip mixdown"

---------

Co-authored-by: Igor Loskutov <igor.loskutoff@gmail.com>
2025-12-02 17:10:06 -05:00
22 changed files with 1048 additions and 333 deletions

View File

@@ -1,90 +0,0 @@
name: Build container/push to container registry

# Manually-triggered build that pushes per-arch images to ECR,
# then assembles a multi-arch manifest from them.
on: [workflow_dispatch]

env:
  # 950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector
  AWS_REGION: us-east-1
  ECR_REPOSITORY: reflector

jobs:
  # Build one image per architecture on a matching native runner.
  build:
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: linux-amd64
            arch: amd64
          - platform: linux/arm64
            runner: linux-arm64
            arch: arm64
    runs-on: ${{ matrix.runner }}
    permissions:
      contents: read
    outputs:
      registry: ${{ steps.login-ecr.outputs.registry }}
    steps:
      - uses: actions/checkout@v4
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build and push ${{ matrix.arch }}
        uses: docker/build-push-action@v5
        with:
          context: server
          platforms: ${{ matrix.platform }}
          push: true
          # Each arch gets its own tag; the manifest job combines them.
          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
          provenance: false

  # Combine the per-arch images into a single multi-arch "latest" manifest.
  create-manifest:
    runs-on: ubuntu-latest
    needs: [build]
    permissions:
      deployments: write
      contents: read
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        uses: aws-actions/amazon-ecr-login@v2
      - name: Create and push multi-arch manifest
        run: |
          # Get the registry URL (since we can't easily access job outputs in matrix)
          ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com
          docker manifest create \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64
          docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest
          echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"

View File

@@ -1,35 +1,39 @@
name: Build and Push Frontend Docker Image name: Build and Push Backend Docker Image (Docker Hub)
on: on:
push: pull_request:
branches: types:
- main - closed
paths: paths:
- 'www/**' - "server/**"
- '.github/workflows/docker-frontend.yml' - ".github/workflows/dockerhub-backend.yml"
workflow_dispatch: workflow_dispatch:
env: env:
REGISTRY: ghcr.io REGISTRY: docker.io
IMAGE_NAME: ${{ github.repository }}-frontend IMAGE_NAME: monadicalsas/reflector-backend
jobs: jobs:
build-and-push: build-and-push:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: |
github.event_name == 'workflow_dispatch' ||
(github.event.pull_request.merged == true &&
startsWith(github.event.pull_request.head.ref, 'release-please--branches--'))
permissions: permissions:
contents: read contents: read
packages: write
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
- name: Log in to GitHub Container Registry - name: Log in to Docker Hub
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: ${{ env.REGISTRY }} registry: ${{ env.REGISTRY }}
username: ${{ github.actor }} username: monadicalsas
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata - name: Extract metadata
id: meta id: meta
@@ -38,7 +42,7 @@ jobs:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: | tags: |
type=ref,event=branch type=ref,event=branch
type=sha,prefix={{branch}}- type=ref,event=tag
type=raw,value=latest,enable={{is_default_branch}} type=raw,value=latest,enable={{is_default_branch}}
- name: Set up Docker Buildx - name: Set up Docker Buildx
@@ -47,8 +51,8 @@ jobs:
- name: Build and push Docker image - name: Build and push Docker image
uses: docker/build-push-action@v5 uses: docker/build-push-action@v5
with: with:
context: ./www context: ./server
file: ./www/Dockerfile file: ./server/Dockerfile
push: true push: true
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}

View File

@@ -0,0 +1,78 @@
name: Build and Push Frontend Docker Image

# Runs when a release-please PR touching the frontend is merged
# (or on manual dispatch): builds/pushes the image to Docker Hub,
# then triggers a Coolify redeploy for each target environment.
on:
  pull_request:
    types:
      - closed
    paths:
      - "www/**"
      - ".github/workflows/dockerhub-frontend.yml"
  workflow_dispatch:

env:
  REGISTRY: docker.io
  IMAGE_NAME: monadicalsas/reflector-frontend

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    # pull_request:closed also fires for abandoned PRs, so gate on
    # merged == true and restrict to release-please branches.
    if: |
      github.event_name == 'workflow_dispatch' ||
      (github.event.pull_request.merged == true &&
      startsWith(github.event.pull_request.head.ref, 'release-please--branches--'))
    permissions:
      contents: read
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: monadicalsas
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=raw,value=latest,enable={{is_default_branch}}
          github-token: ${{ secrets.GITHUB_TOKEN }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: ./www
          file: ./www/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          platforms: linux/amd64,linux/arm64

  deploy:
    needs: build-and-push
    runs-on: ubuntu-latest
    if: success()
    strategy:
      matrix:
        environment: [reflector-monadical, reflector-media]
    # Environment-scoped secrets: each environment supplies its own
    # COOLIFY_WEBHOOK_URL / COOLIFY_WEBHOOK_TOKEN.
    environment: ${{ matrix.environment }}
    steps:
      - name: Trigger Coolify deployment
        run: |
          curl -X POST "${{ secrets.COOLIFY_WEBHOOK_URL }}" \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer ${{ secrets.COOLIFY_WEBHOOK_TOKEN }}" \
            -f || (echo "Failed to trigger Coolify deployment for ${{ matrix.environment }}" && exit 1)

1
.gitignore vendored
View File

@@ -18,3 +18,4 @@ CLAUDE.local.md
www/.env.development www/.env.development
www/.env.production www/.env.production
.playwright-mcp .playwright-mcp
.secrets

24
.secrets.example Normal file
View File

@@ -0,0 +1,24 @@
# Example secrets file for GitHub Actions workflows
# Copy this to .secrets and fill in your values
# These secrets should be configured in GitHub repository settings:
# Settings > Secrets and variables > Actions
# DockerHub Configuration (required for frontend and backend deployment)
# Create a Docker Hub access token at https://hub.docker.com/settings/security
# Username: monadicalsas
DOCKERHUB_TOKEN=your-dockerhub-access-token
# GitHub Token (required for frontend and backend deployment)
# Used by docker/metadata-action for extracting image metadata
# Can use the default GITHUB_TOKEN or create a personal access token
GITHUB_TOKEN=your-github-token-or-use-default-GITHUB_TOKEN
# Coolify Deployment Webhook (required for frontend deployment)
# Used to trigger automatic deployment after image push
# Configure these secrets in GitHub Environments:
# Each environment should have:
# - COOLIFY_WEBHOOK_URL: The webhook URL for that specific deployment
# - COOLIFY_WEBHOOK_TOKEN: The webhook token (can be the same for both if using same token)
# Optional: GitHub Actions Cache Token (for local testing with act)
GHA_CACHE_TOKEN=your-github-token-or-empty

View File

@@ -1,5 +1,35 @@
# Changelog # Changelog
## [0.23.1](https://github.com/Monadical-SAS/reflector/compare/v0.23.0...v0.23.1) (2025-12-11)
### Bug Fixes
* populate room_name in transcript GET endpoint ([#783](https://github.com/Monadical-SAS/reflector/issues/783)) ([0eba147](https://github.com/Monadical-SAS/reflector/commit/0eba1470181c7b9e0a79964a1ef28c09bcbdd9d7))
## [0.23.0](https://github.com/Monadical-SAS/reflector/compare/v0.22.4...v0.23.0) (2025-12-10)
### Features
* dockerhub ci ([#772](https://github.com/Monadical-SAS/reflector/issues/772)) ([00549f1](https://github.com/Monadical-SAS/reflector/commit/00549f153ade922cf4cb6c5358a7d11a39c426d2))
* llm retries ([#739](https://github.com/Monadical-SAS/reflector/issues/739)) ([61f0e29](https://github.com/Monadical-SAS/reflector/commit/61f0e29d4c51eab54ee67af92141fbb171e8ccaa))
### Bug Fixes
* celery inspect bug sidestep in restart script ([#766](https://github.com/Monadical-SAS/reflector/issues/766)) ([ec17ed7](https://github.com/Monadical-SAS/reflector/commit/ec17ed7b587cf6ee143646baaee67a7c017044d4))
* deploy frontend to coolify ([#779](https://github.com/Monadical-SAS/reflector/issues/779)) ([91650ec](https://github.com/Monadical-SAS/reflector/commit/91650ec65f65713faa7ee0dcfb75af427b7c4ba0))
* hide rooms settings instead of disabling ([#763](https://github.com/Monadical-SAS/reflector/issues/763)) ([3ad78be](https://github.com/Monadical-SAS/reflector/commit/3ad78be7628c0d029296b301a0e87236c76b7598))
* return participant emails from transcript endpoint ([#769](https://github.com/Monadical-SAS/reflector/issues/769)) ([d3a5cd1](https://github.com/Monadical-SAS/reflector/commit/d3a5cd12d2d0d9c32af2d5bd9322e030ef69b85d))
## [0.22.4](https://github.com/Monadical-SAS/reflector/compare/v0.22.3...v0.22.4) (2025-12-02)
### Bug Fixes
* Multitrack mixdown optimisation 2 ([#764](https://github.com/Monadical-SAS/reflector/issues/764)) ([bd5df1c](https://github.com/Monadical-SAS/reflector/commit/bd5df1ce2ebf35d7f3413b295e56937a9a28ef7b))
## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02) ## [0.22.3](https://github.com/Monadical-SAS/reflector/compare/v0.22.2...v0.22.3) (2025-12-02)

View File

@@ -3,10 +3,7 @@
services: services:
web: web:
build: image: monadicalsas/reflector-frontend:latest
context: ./www
dockerfile: Dockerfile
image: reflector-frontend:latest
environment: environment:
- KV_URL=${KV_URL:-redis://redis:6379} - KV_URL=${KV_URL:-redis://redis:6379}
- SITE_URL=${SITE_URL} - SITE_URL=${SITE_URL}

View File

@@ -126,6 +126,7 @@ markers = [
select = [ select = [
"I", # isort - import sorting "I", # isort - import sorting
"F401", # unused imports "F401", # unused imports
"E402", # module level import not at top of file
"PLC0415", # import-outside-top-level - detect inline imports "PLC0415", # import-outside-top-level - detect inline imports
] ]

View File

@@ -1,13 +1,19 @@
import asyncio import asyncio
import functools import functools
from uuid import uuid4
from celery import current_task
from reflector.db import get_database from reflector.db import get_database
from reflector.llm import llm_session_id
def asynctask(f): def asynctask(f):
@functools.wraps(f) @functools.wraps(f)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
async def run_with_db(): async def run_with_db():
task_id = current_task.request.id if current_task else None
llm_session_id.set(task_id or f"random-{uuid4().hex}")
database = get_database() database = get_database()
await database.connect() await database.connect()
try: try:

View File

@@ -88,5 +88,11 @@ class UserController:
results = await get_database().fetch_all(query) results = await get_database().fetch_all(query)
return [User(**r) for r in results] return [User(**r) for r in results]
@staticmethod
async def get_by_ids(user_ids: list[NonEmptyString]) -> dict[str, User]:
query = users.select().where(users.c.id.in_(user_ids))
results = await get_database().fetch_all(query)
return {user.id: User(**user) for user in results}
user_controller = UserController() user_controller = UserController()

View File

@@ -1,14 +1,29 @@
import logging import logging
from typing import Type, TypeVar from contextvars import ContextVar
from typing import Generic, Type, TypeVar
from uuid import uuid4
from llama_index.core import Settings from llama_index.core import Settings
from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.response_synthesizers import TreeSummarize
from llama_index.core.workflow import (
Context,
Event,
StartEvent,
StopEvent,
Workflow,
step,
)
from llama_index.llms.openai_like import OpenAILike from llama_index.llms.openai_like import OpenAILike
from pydantic import BaseModel, ValidationError from pydantic import BaseModel, ValidationError
T = TypeVar("T", bound=BaseModel) T = TypeVar("T", bound=BaseModel)
OutputT = TypeVar("OutputT", bound=BaseModel)
# Session ID for LiteLLM request grouping - set per processing run
llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)
logger = logging.getLogger(__name__)
STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """ STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
Based on the following analysis, provide the information in the requested JSON format: Based on the following analysis, provide the information in the requested JSON format:
@@ -20,6 +35,158 @@ Analysis:
""" """
class LLMParseError(Exception):
"""Raised when LLM output cannot be parsed after retries."""
def __init__(self, output_cls: Type[BaseModel], error_msg: str, attempts: int):
self.output_cls = output_cls
self.error_msg = error_msg
self.attempts = attempts
super().__init__(
f"Failed to parse {output_cls.__name__} after {attempts} attempts: {error_msg}"
)
class ExtractionDone(Event):
"""Event emitted when LLM JSON formatting completes."""
output: str
class ValidationErrorEvent(Event):
"""Event emitted when validation fails."""
error: str
wrong_output: str
class StructuredOutputWorkflow(Workflow, Generic[OutputT]):
"""Workflow for structured output extraction with validation retry.
This workflow handles parse/validation retries only. Network error retries
are handled internally by Settings.llm (OpenAILike max_retries=3).
The caller should NOT wrap this workflow in additional retry logic.
"""
def __init__(
self,
output_cls: Type[OutputT],
max_retries: int = 3,
**kwargs,
):
super().__init__(**kwargs)
self.output_cls: Type[OutputT] = output_cls
self.max_retries = max_retries
self.output_parser = PydanticOutputParser(output_cls)
@step
async def extract(
self, ctx: Context, ev: StartEvent | ValidationErrorEvent
) -> StopEvent | ExtractionDone:
"""Extract structured data from text using two-step LLM process.
Step 1 (first call only): TreeSummarize generates text analysis
Step 2 (every call): Settings.llm.acomplete formats analysis as JSON
"""
current_retries = await ctx.store.get("retries", default=0)
await ctx.store.set("retries", current_retries + 1)
if current_retries >= self.max_retries:
last_error = await ctx.store.get("last_error", default=None)
logger.error(
f"Max retries ({self.max_retries}) reached for {self.output_cls.__name__}"
)
return StopEvent(result={"error": last_error, "attempts": current_retries})
if isinstance(ev, StartEvent):
# First call: run TreeSummarize to get analysis, store in context
prompt = ev.get("prompt")
texts = ev.get("texts")
tone_name = ev.get("tone_name")
if not prompt or not isinstance(texts, list):
raise ValueError(
"StartEvent must contain 'prompt' (str) and 'texts' (list)"
)
summarizer = TreeSummarize(verbose=False)
analysis = await summarizer.aget_response(
prompt, texts, tone_name=tone_name
)
await ctx.store.set("analysis", str(analysis))
reflection = ""
else:
# Retry: reuse analysis from context
analysis = await ctx.store.get("analysis")
if not analysis:
raise RuntimeError("Internal error: analysis not found in context")
wrong_output = ev.wrong_output
if len(wrong_output) > 2000:
wrong_output = wrong_output[:2000] + "... [truncated]"
reflection = (
f"\n\nYour previous response could not be parsed:\n{wrong_output}\n\n"
f"Error:\n{ev.error}\n\n"
"Please try again. Return ONLY valid JSON matching the schema above, "
"with no markdown formatting or extra text."
)
# Step 2: Format analysis as JSON using LLM completion
format_instructions = self.output_parser.format(
"Please structure the above information in the following JSON format:"
)
json_prompt = STRUCTURED_RESPONSE_PROMPT_TEMPLATE.format(
analysis=analysis,
format_instructions=format_instructions + reflection,
)
# Network retries handled by OpenAILike (max_retries=3)
response = await Settings.llm.acomplete(json_prompt)
return ExtractionDone(output=response.text)
@step
async def validate(
self, ctx: Context, ev: ExtractionDone
) -> StopEvent | ValidationErrorEvent:
"""Validate extracted output against Pydantic schema."""
raw_output = ev.output
retries = await ctx.store.get("retries", default=0)
try:
parsed = self.output_parser.parse(raw_output)
if retries > 1:
logger.info(
f"LLM parse succeeded on attempt {retries}/{self.max_retries} "
f"for {self.output_cls.__name__}"
)
return StopEvent(result={"success": parsed})
except (ValidationError, ValueError) as e:
error_msg = self._format_error(e, raw_output)
await ctx.store.set("last_error", error_msg)
logger.error(
f"LLM parse error (attempt {retries}/{self.max_retries}): "
f"{type(e).__name__}: {e}\nRaw response: {raw_output[:500]}"
)
return ValidationErrorEvent(
error=error_msg,
wrong_output=raw_output,
)
def _format_error(self, error: Exception, raw_output: str) -> str:
"""Format error for LLM feedback."""
if isinstance(error, ValidationError):
error_messages = []
for err in error.errors():
field = ".".join(str(loc) for loc in err["loc"])
error_messages.append(f"- {err['msg']} in field '{field}'")
return "Schema validation errors:\n" + "\n".join(error_messages)
else:
return f"Parse error: {str(error)}"
class LLM: class LLM:
def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048): def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048):
self.settings_obj = settings self.settings_obj = settings
@@ -30,11 +197,12 @@ class LLM:
self.temperature = temperature self.temperature = temperature
self.max_tokens = max_tokens self.max_tokens = max_tokens
# Configure llamaindex Settings
self._configure_llamaindex() self._configure_llamaindex()
def _configure_llamaindex(self): def _configure_llamaindex(self):
"""Configure llamaindex Settings with OpenAILike LLM""" """Configure llamaindex Settings with OpenAILike LLM"""
session_id = llm_session_id.get() or f"fallback-{uuid4().hex}"
Settings.llm = OpenAILike( Settings.llm = OpenAILike(
model=self.model_name, model=self.model_name,
api_base=self.url, api_base=self.url,
@@ -44,6 +212,7 @@ class LLM:
is_function_calling_model=False, is_function_calling_model=False,
temperature=self.temperature, temperature=self.temperature,
max_tokens=self.max_tokens, max_tokens=self.max_tokens,
additional_kwargs={"extra_body": {"litellm_session_id": session_id}},
) )
async def get_response( async def get_response(
@@ -61,43 +230,25 @@ class LLM:
output_cls: Type[T], output_cls: Type[T],
tone_name: str | None = None, tone_name: str | None = None,
) -> T: ) -> T:
"""Get structured output from LLM for non-function-calling models""" """Get structured output from LLM with validation retry via Workflow."""
logger = logging.getLogger(__name__) workflow = StructuredOutputWorkflow(
output_cls=output_cls,
summarizer = TreeSummarize(verbose=True) max_retries=self.settings_obj.LLM_PARSE_MAX_RETRIES + 1,
response = await summarizer.aget_response(prompt, texts, tone_name=tone_name) timeout=120,
output_parser = PydanticOutputParser(output_cls)
program = LLMTextCompletionProgram.from_defaults(
output_parser=output_parser,
prompt_template_str=STRUCTURED_RESPONSE_PROMPT_TEMPLATE,
verbose=False,
) )
format_instructions = output_parser.format( result = await workflow.run(
"Please structure the above information in the following JSON format:" prompt=prompt,
texts=texts,
tone_name=tone_name,
) )
try: if "error" in result:
output = await program.acall( error_msg = result["error"] or "Max retries exceeded"
analysis=str(response), format_instructions=format_instructions raise LLMParseError(
output_cls=output_cls,
error_msg=error_msg,
attempts=result.get("attempts", 0),
) )
except ValidationError as e:
# Extract the raw JSON from the error details
errors = e.errors()
if errors and "input" in errors[0]:
raw_json = errors[0]["input"]
logger.error(
f"JSON validation failed for {output_cls.__name__}. "
f"Full raw JSON output:\n{raw_json}\n"
f"Validation errors: {errors}"
)
else:
logger.error(
f"JSON validation failed for {output_cls.__name__}. "
f"Validation errors: {errors}"
)
raise
return output return result["success"]

View File

@@ -340,7 +340,6 @@ async def task_send_webhook_if_needed(*, transcript_id: str):
@asynctask @asynctask
async def task_pipeline_file_process(*, transcript_id: str): async def task_pipeline_file_process(*, transcript_id: str):
"""Celery task for file pipeline processing""" """Celery task for file pipeline processing"""
transcript = await transcripts_controller.get_by_id(transcript_id) transcript = await transcripts_controller.get_by_id(transcript_id)
if not transcript: if not transcript:
raise Exception(f"Transcript {transcript_id} not found") raise Exception(f"Transcript {transcript_id} not found")

View File

@@ -31,7 +31,6 @@ from reflector.processors import AudioFileWriterProcessor
from reflector.processors.audio_waveform_processor import AudioWaveformProcessor from reflector.processors.audio_waveform_processor import AudioWaveformProcessor
from reflector.processors.types import TitleSummary from reflector.processors.types import TitleSummary
from reflector.processors.types import Transcript as TranscriptType from reflector.processors.types import Transcript as TranscriptType
from reflector.settings import settings
from reflector.storage import Storage, get_transcripts_storage from reflector.storage import Storage, get_transcripts_storage
from reflector.utils.daily import ( from reflector.utils.daily import (
filter_cam_audio_tracks, filter_cam_audio_tracks,
@@ -423,7 +422,15 @@ class PipelineMainMultitrack(PipelineMainBase):
# Open all containers with cleanup guaranteed # Open all containers with cleanup guaranteed
for i, url in enumerate(valid_track_urls): for i, url in enumerate(valid_track_urls):
try: try:
c = av.open(url) c = av.open(
url,
options={
# it's trying to stream from s3 by default
"reconnect": "1",
"reconnect_streamed": "1",
"reconnect_delay_max": "5",
},
)
containers.append(c) containers.append(c)
except Exception as e: except Exception as e:
self.logger.warning( self.logger.warning(
@@ -452,6 +459,8 @@ class PipelineMainMultitrack(PipelineMainBase):
frame = next(dec) frame = next(dec)
except StopIteration: except StopIteration:
active[i] = False active[i] = False
# causes stream to move on / unclogs memory
inputs[i].push(None)
continue continue
if frame.sample_rate != target_sample_rate: if frame.sample_rate != target_sample_rate:
@@ -471,8 +480,6 @@ class PipelineMainMultitrack(PipelineMainBase):
mixed.time_base = Fraction(1, target_sample_rate) mixed.time_base = Fraction(1, target_sample_rate)
await writer.push(mixed) await writer.push(mixed)
for in_ctx in inputs:
in_ctx.push(None)
while True: while True:
try: try:
mixed = sink.pull() mixed = sink.pull()
@@ -632,21 +639,11 @@ class PipelineMainMultitrack(PipelineMainBase):
transcript.data_path.mkdir(parents=True, exist_ok=True) transcript.data_path.mkdir(parents=True, exist_ok=True)
if settings.SKIP_MIXDOWN:
self.logger.warning(
"SKIP_MIXDOWN enabled: Skipping mixdown and waveform generation. "
"UI will have no audio playback or waveform.",
num_tracks=len(padded_track_urls),
transcript_id=transcript.id,
)
else:
mp3_writer = AudioFileWriterProcessor( mp3_writer = AudioFileWriterProcessor(
path=str(transcript.audio_mp3_filename), path=str(transcript.audio_mp3_filename),
on_duration=self.on_duration, on_duration=self.on_duration,
) )
await self.mixdown_tracks( await self.mixdown_tracks(padded_track_urls, mp3_writer, offsets_seconds=None)
padded_track_urls, mp3_writer, offsets_seconds=None
)
await mp3_writer.flush() await mp3_writer.flush()
if not transcript.audio_mp3_filename.exists(): if not transcript.audio_mp3_filename.exists():
@@ -661,9 +658,7 @@ class PipelineMainMultitrack(PipelineMainBase):
await transcript_storage.put_file(storage_path, mp3_file) await transcript_storage.put_file(storage_path, mp3_file)
mp3_url = await transcript_storage.get_file_url(storage_path) mp3_url = await transcript_storage.get_file_url(storage_path)
await transcripts_controller.update( await transcripts_controller.update(transcript, {"audio_location": "storage"})
transcript, {"audio_location": "storage"}
)
self.logger.info( self.logger.info(
f"Uploaded mixed audio to storage", f"Uploaded mixed audio to storage",

View File

@@ -160,7 +160,10 @@ def dispatch_transcript_processing(config: ProcessingConfig) -> AsyncResult:
def task_is_scheduled_or_active(task_name: str, **kwargs): def task_is_scheduled_or_active(task_name: str, **kwargs):
inspect = celery.current_app.control.inspect() inspect = celery.current_app.control.inspect()
for worker, tasks in (inspect.scheduled() | inspect.active()).items(): scheduled = inspect.scheduled() or {}
active = inspect.active() or {}
all = scheduled | active
for worker, tasks in all.items():
for task in tasks: for task in tasks:
if task["name"] == task_name and task["kwargs"] == kwargs: if task["name"] == task_name and task["kwargs"] == kwargs:
return True return True

View File

@@ -74,6 +74,10 @@ class Settings(BaseSettings):
LLM_API_KEY: str | None = None LLM_API_KEY: str | None = None
LLM_CONTEXT_WINDOW: int = 16000 LLM_CONTEXT_WINDOW: int = 16000
LLM_PARSE_MAX_RETRIES: int = (
3 # Max retries for JSON/validation errors (total attempts = retries + 1)
)
# Diarization # Diarization
DIARIZATION_ENABLED: bool = True DIARIZATION_ENABLED: bool = True
DIARIZATION_BACKEND: str = "modal" DIARIZATION_BACKEND: str = "modal"
@@ -138,14 +142,6 @@ class Settings(BaseSettings):
DAILY_WEBHOOK_UUID: str | None = ( DAILY_WEBHOOK_UUID: str | None = (
None # Webhook UUID for this environment. Not used by production code None # Webhook UUID for this environment. Not used by production code
) )
# Multitrack processing
# SKIP_MIXDOWN: When True, skips audio mixdown and waveform generation.
# Transcription still works using individual tracks. Useful for:
# - Diagnosing OOM issues in mixdown
# - Fast processing when audio playback is not needed
# Note: UI will have no audio playback or waveform when enabled.
SKIP_MIXDOWN: bool = True
# Platform Configuration # Platform Configuration
DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM DEFAULT_VIDEO_PLATFORM: Platform = WHEREBY_PLATFORM

View File

@@ -17,6 +17,7 @@ from pydantic import (
import reflector.auth as auth import reflector.auth as auth
from reflector.db import get_database from reflector.db import get_database
from reflector.db.recordings import recordings_controller from reflector.db.recordings import recordings_controller
from reflector.db.rooms import rooms_controller
from reflector.db.search import ( from reflector.db.search import (
DEFAULT_SEARCH_LIMIT, DEFAULT_SEARCH_LIMIT,
SearchLimit, SearchLimit,
@@ -37,6 +38,7 @@ from reflector.db.transcripts import (
TranscriptTopic, TranscriptTopic,
transcripts_controller, transcripts_controller,
) )
from reflector.db.users import user_controller
from reflector.processors.types import Transcript as ProcessorTranscript from reflector.processors.types import Transcript as ProcessorTranscript
from reflector.processors.types import Word from reflector.processors.types import Word
from reflector.schemas.transcript_formats import TranscriptFormat, TranscriptSegment from reflector.schemas.transcript_formats import TranscriptFormat, TranscriptSegment
@@ -111,8 +113,12 @@ class GetTranscriptMinimal(BaseModel):
audio_deleted: bool | None = None audio_deleted: bool | None = None
class TranscriptParticipantWithEmail(TranscriptParticipant):
email: str | None = None
class GetTranscriptWithParticipants(GetTranscriptMinimal): class GetTranscriptWithParticipants(GetTranscriptMinimal):
participants: list[TranscriptParticipant] | None participants: list[TranscriptParticipantWithEmail] | None
class GetTranscriptWithText(GetTranscriptWithParticipants): class GetTranscriptWithText(GetTranscriptWithParticipants):
@@ -468,6 +474,23 @@ async def transcript_get(
is_multitrack = await _get_is_multitrack(transcript) is_multitrack = await _get_is_multitrack(transcript)
room_name = None
if transcript.room_id:
room = await rooms_controller.get_by_id(transcript.room_id)
room_name = room.name if room else None
participants = []
if transcript.participants:
user_ids = [p.user_id for p in transcript.participants if p.user_id is not None]
users_dict = await user_controller.get_by_ids(user_ids) if user_ids else {}
for p in transcript.participants:
user = users_dict.get(p.user_id) if p.user_id else None
participants.append(
TranscriptParticipantWithEmail(
**p.model_dump(), email=user.email if user else None
)
)
base_data = { base_data = {
"id": transcript.id, "id": transcript.id,
"user_id": transcript.user_id, "user_id": transcript.user_id,
@@ -486,8 +509,9 @@ async def transcript_get(
"meeting_id": transcript.meeting_id, "meeting_id": transcript.meeting_id,
"source_kind": transcript.source_kind, "source_kind": transcript.source_kind,
"room_id": transcript.room_id, "room_id": transcript.room_id,
"room_name": room_name,
"audio_deleted": transcript.audio_deleted, "audio_deleted": transcript.audio_deleted,
"participants": transcript.participants, "participants": participants,
} }
if transcript_format == "text": if transcript_format == "text":

View File

@@ -318,6 +318,14 @@ async def dummy_storage():
yield yield
@pytest.fixture
def test_settings():
"""Provide isolated settings for tests to avoid modifying global settings"""
from reflector.settings import Settings
return Settings()
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def celery_enable_logging(): def celery_enable_logging():
return True return True

View File

@@ -0,0 +1,357 @@
"""Tests for LLM parse error recovery using llama-index Workflow"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from pydantic import BaseModel, Field
from workflows.errors import WorkflowRuntimeError
from reflector.llm import LLM, LLMParseError, StructuredOutputWorkflow
class TestResponse(BaseModel):
"""Test response model for structured output"""
title: str = Field(description="A title")
summary: str = Field(description="A summary")
confidence: float = Field(description="Confidence score", ge=0, le=1)
def make_completion_response(text: str):
"""Create a mock CompletionResponse with .text attribute"""
response = MagicMock()
response.text = text
return response
class TestLLMParseErrorRecovery:
"""Test parse error recovery with Workflow feedback loop"""
@pytest.mark.asyncio
async def test_parse_error_recovery_with_feedback(self, test_settings):
"""Test that parse errors trigger retry with error feedback"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
# TreeSummarize returns plain text analysis (step 1)
mock_summarizer.aget_response = AsyncMock(
return_value="The analysis shows a test with summary and high confidence."
)
call_count = {"count": 0}
async def acomplete_handler(prompt, *args, **kwargs):
call_count["count"] += 1
if call_count["count"] == 1:
# First JSON formatting call returns invalid JSON
return make_completion_response('{"title": "Test"}')
else:
# Second call should have error feedback in prompt
assert "Your previous response could not be parsed:" in prompt
assert '{"title": "Test"}' in prompt
assert "Error:" in prompt
assert "Please try again" in prompt
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
assert result.summary == "Summary"
assert result.confidence == 0.95
# TreeSummarize called once, Settings.llm.acomplete called twice
assert mock_summarizer.aget_response.call_count == 1
assert call_count["count"] == 2
@pytest.mark.asyncio
async def test_max_parse_retry_attempts(self, test_settings):
"""Test that parse error retry stops after max attempts"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Always return invalid JSON from acomplete
mock_settings.llm.acomplete = AsyncMock(
return_value=make_completion_response(
'{"invalid": "missing required fields"}'
)
)
with pytest.raises(LLMParseError, match="Failed to parse"):
await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
expected_attempts = test_settings.LLM_PARSE_MAX_RETRIES + 1
# TreeSummarize called once, acomplete called max_retries times
assert mock_summarizer.aget_response.call_count == 1
assert mock_settings.llm.acomplete.call_count == expected_attempts
@pytest.mark.asyncio
async def test_raw_response_logging_on_parse_error(self, test_settings, caplog):
"""Test that raw response is logged when parse error occurs"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
caplog.at_level("ERROR"),
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
call_count = {"count": 0}
async def acomplete_handler(*args, **kwargs):
call_count["count"] += 1
if call_count["count"] == 1:
return make_completion_response('{"title": "Test"}') # Invalid
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
error_logs = [r for r in caplog.records if r.levelname == "ERROR"]
raw_response_logged = any("Raw response:" in r.message for r in error_logs)
assert raw_response_logged, "Raw response should be logged on parse error"
@pytest.mark.asyncio
async def test_multiple_validation_errors_in_feedback(self, test_settings):
"""Test that validation errors are included in feedback"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
call_count = {"count": 0}
async def acomplete_handler(prompt, *args, **kwargs):
call_count["count"] += 1
if call_count["count"] == 1:
# Missing title and summary
return make_completion_response('{"confidence": 0.5}')
else:
# Should have schema validation errors in prompt
assert (
"Schema validation errors" in prompt
or "error" in prompt.lower()
)
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
assert call_count["count"] == 2
@pytest.mark.asyncio
async def test_success_on_first_attempt(self, test_settings):
"""Test that no retry happens when first attempt succeeds"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
mock_settings.llm.acomplete = AsyncMock(
return_value=make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.95}'
)
)
result = await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
assert result.title == "Test"
assert result.summary == "Summary"
assert result.confidence == 0.95
assert mock_summarizer.aget_response.call_count == 1
assert mock_settings.llm.acomplete.call_count == 1
class TestStructuredOutputWorkflow:
"""Direct tests for the StructuredOutputWorkflow"""
@pytest.mark.asyncio
async def test_workflow_retries_on_validation_error(self):
"""Test workflow retries when validation fails"""
workflow = StructuredOutputWorkflow(
output_cls=TestResponse,
max_retries=3,
timeout=30,
)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
call_count = {"count": 0}
async def acomplete_handler(*args, **kwargs):
call_count["count"] += 1
if call_count["count"] < 2:
return make_completion_response('{"title": "Only title"}')
return make_completion_response(
'{"title": "Test", "summary": "Summary", "confidence": 0.9}'
)
mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)
result = await workflow.run(
prompt="Extract data",
texts=["Some text"],
tone_name=None,
)
assert "success" in result
assert result["success"].title == "Test"
assert call_count["count"] == 2
@pytest.mark.asyncio
async def test_workflow_returns_error_after_max_retries(self):
"""Test workflow returns error after exhausting retries"""
workflow = StructuredOutputWorkflow(
output_cls=TestResponse,
max_retries=2,
timeout=30,
)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Always return invalid JSON
mock_settings.llm.acomplete = AsyncMock(
return_value=make_completion_response('{"invalid": true}')
)
result = await workflow.run(
prompt="Extract data",
texts=["Some text"],
tone_name=None,
)
assert "error" in result
# TreeSummarize called once, acomplete called max_retries times
assert mock_summarizer.aget_response.call_count == 1
assert mock_settings.llm.acomplete.call_count == 2
class TestNetworkErrorRetries:
"""Test that network error retries are handled by OpenAILike, not Workflow"""
@pytest.mark.asyncio
async def test_network_error_propagates_after_openai_retries(self, test_settings):
"""Test that network errors are retried by OpenAILike and then propagate.
Network retries are handled by OpenAILike (max_retries=3), not by our
StructuredOutputWorkflow. This test verifies that network errors propagate
up after OpenAILike exhausts its retries.
"""
llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Simulate network error from acomplete (after OpenAILike retries exhausted)
network_error = ConnectionError("Connection refused")
mock_settings.llm.acomplete = AsyncMock(side_effect=network_error)
# Network error wrapped in WorkflowRuntimeError
with pytest.raises(WorkflowRuntimeError, match="Connection refused"):
await llm.get_structured_response(
prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
)
# acomplete called only once - network error propagates, not retried by Workflow
assert mock_settings.llm.acomplete.call_count == 1
@pytest.mark.asyncio
async def test_network_error_not_retried_by_workflow(self, test_settings):
"""Test that Workflow does NOT retry network errors (OpenAILike handles those).
This verifies the separation of concerns:
- StructuredOutputWorkflow: retries parse/validation errors
- OpenAILike: retries network errors (internally, max_retries=3)
"""
workflow = StructuredOutputWorkflow(
output_cls=TestResponse,
max_retries=3,
timeout=30,
)
with (
patch("reflector.llm.TreeSummarize") as mock_summarize,
patch("reflector.llm.Settings") as mock_settings,
):
mock_summarizer = MagicMock()
mock_summarize.return_value = mock_summarizer
mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")
# Network error should propagate immediately, not trigger Workflow retry
mock_settings.llm.acomplete = AsyncMock(
side_effect=TimeoutError("Request timed out")
)
# Network error wrapped in WorkflowRuntimeError
with pytest.raises(WorkflowRuntimeError, match="Request timed out"):
await workflow.run(
prompt="Extract data",
texts=["Some text"],
tone_name=None,
)
# Only called once - Workflow doesn't retry network errors
assert mock_settings.llm.acomplete.call_count == 1

View File

@@ -1,5 +1,8 @@
import pytest import pytest
from reflector.db.rooms import rooms_controller
from reflector.db.transcripts import transcripts_controller
@pytest.mark.asyncio @pytest.mark.asyncio
async def test_transcript_create(client): async def test_transcript_create(client):
@@ -182,3 +185,51 @@ async def test_transcript_mark_reviewed(authenticated_client, client):
response = await client.get(f"/transcripts/{tid}") response = await client.get(f"/transcripts/{tid}")
assert response.status_code == 200 assert response.status_code == 200
assert response.json()["reviewed"] is True assert response.json()["reviewed"] is True
@pytest.mark.asyncio
async def test_transcript_get_returns_room_name(authenticated_client, client):
"""Test that getting a transcript returns its room_name when linked to a room."""
# Create a room
room = await rooms_controller.add(
name="test-room-for-transcript",
user_id="test-user",
zulip_auto_post=False,
zulip_stream="",
zulip_topic="",
is_locked=False,
room_mode="normal",
recording_type="cloud",
recording_trigger="automatic-2nd-participant",
is_shared=False,
webhook_url="",
webhook_secret="",
)
# Create a transcript linked to the room
transcript = await transcripts_controller.add(
name="transcript-with-room",
source_kind="file",
room_id=room.id,
)
# Get the transcript and verify room_name is returned
response = await client.get(f"/transcripts/{transcript.id}")
assert response.status_code == 200
assert response.json()["room_id"] == room.id
assert response.json()["room_name"] == "test-room-for-transcript"
@pytest.mark.asyncio
async def test_transcript_get_returns_null_room_name_when_no_room(
authenticated_client, client
):
"""Test that room_name is null when transcript has no room."""
response = await client.post("/transcripts", json={"name": "no-room-transcript"})
assert response.status_code == 200
tid = response.json()["id"]
response = await client.get(f"/transcripts/{tid}")
assert response.status_code == 200
assert response.json()["room_id"] is None
assert response.json()["room_name"] is None

View File

@@ -15,9 +15,12 @@ import {
createListCollection, createListCollection,
useDisclosure, useDisclosure,
Tabs, Tabs,
Popover,
Text,
HStack,
} from "@chakra-ui/react"; } from "@chakra-ui/react";
import { useEffect, useMemo, useState } from "react"; import { useEffect, useMemo, useState } from "react";
import { LuEye, LuEyeOff } from "react-icons/lu"; import { LuEye, LuEyeOff, LuInfo } from "react-icons/lu";
import useRoomList from "./useRoomList"; import useRoomList from "./useRoomList";
import type { components } from "../../reflector-api"; import type { components } from "../../reflector-api";
import { import {
@@ -534,6 +537,10 @@ export default function RoomsList() {
room.recordingType === "cloud" room.recordingType === "cloud"
? "automatic-2nd-participant" ? "automatic-2nd-participant"
: "none"; : "none";
} else {
if (room.recordingType !== "cloud") {
updates.recordingTrigger = "none";
}
} }
setRoomInput({ ...room, ...updates }); setRoomInput({ ...room, ...updates });
}} }}
@@ -583,6 +590,7 @@ export default function RoomsList() {
<Checkbox.Label>Locked room</Checkbox.Label> <Checkbox.Label>Locked room</Checkbox.Label>
</Checkbox.Root> </Checkbox.Root>
</Field.Root> </Field.Root>
{room.platform !== "daily" && (
<Field.Root mt={4}> <Field.Root mt={4}>
<Field.Label>Room size</Field.Label> <Field.Label>Room size</Field.Label>
<Select.Root <Select.Root
@@ -591,7 +599,6 @@ export default function RoomsList() {
setRoomInput({ ...room, roomMode: e.value[0] }) setRoomInput({ ...room, roomMode: e.value[0] })
} }
collection={roomModeCollection} collection={roomModeCollection}
disabled={room.platform === "daily"}
> >
<Select.HiddenSelect /> <Select.HiddenSelect />
<Select.Control> <Select.Control>
@@ -614,8 +621,44 @@ export default function RoomsList() {
</Select.Positioner> </Select.Positioner>
</Select.Root> </Select.Root>
</Field.Root> </Field.Root>
)}
<Field.Root mt={4}> <Field.Root mt={4}>
<HStack gap={2} alignItems="center">
<Field.Label>Recording type</Field.Label> <Field.Label>Recording type</Field.Label>
<Popover.Root>
<Popover.Trigger asChild>
<IconButton
aria-label="Recording type help"
variant="ghost"
size="xs"
colorPalette="gray"
>
<LuInfo />
</IconButton>
</Popover.Trigger>
<Popover.Positioner>
<Popover.Content>
<Popover.Arrow />
<Popover.Body>
<Text fontSize="sm" lineHeight="1.6">
<strong>None:</strong> No recording will be
created.
<br />
<br />
<strong>Local:</strong> Recording happens on
each participant's device. Files are saved
locally.
<br />
<br />
<strong>Cloud:</strong> Recording happens on
the platform's servers and is available after
the meeting ends.
</Text>
</Popover.Body>
</Popover.Content>
</Popover.Positioner>
</Popover.Root>
</HStack>
<Select.Root <Select.Root
value={[room.recordingType]} value={[room.recordingType]}
onValueChange={(e) => { onValueChange={(e) => {
@@ -623,14 +666,12 @@ export default function RoomsList() {
const updates: Partial<typeof room> = { const updates: Partial<typeof room> = {
recordingType: newRecordingType, recordingType: newRecordingType,
}; };
// For Daily: if cloud, use automatic; otherwise none
if (room.platform === "daily") { if (room.platform === "daily") {
updates.recordingTrigger = updates.recordingTrigger =
newRecordingType === "cloud" newRecordingType === "cloud"
? "automatic-2nd-participant" ? "automatic-2nd-participant"
: "none"; : "none";
} else { } else {
// For Whereby: if not cloud, set to none
updates.recordingTrigger = updates.recordingTrigger =
newRecordingType !== "cloud" newRecordingType !== "cloud"
? "none" ? "none"
@@ -661,8 +702,45 @@ export default function RoomsList() {
</Select.Positioner> </Select.Positioner>
</Select.Root> </Select.Root>
</Field.Root> </Field.Root>
{room.recordingType === "cloud" &&
room.platform !== "daily" && (
<Field.Root mt={4}> <Field.Root mt={4}>
<HStack gap={2} alignItems="center">
<Field.Label>Recording start trigger</Field.Label> <Field.Label>Recording start trigger</Field.Label>
<Popover.Root>
<Popover.Trigger asChild>
<IconButton
aria-label="Recording start trigger help"
variant="ghost"
size="xs"
colorPalette="gray"
>
<LuInfo />
</IconButton>
</Popover.Trigger>
<Popover.Positioner>
<Popover.Content>
<Popover.Arrow />
<Popover.Body>
<Text fontSize="sm" lineHeight="1.6">
<strong>None:</strong> Recording must be
started manually by a participant.
<br />
<br />
<strong>Prompt:</strong> Participants will
be prompted to start recording when they
join.
<br />
<br />
<strong>Automatic:</strong> Recording
starts automatically when a second
participant joins.
</Text>
</Popover.Body>
</Popover.Content>
</Popover.Positioner>
</Popover.Root>
</HStack>
<Select.Root <Select.Root
value={[room.recordingTrigger]} value={[room.recordingTrigger]}
onValueChange={(e) => onValueChange={(e) =>
@@ -672,11 +750,6 @@ export default function RoomsList() {
}) })
} }
collection={recordingTriggerCollection} collection={recordingTriggerCollection}
disabled={
room.recordingType !== "cloud" ||
(room.platform === "daily" &&
room.recordingType === "cloud")
}
> >
<Select.HiddenSelect /> <Select.HiddenSelect />
<Select.Control> <Select.Control>
@@ -699,6 +772,7 @@ export default function RoomsList() {
</Select.Positioner> </Select.Positioner>
</Select.Root> </Select.Root>
</Field.Root> </Field.Root>
)}
<Field.Root mt={4}> <Field.Root mt={4}>
<Checkbox.Root <Checkbox.Root

View File

@@ -31,7 +31,7 @@
"ioredis": "^5.7.0", "ioredis": "^5.7.0",
"jest-worker": "^29.6.2", "jest-worker": "^29.6.2",
"lucide-react": "^0.525.0", "lucide-react": "^0.525.0",
"next": "^15.5.3", "next": "^15.5.7",
"next-auth": "^4.24.7", "next-auth": "^4.24.7",
"next-themes": "^0.4.6", "next-themes": "^0.4.6",
"nuqs": "^2.4.3", "nuqs": "^2.4.3",

100
www/pnpm-lock.yaml generated
View File

@@ -27,7 +27,7 @@ importers:
version: 0.2.3(@fortawesome/fontawesome-svg-core@6.7.2)(react@18.3.1) version: 0.2.3(@fortawesome/fontawesome-svg-core@6.7.2)(react@18.3.1)
"@sentry/nextjs": "@sentry/nextjs":
specifier: ^10.11.0 specifier: ^10.11.0
version: 10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3) version: 10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)
"@tanstack/react-query": "@tanstack/react-query":
specifier: ^5.85.9 specifier: ^5.85.9
version: 5.85.9(react@18.3.1) version: 5.85.9(react@18.3.1)
@@ -62,17 +62,17 @@ importers:
specifier: ^0.525.0 specifier: ^0.525.0
version: 0.525.0(react@18.3.1) version: 0.525.0(react@18.3.1)
next: next:
specifier: ^15.5.3 specifier: ^15.5.7
version: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) version: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
next-auth: next-auth:
specifier: ^4.24.7 specifier: ^4.24.7
version: 4.24.11(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1) version: 4.24.11(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
next-themes: next-themes:
specifier: ^0.4.6 specifier: ^0.4.6
version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1) version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
nuqs: nuqs:
specifier: ^2.4.3 specifier: ^2.4.3
version: 2.4.3(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1) version: 2.4.3(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)
openapi-fetch: openapi-fetch:
specifier: ^0.14.0 specifier: ^0.14.0
version: 0.14.0 version: 0.14.0
@@ -1184,10 +1184,10 @@ packages:
integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==, integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==,
} }
"@next/env@15.5.3": "@next/env@15.5.7":
resolution: resolution:
{ {
integrity: sha512-RSEDTRqyihYXygx/OJXwvVupfr9m04+0vH8vyy0HfZ7keRto6VX9BbEk0J2PUk0VGy6YhklJUSrgForov5F9pw==, integrity: sha512-4h6Y2NyEkIEN7Z8YxkA27pq6zTkS09bUSYC0xjd0NpwFxjnIKeZEeH591o5WECSmjpUhLn3H2QLJcDye3Uzcvg==,
} }
"@next/eslint-plugin-next@15.5.3": "@next/eslint-plugin-next@15.5.3":
@@ -1196,73 +1196,73 @@ packages:
integrity: sha512-SdhaKdko6dpsSr0DldkESItVrnPYB1NS2NpShCSX5lc7SSQmLZt5Mug6t2xbiuVWEVDLZSuIAoQyYVBYp0dR5g==, integrity: sha512-SdhaKdko6dpsSr0DldkESItVrnPYB1NS2NpShCSX5lc7SSQmLZt5Mug6t2xbiuVWEVDLZSuIAoQyYVBYp0dR5g==,
} }
"@next/swc-darwin-arm64@15.5.3": "@next/swc-darwin-arm64@15.5.7":
resolution: resolution:
{ {
integrity: sha512-nzbHQo69+au9wJkGKTU9lP7PXv0d1J5ljFpvb+LnEomLtSbJkbZyEs6sbF3plQmiOB2l9OBtN2tNSvCH1nQ9Jg==, integrity: sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [arm64] cpu: [arm64]
os: [darwin] os: [darwin]
"@next/swc-darwin-x64@15.5.3": "@next/swc-darwin-x64@15.5.7":
resolution: resolution:
{ {
integrity: sha512-w83w4SkOOhekJOcA5HBvHyGzgV1W/XvOfpkrxIse4uPWhYTTRwtGEM4v/jiXwNSJvfRvah0H8/uTLBKRXlef8g==, integrity: sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [x64] cpu: [x64]
os: [darwin] os: [darwin]
"@next/swc-linux-arm64-gnu@15.5.3": "@next/swc-linux-arm64-gnu@15.5.7":
resolution: resolution:
{ {
integrity: sha512-+m7pfIs0/yvgVu26ieaKrifV8C8yiLe7jVp9SpcIzg7XmyyNE7toC1fy5IOQozmr6kWl/JONC51osih2RyoXRw==, integrity: sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [arm64] cpu: [arm64]
os: [linux] os: [linux]
"@next/swc-linux-arm64-musl@15.5.3": "@next/swc-linux-arm64-musl@15.5.7":
resolution: resolution:
{ {
integrity: sha512-u3PEIzuguSenoZviZJahNLgCexGFhso5mxWCrrIMdvpZn6lkME5vc/ADZG8UUk5K1uWRy4hqSFECrON6UKQBbQ==, integrity: sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [arm64] cpu: [arm64]
os: [linux] os: [linux]
"@next/swc-linux-x64-gnu@15.5.3": "@next/swc-linux-x64-gnu@15.5.7":
resolution: resolution:
{ {
integrity: sha512-lDtOOScYDZxI2BENN9m0pfVPJDSuUkAD1YXSvlJF0DKwZt0WlA7T7o3wrcEr4Q+iHYGzEaVuZcsIbCps4K27sA==, integrity: sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [x64] cpu: [x64]
os: [linux] os: [linux]
"@next/swc-linux-x64-musl@15.5.3": "@next/swc-linux-x64-musl@15.5.7":
resolution: resolution:
{ {
integrity: sha512-9vWVUnsx9PrY2NwdVRJ4dUURAQ8Su0sLRPqcCCxtX5zIQUBES12eRVHq6b70bbfaVaxIDGJN2afHui0eDm+cLg==, integrity: sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [x64] cpu: [x64]
os: [linux] os: [linux]
"@next/swc-win32-arm64-msvc@15.5.3": "@next/swc-win32-arm64-msvc@15.5.7":
resolution: resolution:
{ {
integrity: sha512-1CU20FZzY9LFQigRi6jM45oJMU3KziA5/sSG+dXeVaTm661snQP6xu3ykGxxwU5sLG3sh14teO/IOEPVsQMRfA==, integrity: sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [arm64] cpu: [arm64]
os: [win32] os: [win32]
"@next/swc-win32-x64-msvc@15.5.3": "@next/swc-win32-x64-msvc@15.5.7":
resolution: resolution:
{ {
integrity: sha512-JMoLAq3n3y5tKXPQwCK5c+6tmwkuFDa2XAxz8Wm4+IVthdBZdZGh+lmiLUHg9f9IDwIQpUjp+ysd6OkYTyZRZw==, integrity: sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw==,
} }
engines: { node: ">= 10" } engines: { node: ">= 10" }
cpu: [x64] cpu: [x64]
@@ -6863,10 +6863,10 @@ packages:
react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
next@15.5.3: next@15.5.7:
resolution: resolution:
{ {
integrity: sha512-r/liNAx16SQj4D+XH/oI1dlpv9tdKJ6cONYPwwcCC46f2NjpaRWY+EKCzULfgQYV6YKXjHBchff2IZBSlZmJNw==, integrity: sha512-+t2/0jIJ48kUpGKkdlhgkv+zPTEOoXyr60qXe68eB/pl3CMJaLeIGjzp5D6Oqt25hCBiBTt8wEeeAzfJvUKnPQ==,
} }
engines: { node: ^18.18.0 || ^19.8.0 || >= 20.0.0 } engines: { node: ^18.18.0 || ^19.8.0 || >= 20.0.0 }
hasBin: true hasBin: true
@@ -9877,34 +9877,34 @@ snapshots:
"@tybys/wasm-util": 0.10.0 "@tybys/wasm-util": 0.10.0
optional: true optional: true
"@next/env@15.5.3": {} "@next/env@15.5.7": {}
"@next/eslint-plugin-next@15.5.3": "@next/eslint-plugin-next@15.5.3":
dependencies: dependencies:
fast-glob: 3.3.1 fast-glob: 3.3.1
"@next/swc-darwin-arm64@15.5.3": "@next/swc-darwin-arm64@15.5.7":
optional: true optional: true
"@next/swc-darwin-x64@15.5.3": "@next/swc-darwin-x64@15.5.7":
optional: true optional: true
"@next/swc-linux-arm64-gnu@15.5.3": "@next/swc-linux-arm64-gnu@15.5.7":
optional: true optional: true
"@next/swc-linux-arm64-musl@15.5.3": "@next/swc-linux-arm64-musl@15.5.7":
optional: true optional: true
"@next/swc-linux-x64-gnu@15.5.3": "@next/swc-linux-x64-gnu@15.5.7":
optional: true optional: true
"@next/swc-linux-x64-musl@15.5.3": "@next/swc-linux-x64-musl@15.5.7":
optional: true optional: true
"@next/swc-win32-arm64-msvc@15.5.3": "@next/swc-win32-arm64-msvc@15.5.7":
optional: true optional: true
"@next/swc-win32-x64-msvc@15.5.3": "@next/swc-win32-x64-msvc@15.5.7":
optional: true optional: true
"@nodelib/fs.scandir@2.1.5": "@nodelib/fs.scandir@2.1.5":
@@ -10684,7 +10684,7 @@ snapshots:
"@sentry/core@8.55.0": {} "@sentry/core@8.55.0": {}
"@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)": "@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)":
dependencies: dependencies:
"@opentelemetry/api": 1.9.0 "@opentelemetry/api": 1.9.0
"@opentelemetry/semantic-conventions": 1.37.0 "@opentelemetry/semantic-conventions": 1.37.0
@@ -10698,7 +10698,7 @@ snapshots:
"@sentry/vercel-edge": 10.11.0 "@sentry/vercel-edge": 10.11.0
"@sentry/webpack-plugin": 4.3.0(webpack@5.101.3) "@sentry/webpack-plugin": 4.3.0(webpack@5.101.3)
chalk: 3.0.0 chalk: 3.0.0
next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
resolve: 1.22.8 resolve: 1.22.8
rollup: 4.50.1 rollup: 4.50.1
stacktrace-parser: 0.1.11 stacktrace-parser: 0.1.11
@@ -14093,13 +14093,13 @@ snapshots:
neo-async@2.6.2: {} neo-async@2.6.2: {}
next-auth@4.24.11(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1): next-auth@4.24.11(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies: dependencies:
"@babel/runtime": 7.28.2 "@babel/runtime": 7.28.2
"@panva/hkdf": 1.2.1 "@panva/hkdf": 1.2.1
cookie: 0.7.2 cookie: 0.7.2
jose: 4.15.9 jose: 4.15.9
next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
oauth: 0.9.15 oauth: 0.9.15
openid-client: 5.7.1 openid-client: 5.7.1
preact: 10.27.0 preact: 10.27.0
@@ -14113,9 +14113,9 @@ snapshots:
react: 18.3.1 react: 18.3.1
react-dom: 18.3.1(react@18.3.1) react-dom: 18.3.1(react@18.3.1)
next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0): next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0):
dependencies: dependencies:
"@next/env": 15.5.3 "@next/env": 15.5.7
"@swc/helpers": 0.5.15 "@swc/helpers": 0.5.15
caniuse-lite: 1.0.30001734 caniuse-lite: 1.0.30001734
postcss: 8.4.31 postcss: 8.4.31
@@ -14123,14 +14123,14 @@ snapshots:
react-dom: 18.3.1(react@18.3.1) react-dom: 18.3.1(react@18.3.1)
styled-jsx: 5.1.6(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react@18.3.1) styled-jsx: 5.1.6(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react@18.3.1)
optionalDependencies: optionalDependencies:
"@next/swc-darwin-arm64": 15.5.3 "@next/swc-darwin-arm64": 15.5.7
"@next/swc-darwin-x64": 15.5.3 "@next/swc-darwin-x64": 15.5.7
"@next/swc-linux-arm64-gnu": 15.5.3 "@next/swc-linux-arm64-gnu": 15.5.7
"@next/swc-linux-arm64-musl": 15.5.3 "@next/swc-linux-arm64-musl": 15.5.7
"@next/swc-linux-x64-gnu": 15.5.3 "@next/swc-linux-x64-gnu": 15.5.7
"@next/swc-linux-x64-musl": 15.5.3 "@next/swc-linux-x64-musl": 15.5.7
"@next/swc-win32-arm64-msvc": 15.5.3 "@next/swc-win32-arm64-msvc": 15.5.7
"@next/swc-win32-x64-msvc": 15.5.3 "@next/swc-win32-x64-msvc": 15.5.7
"@opentelemetry/api": 1.9.0 "@opentelemetry/api": 1.9.0
sass: 1.90.0 sass: 1.90.0
sharp: 0.34.3 sharp: 0.34.3
@@ -14159,12 +14159,12 @@ snapshots:
dependencies: dependencies:
path-key: 3.1.1 path-key: 3.1.1
nuqs@2.4.3(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1): nuqs@2.4.3(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1):
dependencies: dependencies:
mitt: 3.0.1 mitt: 3.0.1
react: 18.3.1 react: 18.3.1
optionalDependencies: optionalDependencies:
next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0) next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
oauth@0.9.15: {} oauth@0.9.15: {}