Mirror of https://github.com/Monadical-SAS/reflector.git (synced 2025-12-20 12:19:06 +00:00)

Commit: merge
.github/workflows/deploy.yml (vendored): 90 lines deleted
@@ -1,90 +0,0 @@
name: Build container/push to container registry

on: [workflow_dispatch]

env:
  # 950402358378.dkr.ecr.us-east-1.amazonaws.com/reflector
  AWS_REGION: us-east-1
  ECR_REPOSITORY: reflector

jobs:
  build:
    strategy:
      matrix:
        include:
          - platform: linux/amd64
            runner: linux-amd64
            arch: amd64
          - platform: linux/arm64
            runner: linux-arm64
            arch: arm64

    runs-on: ${{ matrix.runner }}

    permissions:
      contents: read

    outputs:
      registry: ${{ steps.login-ecr.outputs.registry }}

    steps:
      - uses: actions/checkout@v4

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push ${{ matrix.arch }}
        uses: docker/build-push-action@v5
        with:
          context: server
          platforms: ${{ matrix.platform }}
          push: true
          tags: ${{ steps.login-ecr.outputs.registry }}/${{ env.ECR_REPOSITORY }}:latest-${{ matrix.arch }}
          cache-from: type=gha,scope=${{ matrix.arch }}
          cache-to: type=gha,mode=max,scope=${{ matrix.arch }}
          github-token: ${{ secrets.GHA_CACHE_TOKEN }}
          provenance: false

  create-manifest:
    runs-on: ubuntu-latest
    needs: [build]

    permissions:
      deployments: write
      contents: read

    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}

      - name: Login to Amazon ECR
        uses: aws-actions/amazon-ecr-login@v2

      - name: Create and push multi-arch manifest
        run: |
          # Get the registry URL (since we can't easily access job outputs in matrix)
          ECR_REGISTRY=$(aws ecr describe-registry --query 'registryId' --output text).dkr.ecr.${{ env.AWS_REGION }}.amazonaws.com

          docker manifest create \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-amd64 \
            $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest-arm64

          docker manifest push $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest

          echo "✅ Multi-arch manifest pushed: $ECR_REGISTRY/${{ env.ECR_REPOSITORY }}:latest"
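The deleted manifest step derives the ECR registry hostname inline because job outputs are awkward to share out of a matrix build. A minimal sketch of the same derivation in Python, assuming boto3 with credentials configured (the function name is illustrative, not part of this repo):

    # Hypothetical sketch: reproduce the ECR_REGISTRY derivation from the
    # deleted "Create and push multi-arch manifest" step using boto3.
    import boto3

    def ecr_registry_url(region: str = "us-east-1") -> str:
        # describe_registry returns the account's registry ID (the AWS account ID)
        registry_id = boto3.client("ecr", region_name=region).describe_registry()["registryId"]
        return f"{registry_id}.dkr.ecr.{region}.amazonaws.com"

    # e.g. "950402358378.dkr.ecr.us-east-1.amazonaws.com"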
.github/workflows/dockerhub-backend.yml (vendored): 17 lines changed
@@ -1,13 +1,12 @@
 name: Build and Push Backend Docker Image (Docker Hub)

 on:
   push:
     branches:
       - main
-      - dockerhub-2
   pull_request:
     types:
       - closed
     paths:
-      - 'server/**'
-      - '.github/workflows/dockerhub-backend.yml'
+      - "server/**"
+      - ".github/workflows/dockerhub-backend.yml"
   workflow_dispatch:

 env:
@@ -17,6 +16,10 @@ env:
 jobs:
   build-and-push:
     runs-on: ubuntu-latest
+    if: |
+      github.event_name == 'workflow_dispatch' ||
+      (github.event.pull_request.merged == true &&
+      startsWith(github.event.pull_request.head.ref, 'release-please--branches--'))

     permissions:
       contents: read
@@ -39,7 +42,7 @@ jobs:
       images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
       tags: |
         type=ref,event=branch
-        type=sha,prefix={{branch}}-
+        type=ref,event=tag
         type=raw,value=latest,enable={{is_default_branch}}

     - name: Set up Docker Buildx
.github/workflows/dockerhub-frontend.yml (vendored): 36 lines changed
@@ -1,13 +1,12 @@
 name: Build and Push Frontend Docker Image

 on:
   push:
     branches:
       - main
-      - dockerhub-2
   pull_request:
     types:
       - closed
     paths:
-      - 'www/**'
-      - '.github/workflows/dockerhub-frontend.yml'
+      - "www/**"
+      - ".github/workflows/dockerhub-frontend.yml"
   workflow_dispatch:

 env:
@@ -17,6 +16,10 @@ env:
 jobs:
   build-and-push:
     runs-on: ubuntu-latest
+    if: |
+      github.event_name == 'workflow_dispatch' ||
+      (github.event.pull_request.merged == true &&
+      startsWith(github.event.pull_request.head.ref, 'release-please--branches--'))

     permissions:
       contents: read
@@ -39,8 +42,9 @@ jobs:
       images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
       tags: |
         type=ref,event=branch
-        type=sha,prefix={{branch}}-
+        type=ref,event=tag
         type=raw,value=latest,enable={{is_default_branch}}
+      github-token: ${{ secrets.GITHUB_TOKEN }}

     - name: Set up Docker Buildx
       uses: docker/setup-buildx-action@v3
@@ -55,4 +59,20 @@ jobs:
       labels: ${{ steps.meta.outputs.labels }}
       cache-from: type=gha
       cache-to: type=gha,mode=max
       platforms: linux/amd64,linux/arm64

+  deploy:
+    needs: build-and-push
+    runs-on: ubuntu-latest
+    if: success()
+    strategy:
+      matrix:
+        environment: [reflector-monadical, reflector-media]
+    environment: ${{ matrix.environment }}
+    steps:
+      - name: Trigger Coolify deployment
+        run: |
+          curl -X POST "${{ secrets.COOLIFY_WEBHOOK_URL }}" \
+            -H "Content-Type: application/json" \
+            -H "Authorization: Bearer ${{ secrets.COOLIFY_WEBHOOK_TOKEN }}" \
+            -f || (echo "Failed to trigger Coolify deployment for ${{ matrix.environment }}" && exit 1)
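The new deploy job is a plain webhook POST per environment. A rough Python equivalent of the curl call, assuming `requests`; the helper name and environment values are illustrative:

    # Hypothetical sketch of the "Trigger Coolify deployment" step.
    import os
    import requests

    def trigger_coolify(webhook_url: str, token: str, environment: str) -> None:
        resp = requests.post(
            webhook_url,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {token}",
            },
            timeout=30,
        )
        # Mirror `curl -f`: treat HTTP error statuses as failures
        if not resp.ok:
            raise RuntimeError(f"Failed to trigger Coolify deployment for {environment}")

    trigger_coolify(
        os.environ["COOLIFY_WEBHOOK_URL"],
        os.environ["COOLIFY_WEBHOOK_TOKEN"],
        "reflector-monadical",
    )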
.gitignore (vendored): 1 line changed
@@ -20,3 +20,4 @@ www/.env.development
 www/.env.production
 .playwright-mcp
 docs/pnpm-lock.yaml
+.secrets
.secrets.example (new file): 24 lines
@@ -0,0 +1,24 @@
# Example secrets file for GitHub Actions workflows
# Copy this to .secrets and fill in your values
# These secrets should be configured in GitHub repository settings:
# Settings > Secrets and variables > Actions

# DockerHub Configuration (required for frontend and backend deployment)
# Create a Docker Hub access token at https://hub.docker.com/settings/security
# Username: monadicalsas
DOCKERHUB_TOKEN=your-dockerhub-access-token

# GitHub Token (required for frontend and backend deployment)
# Used by docker/metadata-action for extracting image metadata
# Can use the default GITHUB_TOKEN or create a personal access token
GITHUB_TOKEN=your-github-token-or-use-default-GITHUB_TOKEN

# Coolify Deployment Webhook (required for frontend deployment)
# Used to trigger automatic deployment after image push
# Configure these secrets in GitHub Environments:
# Each environment should have:
# - COOLIFY_WEBHOOK_URL: The webhook URL for that specific deployment
# - COOLIFY_WEBHOOK_TOKEN: The webhook token (can be the same for both if using same token)

# Optional: GitHub Actions Cache Token (for local testing with act)
GHA_CACHE_TOKEN=your-github-token-or-empty
CHANGELOG.md: 16 lines changed
@@ -1,5 +1,21 @@
 # Changelog

+## [0.23.0](https://github.com/Monadical-SAS/reflector/compare/v0.22.4...v0.23.0) (2025-12-10)
+
+
+### Features
+
+* dockerhub ci ([#772](https://github.com/Monadical-SAS/reflector/issues/772)) ([00549f1](https://github.com/Monadical-SAS/reflector/commit/00549f153ade922cf4cb6c5358a7d11a39c426d2))
+* llm retries ([#739](https://github.com/Monadical-SAS/reflector/issues/739)) ([61f0e29](https://github.com/Monadical-SAS/reflector/commit/61f0e29d4c51eab54ee67af92141fbb171e8ccaa))
+
+
+### Bug Fixes
+
+* celery inspect bug sidestep in restart script ([#766](https://github.com/Monadical-SAS/reflector/issues/766)) ([ec17ed7](https://github.com/Monadical-SAS/reflector/commit/ec17ed7b587cf6ee143646baaee67a7c017044d4))
+* deploy frontend to coolify ([#779](https://github.com/Monadical-SAS/reflector/issues/779)) ([91650ec](https://github.com/Monadical-SAS/reflector/commit/91650ec65f65713faa7ee0dcfb75af427b7c4ba0))
+* hide rooms settings instead of disabling ([#763](https://github.com/Monadical-SAS/reflector/issues/763)) ([3ad78be](https://github.com/Monadical-SAS/reflector/commit/3ad78be7628c0d029296b301a0e87236c76b7598))
+* return participant emails from transcript endpoint ([#769](https://github.com/Monadical-SAS/reflector/issues/769)) ([d3a5cd1](https://github.com/Monadical-SAS/reflector/commit/d3a5cd12d2d0d9c32af2d5bd9322e030ef69b85d))
+
 ## [0.22.4](https://github.com/Monadical-SAS/reflector/compare/v0.22.3...v0.22.4) (2025-12-02)
@@ -126,6 +126,7 @@ markers = [
 select = [
     "I",       # isort - import sorting
     "F401",    # unused imports
     "E402",    # module level import not at top of file
+    "PLC0415", # import-outside-top-level - detect inline imports
 ]
@@ -1,13 +1,19 @@
 import asyncio
 import functools
+from uuid import uuid4

+from celery import current_task

 from reflector.db import get_database
+from reflector.llm import llm_session_id


 def asynctask(f):
     @functools.wraps(f)
     def wrapper(*args, **kwargs):
         async def run_with_db():
+            task_id = current_task.request.id if current_task else None
+            llm_session_id.set(task_id or f"random-{uuid4().hex}")
             database = get_database()
             await database.connect()
             try:
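The new lines seed `llm_session_id` (a `ContextVar` defined in `reflector.llm`) from the Celery task ID, so every LLM call within one task run shares a LiteLLM session without leaking into concurrent tasks. A standard-library sketch of that isolation property (the task names are illustrative):

    # Sketch: each asyncio task gets its own copy of the context, so setting
    # the ContextVar in one task does not overwrite another task's value.
    import asyncio
    from contextvars import ContextVar
    from uuid import uuid4

    llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)

    async def fake_task(task_id: str | None) -> str:
        llm_session_id.set(task_id or f"random-{uuid4().hex}")
        await asyncio.sleep(0)  # other tasks may run here
        return llm_session_id.get()  # still this task's own value

    async def main():
        ids = await asyncio.gather(fake_task("task-a"), fake_task(None))
        assert ids[0] == "task-a" and ids[1].startswith("random-")

    asyncio.run(main())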
@@ -1,14 +1,29 @@
 import logging
-from typing import Type, TypeVar
+from contextvars import ContextVar
+from typing import Generic, Type, TypeVar
+from uuid import uuid4

 from llama_index.core import Settings
 from llama_index.core.output_parsers import PydanticOutputParser
 from llama_index.core.program import LLMTextCompletionProgram
 from llama_index.core.response_synthesizers import TreeSummarize
+from llama_index.core.workflow import (
+    Context,
+    Event,
+    StartEvent,
+    StopEvent,
+    Workflow,
+    step,
+)
 from llama_index.llms.openai_like import OpenAILike
 from pydantic import BaseModel, ValidationError

 T = TypeVar("T", bound=BaseModel)
+OutputT = TypeVar("OutputT", bound=BaseModel)
+
+# Session ID for LiteLLM request grouping - set per processing run
+llm_session_id: ContextVar[str | None] = ContextVar("llm_session_id", default=None)

 logger = logging.getLogger(__name__)

 STRUCTURED_RESPONSE_PROMPT_TEMPLATE = """
 Based on the following analysis, provide the information in the requested JSON format:
@@ -20,6 +35,158 @@ Analysis:
 """


+class LLMParseError(Exception):
+    """Raised when LLM output cannot be parsed after retries."""
+
+    def __init__(self, output_cls: Type[BaseModel], error_msg: str, attempts: int):
+        self.output_cls = output_cls
+        self.error_msg = error_msg
+        self.attempts = attempts
+        super().__init__(
+            f"Failed to parse {output_cls.__name__} after {attempts} attempts: {error_msg}"
+        )
+
+
+class ExtractionDone(Event):
+    """Event emitted when LLM JSON formatting completes."""
+
+    output: str
+
+
+class ValidationErrorEvent(Event):
+    """Event emitted when validation fails."""
+
+    error: str
+    wrong_output: str
+
+
+class StructuredOutputWorkflow(Workflow, Generic[OutputT]):
+    """Workflow for structured output extraction with validation retry.
+
+    This workflow handles parse/validation retries only. Network error retries
+    are handled internally by Settings.llm (OpenAILike max_retries=3).
+    The caller should NOT wrap this workflow in additional retry logic.
+    """
+
+    def __init__(
+        self,
+        output_cls: Type[OutputT],
+        max_retries: int = 3,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.output_cls: Type[OutputT] = output_cls
+        self.max_retries = max_retries
+        self.output_parser = PydanticOutputParser(output_cls)
+
+    @step
+    async def extract(
+        self, ctx: Context, ev: StartEvent | ValidationErrorEvent
+    ) -> StopEvent | ExtractionDone:
+        """Extract structured data from text using two-step LLM process.
+
+        Step 1 (first call only): TreeSummarize generates text analysis
+        Step 2 (every call): Settings.llm.acomplete formats analysis as JSON
+        """
+        current_retries = await ctx.store.get("retries", default=0)
+        await ctx.store.set("retries", current_retries + 1)
+
+        if current_retries >= self.max_retries:
+            last_error = await ctx.store.get("last_error", default=None)
+            logger.error(
+                f"Max retries ({self.max_retries}) reached for {self.output_cls.__name__}"
+            )
+            return StopEvent(result={"error": last_error, "attempts": current_retries})
+
+        if isinstance(ev, StartEvent):
+            # First call: run TreeSummarize to get analysis, store in context
+            prompt = ev.get("prompt")
+            texts = ev.get("texts")
+            tone_name = ev.get("tone_name")
+            if not prompt or not isinstance(texts, list):
+                raise ValueError(
+                    "StartEvent must contain 'prompt' (str) and 'texts' (list)"
+                )
+
+            summarizer = TreeSummarize(verbose=False)
+            analysis = await summarizer.aget_response(
+                prompt, texts, tone_name=tone_name
+            )
+            await ctx.store.set("analysis", str(analysis))
+            reflection = ""
+        else:
+            # Retry: reuse analysis from context
+            analysis = await ctx.store.get("analysis")
+            if not analysis:
+                raise RuntimeError("Internal error: analysis not found in context")
+
+            wrong_output = ev.wrong_output
+            if len(wrong_output) > 2000:
+                wrong_output = wrong_output[:2000] + "... [truncated]"
+            reflection = (
+                f"\n\nYour previous response could not be parsed:\n{wrong_output}\n\n"
+                f"Error:\n{ev.error}\n\n"
+                "Please try again. Return ONLY valid JSON matching the schema above, "
+                "with no markdown formatting or extra text."
+            )
+
+        # Step 2: Format analysis as JSON using LLM completion
+        format_instructions = self.output_parser.format(
+            "Please structure the above information in the following JSON format:"
+        )
+
+        json_prompt = STRUCTURED_RESPONSE_PROMPT_TEMPLATE.format(
+            analysis=analysis,
+            format_instructions=format_instructions + reflection,
+        )
+
+        # Network retries handled by OpenAILike (max_retries=3)
+        response = await Settings.llm.acomplete(json_prompt)
+        return ExtractionDone(output=response.text)
+
+    @step
+    async def validate(
+        self, ctx: Context, ev: ExtractionDone
+    ) -> StopEvent | ValidationErrorEvent:
+        """Validate extracted output against Pydantic schema."""
+        raw_output = ev.output
+        retries = await ctx.store.get("retries", default=0)
+
+        try:
+            parsed = self.output_parser.parse(raw_output)
+            if retries > 1:
+                logger.info(
+                    f"LLM parse succeeded on attempt {retries}/{self.max_retries} "
+                    f"for {self.output_cls.__name__}"
+                )
+            return StopEvent(result={"success": parsed})
+
+        except (ValidationError, ValueError) as e:
+            error_msg = self._format_error(e, raw_output)
+            await ctx.store.set("last_error", error_msg)
+
+            logger.error(
+                f"LLM parse error (attempt {retries}/{self.max_retries}): "
+                f"{type(e).__name__}: {e}\nRaw response: {raw_output[:500]}"
+            )
+
+            return ValidationErrorEvent(
+                error=error_msg,
+                wrong_output=raw_output,
+            )
+
+    def _format_error(self, error: Exception, raw_output: str) -> str:
+        """Format error for LLM feedback."""
+        if isinstance(error, ValidationError):
+            error_messages = []
+            for err in error.errors():
+                field = ".".join(str(loc) for loc in err["loc"])
+                error_messages.append(f"- {err['msg']} in field '{field}'")
+            return "Schema validation errors:\n" + "\n".join(error_messages)
+        else:
+            return f"Parse error: {str(error)}"
+
+
 class LLM:
     def __init__(self, settings, temperature: float = 0.4, max_tokens: int = 2048):
         self.settings_obj = settings
@@ -30,11 +197,12 @@ class LLM:
         self.temperature = temperature
         self.max_tokens = max_tokens

         # Configure llamaindex Settings
         self._configure_llamaindex()

     def _configure_llamaindex(self):
         """Configure llamaindex Settings with OpenAILike LLM"""
+        session_id = llm_session_id.get() or f"fallback-{uuid4().hex}"
         Settings.llm = OpenAILike(
             model=self.model_name,
             api_base=self.url,
@@ -44,6 +212,7 @@ class LLM:
             is_function_calling_model=False,
             temperature=self.temperature,
             max_tokens=self.max_tokens,
+            additional_kwargs={"extra_body": {"litellm_session_id": session_id}},
         )

     async def get_response(
@@ -61,43 +230,25 @@ class LLM:
         output_cls: Type[T],
         tone_name: str | None = None,
     ) -> T:
-        """Get structured output from LLM for non-function-calling models"""
-        logger = logging.getLogger(__name__)
-
-        summarizer = TreeSummarize(verbose=True)
-        response = await summarizer.aget_response(prompt, texts, tone_name=tone_name)
-
-        output_parser = PydanticOutputParser(output_cls)
-
-        program = LLMTextCompletionProgram.from_defaults(
-            output_parser=output_parser,
-            prompt_template_str=STRUCTURED_RESPONSE_PROMPT_TEMPLATE,
-            verbose=False,
+        """Get structured output from LLM with validation retry via Workflow."""
+        workflow = StructuredOutputWorkflow(
+            output_cls=output_cls,
+            max_retries=self.settings_obj.LLM_PARSE_MAX_RETRIES + 1,
+            timeout=120,
         )

-        format_instructions = output_parser.format(
-            "Please structure the above information in the following JSON format:"
+        result = await workflow.run(
+            prompt=prompt,
+            texts=texts,
+            tone_name=tone_name,
         )

-        try:
-            output = await program.acall(
-                analysis=str(response), format_instructions=format_instructions
+        if "error" in result:
+            error_msg = result["error"] or "Max retries exceeded"
+            raise LLMParseError(
+                output_cls=output_cls,
+                error_msg=error_msg,
+                attempts=result.get("attempts", 0),
             )
-        except ValidationError as e:
-            # Extract the raw JSON from the error details
-            errors = e.errors()
-            if errors and "input" in errors[0]:
-                raw_json = errors[0]["input"]
-                logger.error(
-                    f"JSON validation failed for {output_cls.__name__}. "
-                    f"Full raw JSON output:\n{raw_json}\n"
-                    f"Validation errors: {errors}"
-                )
-            else:
-                logger.error(
-                    f"JSON validation failed for {output_cls.__name__}. "
-                    f"Validation errors: {errors}"
-                )
-            raise
-
-        return output
+        return result["success"]
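After this change a caller gets either a validated Pydantic object or an `LLMParseError`; the docstring above is explicit that no extra retry wrapping should be added. A minimal usage sketch under that contract, assuming an `LLM` instance configured as above (`TopicSummary` and the prompt are illustrative, not from the repo):

    # Sketch: calling the rewritten get_structured_response.
    from pydantic import BaseModel

    from reflector.llm import LLMParseError

    class TopicSummary(BaseModel):  # hypothetical output model
        title: str
        summary: str

    async def summarize(llm, texts: list[str]) -> TopicSummary | None:
        try:
            return await llm.get_structured_response(
                prompt="Summarize the transcript as a topic.",
                texts=texts,
                output_cls=TopicSummary,
            )
        except LLMParseError as e:
            # Raised only after the workflow exhausts its validation retries
            print(f"Giving up after {e.attempts} attempts: {e.error_msg}")
            return None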
@@ -340,7 +340,6 @@ async def task_send_webhook_if_needed(*, transcript_id: str):
 @asynctask
 async def task_pipeline_file_process(*, transcript_id: str):
     """Celery task for file pipeline processing"""
-
     transcript = await transcripts_controller.get_by_id(transcript_id)
     if not transcript:
         raise Exception(f"Transcript {transcript_id} not found")
@@ -160,7 +160,10 @@ def dispatch_transcript_processing(config: ProcessingConfig) -> AsyncResult:
 def task_is_scheduled_or_active(task_name: str, **kwargs):
     inspect = celery.current_app.control.inspect()

-    for worker, tasks in (inspect.scheduled() | inspect.active()).items():
+    scheduled = inspect.scheduled() or {}
+    active = inspect.active() or {}
+    all = scheduled | active
+    for worker, tasks in all.items():
         for task in tasks:
             if task["name"] == task_name and task["kwargs"] == kwargs:
                 return True
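The fix guards against `inspect.scheduled()` and `inspect.active()` returning `None` when no worker replies: `None | {}` raises a `TypeError`, while `or {}` degrades to an empty scan. A tiny sketch of the failure mode:

    # Sketch: why the `or {}` guards matter.
    scheduled = None  # what inspect.scheduled() returns with no reachable workers

    try:
        (scheduled | {}).items()  # old code path
    except TypeError as exc:
        print(f"old code crashes: {exc}")  # unsupported operand type(s) for |

    merged = (scheduled or {}) | {}  # new code path: falls back to an empty dict
    assert merged == {}  # the for-loop then simply iterates nothing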
@@ -76,6 +76,10 @@ class Settings(BaseSettings):
     LLM_API_KEY: str | None = None
     LLM_CONTEXT_WINDOW: int = 16000

+    LLM_PARSE_MAX_RETRIES: int = (
+        3  # Max retries for JSON/validation errors (total attempts = retries + 1)
+    )
+
     # Diarization
     # backends:
     # - pyannote: in-process model loading (no HTTP, runs in same process)
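Note the off-by-one convention: the setting counts retries, while `get_structured_response` passes `LLM_PARSE_MAX_RETRIES + 1` as the workflow's `max_retries`, which caps total formatting attempts (the tests below assert exactly `LLM_PARSE_MAX_RETRIES + 1` calls). A sketch of the arithmetic under the default value:

    # Sketch: attempt accounting with the default setting.
    LLM_PARSE_MAX_RETRIES = 3                         # from Settings
    workflow_max_retries = LLM_PARSE_MAX_RETRIES + 1  # passed to StructuredOutputWorkflow
    total_format_calls = workflow_max_retries         # 1 initial attempt + 3 retries
    assert total_format_calls == 4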
@@ -318,6 +318,14 @@ async def dummy_storage():
     yield


+@pytest.fixture
+def test_settings():
+    """Provide isolated settings for tests to avoid modifying global settings"""
+    from reflector.settings import Settings
+
+    return Settings()
+
+
 @pytest.fixture(scope="session")
 def celery_enable_logging():
     return True
server/tests/test_llm_retry.py (new file): 357 lines
@@ -0,0 +1,357 @@
"""Tests for LLM parse error recovery using llama-index Workflow"""

from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from pydantic import BaseModel, Field
from workflows.errors import WorkflowRuntimeError

from reflector.llm import LLM, LLMParseError, StructuredOutputWorkflow


class TestResponse(BaseModel):
    """Test response model for structured output"""

    title: str = Field(description="A title")
    summary: str = Field(description="A summary")
    confidence: float = Field(description="Confidence score", ge=0, le=1)


def make_completion_response(text: str):
    """Create a mock CompletionResponse with .text attribute"""
    response = MagicMock()
    response.text = text
    return response


class TestLLMParseErrorRecovery:
    """Test parse error recovery with Workflow feedback loop"""

    @pytest.mark.asyncio
    async def test_parse_error_recovery_with_feedback(self, test_settings):
        """Test that parse errors trigger retry with error feedback"""
        llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            # TreeSummarize returns plain text analysis (step 1)
            mock_summarizer.aget_response = AsyncMock(
                return_value="The analysis shows a test with summary and high confidence."
            )

            call_count = {"count": 0}

            async def acomplete_handler(prompt, *args, **kwargs):
                call_count["count"] += 1
                if call_count["count"] == 1:
                    # First JSON formatting call returns invalid JSON
                    return make_completion_response('{"title": "Test"}')
                else:
                    # Second call should have error feedback in prompt
                    assert "Your previous response could not be parsed:" in prompt
                    assert '{"title": "Test"}' in prompt
                    assert "Error:" in prompt
                    assert "Please try again" in prompt
                    return make_completion_response(
                        '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
                    )

            mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)

            result = await llm.get_structured_response(
                prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
            )

            assert result.title == "Test"
            assert result.summary == "Summary"
            assert result.confidence == 0.95
            # TreeSummarize called once, Settings.llm.acomplete called twice
            assert mock_summarizer.aget_response.call_count == 1
            assert call_count["count"] == 2

    @pytest.mark.asyncio
    async def test_max_parse_retry_attempts(self, test_settings):
        """Test that parse error retry stops after max attempts"""
        llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            # Always return invalid JSON from acomplete
            mock_settings.llm.acomplete = AsyncMock(
                return_value=make_completion_response(
                    '{"invalid": "missing required fields"}'
                )
            )

            with pytest.raises(LLMParseError, match="Failed to parse"):
                await llm.get_structured_response(
                    prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
                )

            expected_attempts = test_settings.LLM_PARSE_MAX_RETRIES + 1
            # TreeSummarize called once, acomplete called max_retries times
            assert mock_summarizer.aget_response.call_count == 1
            assert mock_settings.llm.acomplete.call_count == expected_attempts

    @pytest.mark.asyncio
    async def test_raw_response_logging_on_parse_error(self, test_settings, caplog):
        """Test that raw response is logged when parse error occurs"""
        llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
            caplog.at_level("ERROR"),
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            call_count = {"count": 0}

            async def acomplete_handler(*args, **kwargs):
                call_count["count"] += 1
                if call_count["count"] == 1:
                    return make_completion_response('{"title": "Test"}')  # Invalid
                return make_completion_response(
                    '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
                )

            mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)

            result = await llm.get_structured_response(
                prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
            )

            assert result.title == "Test"

            error_logs = [r for r in caplog.records if r.levelname == "ERROR"]
            raw_response_logged = any("Raw response:" in r.message for r in error_logs)
            assert raw_response_logged, "Raw response should be logged on parse error"

    @pytest.mark.asyncio
    async def test_multiple_validation_errors_in_feedback(self, test_settings):
        """Test that validation errors are included in feedback"""
        llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            call_count = {"count": 0}

            async def acomplete_handler(prompt, *args, **kwargs):
                call_count["count"] += 1
                if call_count["count"] == 1:
                    # Missing title and summary
                    return make_completion_response('{"confidence": 0.5}')
                else:
                    # Should have schema validation errors in prompt
                    assert (
                        "Schema validation errors" in prompt
                        or "error" in prompt.lower()
                    )
                    return make_completion_response(
                        '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
                    )

            mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)

            result = await llm.get_structured_response(
                prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
            )

            assert result.title == "Test"
            assert call_count["count"] == 2

    @pytest.mark.asyncio
    async def test_success_on_first_attempt(self, test_settings):
        """Test that no retry happens when first attempt succeeds"""
        llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            mock_settings.llm.acomplete = AsyncMock(
                return_value=make_completion_response(
                    '{"title": "Test", "summary": "Summary", "confidence": 0.95}'
                )
            )

            result = await llm.get_structured_response(
                prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
            )

            assert result.title == "Test"
            assert result.summary == "Summary"
            assert result.confidence == 0.95
            assert mock_summarizer.aget_response.call_count == 1
            assert mock_settings.llm.acomplete.call_count == 1


class TestStructuredOutputWorkflow:
    """Direct tests for the StructuredOutputWorkflow"""

    @pytest.mark.asyncio
    async def test_workflow_retries_on_validation_error(self):
        """Test workflow retries when validation fails"""
        workflow = StructuredOutputWorkflow(
            output_cls=TestResponse,
            max_retries=3,
            timeout=30,
        )

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            call_count = {"count": 0}

            async def acomplete_handler(*args, **kwargs):
                call_count["count"] += 1
                if call_count["count"] < 2:
                    return make_completion_response('{"title": "Only title"}')
                return make_completion_response(
                    '{"title": "Test", "summary": "Summary", "confidence": 0.9}'
                )

            mock_settings.llm.acomplete = AsyncMock(side_effect=acomplete_handler)

            result = await workflow.run(
                prompt="Extract data",
                texts=["Some text"],
                tone_name=None,
            )

            assert "success" in result
            assert result["success"].title == "Test"
            assert call_count["count"] == 2

    @pytest.mark.asyncio
    async def test_workflow_returns_error_after_max_retries(self):
        """Test workflow returns error after exhausting retries"""
        workflow = StructuredOutputWorkflow(
            output_cls=TestResponse,
            max_retries=2,
            timeout=30,
        )

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            # Always return invalid JSON
            mock_settings.llm.acomplete = AsyncMock(
                return_value=make_completion_response('{"invalid": true}')
            )

            result = await workflow.run(
                prompt="Extract data",
                texts=["Some text"],
                tone_name=None,
            )

            assert "error" in result
            # TreeSummarize called once, acomplete called max_retries times
            assert mock_summarizer.aget_response.call_count == 1
            assert mock_settings.llm.acomplete.call_count == 2


class TestNetworkErrorRetries:
    """Test that network error retries are handled by OpenAILike, not Workflow"""

    @pytest.mark.asyncio
    async def test_network_error_propagates_after_openai_retries(self, test_settings):
        """Test that network errors are retried by OpenAILike and then propagate.

        Network retries are handled by OpenAILike (max_retries=3), not by our
        StructuredOutputWorkflow. This test verifies that network errors propagate
        up after OpenAILike exhausts its retries.
        """
        llm = LLM(settings=test_settings, temperature=0.4, max_tokens=100)

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            # Simulate network error from acomplete (after OpenAILike retries exhausted)
            network_error = ConnectionError("Connection refused")
            mock_settings.llm.acomplete = AsyncMock(side_effect=network_error)

            # Network error wrapped in WorkflowRuntimeError
            with pytest.raises(WorkflowRuntimeError, match="Connection refused"):
                await llm.get_structured_response(
                    prompt="Test prompt", texts=["Test text"], output_cls=TestResponse
                )

            # acomplete called only once - network error propagates, not retried by Workflow
            assert mock_settings.llm.acomplete.call_count == 1

    @pytest.mark.asyncio
    async def test_network_error_not_retried_by_workflow(self, test_settings):
        """Test that Workflow does NOT retry network errors (OpenAILike handles those).

        This verifies the separation of concerns:
        - StructuredOutputWorkflow: retries parse/validation errors
        - OpenAILike: retries network errors (internally, max_retries=3)
        """
        workflow = StructuredOutputWorkflow(
            output_cls=TestResponse,
            max_retries=3,
            timeout=30,
        )

        with (
            patch("reflector.llm.TreeSummarize") as mock_summarize,
            patch("reflector.llm.Settings") as mock_settings,
        ):
            mock_summarizer = MagicMock()
            mock_summarize.return_value = mock_summarizer
            mock_summarizer.aget_response = AsyncMock(return_value="Some analysis")

            # Network error should propagate immediately, not trigger Workflow retry
            mock_settings.llm.acomplete = AsyncMock(
                side_effect=TimeoutError("Request timed out")
            )

            # Network error wrapped in WorkflowRuntimeError
            with pytest.raises(WorkflowRuntimeError, match="Request timed out"):
                await workflow.run(
                    prompt="Extract data",
                    texts=["Some text"],
                    tone_name=None,
                )

            # Only called once - Workflow doesn't retry network errors
            assert mock_settings.llm.acomplete.call_count == 1
@@ -31,7 +31,7 @@
     "ioredis": "^5.7.0",
     "jest-worker": "^29.6.2",
     "lucide-react": "^0.525.0",
-    "next": "^15.5.3",
+    "next": "^15.5.7",
     "next-auth": "^4.24.7",
     "next-themes": "^0.4.6",
     "nuqs": "^2.4.3",
www/pnpm-lock.yaml (generated): 100 lines changed
@@ -27,7 +27,7 @@ importers:
       version: 0.2.3(@fortawesome/fontawesome-svg-core@6.7.2)(react@18.3.1)
     "@sentry/nextjs":
       specifier: ^10.11.0
-      version: 10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)
+      version: 10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)
     "@tanstack/react-query":
       specifier: ^5.85.9
       version: 5.85.9(react@18.3.1)
@@ -62,17 +62,17 @@ importers:
       specifier: ^0.525.0
       version: 0.525.0(react@18.3.1)
     next:
-      specifier: ^15.5.3
-      version: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
+      specifier: ^15.5.7
+      version: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
     next-auth:
       specifier: ^4.24.7
-      version: 4.24.11(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+      version: 4.24.11(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
     next-themes:
       specifier: ^0.4.6
       version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
     nuqs:
       specifier: ^2.4.3
-      version: 2.4.3(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)
+      version: 2.4.3(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)
     openapi-fetch:
       specifier: ^0.14.0
       version: 0.14.0
@@ -1184,10 +1184,10 @@ packages:
        integrity: sha512-ZVWUcfwY4E/yPitQJl481FjFo3K22D6qF0DuFH6Y/nbnE11GY5uguDxZMGXPQ8WQ0128MXQD7TnfHyK4oWoIJQ==,
      }

-  "@next/env@15.5.3":
+  "@next/env@15.5.7":
    resolution:
      {
-        integrity: sha512-RSEDTRqyihYXygx/OJXwvVupfr9m04+0vH8vyy0HfZ7keRto6VX9BbEk0J2PUk0VGy6YhklJUSrgForov5F9pw==,
+        integrity: sha512-4h6Y2NyEkIEN7Z8YxkA27pq6zTkS09bUSYC0xjd0NpwFxjnIKeZEeH591o5WECSmjpUhLn3H2QLJcDye3Uzcvg==,
      }

  "@next/eslint-plugin-next@15.5.3":
@@ -1196,73 +1196,73 @@
        integrity: sha512-SdhaKdko6dpsSr0DldkESItVrnPYB1NS2NpShCSX5lc7SSQmLZt5Mug6t2xbiuVWEVDLZSuIAoQyYVBYp0dR5g==,
      }

-  "@next/swc-darwin-arm64@15.5.3":
+  "@next/swc-darwin-arm64@15.5.7":
    resolution:
      {
-        integrity: sha512-nzbHQo69+au9wJkGKTU9lP7PXv0d1J5ljFpvb+LnEomLtSbJkbZyEs6sbF3plQmiOB2l9OBtN2tNSvCH1nQ9Jg==,
+        integrity: sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw==,
      }
    engines: { node: ">= 10" }
    cpu: [arm64]
    os: [darwin]

-  "@next/swc-darwin-x64@15.5.3":
+  "@next/swc-darwin-x64@15.5.7":
    resolution:
      {
-        integrity: sha512-w83w4SkOOhekJOcA5HBvHyGzgV1W/XvOfpkrxIse4uPWhYTTRwtGEM4v/jiXwNSJvfRvah0H8/uTLBKRXlef8g==,
+        integrity: sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg==,
      }
    engines: { node: ">= 10" }
    cpu: [x64]
    os: [darwin]

-  "@next/swc-linux-arm64-gnu@15.5.3":
+  "@next/swc-linux-arm64-gnu@15.5.7":
    resolution:
      {
-        integrity: sha512-+m7pfIs0/yvgVu26ieaKrifV8C8yiLe7jVp9SpcIzg7XmyyNE7toC1fy5IOQozmr6kWl/JONC51osih2RyoXRw==,
+        integrity: sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA==,
      }
    engines: { node: ">= 10" }
    cpu: [arm64]
    os: [linux]

-  "@next/swc-linux-arm64-musl@15.5.3":
+  "@next/swc-linux-arm64-musl@15.5.7":
    resolution:
      {
-        integrity: sha512-u3PEIzuguSenoZviZJahNLgCexGFhso5mxWCrrIMdvpZn6lkME5vc/ADZG8UUk5K1uWRy4hqSFECrON6UKQBbQ==,
+        integrity: sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==,
      }
    engines: { node: ">= 10" }
    cpu: [arm64]
    os: [linux]

-  "@next/swc-linux-x64-gnu@15.5.3":
+  "@next/swc-linux-x64-gnu@15.5.7":
    resolution:
      {
-        integrity: sha512-lDtOOScYDZxI2BENN9m0pfVPJDSuUkAD1YXSvlJF0DKwZt0WlA7T7o3wrcEr4Q+iHYGzEaVuZcsIbCps4K27sA==,
+        integrity: sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==,
      }
    engines: { node: ">= 10" }
    cpu: [x64]
    os: [linux]

-  "@next/swc-linux-x64-musl@15.5.3":
+  "@next/swc-linux-x64-musl@15.5.7":
    resolution:
      {
-        integrity: sha512-9vWVUnsx9PrY2NwdVRJ4dUURAQ8Su0sLRPqcCCxtX5zIQUBES12eRVHq6b70bbfaVaxIDGJN2afHui0eDm+cLg==,
+        integrity: sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==,
      }
    engines: { node: ">= 10" }
    cpu: [x64]
    os: [linux]

-  "@next/swc-win32-arm64-msvc@15.5.3":
+  "@next/swc-win32-arm64-msvc@15.5.7":
    resolution:
      {
-        integrity: sha512-1CU20FZzY9LFQigRi6jM45oJMU3KziA5/sSG+dXeVaTm661snQP6xu3ykGxxwU5sLG3sh14teO/IOEPVsQMRfA==,
+        integrity: sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==,
      }
    engines: { node: ">= 10" }
    cpu: [arm64]
    os: [win32]

-  "@next/swc-win32-x64-msvc@15.5.3":
+  "@next/swc-win32-x64-msvc@15.5.7":
    resolution:
      {
-        integrity: sha512-JMoLAq3n3y5tKXPQwCK5c+6tmwkuFDa2XAxz8Wm4+IVthdBZdZGh+lmiLUHg9f9IDwIQpUjp+ysd6OkYTyZRZw==,
+        integrity: sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw==,
      }
    engines: { node: ">= 10" }
    cpu: [x64]
@@ -6863,10 +6863,10 @@ packages:
      react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
      react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc

-  next@15.5.3:
+  next@15.5.7:
    resolution:
      {
-        integrity: sha512-r/liNAx16SQj4D+XH/oI1dlpv9tdKJ6cONYPwwcCC46f2NjpaRWY+EKCzULfgQYV6YKXjHBchff2IZBSlZmJNw==,
+        integrity: sha512-+t2/0jIJ48kUpGKkdlhgkv+zPTEOoXyr60qXe68eB/pl3CMJaLeIGjzp5D6Oqt25hCBiBTt8wEeeAzfJvUKnPQ==,
      }
    engines: { node: ^18.18.0 || ^19.8.0 || >= 20.0.0 }
    hasBin: true
@@ -9877,34 +9877,34 @@ snapshots:
      "@tybys/wasm-util": 0.10.0
    optional: true

-  "@next/env@15.5.3": {}
+  "@next/env@15.5.7": {}

  "@next/eslint-plugin-next@15.5.3":
    dependencies:
      fast-glob: 3.3.1

-  "@next/swc-darwin-arm64@15.5.3":
+  "@next/swc-darwin-arm64@15.5.7":
    optional: true

-  "@next/swc-darwin-x64@15.5.3":
+  "@next/swc-darwin-x64@15.5.7":
    optional: true

-  "@next/swc-linux-arm64-gnu@15.5.3":
+  "@next/swc-linux-arm64-gnu@15.5.7":
    optional: true

-  "@next/swc-linux-arm64-musl@15.5.3":
+  "@next/swc-linux-arm64-musl@15.5.7":
    optional: true

-  "@next/swc-linux-x64-gnu@15.5.3":
+  "@next/swc-linux-x64-gnu@15.5.7":
    optional: true

-  "@next/swc-linux-x64-musl@15.5.3":
+  "@next/swc-linux-x64-musl@15.5.7":
    optional: true

-  "@next/swc-win32-arm64-msvc@15.5.3":
+  "@next/swc-win32-arm64-msvc@15.5.7":
    optional: true

-  "@next/swc-win32-x64-msvc@15.5.3":
+  "@next/swc-win32-x64-msvc@15.5.7":
    optional: true

  "@nodelib/fs.scandir@2.1.5":
@@ -10684,7 +10684,7 @@ snapshots:

  "@sentry/core@8.55.0": {}

-  "@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)":
+  "@sentry/nextjs@10.11.0(@opentelemetry/context-async-hooks@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/core@2.1.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.1.0(@opentelemetry/api@1.9.0))(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1)(webpack@5.101.3)":
    dependencies:
      "@opentelemetry/api": 1.9.0
      "@opentelemetry/semantic-conventions": 1.37.0
@@ -10698,7 +10698,7 @@ snapshots:
      "@sentry/vercel-edge": 10.11.0
      "@sentry/webpack-plugin": 4.3.0(webpack@5.101.3)
      chalk: 3.0.0
-      next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
+      next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
      resolve: 1.22.8
      rollup: 4.50.1
      stacktrace-parser: 0.1.11
@@ -14093,13 +14093,13 @@ snapshots:

  neo-async@2.6.2: {}

-  next-auth@4.24.11(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+  next-auth@4.24.11(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
    dependencies:
      "@babel/runtime": 7.28.2
      "@panva/hkdf": 1.2.1
      cookie: 0.7.2
      jose: 4.15.9
-      next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
+      next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
      oauth: 0.9.15
      openid-client: 5.7.1
      preact: 10.27.0
@@ -14113,9 +14113,9 @@ snapshots:
      react: 18.3.1
      react-dom: 18.3.1(react@18.3.1)

-  next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0):
+  next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0):
    dependencies:
-      "@next/env": 15.5.3
+      "@next/env": 15.5.7
      "@swc/helpers": 0.5.15
      caniuse-lite: 1.0.30001734
      postcss: 8.4.31
@@ -14123,14 +14123,14 @@ snapshots:
      react-dom: 18.3.1(react@18.3.1)
      styled-jsx: 5.1.6(@babel/core@7.28.3)(babel-plugin-macros@3.1.0)(react@18.3.1)
    optionalDependencies:
-      "@next/swc-darwin-arm64": 15.5.3
-      "@next/swc-darwin-x64": 15.5.3
-      "@next/swc-linux-arm64-gnu": 15.5.3
-      "@next/swc-linux-arm64-musl": 15.5.3
-      "@next/swc-linux-x64-gnu": 15.5.3
-      "@next/swc-linux-x64-musl": 15.5.3
-      "@next/swc-win32-arm64-msvc": 15.5.3
-      "@next/swc-win32-x64-msvc": 15.5.3
+      "@next/swc-darwin-arm64": 15.5.7
+      "@next/swc-darwin-x64": 15.5.7
+      "@next/swc-linux-arm64-gnu": 15.5.7
+      "@next/swc-linux-arm64-musl": 15.5.7
+      "@next/swc-linux-x64-gnu": 15.5.7
+      "@next/swc-linux-x64-musl": 15.5.7
+      "@next/swc-win32-arm64-msvc": 15.5.7
+      "@next/swc-win32-x64-msvc": 15.5.7
      "@opentelemetry/api": 1.9.0
      sass: 1.90.0
      sharp: 0.34.3
@@ -14159,12 +14159,12 @@ snapshots:
    dependencies:
      path-key: 3.1.1

-  nuqs@2.4.3(next@15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1):
+  nuqs@2.4.3(next@15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0))(react@18.3.1):
    dependencies:
      mitt: 3.0.1
      react: 18.3.1
    optionalDependencies:
-      next: 15.5.3(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)
+      next: 15.5.7(@babel/core@7.28.3)(@opentelemetry/api@1.9.0)(babel-plugin-macros@3.1.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(sass@1.90.0)

  oauth@0.9.15: {}