Compare commits


2 Commits

Author SHA1 Message Date
Igor Loskutov
8373874cbd feat: add LLM_ENABLE_THINKING env var for thinking-mode LLMs
Some LLMs (e.g. GLM-4.5-Air) default to thinking mode, which returns
content in reasoning_content instead of the content field, breaking
structured-output parsing. This setting passes enable_thinking through
extra_body so the behavior can be controlled per deployment.

Three states: None (default, don't send), True, False.
2026-02-06 20:19:53 -05:00
cd2255cfbc chore(main): release 0.33.0 (#847) 2026-02-06 18:12:06 -05:00
16 changed files with 139 additions and 1285 deletions
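
For reference, the three states described in the first commit map onto the OpenAI-compatible request body as sketched below. This is a minimal illustration only: the build_extra_body helper and the session id value are made up for the example and simply mirror the llm.py change further down.

    def build_extra_body(enable_thinking: bool | None, session_id: str) -> dict:
        # Mirrors the llm.py diff: always carry the LiteLLM session id,
        # and only add enable_thinking when the setting is explicitly True/False.
        extra_body: dict = {"litellm_session_id": session_id}
        if enable_thinking is not None:
            extra_body["enable_thinking"] = enable_thinking
        return extra_body

    assert build_extra_body(None, "s-1") == {"litellm_session_id": "s-1"}
    assert build_extra_body(False, "s-1") == {"litellm_session_id": "s-1", "enable_thinking": False}
    assert build_extra_body(True, "s-1") == {"litellm_session_id": "s-1", "enable_thinking": True}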

View File

@@ -13,9 +13,6 @@ on:
 jobs:
   test-next-server:
     runs-on: ubuntu-latest
-    concurrency:
-      group: test-next-server-${{ github.ref }}
-      cancel-in-progress: true
     defaults:
       run:
@@ -24,12 +21,17 @@ jobs:
     steps:
       - uses: actions/checkout@v4
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
       - name: Install pnpm
         uses: pnpm/action-setup@v4
         with:
-          package_json_file: './www/package.json'
-      - name: Setup Node.js
+          version: 8
+      - name: Setup Node.js cache
         uses: actions/setup-node@v4
         with:
           node-version: '20'

View File

@@ -1,5 +1,17 @@
 # Changelog
+## [0.33.0](https://github.com/Monadical-SAS/reflector/compare/v0.32.2...v0.33.0) (2026-02-05)
+### Features
+* Daily+hatchet default ([#846](https://github.com/Monadical-SAS/reflector/issues/846)) ([15ab2e3](https://github.com/Monadical-SAS/reflector/commit/15ab2e306eacf575494b4b5d2b2ad779d44a1c7f))
+### Bug Fixes
+* websocket tests ([#825](https://github.com/Monadical-SAS/reflector/issues/825)) ([1ce1c7a](https://github.com/Monadical-SAS/reflector/commit/1ce1c7a910b6c374115d2437b17f9d288ef094dc))
 ## [0.32.2](https://github.com/Monadical-SAS/reflector/compare/v0.32.1...v0.32.2) (2026-02-03)

View File

@@ -104,26 +104,6 @@ class CalendarEventController:
         results = await get_database().fetch_all(query)
         return [CalendarEvent(**result) for result in results]
-    async def get_upcoming_for_rooms(
-        self, room_ids: list[str], minutes_ahead: int = 120
-    ) -> list[CalendarEvent]:
-        now = datetime.now(timezone.utc)
-        future_time = now + timedelta(minutes=minutes_ahead)
-        query = (
-            calendar_events.select()
-            .where(
-                sa.and_(
-                    calendar_events.c.room_id.in_(room_ids),
-                    calendar_events.c.is_deleted == False,
-                    calendar_events.c.start_time <= future_time,
-                    calendar_events.c.end_time >= now,
-                )
-            )
-            .order_by(calendar_events.c.start_time.asc())
-        )
-        results = await get_database().fetch_all(query)
-        return [CalendarEvent(**result) for result in results]
     async def get_by_id(self, event_id: str) -> CalendarEvent | None:
         query = calendar_events.select().where(calendar_events.c.id == event_id)
         result = await get_database().fetch_one(query)

View File

@@ -301,23 +301,6 @@ class MeetingController:
         results = await get_database().fetch_all(query)
         return [Meeting(**result) for result in results]
-    async def get_all_active_for_rooms(
-        self, room_ids: list[str], current_time: datetime
-    ) -> list[Meeting]:
-        query = (
-            meetings.select()
-            .where(
-                sa.and_(
-                    meetings.c.room_id.in_(room_ids),
-                    meetings.c.end_date > current_time,
-                    meetings.c.is_active,
-                )
-            )
-            .order_by(meetings.c.end_date.desc())
-        )
-        results = await get_database().fetch_all(query)
-        return [Meeting(**result) for result in results]
     async def get_active_by_calendar_event(
         self, room: Room, calendar_event_id: str, current_time: datetime
     ) -> Meeting | None:

View File

@@ -238,11 +238,6 @@ class RoomController:
         return room
-    async def get_by_names(self, names: list[str]) -> list[Room]:
-        query = rooms.select().where(rooms.c.name.in_(names))
-        results = await get_database().fetch_all(query)
-        return [Room(**r) for r in results]
     async def get_ics_enabled(self) -> list[Room]:
         query = rooms.select().where(
             rooms.c.ics_enabled == True, rooms.c.ics_url != None

View File

@@ -206,6 +206,12 @@ class LLM:
"""Configure llamaindex Settings with OpenAILike LLM""" """Configure llamaindex Settings with OpenAILike LLM"""
session_id = llm_session_id.get() or f"fallback-{uuid4().hex}" session_id = llm_session_id.get() or f"fallback-{uuid4().hex}"
extra_body: dict = {"litellm_session_id": session_id}
# Only send enable_thinking when explicitly set (not None/unset).
# Models that don't support it will ignore the param.
if self.settings_obj.LLM_ENABLE_THINKING is not None:
extra_body["enable_thinking"] = self.settings_obj.LLM_ENABLE_THINKING
Settings.llm = OpenAILike( Settings.llm = OpenAILike(
model=self.model_name, model=self.model_name,
api_base=self.url, api_base=self.url,
@@ -215,7 +221,7 @@ class LLM:
is_function_calling_model=False, is_function_calling_model=False,
temperature=self.temperature, temperature=self.temperature,
max_tokens=self.max_tokens, max_tokens=self.max_tokens,
additional_kwargs={"extra_body": {"litellm_session_id": session_id}}, additional_kwargs={"extra_body": extra_body},
) )
async def get_response( async def get_response(

View File

@@ -75,6 +75,7 @@ class Settings(BaseSettings):
     LLM_URL: str | None = None
     LLM_API_KEY: str | None = None
     LLM_CONTEXT_WINDOW: int = 16000
+    LLM_ENABLE_THINKING: bool | None = None
     LLM_PARSE_MAX_RETRIES: int = (
         3  # Max retries for JSON/validation errors (total attempts = retries + 1)

View File

@@ -1,6 +1,4 @@
-import asyncio
 import logging
-from collections import defaultdict
 from datetime import datetime, timedelta, timezone
 from enum import Enum
 from typing import Annotated, Any, Literal, Optional
@@ -8,14 +6,13 @@ from typing import Annotated, Any, Literal, Optional
 from fastapi import APIRouter, Depends, HTTPException
 from fastapi_pagination import Page
 from fastapi_pagination.ext.databases import apaginate
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
 from redis.exceptions import LockError
 import reflector.auth as auth
 from reflector.db import get_database
 from reflector.db.calendar_events import calendar_events_controller
 from reflector.db.meetings import meetings_controller
-from reflector.db.rooms import Room as DbRoom
 from reflector.db.rooms import rooms_controller
 from reflector.redis_cache import RedisAsyncLock
 from reflector.schemas.platform import Platform
@@ -198,73 +195,6 @@ async def rooms_list(
     return paginated
-class BulkStatusRequest(BaseModel):
-    room_names: list[str] = Field(max_length=100)
-class RoomMeetingStatus(BaseModel):
-    active_meetings: list[Meeting]
-    upcoming_events: list[CalendarEventResponse]
-@router.post("/rooms/meetings/bulk-status", response_model=dict[str, RoomMeetingStatus])
-async def rooms_bulk_meeting_status(
-    request: BulkStatusRequest,
-    user: Annotated[Optional[auth.UserInfo], Depends(auth.current_user_optional)],
-):
-    user_id = user["sub"] if user else None
-    all_rooms = await rooms_controller.get_by_names(request.room_names)
-    # Filter to rooms the user can see (owned or shared), matching rooms_list behavior
-    rooms = [
-        r
-        for r in all_rooms
-        if r.is_shared or (user_id is not None and r.user_id == user_id)
-    ]
-    room_by_id: dict[str, DbRoom] = {r.id: r for r in rooms}
-    room_ids = list(room_by_id.keys())
-    current_time = datetime.now(timezone.utc)
-    active_meetings, upcoming_events = await asyncio.gather(
-        meetings_controller.get_all_active_for_rooms(room_ids, current_time),
-        calendar_events_controller.get_upcoming_for_rooms(room_ids),
-    )
-    # Group by room name
-    active_by_room: dict[str, list[Meeting]] = defaultdict(list)
-    for m in active_meetings:
-        room = room_by_id.get(m.room_id)
-        if not room:
-            continue
-        m.platform = room.platform
-        if user_id != room.user_id and m.platform == "whereby":
-            m.host_room_url = ""
-        active_by_room[room.name].append(
-            Meeting.model_validate(m, from_attributes=True)
-        )
-    upcoming_by_room: dict[str, list[CalendarEventResponse]] = defaultdict(list)
-    for e in upcoming_events:
-        room = room_by_id.get(e.room_id)
-        if not room:
-            continue
-        if user_id != room.user_id:
-            e.description = None
-            e.attendees = None
-        upcoming_by_room[room.name].append(
-            CalendarEventResponse.model_validate(e, from_attributes=True)
-        )
-    result: dict[str, RoomMeetingStatus] = {}
-    for name in request.room_names:
-        result[name] = RoomMeetingStatus(
-            active_meetings=active_by_room.get(name, []),
-            upcoming_events=upcoming_by_room.get(name, []),
-        )
-    return result
 @router.get("/rooms/{room_id}", response_model=RoomDetails)
 async def rooms_get(
     room_id: str,

View File

@@ -8,6 +8,7 @@ from pydantic import BaseModel, Field
 from workflows.errors import WorkflowRuntimeError, WorkflowTimeoutError
 from reflector.llm import LLM, LLMParseError, StructuredOutputWorkflow
+from reflector.settings import Settings
 from reflector.utils.retry import RetryException
@@ -26,6 +27,57 @@ def make_completion_response(text: str):
     return response
+class TestLLMEnableThinking:
+    """Test that LLM_ENABLE_THINKING setting is passed through to OpenAILike"""
+    def test_enable_thinking_false_passed_in_extra_body(self):
+        """enable_thinking=False should be in extra_body when LLM_ENABLE_THINKING=False"""
+        settings = Settings(
+            LLM_ENABLE_THINKING=False,
+            LLM_URL="http://fake",
+            LLM_API_KEY="fake",
+        )
+        with (
+            patch("reflector.llm.OpenAILike") as mock_openai,
+            patch("reflector.llm.Settings"),
+        ):
+            LLM(settings=settings)
+            extra_body = mock_openai.call_args.kwargs["additional_kwargs"]["extra_body"]
+            assert extra_body["enable_thinking"] is False
+    def test_enable_thinking_true_passed_in_extra_body(self):
+        """enable_thinking=True should be in extra_body when LLM_ENABLE_THINKING=True"""
+        settings = Settings(
+            LLM_ENABLE_THINKING=True,
+            LLM_URL="http://fake",
+            LLM_API_KEY="fake",
+        )
+        with (
+            patch("reflector.llm.OpenAILike") as mock_openai,
+            patch("reflector.llm.Settings"),
+        ):
+            LLM(settings=settings)
+            extra_body = mock_openai.call_args.kwargs["additional_kwargs"]["extra_body"]
+            assert extra_body["enable_thinking"] is True
+    def test_enable_thinking_none_not_in_extra_body(self):
+        """enable_thinking should not be in extra_body when LLM_ENABLE_THINKING is None (default)"""
+        settings = Settings(
+            LLM_URL="http://fake",
+            LLM_API_KEY="fake",
+        )
+        with (
+            patch("reflector.llm.OpenAILike") as mock_openai,
+            patch("reflector.llm.Settings"),
+        ):
+            LLM(settings=settings)
+            extra_body = mock_openai.call_args.kwargs["additional_kwargs"]["extra_body"]
+            assert "enable_thinking" not in extra_body
 class TestLLMParseErrorRecovery:
     """Test parse error recovery with Workflow feedback loop"""

View File

@@ -1,217 +0,0 @@
import "@testing-library/jest-dom";
// --- Module mocks (hoisted before imports) ---
jest.mock("../apiClient", () => ({
client: {
GET: jest.fn(),
POST: jest.fn(),
PUT: jest.fn(),
PATCH: jest.fn(),
DELETE: jest.fn(),
use: jest.fn(),
},
$api: {
useQuery: jest.fn(),
useMutation: jest.fn(),
queryOptions: (method: string, path: string, init?: unknown) =>
init === undefined
? { queryKey: [method, path] }
: { queryKey: [method, path, init] },
},
API_URL: "http://test",
WEBSOCKET_URL: "ws://test",
configureApiAuth: jest.fn(),
}));
jest.mock("../AuthProvider", () => ({
useAuth: () => ({
status: "authenticated" as const,
accessToken: "test-token",
accessTokenExpires: Date.now() + 3600000,
user: { id: "user1", name: "Test User" },
update: jest.fn(),
signIn: jest.fn(),
signOut: jest.fn(),
lastUserId: "user1",
}),
}));
// Recreate the batcher with a 0ms window. setTimeout(fn, 0) defers to the next
// macrotask boundary — after all synchronous React rendering completes. All
// useQuery queryFns fire within the same macrotask, so they all queue into one
// batch before the timer fires. This is deterministic and avoids fake timers.
jest.mock("../meetingStatusBatcher", () => {
const actual = jest.requireActual("../meetingStatusBatcher");
return {
...actual,
meetingStatusBatcher: actual.createMeetingStatusBatcher(0),
};
});
// --- Imports (after mocks) ---
import React from "react";
import { render, waitFor, screen } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { useRoomActiveMeetings, useRoomUpcomingMeetings } from "../apiHooks";
import { client } from "../apiClient";
import { ErrorProvider } from "../../(errors)/errorContext";
const mockClient = client as { POST: jest.Mock };
// --- Helpers ---
function mockBulkStatusEndpoint(
roomData?: Record<
string,
{ active_meetings: unknown[]; upcoming_events: unknown[] }
>,
) {
mockClient.POST.mockImplementation(
async (_path: string, options: { body: { room_names: string[] } }) => {
const roomNames: string[] = options.body.room_names;
const src = roomData ?? {};
const data = Object.fromEntries(
roomNames.map((name) => [
name,
src[name] ?? { active_meetings: [], upcoming_events: [] },
]),
);
return { data, error: undefined, response: {} };
},
);
}
// --- Test component: renders N room cards, each using both hooks ---
function RoomCard({ roomName }: { roomName: string }) {
const active = useRoomActiveMeetings(roomName);
const upcoming = useRoomUpcomingMeetings(roomName);
if (active.isLoading || upcoming.isLoading) {
return <div data-testid={`room-${roomName}`}>loading</div>;
}
return (
<div data-testid={`room-${roomName}`}>
{active.data?.length ?? 0} active, {upcoming.data?.length ?? 0} upcoming
</div>
);
}
function RoomList({ roomNames }: { roomNames: string[] }) {
return (
<>
{roomNames.map((name) => (
<RoomCard key={name} roomName={name} />
))}
</>
);
}
function createWrapper() {
const queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
},
});
return function Wrapper({ children }: { children: React.ReactNode }) {
return (
<QueryClientProvider client={queryClient}>
<ErrorProvider>{children}</ErrorProvider>
</QueryClientProvider>
);
};
}
// --- Tests ---
describe("meeting status batcher integration", () => {
afterEach(() => jest.clearAllMocks());
it("batches multiple room queries into a single POST request", async () => {
const rooms = Array.from({ length: 10 }, (_, i) => `room-${i}`);
mockBulkStatusEndpoint();
render(<RoomList roomNames={rooms} />, { wrapper: createWrapper() });
await waitFor(() => {
for (const name of rooms) {
expect(screen.getByTestId(`room-${name}`)).toHaveTextContent(
"0 active, 0 upcoming",
);
}
});
const postCalls = mockClient.POST.mock.calls.filter(
([path]: [string]) => path === "/v1/rooms/meetings/bulk-status",
);
// Without batching this would be 20 calls (2 hooks x 10 rooms).
expect(postCalls).toHaveLength(1);
// The single call should contain all 10 rooms (deduplicated)
const requestedRooms: string[] = postCalls[0][1].body.room_names;
for (const name of rooms) {
expect(requestedRooms).toContain(name);
}
});
it("batcher fetcher returns room-specific data", async () => {
const {
meetingStatusBatcher: batcher,
} = require("../meetingStatusBatcher");
mockBulkStatusEndpoint({
"room-a": {
active_meetings: [{ id: "m1", room_name: "room-a" }],
upcoming_events: [],
},
"room-b": {
active_meetings: [],
upcoming_events: [{ id: "e1", title: "Standup" }],
},
});
const [resultA, resultB] = await Promise.all([
batcher.fetch("room-a"),
batcher.fetch("room-b"),
]);
expect(mockClient.POST).toHaveBeenCalledTimes(1);
expect(resultA.active_meetings).toEqual([
{ id: "m1", room_name: "room-a" },
]);
expect(resultA.upcoming_events).toEqual([]);
expect(resultB.active_meetings).toEqual([]);
expect(resultB.upcoming_events).toEqual([{ id: "e1", title: "Standup" }]);
});
it("renders room-specific meeting data through hooks", async () => {
mockBulkStatusEndpoint({
"room-a": {
active_meetings: [{ id: "m1", room_name: "room-a" }],
upcoming_events: [],
},
"room-b": {
active_meetings: [],
upcoming_events: [{ id: "e1", title: "Standup" }],
},
});
render(<RoomList roomNames={["room-a", "room-b"]} />, {
wrapper: createWrapper(),
});
await waitFor(() => {
expect(screen.getByTestId("room-room-a")).toHaveTextContent(
"1 active, 0 upcoming",
);
expect(screen.getByTestId("room-room-b")).toHaveTextContent(
"0 active, 1 upcoming",
);
});
});
});

View File

@@ -2,10 +2,9 @@
 import { $api } from "./apiClient";
 import { useError } from "../(errors)/errorContext";
-import { QueryClient, useQuery, useQueryClient } from "@tanstack/react-query";
+import { QueryClient, useQueryClient } from "@tanstack/react-query";
 import type { components } from "../reflector-api";
 import { useAuth } from "./AuthProvider";
-import { meetingStatusBatcher } from "./meetingStatusBatcher";
 import { MeetingId } from "./types";
 import { NonEmptyString } from "./utils";
@@ -698,7 +697,15 @@ export function useRoomsCreateMeeting() {
         queryKey: $api.queryOptions("get", "/v1/rooms").queryKey,
       }),
       queryClient.invalidateQueries({
-        queryKey: meetingStatusKeys.active(roomName),
+        queryKey: $api.queryOptions(
+          "get",
+          "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
+          {
+            params: {
+              path: { room_name: roomName },
+            },
+          },
+        ).queryKey,
       }),
     ]);
   },
@@ -727,39 +734,42 @@ export function useRoomGetByName(roomName: string | null) {
 export function useRoomUpcomingMeetings(roomName: string | null) {
   const { isAuthenticated } = useAuthReady();
-  return useQuery({
-    queryKey: meetingStatusKeys.upcoming(roomName!),
-    queryFn: async () => {
-      const result = await meetingStatusBatcher.fetch(roomName!);
-      return result.upcoming_events;
-    },
-    enabled: !!roomName && isAuthenticated,
-  });
+  return $api.useQuery(
+    "get",
+    "/v1/rooms/{room_name}/meetings/upcoming" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_UPCOMING_PATH_PARTIAL}`,
+    {
+      params: {
+        path: { room_name: roomName! },
+      },
+    },
+    {
+      enabled: !!roomName && isAuthenticated,
+    },
+  );
 }
-// Query keys reuse $api.queryOptions so cache identity matches the original
-// per-room GET endpoints. The actual fetch goes through the batcher, but the
-// keys stay consistent with the rest of the codebase.
-const meetingStatusKeys = {
-  active: (roomName: string) =>
-    $api.queryOptions("get", "/v1/rooms/{room_name}/meetings/active", {
-      params: { path: { room_name: roomName } },
-    }).queryKey,
-  upcoming: (roomName: string) =>
-    $api.queryOptions("get", "/v1/rooms/{room_name}/meetings/upcoming", {
-      params: { path: { room_name: roomName } },
-    }).queryKey,
-};
+const MEETINGS_PATH_PARTIAL = "meetings" as const;
+const MEETINGS_ACTIVE_PATH_PARTIAL = `${MEETINGS_PATH_PARTIAL}/active` as const;
+const MEETINGS_UPCOMING_PATH_PARTIAL =
+  `${MEETINGS_PATH_PARTIAL}/upcoming` as const;
+const MEETING_LIST_PATH_PARTIALS = [
+  MEETINGS_ACTIVE_PATH_PARTIAL,
+  MEETINGS_UPCOMING_PATH_PARTIAL,
+];
 export function useRoomActiveMeetings(roomName: string | null) {
-  return useQuery({
-    queryKey: meetingStatusKeys.active(roomName!),
-    queryFn: async () => {
-      const result = await meetingStatusBatcher.fetch(roomName!);
-      return result.active_meetings;
-    },
-    enabled: !!roomName,
-  });
+  return $api.useQuery(
+    "get",
+    "/v1/rooms/{room_name}/meetings/active" satisfies `/v1/rooms/{room_name}/${typeof MEETINGS_ACTIVE_PATH_PARTIAL}`,
+    {
+      params: {
+        path: { room_name: roomName! },
+      },
+    },
+    {
+      enabled: !!roomName,
+    },
+  );
 }
 export function useRoomGetMeeting(

View File

@@ -1,37 +0,0 @@
-import { create, keyResolver, windowScheduler } from "@yornaath/batshit";
-import { client } from "./apiClient";
-import type { components } from "../reflector-api";
-type MeetingStatusResult = {
-  roomName: string;
-  active_meetings: components["schemas"]["Meeting"][];
-  upcoming_events: components["schemas"]["CalendarEventResponse"][];
-};
-const BATCH_WINDOW_MS = 10;
-export function createMeetingStatusBatcher(windowMs: number = BATCH_WINDOW_MS) {
-  return create({
-    fetcher: async (roomNames: string[]): Promise<MeetingStatusResult[]> => {
-      const unique = [...new Set(roomNames)];
-      const { data, error } = await client.POST(
-        "/v1/rooms/meetings/bulk-status",
-        { body: { room_names: unique } },
-      );
-      if (error || !data) {
-        throw new Error(
-          `bulk-status fetch failed: ${JSON.stringify(error ?? "no data")}`,
-        );
-      }
-      return roomNames.map((name) => ({
-        roomName: name,
-        active_meetings: data[name]?.active_meetings ?? [],
-        upcoming_events: data[name]?.upcoming_events ?? [],
-      }));
-    },
-    resolver: keyResolver("roomName"),
-    scheduler: windowScheduler(windowMs),
-  });
-}
-export const meetingStatusBatcher = createMeetingStatusBatcher();

View File

@@ -118,23 +118,6 @@ export interface paths {
     patch?: never;
     trace?: never;
   };
-  "/v1/rooms/meetings/bulk-status": {
-    parameters: {
-      query?: never;
-      header?: never;
-      path?: never;
-      cookie?: never;
-    };
-    get?: never;
-    put?: never;
-    /** Rooms Bulk Meeting Status */
-    post: operations["v1_rooms_bulk_meeting_status"];
-    delete?: never;
-    options?: never;
-    head?: never;
-    patch?: never;
-    trace?: never;
-  };
   "/v1/rooms/{room_id}": {
     parameters: {
       query?: never;
@@ -816,11 +799,6 @@ export interface components {
       */
       chunk: string;
     };
-    /** BulkStatusRequest */
-    BulkStatusRequest: {
-      /** Room Names */
-      room_names: string[];
-    };
     /** CalendarEventResponse */
     CalendarEventResponse: {
       /** Id */
@@ -1757,13 +1735,6 @@ export interface components {
       /** Webhook Secret */
      webhook_secret: string | null;
     };
-    /** RoomMeetingStatus */
-    RoomMeetingStatus: {
-      /** Active Meetings */
-      active_meetings: components["schemas"]["Meeting"][];
-      /** Upcoming Events */
-      upcoming_events: components["schemas"]["CalendarEventResponse"][];
-    };
     /** RtcOffer */
     RtcOffer: {
       /** Sdp */
@@ -2301,41 +2272,6 @@ export interface operations {
       };
     };
   };
-  v1_rooms_bulk_meeting_status: {
-    parameters: {
-      query?: never;
-      header?: never;
-      path?: never;
-      cookie?: never;
-    };
-    requestBody: {
-      content: {
-        "application/json": components["schemas"]["BulkStatusRequest"];
-      };
-    };
-    responses: {
-      /** @description Successful Response */
-      200: {
-        headers: {
-          [name: string]: unknown;
-        };
-        content: {
-          "application/json": {
-            [key: string]: components["schemas"]["RoomMeetingStatus"];
-          };
-        };
-      };
-      /** @description Validation Error */
-      422: {
-        headers: {
-          [name: string]: unknown;
-        };
-        content: {
-          "application/json": components["schemas"]["HTTPValidationError"];
-        };
-      };
-    };
-  };
   v1_rooms_get: {
     parameters: {
       query?: never;

View File

@@ -1,22 +1,8 @@
 module.exports = {
-  testEnvironment: "jest-environment-jsdom",
+  preset: "ts-jest",
+  testEnvironment: "node",
   roots: ["<rootDir>/app"],
-  testMatch: ["**/__tests__/**/*.test.ts", "**/__tests__/**/*.test.tsx"],
-  collectCoverage: false,
-  transform: {
-    "^.+\\.[jt]sx?$": [
-      "ts-jest",
-      {
-        tsconfig: {
-          jsx: "react-jsx",
-          module: "esnext",
-          moduleResolution: "bundler",
-          esModuleInterop: true,
-          strict: true,
-          downlevelIteration: true,
-          lib: ["dom", "dom.iterable", "esnext"],
-        },
-      },
-    ],
-  },
+  testMatch: ["**/__tests__/**/*.test.ts"],
+  collectCoverage: true,
+  collectCoverageFrom: ["app/**/*.ts", "!app/**/*.d.ts"],
 };

View File

@@ -23,7 +23,6 @@
"@tanstack/react-query": "^5.85.9", "@tanstack/react-query": "^5.85.9",
"@types/ioredis": "^5.0.0", "@types/ioredis": "^5.0.0",
"@whereby.com/browser-sdk": "^3.3.4", "@whereby.com/browser-sdk": "^3.3.4",
"@yornaath/batshit": "^0.14.0",
"autoprefixer": "10.4.20", "autoprefixer": "10.4.20",
"axios": "^1.8.2", "axios": "^1.8.2",
"eslint": "^9.33.0", "eslint": "^9.33.0",
@@ -62,13 +61,9 @@
"author": "Andreas <andreas@monadical.com>", "author": "Andreas <andreas@monadical.com>",
"license": "All Rights Reserved", "license": "All Rights Reserved",
"devDependencies": { "devDependencies": {
"@testing-library/dom": "^10.4.1",
"@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.2",
"@types/jest": "^30.0.0", "@types/jest": "^30.0.0",
"@types/react": "18.2.20", "@types/react": "18.2.20",
"jest": "^30.1.3", "jest": "^30.1.3",
"jest-environment-jsdom": "^30.2.0",
"openapi-typescript": "^7.9.1", "openapi-typescript": "^7.9.1",
"prettier": "^3.0.0", "prettier": "^3.0.0",
"ts-jest": "^29.4.1" "ts-jest": "^29.4.1"

www/pnpm-lock.yaml (generated, 808 changed lines): diff suppressed because it is too large.