fix(core): ensure compaction is more reliable, add reserve token buffer to ensure that input window has enough room to compact (#12924)

Author: Aiden Cline
Co-authored-by: James Lal <james@littlebearlabs.io>
Committed: 2026-02-10 19:55:22 -06:00 (committed by GitHub)
Parent: 60bdb6e9ba
Commit: 0fd6f365be

16 changed files with 262 additions and 189 deletions

View File

@@ -1,6 +1,6 @@
You are a helpful AI assistant tasked with summarizing conversations.
When asked to summarize, provide a detailed but concise summary of the conversation.
Focus on information that would be helpful for continuing the conversation, including:
- What was done
- What is currently being worked on
@@ -10,3 +10,5 @@ Focus on information that would be helpful for continuing the conversation, incl
- Important technical decisions and why they were made
Your summary should be comprehensive enough to provide context but concise enough to be quickly understood.
Do not respond to any questions in the conversation, only output the summary.

View File

@@ -1161,6 +1161,12 @@ export namespace Config {
.object({
auto: z.boolean().optional().describe("Enable automatic compaction when context is full (default: true)"),
prune: z.boolean().optional().describe("Enable pruning of old tool outputs (default: true)"),
reserved: z
.number()
.int()
.min(0)
.optional()
.describe("Token buffer for compaction. Leaves enough window to avoid overflow during compaction."),
})
.optional(),
experimental: z

View File

@@ -5,6 +5,7 @@ import type { JSONSchema } from "zod/v4/core"
import type { Provider } from "./provider"
import type { ModelsDev } from "./models"
import { iife } from "@/util/iife"
import { Flag } from "@/flag/flag"
type Modality = NonNullable<ModelsDev.Model["modalities"]>["input"][number]
@@ -17,6 +18,8 @@ function mimeToModality(mime: string): Modality | undefined {
}
export namespace ProviderTransform {
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
// Maps npm package to the key the AI SDK expects for providerOptions
function sdkKey(npm: string): string | undefined {
switch (npm) {
@@ -723,29 +726,8 @@ export namespace ProviderTransform {
return { [key]: options }
}
export function maxOutputTokens(
npm: string,
options: Record<string, any>,
modelLimit: number,
globalLimit: number,
): number {
const modelCap = modelLimit || globalLimit
const standardLimit = Math.min(modelCap, globalLimit)
if (npm === "@ai-sdk/anthropic" || npm === "@ai-sdk/google-vertex/anthropic") {
const thinking = options?.["thinking"]
const budgetTokens = typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] : 0
const enabled = thinking?.["type"] === "enabled"
if (enabled && budgetTokens > 0) {
// Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.
if (budgetTokens + standardLimit <= modelCap) {
return standardLimit
}
return modelCap - budgetTokens
}
}
return standardLimit
export function maxOutputTokens(model: Provider.Model): number {
return Math.min(model.limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
}
export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema | JSONSchema7): JSONSchema7 {
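
The new cap is easiest to see with a few sample output limits. A minimal sketch, assuming only the `Math.min(...) || OUTPUT_TOKEN_MAX` expression above (the sample limits are hypothetical):

```ts
const OUTPUT_TOKEN_MAX = 32_000

// Mirrors the expression in maxOutputTokens above, taking just the model's
// declared output limit as input.
function cap(limitOutput: number): number {
  // Math.min(0, 32_000) is 0, which is falsy, so a missing or zero limit
  // falls back to the 32k default.
  return Math.min(limitOutput, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
}

cap(100_000) // 32_000 (the global cap wins for large models)
cap(16_000)  // 16_000 (the smaller model limit wins)
cap(0)       // 32_000 (an unknown limit falls back to the default)
```

Note the thinking-budget adjustment for Anthropic models is gone; the cap now depends only on the model's output limit.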

View File

@@ -6,7 +6,6 @@ import { Instance } from "../project/instance"
import { Provider } from "../provider/provider"
import { MessageV2 } from "./message-v2"
import z from "zod"
import { SessionPrompt } from "./prompt"
import { Token } from "../util/token"
import { Log } from "../util/log"
import { SessionProcessor } from "./processor"
@@ -14,6 +13,7 @@ import { fn } from "@/util/fn"
import { Agent } from "@/agent/agent"
import { Plugin } from "@/plugin"
import { Config } from "@/config/config"
import { ProviderTransform } from "@/provider/transform"
export namespace SessionCompaction {
const log = Log.create({ service: "session.compaction" })
@@ -27,15 +27,22 @@ export namespace SessionCompaction {
),
}
const COMPACTION_BUFFER = 20_000
export async function isOverflow(input: { tokens: MessageV2.Assistant["tokens"]; model: Provider.Model }) {
const config = await Config.get()
if (config.compaction?.auto === false) return false
const context = input.model.limit.context
if (context === 0) return false
const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
const output = Math.min(input.model.limit.output, SessionPrompt.OUTPUT_TOKEN_MAX) || SessionPrompt.OUTPUT_TOKEN_MAX
const usable = input.model.limit.input || context - output
return count > usable
const count =
input.tokens.total ||
input.tokens.input + input.tokens.output + input.tokens.cache.read + input.tokens.cache.write
const reserved =
config.compaction?.reserved ?? Math.min(COMPACTION_BUFFER, ProviderTransform.maxOutputTokens(input.model))
const usable = input.model.limit.input ? input.model.limit.input - reserved : context - reserved
return count >= usable
}
export const PRUNE_MINIMUM = 20_000
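
A quick trace of the new check with hypothetical numbers: a model with a 200K context window, `limit.input` set to 200K, a 32K output limit, and no `compaction.reserved` configured, so the reserve falls back to `min(COMPACTION_BUFFER, maxOutputTokens)`:

```ts
// Hypothetical values; this just replays the arithmetic in isOverflow above.
const limit = { context: 200_000, input: 200_000, output: 32_000 }
const tokens = { input: 180_000, output: 15_000, cache: { read: 3_000, write: 0 } }

const count = tokens.input + tokens.output + tokens.cache.read + tokens.cache.write // 198_000
const reserved = Math.min(20_000, Math.min(limit.output, 32_000) || 32_000)          // 20_000
const usable = limit.input ? limit.input - reserved : limit.context - reserved       // 180_000

const overflow = count >= usable // true: compaction fires while there is still room for the next reply
```

Under the old check, `usable` would have been the full 200K input limit, so compaction would not have fired until the request had already overflowed.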
@@ -139,8 +146,34 @@ export namespace SessionCompaction {
{ sessionID: input.sessionID },
{ context: [], prompt: undefined },
)
const defaultPrompt =
"Provide a detailed prompt for continuing our conversation above. Focus on information that would be helpful for continuing the conversation, including what we did, what we're doing, which files we're working on, and what we're going to do next considering new session will not have access to our conversation."
const defaultPrompt = `Provide a detailed prompt for continuing our conversation above.
Focus on information that would be helpful for continuing the conversation, including what we did, what we're doing, which files we're working on, and what we're going to do next.
The summary that you construct will be used so that another agent can read it and continue the work.
When constructing the summary, try to stick to this template:
---
## Goal
[What goal(s) is the user trying to accomplish?]
## Instructions
- [What important instructions did the user give you that are relevant]
- [If there is a plan or spec, include information about it so next agent can continue using it]
## Discoveries
[What notable things were learned during this conversation that would be useful for the next agent to know when continuing the work]
## Accomplished
[What work has been completed, what work is still in progress, and what work is left?]
## Relevant files / directories
[Construct a structured list of relevant files that have been read, edited, or created that pertain to the task at hand. If all the files in a directory are relevant, include the path to the directory.]
---`
const promptText = compacting.prompt ?? [defaultPrompt, ...compacting.context].join("\n\n")
const result = await processor.process({
user: userMessage,
@@ -181,7 +214,7 @@ export namespace SessionCompaction {
sessionID: input.sessionID,
type: "text",
synthetic: true,
text: "Continue if you have next steps",
text: "Continue if you have next steps, or stop and ask for clarification if you are unsure how to proceed.",
time: {
start: Date.now(),
end: Date.now(),

View File

@@ -4,7 +4,7 @@ import { BusEvent } from "@/bus/bus-event"
import { Bus } from "@/bus"
import { Decimal } from "decimal.js"
import z from "zod"
import { type LanguageModelUsage, type ProviderMetadata } from "ai"
import { type ProviderMetadata } from "ai"
import { Config } from "../config/config"
import { Flag } from "../flag/flag"
import { Identifier } from "../id/id"
@@ -22,6 +22,8 @@ import { Snapshot } from "@/snapshot"
import type { Provider } from "@/provider/provider"
import { PermissionNext } from "@/permission/next"
import { Global } from "@/global"
import type { LanguageModelV2Usage } from "@ai-sdk/provider"
import { iife } from "@/util/iife"
export namespace Session {
const log = Log.create({ service: "session" })
@@ -439,34 +441,58 @@ export namespace Session {
export const getUsage = fn(
z.object({
model: z.custom<Provider.Model>(),
usage: z.custom<LanguageModelUsage>(),
usage: z.custom<LanguageModelV2Usage>(),
metadata: z.custom<ProviderMetadata>().optional(),
}),
(input) => {
const cacheReadInputTokens = input.usage.cachedInputTokens ?? 0
const cacheWriteInputTokens = (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = excludesCachedTokens
? (input.usage.inputTokens ?? 0)
: (input.usage.inputTokens ?? 0) - cacheReadInputTokens - cacheWriteInputTokens
const safe = (value: number) => {
if (!Number.isFinite(value)) return 0
return value
}
const inputTokens = safe(input.usage.inputTokens ?? 0)
const outputTokens = safe(input.usage.outputTokens ?? 0)
const reasoningTokens = safe(input.usage.reasoningTokens ?? 0)
const cacheReadInputTokens = safe(input.usage.cachedInputTokens ?? 0)
const cacheWriteInputTokens = safe(
(input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
// @ts-expect-error
input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
// @ts-expect-error
input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
0) as number,
)
// OpenRouter provides inputTokens as the total count of input tokens (including cached).
// AFAIK other providers (OpenRouter/OpenAI/Gemini etc.) do it the same way e.g. vercel/ai#8794 (comment)
// Anthropic does it differently though - inputTokens doesn't include cached tokens.
// It looks like OpenCode's cost calculation assumes all providers return inputTokens the same way Anthropic does (I'm guessing getUsage logic was originally implemented with anthropic), so it's causing incorrect cost calculation for OpenRouter and others.
const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
const adjustedInputTokens = safe(
excludesCachedTokens ? inputTokens : inputTokens - cacheReadInputTokens - cacheWriteInputTokens,
)
const total = iife(() => {
// Anthropic doesn't provide total_tokens, also ai sdk will vastly undercount if we
// don't compute from components
if (
input.model.api.npm === "@ai-sdk/anthropic" ||
input.model.api.npm === "@ai-sdk/amazon-bedrock" ||
input.model.api.npm === "@ai-sdk/google-vertex/anthropic"
) {
return adjustedInputTokens + outputTokens + cacheReadInputTokens + cacheWriteInputTokens
}
return input.usage.totalTokens
})
const tokens = {
input: safe(adjustedInputTokens),
output: safe(input.usage.outputTokens ?? 0),
reasoning: safe(input.usage?.reasoningTokens ?? 0),
total,
input: adjustedInputTokens,
output: outputTokens,
reasoning: reasoningTokens,
cache: {
write: safe(cacheWriteInputTokens),
read: safe(cacheReadInputTokens),
write: cacheWriteInputTokens,
read: cacheReadInputTokens,
},
}
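
As a worked example of the Anthropic branch above (illustrative numbers, in the same style as the getUsage tests added in this change):

```ts
// Anthropic-style usage: inputTokens already excludes cached tokens, and the
// provider-reported total (when present) ignores cache reads/writes.
const usage = { inputTokens: 1_000, outputTokens: 500, cachedInputTokens: 200, totalTokens: 1_500 }
const cacheWrite = 300 // from metadata.anthropic.cacheCreationInputTokens

// excludesCachedTokens is true, so input stays at 1_000 and the total is
// recomputed from components rather than trusting totalTokens:
const total = 1_000 + 500 + 200 + 300 // 2_000
```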

View File

@@ -25,8 +25,7 @@ import { Auth } from "@/auth"
export namespace LLM {
const log = Log.create({ service: "llm" })
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
export const OUTPUT_TOKEN_MAX = ProviderTransform.OUTPUT_TOKEN_MAX
export type StreamInput = {
user: MessageV2.User
@@ -149,14 +148,7 @@ export namespace LLM {
)
const maxOutputTokens =
isCodex || provider.id.includes("github-copilot")
? undefined
: ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
input.model.limit.output,
OUTPUT_TOKEN_MAX,
)
isCodex || provider.id.includes("github-copilot") ? undefined : ProviderTransform.maxOutputTokens(input.model)
const tools = await resolveTools(input)

View File

@@ -210,6 +210,7 @@ export namespace MessageV2 {
snapshot: z.string().optional(),
cost: z.number(),
tokens: z.object({
total: z.number().optional(),
input: z.number(),
output: z.number(),
reasoning: z.number(),
@@ -383,6 +384,7 @@ export namespace MessageV2 {
summary: z.boolean().optional(),
cost: z.number(),
tokens: z.object({
total: z.number().optional(),
input: z.number(),
output: z.number(),
reasoning: z.number(),

View File

@@ -342,6 +342,9 @@ export namespace SessionProcessor {
stack: JSON.stringify(e.stack),
})
const error = MessageV2.fromError(e, { providerID: input.model.providerID })
if (MessageV2.ContextOverflowError.isInstance(error)) {
// TODO: Handle context overflow error
}
const retry = SessionRetry.retryable(error)
if (retry !== undefined) {
attempt++

View File

@@ -52,7 +52,6 @@ globalThis.AI_SDK_LOG_WARNINGS = false
export namespace SessionPrompt {
const log = Log.create({ service: "session.prompt" })
export const OUTPUT_TOKEN_MAX = Flag.OPENCODE_EXPERIMENTAL_OUTPUT_TOKEN_MAX || 32_000
const state = Instance.state(
() => {

View File

@@ -59,9 +59,8 @@ export namespace SessionRetry {
}
export function retryable(error: ReturnType<NamedError["toObject"]>) {
// DO NOT retry context overflow errors
// context overflow errors should not be retried
if (MessageV2.ContextOverflowError.isInstance(error)) return undefined
if (MessageV2.APIError.isInstance(error)) {
if (!error.data.isRetryable) return undefined
if (error.data.responseBody?.includes("FreeUsageLimitError"))

View File

@@ -175,100 +175,6 @@ describe("ProviderTransform.options - gpt-5 textVerbosity", () => {
})
})
describe("ProviderTransform.maxOutputTokens", () => {
test("returns 32k when modelLimit > 32k", () => {
const modelLimit = 100000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(OUTPUT_TOKEN_MAX)
})
test("returns modelLimit when modelLimit < 32k", () => {
const modelLimit = 16000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(16000)
})
describe("azure", () => {
test("returns 32k when modelLimit > 32k", () => {
const modelLimit = 100000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(OUTPUT_TOKEN_MAX)
})
test("returns modelLimit when modelLimit < 32k", () => {
const modelLimit = 16000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/azure", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(16000)
})
})
describe("bedrock", () => {
test("returns 32k when modelLimit > 32k", () => {
const modelLimit = 100000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(OUTPUT_TOKEN_MAX)
})
test("returns modelLimit when modelLimit < 32k", () => {
const modelLimit = 16000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/amazon-bedrock", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(16000)
})
})
describe("anthropic without thinking options", () => {
test("returns 32k when modelLimit > 32k", () => {
const modelLimit = 100000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(OUTPUT_TOKEN_MAX)
})
test("returns modelLimit when modelLimit < 32k", () => {
const modelLimit = 16000
const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", {}, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(16000)
})
})
describe("anthropic with thinking options", () => {
test("returns 32k when budgetTokens + 32k <= modelLimit", () => {
const modelLimit = 100000
const options = {
thinking: {
type: "enabled",
budgetTokens: 10000,
},
}
const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(OUTPUT_TOKEN_MAX)
})
test("returns modelLimit - budgetTokens when budgetTokens + 32k > modelLimit", () => {
const modelLimit = 50000
const options = {
thinking: {
type: "enabled",
budgetTokens: 30000,
},
}
const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(20000)
})
test("returns 32k when thinking type is not enabled", () => {
const modelLimit = 100000
const options = {
thinking: {
type: "disabled",
budgetTokens: 10000,
},
}
const result = ProviderTransform.maxOutputTokens("@ai-sdk/anthropic", options, modelLimit, OUTPUT_TOKEN_MAX)
expect(result).toBe(OUTPUT_TOKEN_MAX)
})
})
})
describe("ProviderTransform.schema - gemini array items", () => {
test("adds missing items for array properties", () => {
const geminiModel = {

View File

@@ -15,6 +15,7 @@ function createModel(opts: {
output: number
input?: number
cost?: Provider.Model["cost"]
npm?: string
}): Provider.Model {
return {
id: "test-model",
@@ -34,7 +35,7 @@ function createModel(opts: {
input: { text: true, image: false, audio: false, video: false },
output: { text: true, image: false, audio: false, video: false },
},
api: { npm: "@ai-sdk/anthropic" },
api: { npm: opts.npm ?? "@ai-sdk/anthropic" },
options: {},
} as Provider.Model
}
@@ -70,7 +71,7 @@ describe("session.compaction.isOverflow", () => {
directory: tmp.path,
fn: async () => {
const model = createModel({ context: 100_000, output: 32_000 })
const tokens = { input: 50_000, output: 10_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
const tokens = { input: 60_000, output: 10_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
},
})
@@ -112,6 +113,86 @@ describe("session.compaction.isOverflow", () => {
})
})
// ─── Bug reproduction tests ───────────────────────────────────────────
// These tests demonstrate that when limit.input is set, isOverflow()
// does not subtract any headroom for the next model response. This means
// compaction only triggers AFTER we've already consumed the full input
// budget, leaving zero room for the next API call's output tokens.
//
// Compare: without limit.input, usable = context - output (reserves space).
// With limit.input, usable = limit.input (reserves nothing).
//
// Related issues: #10634, #8089, #11086, #12621
// Open PRs: #6875, #12924
test("BUG: no headroom when limit.input is set — compaction should trigger near boundary but does not", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Simulate Claude with prompt caching: input limit = 200K, output limit = 32K
const model = createModel({ context: 200_000, input: 200_000, output: 32_000 })
// We've used 198K tokens total. Only 2K under the input limit.
// On the next turn, the full conversation (198K) becomes input,
// plus the model needs room to generate output — this WILL overflow.
const tokens = { input: 180_000, output: 15_000, reasoning: 0, cache: { read: 3_000, write: 0 } }
// count = 180K + 3K + 15K = 198K
// usable = limit.input = 200K (no output subtracted!)
// 198K > 200K = false → no compaction triggered
// WITHOUT limit.input: usable = 200K - 32K = 168K, and 198K > 168K = true ✓
// WITH limit.input: usable = 200K, and 198K > 200K = false ✗
// With 198K used and only 2K headroom, the next turn will overflow.
// Compaction MUST trigger here.
expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
},
})
})
test("BUG: without limit.input, same token count correctly triggers compaction", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Same model but without limit.input — uses context - output instead
const model = createModel({ context: 200_000, output: 32_000 })
// Same token usage as above
const tokens = { input: 180_000, output: 15_000, reasoning: 0, cache: { read: 3_000, write: 0 } }
// count = 198K
// usable = context - output = 200K - 32K = 168K
// 198K > 168K = true → compaction correctly triggered
const result = await SessionCompaction.isOverflow({ tokens, model })
expect(result).toBe(true) // ← Correct: headroom is reserved
},
})
})
test("BUG: asymmetry — limit.input model allows 30K more usage before compaction than equivalent model without it", async () => {
await using tmp = await tmpdir()
await Instance.provide({
directory: tmp.path,
fn: async () => {
// Two models with identical context/output limits, differing only in limit.input
const withInputLimit = createModel({ context: 200_000, input: 200_000, output: 32_000 })
const withoutInputLimit = createModel({ context: 200_000, output: 32_000 })
// 181K total tokens — well above context-output (168K) but below input limit (200K)
const tokens = { input: 166_000, output: 10_000, reasoning: 0, cache: { read: 5_000, write: 0 } }
const withLimit = await SessionCompaction.isOverflow({ tokens, model: withInputLimit })
const withoutLimit = await SessionCompaction.isOverflow({ tokens, model: withoutInputLimit })
// Both models have identical real capacity — they should agree:
expect(withLimit).toBe(true) // should compact (181K leaves no room for 32K output)
expect(withoutLimit).toBe(true) // correctly compacts (181K > 168K)
},
})
})
test("returns false when model context limit is 0", async () => {
await using tmp = await tmpdir()
await Instance.provide({
@@ -290,4 +371,53 @@ describe("session.getUsage", () => {
expect(result.cost).toBe(3 + 1.5)
})
test.each(["@ai-sdk/anthropic", "@ai-sdk/amazon-bedrock", "@ai-sdk/google-vertex/anthropic"])(
"computes total from components for %s models",
(npm) => {
const model = createModel({ context: 100_000, output: 32_000, npm })
const usage = {
inputTokens: 1000,
outputTokens: 500,
// These providers typically report total as input + output only,
// excluding cache read/write.
totalTokens: 1500,
cachedInputTokens: 200,
}
if (npm === "@ai-sdk/amazon-bedrock") {
const result = Session.getUsage({
model,
usage,
metadata: {
bedrock: {
usage: {
cacheWriteInputTokens: 300,
},
},
},
})
expect(result.tokens.input).toBe(1000)
expect(result.tokens.cache.read).toBe(200)
expect(result.tokens.cache.write).toBe(300)
expect(result.tokens.total).toBe(2000)
return
}
const result = Session.getUsage({
model,
usage,
metadata: {
anthropic: {
cacheCreationInputTokens: 300,
},
},
})
expect(result.tokens.input).toBe(1000)
expect(result.tokens.cache.read).toBe(200)
expect(result.tokens.cache.write).toBe(300)
expect(result.tokens.total).toBe(2000)
},
)
})

View File

@@ -314,12 +314,7 @@ describe("session.llm.stream", () => {
expect(body.stream).toBe(true)
const maxTokens = (body.max_tokens as number | undefined) ?? (body.max_output_tokens as number | undefined)
const expectedMaxTokens = ProviderTransform.maxOutputTokens(
resolved.api.npm,
ProviderTransform.options({ model: resolved, sessionID }),
resolved.limit.output,
LLM.OUTPUT_TOKEN_MAX,
)
const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
expect(maxTokens).toBe(expectedMaxTokens)
const reasoning = (body.reasoningEffort as string | undefined) ?? (body.reasoning_effort as string | undefined)
@@ -442,12 +437,7 @@ describe("session.llm.stream", () => {
expect((body.reasoning as { effort?: string } | undefined)?.effort).toBe("high")
const maxTokens = body.max_output_tokens as number | undefined
const expectedMaxTokens = ProviderTransform.maxOutputTokens(
resolved.api.npm,
ProviderTransform.options({ model: resolved, sessionID }),
resolved.limit.output,
LLM.OUTPUT_TOKEN_MAX,
)
const expectedMaxTokens = ProviderTransform.maxOutputTokens(resolved)
expect(maxTokens).toBe(expectedMaxTokens)
},
})
@@ -565,14 +555,7 @@ describe("session.llm.stream", () => {
expect(capture.url.pathname.endsWith("/messages")).toBe(true)
expect(body.model).toBe(resolved.api.id)
expect(body.max_tokens).toBe(
ProviderTransform.maxOutputTokens(
resolved.api.npm,
ProviderTransform.options({ model: resolved, sessionID }),
resolved.limit.output,
LLM.OUTPUT_TOKEN_MAX,
),
)
expect(body.max_tokens).toBe(ProviderTransform.maxOutputTokens(resolved))
expect(body.temperature).toBe(0.4)
expect(body.top_p).toBe(0.9)
},
@@ -677,14 +660,7 @@ describe("session.llm.stream", () => {
expect(capture.url.pathname).toBe(pathSuffix)
expect(config?.temperature).toBe(0.3)
expect(config?.topP).toBe(0.8)
expect(config?.maxOutputTokens).toBe(
ProviderTransform.maxOutputTokens(
resolved.api.npm,
ProviderTransform.options({ model: resolved, sessionID }),
resolved.limit.output,
LLM.OUTPUT_TOKEN_MAX,
),
)
expect(config?.maxOutputTokens).toBe(ProviderTransform.maxOutputTokens(resolved))
},
})
})

View File

@@ -112,6 +112,15 @@ describe("session.retry.retryable", () => {
const error = wrap("not-json")
expect(SessionRetry.retryable(error)).toBeUndefined()
})
test("does not retry context overflow errors", () => {
const error = new MessageV2.ContextOverflowError({
message: "Input exceeds context window of this model",
responseBody: '{"error":{"code":"context_length_exceeded"}}',
}).toObject() as ReturnType<NamedError["toObject"]>
expect(SessionRetry.retryable(error)).toBeUndefined()
})
})
describe("session.message-v2.fromError", () => {

View File

@@ -203,6 +203,7 @@ export type AssistantMessage = {
summary?: boolean
cost: number
tokens: {
total?: number
input: number
output: number
reasoning: number
@@ -418,6 +419,7 @@ export type StepFinishPart = {
snapshot?: string
cost: number
tokens: {
total?: number
input: number
output: number
reasoning: number
@@ -1822,6 +1824,10 @@ export type Config = {
* Enable pruning of old tool outputs (default: true)
*/
prune?: boolean
/**
* Token buffer for compaction. Leaves enough window to avoid overflow during compaction.
*/
reserved?: number
}
experimental?: {
disable_paste_summary?: boolean

View File

@@ -490,13 +490,15 @@ You can control context compaction behavior through the `compaction` option.
"$schema": "https://opencode.ai/config.json",
"compaction": {
"auto": true,
"prune": true
"prune": true,
"reserved": 10000
}
}
```
- `auto` - Automatically compact the session when context is full (default: `true`).
- `prune` - Remove old tool outputs to save tokens (default: `true`).
- `reserved` - Token buffer held back for compaction. Leaves enough room in the context window to avoid overflow during compaction (default: `20000`, or the model's max output tokens if that is lower).
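For example, with a 200,000-token context window and `reserved: 10000`, automatic compaction kicks in once total token usage reaches roughly 190,000, rather than waiting until the window is completely full.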
---