fix(opencode): add input limit for compaction (#8465)

Author: Brandon Smith
Date: 2026-01-15 01:35:16 -06:00
Committed by: GitHub
parent 92931437c4
commit 8d720f9463
5 changed files with 47 additions and 34 deletions


@@ -361,38 +361,6 @@ export async function CodexAuthPlugin(input: PluginInput): Promise<Hooks> {
       }
     }
-    if (!provider.models["gpt-5.2-codex"]) {
-      const model = {
-        id: "gpt-5.2-codex",
-        providerID: "openai",
-        api: {
-          id: "gpt-5.2-codex",
-          url: "https://chatgpt.com/backend-api/codex",
-          npm: "@ai-sdk/openai",
-        },
-        name: "GPT-5.2 Codex",
-        capabilities: {
-          temperature: false,
-          reasoning: true,
-          attachment: true,
-          toolcall: true,
-          input: { text: true, audio: false, image: true, video: false, pdf: false },
-          output: { text: true, audio: false, image: false, video: false, pdf: false },
-          interleaved: false,
-        },
-        cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
-        limit: { context: 400000, output: 128000 },
-        status: "active" as const,
-        options: {},
-        headers: {},
-        release_date: "2025-12-18",
-        variants: {} as Record<string, Record<string, any>>,
-        family: "gpt-codex",
-      }
-      model.variants = ProviderTransform.variants(model)
-      provider.models["gpt-5.2-codex"] = model
-    }
     // Zero out costs for Codex (included with ChatGPT subscription)
     for (const model of Object.values(provider.models)) {
       model.cost = {
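The hunk display is cut off mid-statement at this point. For readability, here is a minimal sketch of how the cost-zeroing loop presumably completes, reusing the cost shape from the removed model block above; this is an assumption, not the file's verbatim code:

```ts
// Sketch (assumed continuation): zero out per-token pricing for every model
// on this provider, since Codex usage is covered by the ChatGPT subscription.
for (const model of Object.values(provider.models)) {
  model.cost = { input: 0, output: 0, cache: { read: 0, write: 0 } }
}
```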


@@ -47,6 +47,7 @@ export namespace ModelsDev {
       .optional(),
     limit: z.object({
       context: z.number(),
+      input: z.number().optional(),
       output: z.number(),
     }),
     modalities: z
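As an illustration, here is a standalone sketch of this limit schema with the new optional input cap, exercised with the GPT-5.2 Codex-style numbers that the tests below use (the `Limit` binding is hypothetical, not the project's identifier):

```ts
import { z } from "zod"

// Hypothetical standalone copy of the limit schema above.
const Limit = z.object({
  context: z.number(),
  input: z.number().optional(),
  output: z.number(),
})

// A 400k-token context window whose prompt side is capped at 272k.
Limit.parse({ context: 400_000, input: 272_000, output: 128_000 })

// Models that declare no separate input cap still validate.
Limit.parse({ context: 200_000, output: 64_000 })
```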


@@ -557,6 +557,7 @@ export namespace Provider {
     }),
     limit: z.object({
       context: z.number(),
+      input: z.number().optional(),
       output: z.number(),
     }),
     status: z.enum(["alpha", "beta", "deprecated", "active"]),
@@ -619,6 +620,7 @@ export namespace Provider {
     },
     limit: {
       context: model.limit.context,
+      input: model.limit.input,
       output: model.limit.output,
     },
     capabilities: {


@@ -34,7 +34,7 @@ export namespace SessionCompaction {
     if (context === 0) return false
     const count = input.tokens.input + input.tokens.cache.read + input.tokens.output
     const output = Math.min(input.model.limit.output, SessionPrompt.OUTPUT_TOKEN_MAX) || SessionPrompt.OUTPUT_TOKEN_MAX
-    const usable = context - output
+    const usable = input.model.limit.input || context - output
     return count > usable
   }
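Put together, the overflow check now prefers a model's declared input cap over the derived context-minus-output budget. Below is a self-contained sketch of the logic after this change; the function shape is simplified, and the value of `OUTPUT_TOKEN_MAX` is an assumption for illustration, standing in for `SessionPrompt.OUTPUT_TOKEN_MAX`:

```ts
// Assumed stand-in for SessionPrompt.OUTPUT_TOKEN_MAX.
const OUTPUT_TOKEN_MAX = 32_000

interface Usage {
  input: number
  output: number
  cache: { read: number; write: number }
}

interface Limit {
  context: number
  input?: number // new in this change
  output: number
}

function isOverflow(tokens: Usage, limit: Limit): boolean {
  if (limit.context === 0) return false
  const count = tokens.input + tokens.cache.read + tokens.output
  // Reserve headroom for the model's next response, capped at OUTPUT_TOKEN_MAX.
  const output = Math.min(limit.output, OUTPUT_TOKEN_MAX) || OUTPUT_TOKEN_MAX
  // Prefer the provider-declared input cap when present; otherwise fall back
  // to the old budget of context minus reserved output.
  const usable = limit.input || limit.context - output
  return count > usable
}

// Worked example matching the first new test below: 271k input + 2k cache
// reads + 1k output = 274k tokens, which exceeds the 272k input cap even
// though it fits easily in the 400k context window.
isOverflow(
  { input: 271_000, output: 1_000, cache: { read: 2_000, write: 0 } },
  { context: 400_000, input: 272_000, output: 128_000 },
) // => true
```

Note that `limit.input || …` treats an explicit cap of 0 as absent and falls back to the context-based budget, mirroring the `|| OUTPUT_TOKEN_MAX` idiom on the line above it.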


@@ -10,13 +10,19 @@ import type { Provider } from "../../src/provider/provider"
 Log.init({ print: false })
-function createModel(opts: { context: number; output: number; cost?: Provider.Model["cost"] }): Provider.Model {
+function createModel(opts: {
+  context: number
+  output: number
+  input?: number
+  cost?: Provider.Model["cost"]
+}): Provider.Model {
   return {
     id: "test-model",
     providerID: "test",
     name: "Test",
     limit: {
       context: opts.context,
+      input: opts.input,
       output: opts.output,
     },
     cost: opts.cost ?? { input: 0, output: 0, cache: { read: 0, write: 0 } },
@@ -70,6 +76,42 @@ describe("session.compaction.isOverflow", () => {
     })
   })
+  test("respects input limit for input caps", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
+        const tokens = { input: 271_000, output: 1_000, reasoning: 0, cache: { read: 2_000, write: 0 } }
+        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(true)
+      },
+    })
+  })
+  test("returns false when input/output are within input caps", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const model = createModel({ context: 400_000, input: 272_000, output: 128_000 })
+        const tokens = { input: 200_000, output: 20_000, reasoning: 0, cache: { read: 10_000, write: 0 } }
+        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
+      },
+    })
+  })
+  test("returns false when output within limit with input caps", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        const model = createModel({ context: 200_000, input: 120_000, output: 10_000 })
+        const tokens = { input: 50_000, output: 9_999, reasoning: 0, cache: { read: 0, write: 0 } }
+        expect(await SessionCompaction.isOverflow({ tokens, model })).toBe(false)
+      },
+    })
+  })
+  test("returns false when model context limit is 0", async () => {
+    await using tmp = await tmpdir()
+    await Instance.provide({