chore: generate

This commit is contained in:
opencode-agent[bot]
2026-01-31 01:32:46 +00:00
parent 0c32afbc35
commit 252b2c450d
2 changed files with 34 additions and 8 deletions

View File

@@ -683,13 +683,19 @@ export namespace ProviderTransform {
   // Handle thinking mode for @ai-sdk/anthropic, @ai-sdk/google-vertex/anthropic (budgetTokens)
   // and @ai-sdk/openai-compatible with Claude (budget_tokens)
-  if (npm === "@ai-sdk/anthropic" || npm === "@ai-sdk/google-vertex/anthropic" || npm === "@ai-sdk/openai-compatible") {
+  if (
+    npm === "@ai-sdk/anthropic" ||
+    npm === "@ai-sdk/google-vertex/anthropic" ||
+    npm === "@ai-sdk/openai-compatible"
+  ) {
     const thinking = options?.["thinking"]
     // Support both camelCase (for @ai-sdk/anthropic) and snake_case (for openai-compatible)
     const budgetTokens =
-      typeof thinking?.["budgetTokens"] === "number" ? thinking["budgetTokens"] :
-      typeof thinking?.["budget_tokens"] === "number" ? thinking["budget_tokens"] :
-      0
+      typeof thinking?.["budgetTokens"] === "number"
+        ? thinking["budgetTokens"]
+        : typeof thinking?.["budget_tokens"] === "number"
+          ? thinking["budget_tokens"]
+          : 0
     const enabled = thinking?.["type"] === "enabled"
     if (enabled && budgetTokens > 0) {
       // Return text tokens so that text + thinking <= model cap, preferring 32k text when possible.

View File

@@ -277,7 +277,12 @@ describe("ProviderTransform.maxOutputTokens", () => {
         budget_tokens: 10000,
       },
     }
-    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+    const result = ProviderTransform.maxOutputTokens(
+      "@ai-sdk/openai-compatible",
+      options,
+      modelLimit,
+      OUTPUT_TOKEN_MAX,
+    )
     expect(result).toBe(OUTPUT_TOKEN_MAX)
   })
@@ -289,7 +294,12 @@ describe("ProviderTransform.maxOutputTokens", () => {
         budget_tokens: 30000,
       },
     }
-    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+    const result = ProviderTransform.maxOutputTokens(
+      "@ai-sdk/openai-compatible",
+      options,
+      modelLimit,
+      OUTPUT_TOKEN_MAX,
+    )
     expect(result).toBe(20000)
   })
@@ -301,7 +311,12 @@ describe("ProviderTransform.maxOutputTokens", () => {
         budget_tokens: 10000,
       },
     }
-    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+    const result = ProviderTransform.maxOutputTokens(
+      "@ai-sdk/openai-compatible",
+      options,
+      modelLimit,
+      OUTPUT_TOKEN_MAX,
+    )
     expect(result).toBe(OUTPUT_TOKEN_MAX)
   })
@@ -313,7 +328,12 @@ describe("ProviderTransform.maxOutputTokens", () => {
         budget_tokens: 0,
       },
     }
-    const result = ProviderTransform.maxOutputTokens("@ai-sdk/openai-compatible", options, modelLimit, OUTPUT_TOKEN_MAX)
+    const result = ProviderTransform.maxOutputTokens(
+      "@ai-sdk/openai-compatible",
+      options,
+      modelLimit,
+      OUTPUT_TOKEN_MAX,
+    )
     expect(result).toBe(OUTPUT_TOKEN_MAX)
   })
 })