fix: ensure switching Anthropic models mid-conversation on Copilot works without errors; fix `reasoning_opaque` not being picked up for Gemini models (#11569)

This commit is contained in:
Aiden Cline
2026-02-01 01:36:44 -06:00
committed by GitHub
parent c3faeae9d0
commit d1d7447493
4 changed files with 83 additions and 6 deletions

View File

@@ -100,7 +100,7 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Pro
break break
} }
case "reasoning": { case "reasoning": {
reasoningText = part.text if (part.text) reasoningText = part.text
break break
} }
case "tool-call": { case "tool-call": {
@@ -122,7 +122,7 @@ export function convertToOpenAICompatibleChatMessages(prompt: LanguageModelV2Pro
role: "assistant", role: "assistant",
content: text || null, content: text || null,
tool_calls: toolCalls.length > 0 ? toolCalls : undefined, tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
reasoning_text: reasoningText, reasoning_text: reasoningOpaque ? reasoningText : undefined,
reasoning_opaque: reasoningOpaque, reasoning_opaque: reasoningOpaque,
...metadata, ...metadata,
}) })

View File

@@ -219,7 +219,13 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
// text content: // text content:
const text = choice.message.content const text = choice.message.content
if (text != null && text.length > 0) { if (text != null && text.length > 0) {
content.push({ type: "text", text }) content.push({
type: "text",
text,
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
})
} }
// reasoning content (Copilot uses reasoning_text): // reasoning content (Copilot uses reasoning_text):
@@ -243,6 +249,9 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
toolCallId: toolCall.id ?? generateId(), toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name, toolName: toolCall.function.name,
input: toolCall.function.arguments!, input: toolCall.function.arguments!,
providerMetadata: choice.message.reasoning_opaque
? { copilot: { reasoningOpaque: choice.message.reasoning_opaque } }
: undefined,
}) })
} }
} }
@@ -478,7 +487,11 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
} }
if (!isActiveText) { if (!isActiveText) {
controller.enqueue({ type: "text-start", id: "txt-0" }) controller.enqueue({
type: "text-start",
id: "txt-0",
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
})
isActiveText = true isActiveText = true
} }
@@ -559,6 +572,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
toolCallId: toolCall.id ?? generateId(), toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name, toolName: toolCall.function.name,
input: toolCall.function.arguments, input: toolCall.function.arguments,
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
}) })
toolCall.hasFinished = true toolCall.hasFinished = true
} }
@@ -601,6 +615,7 @@ export class OpenAICompatibleChatLanguageModel implements LanguageModelV2 {
toolCallId: toolCall.id ?? generateId(), toolCallId: toolCall.id ?? generateId(),
toolName: toolCall.function.name, toolName: toolCall.function.name,
input: toolCall.function.arguments, input: toolCall.function.arguments,
providerMetadata: reasoningOpaque ? { copilot: { reasoningOpaque } } : undefined,
}) })
toolCall.hasFinished = true toolCall.hasFinished = true
} }

View File

@@ -354,7 +354,7 @@ describe("tool calls", () => {
}) })
describe("reasoning (copilot-specific)", () => { describe("reasoning (copilot-specific)", () => {
test("should include reasoning_text from reasoning part", () => { test("should omit reasoning_text without reasoning_opaque", () => {
const result = convertToCopilotMessages([ const result = convertToCopilotMessages([
{ {
role: "assistant", role: "assistant",
@@ -370,7 +370,7 @@ describe("reasoning (copilot-specific)", () => {
role: "assistant", role: "assistant",
content: "The answer is 42.", content: "The answer is 42.",
tool_calls: undefined, tool_calls: undefined,
reasoning_text: "Let me think about this...", reasoning_text: undefined,
reasoning_opaque: undefined, reasoning_opaque: undefined,
}, },
]) ])
@@ -404,6 +404,33 @@ describe("reasoning (copilot-specific)", () => {
]) ])
}) })
test("should include reasoning_opaque from text part providerOptions", () => {
const result = convertToCopilotMessages([
{
role: "assistant",
content: [
{
type: "text",
text: "Done!",
providerOptions: {
copilot: { reasoningOpaque: "opaque-text-456" },
},
},
],
},
])
expect(result).toEqual([
{
role: "assistant",
content: "Done!",
tool_calls: undefined,
reasoning_text: undefined,
reasoning_opaque: "opaque-text-456",
},
])
})
test("should handle reasoning-only assistant message", () => { test("should handle reasoning-only assistant message", () => {
const result = convertToCopilotMessages([ const result = convertToCopilotMessages([
{ {

View File

@@ -65,6 +65,12 @@ const FIXTURES = {
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"code\\":\\"1 + 1\\"}","name":"project_eval"},"id":"call_MHw3RDhmT1J5Z3B6WlhpVjlveTc","index":0,"type":"function"}],"reasoning_opaque":"ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":12,"prompt_tokens":8677,"prompt_tokens_details":{"cached_tokens":3692},"total_tokens":8768,"reasoning_tokens":79},"model":"gemini-3-pro-preview"}`, `data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{\\"code\\":\\"1 + 1\\"}","name":"project_eval"},"id":"call_MHw3RDhmT1J5Z3B6WlhpVjlveTc","index":0,"type":"function"}],"reasoning_opaque":"ytGNWFf2doK38peANDvm7whkLPKrd+Fv6/k34zEPBF6Qwitj4bTZT0FBXleydLb6"}}],"created":1766068644,"id":"oBFEaafzD9DVlOoPkY3l4Qs","usage":{"completion_tokens":12,"prompt_tokens":8677,"prompt_tokens_details":{"cached_tokens":3692},"total_tokens":8768,"reasoning_tokens":79},"model":"gemini-3-pro-preview"}`,
`data: [DONE]`, `data: [DONE]`,
], ],
reasoningOpaqueWithToolCallsNoReasoningText: [
`data: {"choices":[{"index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"read_file"},"id":"call_reasoning_only","index":0,"type":"function"}],"reasoning_opaque":"opaque-xyz"}}],"created":1769917420,"id":"opaque-only","usage":{"completion_tokens":0,"prompt_tokens":0,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":0,"reasoning_tokens":0},"model":"gemini-3-flash-preview"}`,
`data: {"choices":[{"finish_reason":"tool_calls","index":0,"delta":{"content":null,"role":"assistant","tool_calls":[{"function":{"arguments":"{}","name":"read_file"},"id":"call_reasoning_only_2","index":1,"type":"function"}]}}],"created":1769917420,"id":"opaque-only","usage":{"completion_tokens":12,"prompt_tokens":123,"prompt_tokens_details":{"cached_tokens":0},"total_tokens":135,"reasoning_tokens":0},"model":"gemini-3-flash-preview"}`,
`data: [DONE]`,
],
} }
function createMockFetch(chunks: string[]) { function createMockFetch(chunks: string[]) {
@@ -447,6 +453,35 @@ describe("doStream", () => {
}) })
}) })
test("should attach reasoning_opaque to tool calls without reasoning_text", async () => {
const mockFetch = createMockFetch(FIXTURES.reasoningOpaqueWithToolCallsNoReasoningText)
const model = createModel(mockFetch)
const { stream } = await model.doStream({
prompt: TEST_PROMPT,
includeRawChunks: false,
})
const parts = await convertReadableStreamToArray(stream)
const reasoningParts = parts.filter(
(p) => p.type === "reasoning-start" || p.type === "reasoning-delta" || p.type === "reasoning-end",
)
expect(reasoningParts).toHaveLength(0)
const toolCall = parts.find((p) => p.type === "tool-call" && p.toolCallId === "call_reasoning_only")
expect(toolCall).toMatchObject({
type: "tool-call",
toolCallId: "call_reasoning_only",
toolName: "read_file",
providerMetadata: {
copilot: {
reasoningOpaque: "opaque-xyz",
},
},
})
})
test("should include response metadata from first chunk", async () => { test("should include response metadata from first chunk", async () => {
const mockFetch = createMockFetch(FIXTURES.basicText) const mockFetch = createMockFetch(FIXTURES.basicText)
const model = createModel(mockFetch) const model = createModel(mockFetch)