fix: track reasoning by output_index for copilot compatibility (#9124)
Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com>
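GitHub Copilot's Responses-style endpoint rotates the encrypted reasoning item id on every stream event, so any state keyed by item_id falls apart mid-stream: each event appears to belong to a brand-new reasoning item. An output item's output_index, by contrast, stays stable for the life of the response. This change keys the active reasoning state by output_index, records the first item.id seen in output_item.added as a canonical id for all emitted part ids, and correlates reasoning-summary events (which carry only a possibly-rotated item_id) through the currently active output index.

A minimal sketch of the failure mode and the fix, using hypothetical simplified event shapes rather than the provider's real chunk types:

    // Copilot may rotate item_id between deltas for the same reasoning item,
    // while output_index stays fixed.
    type SummaryDelta = { item_id: string; output_index: number; delta: string }

    const events: SummaryDelta[] = [
      { item_id: "rs_aaa", output_index: 0, delta: "Weighing " },
      { item_id: "rs_bbb", output_index: 0, delta: "options..." }, // rotated id, same item
    ]

    // Keyed by item_id, the two deltas split into two parts:
    const byId: Record<string, string> = {}
    for (const e of events) byId[e.item_id] = (byId[e.item_id] ?? "") + e.delta
    console.log(Object.keys(byId).length) // 2 — one reasoning part became two

    // Keyed by output_index, they correlate into one part:
    const byIndex: Record<number, string> = {}
    for (const e of events) byIndex[e.output_index] = (byIndex[e.output_index] ?? "") + e.delta
    console.log(byIndex[0]) // "Weighing options..."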
@@ -615,13 +615,13 @@ export namespace Provider {
             },
             experimentalOver200K: model.cost?.context_over_200k
               ? {
                   cache: {
                     read: model.cost.context_over_200k.cache_read ?? 0,
                     write: model.cost.context_over_200k.cache_write ?? 0,
                   },
                   input: model.cost.context_over_200k.input,
                   output: model.cost.context_over_200k.output,
                 }
               : undefined,
           },
           limit: {
@@ -815,14 +815,20 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     // flag that checks if there have been client-side tool calls (not executed by openai)
     let hasFunctionCall = false

+    // Track reasoning by output_index instead of item_id
+    // GitHub Copilot rotates encrypted item IDs on every event
     const activeReasoning: Record<
-      string,
+      number,
       {
+        canonicalId: string // the item.id from output_item.added
         encryptedContent?: string | null
         summaryParts: number[]
       }
     > = {}

+    // Track current active reasoning output_index for correlating summary events
+    let currentReasoningOutputIndex: number | null = null
+
     // Track a stable text part id for the current assistant message.
     // Copilot may change item_id across text deltas; normalize to one id.
     let currentTextId: string | null = null
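The hunk above introduces the two pieces of state the rest of the fix leans on: a map from output_index to per-item bookkeeping, and a cursor for the index currently streaming. A rough sketch of the intended lifecycle, with illustrative handler names that are not the provider's own:

    type ReasoningState = {
      canonicalId: string // first item.id seen; pins the public part ids
      encryptedContent?: string | null
      summaryParts: number[]
    }

    const activeReasoning: Record<number, ReasoningState> = {}
    let currentReasoningOutputIndex: number | null = null

    // output_item.added: register under the stable output_index and capture
    // the canonical id before it can rotate.
    function onAdded(outputIndex: number, itemId: string) {
      activeReasoning[outputIndex] = { canonicalId: itemId, summaryParts: [0] }
      currentReasoningOutputIndex = outputIndex
    }

    // summary events: resolve through the cursor, ignoring the (possibly
    // rotated) item_id the event itself carries.
    function partIdFor(summaryIndex: number): string | null {
      const item =
        currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
      return item ? `${item.canonicalId}:${summaryIndex}` : null
    }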
@@ -933,10 +939,12 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
             },
           })
         } else if (isResponseOutputItemAddedReasoningChunk(value)) {
-          activeReasoning[value.item.id] = {
+          activeReasoning[value.output_index] = {
+            canonicalId: value.item.id,
             encryptedContent: value.item.encrypted_content,
             summaryParts: [0],
           }
+          currentReasoningOutputIndex = value.output_index

           controller.enqueue({
             type: "reasoning-start",
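Capturing canonicalId here matters because output_item.added is the first event to carry the item's id, before any rotation can occur; every later part id is derived from it as `${canonicalId}:${summaryIndex}`. For example, assuming a hypothetical canonical id "rs_aaa", summary parts 0 and 1 stream under "rs_aaa:0" and "rs_aaa:1" no matter which item_id later events report.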
@@ -1091,22 +1099,25 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
             currentTextId = null
           }
         } else if (isResponseOutputItemDoneReasoningChunk(value)) {
-          const activeReasoningPart = activeReasoning[value.item.id]
+          const activeReasoningPart = activeReasoning[value.output_index]
           if (activeReasoningPart) {
             for (const summaryIndex of activeReasoningPart.summaryParts) {
               controller.enqueue({
                 type: "reasoning-end",
-                id: `${value.item.id}:${summaryIndex}`,
+                id: `${activeReasoningPart.canonicalId}:${summaryIndex}`,
                 providerMetadata: {
                   openai: {
-                    itemId: value.item.id,
+                    itemId: activeReasoningPart.canonicalId,
                     reasoningEncryptedContent: value.item.encrypted_content ?? null,
                   },
                 },
               })
             }
+            delete activeReasoning[value.output_index]
+            if (currentReasoningOutputIndex === value.output_index) {
+              currentReasoningOutputIndex = null
+            }
           }
-          delete activeReasoning[value.item.id]
         }
       } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
         const toolCall = ongoingToolCalls[value.output_index]
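Two cleanup details change in this hunk: the entry is deleted under its output_index key (the old `delete activeReasoning[value.item.id]` targeted a key that, once ids rotate, may never have existed, leaking entries), and the deletion moves inside the `if (activeReasoningPart)` guard alongside resetting currentReasoningOutputIndex, so a finished item can no longer attract later summary events.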
@@ -1198,32 +1209,40 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
           logprobs.push(value.logprobs)
         }
       } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+        const activeItem =
+          currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
+
         // the first reasoning start is pushed in isResponseOutputItemAddedReasoningChunk.
-        if (value.summary_index > 0) {
-          activeReasoning[value.item_id]?.summaryParts.push(value.summary_index)
+        if (activeItem && value.summary_index > 0) {
+          activeItem.summaryParts.push(value.summary_index)

           controller.enqueue({
             type: "reasoning-start",
-            id: `${value.item_id}:${value.summary_index}`,
+            id: `${activeItem.canonicalId}:${value.summary_index}`,
             providerMetadata: {
               openai: {
-                itemId: value.item_id,
-                reasoningEncryptedContent: activeReasoning[value.item_id]?.encryptedContent ?? null,
+                itemId: activeItem.canonicalId,
+                reasoningEncryptedContent: activeItem.encryptedContent ?? null,
               },
             },
           })
         }
       } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
-        controller.enqueue({
-          type: "reasoning-delta",
-          id: `${value.item_id}:${value.summary_index}`,
-          delta: value.delta,
-          providerMetadata: {
-            openai: {
-              itemId: value.item_id,
-            },
-          },
-        })
+        const activeItem =
+          currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
+
+        if (activeItem) {
+          controller.enqueue({
+            type: "reasoning-delta",
+            id: `${activeItem.canonicalId}:${value.summary_index}`,
+            delta: value.delta,
+            providerMetadata: {
+              openai: {
+                itemId: activeItem.canonicalId,
+              },
+            },
+          })
+        }
       } else if (isResponseFinishedChunk(value)) {
         finishReason = mapOpenAIResponseFinishReason({
           finishReason: value.response.incomplete_details?.reason,
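With these last two hunks, the summary handlers resolve the active item through currentReasoningOutputIndex instead of trusting the event's item_id, and both become no-ops when nothing is active: a summary delta arriving outside an open reasoning item is dropped rather than minting a part under an unknown, rotated id.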