feat: use anthropic compat messages api for anthropic models through copilot

This commit is contained in:
Aiden Cline
2026-01-26 13:17:59 -05:00
parent 7795cae0b5
commit ac53a372b0
3 changed files with 35 additions and 18 deletions

View File

@@ -26,6 +26,9 @@ export async function CopilotAuthPlugin(input: PluginInput): Promise<Hooks> {
const info = await getAuth()
if (!info || info.type !== "oauth") return {}
const enterpriseUrl = info.enterpriseUrl
const baseURL = enterpriseUrl ? `https://copilot-api.${normalizeDomain(enterpriseUrl)}` : undefined
if (provider && provider.models) {
for (const model of Object.values(provider.models)) {
model.cost = {
@@ -36,16 +39,23 @@ export async function CopilotAuthPlugin(input: PluginInput): Promise<Hooks> {
write: 0,
},
}
// TODO: move some of this hacky-ness to models.dev presets once we have better grasp of things here...
const base = baseURL ?? model.api.url
const claude = model.id.includes("claude")
const url = iife(() => {
if (!claude) return base
if (base.endsWith("/v1")) return base
if (base.endsWith("/")) return `${base}v1`
return `${base}/v1`
})
model.api.url = url
model.api.npm = claude ? "@ai-sdk/anthropic" : "@ai-sdk/github-copilot"
}
}
const enterpriseUrl = info.enterpriseUrl
const baseURL = enterpriseUrl
? `https://copilot-api.${normalizeDomain(enterpriseUrl)}`
: "https://api.githubcopilot.com"
return {
baseURL,
apiKey: "",
async fetch(request: RequestInfo | URL, init?: RequestInit) {
const info = await getAuth()

View File

@@ -132,6 +132,7 @@ export namespace Provider {
return {
autoload: false,
async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
if (sdk.responses === undefined && sdk.chat === undefined) return sdk.languageModel(modelID)
return shouldUseCopilotResponsesApi(modelID) ? sdk.responses(modelID) : sdk.chat(modelID)
},
options: {},
@@ -141,6 +142,7 @@ export namespace Provider {
return {
autoload: false,
async getModel(sdk: any, modelID: string, _options?: Record<string, any>) {
if (sdk.responses === undefined && sdk.chat === undefined) return sdk.languageModel(modelID)
return shouldUseCopilotResponsesApi(modelID) ? sdk.responses(modelID) : sdk.chat(modelID)
},
options: {},
@@ -601,10 +603,7 @@ export namespace Provider {
api: {
id: model.id,
url: provider.api!,
npm: iife(() => { npm: model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible",
if (provider.id.startsWith("github-copilot")) return "@ai-sdk/github-copilot"
return model.provider?.npm ?? provider.npm ?? "@ai-sdk/openai-compatible"
}),
},
status: model.status ?? "active",
headers: model.headers ?? {},
@@ -924,6 +923,8 @@ export namespace Provider {
)
delete provider.models[modelID]
model.variants = mapValues(ProviderTransform.variants(model), (v) => v)
// Filter out disabled variants from config
const configVariants = configProvider?.models?.[modelID]?.variants
if (configVariants && model.variants) {

View File

@@ -150,14 +150,20 @@ export namespace LLM {
},
)
const maxOutputTokens = isCodex const maxOutputTokens = isCodex ? undefined : undefined
? undefined log.info("max_output_tokens", {
: ProviderTransform.maxOutputTokens( tokens: ProviderTransform.maxOutputTokens(
input.model.api.npm,
params.options,
input.model.limit.output,
OUTPUT_TOKEN_MAX,
) ),
modelOptions: params.options,
outputLimit: input.model.limit.output,
})
// tokens = 32000
// outputLimit = 64000
// modelOptions={"reasoningEffort":"minimal"}
const tools = await resolveTools(input)