148 changes: 100 additions & 48 deletions bun.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -40,7 +40,7 @@
   "@tailwindcss/vite": "4.1.11",
   "diff": "8.0.2",
   "dompurify": "3.3.1",
-  "ai": "5.0.124",
+  "ai": "6.0.72",
   "hono": "4.10.7",
   "hono-openapi": "1.1.2",
   "fuzzysort": "3.1.0",
38 changes: 19 additions & 19 deletions packages/opencode/package.json
@@ -51,25 +51,25 @@
   "@actions/core": "1.11.1",
   "@actions/github": "6.0.1",
   "@agentclientprotocol/sdk": "0.14.1",
-  "@ai-sdk/amazon-bedrock": "3.0.74",
-  "@ai-sdk/anthropic": "2.0.58",
-  "@ai-sdk/azure": "2.0.91",
-  "@ai-sdk/cerebras": "1.0.36",
-  "@ai-sdk/cohere": "2.0.22",
-  "@ai-sdk/deepinfra": "1.0.33",
-  "@ai-sdk/gateway": "2.0.30",
-  "@ai-sdk/google": "2.0.52",
-  "@ai-sdk/google-vertex": "3.0.98",
-  "@ai-sdk/groq": "2.0.34",
-  "@ai-sdk/mistral": "2.0.27",
-  "@ai-sdk/openai": "2.0.89",
-  "@ai-sdk/openai-compatible": "1.0.32",
-  "@ai-sdk/perplexity": "2.0.23",
-  "@ai-sdk/provider": "2.0.1",
-  "@ai-sdk/provider-utils": "3.0.20",
-  "@ai-sdk/togetherai": "1.0.34",
-  "@ai-sdk/vercel": "1.0.33",
-  "@ai-sdk/xai": "2.0.51",
+  "@ai-sdk/amazon-bedrock": "4.0.49",
+  "@ai-sdk/anthropic": "3.0.37",
+  "@ai-sdk/azure": "3.0.26",
+  "@ai-sdk/cerebras": "2.0.30",
+  "@ai-sdk/cohere": "3.0.18",
+  "@ai-sdk/deepinfra": "2.0.31",
+  "@ai-sdk/gateway": "3.0.35",
+  "@ai-sdk/google": "3.0.21",
+  "@ai-sdk/google-vertex": "4.0.44",
+  "@ai-sdk/groq": "3.0.21",
+  "@ai-sdk/mistral": "3.0.18",
+  "@ai-sdk/openai": "3.0.25",
+  "@ai-sdk/openai-compatible": "2.0.27",
+  "@ai-sdk/perplexity": "3.0.17",
+  "@ai-sdk/provider": "3.0.7",
+  "@ai-sdk/provider-utils": "4.0.13",
+  "@ai-sdk/togetherai": "2.0.30",
+  "@ai-sdk/vercel": "2.0.29",
+  "@ai-sdk/xai": "3.0.47",
   "@clack/prompts": "1.0.0-alpha.1",
   "@gitlab/gitlab-ai-provider": "3.4.0",
   "@gitlab/opencode-gitlab-auth": "1.3.2",
2 changes: 1 addition & 1 deletion packages/opencode/src/plugin/index.ts
@@ -16,7 +16,7 @@ import { gitlabAuthPlugin as GitlabAuthPlugin } from "@gitlab/opencode-gitlab-auth"
 export namespace Plugin {
   const log = Log.create({ service: "plugin" })

-  const BUILTIN = ["opencode-anthropic-auth@0.0.13"]
+  const BUILTIN = ["github:okhsunrog/opencode-anthropic-auth#feat/oauth-context-cap"]
Review comment: opencode core shouldn't have builtin plugins that pull from forks.

Collaborator: correct

Author: It was only intended for testing until the changes to opencode-anthropic-auth are accepted.

   // Built-in plugins that are directly imported (not installed from npm)
   const INTERNAL_PLUGINS: PluginInstance[] = [CodexAuthPlugin, CopilotAuthPlugin, GitlabAuthPlugin]
20 changes: 14 additions & 6 deletions packages/opencode/src/provider/provider.ts
@@ -3,7 +3,13 @@ import os from "os"
 import fuzzysort from "fuzzysort"
 import { Config } from "../config/config"
 import { mapValues, mergeDeep, omit, pickBy, sortBy } from "remeda"
-import { NoSuchModelError, type Provider as SDK } from "ai"
+import { NoSuchModelError } from "ai"
+
+// SDK type for provider factories - we only use languageModel() so we don't require all Provider methods
+type SDK = {
+  languageModel: (modelId: string) => any
+  [key: string]: any
+}
 import { Log } from "../util/log"
 import { BunProc } from "../bun"
 import { Plugin } from "../plugin"
@@ -24,7 +30,8 @@ import { createVertex } from "@ai-sdk/google-vertex"
 import { createVertexAnthropic } from "@ai-sdk/google-vertex/anthropic"
 import { createOpenAI } from "@ai-sdk/openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
-import { createOpenRouter, type LanguageModelV2 } from "@openrouter/ai-sdk-provider"
+import { createOpenRouter } from "@openrouter/ai-sdk-provider"
+import type { LanguageModelV3 } from "@ai-sdk/provider"
 import { createOpenaiCompatible as createGitHubCopilotOpenAICompatible } from "./sdk/copilot"
 import { createXai } from "@ai-sdk/xai"
 import { createMistral } from "@ai-sdk/mistral"
@@ -94,7 +101,7 @@ export namespace Provider {
       options: {
         headers: {
           "anthropic-beta":
-            "claude-code-20250219,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14",
+            "claude-code-20250219,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14,adaptive-thinking-2026-01-28,context-1m-2025-08-07",
         },
       },
     }
@@ -513,7 +520,8 @@ export namespace Provider {
       autoload: true,
       async getModel(_sdk: any, modelID: string, _options?: Record<string, any>) {
         // Model IDs use Unified API format: provider/model (e.g., "anthropic/claude-sonnet-4-5")
-        return aigateway(unified(modelID))
+        // Cast to any as ai-gateway-provider may return V2 models
+        return aigateway(unified(modelID) as any) as any
       },
       options: {},
     }
@@ -710,7 +718,7 @@ export namespace Provider {
   }

   const providers: { [providerID: string]: Info } = {}
-  const languages = new Map<string, LanguageModelV2>()
+  const languages = new Map<string, LanguageModelV3>()
   const modelLoaders: {
     [providerID: string]: CustomModelLoader
   } = {}
@@ -1102,7 +1110,7 @@ export namespace Provider {
     return info
   }

-  export async function getLanguage(model: Model): Promise<LanguageModelV2> {
+  export async function getLanguage(model: Model): Promise<LanguageModelV3> {
     const s = await state()
     const key = `${model.providerID}/${model.id}`
     if (s.models.has(key)) return s.models.get(key)!
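Note on the new structural `SDK` type above: it only promises `languageModel()`, so any AI SDK provider satisfies it regardless of which optional methods (`textEmbeddingModel`, `imageModel`, and so on) it implements. A minimal sketch of the idea, with an assumed example model ID:

```ts
import { createAnthropic } from "@ai-sdk/anthropic"

// Same shape as the type introduced in provider.ts: only languageModel() is required.
type SDK = {
  languageModel: (modelId: string) => any
  [key: string]: any
}

// createAnthropic() returns a full provider object; it still satisfies the
// narrow SDK type structurally, because it has a languageModel() method.
const sdk: SDK = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY })
const model = sdk.languageModel("claude-sonnet-4-5") // assumed example model ID
```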
@@ -1,4 +1,4 @@
-import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
 import { z } from "zod/v4"

 export const codeInterpreterInputSchema = z.object({
@@ -37,7 +37,7 @@ type CodeInterpreterArgs = {
   container?: string | { fileIds?: string[] }
 }

-export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
+export const codeInterpreterToolFactory = createProviderToolFactoryWithOutputSchema<
   {
     /**
      * The code to run, or null if not available.
@@ -76,7 +76,6 @@ export const codeInterpreterToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
   CodeInterpreterArgs
 >({
   id: "openai.code_interpreter",
-  name: "code_interpreter",
   inputSchema: codeInterpreterInputSchema,
   outputSchema: codeInterpreterOutputSchema,
 })
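For orientation on this rename (which repeats across the tool files below): the separate `name` field is dropped from the factory config, leaving the provider-scoped `id` to identify the tool. A hedged sketch of a call site using such a provider-defined tool; the model ID is an assumed example, and the exact call-site shape in this codebase is not shown in the diff:

```ts
import { streamText } from "ai"
import { openai } from "@ai-sdk/openai"

const result = streamText({
  model: openai("gpt-5"), // assumed example model ID
  prompt: "Plot a sine wave.",
  tools: {
    // openai.tools.codeInterpreter() wraps the factory above; its args
    // mirror the CodeInterpreterArgs type (an optional container).
    code_interpreter: openai.tools.codeInterpreter({}),
  },
})
```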
@@ -1,4 +1,4 @@
-import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
 import type {
   OpenAIResponsesFileSearchToolComparisonFilter,
   OpenAIResponsesFileSearchToolCompoundFilter,
@@ -43,7 +43,7 @@ export const fileSearchOutputSchema = z.object({
     .nullable(),
 })

-export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
+export const fileSearch = createProviderToolFactoryWithOutputSchema<
   {},
   {
     /**
@@ -122,7 +122,6 @@ export const fileSearch = createProviderDefinedToolFactoryWithOutputSchema<
   }
 >({
   id: "openai.file_search",
-  name: "file_search",
   inputSchema: z.object({}),
   outputSchema: fileSearchOutputSchema,
 })
@@ -1,4 +1,4 @@
-import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
 import { z } from "zod/v4"

 export const imageGenerationArgsSchema = z
@@ -92,7 +92,7 @@ type ImageGenerationArgs = {
   size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024"
 }

-const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
+const imageGenerationToolFactory = createProviderToolFactoryWithOutputSchema<
   {},
   {
     /**
@@ -103,7 +103,6 @@ const imageGenerationToolFactory = createProviderDefinedToolFactoryWithOutputSchema<
   ImageGenerationArgs
 >({
   id: "openai.image_generation",
-  name: "image_generation",
   inputSchema: z.object({}),
   outputSchema: imageGenerationOutputSchema,
 })
@@ -1,4 +1,4 @@
-import { createProviderDefinedToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
+import { createProviderToolFactoryWithOutputSchema } from "@ai-sdk/provider-utils"
 import { z } from "zod/v4"

 export const localShellInputSchema = z.object({
@@ -16,7 +16,7 @@ export const localShellOutputSchema = z.object({
   output: z.string(),
 })

-export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
+export const localShell = createProviderToolFactoryWithOutputSchema<
   {
     /**
      * Execute a shell command on the server.
@@ -59,7 +59,6 @@ export const localShell = createProviderDefinedToolFactoryWithOutputSchema<
   {}
 >({
   id: "openai.local_shell",
-  name: "local_shell",
   inputSchema: localShellInputSchema,
   outputSchema: localShellOutputSchema,
 })
@@ -1,4 +1,4 @@
-import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
+import { createProviderToolFactory } from "@ai-sdk/provider-utils"
 import { z } from "zod/v4"

 // Args validation schema
@@ -40,7 +40,7 @@ export const webSearchPreviewArgsSchema = z.object({
     .optional(),
 })

-export const webSearchPreview = createProviderDefinedToolFactory<
+export const webSearchPreview = createProviderToolFactory<
   {
     // Web search doesn't take input parameters - it's controlled by the prompt
   },
@@ -81,7 +81,6 @@ export const webSearchPreview = createProviderDefinedToolFactory<
   }
 >({
   id: "openai.web_search_preview",
-  name: "web_search_preview",
   inputSchema: z.object({
     action: z
       .discriminatedUnion("type", [
@@ -1,4 +1,4 @@
-import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils"
+import { createProviderToolFactory } from "@ai-sdk/provider-utils"
 import { z } from "zod/v4"

 export const webSearchArgsSchema = z.object({
@@ -21,7 +21,7 @@ export const webSearchArgsSchema = z.object({
     .optional(),
 })

-export const webSearchToolFactory = createProviderDefinedToolFactory<
+export const webSearchToolFactory = createProviderToolFactory<
   {
     // Web search doesn't take input parameters - it's controlled by the prompt
   },
@@ -74,7 +74,6 @@ export const webSearchToolFactory = createProviderDefinedToolFactory<
   }
 >({
   id: "openai.web_search",
-  name: "web_search",
   inputSchema: z.object({
     action: z
       .discriminatedUnion("type", [
33 changes: 28 additions & 5 deletions packages/opencode/src/provider/transform.ts
@@ -79,7 +79,7 @@ export namespace ProviderTransform {
           }
         }
         return part
-      })
+      }) as typeof msg.content
     }
     return msg
   })
@@ -95,7 +95,7 @@ export namespace ProviderTransform {
       const nextMsg = msgs[i + 1]

       if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
-        msg.content = msg.content.map((part) => {
+        msg.content = (msg.content.map((part) => {
           if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
             // Mistral requires alphanumeric tool call IDs with exactly 9 characters
             const normalizedId = part.toolCallId
@@ -109,7 +109,7 @@ export namespace ProviderTransform {
             }
           }
           return part
-        })
+        })) as typeof msg.content
       }

       result.push(msg)
@@ -195,7 +195,7 @@ export namespace ProviderTransform {
     const shouldUseContentOptions = !useMessageLevelOptions && Array.isArray(msg.content) && msg.content.length > 0

     if (shouldUseContentOptions) {
-      const lastContent = msg.content[msg.content.length - 1]
+      const lastContent = msg.content[msg.content.length - 1] as any
       if (lastContent && typeof lastContent === "object") {
         lastContent.providerOptions = mergeDeep(lastContent.providerOptions ?? {}, providerOptions)
         continue
@@ -277,7 +277,7 @@ export namespace ProviderTransform {
       return {
         ...msg,
         providerOptions: remap(msg.providerOptions),
-        content: msg.content.map((part) => ({ ...part, providerOptions: remap(part.providerOptions) })),
+        content: msg.content.map((part) => ({ ...part, providerOptions: remap((part as any).providerOptions) })),
       } as typeof msg
     })
   }
@@ -453,6 +453,29 @@ export namespace ProviderTransform {
       // https://v5.ai-sdk.dev/providers/ai-sdk-providers/anthropic
       case "@ai-sdk/google-vertex/anthropic":
         // https://v5.ai-sdk.dev/providers/ai-sdk-providers/google-vertex#anthropic-provider
+        // Opus 4.6 uses adaptive thinking with effort parameter
+        // https://docs.anthropic.com/en/docs/build-with-claude/adaptive-thinking
+        if (id.includes("opus-4-6")) {
+          return {
+            low: {
+              thinking: { type: "adaptive" },
+              effort: "low",
+            },
+            medium: {
+              thinking: { type: "adaptive" },
+              effort: "medium",
+            },
+            high: {
+              thinking: { type: "adaptive" },
+              effort: "high",
+            },
+            max: {
+              thinking: { type: "adaptive" },
+              effort: "max",
+            },
+          }
+        }
+        // Older models use manual thinking with budgetTokens
        return {
          high: {
            thinking: {
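For context, these per-effort objects end up as Anthropic provider options on the request: adaptive thinking is steered by an effort level rather than a manual token budget. A minimal sketch of that wiring; the model ID is an assumed example, and the exact plumbing through opencode's transforms is not shown here:

```ts
import { streamText } from "ai"
import { anthropic } from "@ai-sdk/anthropic"

const result = streamText({
  model: anthropic("claude-opus-4-6"), // assumed example model ID
  prompt: "Review this diff for regressions.",
  providerOptions: {
    anthropic: {
      // Shape returned by the "high" branch above for Opus 4.6.
      thinking: { type: "adaptive" },
      effort: "high",
    },
  },
})
```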
2 changes: 1 addition & 1 deletion packages/opencode/src/session/compaction.ts
@@ -149,7 +149,7 @@ export namespace SessionCompaction {
       tools: {},
       system: [],
       messages: [
-        ...MessageV2.toModelMessages(input.messages, model),
+        ...(await MessageV2.toModelMessages(input.messages, model)),
         {
           role: "user",
           content: [
25 changes: 13 additions & 12 deletions packages/opencode/src/session/index.ts
@@ -443,18 +443,19 @@ export namespace Session {
       metadata: z.custom<ProviderMetadata>().optional(),
     }),
     (input) => {
-      const cacheReadInputTokens = input.usage.cachedInputTokens ?? 0
-      const cacheWriteInputTokens = (input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
-        // @ts-expect-error
-        input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
-        // @ts-expect-error
-        input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
-        0) as number
-
-      const excludesCachedTokens = !!(input.metadata?.["anthropic"] || input.metadata?.["bedrock"])
-      const adjustedInputTokens = excludesCachedTokens
-        ? (input.usage.inputTokens ?? 0)
-        : (input.usage.inputTokens ?? 0) - cacheReadInputTokens - cacheWriteInputTokens
+      const cacheReadInputTokens = input.usage.inputTokenDetails?.cacheReadTokens ?? input.usage.cachedInputTokens ?? 0
+      const cacheWriteInputTokens =
+        input.usage.inputTokenDetails?.cacheWriteTokens ??
+        ((input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
+          // @ts-expect-error
+          input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
+          // @ts-expect-error
+          input.metadata?.["venice"]?.["usage"]?.["cacheCreationInputTokens"] ??
+          0) as number)
+
+      const adjustedInputTokens =
+        input.usage.inputTokenDetails?.noCacheTokens ??
+        (input.usage.inputTokens ?? 0) - cacheReadInputTokens - cacheWriteInputTokens
       const safe = (value: number) => {
         if (!Number.isFinite(value)) return 0
         return value
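In plain terms, the new accounting prefers the SDK's structured `inputTokenDetails` and only falls back to per-provider metadata, and the non-cached input count is derived uniformly instead of via the old `excludesCachedTokens` special case. A self-contained sketch of the arithmetic; the usage shape mirrors the fields read above, with `inputTokenDetails` assumed to be the AI SDK v6 usage format:

```ts
type Usage = {
  inputTokens?: number
  cachedInputTokens?: number
  inputTokenDetails?: {
    cacheReadTokens?: number
    cacheWriteTokens?: number
    noCacheTokens?: number
  }
}

function adjustedInputTokens(usage: Usage, metadataCacheWrite = 0): number {
  const cacheRead = usage.inputTokenDetails?.cacheReadTokens ?? usage.cachedInputTokens ?? 0
  const cacheWrite = usage.inputTokenDetails?.cacheWriteTokens ?? metadataCacheWrite
  // Prefer the SDK's explicit non-cached count; otherwise subtract both cache buckets.
  return usage.inputTokenDetails?.noCacheTokens ?? (usage.inputTokens ?? 0) - cacheRead - cacheWrite
}

// Example: 1200 input tokens, of which 1000 were read from cache and 100 written to it.
adjustedInputTokens({ inputTokens: 1200, inputTokenDetails: { cacheReadTokens: 1000, cacheWriteTokens: 100 } })
// => 100
```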
3 changes: 2 additions & 1 deletion packages/opencode/src/session/llm.ts
@@ -41,7 +41,7 @@ export namespace LLM {
     retries?: number
   }

-  export type StreamOutput = StreamTextResult<ToolSet, unknown>
+  export type StreamOutput = StreamTextResult<ToolSet, any>

   export async function stream(input: StreamInput) {
     const l = log
@@ -245,6 +245,7 @@ export namespace LLM {
       model: language,
       middleware: [
         {
+          specificationVersion: "v3" as const,
           async transformParams(args) {
             if (args.type === "stream") {
               // @ts-expect-error
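A minimal sketch of a standalone middleware under the v6 SDK, where each middleware declares the model specification version it targets. This assumes `@ai-sdk/provider` exports a `LanguageModelV3Middleware` type analogous to the earlier V2 one; the transform body is illustrative:

```ts
import type { LanguageModelV3Middleware } from "@ai-sdk/provider"

const passthrough: LanguageModelV3Middleware = {
  // Required tag in AI SDK v6: ties the middleware to the V3 model spec.
  specificationVersion: "v3",
  async transformParams({ type, params }) {
    // Illustrative: only streaming calls would be rewritten here.
    if (type === "stream") {
      return { ...params }
    }
    return params
  },
}
```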