diff --git a/.opencode/agent/docs.md b/.cerebras/agent/docs.md
similarity index 100%
rename from .opencode/agent/docs.md
rename to .cerebras/agent/docs.md
diff --git a/.opencode/agent/git-committer.md b/.cerebras/agent/git-committer.md
similarity index 100%
rename from .opencode/agent/git-committer.md
rename to .cerebras/agent/git-committer.md
diff --git a/.opencode/opencode.jsonc b/.cerebras/cerebras.jsonc
similarity index 100%
rename from .opencode/opencode.jsonc
rename to .cerebras/cerebras.jsonc
diff --git a/.opencode/command/commit.md b/.cerebras/command/commit.md
similarity index 100%
rename from .opencode/command/commit.md
rename to .cerebras/command/commit.md
diff --git a/.opencode/command/issues.md b/.cerebras/command/issues.md
similarity index 100%
rename from .opencode/command/issues.md
rename to .cerebras/command/issues.md
diff --git a/.opencode/command/rmslop.md b/.cerebras/command/rmslop.md
similarity index 100%
rename from .opencode/command/rmslop.md
rename to .cerebras/command/rmslop.md
diff --git a/.opencode/command/spellcheck.md b/.cerebras/command/spellcheck.md
similarity index 100%
rename from .opencode/command/spellcheck.md
rename to .cerebras/command/spellcheck.md
diff --git a/.opencode/skill/frontend-dev/SKILL.md b/.cerebras/skill/frontend-dev/SKILL.md
similarity index 100%
rename from .opencode/skill/frontend-dev/SKILL.md
rename to .cerebras/skill/frontend-dev/SKILL.md
diff --git a/.cerebras/skill/sparc/SKILL.md b/.cerebras/skill/sparc/SKILL.md
new file mode 100644
index 0000000000..a11b066ff9
--- /dev/null
+++ b/.cerebras/skill/sparc/SKILL.md
@@ -0,0 +1,70 @@
+---
+name: sparc
+description: Use for ANY complex feature, refactor, or multi-step development task. SPARC walks through Specification, Pseudocode, Architecture, Refinement, and Completion — a structured methodology that turns vague requests into solid implementations. Triggers on keywords like build, implement, create, design, refactor, add feature, architect, plan and build, or any task that benefits from thinking before coding.
+---
+
+Systematic development methodology: **Specification, Pseudocode, Architecture, Refinement, Completion**. Each phase produces a concrete artifact before moving to the next. Cerebras inference speed makes this feel instant — you get the rigor of structured engineering without the wait.
+
+The user provides a feature request, bug description, or development task. It may be vague or detailed.
+
+## Phase 1: Specification
+
+Before touching code, define what you're building:
+
+- **Problem**: What exactly needs to change? What's broken or missing?
+- **Constraints**: What can't change? Performance requirements? Backwards compatibility?
+- **Success criteria**: How do we know it's done? What should a reviewer see?
+- **Scope boundary**: What is explicitly NOT part of this task?
+
+Output a brief spec (5-10 lines). If requirements are ambiguous, ask the user before proceeding.
+
+## Phase 2: Pseudocode
+
+Think through the logic in plain language before writing real code:
+
+- Outline the control flow, data transformations, and edge cases
+- Identify the key functions/components and their signatures
+- Note where existing code will be touched vs new code created
+- Flag any risky areas (concurrency, error handling, migrations)
+
+Keep it short — pseudocode is a thinking tool, not documentation. Use numbered steps or bullet points, not full syntax.
+
+## Phase 3: Architecture
+
+Map the pseudocode onto the actual codebase:
+
+- **Read first**: Examine the files you'll modify. Understand existing patterns before proposing new ones.
+- **File plan**: List each file to create or modify, with a one-line summary of the change
+- **Interface contracts**: Define function signatures, types, or API shapes that connect components
+- **Dependencies**: What does this touch? What could break?
+
+Follow existing conventions. Don't introduce new patterns unless the existing ones are clearly wrong for this use case.
+
+## Phase 4: Refinement (TDD)
+
+Implement with a test-first approach:
+
+1. **Write a failing test** that captures the core behavior from your spec
+2. **Write the minimum code** to make it pass — no more
+3. **Run the test** to confirm it passes
+4. **Refactor** if the code is unclear, but don't add features
+5. **Repeat** for the next behavior
+
+If the project doesn't have a test framework set up, skip to direct implementation but still work incrementally — one function at a time, verifying each before moving on.
+
+## Phase 5: Completion
+
+Wrap up with confidence:
+
+- Run the full test suite (not just your new tests)
+- Check for regressions in related functionality
+- Remove any temporary code, debug logs, or commented-out blocks
+- Verify the original success criteria from Phase 1 are met
+
+If anything fails, loop back to the relevant phase rather than patching blindly.
+
+## How to Use This
+
+Don't announce each phase — just follow the progression naturally. For small tasks, phases collapse (a 3-line fix doesn't need pseudocode). For large tasks, each phase should produce visible output the user can react to before you continue.
+
+The value is thinking before coding. Cerebras makes the thinking fast enough that it doesn't feel like overhead.
diff --git a/.opencode/themes/mytheme.json b/.cerebras/themes/mytheme.json
similarity index 100%
rename from .opencode/themes/mytheme.json
rename to .cerebras/themes/mytheme.json
diff --git a/package.json b/package.json
index 248b804cd9..155113232f 100644
--- a/package.json
+++ b/package.json
@@ -4,7 +4,7 @@
"description": "AI-powered development tool",
"private": true,
"type": "module",
- "packageManager": "bun@1.3.4",
+ "packageManager": "bun@1.3.5",
"scripts": {
"dev": "bun run --cwd packages/opencode --conditions=browser src/index.ts",
"typecheck": "bun turbo typecheck",
diff --git a/packages/console/app/.opencode/agent/css.md b/packages/console/app/.cerebras/agent/css.md
similarity index 100%
rename from packages/console/app/.opencode/agent/css.md
rename to packages/console/app/.cerebras/agent/css.md
diff --git a/packages/opencode/src/agent/agent.ts b/packages/opencode/src/agent/agent.ts
index 73a7a79963..0d58c7b57a 100644
--- a/packages/opencode/src/agent/agent.ts
+++ b/packages/opencode/src/agent/agent.ts
@@ -1,7 +1,8 @@
import { Config } from "../config/config"
import z from "zod"
import { Provider } from "../provider/provider"
-import { generateObject, type ModelMessage } from "ai"
+import { generateObject } from "ai"
+import type { ModelMessage } from "@ai-sdk/provider-utils"
import PROMPT_GENERATE from "./generate.txt"
import { SystemPrompt } from "../session/system"
import { Instance } from "../project/instance"
diff --git a/packages/opencode/src/cli/cmd/agent.ts b/packages/opencode/src/cli/cmd/agent.ts
index a774c6d026..cbe3c761f1 100644
--- a/packages/opencode/src/cli/cmd/agent.ts
+++ b/packages/opencode/src/cli/cmd/agent.ts
@@ -120,7 +120,7 @@ const AgentCreateCommand = cmd({
const content = matter.stringify(generated.systemPrompt, frontmatter)
const filePath = path.join(
- scope === "global" ? Global.Path.config : path.join(Instance.worktree, ".opencode"),
+ scope === "global" ? Global.Path.config : path.join(Instance.worktree, ".cerebras"),
`agent`,
`${generated.identifier}.md`,
)
diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-notification.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-notification.tsx
index 012bdff4a1..9b62d88830 100644
--- a/packages/opencode/src/cli/cmd/tui/component/dialog-notification.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/dialog-notification.tsx
@@ -18,6 +18,7 @@ export function FullscreenNotification(props: { notification: Notification; onCl
// Any key dismisses (after ready)
if (ready()) {
evt.preventDefault?.()
+ // @ts-expect-error stopPropagation exists at runtime
evt.stopPropagation?.()
// Small delay to prevent key from reaching prompt
setReady(false)
@@ -60,10 +61,12 @@ export function FullscreenNotification(props: { notification: Notification; onCl
flexDirection="column"
gap={2}
>
+ {/* @ts-expect-error fontSize exists at runtime */}
{icon()} {props.notification.title}
+ {/* @ts-expect-error textAlign exists at runtime */}
{props.notification.message}
diff --git a/packages/opencode/src/cli/cmd/tui/context/theme.tsx b/packages/opencode/src/cli/cmd/tui/context/theme.tsx
index 5f3a0ad4ad..7298284ccc 100644
--- a/packages/opencode/src/cli/cmd/tui/context/theme.tsx
+++ b/packages/opencode/src/cli/cmd/tui/context/theme.tsx
@@ -343,7 +343,7 @@ async function getCustomThemes() {
Global.Path.config,
...(await Array.fromAsync(
Filesystem.up({
- targets: [".opencode"],
+ targets: [".cerebras", ".opencode"],
start: process.cwd(),
}),
)),
diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts
index 84e7eb6d1b..d895c3f1e8 100644
--- a/packages/opencode/src/config/config.ts
+++ b/packages/opencode/src/config/config.ts
@@ -45,7 +45,7 @@ export namespace Config {
log.debug("loaded custom config", { path: Flag.OPENCODE_CONFIG })
}
- for (const file of ["opencode.jsonc", "opencode.json"]) {
+ for (const file of ["cerebras.jsonc", "cerebras.json", "opencode.jsonc", "opencode.json"]) {
const found = await Filesystem.findUp(file, Instance.directory, Instance.worktree)
for (const resolved of found.toReversed()) {
result = mergeConfigWithPlugins(result, await loadFile(resolved))
@@ -73,7 +73,7 @@ export namespace Config {
Global.Path.config,
...(await Array.fromAsync(
Filesystem.up({
- targets: [".opencode"],
+ targets: [".cerebras", ".opencode"],
start: Instance.directory,
stop: Instance.worktree,
}),
@@ -89,8 +89,8 @@ export namespace Config {
for (const dir of directories) {
await assertValid(dir)
- if (dir.endsWith(".opencode") || dir === Flag.OPENCODE_CONFIG_DIR) {
- for (const file of ["opencode.jsonc", "opencode.json"]) {
+ if (dir.endsWith(".cerebras") || dir.endsWith(".opencode") || dir === Flag.OPENCODE_CONFIG_DIR) {
+ for (const file of ["cerebras.jsonc", "cerebras.json", "opencode.jsonc", "opencode.json"]) {
log.debug(`loading config from ${path.join(dir, file)}`)
result = mergeConfigWithPlugins(result, await loadFile(path.join(dir, file)))
// to satisy the type checker
@@ -193,7 +193,7 @@ export namespace Config {
if (!md.data) continue
const name = (() => {
- const patterns = ["/.opencode/command/", "/command/"]
+ const patterns = ["/.cerebras/command/", "/.opencode/command/", "/command/"]
const pattern = patterns.find((p) => item.includes(p))
if (pattern) {
@@ -233,11 +233,13 @@ export namespace Config {
// Extract relative path from agent folder for nested agents
let agentName = path.basename(item, ".md")
- const agentFolderPath = item.includes("/.opencode/agent/")
- ? item.split("/.opencode/agent/")[1]
- : item.includes("/agent/")
- ? item.split("/agent/")[1]
- : agentName + ".md"
+ const agentFolderPath = item.includes("/.cerebras/agent/")
+ ? item.split("/.cerebras/agent/")[1]
+ : item.includes("/.opencode/agent/")
+ ? item.split("/.opencode/agent/")[1]
+ : item.includes("/agent/")
+ ? item.split("/agent/")[1]
+ : agentName + ".md"
// If agent is in a subfolder, include folder path in name
if (agentFolderPath.includes("/")) {
@@ -747,6 +749,8 @@ export namespace Config {
mergeDeep(await loadFile(path.join(Global.Path.config, "config.json"))),
mergeDeep(await loadFile(path.join(Global.Path.config, "opencode.json"))),
mergeDeep(await loadFile(path.join(Global.Path.config, "opencode.jsonc"))),
+ mergeDeep(await loadFile(path.join(Global.Path.config, "cerebras.json"))),
+ mergeDeep(await loadFile(path.join(Global.Path.config, "cerebras.jsonc"))),
)
await import(path.join(Global.Path.config, "config"), {
@@ -906,8 +910,8 @@ export namespace Config {
}
function globalConfigFile() {
- const candidates = ["opencode.jsonc", "opencode.json", "config.json"].map((file) =>
- path.join(Global.Path.config, file),
+ const candidates = ["cerebras.jsonc", "cerebras.json", "opencode.jsonc", "opencode.json", "config.json"].map(
+ (file) => path.join(Global.Path.config, file),
)
for (const file of candidates) {
if (existsSync(file)) return file
diff --git a/packages/opencode/src/file/ripgrep.ts b/packages/opencode/src/file/ripgrep.ts
index 00d9e8c386..b96c1e49a2 100644
--- a/packages/opencode/src/file/ripgrep.ts
+++ b/packages/opencode/src/file/ripgrep.ts
@@ -286,7 +286,7 @@ export namespace Ripgrep {
children: [],
}
for (const file of files) {
- if (file.includes(".opencode")) continue
+ if (file.includes(".cerebras") || file.includes(".opencode")) continue
const parts = file.split(path.sep)
getPath(root, parts, true)
}
diff --git a/packages/opencode/src/flag/flag.ts b/packages/opencode/src/flag/flag.ts
index b5a925de98..216246ce1d 100644
--- a/packages/opencode/src/flag/flag.ts
+++ b/packages/opencode/src/flag/flag.ts
@@ -1,29 +1,62 @@
export namespace Flag {
- export const OPENCODE_AUTO_SHARE = truthy("OPENCODE_AUTO_SHARE")
- export const OPENCODE_CONFIG = process.env["OPENCODE_CONFIG"]
- export const OPENCODE_CONFIG_DIR = process.env["OPENCODE_CONFIG_DIR"]
- export const OPENCODE_CONFIG_CONTENT = process.env["OPENCODE_CONFIG_CONTENT"]
- export const OPENCODE_DISABLE_AUTOUPDATE = truthy("OPENCODE_DISABLE_AUTOUPDATE")
- export const OPENCODE_DISABLE_PRUNE = truthy("OPENCODE_DISABLE_PRUNE")
- export const OPENCODE_PERMISSION = process.env["OPENCODE_PERMISSION"]
- export const OPENCODE_DISABLE_DEFAULT_PLUGINS = truthy("OPENCODE_DISABLE_DEFAULT_PLUGINS")
- export const OPENCODE_DISABLE_CLAUDE_CODE_SKILLS = truthy("OPENCODE_DISABLE_CLAUDE_CODE_SKILLS")
- export const OPENCODE_DISABLE_LSP_DOWNLOAD = truthy("OPENCODE_DISABLE_LSP_DOWNLOAD")
- export const OPENCODE_ENABLE_EXPERIMENTAL_MODELS = truthy("OPENCODE_ENABLE_EXPERIMENTAL_MODELS")
- export const OPENCODE_DISABLE_AUTOCOMPACT = truthy("OPENCODE_DISABLE_AUTOCOMPACT")
- export const OPENCODE_FAKE_VCS = process.env["OPENCODE_FAKE_VCS"]
+ export const OPENCODE_AUTO_SHARE = truthyWithFallback("CEREBRAS_CODE_AUTO_SHARE", "OPENCODE_AUTO_SHARE")
+ export const OPENCODE_CONFIG = process.env["CEREBRAS_CODE_CONFIG"] || process.env["OPENCODE_CONFIG"]
+ export const OPENCODE_CONFIG_DIR = process.env["CEREBRAS_CODE_CONFIG_DIR"] || process.env["OPENCODE_CONFIG_DIR"]
+ export const OPENCODE_CONFIG_CONTENT =
+ process.env["CEREBRAS_CODE_CONFIG_CONTENT"] || process.env["OPENCODE_CONFIG_CONTENT"]
+ export const OPENCODE_DISABLE_AUTOUPDATE = truthyWithFallback(
+ "CEREBRAS_CODE_DISABLE_AUTOUPDATE",
+ "OPENCODE_DISABLE_AUTOUPDATE",
+ )
+ export const OPENCODE_DISABLE_PRUNE = truthyWithFallback("CEREBRAS_CODE_DISABLE_PRUNE", "OPENCODE_DISABLE_PRUNE")
+ export const OPENCODE_PERMISSION =
+ process.env["CEREBRAS_CODE_PERMISSION"] || process.env["OPENCODE_PERMISSION"]
+ export const OPENCODE_DISABLE_DEFAULT_PLUGINS = truthyWithFallback(
+ "CEREBRAS_CODE_DISABLE_DEFAULT_PLUGINS",
+ "OPENCODE_DISABLE_DEFAULT_PLUGINS",
+ )
+ export const OPENCODE_DISABLE_CLAUDE_CODE_SKILLS = truthyWithFallback(
+ "CEREBRAS_CODE_DISABLE_CLAUDE_CODE_SKILLS",
+ "OPENCODE_DISABLE_CLAUDE_CODE_SKILLS",
+ )
+ export const OPENCODE_DISABLE_LSP_DOWNLOAD = truthyWithFallback(
+ "CEREBRAS_CODE_DISABLE_LSP_DOWNLOAD",
+ "OPENCODE_DISABLE_LSP_DOWNLOAD",
+ )
+ export const OPENCODE_ENABLE_EXPERIMENTAL_MODELS = truthyWithFallback(
+ "CEREBRAS_CODE_ENABLE_EXPERIMENTAL_MODELS",
+ "OPENCODE_ENABLE_EXPERIMENTAL_MODELS",
+ )
+ export const OPENCODE_DISABLE_AUTOCOMPACT = truthyWithFallback(
+ "CEREBRAS_CODE_DISABLE_AUTOCOMPACT",
+ "OPENCODE_DISABLE_AUTOCOMPACT",
+ )
+ export const OPENCODE_FAKE_VCS =
+ process.env["CEREBRAS_CODE_FAKE_VCS"] || process.env["OPENCODE_FAKE_VCS"]
export const OPENCODE_EXPERIMENTAL_BASH_MAX_OUTPUT_LENGTH =
+ process.env["CEREBRAS_CODE_EXPERIMENTAL_BASH_MAX_OUTPUT_LENGTH"] ||
process.env["OPENCODE_EXPERIMENTAL_BASH_MAX_OUTPUT_LENGTH"]
// Experimental
- export const OPENCODE_EXPERIMENTAL = truthy("OPENCODE_EXPERIMENTAL")
- export const OPENCODE_EXPERIMENTAL_WATCHER = OPENCODE_EXPERIMENTAL || truthy("OPENCODE_EXPERIMENTAL_WATCHER")
- export const OPENCODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT = truthy("OPENCODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT")
+ export const OPENCODE_EXPERIMENTAL = truthyWithFallback("CEREBRAS_CODE_EXPERIMENTAL", "OPENCODE_EXPERIMENTAL")
+ export const OPENCODE_EXPERIMENTAL_WATCHER =
+ OPENCODE_EXPERIMENTAL ||
+ truthyWithFallback("CEREBRAS_CODE_EXPERIMENTAL_WATCHER", "OPENCODE_EXPERIMENTAL_WATCHER")
+ export const OPENCODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT = truthyWithFallback(
+ "CEREBRAS_CODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT",
+ "OPENCODE_EXPERIMENTAL_DISABLE_COPY_ON_SELECT",
+ )
export const OPENCODE_ENABLE_EXA =
- truthy("OPENCODE_ENABLE_EXA") || OPENCODE_EXPERIMENTAL || truthy("OPENCODE_EXPERIMENTAL_EXA")
+ truthyWithFallback("CEREBRAS_CODE_ENABLE_EXA", "OPENCODE_ENABLE_EXA") ||
+ OPENCODE_EXPERIMENTAL ||
+ truthyWithFallback("CEREBRAS_CODE_EXPERIMENTAL_EXA", "OPENCODE_EXPERIMENTAL_EXA")
function truthy(key: string) {
const value = process.env[key]?.toLowerCase()
return value === "true" || value === "1"
}
+
+ function truthyWithFallback(primary: string, fallback: string) {
+ return truthy(primary) || truthy(fallback)
+ }
}
diff --git a/packages/opencode/src/global/index.ts b/packages/opencode/src/global/index.ts
index 2504a47dc5..189245c7ce 100644
--- a/packages/opencode/src/global/index.ts
+++ b/packages/opencode/src/global/index.ts
@@ -3,7 +3,7 @@ import { xdgData, xdgCache, xdgConfig, xdgState } from "xdg-basedir"
import path from "path"
import os from "os"
-const app = "opencode"
+const app = "cerebras"
const data = path.join(xdgData!, app)
const cache = path.join(xdgCache!, app)
diff --git a/packages/opencode/src/mcp/index.ts b/packages/opencode/src/mcp/index.ts
index e030f83b53..5fe0120f56 100644
--- a/packages/opencode/src/mcp/index.ts
+++ b/packages/opencode/src/mcp/index.ts
@@ -1,4 +1,4 @@
-import { type Tool } from "ai"
+import type { Tool } from "@ai-sdk/provider-utils"
import { experimental_createMCPClient } from "@ai-sdk/mcp"
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js"
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"
@@ -382,7 +382,8 @@ export namespace MCP {
}
export async function tools() {
- const result: Record<string, Tool> = {}
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const result: Record<string, Tool<any, any>> = {}
const s = await state()
const clientsSnapshot = await clients()
@@ -407,7 +408,8 @@ export namespace MCP {
for (const [toolName, tool] of Object.entries(tools)) {
const sanitizedClientName = clientName.replace(/[^a-zA-Z0-9_-]/g, "_")
const sanitizedToolName = toolName.replace(/[^a-zA-Z0-9_-]/g, "_")
- result[sanitizedClientName + "_" + sanitizedToolName] = tool
+ // Cast to bridge the MCP client's tool type (whose schema generics differ) to the Tool type expected here
+ result[sanitizedClientName + "_" + sanitizedToolName] = tool as Tool
}
}
return result
diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index 55631c6ca3..bbb53c2592 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -2,7 +2,8 @@ import z from "zod"
import fuzzysort from "fuzzysort"
import { Config } from "../config/config"
import { mapValues, mergeDeep, sortBy } from "remeda"
-import { NoSuchModelError, type Provider as SDK } from "ai"
+import { NoSuchModelError } from "ai"
+import type { ProviderV2 as SDK } from "@ai-sdk/provider"
import { Log } from "../util/log"
import { BunProc } from "../bun"
import { Plugin } from "../plugin"
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts
index e71658c2fa..bdb01b6734 100644
--- a/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts
@@ -69,7 +69,7 @@ export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings
return new OpenAICompatibleChatLanguageModel(modelId, {
provider: `${options.name ?? "openai-compatible"}.chat`,
headers: getHeaders,
- url: ({ path }) => `${baseURL}${path}`,
+ url: ({ path }: { path: string }) => `${baseURL}${path}`,
fetch: options.fetch,
})
}
@@ -78,7 +78,7 @@ export function createOpenaiCompatible(options: OpenaiCompatibleProviderSettings
return new OpenAIResponsesLanguageModel(modelId, {
provider: `${options.name ?? "openai-compatible"}.responses`,
headers: getHeaders,
- url: ({ path }) => `${baseURL}${path}`,
+ url: ({ path }: { path: string }) => `${baseURL}${path}`,
fetch: options.fetch,
})
}
diff --git a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts
index 10a854ac06..b50bf186d8 100644
--- a/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts
+++ b/packages/opencode/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts
@@ -488,7 +488,7 @@ export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
]),
),
service_tier: z.string().nullish(),
- incomplete_details: z.object({ reason: z.union([z.string(), z.record(z.any())]) }).nullish(),
+ incomplete_details: z.object({ reason: z.union([z.string(), z.record(z.string(), z.any())]) }).nullish(),
usage: usageSchema,
}),
),
@@ -1322,7 +1322,7 @@ const errorChunkSchema = z.object({
const responseFinishedChunkSchema = z.object({
type: z.enum(["response.completed", "response.incomplete"]),
response: z.object({
- incomplete_details: z.object({ reason: z.union([z.string(), z.record(z.any())]) }).nullish(),
+ incomplete_details: z.object({ reason: z.union([z.string(), z.record(z.string(), z.any())]) }).nullish(),
usage: usageSchema,
service_tier: z.string().nullish(),
}),
diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 17fbf18f5f..2f73ab467c 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -1,4 +1,5 @@
-import type { APICallError, ModelMessage } from "ai"
+import type { ModelMessage } from "@ai-sdk/provider-utils"
+import type { APICallError } from "@ai-sdk/provider"
import { unique } from "remeda"
import type { JSONSchema } from "zod/v4/core"
import type { Provider } from "./provider"
@@ -20,10 +21,11 @@ export namespace ProviderTransform {
return msgs.map((msg) => {
if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
msg.content = msg.content.map((part) => {
- if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
+ const p = part as { type: string; toolCallId?: string }
+ if ((p.type === "tool-call" || p.type === "tool-result") && "toolCallId" in p && p.toolCallId) {
return {
...part,
- toolCallId: part.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
+ toolCallId: p.toolCallId.replace(/[^a-zA-Z0-9_-]/g, "_"),
}
}
return part
@@ -40,9 +42,10 @@ export namespace ProviderTransform {
if ((msg.role === "assistant" || msg.role === "tool") && Array.isArray(msg.content)) {
msg.content = msg.content.map((part) => {
- if ((part.type === "tool-call" || part.type === "tool-result") && "toolCallId" in part) {
+ const p = part as { type: string; toolCallId?: string }
+ if ((p.type === "tool-call" || p.type === "tool-result") && "toolCallId" in p && p.toolCallId) {
// Mistral requires alphanumeric tool call IDs with exactly 9 characters
- const normalizedId = part.toolCallId
+ const normalizedId = p.toolCallId
.replace(/[^a-zA-Z0-9]/g, "") // Remove non-alphanumeric characters
.substring(0, 9) // Take first 9 characters
.padEnd(9, "0") // Pad with zeros if less than 9 characters
@@ -164,11 +167,12 @@ export namespace ProviderTransform {
if (msg.role !== "user" || !Array.isArray(msg.content)) return msg
const filtered = msg.content.map((part) => {
- if (part.type !== "file" && part.type !== "image") return part
+ const p = part as { type: string; image?: unknown; mediaType?: string; filename?: string }
+ if (p.type !== "file" && p.type !== "image") return part
- const mime = part.type === "image" ? part.image.toString().split(";")[0].replace("data:", "") : part.mediaType
- const filename = part.type === "file" ? part.filename : undefined
- const modality = mimeToModality(mime)
+ const mime = p.type === "image" ? String(p.image).split(";")[0].replace("data:", "") : p.mediaType
+ const filename = p.type === "file" ? p.filename : undefined
+ const modality = mimeToModality(mime ?? "")
if (!modality) return part
if (model.capabilities.input[modality]) return part
diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts
index de75eda6e4..71312586b4 100644
--- a/packages/opencode/src/session/compaction.ts
+++ b/packages/opencode/src/session/compaction.ts
@@ -1,4 +1,5 @@
-import { wrapLanguageModel, type ModelMessage } from "ai"
+import { wrapLanguageModel } from "ai"
+import type { ModelMessage } from "@ai-sdk/provider-utils"
import { Session } from "."
import { Identifier } from "../id/id"
import { Instance } from "../project/instance"
@@ -132,7 +133,7 @@ export namespace SessionCompaction {
abort: input.abort,
})
const result = await processor.process({
- onError(error) {
+ onError(error: unknown) {
log.error("stream error", {
error,
})
@@ -183,10 +184,9 @@ export namespace SessionCompaction {
model: language,
middleware: [
{
- async transformParams(args) {
+ async transformParams(args: { type: string; params: { prompt?: ModelMessage[] } }) {
if (args.type === "stream") {
- // @ts-expect-error
- args.params.prompt = ProviderTransform.message(args.params.prompt, model)
+ args.params.prompt = ProviderTransform.message(args.params.prompt as ModelMessage[], model)
}
return args.params
},
diff --git a/packages/opencode/src/session/index.ts b/packages/opencode/src/session/index.ts
index b8b7af742e..4fe18215ca 100644
--- a/packages/opencode/src/session/index.ts
+++ b/packages/opencode/src/session/index.ts
@@ -1,6 +1,6 @@
import { Decimal } from "decimal.js"
import z from "zod"
-import { type LanguageModelUsage, type ProviderMetadata } from "ai"
+import type { LanguageModelV2Usage as LanguageModelUsage, SharedV2ProviderMetadata as ProviderMetadata } from "@ai-sdk/provider"
import { Bus } from "../bus"
import { Config } from "../config/config"
import { Flag } from "../flag/flag"
@@ -415,8 +415,7 @@ export namespace Session {
cache: {
write: safe(
(input.metadata?.["anthropic"]?.["cacheCreationInputTokens"] ??
- // @ts-expect-error
- input.metadata?.["bedrock"]?.["usage"]?.["cacheWriteInputTokens"] ??
+ (input.metadata?.["bedrock"] as Record<string, any>)?.["usage"]?.["cacheWriteInputTokens"] ??
0) as number,
),
read: safe(cachedInputTokens),
diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts
index 50a480626e..512ef3d5dd 100644
--- a/packages/opencode/src/session/message-v2.ts
+++ b/packages/opencode/src/session/message-v2.ts
@@ -2,7 +2,15 @@ import z from "zod"
import { Bus } from "../bus"
import { NamedError } from "@opencode-ai/util/error"
import { Message } from "./message"
-import { APICallError, convertToModelMessages, LoadAPIKeyError, type ModelMessage, type UIMessage } from "ai"
+import { APICallError, convertToModelMessages, LoadAPIKeyError } from "ai"
+import type { ModelMessage } from "@ai-sdk/provider-utils"
+
+// Define UIMessage interface locally to avoid tsgo's namespace interpretation
+interface UIMessage {
+ id: string
+ role: "user" | "assistant"
+ parts: unknown[]
+}
import { Identifier } from "../id/id"
import { LSP } from "../lsp"
import { Snapshot } from "@/snapshot"
@@ -581,61 +589,65 @@ export namespace MessageV2 {
}
export function fromError(e: unknown, ctx: { providerID: string }) {
- switch (true) {
- case e instanceof DOMException && e.name === "AbortError":
- return new MessageV2.AbortedError(
- { message: e.message },
- {
- cause: e,
- },
- ).toObject()
- case MessageV2.OutputLengthError.isInstance(e):
- return e
- case LoadAPIKeyError.isInstance(e):
- return new MessageV2.AuthError(
- {
- providerID: ctx.providerID,
- message: e.message,
- },
- { cause: e },
- ).toObject()
- case APICallError.isInstance(e):
- const message = iife(() => {
- let msg = e.message
- const transformed = ProviderTransform.error(ctx.providerID, e)
- if (transformed !== msg) {
- return transformed
- }
- if (!e.responseBody || (e.statusCode && msg !== STATUS_CODES[e.statusCode])) {
- return msg
+ if (e instanceof DOMException && e.name === "AbortError") {
+ return new MessageV2.AbortedError(
+ { message: e.message },
+ {
+ cause: e,
+ },
+ ).toObject()
+ }
+ if (MessageV2.OutputLengthError.isInstance(e)) {
+ return e
+ }
+ if (LoadAPIKeyError.isInstance(e)) {
+ const err = e as InstanceType<typeof LoadAPIKeyError>
+ return new MessageV2.AuthError(
+ {
+ providerID: ctx.providerID,
+ message: err.message,
+ },
+ { cause: err },
+ ).toObject()
+ }
+ if (APICallError.isInstance(e)) {
+ const err = e as InstanceType<typeof APICallError>
+ const message = iife(() => {
+ let msg = err.message
+ const transformed = ProviderTransform.error(ctx.providerID, err)
+ if (transformed !== msg) {
+ return transformed
+ }
+ if (!err.responseBody || (err.statusCode && msg !== STATUS_CODES[err.statusCode])) {
+ return msg
+ }
+
+ try {
+ const body = JSON.parse(err.responseBody)
+ // try to extract common error message fields
+ const errMsg = body.message || body.error || body.error?.message
+ if (errMsg && typeof errMsg === "string") {
+ return `${msg}: ${errMsg}`
}
+ } catch {}
- try {
- const body = JSON.parse(e.responseBody)
- // try to extract common error message fields
- const errMsg = body.message || body.error || body.error?.message
- if (errMsg && typeof errMsg === "string") {
- return `${msg}: ${errMsg}`
- }
- } catch {}
-
- return `${msg}: ${e.responseBody}`
- })
-
- return new MessageV2.APIError(
- {
- message,
- statusCode: e.statusCode,
- isRetryable: e.isRetryable,
- responseHeaders: e.responseHeaders,
- responseBody: e.responseBody,
- },
- { cause: e },
- ).toObject()
- case e instanceof Error:
- return new NamedError.Unknown({ message: e.toString() }, { cause: e }).toObject()
- default:
- return new NamedError.Unknown({ message: JSON.stringify(e) }, { cause: e })
+ return `${msg}: ${err.responseBody}`
+ })
+
+ return new MessageV2.APIError(
+ {
+ message,
+ statusCode: err.statusCode,
+ isRetryable: err.isRetryable,
+ responseHeaders: err.responseHeaders,
+ responseBody: err.responseBody,
+ },
+ { cause: err },
+ ).toObject()
+ }
+ if (e instanceof Error) {
+ return new NamedError.Unknown({ message: e.toString() }, { cause: e }).toObject()
}
+ return new NamedError.Unknown({ message: JSON.stringify(e) }, { cause: e })
}
}
diff --git a/packages/opencode/src/session/processor.ts b/packages/opencode/src/session/processor.ts
index 5eb6cdb83e..5ed306ab08 100644
--- a/packages/opencode/src/session/processor.ts
+++ b/packages/opencode/src/session/processor.ts
@@ -401,7 +401,7 @@ export namespace SessionProcessor {
const delay = SessionRetry.delay(attempt, error.name === "APIError" ? error : undefined)
if (delay !== undefined) {
const seconds = Math.max(1, Math.ceil(delay / 1000))
- const message = `Rate limit hit, retrying in ${seconds}s`
+ const message = `${retry}, retrying in ${seconds}s (attempt ${attempt})`
SessionStatus.set(input.sessionID, {
type: "retry",
attempt,
diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts
index 6b8fcf0fd9..2228f59e89 100644
--- a/packages/opencode/src/session/prompt.ts
+++ b/packages/opencode/src/session/prompt.ts
@@ -12,13 +12,12 @@ import { Provider } from "../provider/provider"
import {
generateText,
generateObject,
- type ModelMessage,
- type Tool as AITool,
tool,
wrapLanguageModel,
stepCountIs,
jsonSchema,
} from "ai"
+import type { ModelMessage, Tool as AITool } from "@ai-sdk/provider-utils"
import { SessionCompaction } from "./compaction"
import { Instance } from "../project/instance"
import { Bus } from "../bus"
@@ -580,12 +579,15 @@ export namespace SessionPrompt {
}
const result = await processor.process({
- onError(error) {
+ onError(error: unknown) {
log.error("stream error", {
error,
})
},
- async experimental_repairToolCall(input) {
+ async experimental_repairToolCall(input: {
+ toolCall: { toolName: string; input: string }
+ error: Error
+ }) {
const lower = input.toolCall.toolName.toLowerCase()
if (lower !== input.toolCall.toolName && tools[lower]) {
log.info("repairing tool call", {
@@ -690,20 +692,20 @@ export namespace SessionPrompt {
model: language,
middleware: [
{
- async transformParams(args) {
+ async transformParams(args: { type: string; params: { prompt?: ModelMessage[]; tools?: unknown[] } }) {
if (args.type === "stream") {
- // @ts-expect-error - prompt types are compatible at runtime
- args.params.prompt = ProviderTransform.message(args.params.prompt, model)
+ args.params.prompt = ProviderTransform.message(args.params.prompt as ModelMessage[], model)
}
// Transform tool schemas for provider compatibility
if (args.params.tools && Array.isArray(args.params.tools)) {
- args.params.tools = args.params.tools.map((tool: any) => {
+ args.params.tools = args.params.tools.map((t: unknown) => {
+ const tool = t as { inputSchema?: object }
// Tools at middleware level have inputSchema, not parameters
if (tool.inputSchema && typeof tool.inputSchema === "object") {
// Transform the inputSchema for provider compatibility
return {
...tool,
- inputSchema: ProviderTransform.schema(model, tool.inputSchema),
+ inputSchema: ProviderTransform.schema(model, tool.inputSchema as Record<string, unknown>),
}
}
// If no inputSchema, return tool unchanged
@@ -793,7 +795,7 @@ export namespace SessionPrompt {
id: item.id as any,
description: item.description,
inputSchema: jsonSchema(schema as any),
- async execute(args, options) {
+ async execute(args: unknown, options: { toolCallId: string; abortSignal?: AbortSignal }) {
await Plugin.trigger(
"tool.execute.before",
{
@@ -821,7 +823,7 @@ export namespace SessionPrompt {
title: val.title,
metadata: val.metadata,
status: "running",
- input: args,
+ input: args as Record<string, unknown>,
time: {
start: Date.now(),
},
@@ -841,7 +843,7 @@ export namespace SessionPrompt {
)
return result
},
- toModelOutput(result) {
+ toModelOutput(result: { output: string }) {
return {
type: "text",
value: result.output,
@@ -856,7 +858,7 @@ export namespace SessionPrompt {
if (!execute) continue
// Wrap execute to add plugin hooks and format output
- item.execute = async (args, opts) => {
+ item.execute = async (args: unknown, opts: { toolCallId: string; messages: ModelMessage[] }) => {
await Plugin.trigger(
"tool.execute.before",
{
@@ -907,7 +909,7 @@ export namespace SessionPrompt {
content: result.content, // directly return content to preserve ordering when outputting to model
}
}
- item.toModelOutput = (result) => {
+ item.toModelOutput = (result: { output: string }) => {
return {
type: "text",
value: result.output,
@@ -1604,21 +1606,21 @@ export namespace SessionPrompt {
model: language,
experimental_telemetry: { isEnabled: cfg.experimental?.openTelemetry },
})
- .then((result) => {
+ .then((result: { text: string }) => {
if (result.text)
return Session.update(input.session.id, (draft) => {
const cleaned = result.text
.replace(/[\s\S]*?<\/think>\s*/g, "")
.split("\n")
- .map((line) => line.trim())
- .find((line) => line.length > 0)
+ .map((line: string) => line.trim())
+ .find((line: string) => line.length > 0)
if (!cleaned) return
const title = cleaned.length > 100 ? cleaned.substring(0, 97) + "..." : cleaned
draft.title = title
})
})
- .catch((error) => {
+ .catch((error: unknown) => {
log.error("failed to generate title", { error, model: small.id })
})
}
diff --git a/packages/opencode/src/session/retry.ts b/packages/opencode/src/session/retry.ts
index 6e2a0bbe46..8d606fc579 100644
--- a/packages/opencode/src/session/retry.ts
+++ b/packages/opencode/src/session/retry.ts
@@ -2,9 +2,8 @@ import type { NamedError } from "@opencode-ai/util/error"
import { MessageV2 } from "./message-v2"
export namespace SessionRetry {
- // Hand-tuned backoff schedule (ms): 10s, 10s, 10s, 15s, 15s; after that, stop retrying
- const BACKOFF_SCHEDULE = [10_000, 10_000, 10_000, 15_000, 15_000]
- export const RETRY_MAX_DELAY = 60_000 // absolute cap
+ export const RETRY_MAX_DELAY = 60_000 // absolute cap per retry (ms)
+ const BASE_DELAY = 1_000 // 1s starting point for exponential backoff
export async function sleep(ms: number, signal: AbortSignal): Promise<void> {
return new Promise<void>((resolve, reject) => {
@@ -20,43 +19,55 @@ export namespace SessionRetry {
})
}
- export function delay(attempt: number, error?: MessageV2.APIError) {
- if (attempt > BACKOFF_SCHEDULE.length) {
- return undefined
+ function msUntilNextHour(): number {
+ const now = new Date()
+ return (60 - now.getMinutes()) * 60_000 - now.getSeconds() * 1_000 - now.getMilliseconds()
+ }
+
+ function serverDelay(error: MessageV2.APIError): number | undefined {
+ const headers = error.data.responseHeaders
+ if (!headers) return undefined
+
+ const retryAfterMs = headers["retry-after-ms"]
+ if (retryAfterMs) {
+ const parsed = Number.parseFloat(retryAfterMs)
+ if (!Number.isNaN(parsed) && parsed > 0) return parsed
}
- const idx = Math.min(attempt - 1, BACKOFF_SCHEDULE.length - 1)
- const baseDelay = BACKOFF_SCHEDULE[idx]
- if (error) {
- const headers = error.data.responseHeaders
- if (headers) {
- const retryAfterMs = headers["retry-after-ms"]
- if (retryAfterMs) {
- const parsedMs = Number.parseFloat(retryAfterMs)
- if (!Number.isNaN(parsedMs)) {
- return Math.min(parsedMs, baseDelay, RETRY_MAX_DELAY)
- }
- }
+ const retryAfter = headers["retry-after"]
+ if (retryAfter) {
+ const parsedSeconds = Number.parseFloat(retryAfter)
+ if (!Number.isNaN(parsedSeconds) && parsedSeconds > 0) {
+ return Math.ceil(parsedSeconds * 1_000)
+ }
+ const parsedDate = Date.parse(retryAfter) - Date.now()
+ if (!Number.isNaN(parsedDate) && parsedDate > 0) {
+ return Math.ceil(parsedDate)
+ }
+ }
- const retryAfter = headers["retry-after"]
- if (retryAfter) {
- const parsedSeconds = Number.parseFloat(retryAfter)
- if (!Number.isNaN(parsedSeconds)) {
- // convert seconds to milliseconds
- return Math.min(Math.ceil(parsedSeconds * 1000), baseDelay, RETRY_MAX_DELAY)
- }
- // Try parsing as HTTP date format
- const parsed = Date.parse(retryAfter) - Date.now()
- if (!Number.isNaN(parsed) && parsed > 0) {
- return Math.min(Math.ceil(parsed), baseDelay, RETRY_MAX_DELAY)
- }
- }
+ return undefined
+ }
+
+ export function delay(attempt: number, error?: MessageV2.APIError): number | undefined {
+ // Estimate cumulative wait so far: geometric series BASE_DELAY * (2^(attempt) - 2)
+ const cumulativeEstimate = BASE_DELAY * (Math.pow(2, attempt) - 2)
+ if (cumulativeEstimate >= msUntilNextHour()) return undefined
+
+ // Exponential backoff with jitter (±25%)
+ const exponential = BASE_DELAY * Math.pow(2, attempt)
+ const jitter = 0.75 + Math.random() * 0.5
+ let computed = Math.min(Math.round(exponential * jitter), RETRY_MAX_DELAY)
- return Math.min(baseDelay, RETRY_MAX_DELAY)
+ // Prefer server guidance when it asks for longer than our calculation
+ if (error) {
+ const server = serverDelay(error)
+ if (server !== undefined) {
+ computed = Math.min(Math.max(computed, server), RETRY_MAX_DELAY)
}
}
- return baseDelay
+ return computed
}
export function retryable(error: ReturnType<typeof MessageV2.toError>) {
@@ -66,14 +77,28 @@ export namespace SessionRetry {
}
if (typeof error.data?.message === "string") {
+ const msg = error.data.message
+
+ // Transient network/server errors worth retrying
+ if (msg.includes("ECONNRESET") || msg.includes("ETIMEDOUT") || msg.includes("ENOTFOUND")) {
+ return "Network error"
+ }
+ if (msg.includes("socket hang up") || msg.includes("fetch failed")) {
+ return "Connection lost"
+ }
+
try {
- const json = JSON.parse(error.data.message)
+ const json = JSON.parse(msg)
if (json.type === "error" && json.error?.type === "too_many_requests") {
return "Too Many Requests"
}
if (json.code === "Some resource has been exhausted") {
return "Provider is overloaded"
}
+ const status = json.status ?? json.statusCode
+ if (status === 500 || status === 502 || status === 503) {
+ return "Server error"
+ }
} catch {}
}
diff --git a/packages/opencode/src/session/summary.ts b/packages/opencode/src/session/summary.ts
index ba0a1a00c6..28260f4d85 100644
--- a/packages/opencode/src/session/summary.ts
+++ b/packages/opencode/src/session/summary.ts
@@ -3,7 +3,8 @@ import { Config } from "@/config/config"
import { fn } from "@/util/fn"
import z from "zod"
import { Session } from "."
-import { generateText, type ModelMessage } from "ai"
+import { generateText } from "ai"
+import type { ModelMessage } from "@ai-sdk/provider-utils"
import { MessageV2 } from "./message-v2"
import { Identifier } from "@/id/id"
import { Snapshot } from "@/snapshot"
diff --git a/packages/opencode/src/skill/skill.ts b/packages/opencode/src/skill/skill.ts
index ec2065077c..f8ee934f30 100644
--- a/packages/opencode/src/skill/skill.ts
+++ b/packages/opencode/src/skill/skill.ts
@@ -112,7 +112,7 @@ export namespace Skill {
}
}
- // Scan .opencode/skill/ directories
+ // Scan .cerebras/skill/ and .opencode/skill/ directories
for (const dir of await Config.directories()) {
for await (const match of OPENCODE_SKILL_GLOB.scan({
cwd: dir,