Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
0cc8e67
feat: auto-detect local Ollama models
Mar 8, 2026
84cc792
Merge branch 'dev' into dev
koryboyd Mar 9, 2026
3ffa090
feat(ollama): add reasoning model detection with config overrides
Mar 9, 2026
048d8c0
Merge branch 'dev' into dev
koryboyd Mar 9, 2026
034ada2
fix(ollama): fix type errors - use correct config model properties
Mar 9, 2026
d73e696
fix(ollama): generate variants for Ollama reasoning models
Mar 9, 2026
47ade4e
fix(ollama): apply reasoning detection to config-loaded Ollama models
Mar 9, 2026
b578e75
fix(ollama): add fallback to languageModel for OpenAI-compatible SDK
Mar 9, 2026
5caf077
fix(ollama): only enable think parameter for models that support it
Mar 9, 2026
ab17a98
Revert "fix(ollama): only enable think parameter for models that supp…
Mar 9, 2026
35e7c14
Revert "fix(ollama): add fallback to languageModel for OpenAI-compati…
Mar 9, 2026
5f941cf
Revert "fix(ollama): apply reasoning detection to config-loaded Ollam…
Mar 9, 2026
a807e2f
Revert "fix(ollama): generate variants for Ollama reasoning models"
Mar 9, 2026
351eb59
Revert "fix(ollama): fix type errors - use correct config model prope…
Mar 9, 2026
0f40a61
Revert "feat(ollama): add reasoning model detection with config overr…
Mar 9, 2026
2b18b4e
Revert "feat: auto-detect local Ollama models"
Mar 9, 2026
b1271d0
feat: add multi-model parallel execution and Ollama auto-detection
Mar 9, 2026
33ecf76
fix: improve Ollama model detection and ID generation
Mar 9, 2026
bd1c0b5
fix: force-add Ollama provider since it doesn't require API key
Mar 9, 2026
61c4809
fix: force-add Ollama provider after mergeProvider is defined
Mar 9, 2026
007c6bc
debug: add logging to trace Ollama provider loading
Mar 9, 2026
87940ed
fix: directly assign Ollama provider to skip merging issues
Mar 9, 2026
9805a40
fix: reuse localModels variable and add count to log
Mar 9, 2026
a0c2509
fix: move Ollama force-add to end of provider processing
Mar 9, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
44 changes: 37 additions & 7 deletions packages/opencode/src/agent/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,17 @@ import { Plugin } from "@/plugin"
import { Skill } from "../skill"

export namespace Agent {
export const ModelConfig = z.object({
modelID: z.string(),
providerID: z.string(),
role: z.enum(["primary", "reasoning", "coding", "assistant"]).default("assistant"),
})

export const LegacyModelConfig = z.object({
modelID: z.string(),
providerID: z.string(),
})

export const Info = z
.object({
name: z.string(),
Expand All @@ -32,21 +43,23 @@ export namespace Agent {
temperature: z.number().optional(),
color: z.string().optional(),
permission: PermissionNext.Ruleset,
model: z
.object({
modelID: z.string(),
providerID: z.string(),
})
.optional(),
model: LegacyModelConfig.optional(),
models: z.array(ModelConfig).optional(),
variant: z.string().optional(),
prompt: z.string().optional(),
options: z.record(z.string(), z.any()),
steps: z.number().int().positive().optional(),
multiAgent: z.object({
enabled: z.boolean().default(true),
parallel: z.boolean().default(true),
mergeStrategy: z.enum(["primary-wins", "reasoning-wins", "all-responses"]).default("all-responses"),
}).optional(),
})
.meta({
ref: "Agent",
})
export type Info = z.infer<typeof Info>
export type ModelConfig = z.infer<typeof ModelConfig>

const state = Instance.state(async () => {
const cfg = await Config.get()
Expand Down Expand Up @@ -216,7 +229,17 @@ export namespace Agent {
options: {},
native: false,
}
if (value.model) item.model = Provider.parseModel(value.model)
if (value.model) item.model = LegacyModelConfig.parse(Provider.parseModel(value.model))
if (value.models) {
item.models = value.models.map((m: any) => {
const parsed = Provider.parseModel(m.model)
return {
providerID: parsed.providerID,
modelID: parsed.modelID,
role: m.role ?? "assistant",
}
})
}
item.variant = value.variant ?? item.variant
item.prompt = value.prompt ?? item.prompt
item.description = value.description ?? item.description
Expand All @@ -229,6 +252,13 @@ export namespace Agent {
item.steps = value.steps ?? item.steps
item.options = mergeDeep(item.options, value.options ?? {})
item.permission = PermissionNext.merge(item.permission, PermissionNext.fromConfig(value.permission ?? {}))
if (value.multiAgent) {
item.multiAgent = {
enabled: value.multiAgent.enabled ?? true,
parallel: value.multiAgent.parallel ?? true,
mergeStrategy: value.multiAgent.mergeStrategy ?? "all-responses",
}
}
}

// Ensure Truncate.GLOB is allowed unless explicitly configured
Expand Down
222 changes: 222 additions & 0 deletions packages/opencode/src/provider/ollama-autodetect.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,222 @@
import { Log } from "../util/log"
import { Config } from "../config/config"
import type { Provider } from "../provider/provider"

// Scoped logger for the Ollama auto-detection service.
const log = Log.create({ service: "ollama-autodetect" })

/**
 * One model entry as returned by the Ollama daemon's `GET /api/tags` endpoint.
 * Field names mirror the JSON payload verbatim (hence snake_case).
 */
export interface OllamaModel {
  name: string // model tag, e.g. "deepseek-r1:7b"
  model: string
  modified_at: string // last-modified timestamp string from the daemon
  size: number // on-disk size (per the Ollama API, in bytes)
  digest: string
  details: {
    parent_model: string
    format: string // e.g. "gguf"
    family: string // primary architecture family, e.g. "llama"
    families: string[] // all families the model belongs to
    parameter_size: string // e.g. "7B"
    quantization_level: string // e.g. "Q4_K_M"
  }
}

/** Top-level response body of `GET /api/tags`. */
export interface OllamaTagsResponse {
  models: OllamaModel[]
}

// Substrings matched against model family metadata to flag reasoning models.
const REASONING_FAMILIES = ["deepseek2", "qwen3", "gptoss", "r1"]

// Substrings matched against model family metadata to flag coding models.
const CODING_FAMILIES = ["qwen3moe", "coder"]

/**
 * Heuristically classify an Ollama model from its tag metadata.
 *
 * All signals are case-insensitive substring matches against the model's
 * family metadata and its name; absent metadata is treated as empty. Image
 * support is guessed from well-known multimodal families plus "vision"/"vl"
 * name hints — a heuristic, not a guarantee.
 */
function detectModelCapabilities(model: OllamaModel): {
  reasoning: boolean
  coding: boolean
  supportsImages: boolean
} {
  const family = model.details.family?.toLowerCase() ?? ""
  const families = model.details.families?.map(f => f.toLowerCase()) ?? []
  const name = model.name.toLowerCase()

  // True when the primary family or any listed family contains the needle.
  const familyHas = (needle: string) =>
    family.includes(needle) || families.some(fam => fam.includes(needle))
  const nameHas = (needle: string) => name.includes(needle)

  const reasoning = REASONING_FAMILIES.some(familyHas) || ["r1", "reasoning", "think"].some(nameHas)

  const coding = CODING_FAMILIES.some(familyHas) || ["coder", "code"].some(nameHas)

  const supportsImages =
    ["llama", "gemma", "qwen"].some(f => family.includes(f)) || ["vision", "vl"].some(nameHas)

  return { reasoning, coding, supportsImages }
}

/**
 * Query a local Ollama daemon for its installed models via `GET /api/tags`.
 *
 * Never throws: any failure (unreachable server, 5s timeout, non-OK status,
 * malformed payload) is logged and reported as an empty list.
 *
 * @param baseURL base URL of the Ollama server; defaults to http://localhost:11434
 * @returns the installed models, or [] on any failure
 */
export async function detectLocalOllamaModels(baseURL?: string): Promise<OllamaModel[]> {
  const url = baseURL ?? "http://localhost:11434"
  const timeout = 5000

  const controller = new AbortController()
  const timeoutId = setTimeout(() => controller.abort(), timeout)

  try {
    const response = await fetch(`${url}/api/tags`, {
      signal: controller.signal,
    })

    if (!response.ok) {
      log.warn("Ollama API returned non-ok status", { status: response.status })
      return []
    }

    const data = (await response.json()) as OllamaTagsResponse
    // Guard against unexpected payloads that lack a `models` array, instead of
    // throwing a TypeError that would be logged as a generic failure.
    const models = data.models ?? []
    log.info("Detected Ollama models", { count: models.length })
    return models
  } catch (error) {
    if (error instanceof Error && error.name === "AbortError") {
      log.warn("Ollama detection timed out")
    } else {
      log.warn("Failed to detect Ollama models", { error })
    }
    return []
  } finally {
    // Always clear the timer — the previous version only cleared it on the
    // success path, leaking a pending 5s timer whenever fetch rejected.
    clearTimeout(timeoutId)
  }
}

/**
 * Detect locally installed Ollama models and build a provider-config fragment
 * describing them, layered over any user-supplied "ollama" provider entry
 * (user-set name/api/npm win; the detected models record is always used).
 *
 * @returns `{ provider }` with a models record keyed by sanitized model name,
 *          or `undefined` when no local models were detected.
 */
export async function registerOllamaModels() {
  const models = await detectLocalOllamaModels()

  if (models.length === 0) {
    return
  }

  const cfg = await Config.get()
  const ollamaModels: Record<string, any> = {}

  for (const model of models) {
    const capabilities = detectModelCapabilities(model)
    // Sanitize the name into a stable record key using the same scheme as
    // convertOllamaModelToModel ("/" -> "-", ":" -> "--") so provider-config
    // keys line up with the database model IDs. The previous ":" -> "-"
    // mapping diverged from that scheme and could collide ("a:b" vs "a/b").
    const modelName = model.name.replace(/\//g, "-").replace(/:/g, "--")

    ollamaModels[modelName] = {
      id: model.name,
      name: model.name,
      family: model.details.family,
      reasoning: capabilities.reasoning,
      attachment: capabilities.supportsImages,
      temperature: true,
      tool_call: true,
      // Reasoning models are flagged as interleaving thinking with output.
      interleaved: capabilities.reasoning,
      // Local inference is free.
      cost: {
        input: 0,
        output: 0,
      },
      // NOTE(review): assumes a 128k context / 8k output window for every
      // local model — confirm against the actual per-model limits.
      limit: {
        context: 128000,
        output: 8192,
      },
      modalities: {
        input: capabilities.supportsImages ? ["text", "image"] : ["text"],
        output: ["text"],
      },
      options: {
        model: model.name,
      },
    }

    log.info("Registered Ollama model", {
      name: model.name,
      reasoning: capabilities.reasoning,
      coding: capabilities.coding,
    })
  }

  // Preserve any user-specified overrides for the ollama provider.
  const existingProvider = cfg.provider?.["ollama"] ?? {}

  return {
    provider: {
      ...existingProvider,
      name: existingProvider.name ?? "Ollama (Local)",
      api: existingProvider.api ?? "http://localhost:11434/v1",
      npm: existingProvider.npm ?? "@ai-sdk/openai-compatible",
      models: ollamaModels,
    },
  }
}

/**
 * Map a detected Ollama model onto the internal Provider.Model shape.
 * Local models are registered at zero cost with a 128k-context/8k-output
 * limit, pointed at the daemon's OpenAI-compatible endpoint.
 */
export function convertOllamaModelToModel(model: OllamaModel): Provider.Model {
  const caps = detectModelCapabilities(model)
  const vision = caps.supportsImages
  // Unique, collision-free ID: "/" -> "-", ":" -> "--" (colons get a distinct
  // replacement so e.g. "a:b" and "a/b" stay distinguishable).
  const id = model.name.replace(/\//g, "-").replace(/:/g, "--")

  return {
    id,
    providerID: "ollama",
    name: model.name,
    family: model.details.family,
    api: {
      id: model.name,
      url: "http://localhost:11434/v1",
      npm: "@ai-sdk/openai-compatible",
    },
    capabilities: {
      temperature: true,
      reasoning: caps.reasoning,
      attachment: vision,
      toolcall: true,
      input: { text: true, audio: false, image: vision, video: false, pdf: false },
      output: { text: true, audio: false, image: false, video: false, pdf: false },
      interleaved: caps.reasoning,
    },
    cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
    limit: { context: 128000, output: 8192 },
    status: "active",
    options: { model: model.name },
    headers: {},
    release_date: model.modified_at,
  }
}
49 changes: 49 additions & 0 deletions packages/opencode/src/provider/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -610,6 +610,21 @@ export namespace Provider {
},
}
},
async ollama(_input) {
const { registerOllamaModels } = await import("./ollama-autodetect")
const detected = await registerOllamaModels()

if (!detected?.provider?.models || Object.keys(detected.provider.models).length === 0) {
return { autoload: false }
}

return {
autoload: true,
options: {
baseURL: detected.provider.api ?? "http://localhost:11434/v1",
},
}
},
}

export const Model = z
Expand Down Expand Up @@ -782,6 +797,34 @@ export namespace Provider {
const modelsDev = await ModelsDev.get()
const database = mapValues(modelsDev, fromModelsDevProvider)

// Auto-detect local Ollama models and add to database
const { detectLocalOllamaModels, convertOllamaModelToModel } = await import("./ollama-autodetect")
const localModels = await detectLocalOllamaModels()
if (localModels.length > 0) {
// Merge with existing ollama provider if it exists
const existingOllama = database["ollama"]
const ollamaProvider: Info = existingOllama
? { ...existingOllama, models: { ...existingOllama.models } }
: {
id: "ollama",
source: "custom",
name: "Ollama (Local)",
env: [],
options: {
baseURL: "http://localhost:11434/v1",
},
models: {},
}

for (const model of localModels) {
const modelInfo = convertOllamaModelToModel(model)
ollamaProvider.models[modelInfo.id] = modelInfo
log.info("Added local Ollama model", { name: model.name, id: modelInfo.id, count: localModels.length })
}

database["ollama"] = ollamaProvider
}

const disabled = new Set(config.disabled_providers ?? [])
const enabled = config.enabled_providers ? new Set(config.enabled_providers) : null

Expand Down Expand Up @@ -1048,6 +1091,12 @@ export namespace Provider {
log.info("found", { providerID })
}

// Force-add ollama provider at the very end, after all processing/filtering
if (localModels && localModels.length > 0 && database["ollama"]) {
providers["ollama"] = database["ollama"]
log.info("Ollama provider force-added at end", { modelsCount: Object.keys(providers["ollama"]?.models ?? {}).length })
}

return {
models: languages,
providers,
Expand Down
Loading
Loading