diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 141f6156985..46eac37579c 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -950,6 +950,13 @@ export namespace Config { }), ) .optional(), + shouldFetchModels: z + .boolean() + .default(true) + .optional() + .describe( + "Dynamically fetch available models from the provider's OpenAI-compatible /models endpoint at startup. Defaults to true. Fetched models are merged with manually configured ones (manual config takes precedence). Set to false to disable.", + ), options: z .object({ apiKey: z.string().optional(), diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 022ec316795..6f65eab191d 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -754,6 +754,129 @@ export namespace Provider { } } + + const DEFAULT_CONTEXT = 128000 + const DEFAULT_OUTPUT = 32000 + + function emptyModel(providerID: string, id: string, npm: string, baseURL: string): Model { + return { + id, + providerID, + name: id, + api: { id, npm, url: baseURL }, + status: "active", + family: "", + release_date: "", + headers: {}, + options: {}, + cost: { input: 0, output: 0, cache: { read: 0, write: 0 } }, + limit: { context: DEFAULT_CONTEXT, output: DEFAULT_OUTPUT }, + capabilities: { + temperature: true, + reasoning: false, + attachment: false, + toolcall: true, + input: { text: true, audio: false, image: false, video: false, pdf: false }, + output: { text: true, audio: false, image: false, video: false, pdf: false }, + interleaved: false, + }, + variants: {}, + } + } + + async function fetchModelInfo(providerID: string, baseURL: string, npm: string, headers: Record<string, string>) { + const response = await fetch(`${baseURL}/model/info`, { headers, signal: AbortSignal.timeout(10_000) }).catch( + () => undefined, + ) + if (!response?.ok) return undefined + + const 
body = (await response.json()) as { + data?: Array<{ + model_name: string + model_info?: { + max_input_tokens?: number | null + max_output_tokens?: number | null + max_tokens?: number | null + input_cost_per_token?: number | null + output_cost_per_token?: number | null + supports_vision?: boolean | null + supports_function_calling?: boolean | null + supports_reasoning?: boolean | null + supports_pdf_input?: boolean | null + } + }> + } + const items = body.data ?? [] + if (items.length === 0) return undefined + + const models: Record<string, Model> = {} + for (const item of items) { + if (!item.model_name || models[item.model_name]) continue + const info = item.model_info ?? {} + const vision = info.supports_vision === true + const model = emptyModel(providerID, item.model_name, npm, baseURL) + model.limit = { + context: info.max_input_tokens ?? DEFAULT_CONTEXT, + output: info.max_output_tokens ?? info.max_tokens ?? DEFAULT_OUTPUT, + } + model.cost = { + input: info.input_cost_per_token ?? 0, + output: info.output_cost_per_token ?? 
0, + cache: { read: 0, write: 0 }, + } + model.capabilities = { + ...model.capabilities, + reasoning: info.supports_reasoning === true, + toolcall: info.supports_function_calling !== false, + attachment: vision, + input: { ...model.capabilities.input, image: vision, pdf: info.supports_pdf_input === true }, + } + models[item.model_name] = model + } + + log.info("fetchModels: fetched from /model/info", { providerID, count: Object.keys(models).length }) + return models + } + + async function fetchModelList(providerID: string, baseURL: string, npm: string, headers: Record<string, string>) { + const response = await fetch(`${baseURL}/models`, { headers, signal: AbortSignal.timeout(10_000) }).catch( + (e: unknown) => { + log.warn("fetchModels: error fetching /models", { providerID, error: e }) + return undefined + }, + ) + if (!response?.ok) { + if (response) log.warn("fetchModels: failed to fetch /models", { providerID, status: response.status }) + return {} + } + + const body = (await response.json()) as { data?: Array<{ id: string }> } + const models: Record<string, Model> = {} + for (const item of body.data ?? []) { + if (!item.id) continue + models[item.id] = emptyModel(providerID, item.id, npm, baseURL) + } + + log.info("fetchModels: fetched from /models", { providerID, count: Object.keys(models).length }) + return models + } + + async function fetchModels(providerID: string, options: Record<string, any>) { + const baseURL = options["baseURL"]?.replace(/\/+$/, "") + if (!baseURL) { + log.warn("fetchModels: no baseURL for provider", { providerID }) + return {} as Record<string, Model> + } + + const npm = options["npm"] ?? 
"@ai-sdk/openai-compatible" + const headers: Record<string, string> = { Accept: "application/json" } + if (options["apiKey"]) headers["Authorization"] = `Bearer ${options["apiKey"]}` + + // try LiteLLM /model/info first (has limits, costs, capabilities), fall back to /models + const rich = await fetchModelInfo(providerID, baseURL, npm, headers) + if (rich && Object.keys(rich).length > 0) return rich + return fetchModelList(providerID, baseURL, npm, headers) + } + const state = Instance.state(async () => { using _ = log.time("state") const config = await Config.get() @@ -1026,6 +1149,41 @@ export namespace Provider { log.info("found", { providerID }) } + // fetch models dynamically in background for providers with shouldFetchModels enabled + const fetchTargets = configProviders.filter(([, p]) => p.shouldFetchModels !== false) + if (fetchTargets.length > 0) { + Promise.all( + fetchTargets.map(async ([providerID, provider]) => { + const info = database[providerID] + if (!info) return + const options = info.options ?? provider.options ?? {} + const npm = provider.npm ?? 
info.models[Object.keys(info.models)[0]]?.api.npm + log.info("fetchModels: fetching in background", { providerID }) + const fetched = await fetchModels(providerID, { ...options, npm }) + if (Object.keys(fetched).length === 0) return + + const configProvider = config.provider?.[providerID] + + // merge fetched models as base, existing manual models override + const existing = providers[providerID] + if (!existing) return + for (const [modelID, model] of Object.entries(fetched)) { + if (existing.models[modelID]) continue + if (model.status === "deprecated") continue + if (configProvider?.blacklist?.includes(modelID)) continue + if (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) continue + model.variants = mapValues(ProviderTransform.variants(model), (v) => v) + existing.models[modelID] = model + } + + log.info("fetchModels: background fetch complete", { + providerID, + count: Object.keys(fetched).length, + }) + }), + ).catch((e) => log.warn("fetchModels: background fetch failed", { error: e })) + } + return { models: languages, providers, diff --git a/packages/opencode/test/provider/fetch-models.test.ts b/packages/opencode/test/provider/fetch-models.test.ts new file mode 100644 index 00000000000..acb2064b378 --- /dev/null +++ b/packages/opencode/test/provider/fetch-models.test.ts @@ -0,0 +1,185 @@ +import { describe, expect, test } from "bun:test" +import path from "path" +import { tmpdir } from "../fixture/fixture" +import { Instance } from "../../src/project/instance" +import { Provider } from "../../src/provider/provider" + +describe("provider.shouldFetchModels", () => { + test("shouldFetchModels: false prevents API calls to /models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["test-provider"], + provider: { + "test-provider": { + name: "Test Provider", + 
shouldFetchModels: false, + npm: "@ai-sdk/openai-compatible", + api: "https://api.test.com/v1", + models: { + "test-model": { + name: "Test Model", + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://api.test.com/v1", + }, + }, + }, + }), + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + expect(providers["test-provider"]).toBeDefined() + expect(providers["test-provider"].models["test-model"]).toBeDefined() + }, + }) + }) + + test("manual models are available when shouldFetchModels is false", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["custom-ai"], + provider: { + "custom-ai": { + name: "Custom AI", + shouldFetchModels: false, + npm: "@ai-sdk/openai-compatible", + api: "https://custom.ai/v1", + models: { + "model-a": { + name: "Model A", + limit: { context: 128000, output: 4096 }, + }, + "model-b": { + name: "Model B", + limit: { context: 64000, output: 2048 }, + }, + }, + options: { + apiKey: "custom-key", + baseURL: "https://custom.ai/v1", + }, + }, + }, + }), + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const provider = providers["custom-ai"] + expect(provider).toBeDefined() + expect(Object.keys(provider.models)).toHaveLength(2) + expect(provider.models["model-a"].name).toBe("Model A") + expect(provider.models["model-b"].name).toBe("Model B") + }, + }) + }) + + test("blacklist filters manual models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["test-provider"], + provider: { + "test-provider": { + name: "Test Provider", + shouldFetchModels: false, + 
npm: "@ai-sdk/openai-compatible", + api: "https://api.test.com/v1", + blacklist: ["blocked-model"], + models: { + "allowed-model": { + name: "Allowed Model", + }, + "blocked-model": { + name: "Blocked Model", + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://api.test.com/v1", + }, + }, + }, + }), + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const models = providers["test-provider"].models + expect(models["allowed-model"]).toBeDefined() + expect(models["blocked-model"]).toBeUndefined() + }, + }) + }) + + test("whitelist filters manual models", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["test-provider"], + provider: { + "test-provider": { + name: "Test Provider", + shouldFetchModels: false, + npm: "@ai-sdk/openai-compatible", + api: "https://api.test.com/v1", + whitelist: ["whitelisted-model"], + models: { + "whitelisted-model": { + name: "Whitelisted Model", + }, + "other-model": { + name: "Other Model", + }, + }, + options: { + apiKey: "test-key", + baseURL: "https://api.test.com/v1", + }, + }, + }, + }), + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const providers = await Provider.list() + const models = providers["test-provider"].models + expect(models["whitelisted-model"]).toBeDefined() + expect(models["other-model"]).toBeUndefined() + }, + }) + }) +}) diff --git a/packages/opencode/test/session/llm.test.ts b/packages/opencode/test/session/llm.test.ts index a89a00ebc05..d426dbfe9b0 100644 --- a/packages/opencode/test/session/llm.test.ts +++ b/packages/opencode/test/session/llm.test.ts @@ -134,7 +134,7 @@ beforeAll(() => { } const url = new URL(req.url) - const body = (await req.json()) as Record<string, unknown> + const body = req.method === "GET" ? 
{} : ((await req.json()) as Record<string, unknown>) next.resolve({ url, headers: req.headers, body }) if (!url.pathname.endsWith(next.path)) { @@ -250,6 +250,7 @@ describe("session.llm.stream", () => { enabled_providers: [providerID], provider: { [providerID]: { + shouldFetchModels: false, options: { apiKey: "test-key", baseURL: `${server.url.origin}/v1`, @@ -374,6 +375,7 @@ describe("session.llm.stream", () => { provider: { openai: { name: "OpenAI", + shouldFetchModels: false, env: ["OPENAI_API_KEY"], npm: "@ai-sdk/openai", api: "https://api.openai.com/v1", @@ -502,6 +504,7 @@ describe("session.llm.stream", () => { enabled_providers: [providerID], provider: { [providerID]: { + shouldFetchModels: false, options: { apiKey: "test-anthropic-key", baseURL: `${server.url.origin}/v1`, @@ -603,6 +606,7 @@ describe("session.llm.stream", () => { enabled_providers: [providerID], provider: { [providerID]: { + shouldFetchModels: false, options: { apiKey: "test-google-key", baseURL: `${server.url.origin}/v1beta`,