Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/opencode/src/auth/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ export namespace Auth {
.object({
type: z.literal("api"),
key: z.string(),
baseURL: z.string().optional(),
})
.meta({ ref: "ApiAuth" })

Expand Down
36 changes: 30 additions & 6 deletions packages/opencode/src/cli/cmd/auth.ts
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ export const AuthCommand = cmd({
describe: "manage credentials",
builder: (yargs) =>
yargs.command(AuthLoginCommand).command(AuthLogoutCommand).command(AuthListCommand).demandCommand(),
async handler() {},
async handler() { },
})

export const AuthListCommand = cmd({
Expand Down Expand Up @@ -288,7 +288,7 @@ export const AuthLoginCommand = cmd({
prompts.outro("Done")
return
}
await ModelsDev.refresh().catch(() => {})
await ModelsDev.refresh().catch(() => { })

const config = await Config.get()

Expand Down Expand Up @@ -386,10 +386,10 @@ export const AuthLoginCommand = cmd({
if (provider === "amazon-bedrock") {
prompts.log.info(
"Amazon Bedrock authentication priority:\n" +
" 1. Bearer token (AWS_BEARER_TOKEN_BEDROCK or /connect)\n" +
" 2. AWS credential chain (profile, access keys, IAM roles, EKS IRSA)\n\n" +
"Configure via opencode.json options (profile, region, endpoint) or\n" +
"AWS environment variables (AWS_PROFILE, AWS_REGION, AWS_ACCESS_KEY_ID, AWS_WEB_IDENTITY_TOKEN_FILE).",
" 1. Bearer token (AWS_BEARER_TOKEN_BEDROCK or /connect)\n" +
" 2. AWS credential chain (profile, access keys, IAM roles, EKS IRSA)\n\n" +
"Configure via opencode.json options (profile, region, endpoint) or\n" +
"AWS environment variables (AWS_PROFILE, AWS_REGION, AWS_ACCESS_KEY_ID, AWS_WEB_IDENTITY_TOKEN_FILE).",
)
}

Expand All @@ -407,6 +407,30 @@ export const AuthLoginCommand = cmd({
)
}

if (provider === "lmstudio") {
const baseURL = await prompts.text({
message: "Enter LM Studio server address",
placeholder: "http://127.0.0.1:1234/v1",
defaultValue: "http://127.0.0.1:1234/v1",
validate: (x) => (x && x.startsWith("http") ? undefined : "Must start with http:// or https://"),
})
if (prompts.isCancel(baseURL)) throw new UI.CancelledError()

const key = await prompts.password({
message: "Enter LM Studio API key (optional)",
})
if (prompts.isCancel(key)) throw new UI.CancelledError()

await Auth.set(provider, {
type: "api",
key: key || "sk-nothing",
baseURL,
})

prompts.outro("Done")
return
}

const key = await prompts.password({
message: "Enter your API key",
validate: (x) => (x && x.length > 0 ? undefined : "Required"),
Expand Down
83 changes: 75 additions & 8 deletions packages/opencode/src/provider/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -547,7 +547,7 @@ export namespace Provider {
if (!apiToken) {
throw new Error(
"CLOUDFLARE_API_TOKEN (or CF_AIG_TOKEN) is required for Cloudflare AI Gateway. " +
"Set it via environment variable or run `opencode auth cloudflare-ai-gateway`.",
"Set it via environment variable or run `opencode auth cloudflare-ai-gateway`.",
)
}

Expand Down Expand Up @@ -588,6 +588,73 @@ export namespace Provider {
},
}
},
lmstudio: async (provider) => {
  // Sentinel written by `opencode auth login` when the user leaves the
  // optional LM Studio API key blank — means "no auth header required".
  const NO_KEY_SENTINEL = "sk-nothing"
  const DEFAULT_BASE_URL = "http://127.0.0.1:1234/v1"

  const auth = await Auth.get("lmstudio")
  // Strip trailing slashes so `${baseURL}/models` never yields "…/v1//models"
  // when the user entered the address with a trailing "/" at login time.
  const baseURL = ((auth?.type === "api" ? auth.baseURL : undefined) ?? DEFAULT_BASE_URL).replace(/\/+$/, "")
  const apiKey = auth?.type === "api" ? auth.key : undefined

  try {
    // Best-effort discovery of locally loaded models via LM Studio's
    // OpenAI-compatible /models endpoint. Short timeout: the server is
    // expected on localhost and provider loading must not stall when it
    // isn't running.
    const response = await fetch(`${baseURL}/models`, {
      headers: apiKey && apiKey !== NO_KEY_SENTINEL ? { Authorization: `Bearer ${apiKey}` } : {},
      signal: AbortSignal.timeout(2000),
    })
    if (response.ok) {
      const json = (await response.json()) as {
        data: { id: string; max_context_length?: number }[]
      }
      if (Array.isArray(json.data)) {
        for (const m of json.data) {
          // Never clobber a model already registered (e.g. from models.dev
          // metadata or explicit user config).
          if (provider.models[m.id]) continue

          const discoveredContext = m.max_context_length ?? 128000
          const context = Math.max(discoveredContext, 8192) // Floor at 8k for stability
          // Cap generation at a quarter of the context, never above 16k.
          const output = Math.min(Math.floor(context / 4), 16384)

          provider.models[m.id] = {
            id: m.id,
            name: `${m.id} (local)`,
            providerID: "lmstudio",
            api: {
              id: m.id,
              url: baseURL,
              npm: "@ai-sdk/openai-compatible",
            },
            capabilities: {
              temperature: true,
              reasoning: false,
              attachment: true,
              toolcall: true,
              input: { text: true, audio: false, image: true, video: false, pdf: false },
              output: { text: true, audio: false, image: false, video: false, pdf: false },
              interleaved: false,
            },
            // Local models are free.
            cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
            limit: { context, output },
            headers: {},
            options: {},
            release_date: "",
            status: "active",
            family: "",
          }

          if (context < 32768) {
            log.warn("LM Studio model has a small context limit, which may cause errors with OpenCode's large system prompt. Consider increasing it in LM Studio settings.", { id: m.id, context })
          } else {
            log.info("Discovered LM Studio model", { id: m.id, context })
          }
        }
      }
    }
  } catch (e) {
    // Discovery is best-effort: LM Studio may simply not be running.
    log.error("Failed to discover LM Studio models", { error: e })
  }

  return {
    // Only autoload when the user explicitly logged in to lmstudio.
    autoload: !!auth,
    options: { baseURL, apiKey },
  }
},
}

export const Model = z
Expand Down Expand Up @@ -699,13 +766,13 @@ export namespace Provider {
},
experimentalOver200K: model.cost?.context_over_200k
? {
cache: {
read: model.cost.context_over_200k.cache_read ?? 0,
write: model.cost.context_over_200k.cache_write ?? 0,
},
input: model.cost.context_over_200k.input,
output: model.cost.context_over_200k.output,
}
cache: {
read: model.cost.context_over_200k.cache_read ?? 0,
write: model.cost.context_over_200k.cache_write ?? 0,
},
input: model.cost.context_over_200k.input,
output: model.cost.context_over_200k.output,
}
: undefined,
},
limit: {
Expand Down
Loading