Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion apps/web/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@
"next-themes": "0.4.6",
"nodemailer": "7.0.9",
"nuqs": "2.7.2",
"ollama-ai-provider": "1.2.0",
"ollama-ai-provider-v2": "^1.5.5",
"openai": "6.6.0",
"p-queue": "9.0.0",
"p-retry": "7.1.0",
Expand Down
5 changes: 4 additions & 1 deletion apps/web/utils/actions/settings.validation.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,10 @@ export const saveAiSettingsBody = z
aiApiKey: z.string().optional(),
})
.superRefine((val, ctx) => {
// Ollama runs locally and needs no API key; every other non-default
// provider must have one supplied by the user.
const requiresApiKey =
  val.aiProvider !== DEFAULT_PROVIDER && val.aiProvider !== Provider.OLLAMA;

if (!val.aiApiKey && requiresApiKey) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: "You must provide an API key for this provider",
Expand Down
58 changes: 46 additions & 12 deletions apps/web/utils/llms/model.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import { getModel } from "./model";
import { Provider } from "./config";
import { env } from "@/env";
import type { UserAIFields } from "./types";
import { createOllama } from "ollama-ai-provider-v2";
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 Badge Mock new Ollama provider in tests

The test file now imports createOllama from ollama-ai-provider-v2, but the mock still targets the old ollama-ai-provider module later in the file, so createOllama remains the real implementation. The new Ollama tests that call expect(createOllama).toHaveBeenCalledWith(...) will therefore fail with Vitest’s “received value must be a mock or spy” error when running pnpm vitest run utils/llms/model.test.ts. Update the mock to point at the -v2 module so the spy assertions execute against a mocked function.

Useful? React with 👍 / 👎.


// Mock AI provider imports
vi.mock("@ai-sdk/openai", () => ({
Expand Down Expand Up @@ -31,7 +32,7 @@ vi.mock("@openrouter/ai-sdk-provider", () => ({
})),
}));

vi.mock("ollama-ai-provider", () => ({
vi.mock("ollama-ai-provider-v2", () => ({
createOllama: vi.fn(() => (model: string) => ({ model })),
}));

Expand All @@ -50,6 +51,7 @@ vi.mock("@/env", () => ({
ANTHROPIC_API_KEY: "test-anthropic-key",
GROQ_API_KEY: "test-groq-key",
OPENROUTER_API_KEY: "test-openrouter-key",
OPENROUTER_BACKUP_MODEL: "google/gemini-2.5-flash",
OLLAMA_BASE_URL: "http://localhost:11434",
NEXT_PUBLIC_OLLAMA_MODEL: "llama3",
BEDROCK_REGION: "us-west-2",
Expand All @@ -75,6 +77,8 @@ describe("Models", () => {
vi.mocked(env).DEFAULT_LLM_MODEL = undefined;
vi.mocked(env).BEDROCK_ACCESS_KEY = "";
vi.mocked(env).BEDROCK_SECRET_KEY = "";
vi.mocked(env).NEXT_PUBLIC_OLLAMA_MODEL = "llama3";
vi.mocked(env).OLLAMA_BASE_URL = "http://localhost:11434";
});

describe("getModel", () => {
Expand Down Expand Up @@ -153,18 +157,48 @@ describe("Models", () => {
expect(result.model).toBeDefined();
});

it("should configure Ollama model correctly", () => {
  // Ollama is keyless: a null aiApiKey must still yield a working model.
  const userAi: UserAIFields = {
    aiApiKey: null,
    aiProvider: Provider.OLLAMA!,
    aiModel: "llama3",
  };

  const result = getModel(userAi);

  expect(result.provider).toBe(Provider.OLLAMA);
  expect(result.modelName).toBe("llama3");
  expect(result.model).toBeDefined();
  // createOllama must receive the base URL from env so self-hosted
  // setups pointing at a non-default Ollama server are honored.
  expect(createOllama).toHaveBeenCalledWith({
    baseURL: "http://localhost:11434",
  });
  // Local Ollama has no hosted backup model.
  expect(result.backupModel).toBeNull();
});

it("should throw when Ollama model is missing", () => {
  // Without an explicit model name there is nothing to request from
  // Ollama, so getModel is expected to reject the configuration.
  const fields: UserAIFields = {
    aiApiKey: null,
    aiProvider: Provider.OLLAMA!,
    aiModel: null,
  };

  expect(() => getModel(fields)).toThrow("Ollama model must be specified");
});

it("should fall back to default Ollama base URL when env missing", () => {
  // Simulate a deployment where OLLAMA_BASE_URL was never configured.
  vi.mocked(env).OLLAMA_BASE_URL = undefined as any;

  const fields: UserAIFields = {
    aiApiKey: null,
    aiProvider: Provider.OLLAMA!,
    aiModel: "llama3",
  };

  getModel(fields);

  // The provider factory should still be handed the standard local endpoint.
  expect(createOllama).toHaveBeenCalledWith({
    baseURL: "http://localhost:11434",
  });
});

it("should configure Anthropic model correctly without Bedrock credentials", () => {
const userAi: UserAIFields = {
Expand Down
37 changes: 23 additions & 14 deletions apps/web/utils/llms/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import { createGoogleGenerativeAI } from "@ai-sdk/google";
import { createGroq } from "@ai-sdk/groq";
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
import { createGateway } from "@ai-sdk/gateway";
// import { createOllama } from "ollama-ai-provider";
import { createOllama } from "ollama-ai-provider-v2";
import { env } from "@/env";
import { Provider } from "@/utils/llms/config";
import type { UserAIFields } from "@/utils/llms/types";
Expand Down Expand Up @@ -128,16 +128,21 @@ function selectModel(
};
}
case Provider.OLLAMA: {
  // Ollama requires an explicit model name on the user's settings.
  // NOTE(review): the previous implementation fell back to
  // env.NEXT_PUBLIC_OLLAMA_MODEL when aiModel was unset, and the
  // self-hosting docs still tell users to set that variable — confirm
  // whether that fallback should be restored here.
  const modelName = aiModel;

  if (!modelName) {
    throw new Error("Ollama model must be specified");
  }

  // Default to the standard local Ollama endpoint when none is configured.
  const baseURL = env.OLLAMA_BASE_URL || "http://localhost:11434";
  const ollama = createOllama({ baseURL });

  return {
    provider: Provider.OLLAMA!,
    modelName,
    model: ollama(modelName),
    // No hosted backup model applies when running against local Ollama.
    backupModel: null,
  };
}

case Provider.BEDROCK: {
Expand Down Expand Up @@ -208,7 +213,7 @@ function createOpenRouterProviderOptions(
function selectEconomyModel(userAi: UserAIFields): SelectModel {
if (env.ECONOMY_LLM_PROVIDER && env.ECONOMY_LLM_MODEL) {
const apiKey = getProviderApiKey(env.ECONOMY_LLM_PROVIDER);
if (!apiKey) {
if (!apiKey && providerRequiresApiKey(env.ECONOMY_LLM_PROVIDER)) {
logger.warn("Economy LLM provider configured but API key not found", {
provider: env.ECONOMY_LLM_PROVIDER,
});
Expand Down Expand Up @@ -245,7 +250,7 @@ function selectEconomyModel(userAi: UserAIFields): SelectModel {
function selectChatModel(userAi: UserAIFields): SelectModel {
if (env.CHAT_LLM_PROVIDER && env.CHAT_LLM_MODEL) {
const apiKey = getProviderApiKey(env.CHAT_LLM_PROVIDER);
if (!apiKey) {
if (!apiKey && providerRequiresApiKey(env.CHAT_LLM_PROVIDER)) {
logger.warn("Chat LLM provider configured but API key not found", {
provider: env.CHAT_LLM_PROVIDER,
});
Expand Down Expand Up @@ -285,7 +290,7 @@ function selectDefaultModel(userAi: UserAIFields): SelectModel {

// If user has not api key set, then use default model
// If they do they can use the model of their choice
if (aiApiKey) {
if (aiApiKey || userAi.aiProvider === Provider.OLLAMA) {
aiProvider = userAi.aiProvider || env.DEFAULT_LLM_PROVIDER;
aiModel = userAi.aiModel || null;
} else {
Expand Down Expand Up @@ -337,6 +342,10 @@ function getProviderApiKey(provider: string) {
return providerApiKeys[provider];
}

/**
 * Whether the given provider needs an API key to be usable.
 * Ollama is the one keyless provider (it is served from a base URL,
 * not an authenticated cloud API).
 */
function providerRequiresApiKey(provider: string) {
  const isKeyless = provider === Provider.OLLAMA;
  return !isKeyless;
}

function getBackupModel(userApiKey: string | null): LanguageModelV2 | null {
// disable backup model if user is using their own api key
if (userApiKey) return null;
Expand Down
14 changes: 14 additions & 0 deletions docs/hosting/self-hosting.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,20 @@ If doing this manually edit then you'll need to configure:
- **LLM Provider**: Uncomment one provider block and add your API key
- **Optional**: Microsoft OAuth, external Redis, etc.

#### Using Ollama (local LLM)

To use a locally hosted Ollama model instead of a cloud LLM provider:

1. Set `NEXT_PUBLIC_OLLAMA_MODEL` in `apps/web/.env` to the exact model name you have pulled in Ollama (e.g., `llama3` or `qwen2.5`).
2. (Optional) Set `OLLAMA_BASE_URL` if your Ollama server is not on the default `http://localhost:11434`. When running the app in Docker but Ollama is on the host, use `http://host.docker.internal:11434`.
3. Restart the stack so the updated environment variables are loaded:

```bash
NEXT_PUBLIC_BASE_URL=https://yourdomain.com docker compose --env-file apps/web/.env --profile all up -d
```

No API key is required for Ollama. The UI will only show Ollama as a selectable provider when `NEXT_PUBLIC_OLLAMA_MODEL` is set.

For detailed configuration instructions, see the [Environment Variables Reference](./environment-variables.md).

**Note**: If you only want to use Microsoft and not Google OAuth, set the Google client ID and secret to `skipped`.
Expand Down
58 changes: 22 additions & 36 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.