diff --git a/crates/goose/src/config/signup_openrouter/mod.rs b/crates/goose/src/config/signup_openrouter/mod.rs
index 47eb5dd9b38b..cc5c8b079c06 100644
--- a/crates/goose/src/config/signup_openrouter/mod.rs
+++ b/crates/goose/src/config/signup_openrouter/mod.rs
@@ -14,7 +14,7 @@ use tokio::sync::oneshot;
use tokio::time::timeout;
/// Default models for openrouter config configuration
-const OPENROUTER_DEFAULT_MODEL: &str = "qwen/qwen3-coder";
+const OPENROUTER_DEFAULT_MODEL: &str = "anthropic/claude-sonnet-4";
const OPENROUTER_AUTH_URL: &str = "https://openrouter.ai/auth";
const OPENROUTER_TOKEN_URL: &str = "https://openrouter.ai/api/v1/auth/keys";
diff --git a/crates/goose/src/providers/openrouter.rs b/crates/goose/src/providers/openrouter.rs
index e98c49fc1690..00fa77bdea7a 100644
--- a/crates/goose/src/providers/openrouter.rs
+++ b/crates/goose/src/providers/openrouter.rs
@@ -202,6 +202,12 @@ fn create_request_based_on_model(
payload = update_request_for_anthropic(&payload);
}
+ // Always add transforms: ["middle-out"] for OpenRouter to handle prompts > context size
+ payload
+ .as_object_mut()
+ .unwrap()
+ .insert("transforms".to_string(), json!(["middle-out"]));
+
Ok(payload)
}
diff --git a/ui/desktop/src/App.test.tsx b/ui/desktop/src/App.test.tsx
new file mode 100644
index 000000000000..80f7908f2c45
--- /dev/null
+++ b/ui/desktop/src/App.test.tsx
@@ -0,0 +1,291 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
+
+/**
+ * @vitest-environment jsdom
+ */
+import React from 'react';
+import { render, waitFor } from '@testing-library/react';
+import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
+import App from './App';
+
+// Set up globals for jsdom
+Object.defineProperty(window, 'location', {
+ value: {
+ hash: '',
+ search: '',
+ href: 'http://localhost:3000',
+ origin: 'http://localhost:3000',
+ },
+ writable: true,
+});
+
+Object.defineProperty(window, 'history', {
+ value: {
+ replaceState: vi.fn(),
+ state: null,
+ },
+ writable: true,
+});
+
+// Mock dependencies
+vi.mock('./utils/providerUtils', () => ({
+ initializeSystem: vi.fn().mockResolvedValue(undefined),
+}));
+
+vi.mock('./utils/costDatabase', () => ({
+ initializeCostDatabase: vi.fn().mockResolvedValue(undefined),
+}));
+
+vi.mock('./api/sdk.gen', () => ({
+ initConfig: vi.fn().mockResolvedValue(undefined),
+ readAllConfig: vi.fn().mockResolvedValue(undefined),
+ backupConfig: vi.fn().mockResolvedValue(undefined),
+ recoverConfig: vi.fn().mockResolvedValue(undefined),
+ validateConfig: vi.fn().mockResolvedValue(undefined),
+}));
+
+vi.mock('./utils/openRouterSetup', () => ({
+ startOpenRouterSetup: vi.fn().mockResolvedValue({ success: false, message: 'Test' }),
+}));
+
+vi.mock('./utils/ollamaDetection', () => ({
+ checkOllamaStatus: vi.fn().mockResolvedValue({ isRunning: false }),
+}));
+
+// Mock the ConfigContext module
+vi.mock('./components/ConfigContext', () => ({
+ useConfig: () => ({
+ read: vi.fn().mockResolvedValue(null),
+ update: vi.fn(),
+ getExtensions: vi.fn().mockReturnValue([]),
+ addExtension: vi.fn(),
+ updateExtension: vi.fn(),
+ createProviderDefaults: vi.fn(),
+ }),
+  ConfigProvider: ({ children }: { children: React.ReactNode }) => <>{children}</>,
+}));
+
+// Mock other components to simplify testing
+vi.mock('./components/ErrorBoundary', () => ({
+  ErrorUI: ({ error }: { error: Error }) => <div>Error: {error.message}</div>,
+}));
+
+// Mock ProviderGuard to show the welcome screen when no provider is configured
+vi.mock('./components/ProviderGuard', () => ({
+ default: ({ children }: { children: React.ReactNode }) => {
+ // In a real app, ProviderGuard would check for provider and show welcome screen
+ // For this test, we'll simulate that behavior
+ const hasProvider = window.electron?.getConfig()?.GOOSE_DEFAULT_PROVIDER;
+    if (!hasProvider) {
+      return <div>Welcome to Goose!</div>;
+    }
+    return <>{children}</>;
+ },
+}));
+
+vi.mock('./components/ModelAndProviderContext', () => ({
+  ModelAndProviderProvider: ({ children }: { children: React.ReactNode }) => <>{children}</>,
+ useModelAndProvider: () => ({
+ provider: null,
+ model: null,
+ getCurrentModelAndProvider: vi.fn(),
+ setCurrentModelAndProvider: vi.fn(),
+ }),
+}));
+
+vi.mock('./contexts/ChatContext', () => ({
+  ChatProvider: ({ children }: { children: React.ReactNode }) => <>{children}</>,
+}));
+
+vi.mock('./contexts/DraftContext', () => ({
+  DraftProvider: ({ children }: { children: React.ReactNode }) => <>{children}</>,
+}));
+
+vi.mock('./components/ui/ConfirmationModal', () => ({
+ ConfirmationModal: () => null,
+}));
+
+vi.mock('react-toastify', () => ({
+ ToastContainer: () => null,
+}));
+
+vi.mock('./components/GoosehintsModal', () => ({
+ GoosehintsModal: () => null,
+}));
+
+vi.mock('./components/AnnouncementModal', () => ({
+ default: () => null,
+}));
+
+vi.mock('./hooks/useChat', () => ({
+ useChat: () => ({
+ chat: {
+ id: 'test-id',
+ title: 'Test Chat',
+ messages: [],
+ messageHistoryIndex: 0,
+ recipeConfig: null,
+ },
+ setChat: vi.fn(),
+ }),
+}));
+
+// Mock react-router-dom to avoid HashRouter issues in tests
+vi.mock('react-router-dom', () => ({
+  HashRouter: ({ children }: { children: React.ReactNode }) => <>{children}</>,
+  Routes: ({ children }: { children: React.ReactNode }) => <>{children}</>,
+ Route: ({ element }: { element: React.ReactNode }) => element,
+ useNavigate: () => vi.fn(),
+ useLocation: () => ({ state: null, pathname: '/' }),
+ Outlet: () => null,
+}));
+
+// Mock electron API
+const mockElectron = {
+ getConfig: vi.fn().mockReturnValue({
+ GOOSE_ALLOWLIST_WARNING: false,
+ GOOSE_WORKING_DIR: '/test/dir',
+ }),
+ logInfo: vi.fn(),
+ on: vi.fn(),
+ off: vi.fn(),
+ reactReady: vi.fn(),
+ getAllowedExtensions: vi.fn().mockResolvedValue([]),
+ platform: 'darwin',
+ createChatWindow: vi.fn(),
+};
+
+// Mock appConfig
+const mockAppConfig = {
+ get: vi.fn((key: string) => {
+ if (key === 'GOOSE_WORKING_DIR') return '/test/dir';
+ return null;
+ }),
+};
+
+// Attach mocks to window
+(window as any).electron = mockElectron;
+(window as any).appConfig = mockAppConfig;
+
+// Mock matchMedia
+Object.defineProperty(window, 'matchMedia', {
+ writable: true,
+ value: vi.fn().mockImplementation((query) => ({
+ matches: false,
+ media: query,
+ onchange: null,
+ addListener: vi.fn(), // deprecated
+ removeListener: vi.fn(), // deprecated
+ addEventListener: vi.fn(),
+ removeEventListener: vi.fn(),
+ dispatchEvent: vi.fn(),
+ })),
+});
+
+describe('App Component - Brand New State', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ window.location.hash = '';
+ window.location.search = '';
+ window.sessionStorage.clear();
+ window.localStorage.clear();
+ });
+
+ afterEach(() => {
+ vi.clearAllMocks();
+ });
+
+ it('should redirect to "/" when app is brand new (no provider configured)', async () => {
+ // Mock no provider configured
+ mockElectron.getConfig.mockReturnValue({
+ GOOSE_DEFAULT_PROVIDER: null,
+ GOOSE_DEFAULT_MODEL: null,
+ GOOSE_ALLOWLIST_WARNING: false,
+ });
+
+    render(<App />);
+
+ // Wait for initialization
+ await waitFor(() => {
+ expect(mockElectron.reactReady).toHaveBeenCalled();
+ });
+
+ // Check that we navigated to "/" not "/welcome"
+ await waitFor(() => {
+ expect(window.location.hash).toBe('#/');
+ });
+
+ // History should have been updated to "/"
+ expect(window.history.replaceState).toHaveBeenCalledWith({}, '', '#/');
+ });
+
+ it('should handle deep links correctly when app is brand new', async () => {
+ // Mock no provider configured
+ mockElectron.getConfig.mockReturnValue({
+ GOOSE_DEFAULT_PROVIDER: null,
+ GOOSE_DEFAULT_MODEL: null,
+ GOOSE_ALLOWLIST_WARNING: false,
+ });
+
+ // Simulate a deep link
+ window.location.search = '?view=settings';
+
+    render(<App />);
+
+ // Wait for initialization
+ await waitFor(() => {
+ expect(mockElectron.reactReady).toHaveBeenCalled();
+ });
+
+ // Should redirect to settings route via hash
+ await waitFor(() => {
+ expect(window.location.hash).toBe('#/settings');
+ });
+ });
+
+ it('should not redirect to /welcome when provider is configured', async () => {
+ // Mock provider configured
+ mockElectron.getConfig.mockReturnValue({
+ GOOSE_DEFAULT_PROVIDER: 'openai',
+ GOOSE_DEFAULT_MODEL: 'gpt-4',
+ GOOSE_ALLOWLIST_WARNING: false,
+ });
+
+    render(<App />);
+
+ // Wait for initialization
+ await waitFor(() => {
+ expect(mockElectron.reactReady).toHaveBeenCalled();
+ });
+
+ // Should stay at "/" since provider is configured
+ await waitFor(() => {
+ expect(window.location.hash).toBe('#/');
+ });
+ });
+
+ it('should handle config recovery gracefully', async () => {
+ // Mock config error that triggers recovery
+ const { readAllConfig, recoverConfig } = await import('./api/sdk.gen');
+ console.log(recoverConfig);
+ vi.mocked(readAllConfig).mockRejectedValueOnce(new Error('Config read error'));
+
+ mockElectron.getConfig.mockReturnValue({
+ GOOSE_DEFAULT_PROVIDER: null,
+ GOOSE_DEFAULT_MODEL: null,
+ GOOSE_ALLOWLIST_WARNING: false,
+ });
+
+    render(<App />);
+
+ // Wait for initialization and recovery
+ await waitFor(() => {
+ expect(mockElectron.reactReady).toHaveBeenCalled();
+ });
+
+ // App should still initialize and navigate to "/"
+ await waitFor(() => {
+ expect(window.location.hash).toBe('#/');
+ });
+ });
+});
diff --git a/ui/desktop/src/App.tsx b/ui/desktop/src/App.tsx
index 1b08dc5b2513..49f1d9f46833 100644
--- a/ui/desktop/src/App.tsx
+++ b/ui/desktop/src/App.tsx
@@ -986,12 +986,12 @@ export default function App() {
if (error instanceof MalformedConfigError) {
throw error;
}
- window.location.hash = '#/welcome';
- window.history.replaceState({}, '', '#/welcome');
+ window.location.hash = '#/';
+ window.history.replaceState({}, '', '#/');
}
} else {
- window.location.hash = '#/welcome';
- window.history.replaceState({}, '', '#/welcome');
+ window.location.hash = '#/';
+ window.history.replaceState({}, '', '#/');
}
} catch (error) {
console.error('Fatal error during initialization:', error);
diff --git a/ui/desktop/src/components/OllamaSetup.test.tsx b/ui/desktop/src/components/OllamaSetup.test.tsx
new file mode 100644
index 000000000000..291610e4a3b1
--- /dev/null
+++ b/ui/desktop/src/components/OllamaSetup.test.tsx
@@ -0,0 +1,277 @@
+import { describe, it, expect, vi, beforeEach } from 'vitest';
+import { render, screen, waitFor, fireEvent } from '@testing-library/react';
+import { OllamaSetup } from './OllamaSetup';
+import * as ollamaDetection from '../utils/ollamaDetection';
+import * as providerUtils from '../utils/providerUtils';
+import { toastService } from '../toasts';
+
+// Mock dependencies
+vi.mock('../utils/ollamaDetection');
+vi.mock('../utils/providerUtils');
+vi.mock('../toasts');
+
+// Mock useConfig hook
+const mockUpsert = vi.fn();
+const mockAddExtension = vi.fn();
+const mockGetExtensions = vi.fn();
+
+vi.mock('./ConfigContext', () => ({
+ useConfig: () => ({
+ upsert: mockUpsert,
+ addExtension: mockAddExtension,
+ getExtensions: mockGetExtensions,
+ }),
+}));
+
+describe('OllamaSetup', () => {
+ const mockOnSuccess = vi.fn();
+ const mockOnCancel = vi.fn();
+
+ beforeEach(() => {
+ vi.clearAllMocks();
+ // Default mocks
+ vi.mocked(ollamaDetection.getPreferredModel).mockReturnValue('gpt-oss:20b');
+ vi.mocked(ollamaDetection.getOllamaDownloadUrl).mockReturnValue('https://ollama.com/download');
+ });
+
+ describe('when Ollama is not detected', () => {
+ beforeEach(() => {
+ vi.mocked(ollamaDetection.checkOllamaStatus).mockResolvedValue({
+ isRunning: false,
+ host: 'http://127.0.0.1:11434',
+ });
+ });
+
+ it('should show installation instructions', async () => {
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ expect(screen.getByText('Ollama Setup')).toBeInTheDocument();
+ expect(screen.getByText(/Ollama is not detected on your system/)).toBeInTheDocument();
+ });
+ });
+
+ it('should provide download link', async () => {
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ const downloadLink = screen.getByRole('link', { name: /Install Ollama/ });
+ expect(downloadLink).toHaveAttribute('href', 'https://ollama.com/download');
+ expect(downloadLink).toHaveAttribute('target', '_blank');
+ });
+ });
+
+ it('should show polling state when install link is clicked', async () => {
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ // Mock pollForOllama
+ const mockStopPolling = vi.fn();
+ vi.mocked(ollamaDetection.pollForOllama).mockReturnValue(mockStopPolling);
+
+ await waitFor(() => {
+ const installLink = screen.getByText('Install Ollama');
+ fireEvent.click(installLink);
+ });
+
+ expect(screen.getByText(/Waiting for Ollama to start/)).toBeInTheDocument();
+ expect(ollamaDetection.pollForOllama).toHaveBeenCalled();
+ });
+
+ it('should handle cancel button', async () => {
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText('Use a different provider'));
+ });
+
+ expect(mockOnCancel).toHaveBeenCalled();
+ });
+ });
+
+ describe('when Ollama is detected but model is not available', () => {
+ beforeEach(() => {
+ vi.mocked(ollamaDetection.checkOllamaStatus).mockResolvedValue({
+ isRunning: true,
+ host: 'http://127.0.0.1:11434',
+ });
+ vi.mocked(ollamaDetection.hasModel).mockResolvedValue(false);
+ });
+
+ it('should show model download prompt', async () => {
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ expect(screen.getByText(/The gpt-oss:20b model is not installed/)).toBeInTheDocument();
+ expect(screen.getByText(/Download gpt-oss:20b/)).toBeInTheDocument();
+ });
+ });
+
+ it('should handle model download', async () => {
+ vi.mocked(ollamaDetection.pullOllamaModel).mockResolvedValue(true);
+
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText(/Download gpt-oss:20b/));
+ });
+
+ await waitFor(() => {
+ expect(toastService.success).toHaveBeenCalledWith(
+ expect.objectContaining({
+ title: 'Model Downloaded!',
+ })
+ );
+ });
+ });
+
+ it('should handle download failure', async () => {
+ vi.mocked(ollamaDetection.pullOllamaModel).mockResolvedValue(false);
+
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText(/Download gpt-oss:20b/));
+ });
+
+ await waitFor(() => {
+ expect(toastService.error).toHaveBeenCalledWith(
+ expect.objectContaining({
+ title: 'Download Failed',
+ })
+ );
+ });
+ });
+ });
+
+ describe('when Ollama and model are both available', () => {
+ beforeEach(() => {
+ vi.mocked(ollamaDetection.checkOllamaStatus).mockResolvedValue({
+ isRunning: true,
+ host: 'http://127.0.0.1:11434',
+ });
+ vi.mocked(ollamaDetection.hasModel).mockResolvedValue(true);
+ });
+
+ it('should show ready state and connect button', async () => {
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ expect(screen.getByText(/Ollama is running on your system/)).toBeInTheDocument();
+ });
+ });
+
+ it('should handle successful connection', async () => {
+ vi.mocked(providerUtils.initializeSystem).mockResolvedValue(undefined);
+
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText(/Use Goose with Ollama/));
+ });
+
+ await waitFor(() => {
+ expect(mockUpsert).toHaveBeenCalledWith('GOOSE_PROVIDER', 'ollama', false);
+ expect(mockUpsert).toHaveBeenCalledWith('GOOSE_MODEL', 'gpt-oss:20b', false);
+ expect(mockUpsert).toHaveBeenCalledWith('OLLAMA_HOST', 'localhost', false);
+ expect(providerUtils.initializeSystem).toHaveBeenCalledWith(
+ 'ollama',
+ 'gpt-oss:20b',
+ expect.any(Object)
+ );
+ expect(toastService.success).toHaveBeenCalled();
+ expect(mockOnSuccess).toHaveBeenCalled();
+ });
+ });
+
+ it('should handle connection failure', async () => {
+ const testError = new Error('Initialization failed');
+ vi.mocked(providerUtils.initializeSystem).mockRejectedValue(testError);
+
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText('Use Goose with Ollama'));
+ });
+
+ await waitFor(() => {
+ expect(toastService.error).toHaveBeenCalledWith(
+ expect.objectContaining({
+ title: 'Connection Failed',
+ msg: expect.stringContaining('Initialization failed'),
+ })
+ );
+ });
+ });
+ });
+
+ describe('polling behavior', () => {
+ it('should clean up polling on unmount', async () => {
+ const mockStopPolling = vi.fn();
+ vi.mocked(ollamaDetection.pollForOllama).mockReturnValue(mockStopPolling);
+
+ vi.mocked(ollamaDetection.checkOllamaStatus).mockResolvedValue({
+ isRunning: false,
+ host: 'http://127.0.0.1:11434',
+ });
+
+      const { unmount } = render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText('Install Ollama'));
+ });
+
+ expect(ollamaDetection.pollForOllama).toHaveBeenCalled();
+
+ unmount();
+
+ expect(mockStopPolling).toHaveBeenCalled();
+ });
+
+ it('should handle Ollama detection during polling', async () => {
+ vi.mocked(ollamaDetection.checkOllamaStatus).mockResolvedValue({
+ isRunning: false,
+ host: 'http://127.0.0.1:11434',
+ });
+
+ let pollCallback: ((status: { isRunning: boolean; host: string }) => void) | undefined;
+ vi.mocked(ollamaDetection.pollForOllama).mockImplementation((onDetected) => {
+ pollCallback = onDetected;
+ return vi.fn();
+ });
+
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ fireEvent.click(screen.getByText('Install Ollama'));
+ });
+
+ expect(screen.getByText(/Waiting for Ollama/)).toBeInTheDocument();
+
+ // Simulate Ollama being detected
+ vi.mocked(ollamaDetection.hasModel).mockResolvedValue(true);
+ pollCallback!({ isRunning: true, host: 'http://127.0.0.1:11434' });
+
+ await waitFor(() => {
+ expect(screen.getByText('✓ Ollama is running on your system')).toBeInTheDocument();
+ });
+ });
+ });
+
+ describe('error states', () => {
+ it('should handle errors during initial check', async () => {
+ // Mock checkOllamaStatus to resolve with isRunning: false after an error
+ vi.mocked(ollamaDetection.checkOllamaStatus).mockResolvedValue({
+ isRunning: false,
+ host: 'http://127.0.0.1:11434',
+ error: 'Network error',
+ });
+
+      render(<OllamaSetup onSuccess={mockOnSuccess} onCancel={mockOnCancel} />);
+
+ await waitFor(() => {
+ // Should still show not detected state
+ expect(screen.getByText('Ollama is not detected on your system')).toBeInTheDocument();
+ });
+ });
+ });
+});
diff --git a/ui/desktop/src/components/OllamaSetup.tsx b/ui/desktop/src/components/OllamaSetup.tsx
new file mode 100644
index 000000000000..4bdff6b68986
--- /dev/null
+++ b/ui/desktop/src/components/OllamaSetup.tsx
@@ -0,0 +1,260 @@
+import { useState, useEffect, useRef } from 'react';
+import { useConfig } from './ConfigContext';
+import {
+ checkOllamaStatus,
+ getOllamaDownloadUrl,
+ pollForOllama,
+ hasModel,
+ pullOllamaModel,
+ getPreferredModel,
+ type PullProgress,
+} from '../utils/ollamaDetection';
+import { initializeSystem } from '../utils/providerUtils';
+import { toastService } from '../toasts';
+
+interface OllamaSetupProps {
+ onSuccess: () => void;
+ onCancel: () => void;
+}
+
+export function OllamaSetup({ onSuccess, onCancel }: OllamaSetupProps) {
+ const { addExtension, getExtensions, upsert } = useConfig();
+ const [isChecking, setIsChecking] = useState(true);
+ const [ollamaDetected, setOllamaDetected] = useState(false);
+ const [isPolling, setIsPolling] = useState(false);
+ const [isConnecting, setIsConnecting] = useState(false);
+ const [modelStatus, setModelStatus] = useState<
+ 'checking' | 'available' | 'not-available' | 'downloading'
+ >('checking');
+  const [downloadProgress, setDownloadProgress] = useState<PullProgress | null>(null);
+ const stopPollingRef = useRef<(() => void) | null>(null);
+
+ useEffect(() => {
+ // Check if Ollama is already running
+ const checkInitial = async () => {
+ const status = await checkOllamaStatus();
+ setOllamaDetected(status.isRunning);
+
+ // If Ollama is running, check for the preferred model
+ if (status.isRunning) {
+ const modelAvailable = await hasModel(getPreferredModel());
+ setModelStatus(modelAvailable ? 'available' : 'not-available');
+ }
+
+ setIsChecking(false);
+ };
+ checkInitial();
+
+ // Cleanup polling on unmount
+ return () => {
+ if (stopPollingRef.current) {
+ stopPollingRef.current();
+ }
+ };
+ }, []);
+
+ const handleInstallClick = () => {
+ setIsPolling(true);
+
+ // Start polling for Ollama
+ stopPollingRef.current = pollForOllama(
+ async (status) => {
+ setOllamaDetected(status.isRunning);
+ setIsPolling(false);
+
+ // Check for the model
+ const modelAvailable = await hasModel(getPreferredModel());
+ setModelStatus(modelAvailable ? 'available' : 'not-available');
+
+ toastService.success({
+ title: 'Ollama Detected!',
+ msg: 'Ollama is now running. You can connect to it.',
+ });
+ },
+ 3000 // Check every 3 seconds
+ );
+ };
+
+ const handleDownloadModel = async () => {
+ setModelStatus('downloading');
+ setDownloadProgress({ status: 'Starting download...' });
+
+ const success = await pullOllamaModel(getPreferredModel(), (progress) => {
+ setDownloadProgress(progress);
+ });
+
+ if (success) {
+ setModelStatus('available');
+ toastService.success({
+ title: 'Model Downloaded!',
+ msg: `Successfully downloaded ${getPreferredModel()}`,
+ });
+ } else {
+ setModelStatus('not-available');
+ toastService.error({
+ title: 'Download Failed',
+ msg: `Failed to download ${getPreferredModel()}. Please try again.`,
+ traceback: '',
+ });
+ }
+ setDownloadProgress(null);
+ };
+
+ const handleConnectOllama = async () => {
+ setIsConnecting(true);
+ try {
+ // Set up Ollama configuration
+ await upsert('GOOSE_PROVIDER', 'ollama', false);
+ await upsert('GOOSE_MODEL', getPreferredModel(), false);
+ await upsert('OLLAMA_HOST', 'localhost', false);
+
+ // Initialize the system with Ollama
+ await initializeSystem('ollama', getPreferredModel(), {
+ getExtensions,
+ addExtension,
+ });
+
+ toastService.success({
+ title: 'Success!',
+ msg: `Connected to Ollama with ${getPreferredModel()} model.`,
+ });
+
+ onSuccess();
+ } catch (error) {
+ console.error('Failed to connect to Ollama:', error);
+ toastService.error({
+ title: 'Connection Failed',
+ msg: `Failed to connect to Ollama: ${error instanceof Error ? error.message : String(error)}`,
+ traceback: error instanceof Error ? error.stack || '' : '',
+ });
+ setIsConnecting(false);
+ }
+ };
+
+ if (isChecking) {
+ return (
+
+
+
Checking for Ollama...
+
+ );
+ }
+
+ return (
+
+
+
Ollama Setup
+
+ Ollama lets you run AI models for free, private and locally on your computer.
+
+
+
+ {ollamaDetected ? (
+
+
+
✓ Ollama is running on your system
+
+
+ {modelStatus === 'checking' ? (
+
+ ) : modelStatus === 'not-available' ? (
+
+
+
+ The {getPreferredModel()} model is not installed
+
+
+ This model is recommended for the best experience with Goose
+
+
+
+ Download {getPreferredModel()} (~11GB)
+
+
+ ) : modelStatus === 'downloading' ? (
+
+
+
+ Downloading {getPreferredModel()}...
+
+ {downloadProgress && (
+ <>
+
+ {downloadProgress.status}
+
+ {downloadProgress.total && downloadProgress.completed && (
+
+
+
+ {Math.round((downloadProgress.completed / downloadProgress.total) * 100)}%
+
+
+ )}
+ >
+ )}
+
+
+ ) : (
+
+ {isConnecting ? 'Connecting...' : 'Use Goose with Ollama'}
+
+ )}
+
+ ) : (
+
+
+
Ollama is not detected on your system
+
+
+ {isPolling ? (
+
+
+
Waiting for Ollama to start...
+
+ Once Ollama is installed and running, we'll automatically detect it.
+
+
+ ) : (
+
+ Install Ollama
+
+ )}
+
+ )}
+
+
+ Use a different provider
+
+
+ );
+}
diff --git a/ui/desktop/src/components/ProviderGuard.tsx b/ui/desktop/src/components/ProviderGuard.tsx
index 3c88e806e833..831a23b52397 100644
--- a/ui/desktop/src/components/ProviderGuard.tsx
+++ b/ui/desktop/src/components/ProviderGuard.tsx
@@ -6,6 +6,8 @@ import { startOpenRouterSetup } from '../utils/openRouterSetup';
import WelcomeGooseLogo from './WelcomeGooseLogo';
import { initializeSystem } from '../utils/providerUtils';
import { toastService } from '../toasts';
+import { OllamaSetup } from './OllamaSetup';
+import { checkOllamaStatus } from '../utils/ollamaDetection';
interface ProviderGuardProps {
children: React.ReactNode;
@@ -17,6 +19,8 @@ export default function ProviderGuard({ children }: ProviderGuardProps) {
const [isChecking, setIsChecking] = useState(true);
const [hasProvider, setHasProvider] = useState(false);
const [showFirstTimeSetup, setShowFirstTimeSetup] = useState(false);
+ const [showOllamaSetup, setShowOllamaSetup] = useState(false);
+ const [ollamaDetected, setOllamaDetected] = useState(false);
const [openRouterSetupState, setOpenRouterSetupState] = useState<{
show: boolean;
title: string;
@@ -101,11 +105,15 @@ export default function ProviderGuard({ children }: ProviderGuardProps) {
const provider = (await read('GOOSE_PROVIDER', false)) ?? config.GOOSE_DEFAULT_PROVIDER;
const model = (await read('GOOSE_MODEL', false)) ?? config.GOOSE_DEFAULT_MODEL;
+ // Always check for Ollama regardless of provider status
+ const ollamaStatus = await checkOllamaStatus();
+ setOllamaDetected(ollamaStatus.isRunning);
+
if (provider && model) {
console.log('ProviderGuard - Provider and model found, continuing normally');
setHasProvider(true);
} else {
- console.log('ProviderGuard - No provider/model configured, showing first time setup');
+ console.log('ProviderGuard - No provider/model configured');
setShowFirstTimeSetup(true);
}
} catch (error) {
@@ -121,7 +129,24 @@ export default function ProviderGuard({ children }: ProviderGuardProps) {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [read]);
- if (isChecking && !openRouterSetupState?.show && !showFirstTimeSetup) {
+ // Poll for Ollama status while the first time setup is shown
+ useEffect(() => {
+ if (!showFirstTimeSetup) return;
+
+ const checkOllama = async () => {
+ const status = await checkOllamaStatus();
+ setOllamaDetected(status.isRunning);
+ };
+
+ // Check every 3 seconds
+ const interval = window.setInterval(checkOllama, 3000);
+
+ return () => {
+ window.clearInterval(interval);
+ };
+ }, [showFirstTimeSetup]);
+
+ if (isChecking && !openRouterSetupState?.show && !showFirstTimeSetup && !showOllamaSetup) {
return (
@@ -143,6 +168,28 @@ export default function ProviderGuard({ children }: ProviderGuardProps) {
);
}
+ if (showOllamaSetup) {
+ return (
+
+
+
+
+
+
{
+ setShowOllamaSetup(false);
+ setHasProvider(true);
+ }}
+ onCancel={() => {
+ setShowOllamaSetup(false);
+ setShowFirstTimeSetup(true);
+ }}
+ />
+
+
+ );
+ }
+
if (showFirstTimeSetup) {
return (
@@ -156,11 +203,28 @@ export default function ProviderGuard({ children }: ProviderGuardProps) {
Automatic setup with OpenRouter (recommended)
+ {
+ setShowFirstTimeSetup(false);
+ setShowOllamaSetup(true);
+ }}
+ className="w-full px-6 py-3 bg-background-muted text-text-standard rounded-lg hover:bg-background-hover transition-colors font-medium flex items-center justify-center gap-2"
+ >
+ {ollamaDetected ? (
+ <>
+ ●
+ Use Ollama (auto detected)
+ >
+ ) : (
+ 'Set up Ollama (run AI locally and free)'
+ )}
+
+
navigate('/welcome', { replace: true })}
className="w-full px-6 py-3 bg-background-muted text-text-standard rounded-lg hover:bg-background-hover transition-colors font-medium"
@@ -170,8 +234,10 @@ export default function ProviderGuard({ children }: ProviderGuardProps) {
- OpenRouter provides access to multiple AI models. To use this it will need to create an
- account with OpenRouter.
+ OpenRouter provides instant access to multiple AI models with a simple setup.
+ {ollamaDetected
+ ? ' Ollama is also detected on your system for running models locally.'
+ : ' You can also install Ollama to run free AI models locally on your computer.'}
diff --git a/ui/desktop/src/utils/ollamaDetection.test.ts b/ui/desktop/src/utils/ollamaDetection.test.ts
new file mode 100644
index 000000000000..8048b20187c0
--- /dev/null
+++ b/ui/desktop/src/utils/ollamaDetection.test.ts
@@ -0,0 +1,323 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
+/* global AbortSignal, TextEncoder, Event, EventListener */
+
+import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+import {
+ checkOllamaStatus,
+ getOllamaModels,
+ hasModel,
+ pullOllamaModel,
+ pollForOllama,
+ getOllamaDownloadUrl,
+ getPreferredModel,
+} from './ollamaDetection';
+
+// Mock fetch globally
+globalThis.fetch = vi.fn();
+
+// Define global objects for testing environment if they don't exist
+if (typeof globalThis.AbortSignal === 'undefined') {
+ globalThis.AbortSignal = class AbortSignal {
+ aborted = false;
+ reason: any = undefined;
+ onabort: ((this: AbortSignal, ev: Event) => any) | null = null;
+
+ addEventListener(_type: string, _listener: EventListener): void {
+ // Mock implementation
+ }
+
+ removeEventListener(_type: string, _listener: EventListener): void {
+ // Mock implementation
+ }
+
+ dispatchEvent(_event: Event): boolean {
+ return true;
+ }
+ } as any;
+}
+
+if (typeof globalThis.TextEncoder === 'undefined') {
+ globalThis.TextEncoder = class TextEncoder {
+ encode(str: string): Uint8Array {
+ return new Uint8Array(str.split('').map((c) => c.charCodeAt(0)));
+ }
+ } as any;
+}
+
+describe('ollamaDetection', () => {
+ beforeEach(() => {
+ vi.clearAllMocks();
+ vi.useFakeTimers();
+ });
+
+ afterEach(() => {
+ vi.useRealTimers();
+ });
+
+ describe('checkOllamaStatus', () => {
+ it('should return isRunning: true when Ollama is accessible', async () => {
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: true,
+ json: async () => ({ models: [] }),
+ });
+
+ const result = await checkOllamaStatus();
+
+ expect(result).toEqual({
+ isRunning: true,
+ host: 'http://127.0.0.1:11434',
+ });
+ expect(globalThis.fetch).toHaveBeenCalledWith('http://127.0.0.1:11434/api/tags', {
+ method: 'GET',
+ signal: expect.any(globalThis.AbortSignal),
+ });
+ });
+
+ it('should return isRunning: false when Ollama is not accessible', async () => {
+ (globalThis.fetch as any).mockRejectedValueOnce(new Error('Connection refused'));
+
+ const result = await checkOllamaStatus();
+
+ expect(result).toEqual({
+ isRunning: false,
+ host: 'http://127.0.0.1:11434',
+ error: 'Connection refused',
+ });
+ });
+
+ it('should timeout after 2 seconds', async () => {
+ let abortSignal: AbortSignal | undefined;
+
+ (globalThis.fetch as any).mockImplementationOnce((_url: string, options: any) => {
+ abortSignal = options.signal;
+ return new Promise((_, reject) => {
+ // Listen for abort signal
+ options.signal.addEventListener('abort', () => {
+ reject(new Error('The operation was aborted'));
+ });
+ });
+ });
+
+ const checkPromise = checkOllamaStatus();
+
+ // Fast-forward 2 seconds
+ vi.advanceTimersByTime(2000);
+
+ // The abort signal should be triggered
+ expect(abortSignal?.aborted).toBe(true);
+
+ const result = await checkPromise;
+ expect(result.isRunning).toBe(false);
+ expect(result.error).toBe('The operation was aborted');
+ });
+ });
+
+ describe('getOllamaModels', () => {
+ it('should return models when API call is successful', async () => {
+ const mockModels = [
+ { name: 'llama2:latest', size: 4733363377, digest: 'abc123', modified_at: '2023-10-01' },
+ { name: 'gpt-oss:20b', size: 13780173839, digest: 'def456', modified_at: '2023-10-02' },
+ ];
+
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: true,
+ json: async () => ({ models: mockModels }),
+ });
+
+ const result = await getOllamaModels();
+
+ expect(result).toEqual(mockModels);
+ });
+
+ it('should return empty array when API call fails', async () => {
+ (globalThis.fetch as any).mockRejectedValueOnce(new Error('Network error'));
+
+ const result = await getOllamaModels();
+
+ expect(result).toEqual([]);
+ });
+
+ it('should handle non-ok responses', async () => {
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: false,
+ statusText: 'Not Found',
+ });
+
+ const result = await getOllamaModels();
+
+ expect(result).toEqual([]);
+ });
+ });
+
+ describe('hasModel', () => {
+ it('should return true when model exists', async () => {
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: true,
+ json: async () => ({
+ models: [
+ {
+ name: 'llama2:latest',
+ size: 4733363377,
+ digest: 'abc123',
+ modified_at: '2023-10-01',
+ },
+ { name: 'gpt-oss:20b', size: 13780173839, digest: 'def456', modified_at: '2023-10-02' },
+ ],
+ }),
+ });
+
+ const result = await hasModel('gpt-oss:20b');
+
+ expect(result).toBe(true);
+ });
+
+ it('should return false when model does not exist', async () => {
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: true,
+ json: async () => ({
+ models: [
+ {
+ name: 'llama2:latest',
+ size: 4733363377,
+ digest: 'abc123',
+ modified_at: '2023-10-01',
+ },
+ ],
+ }),
+ });
+
+ const result = await hasModel('gpt-oss:20b');
+
+ expect(result).toBe(false);
+ });
+ });
+
+ describe('pullOllamaModel', () => {
+ it('should successfully pull a model and report progress', async () => {
+ const progressUpdates: any[] = [];
+ const onProgress = vi.fn((progress) => progressUpdates.push(progress));
+
+ const mockResponse = {
+ ok: true,
+ body: {
+ getReader: () => ({
+ read: vi
+ .fn()
+ .mockResolvedValueOnce({
+ done: false,
+ value: new TextEncoder().encode(
+ JSON.stringify({ status: 'downloading', completed: 100, total: 1000 }) + '\n'
+ ),
+ })
+ .mockResolvedValueOnce({
+ done: false,
+ value: new TextEncoder().encode(
+ JSON.stringify({ status: 'downloading', completed: 500, total: 1000 }) + '\n'
+ ),
+ })
+ .mockResolvedValueOnce({
+ done: false,
+ value: new TextEncoder().encode(JSON.stringify({ status: 'success' }) + '\n'),
+ })
+ .mockResolvedValueOnce({ done: true }),
+ }),
+ },
+ };
+
+ (globalThis.fetch as any).mockResolvedValueOnce(mockResponse);
+
+ const result = await pullOllamaModel('gpt-oss:20b', onProgress);
+
+ expect(result).toBe(true);
+ expect(onProgress).toHaveBeenCalledTimes(3);
+ expect(progressUpdates).toContainEqual({
+ status: 'downloading',
+ completed: 100,
+ total: 1000,
+ });
+ expect(progressUpdates).toContainEqual({
+ status: 'downloading',
+ completed: 500,
+ total: 1000,
+ });
+ expect(progressUpdates).toContainEqual({ status: 'success' });
+ });
+
+ it('should return false on API error', async () => {
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: false,
+ statusText: 'Model not found',
+ });
+
+ const result = await pullOllamaModel('invalid-model');
+
+ expect(result).toBe(false);
+ });
+ });
+
+ describe('pollForOllama', () => {
+ it('should poll until Ollama is detected', async () => {
+ const onDetected = vi.fn();
+
+ // First call: Ollama not running
+ (globalThis.fetch as any).mockRejectedValueOnce(new Error('Connection refused'));
+
+ const stopPolling = pollForOllama(onDetected, 100);
+
+ // Verify initial call was made
+ expect(globalThis.fetch).toHaveBeenCalledTimes(1);
+
+ // Second call: Still not running
+ (globalThis.fetch as any).mockRejectedValueOnce(new Error('Connection refused'));
+ vi.advanceTimersByTime(100);
+
+ // Third call: Ollama is running
+ (globalThis.fetch as any).mockResolvedValueOnce({
+ ok: true,
+ json: async () => ({ models: [] }),
+ });
+ vi.advanceTimersByTime(100);
+
+ // Wait for async operations
+ await vi.runAllTimersAsync();
+
+ expect(onDetected).toHaveBeenCalledWith({
+ isRunning: true,
+ host: 'http://127.0.0.1:11434',
+ });
+
+ stopPolling();
+ });
+
+ it('should stop polling when stop function is called', () => {
+ const onDetected = vi.fn();
+
+ (globalThis.fetch as any).mockRejectedValue(new Error('Connection refused'));
+
+ const stopPolling = pollForOllama(onDetected, 100);
+
+ // Should make initial call
+ expect(globalThis.fetch).toHaveBeenCalledTimes(1);
+
+ // Stop polling
+ stopPolling();
+
+ // Advance time and verify no more calls are made
+ vi.advanceTimersByTime(500);
+
+ // Only the initial call should have been made
+ expect(globalThis.fetch).toHaveBeenCalledTimes(1);
+ expect(onDetected).not.toHaveBeenCalled();
+ });
+ });
+
+ describe('utility functions', () => {
+ it('should return correct download URL', () => {
+ expect(getOllamaDownloadUrl()).toBe('https://ollama.com/download');
+ });
+
+ it('should return correct preferred model', () => {
+ expect(getPreferredModel()).toBe('gpt-oss:20b');
+ });
+ });
+});
diff --git a/ui/desktop/src/utils/ollamaDetection.ts b/ui/desktop/src/utils/ollamaDetection.ts
new file mode 100644
index 000000000000..3bb21713bf33
--- /dev/null
+++ b/ui/desktop/src/utils/ollamaDetection.ts
@@ -0,0 +1,211 @@
// Local Ollama daemon's default REST endpoint.
const DEFAULT_OLLAMA_HOST = 'http://127.0.0.1:11434';
// Page users are sent to when Ollama is not installed.
const OLLAMA_DOWNLOAD_URL = 'https://ollama.com/download';
// Model this app suggests pulling by default.
const PREFERRED_MODEL = 'gpt-oss:20b';

/** Result of probing the local Ollama daemon. */
export interface OllamaStatus {
  isRunning: boolean;
  host: string;
  // Populated only when the probe failed (isRunning === false).
  error?: string;
}

/** One entry from Ollama's /api/tags model listing. */
export interface OllamaModel {
  name: string;
  size: number; // presumably bytes on disk — TODO confirm against Ollama API docs
  digest: string;
  modified_at: string;
}

/** One NDJSON progress line emitted by Ollama's streaming /api/pull. */
export interface PullProgress {
  status: string;
  digest?: string;
  total?: number;
  completed?: number;
}
+
+/**
+ * Check if Ollama is running on the default port
+ */
+export async function checkOllamaStatus(): Promise {
+ try {
+ // Create an AbortController for timeout
+ const controller = new AbortController();
+ const timeoutId = window.setTimeout(() => controller.abort(), 2000);
+
+ try {
+ // Ollama exposes a health endpoint at /api/tags
+ const response = await fetch(`${DEFAULT_OLLAMA_HOST}/api/tags`, {
+ method: 'GET',
+ signal: controller.signal,
+ });
+
+ window.clearTimeout(timeoutId);
+
+ return {
+ isRunning: response.ok,
+ host: DEFAULT_OLLAMA_HOST,
+ };
+ } catch (err) {
+ window.clearTimeout(timeoutId);
+ throw err;
+ }
+ } catch (error) {
+ return {
+ isRunning: false,
+ host: DEFAULT_OLLAMA_HOST,
+ error: error instanceof Error ? error.message : 'Unknown error',
+ };
+ }
+}
+
+/**
+ * Get the Ollama download URL
+ */
+export function getOllamaDownloadUrl(): string {
+ return OLLAMA_DOWNLOAD_URL;
+}
+
+/**
+ * Get the preferred model name
+ */
+export function getPreferredModel(): string {
+ return PREFERRED_MODEL;
+}
+
+/**
+ * Check which models are available in Ollama
+ */
+export async function getOllamaModels(): Promise {
+ try {
+ const controller = new AbortController();
+ const timeoutId = window.setTimeout(() => controller.abort(), 5000);
+
+ try {
+ const response = await fetch(`${DEFAULT_OLLAMA_HOST}/api/tags`, {
+ method: 'GET',
+ signal: controller.signal,
+ });
+
+ window.clearTimeout(timeoutId);
+
+ if (!response.ok) {
+ throw new Error(`Failed to get models: ${response.statusText}`);
+ }
+
+ const data = await response.json();
+ return data.models || [];
+ } catch (err) {
+ window.clearTimeout(timeoutId);
+ throw err;
+ }
+ } catch (error) {
+ console.error('Failed to get Ollama models:', error);
+ return [];
+ }
+}
+
+/**
+ * Check if a specific model is available
+ */
+export async function hasModel(modelName: string): Promise {
+ const models = await getOllamaModels();
+ return models.some((model) => model.name === modelName);
+}
+
+/**
+ * Pull a model from Ollama
+ */
+export async function pullOllamaModel(
+ modelName: string,
+ onProgress?: (progress: PullProgress) => void
+): Promise {
+ try {
+ const response = await fetch(`${DEFAULT_OLLAMA_HOST}/api/pull`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({
+ name: modelName,
+ stream: true,
+ }),
+ });
+
+ if (!response.ok) {
+ throw new Error(`Failed to pull model: ${response.statusText}`);
+ }
+
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error('No response body');
+ }
+
+ const decoder = new window.TextDecoder();
+ let done = false;
+
+ while (!done) {
+ const { value, done: readerDone } = await reader.read();
+ done = readerDone;
+
+ if (value) {
+ const text = decoder.decode(value);
+ const lines = text.split('\n').filter((line) => line.trim());
+
+ for (const line of lines) {
+ try {
+ const progress = JSON.parse(line) as PullProgress;
+ if (onProgress) {
+ onProgress(progress);
+ }
+ } catch (e) {
+ // Ignore parse errors
+ }
+ }
+ }
+ }
+
+ return true;
+ } catch (error) {
+ console.error('Failed to pull model:', error);
+ return false;
+ }
+}
+
+/**
+ * Poll for Ollama availability
+ * @param onDetected Callback when Ollama is detected
+ * @param intervalMs Polling interval in milliseconds
+ * @returns A function to stop polling
+ */
+export function pollForOllama(
+ onDetected: (status: OllamaStatus) => void,
+ intervalMs: number = 5000
+): () => void {
+ let intervalId: number | null = null;
+ let isPolling = true;
+
+ const poll = async () => {
+ if (!isPolling) return;
+
+ const status = await checkOllamaStatus();
+ if (status.isRunning) {
+ onDetected(status);
+ stopPolling();
+ }
+ };
+
+ const stopPolling = () => {
+ isPolling = false;
+ if (intervalId) {
+ window.clearInterval(intervalId);
+ intervalId = null;
+ }
+ };
+
+ // Start polling immediately
+ poll();
+
+ // Then poll at intervals
+ intervalId = window.setInterval(poll, intervalMs);
+
+ return stopPolling;
+}