37 changes: 37 additions & 0 deletions crates/goose/src/providers/ollama.rs
@@ -285,6 +285,43 @@ impl Provider for OllamaProvider {
}
}))
}

/// Fetch the list of available models from Ollama
Review comment (Collaborator): rm

async fn fetch_supported_models(&self) -> Result<Option<Vec<String>>, ProviderError> {
let response = self
.api_client
.response_get("api/tags")
.await
.map_err(|e| ProviderError::RequestFailed(format!("Failed to fetch models: {}", e)))?;

if !response.status().is_success() {
return Err(ProviderError::RequestFailed(format!(
"Failed to fetch models: HTTP {}",
response.status()
)));
}

let json_response = response.json::<Value>().await.map_err(|e| {
ProviderError::RequestFailed(format!("Failed to parse response: {}", e))
})?;

let models = json_response
.get("models")
.and_then(|m| m.as_array())
.ok_or_else(|| {
ProviderError::RequestFailed("No models array in response".to_string())
})?;

let mut model_names: Vec<String> = models
.iter()
.filter_map(|model| model.get("name").and_then(|n| n.as_str()).map(String::from))
.collect();

// Sort alphabetically
model_names.sort();

Ok(Some(model_names))
}
}
Review comment (Collaborator): this feels more verbose than it needs to be; do we need to sort the models?


impl OllamaProvider {
@@ -13,7 +13,7 @@ interface LeadWorkerSettingsProps {
}

export function LeadWorkerSettings({ isOpen, onClose }: LeadWorkerSettingsProps) {
const { read, upsert, getProviders, remove } = useConfig();
const { read, upsert, getProviders, getProviderModels, remove } = useConfig();
const { currentModel } = useModelAndProvider();
const [leadModel, setLeadModel] = useState<string>('');
const [workerModel, setWorkerModel] = useState<string>('');
Expand Down Expand Up @@ -103,13 +103,39 @@ export function LeadWorkerSettings({ isOpen, onClose }: LeadWorkerSettingsProps)
const providers = await getProviders(false);
const activeProviders = providers.filter((p) => p.is_configured);

activeProviders.forEach(({ metadata, name }) => {
if (metadata.known_models) {
metadata.known_models.forEach((model) => {
const modelPromises = activeProviders.map(async (p) => {
Review comment (Collaborator): this is near-duplicated code, or maybe the LLM did it, but I suggest unifying it (see the sketch after this diff).

try {
const models = await getProviderModels(p.name);
return { provider: p, models, error: null };
} catch (error) {
return { provider: p, models: null, error };
}
});

const results = await Promise.all(modelPromises);

// Process results and build options
results.forEach(({ provider: p, models, error }) => {
if (error) {
console.error(`Error fetching models for provider ${p.name}:`, error);
}

// Use dynamically fetched models if available
if (models && models.length > 0) {
models.forEach((modelName) => {
options.push({
value: modelName,
label: `${modelName} (${p.metadata.display_name})`,
provider: p.name,
});
});
} else if (p.metadata.known_models && p.metadata.known_models.length > 0) {
// Fallback to known_models if no models were fetched or on error
p.metadata.known_models.forEach((model) => {
options.push({
value: model.name,
label: `${model.name} (${metadata.display_name})`,
provider: name,
label: `${model.name} (${p.metadata.display_name})`,
provider: p.name,
});
});
}
@@ -128,7 +154,7 @@ export function LeadWorkerSettings({ isOpen, onClose }: LeadWorkerSettingsProps)
};

loadConfig();
}, [read, getProviders, currentModel, isOpen]);
}, [read, getProviders, getProviderModels, currentModel, isOpen]);

// If current models are not in the list (e.g., previously set to custom), switch to custom mode
useEffect(() => {
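To illustrate the review suggestion about unifying the near-duplicated option-building branches, here is a rough sketch of one possible shape. It is not part of this PR; it assumes the same getProviderModels, activeProviders, and options from the diff above, and that getProviderModels resolves to an array of model-name strings. The resolved variable is hypothetical.

// Hypothetical unified version (not in this PR): resolve each provider's
// model names first (fetched, else known_models), then build options once.
const resolved = await Promise.all(
  activeProviders.map(async (p) => {
    try {
      const fetched = await getProviderModels(p.name);
      if (fetched && fetched.length > 0) {
        return { provider: p, names: fetched };
      }
    } catch (error) {
      console.error(`Error fetching models for provider ${p.name}:`, error);
    }
    // Fall back to the statically declared models for this provider.
    return {
      provider: p,
      names: (p.metadata.known_models ?? []).map((m) => m.name),
    };
  })
);

for (const { provider: p, names } of resolved) {
  for (const name of names) {
    options.push({
      value: name,
      label: `${name} (${p.metadata.display_name})`,
      provider: p.name,
    });
  }
}

The idea is to resolve each provider's model names up front (fetched models, falling back to known_models), so the options-building loop and the label formatting live in a single place.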