11 changes: 9 additions & 2 deletions src/api/providers/fetchers/modelCache.ts
@@ -273,13 +273,20 @@ export async function initializeModelCacheRefresh(): Promise<void> {
  *
  * @param router - The router to flush models for.
  * @param refresh - If true, immediately fetch fresh data from API
+ * @param options - Optional provider options (e.g., apiKey, baseUrl) needed for certain providers
  */
-export const flushModels = async (router: RouterName, refresh: boolean = false): Promise<void> => {
+export const flushModels = async (
+	router: RouterName,
+	refresh: boolean = false,
+	options?: GetModelsOptions,
+): Promise<void> => {
 	if (refresh) {
 		// Don't delete memory cache - let refreshModels atomically replace it
 		// This prevents a race condition where getModels() might be called
 		// before refresh completes, avoiding a gap in cache availability
-		refreshModels({ provider: router } as GetModelsOptions).catch((error) => {
+		// Use provided options if available, otherwise fall back to minimal options
+		const refreshOptions = options || ({ provider: router } as GetModelsOptions)
+		refreshModels(refreshOptions).catch((error) => {
 			console.error(`[flushModels] Refresh failed for ${router}:`, error)
 		})
 	} else {
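For context, a minimal caller sketch of the new `flushModels` signature, showing how explicit provider credentials flow into the background refresh. The import paths and the wrapper function name here are illustrative assumptions, not code from this PR:

```typescript
// Illustrative sketch only: import paths and helper name are assumed, not from the PR.
import { flushModels } from "../api/providers/fetchers/modelCache"
import type { GetModelsOptions } from "../shared/api" // assumed location of the type

async function refreshLitellmModelCache(apiKey: string, baseUrl: string): Promise<void> {
	// With refresh = true, flushModels now forwards these options to refreshModels,
	// so the fetch authenticates with the supplied credentials instead of using
	// the minimal { provider } fallback.
	const options: GetModelsOptions = { provider: "litellm", apiKey, baseUrl }
	await flushModels("litellm", true, options)
}
```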
7 changes: 6 additions & 1 deletion src/core/webview/webviewMessageHandler.ts
@@ -876,7 +876,12 @@ export const webviewMessageHandler = async (
 			// If explicit credentials are provided in message.values (from Refresh Models button),
 			// flush the cache first to ensure we fetch fresh data with the new credentials
 			if (message?.values?.litellmApiKey || message?.values?.litellmBaseUrl) {
-				await flushModels("litellm", true)
+				const litellmFlushOptions: GetModelsOptions = {
+					provider: "litellm",
+					apiKey: litellmApiKey,
+					baseUrl: litellmBaseUrl,
+				}
+				await flushModels("litellm", true, litellmFlushOptions)
 			}

 			candidates.push({
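The hunk references `litellmApiKey` and `litellmBaseUrl`, which are resolved above the visible lines. A minimal sketch of that kind of resolution, under the assumption that the handler prefers message values over stored settings; the type and helper names here are hypothetical, not code from the repository:

```typescript
// Illustrative only: field names on the stored configuration are assumed.
type RefreshValues = { litellmApiKey?: string; litellmBaseUrl?: string }

function resolveLitellmCredentials(
	values: RefreshValues | undefined,
	stored: RefreshValues,
): { apiKey?: string; baseUrl?: string } {
	// Prefer credentials sent with the Refresh Models message, falling back
	// to the stored provider configuration.
	return {
		apiKey: values?.litellmApiKey ?? stored.litellmApiKey,
		baseUrl: values?.litellmBaseUrl ?? stored.litellmBaseUrl,
	}
}
```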