diff --git a/extensions/llamacpp-extension/settings.json b/extensions/llamacpp-extension/settings.json
index 3c0964fc62..ddbefa9366 100644
--- a/extensions/llamacpp-extension/settings.json
+++ b/extensions/llamacpp-extension/settings.json
@@ -16,7 +16,7 @@
     "description": "Environmental variables for llama.cpp(KEY=VALUE), separated by ';'",
     "controllerType": "input",
     "controllerProps": {
-      "value": "none",
+      "value": "",
       "placeholder": "Eg. GGML_VK_VISIBLE_DEVICES=0,1",
       "type": "text",
       "textAlign": "right"
diff --git a/extensions/llamacpp-extension/src/index.ts b/extensions/llamacpp-extension/src/index.ts
index fe4f2f34cc..785966a325 100644
--- a/extensions/llamacpp-extension/src/index.ts
+++ b/extensions/llamacpp-extension/src/index.ts
@@ -1082,9 +1082,9 @@ export default class llamacpp_extension extends AIEngine {
       // If we reach here, download completed successfully (including validation)
       // The downloadFiles function only returns successfully if all files downloaded AND validated
-      events.emit(DownloadEvent.onFileDownloadAndVerificationSuccess, {
-        modelId,
-        downloadType: 'Model'
-      })
+      events.emit(DownloadEvent.onFileDownloadAndVerificationSuccess, {
+        modelId,
+        downloadType: 'Model',
+      })
     } catch (error) {
       logger.error('Error downloading model:', modelId, opts, error)
@@ -1092,7 +1092,8 @@ export default class llamacpp_extension extends AIEngine {
         error instanceof Error ? error.message : String(error)

       // Check if this is a cancellation
-      const isCancellationError = errorMessage.includes('Download cancelled') ||
+      const isCancellationError =
+        errorMessage.includes('Download cancelled') ||
         errorMessage.includes('Validation cancelled') ||
         errorMessage.includes('Hash computation cancelled') ||
         errorMessage.includes('cancelled') ||
@@ -1372,7 +1373,7 @@ export default class llamacpp_extension extends AIEngine {
     envs['LLAMA_API_KEY'] = api_key

     // set user envs
-    this.parseEnvFromString(envs, this.llamacpp_env)
+    if (this.llamacpp_env) this.parseEnvFromString(envs, this.llamacpp_env)

     // model option is required
     // NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
@@ -1751,7 +1752,7 @@ export default class llamacpp_extension extends AIEngine {
     }
     // set envs
     const envs: Record<string, string> = {}
-    this.parseEnvFromString(envs, this.llamacpp_env)
+    if (this.llamacpp_env) this.parseEnvFromString(envs, this.llamacpp_env)

     // Ensure backend is downloaded and ready before proceeding
     await this.ensureBackendReady(backend, version)
@@ -1767,7 +1768,7 @@ export default class llamacpp_extension extends AIEngine {
       return dList
     } catch (error) {
       logger.error('Failed to query devices:\n', error)
-      throw new Error("Failed to load llamacpp backend")
+      throw new Error('Failed to load llamacpp backend')
     }
   }
@@ -1876,7 +1877,7 @@ export default class llamacpp_extension extends AIEngine {
       logger.info(
         `Using explicit key_length: ${keyLen}, value_length: ${valLen}`
       )
-      headDim = (keyLen + valLen)
+      headDim = keyLen + valLen
     } else {
       // Fall back to embedding_length estimation
       const embeddingLen = Number(meta[`${arch}.embedding_length`])
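Note on the two `parseEnvFromString` guards above: with the setting's default changed from the sentinel `"none"` to an empty string, an unguarded call would try to parse `""`. For reference, a minimal sketch of the `KEY=VALUE;KEY=VALUE` format documented by the setting's description and placeholder; the extension's real `parseEnvFromString` in `index.ts` may differ:

```ts
// Sketch only: parses "KEY=VALUE" pairs separated by ';' into an env map,
// mirroring the format documented in settings.json. Not the actual implementation.
function parseEnvFromString(
  envs: Record<string, string>,
  input: string
): void {
  for (const pair of input.split(';')) {
    const eq = pair.indexOf('=')
    if (eq > 0) {
      // e.g. "GGML_VK_VISIBLE_DEVICES=0,1" -> envs['GGML_VK_VISIBLE_DEVICES'] = '0,1'
      envs[pair.slice(0, eq).trim()] = pair.slice(eq + 1).trim()
    }
  }
}
```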
"build:tauri:linux": "yarn download:bin && NO_STRIP=1 ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build --verbose && ./src-tauri/build-utils/buildAppImage.sh", "build:tauri:darwin": "yarn tauri build --target universal-apple-darwin", "build:tauri": "yarn build:icon && yarn copy:assets:tauri && run-script-os", "build:tauri:plugin:api": "cd src-tauri/plugins && yarn install && yarn workspaces foreach -Apt run build", diff --git a/src-tauri/capabilities/system-monitor-window.json b/src-tauri/capabilities/system-monitor-window.json index 572cc08400..740bb82cc8 100644 --- a/src-tauri/capabilities/system-monitor-window.json +++ b/src-tauri/capabilities/system-monitor-window.json @@ -9,6 +9,11 @@ "core:window:allow-set-theme", "log:default", "core:webview:allow-create-webview-window", - "core:window:allow-set-focus" + "core:window:allow-set-focus", + "hardware:allow-get-system-info", + "hardware:allow-get-system-usage", + "llamacpp:allow-get-devices", + "llamacpp:allow-read-gguf-metadata", + "deep-link:allow-get-current" ] } diff --git a/web-app/src/components/ui/hover-card.tsx b/web-app/src/components/ui/hover-card.tsx index 00236b08a5..a06451eade 100644 --- a/web-app/src/components/ui/hover-card.tsx +++ b/web-app/src/components/ui/hover-card.tsx @@ -6,7 +6,14 @@ import { cn } from '@/lib/utils' function HoverCard({ ...props }: React.ComponentProps) { - return + return ( + + ) } function HoverCardTrigger({ diff --git a/web-app/src/containers/ChatInput.tsx b/web-app/src/containers/ChatInput.tsx index 0fa7a4b32e..f7be420c7d 100644 --- a/web-app/src/containers/ChatInput.tsx +++ b/web-app/src/containers/ChatInput.tsx @@ -107,9 +107,15 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => { if (selectedProvider === 'llamacpp') { const hasLocalMmproj = await checkMmprojExists(selectedModel.id) setHasMmproj(hasLocalMmproj) - } else { - // For non-llamacpp providers, only check vision capability + } + // For non-llamacpp providers, only check vision capability + else if ( + selectedProvider !== 'llamacpp' && + selectedModel?.capabilities?.includes('vision') + ) { setHasMmproj(true) + } else { + setHasMmproj(false) } } catch (error) { console.error('Error checking mmproj:', error) @@ -119,7 +125,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => { } checkMmprojSupport() - }, [selectedModel?.id, selectedProvider]) + }, [selectedModel?.capabilities, selectedModel?.id, selectedProvider]) // Check if there are active MCP servers const hasActiveMCPServers = connectedServers.length > 0 || tools.length > 0 @@ -535,29 +541,41 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => { )} {/* File attachment - show only for models with mmproj */} {hasMmproj && ( -
- - -
+ + + +
+ + +
+
+ +

{t('vision')}

+
+
+
)} {/* Microphone - always available - Temp Hide */} - {/*
+ {/*
*/} {selectedModel?.capabilities?.includes('embeddings') && ( -
+
{ return (
@@ -632,7 +650,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => { -
+
{ -
+
-            {currentModel?.settings && provider && (
-
-            )
+            {currentModel?.settings &&
+              provider &&
+              provider.provider === 'llamacpp' && (
+
+              )
diff --git a/web-app/src/containers/ModelInfoHoverCard.tsx b/web-app/src/containers/ModelInfoHoverCard.tsx
--- a/web-app/src/containers/ModelInfoHoverCard.tsx
+++ b/web-app/src/containers/ModelInfoHoverCard.tsx
   onCheckModelSupport: (variant: ModelQuant) => void
@@ -19,12 +19,12 @@ interface ModelInfoHoverCardProps {
 export const ModelInfoHoverCard = ({
   model,
   variant,
+  isDefaultVariant,
   defaultModelQuantizations,
   modelSupportStatus,
   onCheckModelSupport,
   children,
 }: ModelInfoHoverCardProps) => {
-  const isVariantMode = !!variant
   const displayVariant =
     variant ||
     model.quants.find((m) =>
@@ -95,8 +95,8 @@ export const ModelInfoHoverCard = ({
       {children || (
)} @@ -106,10 +106,10 @@ export const ModelInfoHoverCard = ({ {/* Header */}

- {isVariantMode ? variant.model_id : model.model_name} + {!isDefaultVariant ? variant?.model_id : model?.model_name}

- {isVariantMode + {!isDefaultVariant ? 'Model Variant Information' : 'Model Information'}

@@ -118,57 +118,21 @@ export const ModelInfoHoverCard = ({ {/* Main Info Grid */}
- {isVariantMode ? ( - <> -
- - File Size - - - {variant.file_size} - -
-
- - Quantization - - - {variant.model_id.split('-').pop()?.toUpperCase() || - 'N/A'} - -
- - ) : ( - <> -
- - Downloads - - - {model.downloads?.toLocaleString() || '0'} - -
-
- Variants - - {model.quants?.length || 0} - -
- - )} -
- -
- {!isVariantMode && ( + <>
- Default Size + {isDefaultVariant + ? 'Maybe Default Quantization' + : 'Quantization'} - {displayVariant?.file_size || 'N/A'} + {variant?.model_id.split('-').pop()?.toUpperCase() || 'N/A'}
- )} + +
+ +
Compatibility @@ -204,21 +168,6 @@ export const ModelInfoHoverCard = ({
)} - - {/* Content Section */} -
-
- {isVariantMode ? 'Download URL' : 'Description'} -
-
- {isVariantMode ? ( -
{variant.path}
- ) : ( - extractDescription(model?.description) || - 'No description available' - )} -
-
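A note on the `ModelInfoHoverCard` changes above: the old `isVariantMode = !!variant` flag is replaced by an explicit `isDefaultVariant` prop, and the displayed quantization label is derived from the variant id. A sketch of that derivation as a standalone helper (the helper name is ours; the component inlines the expression):

```ts
// Hypothetical helper: the hover card inlines this as
// variant?.model_id.split('-').pop()?.toUpperCase() || 'N/A'.
function quantFromModelId(modelId: string | undefined): string {
  return modelId?.split('-').pop()?.toUpperCase() ?? 'N/A'
}

// e.g. quantFromModelId('llama-3.2-1b-instruct-q4_k_m') === 'Q4_K_M'
// (the model id here is an illustrative example, not one from the diff)
```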
diff --git a/web-app/src/containers/ModelSupportStatus.tsx b/web-app/src/containers/ModelSupportStatus.tsx
index 3667f4461d..43827f58e9 100644
--- a/web-app/src/containers/ModelSupportStatus.tsx
+++ b/web-app/src/containers/ModelSupportStatus.tsx
@@ -7,7 +7,8 @@ import {
   TooltipTrigger,
 } from '@/components/ui/tooltip'
 import { isModelSupported } from '@/services/models'
-import { getJanDataFolderPath, joinPath } from '@janhq/core'
+import { getJanDataFolderPath, joinPath, fs } from '@janhq/core'
+import { invoke } from '@tauri-apps/api/core'

 interface ModelSupportStatusProps {
   modelId: string | undefined
@@ -31,12 +32,12 @@ export const ModelSupportStatus = ({
     async (
       id: string,
       ctxSize: number
-    ): Promise<'RED' | 'YELLOW' | 'GREEN'> => {
+    ): Promise<'RED' | 'YELLOW' | 'GREEN' | null> => {
       try {
-        // Get Jan's data folder path and construct the full model file path
-        // Following the llamacpp extension structure: /llamacpp/models//model.gguf
         const janDataFolder = await getJanDataFolderPath()
-        const modelFilePath = await joinPath([
+
+        // First try the standard downloaded model path
+        const ggufModelPath = await joinPath([
           janDataFolder,
           'llamacpp',
           'models',
           id,
           'model.gguf',
         ])
@@ -44,14 +45,47 @@ export const ModelSupportStatus = ({
-        return await isModelSupported(modelFilePath, ctxSize)
+        // Check if the standard model.gguf file exists
+        if (await fs.existsSync(ggufModelPath)) {
+          return await isModelSupported(ggufModelPath, ctxSize)
+        }
+
+        // If model.gguf doesn't exist, try reading from model.yml (for imported models)
+        const modelConfigPath = await joinPath([
+          janDataFolder,
+          'llamacpp',
+          'models',
+          id,
+          'model.yml',
+        ])
+
+        if (!(await fs.existsSync(modelConfigPath))) {
+          console.error(
+            `Neither model.gguf nor model.yml found for model: ${id}`
+          )
+          return null
+        }
+
+        // Read the model configuration to get the actual model path
+        const modelConfig = await invoke<{ model_path: string }>('read_yaml', {
+          path: `llamacpp/models/${id}/model.yml`,
+        })
+
+        // Handle both absolute and relative paths
+        const actualModelPath =
+          modelConfig.model_path.startsWith('/') ||
+          modelConfig.model_path.match(/^[A-Za-z]:/)
+            ? modelConfig.model_path // absolute path, use as-is
+            : await joinPath([janDataFolder, modelConfig.model_path]) // relative path, join with data folder
+
+        return await isModelSupported(actualModelPath, ctxSize)
       } catch (error) {
         console.error(
-          'Error checking model support with constructed path:',
+          'Error checking model support with path resolution:',
           error
         )
         // If path construction or model support check fails, assume not supported
-        return 'RED'
+        return null
       }
     },
     []
diff --git a/web-app/src/containers/dialogs/EditModel.tsx b/web-app/src/containers/dialogs/EditModel.tsx
index 5fd9b3f853..68a268640e 100644
--- a/web-app/src/containers/dialogs/EditModel.tsx
+++ b/web-app/src/containers/dialogs/EditModel.tsx
@@ -7,11 +7,7 @@ import {
   DialogTrigger,
 } from '@/components/ui/dialog'
 import { Switch } from '@/components/ui/switch'
-import {
-  Tooltip,
-  TooltipContent,
-  TooltipTrigger,
-} from '@/components/ui/tooltip'
+
 import { useModelProvider } from '@/hooks/useModelProvider'
 import {
   IconPencil,
@@ -19,7 +15,7 @@ import {
   IconTool,
   // IconWorld,
   // IconAtom,
-  IconCodeCircle2,
+  // IconCodeCircle2,
 } from '@tabler/icons-react'
 import { useState, useEffect } from 'react'
 import { useTranslation } from '@/i18n/react-i18next-compat'
@@ -177,24 +173,16 @@ export const DialogEditModel = ({
             {t('providers:editModel.vision')}
- - - - handleCapabilityChange('vision', checked) - } - /> - - - {t('providers:editModel.notAvailable')} - - + + handleCapabilityChange('vision', checked) + } + />
-
+ {/*
@@ -216,7 +204,7 @@ export const DialogEditModel = ({ {t('providers:editModel.notAvailable')} -
+
*/} {/*
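One note on the `ModelSupportStatus` change above: for imported models it resolves the real GGUF location from `model.yml`, treating a leading `/` or a Windows drive letter as an absolute path. A condensed sketch of that resolution rule, using the `@janhq/core` helpers the diff already imports (the standalone helper name is ours; the component inlines this logic):

```ts
import { getJanDataFolderPath, joinPath } from '@janhq/core'

// Sketch of the absolute-vs-relative rule used in ModelSupportStatus above.
async function resolveModelPath(modelPath: string): Promise<string> {
  const isAbsolute =
    modelPath.startsWith('/') || /^[A-Za-z]:/.test(modelPath)
  if (isAbsolute) return modelPath // absolute path, use as-is
  const janDataFolder = await getJanDataFolderPath()
  return joinPath([janDataFolder, modelPath]) // relative to Jan's data folder
}
```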
diff --git a/web-app/src/hooks/useModelProvider.ts b/web-app/src/hooks/useModelProvider.ts
index 9be26ce41c..f0ee6a2fc8 100644
--- a/web-app/src/hooks/useModelProvider.ts
+++ b/web-app/src/hooks/useModelProvider.ts
@@ -241,7 +241,7 @@ export const useModelProvider = create()(
         }

         // Migrate model settings
-        if (provider.models) {
+        if (provider.models && provider.provider === 'llamacpp') {
           provider.models.forEach((model) => {
             if (!model.settings) model.settings = {}

diff --git a/web-app/src/locales/en/settings.json b/web-app/src/locales/en/settings.json
index 17e59d3bb8..cf3d8ec170 100644
--- a/web-app/src/locales/en/settings.json
+++ b/web-app/src/locales/en/settings.json
@@ -37,7 +37,7 @@
   "reportAnIssueDesc": "Found a bug? Help us out by filing an issue on GitHub.",
   "reportIssue": "Report Issue",
   "credits": "Credits",
-  "creditsDesc1": "Jan is built with ❤️ by the Menlo Team.",
+  "creditsDesc1": "👋 Jan is built with ❤️ by the Menlo Research team.",
   "creditsDesc2": "Special thanks to our open-source dependencies—especially llama.cpp and Tauri—and to our amazing AI community.",
   "appVersion": "App Version",
   "dataFolder": {
@@ -234,7 +234,7 @@
   "reportAnIssueDesc": "Found a bug? Help us out by filing an issue on GitHub.",
   "reportIssue": "Report Issue",
   "credits": "Credits",
-  "creditsDesc1": "Jan is built with ❤️ by the Menlo Team.",
+  "creditsDesc1": "👋 Jan is built with ❤️ by the Menlo Research team.",
   "creditsDesc2": "Special thanks to our open-source dependencies—especially llama.cpp and Tauri—and to our amazing AI community."
 },
 "extensions": {
diff --git a/web-app/src/routes/hub/index.tsx b/web-app/src/routes/hub/index.tsx
index 07dd0f85b9..081009fcd9 100644
--- a/web-app/src/routes/hub/index.tsx
+++ b/web-app/src/routes/hub/index.tsx
@@ -353,12 +353,7 @@ function Hub() {
       // Immediately set local downloading state
       addLocalDownloadingModel(modelId)
       const mmprojPath = model.mmproj_models?.[0]?.path
-      pullModelWithMetadata(
-        modelId,
-        modelUrl,
-        mmprojPath,
-        huggingfaceToken
-      )
+      pullModelWithMetadata(modelId, modelUrl, mmprojPath, huggingfaceToken)
     }

     return (
@@ -399,13 +394,13 @@ function Hub() {
       )
     }
   }, [
+    localDownloadingModels,
     downloadProcesses,
     llamaProvider?.models,
     isRecommendedModel,
-    downloadButtonRef,
-    localDownloadingModels,
-    addLocalDownloadingModel,
     t,
+    addLocalDownloadingModel,
+    huggingfaceToken,
     handleUseModel,
   ])

@@ -482,9 +477,9 @@ function Hub() {
   const isLastStep = currentStepIndex === steps.length - 1

   const renderFilter = () => {
-    if (searchValue.length === 0)
-      return (
-        <>
+    return (
+      <>
+        {searchValue.length === 0 && (
            ))}
-
- - - {t('hub:downloaded')} - -
- - ) + )} +
+ + + {t('hub:downloaded')} + +
+
+    )
   }

   return (
@@ -661,6 +657,18 @@ function Hub() {
                         defaultModelQuantizations={
                           defaultModelQuantizations
                         }
+                        variant={
+                          filteredModels[
+                            virtualItem.index
+                          ].quants.find((m) =>
+                            defaultModelQuantizations.some((e) =>
+                              m.model_id.toLowerCase().includes(e)
+                            )
+                          ) ??
+                          filteredModels[virtualItem.index]
+                            .quants?.[0]
+                        }
+                        isDefaultVariant={true}
                         modelSupportStatus={modelSupportStatus}
                         onCheckModelSupport={checkModelSupport}
                       />
diff --git a/web-app/src/routes/settings/providers/$providerName.tsx b/web-app/src/routes/settings/providers/$providerName.tsx
index 9d456cc402..6f9e7efc01 100644
--- a/web-app/src/routes/settings/providers/$providerName.tsx
+++ b/web-app/src/routes/settings/providers/$providerName.tsx
@@ -584,10 +584,12 @@ function ProviderDetail() {
         }
         actions={
-
+          {provider && provider.provider !== 'llamacpp' && (
+
+          )}
           {model.settings && (
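For context on the `variant` and `isDefaultVariant` props the Hub now passes to `ModelInfoHoverCard` (the `@@ -661` hunk above): the default variant is the first quant whose id contains one of the preferred quantization strings, falling back to the first quant. A self-contained sketch with simplified types (the type shape and helper name are ours; the route inlines the expression):

```ts
// Simplified from the Hub route's inline expression; ModelQuant is reduced
// to the one field this lookup needs.
interface ModelQuant {
  model_id: string
}

function pickDefaultVariant(
  quants: ModelQuant[],
  defaultQuantizations: string[]
): ModelQuant | undefined {
  return (
    quants.find((q) =>
      defaultQuantizations.some((d) => q.model_id.toLowerCase().includes(d))
    ) ?? quants[0]
  )
}

// e.g. pickDefaultVariant(quants, ['q4_k_m', 'q8_0']) picks the Q4_K_M build
// when present, otherwise the first listed quant.
```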