diff --git a/packages/types/npm/package.metadata.json b/packages/types/npm/package.metadata.json index 5dbd6299635f..962a789430bb 100644 --- a/packages/types/npm/package.metadata.json +++ b/packages/types/npm/package.metadata.json @@ -1,6 +1,6 @@ { "name": "@roo-code/types", - "version": "1.79.0", + "version": "1.81.0", "description": "TypeScript type definitions for Roo Code.", "publishConfig": { "access": "public", diff --git a/packages/types/src/cloud.ts b/packages/types/src/cloud.ts index 903dfcb93fdf..f6acb28c363d 100644 --- a/packages/types/src/cloud.ts +++ b/packages/types/src/cloud.ts @@ -721,3 +721,25 @@ export type LeaveResponse = { taskId?: string timestamp?: string } + +/** + * UsageStats + */ + +export const usageStatsSchema = z.object({ + success: z.boolean(), + data: z.object({ + dates: z.array(z.string()), // Array of date strings + tasks: z.array(z.number()), // Array of task counts + tokens: z.array(z.number()), // Array of token counts + costs: z.array(z.number()), // Array of costs in USD + totals: z.object({ + tasks: z.number(), + tokens: z.number(), + cost: z.number(), // Total cost in USD + }), + }), + period: z.number(), // Period in days (e.g., 30) +}) + +export type UsageStats = z.infer diff --git a/packages/types/src/model.ts b/packages/types/src/model.ts index 449ee1a7f65a..2e7ccc9c5c9e 100644 --- a/packages/types/src/model.ts +++ b/packages/types/src/model.ts @@ -76,6 +76,8 @@ export const modelInfoSchema = z.object({ minTokensPerCachePoint: z.number().optional(), maxCachePoints: z.number().optional(), cachableFields: z.array(z.string()).optional(), + // Flag to indicate if the model is deprecated and should not be used + deprecated: z.boolean().optional(), /** * Service tiers with pricing information. * Each tier can have a name (for OpenAI service tiers) and pricing overrides. diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index ea14b1decb77..37c2a1e614b1 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -645,7 +645,7 @@ export interface ZgsmUserInfo { */ export const MODELS_BY_PROVIDER: Record< - Exclude, + Exclude, { id: ProviderName; label: string; models: string[] } > = { zgsm: { @@ -736,7 +736,7 @@ export const MODELS_BY_PROVIDER: Record< xai: { id: "xai", label: "xAI (Grok)", models: Object.keys(xaiModels) }, zai: { id: "zai", label: "Zai", models: Object.keys(internationalZAiModels) }, - // Dynamic providers; models pulled from the respective APIs. + // Dynamic providers; models pulled from remote APIs. glama: { id: "glama", label: "Glama", models: [] }, huggingface: { id: "huggingface", label: "Hugging Face", models: [] }, litellm: { id: "litellm", label: "LiteLLM", models: [] }, @@ -745,4 +745,8 @@ export const MODELS_BY_PROVIDER: Record< unbound: { id: "unbound", label: "Unbound", models: [] }, deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] }, "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }, + + // Local providers; models discovered from localhost endpoints. 
+ lmstudio: { id: "lmstudio", label: "LM Studio", models: [] }, + ollama: { id: "ollama", label: "Ollama", models: [] }, } diff --git a/packages/types/src/providers/chutes.ts b/packages/types/src/providers/chutes.ts index d05bd489b1be..53168187e5bc 100644 --- a/packages/types/src/providers/chutes.ts +++ b/packages/types/src/providers/chutes.ts @@ -6,6 +6,9 @@ export type ChutesModelId = | "deepseek-ai/DeepSeek-R1" | "deepseek-ai/DeepSeek-V3" | "deepseek-ai/DeepSeek-V3.1" + | "deepseek-ai/DeepSeek-V3.1-Terminus" + | "deepseek-ai/DeepSeek-V3.1-turbo" + | "deepseek-ai/DeepSeek-V3.2-Exp" | "unsloth/Llama-3.3-70B-Instruct" | "chutesai/Llama-4-Scout-17B-16E-Instruct" | "unsloth/Mistral-Nemo-Instruct-2407" @@ -30,11 +33,13 @@ export type ChutesModelId = | "zai-org/GLM-4.5-Air" | "zai-org/GLM-4.5-FP8" | "zai-org/GLM-4.5-turbo" + | "zai-org/GLM-4.6-FP8" | "moonshotai/Kimi-K2-Instruct-75k" | "moonshotai/Kimi-K2-Instruct-0905" | "Qwen/Qwen3-235B-A22B-Thinking-2507" | "Qwen/Qwen3-Next-80B-A3B-Instruct" | "Qwen/Qwen3-Next-80B-A3B-Thinking" + | "Qwen/Qwen3-VL-235B-A22B-Thinking" export const chutesDefaultModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1-0528" @@ -75,6 +80,36 @@ export const chutesModels = { outputPrice: 0, description: "DeepSeek V3.1 model.", }, + "deepseek-ai/DeepSeek-V3.1-Terminus": { + maxTokens: 163840, + contextWindow: 163840, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.23, + outputPrice: 0.9, + description: + "DeepSeek‑V3.1‑Terminus is an update to V3.1 that improves language consistency by reducing CN/EN mix‑ups and eliminating random characters, while strengthening agent capabilities with notably better Code Agent and Search Agent performance.", + }, + "deepseek-ai/DeepSeek-V3.1-turbo": { + maxTokens: 32768, + contextWindow: 163840, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 1.0, + outputPrice: 3.0, + description: + "DeepSeek-V3.1-turbo is an FP8, speculative-decoding turbo variant optimized for ultra-fast single-shot queries (~200 TPS), with outputs close to the originals and solid function calling/reasoning/structured output, priced at $1/M input and $3/M output tokens, using 2× quota per request and not intended for bulk workloads.", + }, + "deepseek-ai/DeepSeek-V3.2-Exp": { + maxTokens: 163840, + contextWindow: 163840, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.25, + outputPrice: 0.35, + description: + "DeepSeek-V3.2-Exp is an experimental LLM that introduces DeepSeek Sparse Attention to improve long‑context training and inference efficiency while maintaining performance comparable to V3.1‑Terminus.", + }, "unsloth/Llama-3.3-70B-Instruct": { maxTokens: 32768, // From Groq contextWindow: 131072, // From Groq @@ -284,6 +319,16 @@ export const chutesModels = { outputPrice: 3, description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference.", }, + "zai-org/GLM-4.6-FP8": { + maxTokens: 32768, + contextWindow: 202752, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, + description: + "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.", + }, 
"Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": { maxTokens: 32768, contextWindow: 262144, @@ -340,4 +385,14 @@ export const chutesModels = { description: "Reasoning-first model with structured thinking traces for multi-step problems, math proofs, and code synthesis.", }, + "Qwen/Qwen3-VL-235B-A22B-Thinking": { + maxTokens: 262144, + contextWindow: 262144, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.16, + outputPrice: 0.65, + description: + "Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.", + }, } as const satisfies Record diff --git a/packages/types/src/providers/roo.ts b/packages/types/src/providers/roo.ts index 63650ac9a1a4..fd705b1eb978 100644 --- a/packages/types/src/providers/roo.ts +++ b/packages/types/src/providers/roo.ts @@ -38,6 +38,7 @@ export const rooModels = { outputPrice: 0, description: "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. (Note: prompts and completions are logged by xAI and used to improve the model.)", + deprecated: true, }, "deepseek/deepseek-chat-v3.1": { maxTokens: 16_384, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b1bfee2fc663..d354431fac89 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -105,7 +105,7 @@ importers: version: 2.5.2 glob: specifier: ^11.0.1 - version: 11.0.2 + version: 11.0.3 mocha: specifier: ^11.1.0 version: 11.2.2 @@ -950,7 +950,7 @@ importers: version: 9.5.3 glob: specifier: ^11.0.1 - version: 11.0.2 + version: 11.0.3 mkdirp: specifier: ^3.0.1 version: 3.0.1 @@ -1031,7 +1031,7 @@ importers: version: link:../packages/types '@tailwindcss/vite': specifier: ^4.0.0 - version: 4.1.6(vite@6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0)) + version: 4.1.6(vite@6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0)) '@tanstack/react-query': specifier: ^5.68.0 version: 5.76.1(react@18.3.1) @@ -1233,7 +1233,7 @@ importers: version: 1.57.5 '@vitejs/plugin-react': specifier: ^4.3.4 - version: 4.4.1(vite@6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0)) + version: 4.4.1(vite@6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0)) '@vitest/ui': specifier: ^3.2.3 version: 3.2.4(vitest@3.2.4) @@ -1247,8 +1247,8 @@ importers: specifier: 5.8.3 version: 5.8.3 vite: - specifier: 6.3.5 - version: 6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + specifier: 6.3.6 + version: 6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) vitest: specifier: ^3.2.3 version: 3.2.4(@types/debug@4.1.12)(@types/node@20.17.57)(@vitest/ui@3.2.4)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) @@ -4137,6 +4137,9 @@ packages: '@types/node@20.17.57': resolution: {integrity: sha512-f3T4y6VU4fVQDKVqJV4Uppy8c1p/sVvS3peyqxyWnzkqXFJLRU7Y1Bl7rMS1Qe9z0v4M6McY0Fp9yBsgHJUsWQ==} + '@types/node@20.19.19': + resolution: {integrity: sha512-pb1Uqj5WJP7wrcbLU7Ru4QtA0+3kAXrkutGiD26wUKzSMgNNaPARTUDQmElUXp64kh3cWdou3Q0C7qwwxqSFmg==} + '@types/node@24.2.1': resolution: {integrity: sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ==} @@ -4452,6 +4455,10 @@ packages: resolution: {integrity: sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==} 
engines: {node: '>=12'} + ansi-regex@6.2.2: + resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==} + engines: {node: '>=12'} + ansi-styles@3.2.1: resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} engines: {node: '>=4'} @@ -4468,6 +4475,10 @@ packages: resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} engines: {node: '>=12'} + ansi-styles@6.2.3: + resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==} + engines: {node: '>=12'} + any-promise@1.3.0: resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} @@ -5350,6 +5361,15 @@ packages: supports-color: optional: true + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + decamelize@1.2.0: resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} engines: {node: '>=0.10.0'} @@ -6356,11 +6376,6 @@ packages: resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} hasBin: true - glob@11.0.2: - resolution: {integrity: sha512-YT7U7Vye+t5fZ/QMkBFrTJ7ZQxInIUjwyAjVj84CYXqgBdv30MFUPGnBR6sQaVq6Is15wYJUsnzTuWaGRBhBAQ==} - engines: {node: 20 || >=22} - hasBin: true - glob@11.0.3: resolution: {integrity: sha512-2Nim7dha1KVkaiF4q6Dj+ngPPMdfvLJEOpZk/jKiUAkqKebpGAWQXAq9z1xu9HKu5lWfqw/FASuccEjyznjPaA==} engines: {node: 20 || >=22} @@ -6947,10 +6962,6 @@ packages: jackspeak@3.4.3: resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} - jackspeak@4.1.0: - resolution: {integrity: sha512-9DDdhb5j6cpeitCbvLO7n7J4IxnbM6hoF6O1g4HQ5TfhvvKN8ywDM7668ZhMHRqVmxqhps/F6syWK2KcPxYlkw==} - engines: {node: 20 || >=22} - jackspeak@4.1.1: resolution: {integrity: sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ==} engines: {node: 20 || >=22} @@ -7448,6 +7459,10 @@ packages: resolution: {integrity: sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A==} engines: {node: 20 || >=22} + lru-cache@11.2.2: + resolution: {integrity: sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg==} + engines: {node: 20 || >=22} + lru-cache@5.1.1: resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} @@ -9276,6 +9291,10 @@ packages: resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} engines: {node: '>=12'} + strip-ansi@7.1.2: + resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==} + engines: {node: '>=12'} + strip-bom-string@1.0.0: resolution: {integrity: sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==} engines: {node: '>=0.10.0'} @@ -9732,6 +9751,9 @@ packages: undici-types@6.19.8: resolution: {integrity: 
sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==} + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + undici-types@7.10.0: resolution: {integrity: sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag==} @@ -9980,6 +10002,46 @@ packages: yaml: optional: true + vite@6.3.6: + resolution: {integrity: sha512-0msEVHJEScQbhkbVTb/4iHZdJ6SXp/AvxL2sjwYQFfBqleHtnCqv1J3sa9zbWz/6kW1m9Tfzn92vW+kZ1WV6QA==} + engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || ^20.0.0 || >=22.0.0 + jiti: '>=1.21.0' + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + vitest@3.2.4: resolution: {integrity: sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==} engines: {node: ^18.0.0 || ^20.0.0 || >=22.0.0} @@ -11667,7 +11729,7 @@ snapshots: dependencies: string-width: 5.1.2 string-width-cjs: string-width@4.2.3 - strip-ansi: 7.1.0 + strip-ansi: 7.1.2 strip-ansi-cjs: strip-ansi@6.0.1 wrap-ansi: 8.1.0 wrap-ansi-cjs: wrap-ansi@7.0.0 @@ -13526,12 +13588,12 @@ snapshots: postcss-selector-parser: 6.0.10 tailwindcss: 3.4.17 - '@tailwindcss/vite@4.1.6(vite@6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0))': + '@tailwindcss/vite@4.1.6(vite@6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0))': dependencies: '@tailwindcss/node': 4.1.6 '@tailwindcss/oxide': 4.1.6 tailwindcss: 4.1.6 - vite: 6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vite: 6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) '@tanstack/query-core@5.76.0': {} @@ -13846,6 +13908,11 @@ snapshots: dependencies: undici-types: 6.19.8 + '@types/node@20.19.19': + dependencies: + undici-types: 6.21.0 + optional: true + '@types/node@24.2.1': dependencies: undici-types: 7.10.0 @@ -13919,7 +13986,7 @@ snapshots: '@types/ws@8.18.1': dependencies: - '@types/node': 24.2.1 + '@types/node': 20.19.19 optional: true '@types/yargs-parser@21.0.3': {} @@ -14020,14 +14087,14 @@ snapshots: '@ungap/structured-clone@1.3.0': {} - '@vitejs/plugin-react@4.4.1(vite@6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0))': + '@vitejs/plugin-react@4.4.1(vite@6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0))': dependencies: '@babel/core': 7.27.1 '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.27.1) '@babel/plugin-transform-react-jsx-source': 7.27.1(@babel/core@7.27.1) '@types/babel__core': 7.20.5 react-refresh: 0.17.0 - vite: 6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vite: 6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) transitivePeerDependencies: - supports-color @@ -14092,7 +14159,7 @@ snapshots: sirv: 3.0.1 tinyglobby: 0.2.14 tinyrainbow: 2.0.0 - vitest: 
3.2.4(@types/debug@4.1.12)(@types/node@20.17.50)(@vitest/ui@3.2.4)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.2.1)(@vitest/ui@3.2.4)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) '@vitest/utils@3.2.4': dependencies: @@ -14251,6 +14318,8 @@ snapshots: ansi-regex@6.1.0: {} + ansi-regex@6.2.2: {} + ansi-styles@3.2.1: dependencies: color-convert: 1.9.3 @@ -14263,6 +14332,8 @@ snapshots: ansi-styles@6.2.1: {} + ansi-styles@6.2.3: {} + any-promise@1.3.0: {} anymatch@3.1.3: @@ -15242,6 +15313,11 @@ snapshots: optionalDependencies: supports-color: 8.1.1 + debug@4.4.3: + dependencies: + ms: 2.1.3 + optional: true + decamelize@1.2.0: {} decamelize@4.0.0: {} @@ -16285,7 +16361,7 @@ snapshots: gel@2.1.0: dependencies: '@petamoriken/float16': 3.9.2 - debug: 4.4.1(supports-color@8.1.1) + debug: 4.4.3 env-paths: 3.0.0 semver: 7.7.2 shell-quote: 1.8.3 @@ -16379,15 +16455,6 @@ snapshots: package-json-from-dist: 1.0.1 path-scurry: 1.11.1 - glob@11.0.2: - dependencies: - foreground-child: 3.3.1 - jackspeak: 4.1.0 - minimatch: 10.0.1 - minipass: 7.1.2 - package-json-from-dist: 1.0.1 - path-scurry: 2.0.0 - glob@11.0.3: dependencies: foreground-child: 3.3.1 @@ -17029,10 +17096,6 @@ snapshots: optionalDependencies: '@pkgjs/parseargs': 0.11.0 - jackspeak@4.1.0: - dependencies: - '@isaacs/cliui': 8.0.2 - jackspeak@4.1.1: dependencies: '@isaacs/cliui': 8.0.2 @@ -17541,6 +17604,8 @@ snapshots: lru-cache@11.1.0: {} + lru-cache@11.2.2: {} + lru-cache@5.1.1: dependencies: yallist: 3.1.1 @@ -18624,7 +18689,7 @@ snapshots: path-scurry@2.0.0: dependencies: - lru-cache: 11.1.0 + lru-cache: 11.2.2 minipass: 7.1.2 path-to-regexp@8.2.0: {} @@ -19825,7 +19890,7 @@ snapshots: dependencies: eastasianwidth: 0.2.0 emoji-regex: 9.2.2 - strip-ansi: 7.1.0 + strip-ansi: 7.1.2 string-width@7.2.0: dependencies: @@ -19896,6 +19961,10 @@ snapshots: dependencies: ansi-regex: 6.1.0 + strip-ansi@7.1.2: + dependencies: + ansi-regex: 6.2.2 + strip-bom-string@1.0.0: {} strip-bom@3.0.0: {} @@ -20371,6 +20440,9 @@ snapshots: undici-types@6.19.8: {} + undici-types@6.21.0: + optional: true + undici-types@7.10.0: {} undici@6.21.3: {} @@ -20619,7 +20691,7 @@ snapshots: debug: 4.4.1(supports-color@8.1.1) es-module-lexer: 1.7.0 pathe: 2.0.3 - vite: 6.3.5(@types/node@20.17.50)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vite: 6.3.6(@types/node@20.17.50)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) transitivePeerDependencies: - '@types/node' - jiti @@ -20640,7 +20712,7 @@ snapshots: debug: 4.4.1(supports-color@8.1.1) es-module-lexer: 1.7.0 pathe: 2.0.3 - vite: 6.3.5(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vite: 6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) transitivePeerDependencies: - '@types/node' - jiti @@ -20661,7 +20733,7 @@ snapshots: debug: 4.4.1(supports-color@8.1.1) es-module-lexer: 1.7.0 pathe: 2.0.3 - vite: 6.3.5(@types/node@24.2.1)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) + vite: 6.3.6(@types/node@24.2.1)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0) transitivePeerDependencies: - '@types/node' - jiti @@ -20724,6 +20796,54 @@ snapshots: tsx: 4.19.4 yaml: 2.8.0 + vite@6.3.6(@types/node@20.17.50)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0): + dependencies: + esbuild: 0.25.9 + fdir: 6.4.6(picomatch@4.0.2) + picomatch: 4.0.2 + postcss: 8.5.4 + rollup: 4.40.2 + tinyglobby: 0.2.14 + 
optionalDependencies: + '@types/node': 20.17.50 + fsevents: 2.3.3 + jiti: 2.4.2 + lightningcss: 1.30.1 + tsx: 4.19.4 + yaml: 2.8.0 + + vite@6.3.6(@types/node@20.17.57)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0): + dependencies: + esbuild: 0.25.9 + fdir: 6.4.6(picomatch@4.0.2) + picomatch: 4.0.2 + postcss: 8.5.4 + rollup: 4.40.2 + tinyglobby: 0.2.14 + optionalDependencies: + '@types/node': 20.17.57 + fsevents: 2.3.3 + jiti: 2.4.2 + lightningcss: 1.30.1 + tsx: 4.19.4 + yaml: 2.8.0 + + vite@6.3.6(@types/node@24.2.1)(jiti@2.4.2)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0): + dependencies: + esbuild: 0.25.9 + fdir: 6.4.6(picomatch@4.0.2) + picomatch: 4.0.2 + postcss: 8.5.4 + rollup: 4.40.2 + tinyglobby: 0.2.14 + optionalDependencies: + '@types/node': 24.2.1 + fsevents: 2.3.3 + jiti: 2.4.2 + lightningcss: 1.30.1 + tsx: 4.19.4 + yaml: 2.8.0 + vitest@3.2.4(@types/debug@4.1.12)(@types/node@20.17.50)(@vitest/ui@3.2.4)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1)(tsx@4.19.4)(yaml@2.8.0): dependencies: '@types/chai': 5.2.2 @@ -21021,9 +21141,9 @@ snapshots: wrap-ansi@8.1.0: dependencies: - ansi-styles: 6.2.1 + ansi-styles: 6.2.3 string-width: 5.1.2 - strip-ansi: 7.1.0 + strip-ansi: 7.1.2 wrap-ansi@9.0.0: dependencies: diff --git a/releases/3.28.15-release.png b/releases/3.28.15-release.png new file mode 100644 index 000000000000..fc6e235befee Binary files /dev/null and b/releases/3.28.15-release.png differ diff --git a/src/core/task/AutoApprovalHandler.ts b/src/core/task/AutoApprovalHandler.ts index 33821ddfa24a..db6ff04fe198 100644 --- a/src/core/task/AutoApprovalHandler.ts +++ b/src/core/task/AutoApprovalHandler.ts @@ -10,6 +10,7 @@ export interface AutoApprovalResult { } export class AutoApprovalHandler { + private lastResetMessageIndex: number = 0 private consecutiveAutoApprovedRequestsCount: number = 0 private consecutiveAutoApprovedCost: number = 0 @@ -25,7 +26,7 @@ export class AutoApprovalHandler { ) => Promise<{ response: ClineAskResponse; text?: string; images?: string[] }>, ): Promise { // Check request count limit - const requestResult = await this.checkRequestLimit(state, askForApproval) + const requestResult = await this.checkRequestLimit(state, messages, askForApproval) if (!requestResult.shouldProceed || requestResult.requiresApproval) { return requestResult } @@ -36,10 +37,11 @@ export class AutoApprovalHandler { } /** - * Increment the request counter and check if limit is exceeded + * Calculate request count and check if limit is exceeded */ private async checkRequestLimit( state: GlobalState | undefined, + messages: ClineMessage[], askForApproval: ( type: ClineAsk, data: string, @@ -47,8 +49,11 @@ export class AutoApprovalHandler { ): Promise { const maxRequests = state?.allowedMaxRequests || Infinity - // Increment the counter for each new API request - this.consecutiveAutoApprovedRequestsCount++ + // Calculate request count from messages after the last reset point + const messagesAfterReset = messages.slice(this.lastResetMessageIndex) + // Count API request messages (simplified - you may need to adjust based on your message structure) + this.consecutiveAutoApprovedRequestsCount = + messagesAfterReset.filter((msg) => msg.type === "say" && msg.say === "api_req_started").length + 1 // +1 for the current request being checked if (this.consecutiveAutoApprovedRequestsCount > maxRequests) { const { response } = await askForApproval( @@ -58,7 +63,8 @@ export class AutoApprovalHandler { // If we get past the promise, it means the user approved and did not start a new task 
if (response === "yesButtonClicked") { - this.consecutiveAutoApprovedRequestsCount = 0 + // Reset tracking by recording the current message count + this.lastResetMessageIndex = messages.length return { shouldProceed: true, requiresApproval: true, @@ -91,8 +97,9 @@ export class AutoApprovalHandler { ): Promise { const maxCost = state?.allowedMaxCost || Infinity - // Calculate total cost from messages - this.consecutiveAutoApprovedCost = getApiMetrics(messages).totalCost + // Calculate total cost from messages after the last reset point + const messagesAfterReset = messages.slice(this.lastResetMessageIndex) + this.consecutiveAutoApprovedCost = getApiMetrics(messagesAfterReset).totalCost // Use epsilon for floating-point comparison to avoid precision issues const EPSILON = 0.0001 @@ -104,8 +111,9 @@ export class AutoApprovalHandler { // If we get past the promise, it means the user approved and did not start a new task if (response === "yesButtonClicked") { - // Note: We don't reset the cost to 0 here because the actual cost - // is calculated from the messages. This is different from the request count. + // Reset tracking by recording the current message count + // Future calculations will only include messages after this point + this.lastResetMessageIndex = messages.length return { shouldProceed: true, requiresApproval: true, @@ -126,10 +134,12 @@ export class AutoApprovalHandler { } /** - * Reset the request counter (typically called when starting a new task) + * Reset the tracking (typically called when starting a new task) */ resetRequestCount(): void { + this.lastResetMessageIndex = 0 this.consecutiveAutoApprovedRequestsCount = 0 + this.consecutiveAutoApprovedCost = 0 } /** diff --git a/src/core/task/__tests__/AutoApprovalHandler.spec.ts b/src/core/task/__tests__/AutoApprovalHandler.spec.ts index 4d91b1f77d6e..00752a13087c 100644 --- a/src/core/task/__tests__/AutoApprovalHandler.spec.ts +++ b/src/core/task/__tests__/AutoApprovalHandler.spec.ts @@ -40,12 +40,15 @@ describe("AutoApprovalHandler", () => { mockState.allowedMaxCost = 10 const messages: ClineMessage[] = [] - // First call should be under limit + // First call should be under limit (count = 1) const result1 = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) expect(result1.shouldProceed).toBe(true) expect(result1.requiresApproval).toBe(false) - // Second call should trigger request limit + // Add a message to simulate first request completed + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 }) + + // Second call should trigger request limit (1 message + current = 2 > 1) mockAskForApproval.mockResolvedValue({ response: "yesButtonClicked" }) const result2 = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) @@ -64,27 +67,35 @@ describe("AutoApprovalHandler", () => { mockState.allowedMaxRequests = 3 }) - it("should increment request count on each check", async () => { + it("should calculate request count from messages", async () => { const messages: ClineMessage[] = [] - // Check state after each call - for (let i = 1; i <= 3; i++) { - await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) - const state = handler.getApprovalState() - expect(state.requestCount).toBe(i) - } + // First check - no messages yet, count should be 1 (for current request) + await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + let state = handler.getApprovalState() + expect(state.requestCount).toBe(1) + + // Add API request 
messages + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 }) + await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + state = handler.getApprovalState() + expect(state.requestCount).toBe(2) // 1 message + current request + + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 2000 }) + await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + state = handler.getApprovalState() + expect(state.requestCount).toBe(3) // 2 messages + current request }) it("should ask for approval when limit is exceeded", async () => { const messages: ClineMessage[] = [] - // Make 3 requests (within limit) + // Add 3 API request messages (to simulate 3 requests made) for (let i = 0; i < 3; i++) { - await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 + i }) } - expect(mockAskForApproval).not.toHaveBeenCalled() - // 4th request should trigger approval + // Next check should trigger approval (3 messages + current = 4 > 3) mockAskForApproval.mockResolvedValue({ response: "yesButtonClicked" }) const result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) @@ -99,29 +110,35 @@ describe("AutoApprovalHandler", () => { it("should reset count when user approves", async () => { const messages: ClineMessage[] = [] - // Exceed limit + // Add messages to exceed limit for (let i = 0; i < 3; i++) { - await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 + i }) } - // 4th request should trigger approval and reset + // Next request should trigger approval and reset mockAskForApproval.mockResolvedValue({ response: "yesButtonClicked" }) await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) - // Count should be reset + // Add more messages after reset + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 4000 }) + + // Next check should only count messages after reset + const result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + expect(result.requiresApproval).toBe(false) // Should not require approval (1 message + current = 2 <= 3) + const state = handler.getApprovalState() - expect(state.requestCount).toBe(0) + expect(state.requestCount).toBe(2) // 1 message after reset + current request }) it("should not proceed when user rejects", async () => { const messages: ClineMessage[] = [] - // Exceed limit + // Add messages to exceed limit for (let i = 0; i < 3; i++) { - await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 + i }) } - // 4th request with rejection + // Next request with rejection mockAskForApproval.mockResolvedValue({ response: "noButtonClicked" }) const result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) @@ -183,17 +200,67 @@ describe("AutoApprovalHandler", () => { expect(result3.requiresApproval).toBe(true) }) - it("should not reset cost to zero on approval", async () => { + it("should reset cost tracking on approval", async () => { + const messages: ClineMessage[] = [ + { type: "say", say: "api_req_started", text: '{"cost": 3.0}', ts: 1000 }, + { type: "say", say: "api_req_started", text: '{"cost": 3.0}', ts: 2000 }, + ] + + // First check - cost exceeds limit (6.0 > 5.0) + 
mockGetApiMetrics.mockReturnValue({ totalCost: 6.0 }) + mockAskForApproval.mockResolvedValue({ response: "yesButtonClicked" }) + + const result1 = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + expect(result1.shouldProceed).toBe(true) + expect(result1.requiresApproval).toBe(true) + + // Add more messages after reset + messages.push( + { type: "say", say: "api_req_started", text: '{"cost": 2.0}', ts: 3000 }, + { type: "say", say: "api_req_started", text: '{"cost": 1.0}', ts: 4000 }, + ) + + // Second check - should only count messages after reset (3.0 < 5.0) + mockGetApiMetrics.mockReturnValue({ totalCost: 3.0 }) + const result2 = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + + // Should not require approval since cost after reset is under limit + expect(result2.shouldProceed).toBe(true) + expect(result2.requiresApproval).toBe(false) + + // Verify it's only calculating cost from messages after reset point + expect(mockGetApiMetrics).toHaveBeenLastCalledWith(messages.slice(2)) + }) + + it("should track multiple cost resets correctly", async () => { const messages: ClineMessage[] = [] + // First cost limit hit + messages.push({ type: "say", say: "api_req_started", text: '{"cost": 6.0}', ts: 1000 }) mockGetApiMetrics.mockReturnValue({ totalCost: 6.0 }) mockAskForApproval.mockResolvedValue({ response: "yesButtonClicked" }) await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) - // Cost should still be calculated from messages, not reset - const state = handler.getApprovalState() - expect(state.currentCost).toBe(6.0) + // Add more messages + messages.push( + { type: "say", say: "api_req_started", text: '{"cost": 3.0}', ts: 2000 }, + { type: "say", say: "api_req_started", text: '{"cost": 3.0}', ts: 3000 }, + ) + + // Second cost limit hit (only counting from index 1) + mockGetApiMetrics.mockReturnValue({ totalCost: 6.0 }) + await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + + // Add more messages after second reset + messages.push({ type: "say", say: "api_req_started", text: '{"cost": 2.0}', ts: 4000 }) + + // Third check - should only count from last reset + mockGetApiMetrics.mockReturnValue({ totalCost: 2.0 }) + const result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + + expect(result.requiresApproval).toBe(false) + expect(mockGetApiMetrics).toHaveBeenLastCalledWith(messages.slice(3)) }) }) @@ -205,16 +272,21 @@ describe("AutoApprovalHandler", () => { mockGetApiMetrics.mockReturnValue({ totalCost: 3.0 }) - // First two requests should pass - for (let i = 0; i < 2; i++) { - const result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) - expect(result.shouldProceed).toBe(true) - expect(result.requiresApproval).toBe(false) - } + // First request should pass (count = 1) + let result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + expect(result.shouldProceed).toBe(true) + expect(result.requiresApproval).toBe(false) - // Third request should trigger request limit (not cost limit) + // Add a message and check again (count = 2) + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 }) + result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + expect(result.shouldProceed).toBe(true) + expect(result.requiresApproval).toBe(false) + + // Add another message - third request should trigger request limit (count = 3 > 2) + 
messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 2000 }) mockAskForApproval.mockResolvedValue({ response: "yesButtonClicked" }) - const result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + result = await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) expect(mockAskForApproval).toHaveBeenCalledWith( "auto_approval_max_req_reached", @@ -227,23 +299,38 @@ describe("AutoApprovalHandler", () => { }) describe("resetRequestCount", () => { - it("should reset the request counter", async () => { + it("should reset tracking", async () => { mockState.allowedMaxRequests = 5 + mockState.allowedMaxCost = 10.0 const messages: ClineMessage[] = [] - // Make some requests + // Add some messages for (let i = 0; i < 3; i++) { - await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + messages.push({ type: "say", say: "api_req_started", text: "{}", ts: 1000 + i }) } + mockGetApiMetrics.mockReturnValue({ totalCost: 5.0 }) + await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + let state = handler.getApprovalState() - expect(state.requestCount).toBe(3) + expect(state.requestCount).toBe(4) // 3 messages + current + expect(state.currentCost).toBe(5.0) // Reset handler.resetRequestCount() + // After reset, counts should be zero state = handler.getApprovalState() expect(state.requestCount).toBe(0) + expect(state.currentCost).toBe(0) + + // Next check should start fresh + mockGetApiMetrics.mockReturnValue({ totalCost: 8.0 }) + await handler.checkAutoApprovalLimits(mockState, messages, mockAskForApproval) + + state = handler.getApprovalState() + expect(state.requestCount).toBe(4) // All messages counted again + expect(state.currentCost).toBe(8.0) }) }) }) diff --git a/src/shared/__tests__/api.spec.ts b/src/shared/__tests__/api.spec.ts index aaeb1bf4447e..9dc54e42d01c 100644 --- a/src/shared/__tests__/api.spec.ts +++ b/src/shared/__tests__/api.spec.ts @@ -194,18 +194,17 @@ describe("getModelMaxOutputTokens", () => { expect(result).toBe(20_000) // Should use model.maxTokens since it's exactly at 20% }) - test("should bypass 20% cap for GPT-5 models and use exact configured max tokens", () => { + test("should apply 20% cap for GPT-5 models like other models", () => { const model: ModelInfo = { contextWindow: 200_000, supportsPromptCache: false, - maxTokens: 128_000, // 64% of context window, normally would be capped + maxTokens: 128_000, // 64% of context window, should be capped } const settings: ProviderSettings = { apiProvider: "openai", } - // Test various GPT-5 model IDs const gpt5ModelIds = ["gpt-5", "gpt-5-turbo", "GPT-5", "openai/gpt-5-preview", "gpt-5-32k", "GPT-5-TURBO"] gpt5ModelIds.forEach((modelId) => { @@ -215,8 +214,8 @@ describe("getModelMaxOutputTokens", () => { settings, format: "openai", }) - // Should use full 128k tokens, not capped to 20% (40k) - expect(result).toBe(128_000) + // Should be capped to 20% of context window: 200_000 * 0.2 = 40_000 + expect(result).toBe(40_000) }) }) @@ -246,23 +245,11 @@ describe("getModelMaxOutputTokens", () => { }) }) - test("should handle GPT-5 models with various max token configurations", () => { + test("should cap GPT-5 models to min(model.maxTokens, 20% of contextWindow)", () => { const testCases = [ - { - maxTokens: 128_000, - contextWindow: 200_000, - expected: 128_000, // Uses full 128k - }, - { - maxTokens: 64_000, - contextWindow: 200_000, - expected: 64_000, // Uses configured 64k - }, - { - maxTokens: 256_000, - 
contextWindow: 400_000, - expected: 256_000, // Uses full 256k even though it's 64% of context - }, + { maxTokens: 128_000, contextWindow: 200_000, expected: 40_000 }, + { maxTokens: 64_000, contextWindow: 200_000, expected: 40_000 }, + { maxTokens: 256_000, contextWindow: 400_000, expected: 80_000 }, ] testCases.forEach(({ maxTokens, contextWindow, expected }) => { diff --git a/src/shared/api.ts b/src/shared/api.ts index 4904573585bc..c80dc401ec15 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -115,17 +115,7 @@ export const getModelMaxOutputTokens = ({ } // If model has explicit maxTokens, clamp it to 20% of the context window - // Exception: GPT-5 models should use their exact configured max output tokens if (model.maxTokens) { - // Check if this is a GPT-5 model (case-insensitive) - const isGpt5Model = modelId.toLowerCase().includes("gpt-5") - - // GPT-5 models bypass the 20% cap and use their full configured max tokens - if (isGpt5Model) { - return model.maxTokens - } - - // All other models are clamped to 20% of context window return Math.min(model.maxTokens, Math.ceil(model.contextWindow * 0.2)) } diff --git a/webview-ui/package.json b/webview-ui/package.json index 9643a6d3a38a..89ea022dc86c 100644 --- a/webview-ui/package.json +++ b/webview-ui/package.json @@ -104,7 +104,7 @@ "identity-obj-proxy": "^3.0.0", "jsdom": "^26.0.0", "typescript": "5.8.3", - "vite": "6.3.5", + "vite": "6.3.6", "vitest": "^3.2.3" } } diff --git a/webview-ui/src/components/modes/ModesView.tsx b/webview-ui/src/components/modes/ModesView.tsx index ad6446bb758f..a93a4c371202 100644 --- a/webview-ui/src/components/modes/ModesView.tsx +++ b/webview-ui/src/components/modes/ModesView.tsx @@ -868,7 +868,7 @@ const ModesView = ({ onDone }: ModesViewProps) => { })()} onChange={(e) => { const value = - (e as unknown as CustomEvent)?.detail?.target?.value || + (e as unknown as CustomEvent)?.detail?.target?.value ?? ((e as any).target as HTMLTextAreaElement).value const customMode = findModeBySlug(visualMode, customModes) if (customMode) { @@ -923,7 +923,7 @@ const ModesView = ({ onDone }: ModesViewProps) => { })()} onChange={(e) => { const value = - (e as unknown as CustomEvent)?.detail?.target?.value || + (e as unknown as CustomEvent)?.detail?.target?.value ?? ((e as any).target as HTMLTextAreaElement).value const customMode = findModeBySlug(visualMode, customModes) if (customMode) { @@ -978,7 +978,7 @@ const ModesView = ({ onDone }: ModesViewProps) => { })()} onChange={(e) => { const value = - (e as unknown as CustomEvent)?.detail?.target?.value || + (e as unknown as CustomEvent)?.detail?.target?.value ?? ((e as any).target as HTMLTextAreaElement).value const customMode = findModeBySlug(visualMode, customModes) if (customMode) { @@ -1137,14 +1137,15 @@ const ModesView = ({ onDone }: ModesViewProps) => { })()} onChange={(e) => { const value = - (e as unknown as CustomEvent)?.detail?.target?.value || + (e as unknown as CustomEvent)?.detail?.target?.value ?? ((e as any).target as HTMLTextAreaElement).value const customMode = findModeBySlug(visualMode, customModes) if (customMode) { // For custom modes, update the JSON file updateCustomMode(visualMode, { ...customMode, - customInstructions: value.trim() || undefined, + // Preserve empty string; only treat null/undefined as unset + customInstructions: value ?? 
undefined, source: customMode.source || "global", }) } else { @@ -1354,12 +1355,12 @@ const ModesView = ({ onDone }: ModesViewProps) => { value={customInstructions || ""} onChange={(e) => { const value = - (e as unknown as CustomEvent)?.detail?.target?.value || + (e as unknown as CustomEvent)?.detail?.target?.value ?? ((e as any).target as HTMLTextAreaElement).value - setCustomInstructions(value || undefined) + setCustomInstructions(value ?? undefined) vscode.postMessage({ type: "customInstructions", - text: value.trim() || undefined, + text: value ?? undefined, }) }} rows={4} diff --git a/webview-ui/src/components/modes/__tests__/ModesView.spec.tsx b/webview-ui/src/components/modes/__tests__/ModesView.spec.tsx index 56ab791360ed..1a30ae079f1f 100644 --- a/webview-ui/src/components/modes/__tests__/ModesView.spec.tsx +++ b/webview-ui/src/components/modes/__tests__/ModesView.spec.tsx @@ -223,12 +223,13 @@ describe("PromptsView", () => { const changeEvent = new Event("change", { bubbles: true }) fireEvent(textarea, changeEvent) - // The component calls setCustomInstructions with value || undefined - // Since empty string is falsy, it should be undefined - expect(setCustomInstructions).toHaveBeenCalledWith(undefined) + // The component calls setCustomInstructions with value ?? undefined + // With nullish coalescing, empty string is preserved (not treated as nullish) + expect(setCustomInstructions).toHaveBeenCalledWith("") + // The postMessage call will have multiple calls, we need to check the right one expect(vscode.postMessage).toHaveBeenCalledWith({ type: "customInstructions", - text: undefined, + text: "", // empty string is now preserved with ?? operator }) }) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 65450c9349bb..7cf00d65ac8d 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -285,15 +285,24 @@ const ApiOptions = ({ const filteredModels = filterModels(models, selectedProvider, organizationAllowList) - const modelOptions = filteredModels - ? Object.keys(filteredModels).map((modelId) => ({ - value: modelId, - label: modelId, - })) + // Include the currently selected model even if deprecated (so users can see what they have selected) + // But filter out other deprecated models from being newly selectable + const availableModels = filteredModels + ? 
Object.entries(filteredModels) + .filter(([modelId, modelInfo]) => { + // Always include the currently selected model + if (modelId === selectedModelId) return true + // Filter out deprecated models that aren't currently selected + return !modelInfo.deprecated + }) + .map(([modelId]) => ({ + value: modelId, + label: modelId, + })) : [] - return modelOptions - }, [selectedProvider, organizationAllowList]) + return availableModels + }, [selectedProvider, organizationAllowList, selectedModelId]) const onProviderChange = useCallback( (value: ProviderName) => { @@ -778,6 +787,11 @@ const ApiOptions = ({ + {/* Show error if a deprecated model is selected */} + {selectedModelInfo?.deprecated && ( + + )} + {selectedProvider === "bedrock" && selectedModelId === "custom-arn" && ( )} - + {/* Only show model info if not deprecated */} + {!selectedModelInfo?.deprecated && ( + + )} )} diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index d0f9216c21c6..37375db47603 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -95,13 +95,30 @@ export const ModelPicker = ({ const selectTimeoutRef = useRef(null) const closeTimeoutRef = useRef(null) + const { id: selectedModelId, info: selectedModelInfo } = useSelectedModel(apiConfiguration) + const modelIds = useMemo(() => { const filteredModels = filterModels(models, apiConfiguration.apiProvider, organizationAllowList) - return Object.keys(filteredModels ?? {}).sort((a, b) => a.localeCompare(b)) - }, [models, apiConfiguration.apiProvider, organizationAllowList]) + // Include the currently selected model even if deprecated (so users can see what they have selected) + // But filter out other deprecated models from being newly selectable + const availableModels = Object.entries(filteredModels ?? {}) + .filter(([modelId, modelInfo]) => { + // Always include the currently selected model + if (modelId === selectedModelId) return true + // Filter out deprecated models that aren't currently selected + return !modelInfo.deprecated + }) + .reduce( + (acc, [modelId, modelInfo]) => { + acc[modelId] = modelInfo + return acc + }, + {} as Record, + ) - const { id: selectedModelId, info: selectedModelInfo } = useSelectedModel(apiConfiguration) + return Object.keys(availableModels).sort((a, b) => a.localeCompare(b)) + }, [models, apiConfiguration.apiProvider, organizationAllowList, selectedModelId]) const [searchValue, setSearchValue] = useState( (apiConfiguration.apiProvider === "zgsm" ? "" : selectedModelId) || "", @@ -320,7 +337,10 @@ export const ModelPicker = ({ {errorMessage && } - {selectedModelId && selectedModelInfo && showInfoView && ( + {selectedModelInfo?.deprecated && showInfoView && ( + + )} + {selectedModelId && selectedModelInfo && !selectedModelInfo.deprecated && showInfoView && ( { + // Don't trim during editing to preserve intentional whitespace + // Use nullish coalescing to preserve empty strings + const finalValue = value ?? undefined + if (type === "CONDENSE") { - setCustomCondensingPrompt(value || supportPrompt.default.CONDENSE) + setCustomCondensingPrompt(finalValue ?? supportPrompt.default.CONDENSE) vscode.postMessage({ type: "updateCondensingPrompt", - text: value || supportPrompt.default.CONDENSE, + text: finalValue ?? 
supportPrompt.default.CONDENSE, }) + // Also update the customSupportPrompts to trigger change detection + const updatedPrompts = { ...customSupportPrompts } + if (finalValue === undefined) { + delete updatedPrompts[type] + } else { + updatedPrompts[type] = finalValue + } + setCustomSupportPrompts(updatedPrompts) } else { - const updatedPrompts = { ...customSupportPrompts, [type]: value } + const updatedPrompts = { ...customSupportPrompts } + if (finalValue === undefined) { + delete updatedPrompts[type] + } else { + updatedPrompts[type] = finalValue + } setCustomSupportPrompts(updatedPrompts) } } @@ -88,6 +105,10 @@ const PromptsSettings = ({ type: "updateCondensingPrompt", text: supportPrompt.default.CONDENSE, }) + // Also update the customSupportPrompts to trigger change detection + const updatedPrompts = { ...customSupportPrompts } + delete updatedPrompts[type] + setCustomSupportPrompts(updatedPrompts) } else { const updatedPrompts = { ...customSupportPrompts } delete updatedPrompts[type] @@ -97,7 +118,8 @@ const PromptsSettings = ({ const getSupportPromptValue = (type: SupportPromptType): string => { if (type === "CONDENSE") { - return customCondensingPrompt || supportPrompt.default.CONDENSE + // Preserve empty string - only fall back to default when value is nullish + return customCondensingPrompt ?? supportPrompt.default.CONDENSE } return supportPrompt.get(customSupportPrompts, type) } @@ -158,12 +180,11 @@ const PromptsSettings = ({ { + onInput={(e) => { const value = - (e as unknown as CustomEvent)?.detail?.target?.value || + (e as unknown as CustomEvent)?.detail?.target?.value ?? ((e as any).target as HTMLTextAreaElement).value - const trimmedValue = value.trim() - updateSupportPrompt(activeSupportOption, trimmedValue || undefined) + updateSupportPrompt(activeSupportOption, value) }} rows={6} className="w-full" diff --git a/webview-ui/src/components/settings/__tests__/ModelPicker.deprecated.spec.tsx b/webview-ui/src/components/settings/__tests__/ModelPicker.deprecated.spec.tsx new file mode 100644 index 000000000000..d87547215b1b --- /dev/null +++ b/webview-ui/src/components/settings/__tests__/ModelPicker.deprecated.spec.tsx @@ -0,0 +1,200 @@ +// npx vitest src/components/settings/__tests__/ModelPicker.deprecated.spec.tsx + +import { render, screen } from "@testing-library/react" +import userEvent from "@testing-library/user-event" +import { QueryClient, QueryClientProvider } from "@tanstack/react-query" +import { describe, it, expect, vi, beforeEach } from "vitest" + +import { ModelPicker } from "../ModelPicker" +import type { ModelInfo } from "@roo-code/types" + +// Mock the i18n module +vi.mock("@src/i18n/TranslationContext", () => ({ + useAppTranslation: () => ({ + t: (key: string, options?: any) => { + // Handle specific translation keys + if (key === "settings:validation.modelDeprecated") { + return "This model is no longer available. Please select a different model." 
+ } + if (options) return `${key} ${JSON.stringify(options)}` + return key + }, + }), +})) + +// Mock the useSelectedModel hook +vi.mock("@/components/ui/hooks/useSelectedModel", () => ({ + useSelectedModel: (apiConfiguration: any) => { + const modelId = apiConfiguration?.openRouterModelId || "" + const models: Record = { + "model-1": { + maxTokens: 1000, + contextWindow: 4000, + supportsPromptCache: true, + }, + "model-2": { + maxTokens: 2000, + contextWindow: 8000, + supportsPromptCache: false, + }, + "deprecated-model": { + maxTokens: 1500, + contextWindow: 6000, + supportsPromptCache: true, + deprecated: true, + }, + } + return { + id: modelId, + info: models[modelId], + provider: "openrouter", + isLoading: false, + isError: false, + } + }, +})) + +describe("ModelPicker - Deprecated Models", () => { + const mockSetApiConfigurationField = vi.fn() + const queryClient = new QueryClient({ + defaultOptions: { + queries: { retry: false }, + }, + }) + + const regularModels: Record = { + "model-1": { + maxTokens: 1000, + contextWindow: 4000, + supportsPromptCache: true, + }, + "model-2": { + maxTokens: 2000, + contextWindow: 8000, + supportsPromptCache: false, + }, + "deprecated-model": { + maxTokens: 1500, + contextWindow: 6000, + supportsPromptCache: true, + deprecated: true, + }, + } + + beforeEach(() => { + vi.clearAllMocks() + }) + + it("should filter out deprecated models from the dropdown", async () => { + const user = userEvent.setup() + + render( + + + , + ) + + // Open the dropdown + const button = screen.getByTestId("model-picker-button") + await user.click(button) + + // Check that non-deprecated models are shown + expect(screen.getByTestId("model-option-model-1")).toBeInTheDocument() + expect(screen.getByTestId("model-option-model-2")).toBeInTheDocument() + + // Check that deprecated model is NOT shown + expect(screen.queryByTestId("model-option-deprecated-model")).not.toBeInTheDocument() + }) + + it("should show error when a deprecated model is currently selected", () => { + render( + + + , + ) + + // Check that the error message is displayed + expect( + screen.getByText("This model is no longer available. Please select a different model."), + ).toBeInTheDocument() + }) + + it("should allow selecting non-deprecated models", async () => { + const user = userEvent.setup() + + render( + + + , + ) + + // Open the dropdown + const button = screen.getByTestId("model-picker-button") + await user.click(button) + + // Select a non-deprecated model + const model2Option = screen.getByTestId("model-option-model-2") + await user.click(model2Option) + + // Verify the selection was made + expect(mockSetApiConfigurationField).toHaveBeenCalledWith("openRouterModelId", "model-2") + }) + + it("should not display model info for deprecated models", () => { + render( + + + , + ) + + // Model info should not be displayed for deprecated models + expect(screen.queryByText("This is a deprecated model")).not.toBeInTheDocument() + }) +}) diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index c2f08564e3d6..cd5dbadffc40 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -847,6 +847,7 @@ "regionMismatch": "Warning: The region in your ARN ({{arnRegion}}) does not match your selected region ({{region}}). This may cause access issues. The provider will use the region from the ARN." }, "modelAvailability": "The model ID ({{modelId}}) you provided is not available. 
Please choose a different model.", + "modelDeprecated": "This model is no longer available. Please select a different model.", "providerNotAllowed": "Provider '{{provider}}' is not allowed by your organization", "modelNotAllowed": "Model '{{model}}' is not allowed for provider '{{provider}}' by your organization", "profileInvalid": "This profile contains a provider or model that is not allowed by your organization", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 456566e291b4..50f67c7005e4 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -843,6 +843,7 @@ "regionMismatch": "警告:您的 ARN 中的区域 ({{arnRegion}}) 与您选择的区域 ({{region}}) 不匹配。这可能会导致访问问题。提供程序将使用 ARN 中的区域。" }, "modelAvailability": "模型ID {{modelId}} 不可用,请重新选择", + "modelDeprecated": "此模型不再可用,请选择其他模型。", "providerNotAllowed": "提供商 '{{provider}}' 不允许用于您的组织", "modelNotAllowed": "模型 '{{model}}' 不允许用于提供商 '{{provider}}',您的组织不允许", "profileInvalid": "此配置文件包含您的组织不允许的提供商或模型", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 96ada5c5f775..57d39b67cfa9 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -843,6 +843,7 @@ "regionMismatch": "警告:您 ARN 中的區域 ({{arnRegion}}) 與您選擇的區域 ({{region}}) 不符,可能導致存取問題。系統將使用 ARN 中指定的區域。" }, "modelAvailability": "您指定的模型 ID ({{modelId}}) 目前無法使用,請選擇其他模型。", + "modelDeprecated": "此模型已停用,請選擇其他模型。", "providerNotAllowed": "供應商 '{{provider}}' 不允許用於您的組織。", "modelNotAllowed": "模型 '{{model}}' 不允許用於供應商 '{{provider}}',您的組織不允許", "profileInvalid": "此設定檔包含您的組織不允許的供應商或模型",
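
For consumers of the new `usageStatsSchema` added in `packages/types/src/cloud.ts`, a minimal sketch of validating a cloud usage-stats response might look like the following. This assumes the schema and the `UsageStats` type are re-exported from the `@roo-code/types` package root, and the endpoint path shown is purely illustrative, not a documented route:

```ts
import { usageStatsSchema, type UsageStats } from "@roo-code/types"

async function fetchUsageStats(baseUrl: string, token: string): Promise<UsageStats> {
	// Hypothetical endpoint path; substitute the real cloud API route.
	const res = await fetch(`${baseUrl}/api/usage-stats?period=30`, {
		headers: { Authorization: `Bearer ${token}` },
	})

	// safeParse returns a typed result instead of throwing on malformed payloads.
	const parsed = usageStatsSchema.safeParse(await res.json())
	if (!parsed.success) {
		throw new Error(`Unexpected usage stats payload: ${parsed.error.message}`)
	}

	// parsed.data carries the dates/tasks/tokens/costs arrays plus totals and the period in days.
	return parsed.data
}
```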
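The change to `getModelMaxOutputTokens` in `src/shared/api.ts` removes the GPT-5 exception, so any model with an explicit `maxTokens` is now clamped to 20% of its context window. A standalone sketch of the clamping math (not the actual implementation), with values mirroring the updated test expectations:

```ts
// Explicit maxTokens is capped at 20% of the context window for all models, GPT-5 included.
function clampMaxOutputTokens(maxTokens: number, contextWindow: number): number {
	return Math.min(maxTokens, Math.ceil(contextWindow * 0.2))
}

clampMaxOutputTokens(128_000, 200_000) // 40_000
clampMaxOutputTokens(64_000, 200_000) // 40_000
clampMaxOutputTokens(256_000, 400_000) // 80_000
```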
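Both `ApiOptions` and `ModelPicker` apply the same rule for the new `deprecated` flag: hide deprecated models from the dropdown unless one is already selected, so users can still see (and be warned about) an existing deprecated selection. A condensed sketch of that filter with simplified names, not the component code itself:

```ts
import type { ModelInfo } from "@roo-code/types"

// Keep non-deprecated models, plus the currently selected one.
function selectableModelIds(models: Record<string, ModelInfo>, selectedModelId?: string): string[] {
	return Object.entries(models)
		.filter(([id, info]) => id === selectedModelId || !info.deprecated)
		.map(([id]) => id)
		.sort((a, b) => a.localeCompare(b))
}
```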