diff --git a/packages/types/src/providers/baseten.ts b/packages/types/src/providers/baseten.ts index 0eff95b5160..eeb6b0d2d1f 100644 --- a/packages/types/src/providers/baseten.ts +++ b/packages/types/src/providers/baseten.ts @@ -33,6 +33,7 @@ export const basetenModels = { contextWindow: 163_840, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 2.55, outputPrice: 5.95, cacheWritesPrice: 0, @@ -44,6 +45,7 @@ export const basetenModels = { contextWindow: 163_840, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 2.55, outputPrice: 5.95, cacheWritesPrice: 0, @@ -55,6 +57,7 @@ export const basetenModels = { contextWindow: 163_840, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.77, outputPrice: 0.77, cacheWritesPrice: 0, @@ -66,6 +69,7 @@ export const basetenModels = { contextWindow: 163_840, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.5, outputPrice: 1.5, cacheWritesPrice: 0, @@ -86,11 +90,24 @@ export const basetenModels = { description: "DeepSeek's hybrid reasoning model with efficient long context scaling with GPT-5 level performance", }, + "openai/gpt-oss-120b": { + maxTokens: 16_384, + contextWindow: 128_072, + supportsImages: false, + supportsPromptCache: false, + supportsNativeTools: true, + inputPrice: 0.1, + outputPrice: 0.5, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities", + }, "Qwen/Qwen3-235B-A22B-Instruct-2507": { maxTokens: 16_384, contextWindow: 262_144, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.22, outputPrice: 0.8, cacheWritesPrice: 0, @@ -102,24 +119,13 @@ export const basetenModels = { contextWindow: 262_144, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.38, outputPrice: 1.53, cacheWritesPrice: 0, cacheReadsPrice: 0, description: "Mixture-of-experts LLM with advanced coding and reasoning capabilities", }, - "openai/gpt-oss-120b": { - maxTokens: 16_384, - contextWindow: 128_072, - supportsImages: false, - supportsPromptCache: false, - supportsNativeTools: true, - inputPrice: 0.1, - outputPrice: 0.5, - cacheWritesPrice: 0, - cacheReadsPrice: 0, - description: "Extremely capable general-purpose LLM with strong, controllable reasoning capabilities", - }, "moonshotai/Kimi-K2-Instruct-0905": { maxTokens: 16_384, contextWindow: 262_000, diff --git a/packages/types/src/providers/bedrock.ts b/packages/types/src/providers/bedrock.ts index 8dec37108ad..da40e98f433 100644 --- a/packages/types/src/providers/bedrock.ts +++ b/packages/types/src/providers/bedrock.ts @@ -454,22 +454,6 @@ export const bedrockModels = { outputPrice: 0.6, description: "Amazon Titan Text Express", }, - "amazon.titan-text-embeddings-v1:0": { - maxTokens: 8192, - contextWindow: 8_000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.1, - description: "Amazon Titan Text Embeddings", - }, - "amazon.titan-text-embeddings-v2:0": { - maxTokens: 8192, - contextWindow: 8_000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.02, - description: "Amazon Titan Text Embeddings V2", - }, "moonshot.kimi-k2-thinking": { maxTokens: 32_000, contextWindow: 262_144, diff --git a/packages/types/src/providers/featherless.ts b/packages/types/src/providers/featherless.ts index 8d9f7ae4e65..63bcb989687 100644 --- 
a/packages/types/src/providers/featherless.ts +++ b/packages/types/src/providers/featherless.ts @@ -13,6 +13,7 @@ export const featherlessModels = { contextWindow: 32678, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0, outputPrice: 0, description: "DeepSeek V3 0324 model.", @@ -22,6 +23,7 @@ export const featherlessModels = { contextWindow: 32678, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0, outputPrice: 0, description: "DeepSeek R1 0528 model.", @@ -41,6 +43,7 @@ export const featherlessModels = { contextWindow: 32678, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0, outputPrice: 0, description: "GPT-OSS 120B model.", @@ -57,4 +60,4 @@ export const featherlessModels = { }, } as const satisfies Record -export const featherlessDefaultModelId: FeatherlessModelId = "deepseek-ai/DeepSeek-R1-0528" +export const featherlessDefaultModelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" diff --git a/packages/types/src/providers/groq.ts b/packages/types/src/providers/groq.ts index a8d8ca7c98a..a22ad764eef 100644 --- a/packages/types/src/providers/groq.ts +++ b/packages/types/src/providers/groq.ts @@ -5,12 +5,7 @@ export type GroqModelId = | "llama-3.1-8b-instant" | "llama-3.3-70b-versatile" | "meta-llama/llama-4-scout-17b-16e-instruct" - | "meta-llama/llama-4-maverick-17b-128e-instruct" - | "mistral-saba-24b" - | "qwen-qwq-32b" | "qwen/qwen3-32b" - | "deepseek-r1-distill-llama-70b" - | "moonshotai/kimi-k2-instruct" | "moonshotai/kimi-k2-instruct-0905" | "openai/gpt-oss-120b" | "openai/gpt-oss-20b" @@ -52,33 +47,6 @@ export const groqModels = { outputPrice: 0.34, description: "Meta Llama 4 Scout 17B Instruct model, 128K context.", }, - "meta-llama/llama-4-maverick-17b-128e-instruct": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.2, - outputPrice: 0.6, - description: "Meta Llama 4 Maverick 17B Instruct model, 128K context.", - }, - "mistral-saba-24b": { - maxTokens: 8192, - contextWindow: 32768, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.79, - outputPrice: 0.79, - description: "Mistral Saba 24B model, 32K context.", - }, - "qwen-qwq-32b": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.29, - outputPrice: 0.39, - description: "Alibaba Qwen QwQ 32B model, 128K context.", - }, "qwen/qwen3-32b": { maxTokens: 8192, contextWindow: 131072, @@ -90,25 +58,6 @@ export const groqModels = { outputPrice: 0.59, description: "Alibaba Qwen 3 32B model, 128K context.", }, - "deepseek-r1-distill-llama-70b": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.75, - outputPrice: 0.99, - description: "DeepSeek R1 Distill Llama 70B model, 128K context.", - }, - "moonshotai/kimi-k2-instruct": { - maxTokens: 16384, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: true, - inputPrice: 1.0, - outputPrice: 3.0, - cacheReadsPrice: 0.5, // 50% discount for cached input tokens - description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context.", - }, "moonshotai/kimi-k2-instruct-0905": { maxTokens: 16384, contextWindow: 262144, diff --git a/packages/types/src/providers/sambanova.ts b/packages/types/src/providers/sambanova.ts index 7c04475c60f..dc592d180ca 100644 --- a/packages/types/src/providers/sambanova.ts +++ 
b/packages/types/src/providers/sambanova.ts @@ -7,9 +7,7 @@ export type SambaNovaModelId = | "DeepSeek-R1" | "DeepSeek-V3-0324" | "DeepSeek-V3.1" - | "DeepSeek-R1-Distill-Llama-70B" | "Llama-4-Maverick-17B-128E-Instruct" - | "Llama-3.3-Swallow-70B-Instruct-v0.4" | "Qwen3-32B" | "gpt-oss-120b" @@ -72,15 +70,6 @@ export const sambaNovaModels = { outputPrice: 4.5, description: "DeepSeek V3.1 model with 32K context window.", }, - "DeepSeek-R1-Distill-Llama-70B": { - maxTokens: 8192, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.7, - outputPrice: 1.4, - description: "DeepSeek R1 distilled Llama 70B model with 128K context window.", - }, "Llama-4-Maverick-17B-128E-Instruct": { maxTokens: 8192, contextWindow: 131072, @@ -92,15 +81,6 @@ export const sambaNovaModels = { outputPrice: 1.8, description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window.", }, - "Llama-3.3-Swallow-70B-Instruct-v0.4": { - maxTokens: 8192, - contextWindow: 16384, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.6, - outputPrice: 1.2, - description: "Tokyotech Llama 3.3 Swallow 70B Instruct v0.4 model with 16K context window.", - }, "Qwen3-32B": { maxTokens: 8192, contextWindow: 8192, diff --git a/packages/types/src/providers/vercel-ai-gateway.ts b/packages/types/src/providers/vercel-ai-gateway.ts index 875b87bf8b5..40d4f1ca509 100644 --- a/packages/types/src/providers/vercel-ai-gateway.ts +++ b/packages/types/src/providers/vercel-ai-gateway.ts @@ -90,6 +90,7 @@ export const vercelAiGatewayDefaultModelInfo: ModelInfo = { contextWindow: 200000, supportsImages: true, supportsPromptCache: true, + supportsNativeTools: true, inputPrice: 3, outputPrice: 15, cacheWritesPrice: 3.75, diff --git a/packages/types/src/providers/vertex.ts b/packages/types/src/providers/vertex.ts index cd247efb41a..384b78de4db 100644 --- a/packages/types/src/providers/vertex.ts +++ b/packages/types/src/providers/vertex.ts @@ -477,6 +477,7 @@ export const vertexModels = { contextWindow: 131072, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.35, outputPrice: 1.15, description: "Meta Llama 4 Maverick 17B Instruct model, 128K context.", @@ -486,6 +487,7 @@ export const vertexModels = { contextWindow: 163_840, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 1.35, outputPrice: 5.4, description: "DeepSeek R1 (0528). Available in us-central1", @@ -495,6 +497,7 @@ export const vertexModels = { contextWindow: 163_840, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.6, outputPrice: 1.7, description: "DeepSeek V3.1. Available in us-west2", @@ -504,6 +507,7 @@ export const vertexModels = { contextWindow: 131_072, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.15, outputPrice: 0.6, description: "OpenAI gpt-oss 120B. Available in us-central1", @@ -513,6 +517,7 @@ export const vertexModels = { contextWindow: 131_072, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.075, outputPrice: 0.3, description: "OpenAI gpt-oss 20B. Available in us-central1", @@ -522,6 +527,7 @@ export const vertexModels = { contextWindow: 262_144, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 1.0, outputPrice: 4.0, description: "Qwen3 Coder 480B A35B Instruct. 
Available in us-south1", @@ -531,6 +537,7 @@ export const vertexModels = { contextWindow: 262_144, supportsImages: false, supportsPromptCache: false, + supportsNativeTools: true, inputPrice: 0.25, outputPrice: 1.0, description: "Qwen3 235B A22B Instruct. Available in us-south1", diff --git a/src/api/providers/__tests__/anthropic-vertex.spec.ts b/src/api/providers/__tests__/anthropic-vertex.spec.ts index 0746602d7ef..6890e4178b1 100644 --- a/src/api/providers/__tests__/anthropic-vertex.spec.ts +++ b/src/api/providers/__tests__/anthropic-vertex.spec.ts @@ -1200,7 +1200,8 @@ describe("VertexHandler", () => { ) }) - it("should not include tools when toolProtocol is xml", async () => { + it("should include tools even when toolProtocol is set to xml (user preference now ignored)", async () => { + // XML protocol deprecation: user preference is now ignored when model supports native tools handler = new AnthropicVertexHandler({ apiModelId: "claude-3-5-sonnet-v2@20241022", vertexProjectId: "test-project", @@ -1241,9 +1242,14 @@ describe("VertexHandler", () => { // Just consume } + // Native is forced when supportsNativeTools===true, so tools should still be included expect(mockCreate).toHaveBeenCalledWith( - expect.not.objectContaining({ - tools: expect.anything(), + expect.objectContaining({ + tools: expect.arrayContaining([ + expect.objectContaining({ + name: "get_weather", + }), + ]), }), undefined, ) diff --git a/src/api/providers/__tests__/anthropic.spec.ts b/src/api/providers/__tests__/anthropic.spec.ts index 5c0c1632b4c..3fa5baf81be 100644 --- a/src/api/providers/__tests__/anthropic.spec.ts +++ b/src/api/providers/__tests__/anthropic.spec.ts @@ -451,8 +451,8 @@ describe("AnthropicHandler", () => { ) }) - it("should not include tools when toolProtocol is xml", async () => { - // Create handler with xml tool protocol in options + it("should include tools even when toolProtocol is set to xml (user preference now ignored)", async () => { + // XML protocol deprecation: user preference is now ignored when model supports native tools const xmlHandler = new AnthropicHandler({ ...mockOptions, toolProtocol: "xml", @@ -468,9 +468,14 @@ describe("AnthropicHandler", () => { // Just consume } + // Native is forced when supportsNativeTools===true, so tools should still be included expect(mockCreate).toHaveBeenCalledWith( - expect.not.objectContaining({ - tools: expect.anything(), + expect.objectContaining({ + tools: expect.arrayContaining([ + expect.objectContaining({ + name: "get_weather", + }), + ]), }), expect.anything(), ) diff --git a/src/api/providers/__tests__/featherless.spec.ts b/src/api/providers/__tests__/featherless.spec.ts index 00518828851..936c10fcd09 100644 --- a/src/api/providers/__tests__/featherless.spec.ts +++ b/src/api/providers/__tests__/featherless.spec.ts @@ -3,12 +3,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -import { - type FeatherlessModelId, - featherlessDefaultModelId, - featherlessModels, - DEEP_SEEK_DEFAULT_TEMPERATURE, -} from "@roo-code/types" +import { type FeatherlessModelId, featherlessDefaultModelId, featherlessModels } from "@roo-code/types" import { FeatherlessHandler } from "../featherless" @@ -76,7 +71,7 @@ describe("FeatherlessHandler", () => { expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: featherlessApiKey })) }) - it("should handle DeepSeek R1 reasoning format", async () => { + it("should handle reasoning format from models that use tags", async () => { // Override the mock for this specific test 
mockCreate.mockImplementationOnce(async () => ({ [Symbol.asyncIterator]: async function* () { @@ -113,7 +108,7 @@ describe("FeatherlessHandler", () => { const systemPrompt = "You are a helpful assistant." const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hi" }] vi.spyOn(handler, "getModel").mockReturnValue({ - id: "deepseek-ai/DeepSeek-R1-0528", + id: "some-reasoning-model", info: { maxTokens: 1024, temperature: 0.7 }, } as any) @@ -154,7 +149,7 @@ describe("FeatherlessHandler", () => { }) it("should return specified model when valid model is provided", () => { - const testModelId: FeatherlessModelId = "deepseek-ai/DeepSeek-R1-0528" + const testModelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" const handlerWithModel = new FeatherlessHandler({ apiModelId: testModelId, featherlessApiKey: "test-featherless-api-key", @@ -225,8 +220,8 @@ describe("FeatherlessHandler", () => { expect(firstChunk.value).toMatchObject({ type: "usage", inputTokens: 10, outputTokens: 20 }) }) - it("createMessage should pass correct parameters to Featherless client for DeepSeek R1", async () => { - const modelId: FeatherlessModelId = "deepseek-ai/DeepSeek-R1-0528" + it("createMessage should pass correct parameters to Featherless client", async () => { + const modelId: FeatherlessModelId = "moonshotai/Kimi-K2-Instruct" // Clear previous mocks and set up new implementation mockCreate.mockClear() @@ -247,27 +242,9 @@ describe("FeatherlessHandler", () => { const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) await messageGenerator.next() - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: modelId, - messages: [ - { - role: "user", - content: `${systemPrompt}\n${messages[0].content}`, - }, - ], - }), - ) - }) - - it("should apply DeepSeek default temperature for R1 models", () => { - const testModelId: FeatherlessModelId = "deepseek-ai/DeepSeek-R1-0528" - const handlerWithModel = new FeatherlessHandler({ - apiModelId: testModelId, - featherlessApiKey: "test-featherless-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.info.temperature).toBe(DEEP_SEEK_DEFAULT_TEMPERATURE) + expect(mockCreate).toHaveBeenCalled() + const callArgs = mockCreate.mock.calls[0][0] + expect(callArgs.model).toBe(modelId) }) it("should use default temperature for non-DeepSeek models", () => { diff --git a/src/core/tools/__tests__/applyDiffTool.experiment.spec.ts b/src/core/tools/__tests__/applyDiffTool.experiment.spec.ts index 4e0044c5ee2..65d7cb67749 100644 --- a/src/core/tools/__tests__/applyDiffTool.experiment.spec.ts +++ b/src/core/tools/__tests__/applyDiffTool.experiment.spec.ts @@ -84,7 +84,7 @@ describe("applyDiffTool experiment routing", () => { mockRemoveClosingTag = vi.fn((tag, value) => value) }) - it("should use legacy tool when MULTI_FILE_APPLY_DIFF experiment is disabled", async () => { + it("should always use class-based tool with native protocol (XML deprecated)", async () => { mockProvider.getState.mockResolvedValue({ experiments: { [EXPERIMENT_IDS.MULTI_FILE_APPLY_DIFF]: false, @@ -103,16 +103,17 @@ describe("applyDiffTool experiment routing", () => { mockRemoveClosingTag, ) + // Always uses native protocol now (XML deprecated) expect(applyDiffToolClass.handle).toHaveBeenCalledWith(mockCline, mockBlock, { askApproval: mockAskApproval, handleError: mockHandleError, pushToolResult: mockPushToolResult, removeClosingTag: mockRemoveClosingTag, - toolProtocol: "xml", + toolProtocol: "native", }) }) - it("should use legacy 
tool when experiments are not defined", async () => { + it("should use class-based tool when experiments are not defined", async () => { mockProvider.getState.mockResolvedValue({}) // Mock the class-based tool to resolve successfully @@ -127,24 +128,26 @@ describe("applyDiffTool experiment routing", () => { mockRemoveClosingTag, ) + // Always uses native protocol now (XML deprecated) expect(applyDiffToolClass.handle).toHaveBeenCalledWith(mockCline, mockBlock, { askApproval: mockAskApproval, handleError: mockHandleError, pushToolResult: mockPushToolResult, removeClosingTag: mockRemoveClosingTag, - toolProtocol: "xml", + toolProtocol: "native", }) }) - it("should use multi-file tool when MULTI_FILE_APPLY_DIFF experiment is enabled and using XML protocol", async () => { + it("should use class-based tool when MULTI_FILE_APPLY_DIFF experiment is enabled (native protocol always used)", async () => { mockProvider.getState.mockResolvedValue({ experiments: { [EXPERIMENT_IDS.MULTI_FILE_APPLY_DIFF]: true, }, }) - // Mock the new tool behavior - it should continue with the multi-file implementation - // Since we're not mocking the entire function, we'll just verify it doesn't call the class-based tool + // Mock the class-based tool to resolve successfully + ;(applyDiffToolClass.handle as any).mockResolvedValue(undefined) + await multiApplyDiffTool( mockCline, mockBlock, @@ -154,7 +157,14 @@ describe("applyDiffTool experiment routing", () => { mockRemoveClosingTag, ) - expect(applyDiffToolClass.handle).not.toHaveBeenCalled() + // Native protocol is always used now, so class-based tool is always called + expect(applyDiffToolClass.handle).toHaveBeenCalledWith(mockCline, mockBlock, { + askApproval: mockAskApproval, + handleError: mockHandleError, + pushToolResult: mockPushToolResult, + removeClosingTag: mockRemoveClosingTag, + toolProtocol: "native", + }) }) it("should use class-based tool when model defaults to native protocol", async () => { diff --git a/src/core/tools/__tests__/multiApplyDiffTool.spec.ts b/src/core/tools/__tests__/multiApplyDiffTool.spec.ts index 5a9a1af1da3..0910550dd82 100644 --- a/src/core/tools/__tests__/multiApplyDiffTool.spec.ts +++ b/src/core/tools/__tests__/multiApplyDiffTool.spec.ts @@ -1,4 +1,5 @@ import { applyDiffTool } from "../MultiApplyDiffTool" +import { applyDiffTool as applyDiffToolClass } from "../ApplyDiffTool" import { EXPERIMENT_IDS } from "../../../shared/experiments" import * as fs from "fs/promises" import * as fileUtils from "../../../utils/fs" @@ -9,8 +10,12 @@ vi.mock("fs/promises") vi.mock("../../../utils/fs") vi.mock("../../../utils/path") vi.mock("../../../utils/xml") -vi.mock("../applyDiffTool", () => ({ - applyDiffToolLegacy: vi.fn(), + +// Mock the ApplyDiffTool class-based tool that MultiApplyDiffTool delegates to for native protocol +vi.mock("../ApplyDiffTool", () => ({ + applyDiffTool: { + handle: vi.fn().mockResolvedValue(undefined), + }, })) // Mock TelemetryService @@ -116,8 +121,8 @@ describe("multiApplyDiffTool", () => { ;(pathUtils.getReadablePath as any).mockImplementation((cwd: string, path: string) => path) }) - describe("Early content validation", () => { - it("should filter out non-string content at parse time", async () => { + describe("Native protocol delegation", () => { + it("should delegate to applyDiffToolClass.handle for XML args format", async () => { mockBlock = { params: { args: ` @@ -130,22 +135,6 @@ describe("multiApplyDiffTool", () => { partial: false, } - // Mock parseXml to return mixed content types - const parseXml = 
await import("../../../utils/xml") - ;(parseXml.parseXml as any).mockReturnValue({ - file: { - path: "test.ts", - diff: [ - { content: "<<<<<<< SEARCH\ntest\n=======\nreplaced\n>>>>>>> REPLACE" }, - { content: null }, - { content: undefined }, - { content: 42 }, - { content: "" }, // Empty string should also be filtered - { content: "<<<<<<< SEARCH\nmore\n=======\nchanges\n>>>>>>> REPLACE" }, - ], - }, - }) - await applyDiffTool( mockCline, mockBlock, @@ -155,23 +144,26 @@ describe("multiApplyDiffTool", () => { mockRemoveClosingTag, ) - // Should complete without error and only process valid string content - expect(mockPushToolResult).toHaveBeenCalled() - expect(mockHandleError).not.toHaveBeenCalled() - - // Verify that only valid diffs were processed - const resultCall = mockPushToolResult.mock.calls[0][0] - // Should not include the single block notice since we have 2 valid blocks - expect(resultCall).not.toContain("Making multiple related changes") + // Should delegate to the class-based tool + expect(applyDiffToolClass.handle).toHaveBeenCalled() + expect(applyDiffToolClass.handle).toHaveBeenCalledWith( + mockCline, + mockBlock, + expect.objectContaining({ + askApproval: mockAskApproval, + handleError: mockHandleError, + pushToolResult: mockPushToolResult, + removeClosingTag: mockRemoveClosingTag, + toolProtocol: "native", + }), + ) }) - }) - describe("SEARCH block counting with non-string content", () => { - it("should handle diffItem.content being undefined", async () => { + it("should delegate to applyDiffToolClass.handle for legacy path/diff params", async () => { mockBlock = { params: { path: "test.ts", - diff: undefined, // This will result in undefined content + diff: "<<<<<<< SEARCH\nold\n=======\nnew\n>>>>>>> REPLACE", }, partial: false, } @@ -185,35 +177,30 @@ describe("multiApplyDiffTool", () => { mockRemoveClosingTag, ) - // Should complete without throwing an error - expect(mockPushToolResult).toHaveBeenCalled() - expect(mockHandleError).not.toHaveBeenCalled() + // Should delegate to the class-based tool + expect(applyDiffToolClass.handle).toHaveBeenCalled() + expect(applyDiffToolClass.handle).toHaveBeenCalledWith( + mockCline, + mockBlock, + expect.objectContaining({ + askApproval: mockAskApproval, + handleError: mockHandleError, + pushToolResult: mockPushToolResult, + removeClosingTag: mockRemoveClosingTag, + toolProtocol: "native", + }), + ) }) - it("should handle diffItem.content being null", async () => { + it("should handle undefined diff content by delegating to class-based tool", async () => { mockBlock = { params: { - args: ` - test.ts - - - - `, + path: "test.ts", + diff: undefined, }, partial: false, } - // Mock parseXml to return null content - const parseXml = await import("../../../utils/xml") - ;(parseXml.parseXml as any).mockReturnValue({ - file: { - path: "test.ts", - diff: { - content: null, - }, - }, - }) - await applyDiffTool( mockCline, mockBlock, @@ -223,35 +210,23 @@ describe("multiApplyDiffTool", () => { mockRemoveClosingTag, ) - // Should complete without throwing an error - expect(mockPushToolResult).toHaveBeenCalled() - expect(mockHandleError).not.toHaveBeenCalled() + // Should delegate to the class-based tool (which will handle the error) + expect(applyDiffToolClass.handle).toHaveBeenCalled() }) - it("should handle diffItem.content being a number", async () => { + it("should handle null diff content by delegating to class-based tool", async () => { mockBlock = { params: { args: ` test.ts - 123 + `, }, partial: false, } - // Mock parseXml to 
return number content - const parseXml = await import("../../../utils/xml") - ;(parseXml.parseXml as any).mockReturnValue({ - file: { - path: "test.ts", - diff: { - content: 123, // Number instead of string - }, - }, - }) - await applyDiffTool( mockCline, mockBlock, @@ -261,12 +236,11 @@ describe("multiApplyDiffTool", () => { mockRemoveClosingTag, ) - // Should complete without throwing an error - expect(mockPushToolResult).toHaveBeenCalled() - expect(mockHandleError).not.toHaveBeenCalled() + // Should delegate to the class-based tool + expect(applyDiffToolClass.handle).toHaveBeenCalled() }) - it("should correctly count SEARCH blocks when content is a valid string", async () => { + it("should delegate multiple SEARCH blocks to class-based tool", async () => { const diffContent = `<<<<<<< SEARCH old content ======= @@ -296,14 +270,11 @@ another new content mockRemoveClosingTag, ) - // Should complete successfully - expect(mockPushToolResult).toHaveBeenCalled() - const resultCall = mockPushToolResult.mock.calls[0][0] - // Should not include the single block notice since we have 2 blocks - expect(resultCall).not.toContain("Making multiple related changes") + // Should delegate to the class-based tool + expect(applyDiffToolClass.handle).toHaveBeenCalled() }) - it("should show single block notice when only one SEARCH block exists", async () => { + it("should delegate single SEARCH block to class-based tool", async () => { const diffContent = `<<<<<<< SEARCH old content ======= @@ -327,16 +298,13 @@ new content mockRemoveClosingTag, ) - // Should complete successfully - expect(mockPushToolResult).toHaveBeenCalled() - const resultCall = mockPushToolResult.mock.calls[0][0] - // Should include the single block notice - expect(resultCall).toContain("Making multiple related changes") + // Should delegate to the class-based tool + expect(applyDiffToolClass.handle).toHaveBeenCalled() }) }) describe("Edge cases for diff content", () => { - it("should handle empty diff array", async () => { + it("should handle empty diff by delegating to class-based tool", async () => { mockBlock = { params: { args: ` @@ -347,14 +315,6 @@ new content partial: false, } - const parseXml = await import("../../../utils/xml") - ;(parseXml.parseXml as any).mockReturnValue({ - file: { - path: "test.ts", - diff: [], - }, - }) - await applyDiffTool( mockCline, mockBlock, @@ -364,12 +324,12 @@ new content mockRemoveClosingTag, ) - // Should complete without error - expect(mockPushToolResult).toHaveBeenCalled() + // Should delegate to the class-based tool + expect(applyDiffToolClass.handle).toHaveBeenCalled() expect(mockHandleError).not.toHaveBeenCalled() }) - it("should handle mixed content types in diff array", async () => { + it("should handle mixed content types by delegating to class-based tool", async () => { mockBlock = { params: { args: ` @@ -382,20 +342,6 @@ new content partial: false, } - const parseXml = await import("../../../utils/xml") - ;(parseXml.parseXml as any).mockReturnValue({ - file: { - path: "test.ts", - diff: [ - { content: "<<<<<<< SEARCH\ntest\n=======\nreplaced\n>>>>>>> REPLACE" }, - { content: null }, - { content: undefined }, - { content: 42 }, - { content: "<<<<<<< SEARCH\nmore\n=======\nchanges\n>>>>>>> REPLACE" }, - ], - }, - }) - await applyDiffTool( mockCline, mockBlock, @@ -405,8 +351,8 @@ new content mockRemoveClosingTag, ) - // Should complete without error and count only valid string SEARCH blocks - expect(mockPushToolResult).toHaveBeenCalled() + // Should delegate to the class-based tool + 
expect(applyDiffToolClass.handle).toHaveBeenCalled() expect(mockHandleError).not.toHaveBeenCalled() }) }) diff --git a/src/core/tools/__tests__/readFileTool.spec.ts b/src/core/tools/__tests__/readFileTool.spec.ts index d22c163636f..c98211f9402 100644 --- a/src/core/tools/__tests__/readFileTool.spec.ts +++ b/src/core/tools/__tests__/readFileTool.spec.ts @@ -380,9 +380,9 @@ describe("read_file tool with maxReadFileLine setting", () => { // Execute const result = await executeReadFileTool({}, { maxReadFileLine: -1 }) - // Verify - just check that the result contains the expected elements - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) + // Verify - check that the result contains the expected native format elements + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Lines 1-5:`) }) it("should not show line snippet in approval message when maxReadFileLine is -1", async () => { @@ -418,17 +418,14 @@ describe("read_file tool with maxReadFileLine setting", () => { }, ) - // Verify - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) + // Verify - native format + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Code Definitions:`) - // Verify XML structure - expect(result).toContain("Showing only 0 of 5 total lines") - expect(result).toContain("") - expect(result).toContain("") + // Verify native structure + expect(result).toContain("Note: Showing only 0 of 5 total lines") expect(result).toContain(sourceCodeDef.trim()) - expect(result).toContain("") - expect(result).not.toContain(" { // Execute const result = await executeReadFileTool({}, { maxReadFileLine: 3 }) - // Verify - just check that the result contains the expected elements - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) - expect(result).toContain(``) - expect(result).toContain("Showing only 3 of 5 total lines") + // Verify - native format + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Lines 1-3:`) + expect(result).toContain(`Code Definitions:`) + expect(result).toContain("Note: Showing only 3 of 5 total lines") }) it("should truncate code definitions when file exceeds maxReadFileLine", async () => { @@ -471,17 +468,17 @@ describe("read_file tool with maxReadFileLine setting", () => { // Execute with maxReadFileLine = 30 const result = await executeReadFileTool({}, { maxReadFileLine: 30, totalLines: 100 }) - // Verify that only definitions within the first 30 lines are included - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) - expect(result).toContain(``) + // Verify - native format + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Lines 1-30:`) + expect(result).toContain(`Code Definitions:`) // Should include foo (starts at line 10) but not bar (starts at line 50) or baz (starts at line 80) expect(result).toContain("10--20 | function foo()") expect(result).not.toContain("50--60 | function bar()") expect(result).not.toContain("80--90 | function baz()") - expect(result).toContain("Showing only 30 of 100 total lines") + expect(result).toContain("Note: Showing only 30 of 100 total lines") }) it("should handle truncation when all definitions are beyond the line limit", async () => { @@ -499,10 +496,10 @@ describe("read_file tool with maxReadFileLine setting", () => { // Execute with maxReadFileLine = 30 const result = await executeReadFileTool({}, { maxReadFileLine: 30, totalLines: 100 }) - // Verify that only the 
header is included (all definitions filtered out) - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) - expect(result).toContain(``) + // Verify - native format + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Lines 1-30:`) + expect(result).toContain(`Code Definitions:`) expect(result).toContain("# file.txt") expect(result).not.toContain("50--60 | function foo()") expect(result).not.toContain("80--90 | function bar()") @@ -518,9 +515,9 @@ describe("read_file tool with maxReadFileLine setting", () => { // Execute const result = await executeReadFileTool({}, { maxReadFileLine: 10, totalLines: 5 }) - // Verify - just check that the result contains the expected elements - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) + // Verify - native format + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Lines 1-5:`) }) it("should read with extractTextFromFile when file has few lines", async () => { @@ -540,9 +537,9 @@ describe("read_file tool with maxReadFileLine setting", () => { // Execute const result = await executeReadFileTool({}, { maxReadFileLine: 5, totalLines: 3 }) - // Verify - just check that the result contains the expected elements - expect(result).toContain(`${testFilePath}`) - expect(result).toContain(``) + // Verify - native format + expect(result).toContain(`File: ${testFilePath}`) + expect(result).toContain(`Lines 1-3:`) }) }) @@ -556,8 +553,8 @@ describe("read_file tool with maxReadFileLine setting", () => { // Execute const result = await executeReadFileTool({}, { maxReadFileLine: 3, totalLines: 3 }) - // Verify - just check basic structure, the actual binary handling may vary - expect(result).toContain(`${testFilePath}`) + // Verify - native format for binary files + expect(result).toContain(`File: ${testFilePath}`) expect(typeof result).toBe("string") }) }) @@ -576,14 +573,14 @@ describe("read_file tool with maxReadFileLine setting", () => { }, ) - // Verify - just check that the result contains the expected elements - expect(rangeResult).toContain(`${testFilePath}`) - expect(rangeResult).toContain(``) + // Verify - native format + expect(rangeResult).toContain(`File: ${testFilePath}`) + expect(rangeResult).toContain(`Lines 2-4:`) }) }) }) -describe("read_file tool XML output structure", () => { +describe("read_file tool output structure", () => { // Test basic XML structure const testFilePath = "test/file.txt" const absoluteFilePath = "/test/file.txt" @@ -691,8 +688,8 @@ describe("read_file tool XML output structure", () => { return toolResult } - describe("Basic XML Structure Tests", () => { - it("should produce XML output with no unnecessary indentation", async () => { + describe("Basic Structure Tests", () => { + it("should produce native output with proper format", async () => { // Setup const numberedContent = "1 | Line 1\n2 | Line 2\n3 | Line 3\n4 | Line 4\n5 | Line 5" @@ -713,24 +710,19 @@ describe("read_file tool XML output structure", () => { // Execute const result = await executeReadFileTool() - // Verify - expect(result).toBe( - `\n${testFilePath}\n\n${numberedContent}\n\n`, - ) + // Verify native format + expect(result).toBe(`File: ${testFilePath}\nLines 1-5:\n${numberedContent}`) }) - it("should follow the correct XML structure format", async () => { + it("should follow the correct native structure format", async () => { // Setup mockInputContent = fileContent // Execute const result = await executeReadFileTool({}, { maxReadFileLine: -1 }) - // Verify 
using regex to check structure - const xmlStructureRegex = new RegExp( - `^\\n${testFilePath}\\n\\n.*\\n\\n$`, - "s", - ) - expect(result).toMatch(xmlStructureRegex) + // Verify using regex to check native structure + const nativeStructureRegex = new RegExp(`^File: ${testFilePath}\\nLines 1-5:\\n.*$`, "s") + expect(result).toMatch(nativeStructureRegex) }) it("should handle empty files correctly", async () => { @@ -754,10 +746,8 @@ describe("read_file tool XML output structure", () => { // Execute const result = await executeReadFileTool({}, { totalLines: 0 }) - // Verify - expect(result).toBe( - `\n${testFilePath}\nFile is empty\n\n`, - ) + // Verify native format for empty file + expect(result).toBe(`File: ${testFilePath}\nNote: File is empty`) }) describe("Total Image Memory Limit", () => { @@ -1406,7 +1396,7 @@ describe("read_file tool XML output structure", () => { }) describe("Error Handling Tests", () => { - it("should include error tag for invalid path", async () => { + it("should include error in output for invalid path", async () => { // Setup - missing path parameter const toolUse: ReadFileToolUse = { type: "tool_use", @@ -1426,17 +1416,17 @@ describe("read_file tool XML output structure", () => { toolProtocol: "xml", }) - // Verify - expect(toolResult).toBe(`Missing required parameter`) + // Verify - native format for error + expect(toolResult).toBe(`Error: Missing required parameter`) }) - it("should include error tag for RooIgnore error", async () => { + it("should include error for RooIgnore error", async () => { // Execute - skip addLineNumbers check as it returns early with an error const result = await executeReadFileTool({}, { validateAccess: false }) - // Verify + // Verify - native format for error expect(result).toBe( - `\n${testFilePath}Access to ${testFilePath} is blocked by the .rooignore file settings. You must try to continue in the task without using this file, or ask the user to update the .rooignore file.\n`, + `File: ${testFilePath}\nError: Access to ${testFilePath} is blocked by the .rooignore file settings. 
You must try to continue in the task without using this file, or ask the user to update the .rooignore file.`, ) }) }) @@ -1548,10 +1538,10 @@ describe("read_file tool with image support", () => { const textPart = (result as any[]).find((p) => p.type === "text")?.text const imagePart = (result as any[]).find((p) => p.type === "image") - // Verify text part - expect(textPart).toContain(`${imagePath}`) + // Verify text part - native format + expect(textPart).toContain(`File: ${imagePath}`) expect(textPart).not.toContain("") - expect(textPart).toContain(`Image file`) + expect(textPart).toContain(`Note: Image file`) // Verify image part expect(imagePart).toBeDefined() @@ -1570,10 +1560,10 @@ describe("read_file tool with image support", () => { const textPart = (result as any[]).find((p) => p.type === "text")?.text const imagePart = (result as any[]).find((p) => p.type === "image") - // Verify text part - expect(textPart).toContain(`${testImagePath}`) + // Verify text part - native format + expect(textPart).toContain(`File: ${testImagePath}`) expect(textPart).not.toContain(``) - expect(textPart).toContain(`Image file`) + expect(textPart).toContain(`Note: Image file`) // Verify image part expect(imagePart).toBeDefined() @@ -1591,7 +1581,8 @@ describe("read_file tool with image support", () => { const textArg = callArgs[0] const imagesArg = callArgs[1] - expect(textArg).toContain(`${testImagePath}`) + // Native format + expect(textArg).toContain(`File: ${testImagePath}`) expect(imagesArg).toBeDefined() expect(imagesArg).toBeInstanceOf(Array) expect(imagesArg!.length).toBe(1) @@ -1622,11 +1613,12 @@ describe("read_file tool with image support", () => { // Execute const result = await executeReadImageTool() - // When images are not supported, the tool should return just XML (not call formatResponse.toolResult) + // When images are not supported, the tool should return just text (not call formatResponse.toolResult) expect(toolResultMock).not.toHaveBeenCalled() expect(typeof result).toBe("string") - expect(result).toContain(`${testImagePath}`) - expect(result).toContain(`Image file`) + // Native format + expect(result).toContain(`File: ${testImagePath}`) + expect(result).toContain(`Note: Image file`) }) it("should include images when model supports images", async () => { @@ -1642,7 +1634,8 @@ describe("read_file tool with image support", () => { const textArg = callArgs[0] const imagesArg = callArgs[1] - expect(textArg).toContain(`${testImagePath}`) + // Native format + expect(textArg).toContain(`File: ${testImagePath}`) expect(imagesArg).toBeDefined() // Images should be included expect(imagesArg).toBeInstanceOf(Array) expect(imagesArg!.length).toBe(1) @@ -1656,11 +1649,12 @@ describe("read_file tool with image support", () => { // Execute const result = await executeReadImageTool() - // When supportsImages is undefined, should default to false and return just XML + // When supportsImages is undefined, should default to false and return just text expect(toolResultMock).not.toHaveBeenCalled() expect(typeof result).toBe("string") - expect(result).toContain(`${testImagePath}`) - expect(result).toContain(`Image file`) + // Native format + expect(result).toContain(`File: ${testImagePath}`) + expect(result).toContain(`Note: Image file`) }) it("should handle errors when reading image files", async () => { @@ -1686,8 +1680,8 @@ describe("read_file tool with image support", () => { toolProtocol: "xml", }) - // Verify error handling - expect(toolResult).toContain("Error reading image file: Failed to read 
image") + // Verify error handling - native format + expect(toolResult).toContain("Error: Error reading image file: Failed to read image") // Verify that say was called to show error to user expect(localMockCline.say).toHaveBeenCalledWith("error", expect.stringContaining("Failed to read image")) }) @@ -1722,9 +1716,9 @@ describe("read_file tool with image support", () => { // Execute const result = await executeReadImageTool(binaryPath) - // Verify + // Verify - native format for binary files expect(result).not.toContain("") - expect(result).toContain(' { - describe("Precedence Level 1: User Profile Setting", () => { - it("should use profile toolProtocol when explicitly set to xml", () => { + /** + * XML Protocol Deprecation: + * + * XML tool protocol has been fully deprecated. All models now use Native + * tool calling. User preferences and model defaults are ignored. + * + * Precedence: + * 1. Locked Protocol (for resumed tasks that used XML) + * 2. Native (always, for all new tasks) + */ + + describe("Locked Protocol (Precedence Level 0 - Highest Priority)", () => { + it("should return lockedProtocol when provided", () => { const settings: ProviderSettings = { - toolProtocol: "xml", - apiProvider: "anthropic", - } - const result = resolveToolProtocol(settings) - expect(result).toBe(TOOL_PROTOCOL.XML) - }) - - it("should use profile toolProtocol when explicitly set to native", () => { - const settings: ProviderSettings = { - toolProtocol: "native", - apiProvider: "anthropic", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, // Model supports native tools - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) - }) - - it("should override model default when profile setting is present", () => { - const settings: ProviderSettings = { - toolProtocol: "xml", + toolProtocol: "xml", // Ignored apiProvider: "openai-native", } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - defaultToolProtocol: "native", - supportsNativeTools: true, - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Profile setting wins - }) - - it("should override model capability when profile setting is present", () => { - const settings: ProviderSettings = { - toolProtocol: "xml", - apiProvider: "openai-native", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Profile setting wins - }) - }) - - describe("Precedence Level 2: Model Default", () => { - it("should use model defaultToolProtocol when no profile setting", () => { - const settings: ProviderSettings = { - apiProvider: "roo", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - defaultToolProtocol: "native", - supportsNativeTools: true, // Model must support native tools - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Model default wins when experiment is disabled - }) - - it("should override model capability when model default is present", () => { - const settings: ProviderSettings = { - apiProvider: "roo", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - 
supportsPromptCache: false, - defaultToolProtocol: "xml", - supportsNativeTools: true, - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Model default wins over capability + // lockedProtocol overrides everything + const result = resolveToolProtocol(settings, undefined, "native") + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) - }) - describe("Support Validation", () => { - it("should fall back to XML when model doesn't support native", () => { + it("should return XML lockedProtocol for resumed tasks that used XML", () => { const settings: ProviderSettings = { + toolProtocol: "native", // Ignored apiProvider: "anthropic", } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: false, - } - const result = resolveToolProtocol(settings, modelInfo) + // lockedProtocol forces XML for backward compatibility + const result = resolveToolProtocol(settings, undefined, "xml") expect(result).toBe(TOOL_PROTOCOL.XML) }) - it("should fall back to XML when user prefers native but model doesn't support it", () => { + it("should fall through to Native when lockedProtocol is undefined", () => { const settings: ProviderSettings = { - toolProtocol: "native", // User wants native + toolProtocol: "xml", // Ignored apiProvider: "anthropic", } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: false, // But model doesn't support it - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML due to lack of support - }) - - it("should fall back to XML when user prefers native but model support is undefined", () => { - const settings: ProviderSettings = { - toolProtocol: "native", // User wants native - apiProvider: "anthropic", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - // supportsNativeTools is undefined (not specified) - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML - undefined treated as unsupported - }) - }) - - describe("Precedence Level 3: Native Fallback", () => { - it("should use Native fallback when no model default is specified and model supports native", () => { - const settings: ProviderSettings = { - apiProvider: "anthropic", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native fallback + // undefined lockedProtocol should return native + const result = resolveToolProtocol(settings, undefined, undefined) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) }) - describe("Complete Precedence Chain", () => { - it("should respect full precedence: Profile > Model Default > Native Fallback", () => { - // Set up a scenario with all levels defined - const settings: ProviderSettings = { - toolProtocol: "native", // Level 1: User profile setting - apiProvider: "roo", - } - - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - defaultToolProtocol: "xml", // Level 2: Model default - supportsNativeTools: true, // Support check - } - - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Profile setting wins 
- }) - - it("should skip to model default when profile setting is undefined", () => { - const settings: ProviderSettings = { - apiProvider: "openai-native", - } - - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - defaultToolProtocol: "xml", // Level 2 - supportsNativeTools: true, // Support check - } - - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Model default wins - }) - - it("should skip to Native fallback when profile and model default are undefined", () => { - const settings: ProviderSettings = { - apiProvider: "openai-native", - } - - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, - } - - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native fallback - }) - - it("should skip to XML fallback when model info is unavailable", () => { + describe("Native Protocol Always Used For New Tasks", () => { + it("should always use native for new tasks", () => { const settings: ProviderSettings = { apiProvider: "anthropic", } - - const result = resolveToolProtocol(settings, undefined) - expect(result).toBe(TOOL_PROTOCOL.XML) // XML fallback (no model info means no native support) + const result = resolveToolProtocol(settings) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) - }) - describe("Locked Protocol (Precedence Level 0)", () => { - it("should return lockedProtocol when provided, ignoring all other settings", () => { + it("should use native even when user preference is XML (user prefs ignored)", () => { const settings: ProviderSettings = { - toolProtocol: "xml", // User wants XML + toolProtocol: "xml", // User wants XML - ignored apiProvider: "openai-native", } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, - defaultToolProtocol: "xml", - } - // lockedProtocol overrides everything - const result = resolveToolProtocol(settings, modelInfo, "native") + const result = resolveToolProtocol(settings) expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) - it("should return XML lockedProtocol even when model supports native", () => { + it("should use native for OpenAI compatible provider", () => { const settings: ProviderSettings = { - toolProtocol: "native", // User wants native - apiProvider: "anthropic", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, // Model supports native - defaultToolProtocol: "native", - } - // lockedProtocol forces XML - const result = resolveToolProtocol(settings, modelInfo, "xml") - expect(result).toBe(TOOL_PROTOCOL.XML) - }) - - it("should fall through to normal resolution when lockedProtocol is undefined", () => { - const settings: ProviderSettings = { - toolProtocol: "xml", - apiProvider: "anthropic", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, + apiProvider: "openai", } - // undefined lockedProtocol should use normal precedence - const result = resolveToolProtocol(settings, modelInfo, undefined) - expect(result).toBe(TOOL_PROTOCOL.XML) // User setting wins + const result = resolveToolProtocol(settings, openAiModelInfoSaneDefaults) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) }) @@ -275,7 +79,7 @@ describe("resolveToolProtocol", () => { it("should 
handle missing provider name gracefully", () => { const settings: ProviderSettings = {} const result = resolveToolProtocol(settings) - expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML (no model info) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Always native now }) it("should handle undefined model info gracefully", () => { @@ -283,26 +87,18 @@ describe("resolveToolProtocol", () => { apiProvider: "openai-native", } const result = resolveToolProtocol(settings, undefined) - expect(result).toBe(TOOL_PROTOCOL.XML) // XML fallback (no model info) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Always native now }) - it("should fall back to XML when model doesn't support native", () => { - const settings: ProviderSettings = { - apiProvider: "roo", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: false, // Model doesn't support native - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML due to lack of support + it("should handle empty settings", () => { + const settings: ProviderSettings = {} + const result = resolveToolProtocol(settings) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Always native now }) }) describe("Real-world Scenarios", () => { - it("should use Native fallback for models without defaultToolProtocol", () => { + it("should use Native for OpenAI models", () => { const settings: ProviderSettings = { apiProvider: "openai-native", } @@ -313,10 +109,10 @@ describe("resolveToolProtocol", () => { supportsNativeTools: true, } const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native fallback + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) - it("should use XML for Claude models with Anthropic provider", () => { + it("should use Native for Claude models", () => { const settings: ProviderSettings = { apiProvider: "anthropic", } @@ -324,65 +120,39 @@ describe("resolveToolProtocol", () => { maxTokens: 8192, contextWindow: 200000, supportsPromptCache: true, - supportsNativeTools: false, - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) - }) - - it("should allow user to force XML on native-supporting model", () => { - const settings: ProviderSettings = { - toolProtocol: "xml", // User explicitly wants XML - apiProvider: "openai-native", - } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: true, // Model supports native but user wants XML - defaultToolProtocol: "native", + supportsNativeTools: true, } const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // User preference wins + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) - it("should not allow user to force native when model doesn't support it", () => { + it("should honor locked protocol for resumed tasks that used XML", () => { const settings: ProviderSettings = { - toolProtocol: "native", // User wants native apiProvider: "anthropic", } - const modelInfo: ModelInfo = { - maxTokens: 4096, - contextWindow: 128000, - supportsPromptCache: false, - supportsNativeTools: false, // Model doesn't support native - } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Falls back to XML due to lack of support + // Task was started when XML was used, so it's locked to XML + const result = 
resolveToolProtocol(settings, undefined, "xml") + expect(result).toBe(TOOL_PROTOCOL.XML) }) + }) - it("should use model default when available", () => { + describe("Backward Compatibility - User Preferences Ignored", () => { + it("should ignore user preference for XML", () => { const settings: ProviderSettings = { - apiProvider: "roo", - } - const modelInfo: ModelInfo = { - maxTokens: 8192, - contextWindow: 200000, - supportsPromptCache: true, - defaultToolProtocol: "xml", - supportsNativeTools: true, + toolProtocol: "xml", // User explicitly wants XML - ignored + apiProvider: "openai-native", } - const result = resolveToolProtocol(settings, modelInfo) - expect(result).toBe(TOOL_PROTOCOL.XML) // Model default wins + const result = resolveToolProtocol(settings) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Native is always used }) - it("should use native tools for OpenAI compatible provider with default model info", () => { + it("should return native regardless of user preference", () => { const settings: ProviderSettings = { - apiProvider: "openai", + toolProtocol: "native", // User preference - ignored but happens to match + apiProvider: "anthropic", } - // Using the actual openAiModelInfoSaneDefaults to verify the fix - const result = resolveToolProtocol(settings, openAiModelInfoSaneDefaults) - expect(result).toBe(TOOL_PROTOCOL.NATIVE) // Should use native tools by default + const result = resolveToolProtocol(settings) + expect(result).toBe(TOOL_PROTOCOL.NATIVE) }) }) }) diff --git a/src/utils/resolveToolProtocol.ts b/src/utils/resolveToolProtocol.ts index 2f8ddea0c34..92041fbeaf2 100644 --- a/src/utils/resolveToolProtocol.ts +++ b/src/utils/resolveToolProtocol.ts @@ -1,5 +1,5 @@ import { ToolProtocol, TOOL_PROTOCOL } from "@roo-code/types" -import type { ProviderSettings, ModelInfo } from "@roo-code/types" +import type { ProviderSettings } from "@roo-code/types" import type { Anthropic } from "@anthropic-ai/sdk" import { findLast, findLastIndex } from "../shared/array" @@ -12,48 +12,35 @@ type ApiMessageForDetection = Anthropic.MessageParam & { } /** - * Resolve the effective tool protocol based on the precedence hierarchy: + * Resolve the effective tool protocol. * - * 0. Locked Protocol (task-level lock, if provided - highest priority) - * 1. User Preference - Per-Profile (explicit profile setting) - * 2. Model Default (defaultToolProtocol in ModelInfo) - * 3. Native Fallback (final fallback) + * **Deprecation Note (XML Protocol):** + * XML tool protocol has been deprecated. All models now use Native tool calling. + * User/profile preferences (`providerSettings.toolProtocol`) and model defaults + * (`modelInfo.defaultToolProtocol`) are ignored. * - * Then check support: if protocol is "native" but model doesn't support it, use XML. + * Precedence: + * 1. Locked Protocol (task-level lock for resumed tasks - highest priority) + * 2. 
Native (always, for all new tasks) * - * @param providerSettings - The provider settings for the current profile - * @param modelInfo - Optional model information containing capabilities + * @param _providerSettings - The provider settings (toolProtocol field is ignored) + * @param _modelInfo - Unused, kept for API compatibility * @param lockedProtocol - Optional task-locked protocol that takes absolute precedence * @returns The resolved tool protocol (either "xml" or "native") */ export function resolveToolProtocol( - providerSettings: ProviderSettings, - modelInfo?: ModelInfo, + _providerSettings: ProviderSettings, + _modelInfo?: unknown, lockedProtocol?: ToolProtocol, ): ToolProtocol { - // 0. Locked Protocol - task-level lock takes absolute precedence - // This ensures tasks continue using their original protocol even if settings change + // 1. Locked Protocol - task-level lock takes absolute precedence + // This ensures resumed tasks continue using their original protocol if (lockedProtocol) { return lockedProtocol } - // If model doesn't support native tools, return XML immediately - // Treat undefined as unsupported (only allow native when explicitly true) - if (modelInfo?.supportsNativeTools !== true) { - return TOOL_PROTOCOL.XML - } - - // 1. User Preference - Per-Profile (explicit profile setting, highest priority) - if (providerSettings.toolProtocol) { - return providerSettings.toolProtocol - } - - // 2. Model Default - model's preferred protocol - if (modelInfo?.defaultToolProtocol) { - return modelInfo.defaultToolProtocol - } - - // 3. Native Fallback + // 2. Always return Native protocol for new tasks + // All models now support native tools; XML is deprecated return TOOL_PROTOCOL.NATIVE } diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 72518c783f8..00092fea9d1 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -38,8 +38,6 @@ import { vercelAiGatewayDefaultModelId, deepInfraDefaultModelId, minimaxDefaultModelId, - type ToolProtocol, - TOOL_PROTOCOL, } from "@roo-code/types" import { vscode } from "@src/utils/vscode" @@ -413,17 +411,6 @@ const ApiOptions = ({ } }, [selectedProvider]) - // Calculate the default protocol that would be used if toolProtocol is not set - // Mirrors the simplified logic in resolveToolProtocol.ts: - // 1. User preference (toolProtocol) - handled by the select value binding - // 2. Model default - use if available - // 3. Native fallback - const defaultProtocol = selectedModelInfo?.defaultToolProtocol || TOOL_PROTOCOL.NATIVE - - // Show the tool protocol selector when model supports native tools. - // For OpenAI Compatible providers we always show it so users can force XML/native explicitly. - const showToolProtocolSelector = selectedProvider === "openai" || selectedModelInfo?.supportsNativeTools === true - // Convert providers to SearchableSelect options const providerOptions = useMemo(() => { // First filter by organization allow list @@ -938,39 +925,6 @@ const ApiOptions = ({ )} - {showToolProtocolSelector && ( -
- - -
- {t("settings:toolProtocol.description")} -
-
- )} )}
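For reference, a minimal sketch of the behavior the updated resolveToolProtocol now guarantees, using only the exported names visible in this patch (resolveToolProtocol, TOOL_PROTOCOL, ProviderSettings); the relative import path is an assumption and is not part of the diff:

import { TOOL_PROTOCOL, type ProviderSettings } from "@roo-code/types"
import { resolveToolProtocol } from "../resolveToolProtocol" // path assumed; the function lives in src/utils/resolveToolProtocol.ts

const settings: ProviderSettings = { apiProvider: "anthropic", toolProtocol: "xml" }

// New tasks: the per-profile toolProtocol preference and any model info are ignored.
console.log(resolveToolProtocol(settings) === TOOL_PROTOCOL.NATIVE) // true

// Resumed tasks that originally ran with XML stay locked to XML.
console.log(resolveToolProtocol(settings, undefined, "xml") === TOOL_PROTOCOL.XML) // true

// An undefined lock falls through to native.
console.log(resolveToolProtocol(settings, undefined, undefined) === TOOL_PROTOCOL.NATIVE) // true

The locked-protocol parameter is the only remaining path to XML: it exists so that tasks resumed from a conversation that already used XML tool calls keep their original protocol, while every new task resolves to native regardless of profile settings or model defaults.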