diff --git a/packages/types/src/providers/anthropic.ts b/packages/types/src/providers/anthropic.ts
index b6c3614eee3..70f880a24ef 100644
--- a/packages/types/src/providers/anthropic.ts
+++ b/packages/types/src/providers/anthropic.ts
@@ -12,6 +12,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -34,6 +35,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens (≤200K context)
 		outputPrice: 15.0, // $15 per million output tokens (≤200K context)
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -56,6 +58,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 5.0, // $5 per million input tokens
 		outputPrice: 25.0, // $25 per million output tokens
 		cacheWritesPrice: 6.25, // $6.25 per million tokens
@@ -68,6 +71,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 15.0, // $15 per million input tokens
 		outputPrice: 75.0, // $75 per million output tokens
 		cacheWritesPrice: 18.75, // $18.75 per million tokens
@@ -80,6 +84,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 15.0, // $15 per million input tokens
 		outputPrice: 75.0, // $75 per million output tokens
 		cacheWritesPrice: 18.75, // $18.75 per million tokens
@@ -92,6 +97,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -105,6 +111,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -116,6 +123,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 3.0, // $3 per million input tokens
 		outputPrice: 15.0, // $15 per million output tokens
 		cacheWritesPrice: 3.75, // $3.75 per million tokens
@@ -127,6 +135,7 @@ export const anthropicModels = {
 		supportsImages: false,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25,
@@ -138,6 +147,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 15.0,
 		outputPrice: 75.0,
 		cacheWritesPrice: 18.75,
@@ -149,6 +159,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 0.25,
 		outputPrice: 1.25,
 		cacheWritesPrice: 0.3,
@@ -160,6 +171,7 @@ export const anthropicModels = {
 		supportsImages: true,
 		supportsPromptCache: true,
 		supportsNativeTools: true,
+		defaultToolProtocol: "native",
 		inputPrice: 1.0,
 		outputPrice: 5.0,
 		cacheWritesPrice: 1.25,
diff --git a/src/api/providers/__tests__/anthropic.spec.ts b/src/api/providers/__tests__/anthropic.spec.ts
index 8b229212d47..5c0c1632b4c 100644
--- a/src/api/providers/__tests__/anthropic.spec.ts
+++ b/src/api/providers/__tests__/anthropic.spec.ts
@@ -3,6 +3,15 @@
 import { AnthropicHandler } from "../anthropic"
 import { ApiHandlerOptions } from "../../../shared/api"
 
+// Mock TelemetryService
+vitest.mock("@roo-code/telemetry", () => ({
+	TelemetryService: {
+		instance: {
+			captureException: vitest.fn(),
+		},
+	},
+}))
+
 const mockCreate = vitest.fn()
 
 vitest.mock("@anthropic-ai/sdk", () => {
@@ -411,11 +420,11 @@ describe("AnthropicHandler", () => {
 			},
 		]
 
-		it("should include tools in request when toolProtocol is native", async () => {
+		it("should include tools in request by default (native is default)", async () => {
+			// Handler uses native protocol by default via model's defaultToolProtocol
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 			})
 
 			// Consume the stream to trigger the API call
@@ -443,10 +452,15 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should not include tools when toolProtocol is xml", async () => {
-			const stream = handler.createMessage(systemPrompt, messages, {
+			// Create handler with xml tool protocol in options
+			const xmlHandler = new AnthropicHandler({
+				...mockOptions,
+				toolProtocol: "xml",
+			})
+
+			const stream = xmlHandler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "xml",
 			})
 
 			// Consume the stream to trigger the API call
@@ -463,9 +477,9 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should not include tools when no tools are provided", async () => {
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
-				toolProtocol: "native",
 			})
 
 			// Consume the stream to trigger the API call
@@ -482,10 +496,10 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should convert tool_choice 'auto' to Anthropic format", async () => {
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 				tool_choice: "auto",
 			})
 
@@ -503,10 +517,10 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should convert tool_choice 'required' to Anthropic 'any' format", async () => {
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 				tool_choice: "required",
 			})
 
@@ -524,10 +538,10 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should omit both tools and tool_choice when tool_choice is 'none'", async () => {
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 				tool_choice: "none",
 			})
 
@@ -552,10 +566,10 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should convert specific tool_choice to Anthropic 'tool' format", async () => {
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 				tool_choice: { type: "function" as const, function: { name: "get_weather" } },
 			})
 
@@ -573,10 +587,10 @@ describe("AnthropicHandler", () => {
 		})
 
 		it("should enable parallel tool calls when parallelToolCalls is true", async () => {
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 				tool_choice: "auto",
 				parallelToolCalls: true,
 			})
@@ -618,10 +632,10 @@ describe("AnthropicHandler", () => {
 				},
 			}))
 
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 			})
 
 			const chunks: any[] = []
@@ -685,10 +699,10 @@ describe("AnthropicHandler", () => {
 				},
 			}))
 
+			// Handler uses native protocol by default
 			const stream = handler.createMessage(systemPrompt, messages, {
 				taskId: "test-task",
 				tools: mockTools,
-				toolProtocol: "native",
 			})
 
 			const chunks: any[] = []
diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts
index 2bf48d2562e..38426d6295e 100644
--- a/src/api/providers/anthropic.ts
+++ b/src/api/providers/anthropic.ts
@@ -9,13 +9,17 @@ import {
 	anthropicDefaultModelId,
 	anthropicModels,
 	ANTHROPIC_DEFAULT_MAX_TOKENS,
+	ApiProviderError,
+	TOOL_PROTOCOL,
 } from "@roo-code/types"
+import { TelemetryService } from "@roo-code/telemetry"
 
 import type { ApiHandlerOptions } from "../../shared/api"
 
 import { ApiStream } from "../transform/stream"
 import { getModelParams } from "../transform/model-params"
 import { filterNonAnthropicBlocks } from "../transform/anthropic-filter"
+import { resolveToolProtocol } from "../../utils/resolveToolProtocol"
 
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
@@ -25,6 +29,7 @@ import { convertOpenAIToolsToAnthropic } from "../../core/prompts/tools/native-t
 export class AnthropicHandler extends BaseProvider implements SingleCompletionHandler {
 	private options: ApiHandlerOptions
 	private client: Anthropic
+	private readonly providerName = "Anthropic"
 
 	constructor(options: ApiHandlerOptions) {
 		super()
@@ -65,12 +70,15 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 			betas.push("context-1m-2025-08-07")
 		}
 
-		// Prepare native tool parameters if tools are provided and protocol is not XML
+		// Enable native tools by default using resolveToolProtocol (which checks model's defaultToolProtocol)
+		// This matches OpenRouter's approach of always including tools when provided
 		// Also exclude tools when tool_choice is "none" since that means "don't use tools"
+		const model = this.getModel()
+		const toolProtocol = resolveToolProtocol(this.options, model.info)
 		const shouldIncludeNativeTools =
 			metadata?.tools &&
 			metadata.tools.length > 0 &&
-			metadata?.toolProtocol !== "xml" &&
+			toolProtocol === TOOL_PROTOCOL.NATIVE &&
 			metadata?.tool_choice !== "none"
 
 		const nativeToolParams = shouldIncludeNativeTools
@@ -110,70 +118,94 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 				const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1
 				const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1
 
-				stream = await this.client.messages.create(
-					{
-						model: modelId,
-						max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
-						temperature,
-						thinking,
-						// Setting cache breakpoint for system prompt so new tasks can reuse it.
-						system: [{ text: systemPrompt, type: "text", cache_control: cacheControl }],
-						messages: sanitizedMessages.map((message, index) => {
-							if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
-								return {
-									...message,
-									content:
-										typeof message.content === "string"
-											? [{ type: "text", text: message.content, cache_control: cacheControl }]
-											: message.content.map((content, contentIndex) =>
-													contentIndex === message.content.length - 1
-														? { ...content, cache_control: cacheControl }
-														: content,
-												),
-								}
-							}
-							return message
-						}),
-						stream: true,
-						...nativeToolParams,
-					},
-					(() => {
-						// prompt caching: https://x.com/alexalbert__/status/1823751995901272068
-						// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
-						// https://github.com/anthropics/anthropic-sdk-typescript/commit/c920b77fc67bd839bfeb6716ceab9d7c9bbe7393
-
-						// Then check for models that support prompt caching
-						switch (modelId) {
-							case "claude-sonnet-4-5":
-							case "claude-sonnet-4-20250514":
-							case "claude-opus-4-5-20251101":
-							case "claude-opus-4-1-20250805":
-							case "claude-opus-4-20250514":
-							case "claude-3-7-sonnet-20250219":
-							case "claude-3-5-sonnet-20241022":
-							case "claude-3-5-haiku-20241022":
-							case "claude-3-opus-20240229":
-							case "claude-haiku-4-5-20251001":
-							case "claude-3-haiku-20240307":
-								betas.push("prompt-caching-2024-07-31")
-								return { headers: { "anthropic-beta": betas.join(",") } }
-							default:
-								return undefined
-						}
-					})(),
-				)
+				try {
+					stream = await this.client.messages.create(
+						{
+							model: modelId,
+							max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
+							temperature,
+							thinking,
+							// Setting cache breakpoint for system prompt so new tasks can reuse it.
+							system: [{ text: systemPrompt, type: "text", cache_control: cacheControl }],
+							messages: sanitizedMessages.map((message, index) => {
+								if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) {
+									return {
+										...message,
+										content:
+											typeof message.content === "string"
+												? [{ type: "text", text: message.content, cache_control: cacheControl }]
+												: message.content.map((content, contentIndex) =>
+														contentIndex === message.content.length - 1
+															? { ...content, cache_control: cacheControl }
+															: content,
+													),
+									}
+								}
+								return message
+							}),
+							stream: true,
+							...nativeToolParams,
+						},
+						(() => {
+							// prompt caching: https://x.com/alexalbert__/status/1823751995901272068
+							// https://github.com/anthropics/anthropic-sdk-typescript?tab=readme-ov-file#default-headers
+							// https://github.com/anthropics/anthropic-sdk-typescript/commit/c920b77fc67bd839bfeb6716ceab9d7c9bbe7393
+
+							// Then check for models that support prompt caching
+							switch (modelId) {
+								case "claude-sonnet-4-5":
+								case "claude-sonnet-4-20250514":
+								case "claude-opus-4-5-20251101":
+								case "claude-opus-4-1-20250805":
+								case "claude-opus-4-20250514":
+								case "claude-3-7-sonnet-20250219":
+								case "claude-3-5-sonnet-20241022":
+								case "claude-3-5-haiku-20241022":
+								case "claude-3-opus-20240229":
+								case "claude-haiku-4-5-20251001":
+								case "claude-3-haiku-20240307":
+									betas.push("prompt-caching-2024-07-31")
+									return { headers: { "anthropic-beta": betas.join(",") } }
+								default:
+									return undefined
+							}
+						})(),
+					)
+				} catch (error) {
+					TelemetryService.instance.captureException(
+						new ApiProviderError(
+							error instanceof Error ? error.message : String(error),
+							this.providerName,
+							modelId,
+							"createMessage",
+						),
+					)
+					throw error
+				}
 				break
 			}
 			default: {
-				stream = (await this.client.messages.create({
-					model: modelId,
-					max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
-					temperature,
-					system: [{ text: systemPrompt, type: "text" }],
-					messages: sanitizedMessages,
-					stream: true,
-					...nativeToolParams,
-				})) as any
+				try {
+					stream = (await this.client.messages.create({
+						model: modelId,
+						max_tokens: maxTokens ?? ANTHROPIC_DEFAULT_MAX_TOKENS,
+						temperature,
+						system: [{ text: systemPrompt, type: "text" }],
+						messages: sanitizedMessages,
+						stream: true,
+						...nativeToolParams,
+					})) as any
+				} catch (error) {
+					TelemetryService.instance.captureException(
+						new ApiProviderError(
+							error instanceof Error ? error.message : String(error),
+							this.providerName,
+							modelId,
+							"createMessage",
+						),
+					)
+					throw error
+				}
 				break
 			}
 		}
@@ -390,14 +422,27 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 	async completePrompt(prompt: string) {
 		let { id: model, temperature } = this.getModel()
 
-		const message = await this.client.messages.create({
-			model,
-			max_tokens: ANTHROPIC_DEFAULT_MAX_TOKENS,
-			thinking: undefined,
-			temperature,
-			messages: [{ role: "user", content: prompt }],
-			stream: false,
-		})
+		let message
+		try {
+			message = await this.client.messages.create({
+				model,
+				max_tokens: ANTHROPIC_DEFAULT_MAX_TOKENS,
+				thinking: undefined,
+				temperature,
+				messages: [{ role: "user", content: prompt }],
+				stream: false,
+			})
+		} catch (error) {
+			TelemetryService.instance.captureException(
+				new ApiProviderError(
+					error instanceof Error ? error.message : String(error),
+					this.providerName,
+					model,
+					"completePrompt",
+				),
+			)
+			throw error
+		}
 
 		const content = message.content.find(({ type }) => type === "text")
 		return content?.type === "text" ? content.text : ""
diff --git a/src/core/task/__tests__/Task.spec.ts b/src/core/task/__tests__/Task.spec.ts
index 4bae9c49d09..e3af7953c7b 100644
--- a/src/core/task/__tests__/Task.spec.ts
+++ b/src/core/task/__tests__/Task.spec.ts
@@ -985,6 +985,7 @@ describe("Cline", () => {
 			postStateToWebview: vi.fn().mockResolvedValue(undefined),
 			postMessageToWebview: vi.fn().mockResolvedValue(undefined),
 			updateTaskHistory: vi.fn().mockResolvedValue(undefined),
+			getMcpHub: vi.fn().mockReturnValue(undefined),
 		}
 
 		// Get the mocked delay function
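Reviewer note: resolveToolProtocol is imported from src/utils/resolveToolProtocol in the provider change above, but its implementation is not part of this diff. The sketch below is only a guess at the resolution order the change assumes (explicit handler option first, then the model's new defaultToolProtocol, then the legacy XML fallback); the import paths and the ToolProtocol/ModelInfo type names are illustrative, not confirmed by this PR.

import { TOOL_PROTOCOL, type ModelInfo, type ToolProtocol } from "@roo-code/types"
import type { ApiHandlerOptions } from "../shared/api"

// Hypothetical sketch only; see src/utils/resolveToolProtocol.ts for the real logic.
export function resolveToolProtocol(options: ApiHandlerOptions, modelInfo: ModelInfo): ToolProtocol {
	// 1. An explicitly configured protocol on the handler options wins.
	//    (This is what the rewritten "xml" test relies on when it constructs
	//    a new AnthropicHandler with toolProtocol: "xml".)
	if (options.toolProtocol) {
		return options.toolProtocol
	}
	// 2. Otherwise fall back to the model's declared default, e.g. the
	//    defaultToolProtocol: "native" entries added in packages/types above.
	if (modelInfo.defaultToolProtocol) {
		return modelInfo.defaultToolProtocol
	}
	// 3. Legacy behavior: prompt-based XML tool calling (assumed constant name).
	return TOOL_PROTOCOL.XML
}

Under that assumption, toolProtocol === TOOL_PROTOCOL.NATIVE holds for every Anthropic model touched in this PR unless the user explicitly opts into XML, which is exactly the behavior the updated tests exercise.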