diff --git a/litellm/llms/anthropic/experimental_pass_through/messages/handler.py b/litellm/llms/anthropic/experimental_pass_through/messages/handler.py index 5b215c1fe54..aa73ab18f90 100644 --- a/litellm/llms/anthropic/experimental_pass_through/messages/handler.py +++ b/litellm/llms/anthropic/experimental_pass_through/messages/handler.py @@ -24,6 +24,10 @@ from litellm.types.router import GenericLiteLLMParams from litellm.utils import ProviderConfigManager, client +from litellm.litellm_core_utils.prompt_templates.common_utils import ( + DEFAULT_ASSISTANT_CONTINUE_MESSAGE, +) + from ..adapters.handler import LiteLLMMessagesToCompletionTransformationHandler from ..responses_adapters.handler import LiteLLMMessagesToResponsesAPIHandler from .utils import AnthropicMessagesRequestUtils, mock_response @@ -49,6 +53,58 @@ def _should_route_to_responses_api(custom_llm_provider: Optional[str]) -> bool: ################################################# +def _sanitize_anthropic_messages(messages: List[Dict]) -> List[Dict]: + """ + Sanitize messages for the /v1/messages endpoint. + + The Anthropic API can return assistant messages with empty text blocks + alongside tool_use blocks (e.g., {"type": "text", "text": ""}). While + the API returns these, it rejects them when sent back in subsequent + requests with "text content blocks must be non-empty". + + This is particularly common in multi-turn tool-use conversations (e.g., + Claude Code / Agent SDK) where the model starts a text block but + immediately switches to a tool_use block. + + The /v1/chat/completions path already handles this via + process_empty_text_blocks() in factory.py, but the /v1/messages path + was missing sanitization. + """ + for i, message in enumerate(messages): + content = message.get("content") + if not isinstance(content, list): + continue + + # Filter out empty text blocks, keeping non-empty text and other types. + # Use `(... or "")` to guard against None text values. 
+ filtered = [ + block + for block in content + if not ( + isinstance(block, dict) + and block.get("type") == "text" + and not (block.get("text") or "").strip() + ) + ] + + # Only update if we actually removed something. + # Avoid mutating the caller's dicts — create a shallow copy. + if len(filtered) < len(content): + if len(filtered) > 0: + messages[i] = {**message, "content": filtered} + else: + # All blocks were empty text — replace with a continuation + # message rather than leaving empty blocks that trigger 400 + # errors. Matches behavior of process_empty_text_blocks() + # in factory.py. + messages[i] = { + **message, + "content": [{"type": "text", "text": DEFAULT_ASSISTANT_CONTINUE_MESSAGE.get("content", "Please continue.")}], + } + + return messages + + async def _execute_pre_request_hooks( model: str, messages: List[Dict], @@ -137,6 +193,10 @@ async def anthropic_messages( """ Async: Make llm api request in Anthropic /messages API spec """ + # Sanitize empty text blocks from messages before processing. 
class TestSanitizeAnthropicMessages:
    """Unit tests covering _sanitize_anthropic_messages empty-text-block stripping."""

    def test_strips_empty_text_block_alongside_tool_use(self):
        """Typical failure mode: an empty text block preceding a tool_use block."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        msgs = [
            {"role": "user", "content": "Use the bash tool to list files"},
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": ""},
                    {"type": "tool_use", "id": "toolu_123", "name": "Bash", "input": {"cmd": "ls"}},
                ],
            },
        ]
        sanitized = _sanitize_anthropic_messages(msgs)
        assistant_msg = sanitized[1]
        assert len(assistant_msg["content"]) == 1
        assert assistant_msg["content"][0]["type"] == "tool_use"

    def test_strips_whitespace_only_text_block(self):
        """Blocks whose text is only whitespace are dropped as well."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        msgs = [
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": " \n  "},
                    {"type": "tool_use", "id": "toolu_123", "name": "Bash", "input": {}},
                ],
            },
        ]
        sanitized = _sanitize_anthropic_messages(msgs)
        assert len(sanitized[0]["content"]) == 1
        assert sanitized[0]["content"][0]["type"] == "tool_use"

    def test_preserves_non_empty_text_blocks(self):
        """Real text blocks survive sanitization untouched."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        msgs = [
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": "I'll run that for you."},
                    {"type": "tool_use", "id": "toolu_123", "name": "Bash", "input": {}},
                ],
            },
        ]
        sanitized = _sanitize_anthropic_messages(msgs)
        assert len(sanitized[0]["content"]) == 2

    def test_replaces_all_empty_blocks_with_continuation(self):
        """When every block is empty text, a continuation message is substituted."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )
        from litellm.litellm_core_utils.prompt_templates.common_utils import (
            DEFAULT_ASSISTANT_CONTINUE_MESSAGE,
        )

        msgs = [
            {
                "role": "assistant",
                "content": [{"type": "text", "text": ""}],
            },
        ]
        sanitized = _sanitize_anthropic_messages(msgs)
        assert len(sanitized[0]["content"]) == 1
        assert sanitized[0]["content"][0]["type"] == "text"
        assert sanitized[0]["content"][0]["text"] == DEFAULT_ASSISTANT_CONTINUE_MESSAGE.get("content", "Please continue.")

    def test_handles_string_content(self):
        """Plain string content is left alone."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        msgs = [{"role": "user", "content": "Hello"}]
        sanitized = _sanitize_anthropic_messages(msgs)
        assert sanitized[0]["content"] == "Hello"

    def test_handles_user_messages_too(self):
        """Sanitization applies to user-role content lists, not just assistant ones."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        msgs = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": ""},
                    {"type": "tool_result", "tool_use_id": "toolu_123", "content": "file1.txt"},
                ],
            },
        ]
        sanitized = _sanitize_anthropic_messages(msgs)
        assert len(sanitized[0]["content"]) == 1
        assert sanitized[0]["content"][0]["type"] == "tool_result"

    def test_handles_none_text_value(self):
        """A None text value counts as empty and must not raise."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        msgs = [
            {
                "role": "assistant",
                "content": [
                    {"type": "text", "text": None},
                    {"type": "tool_use", "id": "toolu_123", "name": "Bash", "input": {}},
                ],
            },
        ]
        sanitized = _sanitize_anthropic_messages(msgs)
        assert len(sanitized[0]["content"]) == 1
        assert sanitized[0]["content"][0]["type"] == "tool_use"

    def test_does_not_mutate_original_message(self):
        """The caller's content list is left intact; changes land in copies."""
        from litellm.llms.anthropic.experimental_pass_through.messages.handler import (
            _sanitize_anthropic_messages,
        )

        shared_content = [
            {"type": "text", "text": ""},
            {"type": "tool_use", "id": "toolu_123", "name": "Bash", "input": {}},
        ]
        msgs = [{"role": "assistant", "content": shared_content}]
        sanitized = _sanitize_anthropic_messages(msgs)
        # The original list keeps both blocks.
        assert len(shared_content) == 2
        # The sanitized message carries the filtered copy.
        assert len(sanitized[0]["content"]) == 1