From 1d85623b668a7b8f889fb5c66b72f90d0004f4a7 Mon Sep 17 00:00:00 2001 From: Tim Elfrink Date: Mon, 15 Sep 2025 10:58:36 +0200 Subject: [PATCH 1/4] Add comprehensive tests for Bedrock Converse requestMetadata support - Test requestMetadata parameter support in get_supported_openai_params - Test transformation to top-level field in Bedrock API request - Test validation of AWS constraints: max 16 items, key/value length limits - Test character set validation for keys and values - Cover edge cases including empty values and special characters - Ensure compatibility with existing test patterns --- .../chat/test_converse_transformation.py | 447 ++++++++++++++---- 1 file changed, 354 insertions(+), 93 deletions(-) diff --git a/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py b/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py index 257f5be3ee9c..39d1d89b0fda 100644 --- a/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py +++ b/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py @@ -1599,7 +1599,7 @@ async def test_no_cache_control_no_cache_point(): def test_guarded_text_wraps_in_guardrail_converse_content(): """Test that guarded_text content type gets wrapped in guardrailConverseContent blocks.""" from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - + messages = [ { "role": "user", @@ -1610,27 +1610,27 @@ def test_guarded_text_wraps_in_guardrail_converse_content(): ] } ] - + result = _bedrock_converse_messages_pt( messages=messages, model="us.amazon.nova-pro-v1:0", llm_provider="bedrock_converse" ) - + # Should have 1 message assert len(result) == 1 assert result[0]["role"] == "user" - + # Should have 3 content blocks content = result[0]["content"] assert len(content) == 3 - + # First and third should be regular text assert "text" in content[0] assert content[0]["text"] == "Regular text content" assert "text" in content[2] assert content[2]["text"] == "More regular text" - + # Second should be guardrailConverseContent assert "guardrailConverseContent" in content[1] assert content[1]["guardrailConverseContent"]["text"] == "This should be guarded" @@ -1639,7 +1639,7 @@ def test_guarded_text_wraps_in_guardrail_converse_content(): def test_guarded_text_with_system_messages(): """Test guarded_text with system messages using the full transformation.""" config = AmazonConverseConfig() - + messages = [ {"role": "system", "content": "You are a helpful assistant."}, { @@ -1650,14 +1650,14 @@ def test_guarded_text_with_system_messages(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "DRAFT" } } - + result = config._transform_request( model="us.amazon.nova-pro-v1:0", messages=messages, @@ -1665,26 +1665,26 @@ def test_guarded_text_with_system_messages(): litellm_params={}, headers={} ) - + # Should have system content blocks assert "system" in result assert len(result["system"]) == 1 assert result["system"][0]["text"] == "You are a helpful assistant." - + # Should have 1 message (system messages are removed) assert "messages" in result assert len(result["messages"]) == 1 - + # User message should have both regular text and guarded text user_message = result["messages"][0] assert user_message["role"] == "user" content = user_message["content"] assert len(content) == 2 - + # First should be regular text assert "text" in content[0] assert content[0]["text"] == "What is the main topic of this legal document?" 
- + # Second should be guardrailConverseContent assert "guardrailConverseContent" in content[1] assert content[1]["guardrailConverseContent"]["text"] == "This is a set of very long instructions that you will follow. Here is a legal document that you will use to answer the user's question." @@ -1693,7 +1693,7 @@ def test_guarded_text_with_system_messages(): def test_guarded_text_with_mixed_content_types(): """Test guarded_text with mixed content types including images.""" from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - + messages = [ { "role": "user", @@ -1704,28 +1704,28 @@ def test_guarded_text_with_mixed_content_types(): ] } ] - + result = _bedrock_converse_messages_pt( messages=messages, model="us.amazon.nova-pro-v1:0", llm_provider="bedrock_converse" ) - + # Should have 1 message assert len(result) == 1 assert result[0]["role"] == "user" - + # Should have 3 content blocks content = result[0]["content"] assert len(content) == 3 - + # First should be regular text assert "text" in content[0] assert content[0]["text"] == "Look at this image" - + # Second should be image assert "image" in content[1] - + # Third should be guardrailConverseContent assert "guardrailConverseContent" in content[2] assert content[2]["guardrailConverseContent"]["text"] == "This sensitive content should be guarded" @@ -1735,7 +1735,7 @@ def test_guarded_text_with_mixed_content_types(): async def test_async_guarded_text(): """Test async version of guarded_text processing.""" from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor - + messages = [ { "role": "user", @@ -1745,25 +1745,25 @@ async def test_async_guarded_text(): ] } ] - + result = await BedrockConverseMessagesProcessor._bedrock_converse_messages_pt_async( messages=messages, model="us.amazon.nova-pro-v1:0", llm_provider="bedrock_converse" ) - + # Should have 1 message assert len(result) == 1 assert result[0]["role"] == "user" - + # Should have 2 content blocks content = result[0]["content"] assert len(content) == 2 - + # First should be regular text assert "text" in content[0] assert content[0]["text"] == "Hello" - + # Second should be guardrailConverseContent assert "guardrailConverseContent" in content[1] assert content[1]["guardrailConverseContent"]["text"] == "This should be guarded" @@ -1772,7 +1772,7 @@ async def test_async_guarded_text(): def test_guarded_text_with_tool_calls(): """Test guarded_text with tool calls in the conversation.""" from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - + messages = [ { "role": "user", @@ -1798,30 +1798,30 @@ def test_guarded_text_with_tool_calls(): "content": "It's sunny and 25°C" } ] - + result = _bedrock_converse_messages_pt( messages=messages, model="us.amazon.nova-pro-v1:0", llm_provider="bedrock_converse" ) - + # Should have 3 messages assert len(result) == 3 - + # First message (user) should have both text and guarded_text user_message = result[0] assert user_message["role"] == "user" content = user_message["content"] assert len(content) == 2 - + # First should be regular text assert "text" in content[0] assert content[0]["text"] == "What's the weather?" 
- + # Second should be guardrailConverseContent assert "guardrailConverseContent" in content[1] assert content[1]["guardrailConverseContent"]["text"] == "Please be careful with sensitive information" - + # Other messages should not have guardrailConverseContent for i in range(1, 3): content = result[i]["content"] @@ -1832,7 +1832,7 @@ def test_guarded_text_with_tool_calls(): def test_guarded_text_guardrail_config_preserved(): """Test that guardrailConfig is preserved when using guarded_text.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -1842,14 +1842,14 @@ def test_guarded_text_guardrail_config_preserved(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "DRAFT" } } - + result = config._transform_request( model="us.amazon.nova-pro-v1:0", messages=messages, @@ -1857,11 +1857,11 @@ def test_guarded_text_guardrail_config_preserved(): litellm_params={}, headers={} ) - + # GuardrailConfig should be present at top level assert "guardrailConfig" in result assert result["guardrailConfig"]["guardrailIdentifier"] == "gr-abc123" - + # GuardrailConfig should also be in inferenceConfig assert "inferenceConfig" in result assert "guardrailConfig" in result["inferenceConfig"] @@ -1871,7 +1871,7 @@ def test_guarded_text_guardrail_config_preserved(): def test_auto_convert_last_user_message_to_guarded_text(): """Test that last user message is automatically converted to guarded_text when guardrailConfig is present.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -1883,17 +1883,17 @@ def test_auto_convert_last_user_message_to_guarded_text(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify the conversion assert len(converted_messages) == 1 assert converted_messages[0]["role"] == "user" @@ -1905,24 +1905,24 @@ def test_auto_convert_last_user_message_to_guarded_text(): def test_auto_convert_last_user_message_string_content(): """Test that last user message with string content is automatically converted to guarded_text when guardrailConfig is present.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", "content": "What is the main topic of this legal document?" 
} ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify the conversion assert len(converted_messages) == 1 assert converted_messages[0]["role"] == "user" @@ -1934,7 +1934,7 @@ def test_auto_convert_last_user_message_string_content(): def test_no_conversion_when_no_guardrail_config(): """Test that no conversion happens when guardrailConfig is not present.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -1946,12 +1946,12 @@ def test_no_conversion_when_no_guardrail_config(): ] } ] - + optional_params = {} - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify no conversion happened assert converted_messages == messages @@ -1959,7 +1959,7 @@ def test_no_conversion_when_no_guardrail_config(): def test_no_conversion_when_guarded_text_already_present(): """Test that no conversion happens when guarded_text is already present in the last user message.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -1971,17 +1971,17 @@ def test_no_conversion_when_guarded_text_already_present(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify no conversion happened assert converted_messages == messages @@ -1989,7 +1989,7 @@ def test_no_conversion_when_guarded_text_already_present(): def test_auto_convert_with_mixed_content(): """Test that only text elements are converted to guarded_text, other content types are preserved.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -2005,26 +2005,26 @@ def test_auto_convert_with_mixed_content(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify the conversion assert len(converted_messages) == 1 assert converted_messages[0]["role"] == "user" assert len(converted_messages[0]["content"]) == 2 - + # First element should be converted to guarded_text assert converted_messages[0]["content"][0]["type"] == "guarded_text" assert converted_messages[0]["content"][0]["text"] == "What is the main topic of this legal document?" 
- + # Second element should remain unchanged assert converted_messages[0]["content"][1]["type"] == "image_url" assert converted_messages[0]["content"][1]["image_url"]["url"] == "https://example.com/image.jpg" @@ -2033,7 +2033,7 @@ def test_auto_convert_with_mixed_content(): def test_auto_convert_in_full_transformation(): """Test that the automatic conversion works in the full transformation pipeline.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -2045,14 +2045,14 @@ def test_auto_convert_in_full_transformation(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the full transformation result = config._transform_request( model="anthropic.claude-3-sonnet-20240229-v1:0", @@ -2061,11 +2061,11 @@ def test_auto_convert_in_full_transformation(): litellm_params={}, headers={} ) - + # Verify the transformation worked assert "messages" in result assert len(result["messages"]) == 1 - + # The message should have guardrailConverseContent message = result["messages"][0] assert "content" in message @@ -2077,7 +2077,7 @@ def test_auto_convert_in_full_transformation(): def test_convert_consecutive_user_messages_to_guarded_text(): """Test that consecutive user messages at the end are converted to guarded_text.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -2111,34 +2111,34 @@ def test_convert_consecutive_user_messages_to_guarded_text(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify the conversion - only the last two user messages should be converted assert len(converted_messages) == 4 - + # First user message should remain unchanged assert converted_messages[0]["role"] == "user" assert converted_messages[0]["content"][0]["type"] == "text" assert converted_messages[0]["content"][0]["text"] == "First user message" - + # Assistant message should remain unchanged assert converted_messages[1]["role"] == "assistant" assert converted_messages[1]["content"] == "Assistant response" - + # Second user message should be converted to guarded_text assert converted_messages[2]["role"] == "user" assert converted_messages[2]["content"][0]["type"] == "guarded_text" assert converted_messages[2]["content"][0]["text"] == "Second user message" - + # Third user message should be converted to guarded_text assert converted_messages[3]["role"] == "user" assert converted_messages[3]["content"][0]["type"] == "guarded_text" @@ -2148,7 +2148,7 @@ def test_convert_consecutive_user_messages_to_guarded_text(): def test_convert_all_user_messages_when_all_consecutive(): """Test that all user messages are converted when they are all consecutive at the end.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -2178,24 +2178,24 @@ def test_convert_all_user_messages_when_all_consecutive(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify all three user messages are converted assert len(converted_messages) == 3 - + for i in range(3): assert converted_messages[i]["role"] == "user" assert converted_messages[i]["content"][0]["type"] == "guarded_text" - + assert 
converted_messages[0]["content"][0]["text"] == "First user message" assert converted_messages[1]["content"][0]["text"] == "Second user message" assert converted_messages[2]["content"][0]["text"] == "Third user message" @@ -2204,7 +2204,7 @@ def test_convert_all_user_messages_when_all_consecutive(): def test_convert_consecutive_user_messages_with_string_content(): """Test that consecutive user messages with string content are converted to guarded_text.""" config = AmazonConverseConfig() - + messages = [ { "role": "assistant", @@ -2219,30 +2219,30 @@ def test_convert_consecutive_user_messages_with_string_content(): "content": "Second user message" } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify the conversion assert len(converted_messages) == 3 - + # Assistant message should remain unchanged assert converted_messages[0]["role"] == "assistant" assert converted_messages[0]["content"] == "Assistant response" - + # Both user messages should be converted to guarded_text assert converted_messages[1]["role"] == "user" assert len(converted_messages[1]["content"]) == 1 assert converted_messages[1]["content"][0]["type"] == "guarded_text" assert converted_messages[1]["content"][0]["text"] == "First user message" - + assert converted_messages[2]["role"] == "user" assert len(converted_messages[2]["content"]) == 1 assert converted_messages[2]["content"][0]["type"] == "guarded_text" @@ -2252,7 +2252,7 @@ def test_convert_consecutive_user_messages_with_string_content(): def test_skip_consecutive_user_messages_with_existing_guarded_text(): """Test that consecutive user messages with existing guarded_text are skipped.""" config = AmazonConverseConfig() - + messages = [ { "role": "user", @@ -2273,27 +2273,288 @@ def test_skip_consecutive_user_messages_with_existing_guarded_text(): ] } ] - + optional_params = { "guardrailConfig": { "guardrailIdentifier": "gr-abc123", "guardrailVersion": "1" } } - + # Test the helper method directly converted_messages = config._convert_consecutive_user_messages_to_guarded_text(messages, optional_params) - + # Verify the conversion assert len(converted_messages) == 2 - + # First message should remain unchanged (already has guarded_text) assert converted_messages[0]["role"] == "user" assert converted_messages[0]["content"][0]["type"] == "guarded_text" assert converted_messages[0]["content"][0]["text"] == "Already guarded" - + # Second message should be converted assert converted_messages[1]["role"] == "user" assert converted_messages[1]["content"][0]["type"] == "guarded_text" assert converted_messages[1]["content"][0]["text"] == "Should be converted" + +def test_request_metadata_parameter_support(): + """Test that requestMetadata is in supported parameters.""" + config = AmazonConverseConfig() + supported_params = config.get_supported_openai_params( + model="bedrock/converse/us.anthropic.claude-sonnet-4-20250514-v1:0" + ) + assert "requestMetadata" in supported_params + + +def test_request_metadata_transformation(): + """Test that requestMetadata is properly transformed to top-level field.""" + config = AmazonConverseConfig() + + request_metadata = { + "cost_center": "engineering", + "user_id": "user123", + "session_id": "sess_abc123" + } + + messages = [ + {"role": "user", "content": "Hello!"}, + ] + + # Transform request with requestMetadata + request_data = 
config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"requestMetadata": request_metadata}, + litellm_params={}, + headers={} + ) + + # Verify that requestMetadata appears as top-level field + assert "requestMetadata" in request_data + assert request_data["requestMetadata"] == request_metadata + + +def test_request_metadata_validation(): + """Test validation of requestMetadata constraints.""" + config = AmazonConverseConfig() + + messages = [{"role": "user", "content": "Hello!"}] + + # Test valid metadata + valid_metadata = { + "cost_center": "engineering", + "user_id": "user123", + } + + # Should not raise exception + config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"request_metadata": valid_metadata}, + litellm_params={}, + headers={} + ) + + # Test too many items (max 16) + too_many_items = {f"key_{i}": f"value_{i}" for i in range(17)} + + try: + config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"request_metadata": too_many_items}, + litellm_params={}, + headers={} + ) + assert False, "Should have raised validation error for too many items" + except Exception as e: + assert "maximum of 16 items" in str(e).lower() + + +def test_request_metadata_key_constraints(): + """Test key constraint validation.""" + config = AmazonConverseConfig() + + messages = [{"role": "user", "content": "Hello!"}] + + # Test key too long (max 256 characters) + long_key = "a" * 257 + invalid_metadata = {long_key: "value"} + + try: + config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"request_metadata": invalid_metadata}, + litellm_params={}, + headers={} + ) + assert False, "Should have raised validation error for key too long" + except Exception as e: + assert "key length" in str(e).lower() or "256 characters" in str(e).lower() + + # Test empty key + invalid_metadata = {"": "value"} + + try: + config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"request_metadata": invalid_metadata}, + litellm_params={}, + headers={} + ) + assert False, "Should have raised validation error for empty key" + except Exception as e: + assert "key length" in str(e).lower() or "empty" in str(e).lower() + + +def test_request_metadata_value_constraints(): + """Test value constraint validation.""" + config = AmazonConverseConfig() + + messages = [{"role": "user", "content": "Hello!"}] + + # Test value too long (max 256 characters) + long_value = "a" * 257 + invalid_metadata = {"key": long_value} + + try: + config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"request_metadata": invalid_metadata}, + litellm_params={}, + headers={} + ) + assert False, "Should have raised validation error for value too long" + except Exception as e: + assert "value length" in str(e).lower() or "256 characters" in str(e).lower() + + # Test empty value (should be allowed) + valid_metadata = {"key": ""} + + # Should not raise exception + config.transform_request( + model="anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=messages, + optional_params={"request_metadata": valid_metadata}, + litellm_params={}, + headers={} + ) + + +def test_request_metadata_character_pattern(): + """Test character pattern validation for keys and values.""" + config = 
AmazonConverseConfig()
+
+    messages = [{"role": "user", "content": "Hello!"}]
+
+    # Test valid characters
+    valid_metadata = {
+        "cost-center_2024": "engineering@team#1",
+        "user:id": "$100.00",
+        "session+token": "/path/to=resource",
+    }
+
+    # Should not raise exception
+    config.transform_request(
+        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
+        messages=messages,
+        optional_params={"request_metadata": valid_metadata},
+        litellm_params={},
+        headers={}
+    )
+
+
+def test_request_metadata_with_other_params():
+    """Test that request_metadata works alongside other parameters."""
+    config = AmazonConverseConfig()
+
+    request_metadata = {
+        "experiment": "test_A",
+        "user_type": "premium"
+    }
+
+    messages = [
+        {"role": "user", "content": "What's the weather?"},
+    ]
+
+    tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get the current weather",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {"type": "string"}
+                    },
+                    "required": ["location"]
+                }
+            }
+        }
+    ]
+
+    # Transform request with multiple parameters including request_metadata
+    request_data = config.transform_request(
+        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
+        messages=messages,
+        optional_params={
+            "request_metadata": request_metadata,
+            "tools": tools,
+            "max_tokens": 100,
+            "temperature": 0.7
+        },
+        litellm_params={},
+        headers={}
+    )
+
+    # Verify requestMetadata is at top level
+    assert "requestMetadata" in request_data
+    assert request_data["requestMetadata"] == request_metadata
+
+    # Verify other parameters are also processed correctly
+    assert "toolConfig" in request_data
+    assert "inferenceConfig" in request_data
+    assert request_data["inferenceConfig"]["temperature"] == 0.7
+
+
+def test_request_metadata_empty():
+    """Test handling of empty request_metadata."""
+    config = AmazonConverseConfig()
+
+    messages = [{"role": "user", "content": "Hello!"}]
+
+    # Empty dict should be allowed
+    request_data = config.transform_request(
+        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
+        messages=messages,
+        optional_params={"request_metadata": {}},
+        litellm_params={},
+        headers={}
+    )
+
+    assert "requestMetadata" in request_data
+    assert request_data["requestMetadata"] == {}
+
+
+def test_request_metadata_not_provided():
+    """Test that requestMetadata is not included when not provided."""
+    config = AmazonConverseConfig()
+
+    messages = [{"role": "user", "content": "Hello!"}]
+
+    # No request_metadata provided
+    request_data = config.transform_request(
+        model="anthropic.claude-3-5-sonnet-20240620-v1:0",
+        messages=messages,
+        optional_params={},
+        litellm_params={},
+        headers={}
+    )
+
+    # requestMetadata should not be in the request
+    assert "requestMetadata" not in request_data

From a34bb6f6d375c97f1db286f2644393c4d163c329 Mon Sep 17 00:00:00 2001
From: Tim Elfrink
Date: Mon, 15 Sep 2025 10:58:58 +0200
Subject: [PATCH 2/4] Implement requestMetadata support for Bedrock Converse API

- Add requestMetadata field to CommonRequestObject type definition
- Support request_metadata parameter in get_supported_openai_params
- Add comprehensive validation for AWS Bedrock constraints:
  * Maximum 16 key-value pairs
  * Key length 1-256 characters
  * Value length 0-256 characters
  * Character set validation [a-zA-Z0-9\s:_@$#=/+,.-]
- Transform request_metadata to top-level requestMetadata field in API request
- Maintain backward compatibility with existing functionality
- Enable metadata logging and traceability for multi-cloud environments
---
.../bedrock/chat/converse_transformation.py | 85 +++++++++++++++++++ litellm/types/llms/bedrock.py | 3 +- 2 files changed, 87 insertions(+), 1 deletion(-) diff --git a/litellm/llms/bedrock/chat/converse_transformation.py b/litellm/llms/bedrock/chat/converse_transformation.py index d99cff6b6bce..5cc644dd4eae 100644 --- a/litellm/llms/bedrock/chat/converse_transformation.py +++ b/litellm/llms/bedrock/chat/converse_transformation.py @@ -175,6 +175,77 @@ def get_config(cls): and v is not None } + def _validate_request_metadata(self, metadata: dict) -> None: + """ + Validate requestMetadata according to AWS Bedrock Converse API constraints. + + Constraints: + - Maximum of 16 items + - Keys: 1-256 characters, pattern [a-zA-Z0-9\\s:_@$#=/+,-.]{1,256} + - Values: 0-256 characters, pattern [a-zA-Z0-9\\s:_@$#=/+,-.]{0,256} + """ + import re + + if not isinstance(metadata, dict): + raise litellm.exceptions.BadRequestError( + message="request_metadata must be a dictionary", + model="bedrock", + llm_provider="bedrock", + ) + + if len(metadata) > 16: + raise litellm.exceptions.BadRequestError( + message="request_metadata can contain a maximum of 16 items", + model="bedrock", + llm_provider="bedrock", + ) + + key_pattern = re.compile(r'^[a-zA-Z0-9\s:_@$#=/+,.-]{1,256}$') + value_pattern = re.compile(r'^[a-zA-Z0-9\s:_@$#=/+,.-]{0,256}$') + + for key, value in metadata.items(): + if not isinstance(key, str): + raise litellm.exceptions.BadRequestError( + message="request_metadata keys must be strings", + model="bedrock", + llm_provider="bedrock", + ) + + if not isinstance(value, str): + raise litellm.exceptions.BadRequestError( + message="request_metadata values must be strings", + model="bedrock", + llm_provider="bedrock", + ) + + if len(key) == 0 or len(key) > 256: + raise litellm.exceptions.BadRequestError( + message="request_metadata key length must be 1-256 characters", + model="bedrock", + llm_provider="bedrock", + ) + + if len(value) > 256: + raise litellm.exceptions.BadRequestError( + message="request_metadata value length must be 0-256 characters", + model="bedrock", + llm_provider="bedrock", + ) + + if not key_pattern.match(key): + raise litellm.exceptions.BadRequestError( + message=f"request_metadata key '{key}' contains invalid characters. Allowed: [a-zA-Z0-9\\s:_@$#=/+,.-]", + model="bedrock", + llm_provider="bedrock", + ) + + if not value_pattern.match(value): + raise litellm.exceptions.BadRequestError( + message=f"request_metadata value '{value}' contains invalid characters. 
Allowed: [a-zA-Z0-9\\s:_@$#=/+,.-]", + model="bedrock", + llm_provider="bedrock", + ) + def get_supported_openai_params(self, model: str) -> List[str]: from litellm.utils import supports_function_calling @@ -188,6 +259,7 @@ def get_supported_openai_params(self, model: str) -> List[str]: "top_p", "extra_headers", "response_format", + "request_metadata", ] if ( @@ -497,6 +569,10 @@ def map_openai_params( optional_params["thinking"] = AnthropicConfig._map_reasoning_effort( value ) + if param == "request_metadata": + if value is not None: + self._validate_request_metadata(value) + optional_params["request_metadata"] = value # Only update thinking tokens for non-GPT-OSS models if "gpt-oss" not in model: @@ -727,6 +803,11 @@ def _transform_request_helper( ) inference_params.pop("json_mode", None) # used for handling json_schema + # Extract request_metadata before processing other parameters + request_metadata = inference_params.pop("request_metadata", None) + if request_metadata is not None: + self._validate_request_metadata(request_metadata) + # keep supported params in 'inference_params', and set all model-specific params in 'additional_request_params' additional_request_params = { k: v for k, v in inference_params.items() if k not in total_supported_params @@ -813,6 +894,10 @@ def _transform_request_helper( if bedrock_tool_config is not None: data["toolConfig"] = bedrock_tool_config + # Request Metadata (top-level field) + if request_metadata is not None: + data["requestMetadata"] = request_metadata + return data async def _async_transform_request( diff --git a/litellm/types/llms/bedrock.py b/litellm/types/llms/bedrock.py index a829a6b94b90..62143e8803a6 100644 --- a/litellm/types/llms/bedrock.py +++ b/litellm/types/llms/bedrock.py @@ -1,5 +1,5 @@ import json -from typing import Any, List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import ( TYPE_CHECKING, @@ -227,6 +227,7 @@ class CommonRequestObject( toolConfig: ToolConfigBlock guardrailConfig: Optional[GuardrailConfigBlock] performanceConfig: Optional[PerformanceConfigBlock] + requestMetadata: Optional[Dict[str, str]] class RequestObject(CommonRequestObject, total=False): From 352a0eedb554fb817a0d0651faead9bf02c3fdfc Mon Sep 17 00:00:00 2001 From: Tim Elfrink Date: Mon, 15 Sep 2025 11:06:03 +0200 Subject: [PATCH 3/4] refactor: Extract helper methods to fix linting issues - Split _transform_request_helper into smaller focused methods - Extract _prepare_request_params for parameter preparation logic - Extract _process_tools_and_beta for tool processing logic - Resolve PLR0915 (too many statements) linting issue - Maintain all existing functionality and test compatibility --- .../bedrock/chat/converse_transformation.py | 73 +++++++++++-------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/litellm/llms/bedrock/chat/converse_transformation.py b/litellm/llms/bedrock/chat/converse_transformation.py index 5cc644dd4eae..d92c6d4a584b 100644 --- a/litellm/llms/bedrock/chat/converse_transformation.py +++ b/litellm/llms/bedrock/chat/converse_transformation.py @@ -762,34 +762,8 @@ def _handle_top_k_value(self, model: str, inference_params: dict) -> dict: return {} - def _transform_request_helper( - self, - model: str, - system_content_blocks: List[SystemContentBlock], - optional_params: dict, - messages: Optional[List[AllMessageValues]] = None, - headers: Optional[dict] = None, - ) -> CommonRequestObject: - ## VALIDATE REQUEST - """ - Bedrock doesn't support tool calling 
without `tools=` param specified. - """ - if ( - "tools" not in optional_params - and messages is not None - and has_tool_call_blocks(messages) - ): - if litellm.modify_params: - optional_params["tools"] = add_dummy_tool( - custom_llm_provider="bedrock_converse" - ) - else: - raise litellm.UnsupportedParamsError( - message="Bedrock doesn't support tool calling without `tools=` param specified. Pass `tools=` param OR set `litellm.modify_params = True` // `litellm_settings::modify_params: True` to add dummy tool to the request.", - model="", - llm_provider="bedrock", - ) - + def _prepare_request_params(self, optional_params: dict, model: str) -> tuple[dict, dict, dict]: + """Prepare and separate request parameters.""" inference_params = copy.deepcopy(optional_params) supported_converse_params = list( AmazonConverseConfig.__annotations__.keys() @@ -821,9 +795,10 @@ def _transform_request_helper( self._handle_top_k_value(model, inference_params) ) - original_tools = inference_params.pop("tools", []) + return inference_params, additional_request_params, request_metadata - # Initialize bedrock_tools + def _process_tools_and_beta(self, original_tools: list, model: str, headers: Optional[dict], additional_request_params: dict) -> tuple[List[ToolBlock], list]: + """Process tools and collect anthropic_beta values.""" bedrock_tools: List[ToolBlock] = [] # Collect anthropic_beta values from user headers @@ -865,6 +840,44 @@ def _transform_request_helper( seen.add(beta) additional_request_params["anthropic_beta"] = unique_betas + return bedrock_tools, anthropic_beta_list + + def _transform_request_helper( + self, + model: str, + system_content_blocks: List[SystemContentBlock], + optional_params: dict, + messages: Optional[List[AllMessageValues]] = None, + headers: Optional[dict] = None, + ) -> CommonRequestObject: + ## VALIDATE REQUEST + """ + Bedrock doesn't support tool calling without `tools=` param specified. + """ + if ( + "tools" not in optional_params + and messages is not None + and has_tool_call_blocks(messages) + ): + if litellm.modify_params: + optional_params["tools"] = add_dummy_tool( + custom_llm_provider="bedrock_converse" + ) + else: + raise litellm.UnsupportedParamsError( + message="Bedrock doesn't support tool calling without `tools=` param specified. 
Pass `tools=` param OR set `litellm.modify_params = True` // `litellm_settings::modify_params: True` to add dummy tool to the request.",
+                    model="",
+                    llm_provider="bedrock",
+                )
+
+        # Prepare and separate parameters
+        inference_params, additional_request_params, request_metadata = self._prepare_request_params(optional_params, model)
+
+        original_tools = inference_params.pop("tools", [])
+
+        # Process tools and collect beta values
+        bedrock_tools, anthropic_beta_list = self._process_tools_and_beta(original_tools, model, headers, additional_request_params)
+
         bedrock_tool_config: Optional[ToolConfigBlock] = None
         if len(bedrock_tools) > 0:
             tool_choice_values: ToolChoiceValuesBlock = inference_params.pop(

From 27a37ce57e2b514111df329cba5fe9f9a6a48223 Mon Sep 17 00:00:00 2001
From: Tim Elfrink
Date: Thu, 18 Sep 2025 09:13:21 +0200
Subject: [PATCH 4/4] Use requestMetadata parameter name for consistency with other Bedrock params

- Change parameter from request_metadata to requestMetadata to match camelCase convention
- Consistent with guardrailConfig and performanceConfig naming pattern
- Update all references in transformation code and error messages
- Update tests and documentation to use correct parameter name
- Fix type checking for parameter validation
---
 .../completion/provider_specific_params.md    | 50 +++++++++++
 docs/my-website/docs/providers/bedrock.md     | 59 +++++++++++++
 .../bedrock/chat/converse_transformation.py   | 30 +++----
 .../chat/test_converse_transformation.py      | 84 ++++++++++++-------
 4 files changed, 177 insertions(+), 46 deletions(-)

diff --git a/docs/my-website/docs/completion/provider_specific_params.md b/docs/my-website/docs/completion/provider_specific_params.md
index a8307fc8a204..772ca13e2933 100644
--- a/docs/my-website/docs/completion/provider_specific_params.md
+++ b/docs/my-website/docs/completion/provider_specific_params.md
@@ -433,4 +433,54 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
     ],
     "adapater_id": "my-special-adapter-id" # 👈 PROVIDER-SPECIFIC PARAM
   }'
 ```
+
+## Provider-Specific Metadata Parameters
+
+| Provider | Parameter | Use Case |
+|----------|-----------|----------|
+| **AWS Bedrock** | `requestMetadata` | Cost attribution, logging |
+| **Gemini/Vertex AI** | `labels` | Resource labeling |
+| **Anthropic** | `metadata` | User identification |
+
+
+
+```python
+import litellm
+
+response = litellm.completion(
+    model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+    messages=[{"role": "user", "content": "Hello!"}],
+    requestMetadata={"cost_center": "engineering"}
+)
+```
+
+
+
+```python
+import litellm
+
+response = litellm.completion(
+    model="vertex_ai/gemini-pro",
+    messages=[{"role": "user", "content": "Hello!"}],
+    labels={"environment": "production"}
+)
+```
+
+
+
+```python
+import litellm
+
+response = litellm.completion(
+    model="anthropic/claude-3-sonnet-20240229",
+    messages=[{"role": "user", "content": "Hello!"}],
+    metadata={"user_id": "user123"}
+)
+```
\ No newline at end of file
diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md
index 165ef1d12f73..c9d159a11e62 100644
--- a/docs/my-website/docs/providers/bedrock.md
+++ b/docs/my-website/docs/providers/bedrock.md
@@ -308,6 +308,65 @@ print(response)
 
 
 
+
+## Usage - Request Metadata
+
+Attach metadata to Bedrock requests for logging and cost attribution.
+ + + + +```python +import os +from litellm import completion + +os.environ["AWS_ACCESS_KEY_ID"] = "" +os.environ["AWS_SECRET_ACCESS_KEY"] = "" +os.environ["AWS_REGION_NAME"] = "" + +response = completion( + model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0", + messages=[{"role": "user", "content": "Hello, how are you?"}], + requestMetadata={ + "cost_center": "engineering", + "user_id": "user123" + } +) +``` + + + +**Set on yaml** + +```yaml +model_list: + - model_name: bedrock-claude-v1 + litellm_params: + model: bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0 + requestMetadata: + cost_center: "engineering" +``` + +**Set on request** + +```python +import openai +client = openai.OpenAI( + api_key="anything", + base_url="http://0.0.0.0:4000" +) + +response = client.chat.completions.create( + model="bedrock-claude-v1", + messages=[{"role": "user", "content": "Hello"}], + extra_body={ + "requestMetadata": {"cost_center": "engineering"} + } +) +``` + + + + ## Usage - Function Calling / Tool calling LiteLLM supports tool calling via Bedrock's Converse and Invoke API's. diff --git a/litellm/llms/bedrock/chat/converse_transformation.py b/litellm/llms/bedrock/chat/converse_transformation.py index d92c6d4a584b..e8094444330d 100644 --- a/litellm/llms/bedrock/chat/converse_transformation.py +++ b/litellm/llms/bedrock/chat/converse_transformation.py @@ -188,14 +188,14 @@ def _validate_request_metadata(self, metadata: dict) -> None: if not isinstance(metadata, dict): raise litellm.exceptions.BadRequestError( - message="request_metadata must be a dictionary", + message="requestMetadata must be a dictionary", model="bedrock", llm_provider="bedrock", ) if len(metadata) > 16: raise litellm.exceptions.BadRequestError( - message="request_metadata can contain a maximum of 16 items", + message="requestMetadata can contain a maximum of 16 items", model="bedrock", llm_provider="bedrock", ) @@ -206,42 +206,42 @@ def _validate_request_metadata(self, metadata: dict) -> None: for key, value in metadata.items(): if not isinstance(key, str): raise litellm.exceptions.BadRequestError( - message="request_metadata keys must be strings", + message="requestMetadata keys must be strings", model="bedrock", llm_provider="bedrock", ) if not isinstance(value, str): raise litellm.exceptions.BadRequestError( - message="request_metadata values must be strings", + message="requestMetadata values must be strings", model="bedrock", llm_provider="bedrock", ) if len(key) == 0 or len(key) > 256: raise litellm.exceptions.BadRequestError( - message="request_metadata key length must be 1-256 characters", + message="requestMetadata key length must be 1-256 characters", model="bedrock", llm_provider="bedrock", ) if len(value) > 256: raise litellm.exceptions.BadRequestError( - message="request_metadata value length must be 0-256 characters", + message="requestMetadata value length must be 0-256 characters", model="bedrock", llm_provider="bedrock", ) if not key_pattern.match(key): raise litellm.exceptions.BadRequestError( - message=f"request_metadata key '{key}' contains invalid characters. Allowed: [a-zA-Z0-9\\s:_@$#=/+,.-]", + message=f"requestMetadata key '{key}' contains invalid characters. Allowed: [a-zA-Z0-9\\s:_@$#=/+,.-]", model="bedrock", llm_provider="bedrock", ) if not value_pattern.match(value): raise litellm.exceptions.BadRequestError( - message=f"request_metadata value '{value}' contains invalid characters. Allowed: [a-zA-Z0-9\\s:_@$#=/+,.-]", + message=f"requestMetadata value '{value}' contains invalid characters. 
Allowed: [a-zA-Z0-9\\s:_@$#=/+,.-]", model="bedrock", llm_provider="bedrock", ) @@ -259,7 +259,7 @@ def get_supported_openai_params(self, model: str) -> List[str]: "top_p", "extra_headers", "response_format", - "request_metadata", + "requestMetadata", ] if ( @@ -569,10 +569,10 @@ def map_openai_params( optional_params["thinking"] = AnthropicConfig._map_reasoning_effort( value ) - if param == "request_metadata": - if value is not None: - self._validate_request_metadata(value) - optional_params["request_metadata"] = value + if param == "requestMetadata": + if value is not None and isinstance(value, dict): + self._validate_request_metadata(value) # type: ignore + optional_params["requestMetadata"] = value # Only update thinking tokens for non-GPT-OSS models if "gpt-oss" not in model: @@ -777,8 +777,8 @@ def _prepare_request_params(self, optional_params: dict, model: str) -> tuple[di ) inference_params.pop("json_mode", None) # used for handling json_schema - # Extract request_metadata before processing other parameters - request_metadata = inference_params.pop("request_metadata", None) + # Extract requestMetadata before processing other parameters + request_metadata = inference_params.pop("requestMetadata", None) if request_metadata is not None: self._validate_request_metadata(request_metadata) diff --git a/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py b/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py index 39d1d89b0fda..3c8af9d0d57a 100644 --- a/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py +++ b/tests/test_litellm/llms/bedrock/chat/test_converse_transformation.py @@ -1,7 +1,7 @@ +import asyncio import json import os import sys -import asyncio import pytest from fastapi.testclient import TestClient @@ -12,7 +12,7 @@ from unittest.mock import MagicMock, patch import litellm -from litellm import completion, RateLimitError, ModelResponse +from litellm import ModelResponse, RateLimitError, completion from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig from litellm.types.llms.bedrock import ConverseTokenUsageBlock @@ -308,8 +308,12 @@ def test_transform_request_helper_includes_anthropic_beta_and_tools(): def test_transform_response_with_computer_use_tool(): """Test response transformation with computer use tool call.""" import httpx - from litellm.types.llms.bedrock import ConverseResponseBlock, ConverseTokenUsageBlock + from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig + from litellm.types.llms.bedrock import ( + ConverseResponseBlock, + ConverseTokenUsageBlock, + ) from litellm.types.utils import ModelResponse # Simulate a Bedrock Converse response with a computer-use tool call @@ -397,8 +401,12 @@ def text(self): def test_transform_response_with_bash_tool(): """Test response transformation with bash tool call.""" import httpx - from litellm.types.llms.bedrock import ConverseResponseBlock, ConverseTokenUsageBlock + from litellm.llms.bedrock.chat.converse_transformation import AmazonConverseConfig + from litellm.types.llms.bedrock import ( + ConverseResponseBlock, + ConverseTokenUsageBlock, + ) from litellm.types.utils import ModelResponse # Simulate a Bedrock Converse response with a bash tool call @@ -1236,9 +1244,11 @@ def test_map_openai_params_with_response_format(): @pytest.mark.asyncio async def test_assistant_message_cache_control(): """Test that assistant messages with cache_control generate cachePoint blocks.""" - from 
litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor - + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) + # Test assistant message with string content and cache_control messages = [ {"role": "user", "content": "Hello"}, @@ -1287,8 +1297,10 @@ async def test_assistant_message_cache_control(): @pytest.mark.asyncio async def test_assistant_message_list_content_cache_control(): """Test assistant messages with list content and cache_control.""" - from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) messages = [ {"role": "user", "content": "Hello"}, @@ -1329,8 +1341,10 @@ async def test_assistant_message_list_content_cache_control(): @pytest.mark.asyncio async def test_tool_message_cache_control(): """Test that tool messages with cache_control generate cachePoint blocks.""" - from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) messages = [ {"role": "user", "content": "What's the weather?"}, @@ -1391,8 +1405,10 @@ async def test_tool_message_cache_control(): @pytest.mark.asyncio async def test_tool_message_string_content_cache_control(): """Test tool messages with string content and message-level cache_control.""" - from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) messages = [ {"role": "user", "content": "What's the weather?"}, @@ -1445,8 +1461,10 @@ async def test_tool_message_string_content_cache_control(): @pytest.mark.asyncio async def test_assistant_tool_calls_cache_control(): """Test that assistant tool_calls with cache_control generate cachePoint blocks.""" - from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) messages = [ {"role": "user", "content": "Calculate 2+2"}, @@ -1495,8 +1513,10 @@ async def test_assistant_tool_calls_cache_control(): @pytest.mark.asyncio async def test_multiple_tool_calls_with_mixed_cache_control(): """Test multiple tool calls where only some have cache_control.""" - from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) messages = [ {"role": "user", 
"content": "Do multiple calculations"}, @@ -1554,8 +1574,10 @@ async def test_multiple_tool_calls_with_mixed_cache_control(): @pytest.mark.asyncio async def test_no_cache_control_no_cache_point(): """Test that messages without cache_control don't generate cachePoint blocks.""" - from litellm.litellm_core_utils.prompt_templates.factory import _bedrock_converse_messages_pt - from litellm.litellm_core_utils.prompt_templates.factory import BedrockConverseMessagesProcessor + from litellm.litellm_core_utils.prompt_templates.factory import ( + BedrockConverseMessagesProcessor, + _bedrock_converse_messages_pt, + ) messages = [ {"role": "user", "content": "Hello"}, @@ -2351,7 +2373,7 @@ def test_request_metadata_validation(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": valid_metadata}, + optional_params={"requestMetadata": valid_metadata}, litellm_params={}, headers={} ) @@ -2363,7 +2385,7 @@ def test_request_metadata_validation(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": too_many_items}, + optional_params={"requestMetadata": too_many_items}, litellm_params={}, headers={} ) @@ -2386,7 +2408,7 @@ def test_request_metadata_key_constraints(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": invalid_metadata}, + optional_params={"requestMetadata": invalid_metadata}, litellm_params={}, headers={} ) @@ -2401,7 +2423,7 @@ def test_request_metadata_key_constraints(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": invalid_metadata}, + optional_params={"requestMetadata": invalid_metadata}, litellm_params={}, headers={} ) @@ -2424,7 +2446,7 @@ def test_request_metadata_value_constraints(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": invalid_metadata}, + optional_params={"requestMetadata": invalid_metadata}, litellm_params={}, headers={} ) @@ -2439,7 +2461,7 @@ def test_request_metadata_value_constraints(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": valid_metadata}, + optional_params={"requestMetadata": valid_metadata}, litellm_params={}, headers={} ) @@ -2462,14 +2484,14 @@ def test_request_metadata_character_pattern(): config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": valid_metadata}, + optional_params={"requestMetadata": valid_metadata}, litellm_params={}, headers={} ) def test_request_metadata_with_other_params(): - """Test that request_metadata works alongside other parameters.""" + """Test that requestMetadata works alongside other parameters.""" config = AmazonConverseConfig() request_metadata = { @@ -2503,7 +2525,7 @@ def test_request_metadata_with_other_params(): model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, optional_params={ - "request_metadata": request_metadata, + "requestMetadata": request_metadata, "tools": tools, "max_tokens": 100, "temperature": 0.7 @@ -2523,7 +2545,7 @@ def test_request_metadata_with_other_params(): def test_request_metadata_empty(): - """Test handling of empty request_metadata.""" + """Test handling of empty requestMetadata.""" config = 
AmazonConverseConfig() messages = [{"role": "user", "content": "Hello!"}] @@ -2532,7 +2554,7 @@ def test_request_metadata_empty(): request_data = config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages, - optional_params={"request_metadata": {}}, + optional_params={"requestMetadata": {}}, litellm_params={}, headers={} ) @@ -2547,7 +2569,7 @@ def test_request_metadata_not_provided(): messages = [{"role": "user", "content": "Hello!"}] - # No request_metadata provided + # No requestMetadata provided request_data = config.transform_request( model="anthropic.claude-3-5-sonnet-20240620-v1:0", messages=messages,
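
For quick reference while reviewing, here is a minimal usage sketch of the `requestMetadata` parameter this series introduces, mirroring the examples added to the docs above. It assumes AWS credentials are already configured in the environment and that the caller has access to the referenced Claude model; the metadata keys and values are illustrative.

```python
import litellm

# requestMetadata is validated client-side (max 16 items; keys 1-256 chars,
# values 0-256 chars, charset [a-zA-Z0-9\s:_@$#=/+,.-]) and then forwarded
# as the top-level `requestMetadata` field of the Bedrock Converse request.
response = litellm.completion(
    model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
    messages=[{"role": "user", "content": "Hello!"}],
    requestMetadata={"cost_center": "engineering", "user_id": "user123"},
)

# Constraint violations are rejected before any API call is made,
# e.g. more than 16 key-value pairs:
try:
    litellm.completion(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        messages=[{"role": "user", "content": "Hello!"}],
        requestMetadata={f"key_{i}": f"value_{i}" for i in range(17)},
    )
except litellm.exceptions.BadRequestError as err:
    print(err)  # "requestMetadata can contain a maximum of 16 items"
```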