
Commit 4e3930e

Merge branch 'main'

2 parents: 4dee33b + 3391e91

2 files changed: 79 additions & 7 deletions

src/strands/models/bedrock.py

Lines changed: 26 additions & 7 deletions
@@ -278,13 +278,20 @@ def _format_bedrock_messages(self, messages: Messages) -> Messages:
             cleaned_content: list[ContentBlock] = []
 
             for content_block in message["content"]:
+                # DeepSeek models have issues with reasoningContent
+                # TODO: Replace with systematic model configuration registry (https://github.com/strands-agents/sdk-python/issues/780)
+                if "deepseek" in self.config["model_id"].lower() and "reasoningContent" in content_block:
+                    continue
+
                 if "toolResult" in content_block:
                     # Create a new content block with only the cleaned toolResult
                     tool_result: ToolResult = content_block["toolResult"]
 
                     # Keep only the required fields for Bedrock
                     cleaned_tool_result = ToolResult(
-                        content=tool_result["content"], toolUseId=tool_result["toolUseId"], status=tool_result["status"]
+                        content=tool_result["content"],
+                        toolUseId=tool_result["toolUseId"],
+                        status=tool_result["status"],
                     )
 
                     cleaned_block: ContentBlock = {"toolResult": cleaned_tool_result}
@@ -293,9 +300,10 @@ def _format_bedrock_messages(self, messages: Messages) -> Messages:
                     # Keep other content blocks as-is
                     cleaned_content.append(content_block)
 
-            # Create new message with cleaned content
-            cleaned_message: Message = Message(content=cleaned_content, role=message["role"])
-            cleaned_messages.append(cleaned_message)
+            # Create new message with cleaned content (skip if empty for DeepSeek)
+            if cleaned_content:
+                cleaned_message: Message = Message(content=cleaned_content, role=message["role"])
+                cleaned_messages.append(cleaned_message)
 
         return cleaned_messages
 
@@ -347,7 +355,8 @@ def _generate_redaction_events(self) -> list[StreamEvent]:
                 {
                     "redactContent": {
                         "redactAssistantContentMessage": self.config.get(
-                            "guardrail_redact_output_message", "[Assistant output redacted.]"
+                            "guardrail_redact_output_message",
+                            "[Assistant output redacted.]",
                         )
                     }
                 }
@@ -427,6 +436,7 @@ def _stream(
         try:
             logger.debug("formatting request")
             request = self.format_request(messages, tool_specs, system_prompt)
+
             logger.debug("request=<%s>", request)
 
             logger.debug("invoking model")
@@ -660,7 +670,11 @@ def _find_detected_and_blocked_policy(self, input: Any) -> bool:
 
     @override
     async def structured_output(
-        self, output_model: Type[T], prompt: Messages, system_prompt: Optional[str] = None, **kwargs: Any
+        self,
+        output_model: Type[T],
+        prompt: Messages,
+        system_prompt: Optional[str] = None,
+        **kwargs: Any,
     ) -> AsyncGenerator[dict[str, Union[T, Any]], None]:
         """Get structured output from the model.
 
@@ -675,7 +689,12 @@ async def structured_output(
         """
         tool_spec = convert_pydantic_to_tool_spec(output_model)
 
-        response = self.stream(messages=prompt, tool_specs=[tool_spec], system_prompt=system_prompt, **kwargs)
+        response = self.stream(
+            messages=prompt,
+            tool_specs=[tool_spec],
+            system_prompt=system_prompt,
+            **kwargs,
+        )
         async for event in streaming.process_stream(response):
             yield event
 

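For context, here is a minimal standalone sketch of the behavior the hunks above add: reasoningContent blocks are dropped when the model ID contains "deepseek", and any message left with no content is skipped entirely. This is not the SDK's code; the helper name and the plain-dict message shape are illustrative, while the real logic lives in _format_bedrock_messages and uses the SDK's Message/ContentBlock types.

# Illustrative sketch only (assumed helper name, plain dicts in Bedrock Converse shape).
def filter_reasoning_for_deepseek(model_id, messages):
    """Drop reasoningContent blocks for DeepSeek models; skip messages left empty."""
    if "deepseek" not in model_id.lower():
        return messages

    cleaned = []
    for message in messages:
        content = [block for block in message["content"] if "reasoningContent" not in block]
        if content:  # a message whose only block was reasoningContent is dropped entirely
            cleaned.append({"role": message["role"], "content": content})
    return cleaned


if __name__ == "__main__":
    sample = [
        {"role": "user", "content": [{"text": "Hello"}]},
        {"role": "assistant", "content": [{"reasoningContent": {"reasoningText": {"text": "Only reasoning..."}}}]},
        {"role": "user", "content": [{"text": "Follow up"}]},
    ]
    print(filter_reasoning_for_deepseek("us.deepseek.r1-v1:0", sample))
    # -> only the two user messages remain, mirroring the tests below
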
tests/strands/models/test_bedrock.py

Lines changed: 53 additions & 0 deletions
@@ -1301,3 +1301,56 @@ def test_format_request_cleans_tool_result_content_blocks(model, model_id):
     assert tool_result == expected
     assert "extraField" not in tool_result
     assert "mcpMetadata" not in tool_result
+
+
+@pytest.mark.asyncio
+async def test_stream_deepseek_filters_reasoning_content(bedrock_client, alist):
+    """Test that DeepSeek models filter reasoningContent from messages during streaming."""
+    model = BedrockModel(model_id="us.deepseek.r1-v1:0")
+
+    messages = [
+        {"role": "user", "content": [{"text": "Hello"}]},
+        {
+            "role": "assistant",
+            "content": [
+                {"text": "Response"},
+                {"reasoningContent": {"reasoningText": {"text": "Thinking..."}}},
+            ],
+        },
+    ]
+
+    bedrock_client.converse_stream.return_value = {"stream": []}
+
+    await alist(model.stream(messages))
+
+    # Verify the request was made with filtered messages (no reasoningContent)
+    call_args = bedrock_client.converse_stream.call_args[1]
+    sent_messages = call_args["messages"]
+
+    assert len(sent_messages) == 2
+    assert sent_messages[0]["content"] == [{"text": "Hello"}]
+    assert sent_messages[1]["content"] == [{"text": "Response"}]
+
+
+@pytest.mark.asyncio
+async def test_stream_deepseek_skips_empty_messages(bedrock_client, alist):
+    """Test that DeepSeek models skip messages that would be empty after filtering reasoningContent."""
+    model = BedrockModel(model_id="us.deepseek.r1-v1:0")
+
+    messages = [
+        {"role": "user", "content": [{"text": "Hello"}]},
+        {"role": "assistant", "content": [{"reasoningContent": {"reasoningText": {"text": "Only reasoning..."}}}]},
+        {"role": "user", "content": [{"text": "Follow up"}]},
+    ]
+
+    bedrock_client.converse_stream.return_value = {"stream": []}
+
+    await alist(model.stream(messages))
+
+    # Verify the request was made with only non-empty messages
+    call_args = bedrock_client.converse_stream.call_args[1]
+    sent_messages = call_args["messages"]
+
+    assert len(sent_messages) == 2
+    assert sent_messages[0]["content"] == [{"text": "Hello"}]
+    assert sent_messages[1]["content"] == [{"text": "Follow up"}]
