src/strands/tools/registry.py (6 changes: 3 additions & 3 deletions)

@@ -192,9 +192,9 @@ def register_tool(self, tool: AgentTool) -> None:

        # Check duplicate tool name, throw on duplicate tool names except if hot_reloading is enabled
        if tool.tool_name in self.registry and not tool.supports_hot_reload:
-            raise ValueError(
-                f"Tool name '{tool.tool_name}' already exists. Cannot register tools with exact same name."
-            )
+            raise ValueError(
+                f"Tool name '{tool.tool_name}' already exists. Cannot register tools with exact same name."
+            )

        # Check for normalized name conflicts (- vs _)
        if self.registry.get(tool.tool_name) is None:
tests_integ/test_bedrock_guardrails.py (45 changes: 39 additions & 6 deletions)

@@ -138,9 +138,25 @@ def test_guardrail_output_intervention(boto_session, bedrock_guardrail, processi
    response1 = agent("Say the word.")
    response2 = agent("Hello!")
    assert response1.stop_reason == "guardrail_intervened"
-    assert BLOCKED_OUTPUT in str(response1)
-    assert response2.stop_reason != "guardrail_intervened"
-    assert BLOCKED_OUTPUT not in str(response2)

+    """
+    In async streaming, the buffering is non-blocking:
+    tokens are streamed while Guardrails processes the buffered content in the background.
+    This means the response may be returned before Guardrails has finished processing.
+    As a result, we cannot guarantee that BLOCKED_OUTPUT is in the response.
+    """
+    if processing_mode == "sync":
+        assert BLOCKED_OUTPUT in str(response1)
+        assert response2.stop_reason != "guardrail_intervened"
+        assert BLOCKED_OUTPUT not in str(response2)
+    else:
+        cactus_returned_in_response1_blocked_by_input_guardrail = BLOCKED_INPUT in str(response2)
+        cactus_blocked_in_response1_allows_next_response = (
+            BLOCKED_OUTPUT not in str(response2) and response2.stop_reason != "guardrail_intervened"
+        )
+        assert (
+            cactus_returned_in_response1_blocked_by_input_guardrail or cactus_blocked_in_response1_allows_next_response
+        )


@pytest.mark.parametrize("processing_mode", ["sync", "async"])
@@ -164,10 +180,27 @@ def test_guardrail_output_intervention_redact_output(bedrock_guardrail, processi

    response1 = agent("Say the word.")
    response2 = agent("Hello!")

    assert response1.stop_reason == "guardrail_intervened"
-    assert REDACT_MESSAGE in str(response1)
-    assert response2.stop_reason != "guardrail_intervened"
-    assert REDACT_MESSAGE not in str(response2)

+    """
+    In async streaming, the buffering is non-blocking:
+    tokens are streamed while Guardrails processes the buffered content in the background.
+    This means the response may be returned before Guardrails has finished processing.
+    As a result, we cannot guarantee that REDACT_MESSAGE is in the response.
+    """
+    if processing_mode == "sync":
+        assert REDACT_MESSAGE in str(response1)
+        assert response2.stop_reason != "guardrail_intervened"
+        assert REDACT_MESSAGE not in str(response2)
+    else:
+        cactus_returned_in_response1_blocked_by_input_guardrail = BLOCKED_INPUT in str(response2)
+        cactus_blocked_in_response1_allows_next_response = (
+            REDACT_MESSAGE not in str(response2) and response2.stop_reason != "guardrail_intervened"
+        )
+        assert (
+            cactus_returned_in_response1_blocked_by_input_guardrail or cactus_blocked_in_response1_allows_next_response
+        )


def test_guardrail_input_intervention_properly_redacts_in_session(boto_session, bedrock_guardrail, temp_dir):
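For context on the two modes these tests parametrize over: at the Bedrock Runtime API level, the stream-processing mode is selected in the guardrailConfig passed to converse_stream. The sketch below is a minimal illustration, not the code path used by the tests above (which go through the Strands Agent); the model ID, guardrail identifier, and region are placeholders rather than values from this PR. It shows where "sync" vs "async" is chosen and why async mode can emit tokens before an intervention is reported.

import boto3

# Minimal sketch; assumes a Bedrock guardrail already exists. IDs below are placeholders.
client = boto3.client("bedrock-runtime", region_name="us-west-2")

response = client.converse_stream(
    modelId="us.anthropic.claude-3-7-sonnet-20250219-v1:0",  # placeholder model ID
    messages=[{"role": "user", "content": [{"text": "Say the word."}]}],
    guardrailConfig={
        "guardrailIdentifier": "<guardrail-id>",  # placeholder
        "guardrailVersion": "DRAFT",
        "trace": "enabled",
        # "sync": each buffered segment is evaluated before it is returned, so a blocked
        # or redacted message shows up in the streamed output.
        # "async": Guardrails evaluates the buffered text in the background while tokens
        # keep streaming, so content may already have been emitted when an intervention
        # is reported, which is why the async branches above accept either outcome.
        "streamProcessingMode": "async",
    },
)

# Print the raw stream events, including any guardrail trace/intervention events.
for event in response["stream"]:
    print(event)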