38 changes: 34 additions & 4 deletions .github/workflows/test-lint-pr.yml
@@ -48,17 +48,47 @@ jobs:
return true;

unit-test:
name: Run Tests on Python ${{ matrix.python-version }}
runs-on: ubuntu-latest
name: Unit Tests - Python ${{ matrix.python-version }} - ${{ matrix.os-name }}
needs: check-approval
permissions:
contents: read
# Only run if PR is approved or this is a direct push to main
if: github.event_name == 'push' || needs.check-approval.outputs.approved == 'true'
strategy:
matrix:
python-version: [ "3.10", "3.11", "3.12", "3.13" ]
include:
# Linux
- os: ubuntu-latest
os-name: linux
python-version: "3.10"
- os: ubuntu-latest
os-name: linux
python-version: "3.11"
- os: ubuntu-latest
os-name: linux
python-version: "3.12"
- os: ubuntu-latest
os-name: linux
python-version: "3.13"
# Windows
- os: windows-latest
os-name: windows
python-version: "3.10"
- os: windows-latest
os-name: windows
python-version: "3.11"
- os: windows-latest
os-name: windows
python-version: "3.12"
- os: windows-latest
os-name: windows
python-version: "3.13"
# MacOS - latest only; not enough runners for MacOS
- os: macos-latest
os-name: macos
python-version: "3.13"
fail-fast: false
runs-on: ${{ matrix.os }}
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -78,7 +108,7 @@ jobs:
continue-on-error: false

lint:
name: Run Lint
name: Lint
runs-on: ubuntu-latest
needs: check-approval
permissions:
4 changes: 3 additions & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "strands-agents-tools"
version = "0.1.1"
version = "0.1.2"
description = "A collection of specialized tools for Strands Agents"
readme = "README.md"
requires-python = ">=3.10"
@@ -40,6 +40,8 @@ dependencies = [
"slack_bolt>=1.23.0,<2.0.0",
"mem0ai>=0.1.99,<1.0.0",
"opensearch-py>=2.8.0,<3.0.0",
# Note: Always want the latest tzdata
"tzdata ; platform_system == 'Windows'",
]

[tool.hatch.build.targets.wheel]
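The `tzdata` marker is scoped to Windows because the standard library's `zoneinfo` module has no system time-zone database to fall back on there; a short sketch of the lookup that depends on it:

```python
# Short sketch of the code path that needs tzdata on Windows: zoneinfo has no
# system tz database to read there, so IANA names resolve via the tzdata wheel.
from datetime import datetime
from zoneinfo import ZoneInfo

# Raises zoneinfo.ZoneInfoNotFoundError on Windows unless tzdata is installed.
now_eastern = datetime.now(ZoneInfo("America/New_York"))
print(now_eastern.isoformat())
```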
7 changes: 3 additions & 4 deletions src/strands_tools/environment.py
@@ -71,8 +71,7 @@
from rich.text import Text
from strands.types.tools import ToolResult, ToolResultContent, ToolUse

from strands_tools.utils import console_util
from strands_tools.utils.user_input import get_user_input
from strands_tools.utils import console_util, user_input

TOOL_SPEC = {
"name": "environment",
@@ -584,7 +583,7 @@ def environment(tool: ToolUse, **kwargs: Any) -> ToolResult:
)

# Ask for confirmation
confirm = get_user_input(
confirm = user_input.get_user_input(
"\n<yellow><bold>Do you want to proceed with setting this environment variable?</bold> [y/*]</yellow>"
)
# For tests, 'y' should be recognized even with extra spaces or newlines
@@ -706,7 +705,7 @@ def environment(tool: ToolUse, **kwargs: Any) -> ToolResult:
)

# Ask for confirmation
confirm = get_user_input(
confirm = user_input.get_user_input(
"\n<red><bold>Do you want to proceed with deleting this environment variable?</bold> [y/*]</red>"
)
# For tests, 'y' should be recognized even with extra spaces or newlines
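Because the confirmation prompt is now resolved through the `user_input` module at call time, a test can stub it by patching that single module attribute. A minimal pytest sketch of the pattern; the test name and canned answer are illustrative, not taken from this repository's test suite:

```python
# Hypothetical pytest sketch: stubbing the interactive confirmation prompt.
# Patching the attribute on the user_input module is picked up by code that
# calls user_input.get_user_input(...) through the module at call time.
from strands_tools.utils import user_input


def test_prompt_can_be_stubbed(monkeypatch):
    # Canned "y" answer with extra whitespace, mirroring the tolerance noted
    # in the tool's confirmation handling.
    monkeypatch.setattr(user_input, "get_user_input", lambda prompt: " y\n")
    assert user_input.get_user_input("proceed? [y/*]").strip().lower() == "y"
```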
6 changes: 2 additions & 4 deletions src/strands_tools/load_tool.py
@@ -79,7 +79,6 @@ def load_tool(path: str, name: str, agent=None) -> Dict[str, Any]:

Tool Loading Process:
-------------------
- First, checks if dynamic loading is permitted (hot_reload_tools=True)
- Expands the path to handle user paths with tilde (~)
- Validates that the file exists at the specified path
- Uses the tool_registry's load_tool_from_filepath method to:
@@ -175,7 +174,6 @@ def my_custom_tool(tool: ToolUse, **kwargs: Any) -> ToolResult:

Notes:
- The tool loading can be disabled via STRANDS_DISABLE_LOAD_TOOL=true environment variable
- The Agent instance must have hot_reload_tools=True to enable dynamic loading
- Python files in the cwd()/tools/ directory are automatically hot reloaded without
requiring explicit calls to load_tool
- When using the load_tool function, ensure your tool files have proper docstrings as they are
@@ -187,8 +185,8 @@ def my_custom_tool(tool: ToolUse, **kwargs: Any) -> ToolResult:
current_agent = agent

try:
# Check if dynamic tool loading is disabled via environment variable or agent.hot_reload_tools.
if not current_agent.hot_reload_tools or os.environ.get("STRANDS_DISABLE_LOAD_TOOL", "").lower() == "true":
# Check if dynamic tool loading is disabled via environment variable.
if os.environ.get("STRANDS_DISABLE_LOAD_TOOL", "").lower() == "true":
logger.warning("Dynamic tool loading is disabled via STRANDS_DISABLE_LOAD_TOOL=true")
return {"status": "error", "content": [{"text": "⚠️ Dynamic tool loading is disabled in production mode."}]}

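A minimal sketch of the `STRANDS_DISABLE_LOAD_TOOL` kill switch described in the docstring above. The package-level import and the tool path/name are assumptions chosen for illustration, following the call style used elsewhere in these docstrings:

```python
# Hypothetical sketch: with STRANDS_DISABLE_LOAD_TOOL set, load_tool
# short-circuits and returns an error result instead of importing the file.
import os

from strands import Agent
from strands_tools import load_tool

os.environ["STRANDS_DISABLE_LOAD_TOOL"] = "true"

agent = Agent(tools=[load_tool])

# The path and tool name below are placeholders for a real tool module.
result = agent.tool.load_tool(path="./tools/my_tool.py", name="my_tool")
print(result["status"])  # expected to be "error" while the flag is set
```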
37 changes: 10 additions & 27 deletions src/strands_tools/mem0_memory.py
@@ -141,19 +141,10 @@
"required": ["action"],
"allOf": [
{
"if": {
"properties": {
"action": {"enum": ["store", "list", "retrieve"]}
}
},
"then": {
"oneOf": [
{"required": ["user_id"]},
{"required": ["agent_id"]}
]
}
"if": {"properties": {"action": {"enum": ["store", "list", "retrieve"]}}},
"then": {"oneOf": [{"required": ["user_id"]}, {"required": ["agent_id"]}]},
}
]
],
}
},
}
@@ -536,7 +527,7 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
return ToolResult(
toolUseId=tool_use_id,
status="success",
content=[ToolResultContent(text=f"Successfully stored {len(results.get('results', []))} memories")]
content=[ToolResultContent(text=f"Successfully stored {len(results.get('results', []))} memories")],
)

elif action == "get":
@@ -547,9 +538,7 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
panel = format_get_response(memory)
console.print(panel)
return ToolResult(
toolUseId=tool_use_id,
status="success",
content=[ToolResultContent(text=json.dumps(memory, indent=2))]
toolUseId=tool_use_id, status="success", content=[ToolResultContent(text=json.dumps(memory, indent=2))]
)

elif action == "list":
@@ -559,7 +548,7 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
return ToolResult(
toolUseId=tool_use_id,
status="success",
content=[ToolResultContent(text=json.dumps(memories.get("results", []), indent=2))]
content=[ToolResultContent(text=json.dumps(memories.get("results", []), indent=2))],
)

elif action == "retrieve":
@@ -576,7 +565,7 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
return ToolResult(
toolUseId=tool_use_id,
status="success",
content=[ToolResultContent(text=json.dumps(memories.get("results", []), indent=2))]
content=[ToolResultContent(text=json.dumps(memories.get("results", []), indent=2))],
)

elif action == "delete":
@@ -589,7 +578,7 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
return ToolResult(
toolUseId=tool_use_id,
status="success",
content=[ToolResultContent(text=f"Memory {tool_input['memory_id']} deleted successfully")]
content=[ToolResultContent(text=f"Memory {tool_input['memory_id']} deleted successfully")],
)

elif action == "history":
@@ -600,9 +589,7 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
panel = format_history_response(history)
console.print(panel)
return ToolResult(
toolUseId=tool_use_id,
status="success",
content=[ToolResultContent(text=json.dumps(history, indent=2))]
toolUseId=tool_use_id, status="success", content=[ToolResultContent(text=json.dumps(history, indent=2))]
)

else:
@@ -615,8 +602,4 @@ def mem0_memory(tool: ToolUse, **kwargs: Any) -> ToolResult:
border_style="red",
)
console.print(error_panel)
return ToolResult(
toolUseId=tool_use_id,
status="error",
content=[ToolResultContent(text=f"Error: {str(e)}")]
)
return ToolResult(toolUseId=tool_use_id, status="error", content=[ToolResultContent(text=f"Error: {str(e)}")])
6 changes: 3 additions & 3 deletions src/strands_tools/memory.py
@@ -42,23 +42,23 @@
agent = Agent(tools=[memory])

# Store content in Knowledge Base
agent.memory(
agent.tool.memory(
action="store",
content="Important information to remember",
title="Meeting Notes",
STRANDS_KNOWLEDGE_BASE_ID="my1234kb"
)

# Retrieve content using semantic search
agent.memory(
agent.tool.memory(
action="retrieve",
query="meeting information",
min_score=0.7,
STRANDS_KNOWLEDGE_BASE_ID="my1234kb"
)

# List all documents
agent.memory(
agent.tool.memory(
action="list",
max_results=50,
STRANDS_KNOWLEDGE_BASE_ID="my1234kb"
2 changes: 2 additions & 0 deletions src/strands_tools/slack.py
@@ -351,12 +351,14 @@ def _process_message(self, event):
return

tools = list(self.agent.tool_registry.registry.values())
trace_attributes = self.agent.trace_attributes

agent = Agent(
messages=[],
system_prompt=f"{self.agent.system_prompt}\n{SLACK_SYSTEM_PROMPT}",
tools=tools,
callback_handler=None,
trace_attributes=trace_attributes,
)

channel_id = event.get("channel")
66 changes: 39 additions & 27 deletions src/strands_tools/think.py
@@ -9,9 +9,9 @@
Usage with Strands Agent:
```python
from strands import Agent
from strands_tools import think
from strands_tools import think, stop

agent = Agent(tools=[think])
agent = Agent(tools=[think, stop])

# Basic usage with default system prompt
result = agent.tool.think(
Expand All @@ -32,13 +32,15 @@
See the think function docstring for more details on configuration options and parameters.
"""

import logging
import traceback
import uuid
from typing import Any, Dict

from strands import tool
from strands import Agent, tool
from strands.telemetry.metrics import metrics_to_string

from strands_tools.use_llm import use_llm
logger = logging.getLogger(__name__)


class ThoughtProcessor:
@@ -77,36 +79,46 @@ def process_cycle(
) -> str:
"""Process a single thinking cycle."""

logger.debug(f"🧠 Thinking Cycle {cycle}/{total_cycles}: Processing cycle...")
print(f"🧠 Thinking Cycle {cycle}/{total_cycles}: Processing cycle...")

# Create cycle-specific prompt
prompt = self.create_thinking_prompt(thought, cycle, total_cycles)

# Use LLM for processing
result = use_llm(
{
"name": "use_llm",
"toolUseId": self.tool_use_id,
"input": {
"system_prompt": custom_system_prompt,
"prompt": prompt,
},
},
**kwargs,
)

# Extract and return response
cycle_response = ""
if result.get("status") == "success":
for content in result.get("content", []):
if content.get("text"):
cycle_response += content["text"] + "\n"

return cycle_response.strip()
# Display input prompt
logger.debug(f"\n--- Input Prompt ---\n{prompt}\n")

# Get tools from parent agent if available
tools = []
trace_attributes = {}
parent_agent = kwargs.get("agent")
if parent_agent:
tools = list(parent_agent.tool_registry.registry.values())
trace_attributes = parent_agent.trace_attributes

# Initialize the new Agent with provided parameters
agent = Agent(messages=[], tools=tools, system_prompt=custom_system_prompt, trace_attributes=trace_attributes)

# Run the agent with the provided prompt
result = agent(prompt)

# Extract response
assistant_response = str(result)

# Display assistant response
logger.debug(f"\n--- Assistant Response ---\n{assistant_response.strip()}\n")

# Print metrics if available
if result.metrics:
metrics = result.metrics
metrics_text = metrics_to_string(metrics)
logger.debug(metrics_text)

return assistant_response.strip()


@tool
def think(thought: str, cycle_count: int, system_prompt: str, **kwargs: Any) -> Dict[str, Any]:
def think(thought: str, cycle_count: int, system_prompt: str, agent: Any) -> Dict[str, Any]:
"""
Recursive thinking tool for sophisticated thought generation, learning, and self-reflection.

Expand Down Expand Up @@ -172,7 +184,7 @@ def think(thought: str, cycle_count: int, system_prompt: str, **kwargs: Any) ->
custom_system_prompt = (
"You are an expert analytical thinker. Process the thought deeply and provide clear insights."
)

kwargs = {"agent": agent}
# Create thought processor instance with the available context
processor = ThoughtProcessor(kwargs)

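For reference, a call shaped to the updated `think` signature. The thought text, cycle count, and system prompt are illustrative, and the `agent` parameter is assumed to be injected by the runtime when the tool is invoked through `agent.tool.think`, mirroring how the other tools receive the parent agent:

```python
from strands import Agent
from strands_tools import think

agent = Agent(tools=[think])

# Illustrative values; each cycle now runs in a nested Agent that inherits the
# parent's registered tools and trace attributes.
result = agent.tool.think(
    thought="How should a retry policy balance latency against reliability?",
    cycle_count=2,
    system_prompt="You are an expert analytical thinker. Provide clear, structured insights.",
)
print(result)
```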
9 changes: 4 additions & 5 deletions src/strands_tools/use_llm.py
@@ -117,9 +117,12 @@ def use_llm(tool: ToolUse, **kwargs: Any) -> ToolResult:
tool_system_prompt = tool_input.get("system_prompt")

tools = []
trace_attributes = {}

parent_agent = kwargs.get("agent")
if parent_agent:
tools = list(parent_agent.tool_registry.registry.values())
trace_attributes = parent_agent.trace_attributes

# Display input prompt
logger.debug(f"\n--- Input Prompt ---\n{prompt}\n")
@@ -128,11 +131,7 @@ def use_llm(tool: ToolUse, **kwargs: Any) -> ToolResult:
logger.debug("🔄 Creating new LLM instance...")

# Initialize the new Agent with provided parameters
agent = Agent(
messages=[],
tools=tools,
system_prompt=tool_system_prompt,
)
agent = Agent(messages=[], tools=tools, system_prompt=tool_system_prompt, trace_attributes=trace_attributes)
# Run the agent with the provided prompt
result = agent(prompt)

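Both the Slack handler and `use_llm` now read `trace_attributes` off the parent agent and pass them to the nested Agent they construct, so traces from the two agents share the same context. A sketch of what that looks like from the caller's side; the import path and the attribute keys are placeholders chosen for illustration:

```python
from strands import Agent
from strands_tools import use_llm

# Placeholder trace attributes; whatever is set here is read back off the
# parent agent and forwarded to the nested Agent that use_llm creates.
parent = Agent(
    tools=[use_llm],
    trace_attributes={"session.id": "example-session-123"},
)

result = parent.tool.use_llm(
    prompt="Summarize the key decisions from the last discussion.",
    system_prompt="You are a concise summarizer.",
)
print(result)
```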