diff --git a/docs/src/content/docs/cookbook/examples/api-agent.md b/docs/src/content/docs/cookbook/examples/api-agent.mdx
similarity index 100%
rename from docs/src/content/docs/cookbook/examples/api-agent.md
rename to docs/src/content/docs/cookbook/examples/api-agent.mdx
diff --git a/docs/src/content/docs/cookbook/tools/weather-api.md b/docs/src/content/docs/cookbook/tools/weather-api.mdx
similarity index 57%
rename from docs/src/content/docs/cookbook/tools/weather-api.md
rename to docs/src/content/docs/cookbook/tools/weather-api.mdx
index e20db03..3021e85 100644
--- a/docs/src/content/docs/cookbook/tools/weather-api.md
+++ b/docs/src/content/docs/cookbook/tools/weather-api.mdx
@@ -5,15 +5,17 @@ description: Understanding Tool use in Bedrock LLM Agent
This guide demonstrates how to create a specialized weather agent using BedrockLLMAgent and a custom weather tool. We'll walk through the process of defining the tool, setting up the agent, and integrating it into your Multi-Agent Orchestrator system.
-
-
1. **Define the Weather Tool**
Let's break down the weather tool definition into its key components:
-A. Tool Description
+**A. Tool Description**
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+
+
```typescript
export const weatherToolDescription = [
{
@@ -40,6 +42,35 @@ export const weatherToolDescription = [
}
];
```
+
+
+```python
+weather_tool_description = [{
+ "toolSpec": {
+ "name": "Weather_Tool",
+ "description": "Get the current weather for a given location, based on its WGS84 coordinates.",
+ "inputSchema": {
+ "json": {
+ "type": "object",
+ "properties": {
+ "latitude": {
+ "type": "string",
+ "description": "Geographical WGS84 latitude of the location.",
+ },
+ "longitude": {
+ "type": "string",
+ "description": "Geographical WGS84 longitude of the location.",
+ },
+ },
+ "required": ["latitude", "longitude"],
+ }
+ },
+ }
+}]
+```
+
+
**Explanation:**
- This describes the tool's interface to the LLM.
@@ -49,11 +80,9 @@ export const weatherToolDescription = [
- Requires `latitude` and `longitude` as strings.
- This schema helps the LLM understand how to use the tool correctly.
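+
+To make the schema concrete, here is roughly the `toolUse` content block the model emits once it decides to call the tool — this is the wrapper shape the handler in part C reads; the ID and coordinate values are illustrative:
+
+```python
+tool_use_example = {
+    "toolUse": {
+        "toolUseId": "tooluse_example",  # illustrative ID, echoed back in the tool result
+        "name": "Weather_Tool",
+        "input": {"latitude": "40.71", "longitude": "-74.01"},  # strings, per the schema
+    }
+}
+```
+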
-
-
-
-B. Custom Prompt
-
+**B. Custom Prompt**
+
+
```typescript
export const WEATHER_PROMPT = `
You are a weather assistant that provides current weather data for user-specified locations using only
@@ -72,6 +101,29 @@ To use the tool, you strictly apply the provided tool specification.
- Complete the entire process until you have all required data before sending the complete response.
`;
```
+
+
+```python
+weather_tool_prompt = """
+You are a weather assistant that provides current weather data for user-specified locations using only
+the Weather_Tool, which expects latitude and longitude. Infer the coordinates from the location yourself.
+If the user provides coordinates, infer the approximate location and refer to it in your response.
+To use the tool, you strictly apply the provided tool specification.
+
+- Explain your step-by-step process, and give brief updates before each step.
+- Only use the Weather_Tool for data. Never guess or make up information.
+- Repeat the tool use for subsequent requests if necessary.
+- If the tool errors, apologize, explain weather is unavailable, and suggest other options.
+- Report temperatures in °C (°F) and wind in km/h (mph). Keep weather reports concise. Sparingly use
+ emojis where appropriate.
+- Only respond to weather queries. Remind off-topic users of your purpose.
+- Never claim to search online, access external data, or use tools besides Weather_Tool.
+- Complete the entire process until you have all required data before sending the complete response.
+"""
+```
+
+
+
**Explanation:**
- This prompt sets the behavior and limitations for the LLM.
@@ -83,13 +135,11 @@ To use the tool, you strictly apply the provided tool specification.
- Format responses consistently (units, conciseness).
- Stay on topic and use only the provided tool.
-
-
-
-C. Tool Handler
+**C. Tool Handler**
+
+
```typescript
-
import { ConversationMessage, ParticipantRole } from "multi-agent-orchestrator";
@@ -123,6 +173,50 @@ export async function weatherToolHandler(response, conversation: ConversationMes
return message;
}
```
+
+
+```python
+import requests
+from requests.exceptions import RequestException
+from typing import List, Dict, Any
+from multi_agent_orchestrator.types import ConversationMessage, ParticipantRole
+
+async def weather_tool_handler(response: ConversationMessage, conversation: List[Dict[str, Any]]) -> ConversationMessage:
+ response_content_blocks = response.content
+
+ # Initialize an empty list of tool results
+ tool_results = []
+
+ if not response_content_blocks:
+ raise ValueError("No content blocks in response")
+
+ for content_block in response_content_blocks:
+ if "text" in content_block:
+ # Handle text content if needed
+ pass
+
+ if "toolUse" in content_block:
+ tool_use_block = content_block["toolUse"]
+ tool_use_name = tool_use_block.get("name")
+
+ if tool_use_name == "Weather_Tool":
+ tool_response = await fetch_weather_data(tool_use_block["input"])
+ tool_results.append({
+ "toolResult": {
+ "toolUseId": tool_use_block["toolUseId"],
+ "content": [{"json": {"result": tool_response}}],
+ }
+ })
+
+ # Embed the tool results in a new user message
+ message = ConversationMessage(
+ role=ParticipantRole.USER.value,
+ content=tool_results)
+
+ return message
+```
+
+
**Explanation:**
- This handler processes the LLM's request to use the Weather_Tool.
@@ -130,13 +224,12 @@ export async function weatherToolHandler(response, conversation: ConversationMes
- When it finds a Weather_Tool use:
- It calls `fetchWeatherData` with the provided coordinates.
- It formats the result into a tool result object.
-- Finally, it adds the tool results to the conversation as a new user message.
-
-
-
+- Finally, it returns the tool results to the caller as a new user message.
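+
+For illustration, the message returned after one successful tool call has roughly this shape (the ID and weather payload are placeholders):
+
+```python
+example_tool_result_message = ConversationMessage(
+    role=ParticipantRole.USER.value,
+    content=[{
+        "toolResult": {
+            "toolUseId": "tooluse_example",  # echoes the ID from the model's toolUse block
+            "content": [{"json": {"result": {"weather_data": {"current_weather": {"temperature": 21.3}}}}}],
+        }
+    }],
+)
+```
+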
-D. Data Fetching Function
+**D. Data Fetching Function**
+
+
```typescript
async function fetchWeatherData(inputData: { latitude: number; longitude: number }) {
const endpoint = "https://api.open-meteo.com/v1/forecast";
@@ -158,6 +251,34 @@ async function fetchWeatherData(inputData: { latitude: number; longitude: number
}
}
```
+
+
+```python
+async def fetch_weather_data(input_data):
+ """
+ Fetches weather data for the given latitude and longitude using the Open-Meteo API.
+ Returns the weather data or an error message if the request fails.
+
+ :param input_data: The input data containing the latitude and longitude.
+ :return: The weather data or an error message.
+ """
+ endpoint = "https://api.open-meteo.com/v1/forecast"
+ latitude = input_data.get("latitude")
+    longitude = input_data.get("longitude")
+ params = {"latitude": latitude, "longitude": longitude, "current_weather": True}
+
+    try:
+        response = requests.get(endpoint, params=params)
+        response.raise_for_status()
+        return {"weather_data": response.json()}
+    except RequestException as e:
+        # Prefer the API's own error body when the server returned one
+        if e.response is not None:
+            return e.response.json()
+        return {"error": type(e).__name__, "message": str(e)}
+    except Exception as e:
+        return {"error": type(e).__name__, "message": str(e)}
+```
+
+
**Explanation:**
- This function makes the actual API call to get weather data.
@@ -174,13 +295,13 @@ These components work together to create a functional weather tool:
4. The fetch function retrieves real weather data based on the LLM's input.
This setup allows the BedrockLLMAgent to provide weather information by seamlessly integrating external data into its responses.
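+
+If you want to sanity-check the data-fetching layer on its own before wiring it into an agent, a minimal standalone run of the Python version looks like this (the coordinates are illustrative):
+
+```python
+import asyncio
+
+# Call the fetch function directly, outside the agent loop
+print(asyncio.run(fetch_weather_data({"latitude": "40.71", "longitude": "-74.01"})))
+```
+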
-
-
+
2. **Create the Weather Agent**
Now that we have our weather tool defined, with the TypeScript code above saved in a file called `weatherTool.ts` and the Python version in `tools/weather_tool.py`, let's create a BedrockLLMAgent that uses this tool.
-
+
+
```typescript
// weatherAgent.ts
@@ -206,13 +327,34 @@ const weatherAgent = new BedrockLLMAgent({
});
weatherAgent.setSystemPrompt(WEATHER_PROMPT);
-
```
+
+
+```python
+from tools import weather_tool
+from multi_agent_orchestrator.agents import (BedrockLLMAgent, BedrockLLMAgentOptions)
+
+weather_agent = BedrockLLMAgent(BedrockLLMAgentOptions(
+ name="Weather Agent",
+ streaming=False,
+    description="Specialized agent for providing current weather conditions for a given city.",
+ tool_config={
+        'tool': weather_tool.weather_tool_description,
+ 'toolMaxRecursions': 5,
+ 'useToolHandler': weather_tool.weather_tool_handler
+ }
+ ))
+weather_agent.set_system_prompt(weather_tool.weather_tool_prompt)
+```
+
+
3. **Add the Weather Agent to the Orchestrator**
Now we can add our weather agent to the Multi-Agent Orchestrator:
+
+
```typescript
import { MultiAgentOrchestrator } from "multi-agent-orchestrator";
@@ -222,11 +364,23 @@ const orchestrator = new MultiAgentOrchestrator();
orchestrator.addAgent(weatherAgent);
```
+
+
+```python
+from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator
+
+orchestrator = MultiAgentOrchestrator()
+orchestrator.add_agent(weather_agent)
+```
+
+
4. **Using the Weather Agent**
Now that our weather agent is set up and added to the orchestrator, we can use it to get weather information:
+
+
```typescript
const response = await orchestrator.routeRequest(
@@ -235,6 +389,13 @@ const response = await orchestrator.routeRequest(
"session456"
);
```
+
+
+```python
+response = await orchestrator.route_request(
+    "What's the weather like in New York City?",
+    "user123",
+    "session456"
+)
+```
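+
+With `streaming=False` (as configured above), you can then read the routing decision and the reply off the result — a minimal sketch, assuming the standard non-streaming `AgentResponse` shape:
+
+```python
+# `metadata` records which agent was selected; `output` is the agent's ConversationMessage
+print(response.metadata.agent_name)
+print(response.output.content[0]["text"])
+```
+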
+
+
### How It Works
diff --git a/examples/local-demo/apiAgent.ts b/examples/local-demo/apiAgent.ts
deleted file mode 100644
index c6882d9..0000000
--- a/examples/local-demo/apiAgent.ts
+++ /dev/null
@@ -1,149 +0,0 @@
-import {
- ConversationMessage,
- ParticipantRole,
- Agent,
- AgentOptions
-} from "multi-agent-orchestrator";
-
-/**
- * Extended options for the ApiAgent class.
- */
-export interface ApiAgentOptions extends AgentOptions {
- endpoint: string;
- method: string;
- streaming?: boolean;
-  headersCallback?: () => Record<string, string>;
- inputPayloadEncoder?: (inputText: string, ...additionalParams: any) => any;
- outputPayloadDecoder?: (response: any) => any;
-}
-
-/**
- * ApiAgent class for handling API-based agent interactions.
- */
-export class ApiAgent extends Agent {
- private options: ApiAgentOptions;
-
- constructor(options: ApiAgentOptions) {
- super(options);
- this.options = options;
- this.options.inputPayloadEncoder = options.inputPayloadEncoder ?? this.defaultInputPayloadEncoder;
- this.options.outputPayloadDecoder = options.outputPayloadDecoder ?? this.defaultOutputPayloadDecoder;
- }
-
- /**
- * Default input payload encoder.
- */
- private defaultInputPayloadEncoder(inputText: string, chatHistory: ConversationMessage[]): any {
- return { input: inputText, history: chatHistory };
- }
-
- /**
- * Default output payload decoder.
- */
- private defaultOutputPayloadDecoder(response: any): any {
- return response.output;
- }
-
- /**
- * Fetch data from the API.
- * @param payload - The payload to send to the API.
- * @param streaming - Whether to use streaming or not.
- */
- private async *fetch(payload: any, streaming: boolean = false): AsyncGenerator {
- const headers = this.getHeaders();
- const response = await this.sendRequest(payload, headers);
-
- if (!response.ok) {
- throw new Error(`HTTP error! status: ${response.status}`);
- }
-
- if (!response.body) {
- throw new Error('Response body is null');
- }
-
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
-
- try {
- if (streaming) {
- yield* this.handleStreamingResponse(reader, decoder);
- } else {
- return yield* this.handleNonStreamingResponse(reader, decoder);
- }
- } finally {
- reader.releaseLock();
- }
- }
-
- /**
- * Get headers for the API request.
- */
-  private getHeaders(): Record<string, string> {
- const defaultHeaders = {
- 'Content-Type': 'application/json',
- };
- return this.options.headersCallback
- ? { ...defaultHeaders, ...this.options.headersCallback() }
- : defaultHeaders;
- }
-
- /**
- * Send the API request.
- */
-  private async sendRequest(payload: any, headers: Record<string, string>): Promise<Response> {
- return fetch(this.options.endpoint, {
- method: this.options.method,
- headers: headers,
- body: JSON.stringify(payload),
- });
- }
-
- /**
- * Handle streaming response.
- */
- private async *handleStreamingResponse(reader: any, decoder: any): AsyncGenerator {
- while (true) {
- const { done, value } = await reader.read();
- if (done) break;
- const chunk = decoder.decode(value, { stream: true });
- const message = this.options.outputPayloadDecoder!(chunk);
- yield message;
- }
- }
-
- /**
- * Handle non-streaming response.
- */
- private async *handleNonStreamingResponse(reader: any, decoder: any): AsyncGenerator {
- let result = '';
- while (true) {
- const { done, value } = await reader.read();
- if (done) break;
- result += decoder.decode(value, { stream: false });
- }
- return result;
- }
-
- /**
- * Process the request and return the response.
- */
- async processRequest(
- inputText: string,
- userId: string,
- sessionId: string,
- chatHistory: ConversationMessage[],
-    additionalParams?: Record<string, string>
-  ): Promise<ConversationMessage | AsyncIterable<any>> {
- const payload = this.options.inputPayloadEncoder!(inputText, chatHistory, userId, sessionId, additionalParams);
-
- if (this.options.streaming) {
- return this.fetch(payload, true);
- } else {
- const result = await this.fetch(payload, false).next();
- return {
- role: ParticipantRole.ASSISTANT,
- content: [{ text: this.options.outputPayloadDecoder!(result.value) }]
- };
- }
- }
-}
\ No newline at end of file
diff --git a/examples/local-demo/ollamaAgent.ts b/examples/local-demo/ollamaAgent.ts
deleted file mode 100644
index a41a791..0000000
--- a/examples/local-demo/ollamaAgent.ts
+++ /dev/null
@@ -1,72 +0,0 @@
-import {
- Agent,
- AgentOptions,
- ConversationMessage,
- ParticipantRole,
- Logger
- } from "multi-agent-orchestrator";
- import ollama from 'ollama'
-
- export interface OllamaAgentOptions extends AgentOptions {
- streaming?: boolean;
- // Add other Ollama-specific options here (e.g., temperature, top_k, top_p)
- }
-
- export class OllamaAgent extends Agent {
- private options: OllamaAgentOptions;
-
- constructor(options: OllamaAgentOptions) {
- super(options);
- this.options = {
- name: options.name,
- description: options.description,
- modelId: options.modelId ?? "llama2",
- streaming: options.streaming ?? false
- };
- }
-
-   private async *handleStreamingResponse(messages: any[]): AsyncIterable<any> {
- try {
- const response = await ollama.chat({
- model: this.options.modelId ?? "llama2",
- messages: messages,
- stream: true,
- });
-
- for await (const part of response) {
- yield part.message.content;
- }
- } catch (error) {
- Logger.logger.error("Error getting stream from Ollama model:", error);
- throw error;
- }
- }
-
- async processRequest(
- inputText: string,
- userId: string,
- sessionId: string,
- chatHistory: ConversationMessage[],
-     additionalParams?: Record<string, string>
-   ): Promise<ConversationMessage | AsyncIterable<any>> {
- const messages = chatHistory.map(item => ({
- role: item.role,
- content: item.content![0].text
- }));
- messages.push({role: ParticipantRole.USER, content: inputText});
-
- if (this.options.streaming) {
- return this.handleStreamingResponse(messages);
- } else {
- const response = await ollama.chat({
- model: this.options.modelId!,
- messages: messages,
- });
- const message: ConversationMessage = {
- role: ParticipantRole.ASSISTANT,
- content: [{text: response.message.content}]
- };
- return message;
- }
- }
- }
\ No newline at end of file
diff --git a/examples/python-demo/tools/weather_tool.py b/examples/python-demo/tools/weather_tool.py
index bbc76d6..9a51870 100644
--- a/examples/python-demo/tools/weather_tool.py
+++ b/examples/python-demo/tools/weather_tool.py
@@ -45,7 +45,7 @@
"""
-async def weather_tool_handler(response: ConversationMessage, conversation: List[Dict[str, Any]]):
+async def weather_tool_handler(response: ConversationMessage, conversation: List[Dict[str, Any]]) -> ConversationMessage:
response_content_blocks = response.content
# Initialize an empty list of tool results
@@ -64,11 +64,11 @@ async def weather_tool_handler(response: ConversationMessage, conversation: List
tool_use_name = tool_use_block.get("name")
if tool_use_name == "Weather_Tool":
- response = await fetch_weather_data(tool_use_block["input"])
+ tool_response = await fetch_weather_data(tool_use_block["input"])
tool_results.append({
"toolResult": {
"toolUseId": tool_use_block["toolUseId"],
- "content": [{"json": {"result": response}}],
+ "content": [{"json": {"result": tool_response}}],
}
})
@@ -77,8 +77,8 @@ async def weather_tool_handler(response: ConversationMessage, conversation: List
role=ParticipantRole.USER.value,
content=tool_results)
- # Append the new message to the ongoing conversation
- conversation.append(message)
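+    # Return the message; the caller (BedrockLLMAgent.process_request) now appends it to the conversation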
+ return message
+
async def fetch_weather_data(input_data):
"""
diff --git a/python/README.md b/python/README.md
index 612a557..b6b04e8 100644
--- a/python/README.md
+++ b/python/README.md
@@ -18,9 +18,9 @@
## What's the Multi-Agent Orchestrator ❓
-The Multi-Agent Orchestrator is a flexible framework for managing multiple AI agents and handling complex conversations. It intelligently routes queries and maintains context across interactions.
+The Multi-Agent Orchestrator is a flexible framework for managing multiple AI agents and handling complex conversations. It intelligently routes queries and maintains context across interactions.
-The system offers pre-built components for quick deployment, while also allowing easy integration of custom agents and conversation messages storage solutions.
+The system offers pre-built components for quick deployment, while also allowing easy integration of custom agents and conversation message storage solutions.
This adaptability makes it suitable for a wide range of applications, from simple chatbots to sophisticated AI systems, accommodating diverse requirements and scaling efficiently.
@@ -34,15 +34,15 @@ This adaptability makes it suitable for a wide range of applications, from simpl
-1. The process begins with user input, which is analyzed by a Classifier.
-2. The Classifier leverages both Agents' Characteristics and Agents' Conversation history to select the most appropriate agent for the task.
+1. The process begins with user input, which is analyzed by a Classifier.
+2. The Classifier leverages both Agents' Characteristics and Agents' Conversation history to select the most appropriate agent for the task.
3. Once an agent is selected, it processes the user input.
-4. The orchestrator then saves the conversation, updating the Agents' Conversation history, before delivering the response back to the user.
+4. The orchestrator then saves the conversation, updating the Agents' Conversation history, before delivering the response back to the user.
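+
+In code, those four steps reduce to a couple of calls — a minimal sketch, assuming an agent such as the `weather_agent` from the documentation examples has already been constructed:
+
+```python
+import asyncio
+from multi_agent_orchestrator.orchestrator import MultiAgentOrchestrator
+
+orchestrator = MultiAgentOrchestrator()
+orchestrator.add_agent(weather_agent)  # the agent's description is what the Classifier matches against
+
+# The Classifier selects an agent, the agent processes the input, and the
+# conversation history is saved before the response is returned.
+response = asyncio.run(orchestrator.route_request("What's the weather in Paris?", "user123", "session456"))
+```
+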
## 💬 Demo App
-To quickly get a feel for the Multi-Agent Orchestrator, we've provided a Demo App with a few basic agents. This interactive demo showcases the orchestrator's capabilities in a user-friendly interface. To learn more about setting up and running the demo app, please refer to our [Demo App](https://awslabs.github.io/multi-agent-orchestrator/deployment/demo-web-app/) section.
+To quickly get a feel for the Multi-Agent Orchestrator, we've provided a Demo App with a few basic agents. This interactive demo showcases the orchestrator's capabilities in a user-friendly interface. To learn more about setting up and running the demo app, please refer to our [Demo App](https://awslabs.github.io/multi-agent-orchestrator/cookbook/examples/chat-demo-app/) section.
@@ -55,7 +55,7 @@ In the screen recording below, we demonstrate an extended version of the demo ap
- **Health Agent**: A Bedrock LLM Agent focused on addressing health-related queries
Watch as the system seamlessly switches context between diverse topics, from booking flights to checking weather, solving math problems, and providing health information.
-Notice how the appropriate agent is selected for each query, maintaining coherence even with brief follow-up inputs.
+Notice how the appropriate agent is selected for each query, maintaining coherence even with brief follow-up inputs.
The demo highlights the system's ability to handle complex, multi-turn conversations while preserving context and leveraging specialized agents across various domains.
diff --git a/python/setup.cfg b/python/setup.cfg
index 209869e..8d41e5a 100644
--- a/python/setup.cfg
+++ b/python/setup.cfg
@@ -18,7 +18,7 @@ classifiers =
package_dir =
= src
packages = find:
-python_requires = >=3.12
+python_requires = >=3.11
install_requires =
boto3==1.34.151
anthropic==0.32.0
diff --git a/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py b/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py
index 578e7af..8c43657 100644
--- a/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py
+++ b/python/src/multi_agent_orchestrator/agents/bedrock_llm_agent.py
@@ -149,7 +149,8 @@ async def process_request(
conversation.append(bedrock_response)
if any('toolUse' in content for content in bedrock_response.content):
- await self.tool_config['useToolHandler'](bedrock_response, conversation)
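+            # The tool handler now returns the tool-result message; append it so the next model turn can see it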
+ tool_response = await self.tool_config['useToolHandler'](bedrock_response, conversation)
+ conversation.append(tool_response)
else:
continue_with_tools = False
final_message = bedrock_response
diff --git a/typescript/README.md b/typescript/README.md
index 360068c..32c574f 100644
--- a/typescript/README.md
+++ b/typescript/README.md
@@ -42,7 +42,7 @@ This adaptability makes it suitable for a wide range of applications, from simpl
## 💬 Demo App
-To quickly get a feel for the Multi-Agent Orchestrator, we've provided a Demo App with a few basic agents. This interactive demo showcases the orchestrator's capabilities in a user-friendly interface. To learn more about setting up and running the demo app, please refer to our [Demo App](https://awslabs.github.io/multi-agent-orchestrator/deployment/demo-web-app/) section.
+To quickly get a feel for the Multi-Agent Orchestrator, we've provided a Demo App with a few basic agents. This interactive demo showcases the orchestrator's capabilities in a user-friendly interface. To learn more about setting up and running the demo app, please refer to our [Demo App](https://awslabs.github.io/multi-agent-orchestrator/cookbook/examples/chat-demo-app/) section.