diff --git a/README.md b/README.md
index 0c78c82f5888..e92f9b107684 100644
--- a/README.md
+++ b/README.md
@@ -106,21 +106,32 @@ The following code uses code execution, you need to have [Docker installed](http
 and running on your machine.
 
 ```python
+import asyncio
+import logging
+from autogen_agentchat import EVENT_LOGGER_NAME
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.logging import ConsoleLogHandler
 from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
 from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
 from autogen_core.components.models import OpenAIChatCompletionClient
 
-async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
-    code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
-    coding_assistant_agent = CodingAssistantAgent(
-        "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
-    )
-    group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
-    result = await group_chat.run(
-        task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
-        termination_condition=StopMessageTermination(),
-    )
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+        code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+        coding_assistant_agent = CodingAssistantAgent(
+            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
+        )
+        group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
+        result = await group_chat.run(
+            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
+            termination_condition=StopMessageTermination(),
+        )
+
+asyncio.run(main())
 ```
 
 ### C#
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py
index f12658f8074e..36edd0bcd3d4 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_coding_assistant_agent.py
@@ -34,7 +34,7 @@ def __init__(
 If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
 If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
 When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
-Reply "TERMINATE" in the end when everything is done.""",
+Reply "TERMINATE" in the end when code has been executed and task is complete.""",
     ):
         super().__init__(name=name, description=description)
         self._model_client = model_client
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/logging/_console_log_handler.py b/python/packages/autogen-agentchat/src/autogen_agentchat/logging/_console_log_handler.py
index f4badf787a3f..5ff7c689a587 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/logging/_console_log_handler.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/logging/_console_log_handler.py
@@ -3,7 +3,6 @@
 import sys
 from datetime import datetime
 
-from .. import EVENT_LOGGER_NAME
 from ..agents import ChatMessage, StopMessage, TextMessage
 from ..teams._events import (
     ContentPublishEvent,
@@ -68,8 +67,3 @@ def emit(self, record: logging.LogRecord) -> None:
             sys.stdout.flush()
         else:
             raise ValueError(f"Unexpected log record: {record.msg}")
-
-
-logger = logging.getLogger(EVENT_LOGGER_NAME)
-logger.setLevel(logging.INFO)
-logger.addHandler(ConsoleLogHandler())
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.md
index ab510c4c1e4c..c12030f0bc1e 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.md
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.md
@@ -29,6 +29,9 @@ The following example illustrates creating a simple agent team with two agents t
 1. `CodingAssistantAgent` that generates responses using an LLM model.
 2. `CodeExecutorAgent` that executes code snippets and returns the output.
 
+Because the `CodeExecutorAgent` uses a Docker command-line code executor to execute code snippets,
+you need to have [Docker installed](https://docs.docker.com/engine/install/) and running on your machine.
+
 The task is to "Create a plot of NVIDIA and TESLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'."
 
 ```{include} stocksnippet.md
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/stocksnippet.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/stocksnippet.md
index 0969c77667a0..f87e8fc35990 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/stocksnippet.md
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/stocksnippet.md
@@ -2,21 +2,32 @@
 
 `````{tab-item} AgentChat (v0.4x)
 ```python
+import asyncio
+import logging
+from autogen_agentchat import EVENT_LOGGER_NAME
 from autogen_agentchat.agents import CodeExecutorAgent, CodingAssistantAgent
+from autogen_agentchat.logging import ConsoleLogHandler
 from autogen_agentchat.teams import RoundRobinGroupChat, StopMessageTermination
 from autogen_core.components.code_executor import DockerCommandLineCodeExecutor
 from autogen_core.components.models import OpenAIChatCompletionClient
 
-async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
-    code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
-    coding_assistant_agent = CodingAssistantAgent(
-        "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4")
-    )
-    group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
-    result = await group_chat.run(
-        task="Create a plot of NVIDIA and TESLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
-        termination_condition=StopMessageTermination(),
-    )
+logger = logging.getLogger(EVENT_LOGGER_NAME)
+logger.addHandler(ConsoleLogHandler())
+logger.setLevel(logging.INFO)
+
+async def main() -> None:
+    async with DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
+        code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor)
+        coding_assistant_agent = CodingAssistantAgent(
+            "coding_assistant", model_client=OpenAIChatCompletionClient(model="gpt-4o")
+        )
+        group_chat = RoundRobinGroupChat([coding_assistant_agent, code_executor_agent])
+        result = await group_chat.run(
+            task="Create a plot of NVDIA and TSLA stock returns YTD from 2024-01-01 and save it to 'nvidia_tesla_2024_ytd.png'.",
+            termination_condition=StopMessageTermination(),
+        )
+
+asyncio.run(main())
 ```
 `````
 
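A usage note on the updated snippets: the `DockerCommandLineCodeExecutor` is created with `work_dir="coding"`, so files written by the executed code should end up in that directory on the host. Below is a minimal sketch for checking the result after `asyncio.run(main())` returns, assuming the generated script saves the plot relative to the executor's working directory:

```python
from pathlib import Path

# The executor in the snippets above uses work_dir="coding", so output files
# written by the executed code are expected under that host directory.
plot_path = Path("coding") / "nvidia_tesla_2024_ytd.png"

if plot_path.exists():
    print(f"Plot written to {plot_path.resolve()}")
else:
    print("Plot not found; check the console log output for execution errors.")
```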