diff --git a/.github/workflows/contrib-openai.yml b/.github/workflows/contrib-openai.yml
index a8cedb29a3c5..6443aa62de69 100644
--- a/.github/workflows/contrib-openai.yml
+++ b/.github/workflows/contrib-openai.yml
@@ -4,7 +4,7 @@
 name: OpenAI4ContribTests
 
 on:
-  pull_request_target:
+  pull_request:
     branches: ['main']
     paths:
       - 'autogen/**'
@@ -173,7 +173,7 @@ jobs:
           AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
           OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }}
         run: |
-          coverage run -a -m pytest test/agentchat/contrib/test_teachable_agent.py
+          coverage run -a -m pytest test/agentchat/contrib/capabilities/test_teachable_agent.py
           coverage xml
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v3
diff --git a/.github/workflows/contrib-tests.yml b/.github/workflows/contrib-tests.yml
index b82fe1baf032..c4d698655d9b 100644
--- a/.github/workflows/contrib-tests.yml
+++ b/.github/workflows/contrib-tests.yml
@@ -172,7 +172,7 @@ jobs:
       - name: Coverage
         run: |
           pip install coverage>=5.3
-          coverage run -a -m pytest test/agentchat/contrib/test_teachable_agent.py --skip-openai
+          coverage run -a -m pytest test/agentchat/contrib/capabilities/test_teachable_agent.py --skip-openai
           coverage xml
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v3
diff --git a/.github/workflows/deploy-website.yml b/.github/workflows/deploy-website.yml
index c9c7deede620..13180d504ba7 100644
--- a/.github/workflows/deploy-website.yml
+++ b/.github/workflows/deploy-website.yml
@@ -37,7 +37,7 @@ jobs:
       - name: pydoc-markdown install
         run: |
           python -m pip install --upgrade pip
-          pip install pydoc-markdown pyyaml colored
+          pip install pydoc-markdown pyyaml termcolor
       - name: pydoc-markdown run
         run: |
           pydoc-markdown
@@ -83,7 +83,7 @@ jobs:
       - name: pydoc-markdown install
         run: |
           python -m pip install --upgrade pip
-          pip install pydoc-markdown pyyaml colored
+          pip install pydoc-markdown pyyaml termcolor
       - name: pydoc-markdown run
         run: |
           pydoc-markdown
diff --git a/.github/workflows/openai.yml b/.github/workflows/openai.yml
index be2840c2dc67..2018aa8e6c0b 100644
--- a/.github/workflows/openai.yml
+++ b/.github/workflows/openai.yml
@@ -4,7 +4,7 @@
 name: OpenAI
 
 on:
-  pull_request_target:
+  pull_request:
     branches: ["main"]
     paths:
       - "autogen/**"
diff --git a/autogen/agentchat/chat.py b/autogen/agentchat/chat.py
index 6d2858c18c2e..8c9ed1ee2ef3 100644
--- a/autogen/agentchat/chat.py
+++ b/autogen/agentchat/chat.py
@@ -3,15 +3,9 @@
 from collections import defaultdict
 from typing import Dict, List, Any, Set, Tuple
 from dataclasses import dataclass
-from .utils import consolidate_chat_info
 import warnings
-
-try:
-    from termcolor import colored
-except ImportError:
-
-    def colored(x, *args, **kwargs):
-        return x
+from termcolor import colored
+from .utils import consolidate_chat_info
 
 logger = logging.getLogger(__name__)
diff --git a/autogen/agentchat/contrib/capabilities/context_handling.py b/autogen/agentchat/contrib/capabilities/context_handling.py
index ebbc00e1097c..1510ae5fcd64 100644
--- a/autogen/agentchat/contrib/capabilities/context_handling.py
+++ b/autogen/agentchat/contrib/capabilities/context_handling.py
@@ -25,10 +25,9 @@ class TransformChatHistory:
     2. Second, it limits the number of message to keep
     3. Third, it limits the total number of tokens in the chat history
 
-    Args:
-        max_tokens_per_message (Optional[int]): Maximum number of tokens to keep in each message.
-        max_messages (Optional[int]): Maximum number of messages to keep in the context.
-        max_tokens (Optional[int]): Maximum number of tokens to keep in the context.
+    When adding this capability to an agent, the following are modified:
+    - A hook is added to the hookable method `process_all_messages_before_reply` to transform the received messages for possible truncation.
+      The hook does not modify the stored message history.
     """
 
     def __init__(
@@ -38,6 +37,12 @@ def __init__(
         max_messages: Optional[int] = None,
         max_tokens: Optional[int] = None,
     ):
+        """
+        Args:
+            max_tokens_per_message (Optional[int]): Maximum number of tokens to keep in each message.
+            max_messages (Optional[int]): Maximum number of messages to keep in the context.
+            max_tokens (Optional[int]): Maximum number of tokens to keep in the context.
+        """
         self.max_tokens_per_message = max_tokens_per_message if max_tokens_per_message else sys.maxsize
         self.max_messages = max_messages if max_messages else sys.maxsize
         self.max_tokens = max_tokens if max_tokens else sys.maxsize
diff --git a/autogen/agentchat/contrib/capabilities/teachability.py b/autogen/agentchat/contrib/capabilities/teachability.py
index e90612fa53b2..9e18f99a3454 100644
--- a/autogen/agentchat/contrib/capabilities/teachability.py
+++ b/autogen/agentchat/contrib/capabilities/teachability.py
@@ -1,18 +1,12 @@
 import os
-from autogen.agentchat.assistant_agent import ConversableAgent
-from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability
-from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
-from typing import Dict, Optional, Union, List, Tuple, Any
+from typing import Dict, Optional, Union
 import chromadb
 from chromadb.config import Settings
 import pickle
-
-try:
-    from termcolor import colored
-except ImportError:
-
-    def colored(x, *args, **kwargs):
-        return x
+from autogen.agentchat.assistant_agent import ConversableAgent
+from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability
+from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
+from autogen.agentchat.conversable_agent import colored
 
 
 class Teachability(AgentCapability):
@@ -23,6 +17,13 @@ class Teachability(AgentCapability):
     To make any conversable agent teachable, instantiate both the agent and the Teachability class,
     then pass the agent to teachability.add_to_agent(agent).
     Note that teachable agents in a group chat must be given unique path_to_db_dir values.
+
+    When adding Teachability to an agent, the following are modified:
+    - The agent's system message is appended with a note about the agent's new ability.
+    - A hook is added to the agent's `process_last_received_message` hookable method,
+      and the hook may modify the last received message to include earlier teachings related to it.
+      Added teachings do not propagate into the stored message history.
+      If new user teachings are detected, they are added to new memos in the vector database.
""" def __init__( diff --git a/autogen/agentchat/contrib/llava_agent.py b/autogen/agentchat/contrib/llava_agent.py index 376c6c88f249..c26f576ab398 100644 --- a/autogen/agentchat/contrib/llava_agent.py +++ b/autogen/agentchat/contrib/llava_agent.py @@ -1,25 +1,14 @@ import json import logging -import os -import pdb -import re -from typing import Any, Dict, List, Optional, Tuple, Union - +from typing import List, Optional, Tuple import replicate import requests -from regex import R from autogen.agentchat.agent import Agent from autogen.agentchat.contrib.img_utils import get_image_data, llava_formatter from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent from autogen.code_utils import content_str - -try: - from termcolor import colored -except ImportError: - - def colored(x, *args, **kwargs): - return x +from autogen.agentchat.conversable_agent import colored logger = logging.getLogger(__name__) diff --git a/autogen/agentchat/contrib/multimodal_conversable_agent.py b/autogen/agentchat/contrib/multimodal_conversable_agent.py index a978e8fb722d..2355c630f9cc 100644 --- a/autogen/agentchat/contrib/multimodal_conversable_agent.py +++ b/autogen/agentchat/contrib/multimodal_conversable_agent.py @@ -1,26 +1,16 @@ import copy -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union from autogen import OpenAIWrapper from autogen.agentchat import Agent, ConversableAgent from autogen.agentchat.contrib.img_utils import ( - convert_base64_to_data_uri, gpt4v_formatter, message_formatter_pil_to_b64, - pil_to_data_uri, ) +from autogen.code_utils import content_str from ..._pydantic import model_dump -try: - from termcolor import colored -except ImportError: - - def colored(x, *args, **kwargs): - return x - - -from autogen.code_utils import content_str DEFAULT_LMM_SYS_MSG = """You are a helpful AI assistant.""" DEFAULT_MODEL = "gpt-4-vision-preview" diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py index facb64d07efa..b6ec63630964 100644 --- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py +++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py @@ -1,4 +1,6 @@ import re +from typing import Callable, Dict, Optional, Union, List, Tuple, Any +from IPython import get_ipython try: import chromadb @@ -10,16 +12,7 @@ from autogen.token_count_utils import count_token from autogen.code_utils import extract_code from autogen import logger - -from typing import Callable, Dict, Optional, Union, List, Tuple, Any -from IPython import get_ipython - -try: - from termcolor import colored -except ImportError: - - def colored(x, *args, **kwargs): - return x +from autogen.agentchat.conversable_agent import colored PROMPT_DEFAULT = """You're a retrieve augmented chatbot. 
 You answer user's questions based on your own knowledge and the
diff --git a/autogen/coding/local_commandline_code_executor.py b/autogen/coding/local_commandline_code_executor.py
index 25f4e5ebe4f5..0ed4ce1ae828 100644
--- a/autogen/coding/local_commandline_code_executor.py
+++ b/autogen/coding/local_commandline_code_executor.py
@@ -3,21 +3,14 @@
 import uuid
 import warnings
 from typing import Any, ClassVar, List, Optional
-
 from pydantic import BaseModel, Field, field_validator
+from autogen.agentchat.conversable_agent import colored
 
 from ..agentchat.agent import LLMAgent
 from ..code_utils import execute_code
 from .base import CodeBlock, CodeExtractor, CodeResult
 from .markdown_code_extractor import MarkdownCodeExtractor
 
-try:
-    from termcolor import colored
-except ImportError:
-
-    def colored(x: Any, *args: Any, **kwargs: Any) -> str:  # type: ignore[misc]
-        return x  # type: ignore[no-any-return]
-
 __all__ = (
     "LocalCommandlineCodeExecutor",
diff --git a/notebook/agentchat_teachability.ipynb b/notebook/agentchat_teachability.ipynb
index 0f9550bd7b03..3785f01fe93f 100644
--- a/notebook/agentchat_teachability.ipynb
+++ b/notebook/agentchat_teachability.ipynb
@@ -161,14 +161,6 @@
     "# Now add the Teachability capability to the agent.\n",
     "teachability.add_to_agent(teachable_agent)\n",
     "\n",
-    "try:\n",
-    "    from termcolor import colored\n",
-    "except ImportError:\n",
-    "\n",
-    "    def colored(x, *args, **kwargs):\n",
-    "        return x\n",
-    "\n",
-    "\n",
     "# Instantiate a UserProxyAgent to represent the user. But in this notebook, all user input will be simulated.\n",
     "user = UserProxyAgent(\n",
     "    name=\"user\",\n",
diff --git a/test/agentchat/contrib/chat_with_teachable_agent.py b/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py
similarity index 93%
rename from test/agentchat/contrib/chat_with_teachable_agent.py
rename to test/agentchat/contrib/capabilities/chat_with_teachable_agent.py
index 44449091a42b..66d61386d615 100755
--- a/test/agentchat/contrib/chat_with_teachable_agent.py
+++ b/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py
@@ -1,24 +1,16 @@
 #!/usr/bin/env python3 -m pytest
 
+import os
+import sys
+from termcolor import colored
 from autogen import UserProxyAgent, config_list_from_json
 from autogen.agentchat.contrib.capabilities.teachability import Teachability
 from autogen import ConversableAgent
-import os
-import sys
-
-sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
 from test_assistant_agent import OAI_CONFIG_LIST, KEY_LOC  # noqa: E402
 
-try:
-    from termcolor import colored
-except ImportError:
-
-    def colored(x, *args, **kwargs):
-        return x
-
-
 # Specify the model to use. GPT-3.5 is less reliable than GPT-4 at learning from user input.
filter_dict = {"model": ["gpt-4-0125-preview"]} # filter_dict = {"model": ["gpt-3.5-turbo-1106"]} diff --git a/test/agentchat/contrib/capabilities/test_context_handling.py b/test/agentchat/contrib/capabilities/test_context_handling.py index d1692cc4b7ff..6d7a774118de 100755 --- a/test/agentchat/contrib/capabilities/test_context_handling.py +++ b/test/agentchat/contrib/capabilities/test_context_handling.py @@ -10,7 +10,7 @@ # from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(os.path.join(os.path.dirname(__file__), "../../..")) from conftest import skip_openai # noqa: E402 sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..")) diff --git a/test/agentchat/contrib/test_teachable_agent.py b/test/agentchat/contrib/capabilities/test_teachable_agent.py similarity index 96% rename from test/agentchat/contrib/test_teachable_agent.py rename to test/agentchat/contrib/capabilities/test_teachable_agent.py index e12921379238..44904b26d362 100755 --- a/test/agentchat/contrib/test_teachable_agent.py +++ b/test/agentchat/contrib/capabilities/test_teachable_agent.py @@ -3,28 +3,21 @@ import pytest import os import sys +from termcolor import colored from autogen import ConversableAgent, config_list_from_json -sys.path.append(os.path.join(os.path.dirname(__file__), "../..")) +sys.path.append(os.path.join(os.path.dirname(__file__), "../../..")) from conftest import skip_openai # noqa: E402 -sys.path.append(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(os.path.join(os.path.dirname(__file__), "../..")) from test_assistant_agent import OAI_CONFIG_LIST, KEY_LOC # noqa: E402 try: - from openai import OpenAI from autogen.agentchat.contrib.capabilities.teachability import Teachability except ImportError: skip = True else: - skip = False or skip_openai - -try: - from termcolor import colored -except ImportError: - - def colored(x, *args, **kwargs): - return x + skip = skip_openai # Specify the model to use by uncommenting one of the following lines. 
@@ -141,7 +134,7 @@ def use_task_advice_pair_phrasing():
 
 @pytest.mark.skipif(
     skip,
-    reason="do not run if dependency is not installed",
+    reason="do not run if dependency is not installed or requested to skip",
 )
 def test_teachability_code_paths():
     """Runs this file's unit tests."""
@@ -172,7 +165,7 @@ def test_teachability_code_paths():
 
 @pytest.mark.skipif(
     skip,
-    reason="do not run if dependency is not installed",
+    reason="do not run if dependency is not installed or requested to skip",
 )
 def test_teachability_accuracy():
     """A very cheap and fast test of teachability accuracy."""
diff --git a/website/process_notebooks.py b/website/process_notebooks.py
index 56f1f190abb4..2fd70a19479b 100755
--- a/website/process_notebooks.py
+++ b/website/process_notebooks.py
@@ -14,12 +14,10 @@
 import typing
 import concurrent.futures
 import os
-
-from typing import Any, Dict, Optional, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
 from dataclasses import dataclass
-
-
 from multiprocessing import current_process
+from termcolor import colored
 
 try:
     import yaml
@@ -27,7 +25,6 @@
     print("pyyaml not found.\n\nPlease install pyyaml:\n\tpip install pyyaml\n")
     sys.exit(1)
 
-
 try:
     import nbclient
     from nbclient.client import (
@@ -49,14 +46,6 @@
     print("test won't work without nbclient")
 
 
-try:
-    from termcolor import colored
-except ImportError:
-
-    def colored(x, *args, **kwargs):
-        return x
-
-
 class Result:
     def __init__(self, returncode: int, stdout: str, stderr: str):
         self.returncode = returncode