
Commit

Merge branch 'main' of github.com:WilliamEspegren/autogen
WilliamEspegren committed May 26, 2024
2 parents 9137f2d + 501610b commit 49e8053
Showing 91 changed files with 3,149 additions and 862 deletions.
88 changes: 88 additions & 0 deletions .gitattributes
@@ -1,3 +1,91 @@
# Source code
*.bash text eol=lf
*.bat text eol=crlf
*.cmd text eol=crlf
*.coffee text
*.css text diff=css eol=lf
*.htm text diff=html eol=lf
*.html text diff=html eol=lf
*.inc text
*.ini text
*.js text
*.json text eol=lf
*.jsx text
*.less text
*.ls text
*.map text -diff
*.od text
*.onlydata text
*.php text diff=php
*.pl text
*.ps1 text eol=crlf
*.py text diff=python eol=lf
*.rb text diff=ruby eol=lf
*.sass text
*.scm text
*.scss text diff=css
*.sh text eol=lf
.husky/* text eol=lf
*.sql text
*.styl text
*.tag text
*.ts text
*.tsx text
*.xml text
*.xhtml text diff=html

# Docker
Dockerfile text eol=lf

# Documentation
*.ipynb text
*.markdown text diff=markdown eol=lf
*.md text diff=markdown eol=lf
*.mdwn text diff=markdown eol=lf
*.mdown text diff=markdown eol=lf
*.mkd text diff=markdown eol=lf
*.mkdn text diff=markdown eol=lf
*.mdtxt text eol=lf
*.mdtext text eol=lf
*.txt text eol=lf
AUTHORS text eol=lf
CHANGELOG text eol=lf
CHANGES text eol=lf
CONTRIBUTING text eol=lf
COPYING text eol=lf
copyright text eol=lf
*COPYRIGHT* text eol=lf
INSTALL text eol=lf
license text eol=lf
LICENSE text eol=lf
NEWS text eol=lf
readme text eol=lf
*README* text eol=lf
TODO text

# Configs
*.cnf text eol=lf
*.conf text eol=lf
*.config text eol=lf
.editorconfig text
.env text eol=lf
.gitattributes text eol=lf
.gitconfig text eol=lf
.htaccess text
*.lock text -diff
package.json text eol=lf
package-lock.json text eol=lf -diff
pnpm-lock.yaml text eol=lf -diff
.prettierrc text
yarn.lock text -diff
*.toml text eol=lf
*.yaml text eol=lf
*.yml text eol=lf
browserslist text
Makefile text eol=lf
makefile text eol=lf

# Images
*.png filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
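
The rules above pin line endings per file type (LF for shell, Python, and docs; CRLF for Windows batch and PowerShell scripts) and route PNG/JPEG images through Git LFS. A quick local sanity check, not part of this commit, is to ask git which attributes now apply to a path; a minimal Python sketch follows, with the target path chosen arbitrarily.

# Sanity check (assumed workflow, not part of this commit): print the
# attributes git resolves for a Python file under the new .gitattributes.
import subprocess

out = subprocess.run(
    ["git", "check-attr", "text", "eol", "diff", "--", "autogen/agentchat/chat.py"],
    capture_output=True, text=True, check=True,
)
print(out.stdout)  # expected: text: set, eol: lf, diff: python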
2 changes: 2 additions & 0 deletions .github/workflows/build.yml
@@ -88,7 +88,9 @@ jobs:
fi
- name: Test with pytest skipping openai tests
if: matrix.python-version != '3.10' && matrix.os == 'ubuntu-latest'
# Remove the line below once https://github.com/docker/docker-py/issues/3256 is merged
run: |
pip install "requests<2.32.0"
pytest test --ignore=test/agentchat/contrib --skip-openai --durations=10 --durations-min=1.0
- name: Test with pytest skipping openai and docker tests
if: matrix.python-version != '3.10' && matrix.os != 'ubuntu-latest'
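The `requests<2.32.0` pin above is a temporary workaround: docker-py, used by the docker tests, breaks with requests 2.32.0, per the linked issue. A hedged local equivalent of the constraint the workflow enforces, assuming the third-party `packaging` library is installed:

# Mirrors the CI pin locally; the version threshold comes from the workflow
# above, and the 'packaging' dependency is an assumption of this sketch.
from importlib.metadata import version
from packaging.version import Version

assert Version(version("requests")) < Version("2.32.0"), \
    "pin requests<2.32.0 until docker/docker-py#3256 is resolved"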
6 changes: 3 additions & 3 deletions .github/workflows/contrib-tests.yml
@@ -107,7 +107,7 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y tesseract-ocr poppler-utils
- pip install unstructured[all-docs]==0.13.0
+ pip install --no-cache-dir unstructured[all-docs]==0.13.0
- name: Install packages and dependencies for RetrieveChat
run: |
pip install -e .[retrievechat]
@@ -418,9 +418,9 @@ jobs:
os: [ubuntu-latest, macos-latest, windows-2019]
python-version: ["3.11"]
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install packages and dependencies for all tests
9 changes: 7 additions & 2 deletions autogen/agentchat/chat.py
@@ -195,7 +195,9 @@ def initiate_chats(chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
r.summary for i, r in enumerate(finished_chats) if i not in finished_chat_indexes_to_exclude_from_carryover
]

-    __post_carryover_processing(chat_info)
+    if not chat_info.get("silent", False):
+        __post_carryover_processing(chat_info)

sender = chat_info["sender"]
chat_res = sender.initiate_chat(**chat_info)
finished_chats.append(chat_res)
@@ -236,7 +238,10 @@ async def _dependent_chat_future(
if isinstance(_chat_carryover, str):
_chat_carryover = [_chat_carryover]
chat_info["carryover"] = _chat_carryover + [finished_chats[pre_id].summary for pre_id in finished_chats]
-    __post_carryover_processing(chat_info)

+    if not chat_info.get("silent", False):
+        __post_carryover_processing(chat_info)

sender = chat_info["sender"]
chat_res_future = asyncio.create_task(sender.a_initiate_chat(**chat_info))
call_back_with_args = partial(_on_chat_future_done, chat_id=chat_id)
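In both the synchronous and asynchronous chat loops, `__post_carryover_processing` (which prints the carryover being fed into the next chat) is now skipped when the chat spec is marked silent. A minimal sketch of how a caller opts in; the agents and `config_list` are assumptions of the sketch, not part of this diff:

# Sketch (config_list assumed to be defined): "silent": True on a chat spec
# now also suppresses the pre-chat carryover printout.
from autogen import AssistantAgent, UserProxyAgent

assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user = UserProxyAgent("user", human_input_mode="NEVER", code_execution_config=False)

results = user.initiate_chats(
    [
        {"recipient": assistant, "message": "Draft a summary.", "silent": True},
        {"recipient": assistant, "message": "List open questions."},
    ]
)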
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/capabilities/context_handling.py
@@ -8,8 +8,8 @@
from autogen import ConversableAgent, token_count_utils

warn(
"Context handling with TransformChatHistory is deprecated. "
"Please use TransformMessages from autogen/agentchat/contrib/capabilities/transform_messages.py instead.",
"Context handling with TransformChatHistory is deprecated and will be removed in `0.2.30`. "
"Please use `TransformMessages`, documentation can be found at https://microsoft.github.io/autogen/docs/topics/handling_long_contexts/intro_to_transform_messages",
DeprecationWarning,
stacklevel=2,
)
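The warning now names the removal version and points at the topic docs for `TransformMessages`; the same change is applied to CompressibleAgent below. For reference, a minimal migration sketch using the recommended capability; parameter values are illustrative:

# Illustrative replacement for the deprecated context handlers: cap history
# at 10 messages and roughly 1000 tokens, then attach to an existing agent.
from autogen.agentchat.contrib.capabilities import transform_messages, transforms

context_handling = transform_messages.TransformMessages(
    transforms=[
        transforms.MessageHistoryLimiter(max_messages=10),
        transforms.MessageTokenLimiter(max_tokens=1000),
    ]
)
context_handling.add_to_agent(agent)  # 'agent' is any ConversableAgent (assumed)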
42 changes: 39 additions & 3 deletions autogen/agentchat/contrib/capabilities/transforms.py
@@ -8,6 +8,7 @@

from autogen import token_count_utils
from autogen.cache import AbstractCache, Cache
from autogen.oai.openai_utils import filter_config

from .text_compressors import LLMLingua, TextCompressor

@@ -130,6 +131,8 @@ def __init__(
max_tokens: Optional[int] = None,
min_tokens: Optional[int] = None,
model: str = "gpt-3.5-turbo-0613",
filter_dict: Optional[Dict] = None,
exclude_filter: bool = True,
):
"""
Args:
@@ -140,11 +143,17 @@
min_tokens (Optional[int]): Minimum number of tokens in messages to apply the transformation.
Must be greater than or equal to 0 if not None.
model (str): The target OpenAI model for tokenization alignment.
filter_dict (None or dict): A dictionary to filter out messages that you want/don't want to compress.
If None, no filters will be applied.
exclude_filter (bool): If exclude filter is True (the default value), messages that match the filter will be
excluded from token truncation. If False, messages that match the filter will be truncated.
"""
self._model = model
self._max_tokens_per_message = self._validate_max_tokens(max_tokens_per_message)
self._max_tokens = self._validate_max_tokens(max_tokens)
self._min_tokens = self._validate_min_tokens(min_tokens, max_tokens)
self._filter_dict = filter_dict
self._exclude_filter = exclude_filter

def apply_transform(self, messages: List[Dict]) -> List[Dict]:
"""Applies token truncation to the conversation history.
@@ -169,10 +178,15 @@ def apply_transform(self, messages: List[Dict]) -> List[Dict]:

for msg in reversed(temp_messages):
# Some messages may not have content.
- if not isinstance(msg.get("content"), (str, list)):
+ if not _is_content_right_type(msg.get("content")):
processed_messages.insert(0, msg)
continue

if not _should_transform_message(msg, self._filter_dict, self._exclude_filter):
processed_messages.insert(0, msg)
processed_messages_tokens += _count_tokens(msg["content"])
continue

expected_tokens_remained = self._max_tokens - processed_messages_tokens - self._max_tokens_per_message

# If adding this message would exceed the token limit, truncate the last message to meet the total token
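
With the new `filter_dict` and `exclude_filter` parameters, truncation can skip (or target) messages that match a filter; matching delegates to `filter_config` from `autogen.oai.openai_utils`, as the `_should_transform_message` helper at the end of this file shows. A hedged construction example, with illustrative filter values:

# Illustrative: truncate the history but leave system messages untouched.
from autogen.agentchat.contrib.capabilities.transforms import MessageTokenLimiter

limiter = MessageTokenLimiter(
    max_tokens_per_message=500,
    max_tokens=2000,
    filter_dict={"role": ["system"]},  # match messages whose role is "system"
    exclude_filter=True,               # True: matches are exempt from truncation
)
truncated = limiter.apply_transform(messages)  # 'messages': List[Dict], assumed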
@@ -282,6 +296,8 @@ def __init__(
min_tokens: Optional[int] = None,
compression_params: Dict = dict(),
cache: Optional[AbstractCache] = Cache.disk(),
filter_dict: Optional[Dict] = None,
exclude_filter: bool = True,
):
"""
Args:
@@ -293,6 +309,10 @@
dictionary.
cache (None or AbstractCache): The cache client to use to store and retrieve previously compressed messages.
If None, no caching will be used.
filter_dict (None or dict): A dictionary to filter out messages that you want/don't want to compress.
If None, no filters will be applied.
exclude_filter (bool): If exclude filter is True (the default value), messages that match the filter will be
excluded from compression. If False, messages that match the filter will be compressed.
"""

if text_compressor is None:
@@ -303,6 +323,8 @@
self._text_compressor = text_compressor
self._min_tokens = min_tokens
self._compression_args = compression_params
self._filter_dict = filter_dict
self._exclude_filter = exclude_filter
self._cache = cache

# Optimizing savings calculations to optimize log generation
@@ -334,7 +356,10 @@ def apply_transform(self, messages: List[Dict]) -> List[Dict]:
processed_messages = messages.copy()
for message in processed_messages:
# Some messages may not have content.
- if not isinstance(message.get("content"), (str, list)):
+ if not _is_content_right_type(message.get("content")):
continue

if not _should_transform_message(message, self._filter_dict, self._exclude_filter):
continue

if _is_content_text_empty(message["content"]):
@@ -397,7 +422,7 @@ def _cache_set(
self, content: Union[str, List[Dict]], compressed_content: Union[str, List[Dict]], tokens_saved: int
):
if self._cache:
- value = (tokens_saved, json.dumps(compressed_content))
+ value = (tokens_saved, compressed_content)
self._cache.set(self._cache_key(content), value)

def _cache_key(self, content: Union[str, List[Dict]]) -> str:
Expand Down Expand Up @@ -427,10 +452,21 @@ def _count_tokens(content: Union[str, List[Dict[str, Any]]]) -> int:
return token_count


def _is_content_right_type(content: Any) -> bool:
return isinstance(content, (str, list))


def _is_content_text_empty(content: Union[str, List[Dict[str, Any]]]) -> bool:
if isinstance(content, str):
return content == ""
elif isinstance(content, list):
return all(_is_content_text_empty(item.get("text", "")) for item in content)
else:
return False


def _should_transform_message(message: Dict[str, Any], filter_dict: Optional[Dict[str, Any]], exclude: bool) -> bool:
if not filter_dict:
return True

return len(filter_config([message], filter_dict, exclude)) > 0
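
`_should_transform_message` treats a falsy `filter_dict` as "transform everything"; otherwise a message is transformed only if `filter_config` keeps it, with `exclude` inverting the match. The text compressor gains the same two knobs; a hedged sketch against the compressor class in this module, with illustrative values (an LLMLingua-capable install is assumed for the default compressor):

# Illustrative: compress only user messages; exclude_filter=False means the
# messages matching filter_dict are the ones transformed.
from autogen.agentchat.contrib.capabilities.transforms import TextMessageCompressor

compressor = TextMessageCompressor(
    min_tokens=256,                  # skip compression entirely below this budget
    filter_dict={"role": ["user"]},  # match user messages
    exclude_filter=False,
)
compressed = compressor.apply_transform(messages)  # 'messages' assumed defined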
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/compressible_agent.py
@@ -13,8 +13,8 @@
logger = logging.getLogger(__name__)

warn(
"Context handling with CompressibleAgent is deprecated. "
"Please use `TransformMessages`, documentation can be found at https://microsoft.github.io/autogen/docs/reference/agentchat/contrib/capabilities/transform_messages",
"Context handling with CompressibleAgent is deprecated and will be removed in `0.2.30`. "
"Please use `TransformMessages`, documentation can be found at https://microsoft.github.io/autogen/docs/topics/handling_long_contexts/intro_to_transform_messages",
DeprecationWarning,
stacklevel=2,
)
6 changes: 5 additions & 1 deletion autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -11,6 +11,7 @@
from autogen.agentchat.agent import Agent
from autogen.agentchat.assistant_agent import AssistantAgent, ConversableAgent
from autogen.oai.openai_utils import create_gpt_assistant, retrieve_assistants_by_name, update_gpt_assistant
from autogen.runtime_logging import log_new_agent, logging_enabled

logger = logging.getLogger(__name__)

@@ -65,6 +66,8 @@ def __init__(
super().__init__(
name=name, system_message=instructions, human_input_mode="NEVER", llm_config=openai_client_cfg, **kwargs
)
if logging_enabled():
log_new_agent(self, locals())

# GPTAssistantAgent's azure_deployment param may cause NotFoundError (404) in client.beta.assistants.list()
# See: https://github.com/microsoft/autogen/pull/1721
@@ -169,10 +172,11 @@ def __init__(
# Tools are specified but overwrite_tools is False; do not update the assistant's tools
logger.warning("overwrite_tools is False. Using existing tools from assistant API.")

self.update_system_message(self._openai_assistant.instructions)
# lazily create threads
self._openai_threads = {}
self._unread_index = defaultdict(int)
- self.register_reply(Agent, GPTAssistantAgent._invoke_assistant, position=2)
+ self.register_reply([Agent, None], GPTAssistantAgent._invoke_assistant, position=2)

def _invoke_assistant(
self,
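Two behavioral changes in this file: agent construction is now recorded when runtime logging is enabled, and `_invoke_assistant` is registered for `None` senders as well, so direct `generate_reply` calls (no sender agent) also route through the assistant. A hedged sketch of enabling the new logging path:

# Sketch: log_new_agent only fires inside an active runtime-logging session
# (default sqlite backend; agent construction elided).
import autogen

session_id = autogen.runtime_logging.start()
# ... construct GPTAssistantAgent(...) here; its creation is now captured ...
autogen.runtime_logging.stop()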
(Diff truncated: the remaining changed files are not shown.)
