
Merge branch 'main' into add-links-to-sdk-microsoft#910
qingyun-wu authored Dec 21, 2023
2 parents 5b4c2d4 + a0288e0 commit a1c8eff
Showing 52 changed files with 2,326 additions and 150 deletions.
86 changes: 65 additions & 21 deletions autogen/agentchat/contrib/agent_builder.py
@@ -2,12 +2,25 @@
import time
import subprocess as sp
import socket
import os
import json
import hashlib
from typing import Optional, List, Dict, Tuple, Union


def _config_check(config: Dict):
# check config loading
assert config.get("coding", None) is not None, 'Missing "coding" in your config.'
assert config.get("default_llm_config", None) is not None, 'Missing "default_llm_config" in your config.'
assert config.get("code_execution_config", None) is not None, 'Missing "code_execution_config" in your config.'

for agent_config in config["agent_configs"]:
assert agent_config.get("name", None) is not None, 'Missing agent "name" in your agent_configs.'
assert agent_config.get("model", None) is not None, 'Missing agent "model" in your agent_configs.'
assert (
agent_config.get("system_message", None) is not None
), 'Missing agent "system_message" in your agent_configs.'
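For reference, a config of the following shape satisfies these checks. This is a minimal sketch: the field values are illustrative only (the code_execution_config defaults are taken from later in this diff, and the agent name/model are hypothetical), and it assumes _config_check is importable from this module as added here.

    from autogen.agentchat.contrib.agent_builder import _config_check

    example_config = {
        "coding": True,
        "default_llm_config": {"temperature": 0},
        "code_execution_config": {
            "last_n_messages": 2,
            "work_dir": "groupchat",
            "use_docker": False,
            "timeout": 60,
        },
        "agent_configs": [
            {
                "name": "Programmer",  # illustrative agent name
                "model": "gpt-4",  # any model present in your config list
                "system_message": "You are a helpful programmer.",
            }
        ],
    }
    _config_check(example_config)  # passes silently; a missing field raises AssertionError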


class AgentBuilder:
"""
AgentBuilder can help users build an automatic task-solving process powered by a multi-agent system.
@@ -37,7 +50,8 @@ class AgentBuilder:
Hint:
# Considering the effort, the position in this task should be no more than {max_agents}, less is better.
# Answer the name of those positions/jobs, separated by comma and use "_" instead of space. For example: Product_manager,Programmer
# Answer the name of those positions/jobs.
# Separate names by comma and use "_" instead of space. For example: Product_manager,Programmer
# Only return the list of positions.
"""

@@ -69,6 +83,7 @@ def __init__(
Args:
config_path: path of the OpenAI api configs.
builder_model: specify a model as the backbone of build manager.
agent_model: specify a model as the backbone of participant agents.
host: endpoint host.
endpoint_building_timeout: timeout for building up an endpoint server.
"""
@@ -89,6 +104,12 @@ def __init__(
if self._is_port_open(host, port):
self.open_ports.append(str(port))

def set_builder_model(self, model: str):
self.builder_model = model

def set_agent_model(self, model: str):
self.agent_model = model

@staticmethod
def _is_port_open(host, port):
"""Check if a tcp port is open."""
@@ -128,6 +149,11 @@ def _create_agent(
agent: a set-up agent.
"""
config_list = autogen.config_list_from_json(self.config_path, filter_dict={"model": [model_name_or_hf_repo]})
if len(config_list) == 0:
raise RuntimeError(
f"Fail to initialize agent:{agent_name}: {self.builder_model} does not exist in {self.config_path}. "
f'If you would like to change this model, please specify the "agent_model" in the constructor.'
)
if "gpt-" in model_name_or_hf_repo:
server_id = self.openai_server_name
else:
@@ -259,14 +285,6 @@ def build(
"""
use_api = False

if code_execution_config is None:
code_execution_config = {
"last_n_messages": 2,
"work_dir": "groupchat",
"use_docker": False,
"timeout": 60,
}

if cached_configs is None:
use_api = True
agent_configs = []
@@ -276,9 +294,23 @@
default_llm_config = cached_configs["default_llm_config"]
coding = cached_configs["coding"]
agent_configs = cached_configs["agent_configs"]
code_execution_config = cached_configs["code_execution_config"]

if code_execution_config is None:
code_execution_config = {
"last_n_messages": 2,
"work_dir": "groupchat",
"use_docker": False,
"timeout": 60,
}

if use_api:
config_list = autogen.config_list_from_json(self.config_path, filter_dict={"model": [self.builder_model]})
if len(config_list) == 0:
raise RuntimeError(
f"Fail to initialize build manager: {self.builder_model} does not exist in {self.config_path}. "
f'If you want to change this model, please specify the "builder_model" in the constructor.'
)
build_manager = autogen.OpenAIWrapper(config_list=config_list)

print("Generating agents...")
@@ -294,8 +326,8 @@
.choices[0]
.message.content
)
agent_name_list = resp_agent_name.split(",")
print(f"{resp_agent_name} are generated.")
agent_name_list = [agent_name.strip().replace(" ", "_") for agent_name in resp_agent_name.split(",")]
print(f"{agent_name_list} are generated.")

agent_sys_msg_list = []
for name in agent_name_list:
@@ -390,19 +422,31 @@ def save(self, filepath: Optional[str] = None) -> str:

def load(
self,
filepath: str,
filepath: Optional[str] = None,
config_json: Optional[str] = None,
**kwargs,
):
"""
Load building configs and call the build function to complete building without calling an online LLM API.
Args:
filepath: filepath for the save config.
filepath: filepath of the saved config.
config_json: JSON string of the saved config.
"""
try:
print(f"Loding config from {filepath}")
cached_configs = json.load(open(filepath))
except FileNotFoundError:
raise FileNotFoundError(f"Config file {filepath} does not exist.")

return self.build(cached_configs=cached_configs, **kwargs)
# load json string.
if config_json is not None:
cached_configs = json.loads(config_json)
print("Loading config from JSON...")
_config_check(cached_configs)
return self.build(cached_configs=cached_configs, **kwargs)

# load from path.
if filepath is not None:
print(f"Loading config from {filepath}")
try:
with open(filepath) as f:
cached_configs = json.load(f)
except FileNotFoundError as e:
raise FileNotFoundError(f"{filepath} does not exist.") from e
_config_check(cached_configs)
return self.build(cached_configs=cached_configs, **kwargs)
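With this change, a previously saved agent team can be rebuilt either from a file path or from an in-memory JSON string. A minimal usage sketch, assuming an OAI_CONFIG_LIST file exists, that the saved-config file name below is illustrative, and that build() returns the agent list together with the cached configs:

    from autogen.agentchat.contrib.agent_builder import AgentBuilder

    builder = AgentBuilder(config_path="OAI_CONFIG_LIST", builder_model="gpt-4", agent_model="gpt-4")

    # Rebuild from a config file saved earlier with builder.save(...)
    agent_list, agent_configs = builder.load("./save_config_example.json")

    # Or rebuild from a JSON string, bypassing the filesystem; _config_check
    # validates the required fields before build() is called.
    with open("./save_config_example.json") as f:
        agent_list, agent_configs = builder.load(config_json=f.read())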
23 changes: 15 additions & 8 deletions autogen/agentchat/groupchat.py
@@ -30,16 +30,16 @@ class GroupChat:
- "manual": the next speaker is selected manually by user input.
- "random": the next speaker is selected randomly.
- "round_robin": the next speaker is selected in a round robin fashion, i.e., iterating in the same order as provided in `agents`.
- allow_repeat_speaker: whether to allow the same speaker to speak consecutively. Default is True.
- allow_repeat_speaker: whether to allow the same speaker to speak consecutively. Default is True, in which case all speakers are allowed to speak consecutively. If allow_repeat_speaker is a list of Agents, then only those listed agents are allowed to repeat. If set to False, then no speakers are allowed to repeat.
"""

agents: List[Agent]
messages: List[Dict]
max_round: int = 10
admin_name: str = "Admin"
func_call_filter: bool = True
speaker_selection_method: str = "auto"
allow_repeat_speaker: bool = True
max_round: Optional[int] = 10
admin_name: Optional[str] = "Admin"
func_call_filter: Optional[bool] = True
speaker_selection_method: Optional[str] = "auto"
allow_repeat_speaker: Optional[Union[bool, List[Agent]]] = True

_VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin"]

@@ -125,6 +125,13 @@ def _prepare_and_select_agents(self, last_speaker: Agent) -> Tuple[Optional[Agen
f"It should be one of {self._VALID_SPEAKER_SELECTION_METHODS} (case insensitive). "
)

# If a list is provided, allow repetition only when the last speaker is in it
allow_repeat_speaker = (
self.allow_repeat_speaker
if isinstance(self.allow_repeat_speaker, bool)
else last_speaker in self.allow_repeat_speaker
)

agents = self.agents
n_agents = len(agents)
# Warn if GroupChat is underpopulated
@@ -133,7 +140,7 @@ def _prepare_and_select_agents(self, last_speaker: Agent) -> Tuple[Optional[Agen
f"GroupChat is underpopulated with {n_agents} agents. "
"Please add more agents to the GroupChat or use direct communication instead."
)
elif n_agents == 2 and self.speaker_selection_method.lower() != "round_robin" and self.allow_repeat_speaker:
elif n_agents == 2 and self.speaker_selection_method.lower() != "round_robin" and allow_repeat_speaker:
logger.warning(
f"GroupChat is underpopulated with {n_agents} agents. "
"It is recommended to set speaker_selection_method to 'round_robin' or allow_repeat_speaker to False."
@@ -159,7 +166,7 @@ def _prepare_and_select_agents(self, last_speaker: Agent) -> Tuple[Optional[Agen
"Please check the function_map of the agents."
)
# remove the last speaker from the list to avoid selecting the same speaker if allow_repeat_speaker is False
agents = agents if self.allow_repeat_speaker else [agent for agent in agents if agent != last_speaker]
agents = agents if allow_repeat_speaker else [agent for agent in agents if agent != last_speaker]

if self.speaker_selection_method.lower() == "manual":
selected_agent = self.manual_select_speaker(agents)
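The practical effect of the groupchat.py change is that allow_repeat_speaker now accepts a list of agents as well as a bool. A short sketch of the list form, assuming the standard AssistantAgent/UserProxyAgent/GroupChat constructors; the agent names and config path are illustrative:

    import autogen

    config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
    llm_config = {"config_list": config_list}

    coder = autogen.AssistantAgent("Coder", llm_config=llm_config)
    critic = autogen.AssistantAgent("Critic", llm_config=llm_config)
    user = autogen.UserProxyAgent("User", code_execution_config=False)

    # With a list, only the agents named in it may speak twice in a row:
    # Coder can repeat, while Critic and User are removed from the candidate
    # pool whenever they were the last speaker.
    groupchat = autogen.GroupChat(
        agents=[user, coder, critic],
        messages=[],
        max_round=10,
        allow_repeat_speaker=[coder],
    )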
2 changes: 1 addition & 1 deletion autogen/oai/client.py
@@ -191,7 +191,7 @@ def _construct_create_params(self, create_config: Dict, extra_kwargs: Dict) -> D
def create(self, **config):
"""Make a completion for a given config using openai's clients.
Besides the kwargs allowed in openai's client, we allow the following additional kwargs.
The config in each client will be overriden by the config.
The config in each client will be overridden by the config.
Args:
- context (Dict | None): The context to instantiate the prompt or messages. Default to None.
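The client.py hunk only corrects a spelling error, but the sentence it fixes describes behavior worth a quick illustration: kwargs passed to create() override the per-client config. A minimal sketch, assuming an OAI_CONFIG_LIST file; the path and prompt are illustrative:

    import autogen

    config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
    client = autogen.OpenAIWrapper(config_list=config_list)

    # temperature=0 here overrides any temperature stored in config_list.
    response = client.create(
        messages=[{"role": "user", "content": "Say hi."}],
        temperature=0,
    )
    print(response.choices[0].message.content)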
14 changes: 10 additions & 4 deletions autogen/token_count_utils.py
@@ -2,27 +2,33 @@
import logging
import json
import tiktoken
import re


logger = logging.getLogger(__name__)


def get_max_token_limit(model="gpt-3.5-turbo-0613"):
# Handle common azure model names/aliases
model = re.sub(r"^gpt\-?35", "gpt-3.5", model)
model = re.sub(r"^gpt4", "gpt-4", model)

max_token_limit = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-instruct": 4096,
"gpt-3.5-turbo-16k": 16384,
"gpt-35-turbo": 4096,
"gpt-35-turbo-16k": 16384,
"gpt-35-turbo-instruct": 4096,
"gpt-3.5-turbo-16k": 16385,
"gpt-3.5-turbo-16k-0613": 16385,
"gpt-3.5-turbo-1106": 16385,
"gpt-4": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768, # deprecate in Sep
"gpt-4-0314": 8192, # deprecate in Sep
"gpt-4-0613": 8192,
"gpt-4-32k-0613": 32768,
"gpt-4-1106-preview": 128000,
"gpt-4-vision-preview": 128000,
}
return max_token_limit[model]
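With the new normalization, common Azure-style aliases resolve to canonical OpenAI names before the dict lookup. A quick sketch of the expected behavior against the table above; the alias spellings are examples of what the two regexes match:

    from autogen.token_count_utils import get_max_token_limit

    # "gpt-35-turbo" -> "gpt-3.5-turbo"; "gpt4-32k" -> "gpt-4-32k"
    print(get_max_token_limit("gpt-35-turbo"))  # 4096
    print(get_max_token_limit("gpt4-32k"))  # 32768
    print(get_max_token_limit("gpt-4-1106-preview"))  # 128000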
