
Merge branch 'main' into patchwork
prithvi2226 authored Jun 26, 2024
2 parents 8373968 + 0872642 commit 394561b
Showing 45 changed files with 2,934 additions and 466 deletions.
40 changes: 40 additions & 0 deletions .github/workflows/contrib-tests.yml
@@ -558,3 +558,43 @@ jobs:
with:
file: ./coverage.xml
flags: unittests

TogetherTest:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-2019]
python-version: ["3.9", "3.10", "3.11", "3.12"]
exclude:
- os: macos-latest
python-version: "3.9"
steps:
- uses: actions/checkout@v4
with:
lfs: true
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install packages and dependencies for all tests
run: |
python -m pip install --upgrade pip wheel
pip install "pytest-cov>=5"
- name: Install packages and dependencies for Together
run: |
pip install -e .[together,test]
- name: Set AUTOGEN_USE_DOCKER based on OS
shell: bash
run: |
if [[ ${{ matrix.os }} != ubuntu-latest ]]; then
echo "AUTOGEN_USE_DOCKER=False" >> $GITHUB_ENV
fi
- name: Coverage
run: |
pytest test/oai/test_together.py --skip-openai
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
file: ./coverage.xml
flags: unittests
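
To reproduce the coverage step locally, here is a minimal sketch, assuming the same extras the workflow installs (pip install -e .[together,test]):

import sys

import pytest

# Mirrors the workflow's Coverage step; --skip-openai skips tests that need an OpenAI key.
sys.exit(pytest.main(["test/oai/test_together.py", "--skip-openai"]))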
3 changes: 2 additions & 1 deletion autogen/logger/file_logger.py
@@ -20,6 +20,7 @@
from autogen.oai.anthropic import AnthropicClient
from autogen.oai.gemini import GeminiClient
from autogen.oai.mistral import MistralAIClient
from autogen.oai.together import TogetherClient

logger = logging.getLogger(__name__)

@@ -203,7 +204,7 @@ def log_new_wrapper(

def log_new_client(
self,
client: AzureOpenAI | OpenAI | GeminiClient | AnthropicClient | MistralAIClient,
client: AzureOpenAI | OpenAI | GeminiClient | AnthropicClient | MistralAIClient | TogetherClient,
wrapper: OpenAIWrapper,
init_args: Dict[str, Any],
) -> None:
3 changes: 2 additions & 1 deletion autogen/logger/sqlite_logger.py
@@ -21,6 +21,7 @@
from autogen.oai.anthropic import AnthropicClient
from autogen.oai.gemini import GeminiClient
from autogen.oai.mistral import MistralAIClient
from autogen.oai.together import TogetherClient

logger = logging.getLogger(__name__)
lock = threading.Lock()
@@ -390,7 +391,7 @@ def log_function_use(self, source: Union[str, Agent], function: F, args: Dict[st

def log_new_client(
self,
client: Union[AzureOpenAI, OpenAI, GeminiClient, AnthropicClient, MistralAIClient],
client: Union[AzureOpenAI, OpenAI, GeminiClient, AnthropicClient, MistralAIClient, TogetherClient],
wrapper: OpenAIWrapper,
init_args: Dict[str, Any],
) -> None:
158 changes: 89 additions & 69 deletions autogen/oai/anthropic.py
@@ -49,10 +49,10 @@
"claude-3-5-sonnet-20240620": (0.003, 0.015),
"claude-3-sonnet-20240229": (0.003, 0.015),
"claude-3-opus-20240229": (0.015, 0.075),
"claude-2.0": (0.008, 0.024),
"claude-3-haiku-20240307": (0.00025, 0.00125),
"claude-2.1": (0.008, 0.024),
"claude-3.0-opus": (0.015, 0.075),
"claude-3.0-haiku": (0.00025, 0.00125),
"claude-2.0": (0.008, 0.024),
"claude-instant-1.2": (0.008, 0.024),
}
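
A minimal sketch of how one of these price pairs would be applied, assuming, as the magnitudes suggest, that each tuple is (prompt, completion) USD per 1,000 tokens:

# Hedged sketch: assumes (prompt, completion) pricing per 1K tokens; the real
# divisor lives in the client's cost calculation, not in this table.
prompt_price, completion_price = (0.003, 0.015)  # claude-3-5-sonnet-20240620
prompt_tokens, completion_tokens = 1_200, 300  # hypothetical usage
cost = (prompt_tokens / 1000) * prompt_price + (completion_tokens / 1000) * completion_price
print(f"${cost:.4f}")  # $0.0081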


@@ -181,7 +181,7 @@ def create(self, params: Dict[str, Any]) -> Completion:
response_oai = ChatCompletion(
id=response.id,
model=anthropic_params["model"],
created=int(time.time() * 1000),
created=int(time.time()),
object="chat.completion",
choices=choices,
usage=CompletionUsage(
@@ -242,86 +242,106 @@ def oai_messages_to_anthropic_messages(params: Dict[str, Any]) -> list[dict[str,

# Convert messages to Anthropic compliant format
processed_messages = []

# Used to interweave user messages to ensure user/assistant alternating
user_continue_message = {"content": "Please continue.", "role": "user"}
assistant_continue_message = {"content": "Please continue.", "role": "assistant"}

tool_use_messages = 0
tool_result_messages = 0
last_tool_use_index = -1
last_tool_result_index = -1
for message in params["messages"]:
if message["role"] == "system":
params["system"] = message["content"]
elif "tool_calls" in message:
# Map the tool call options to Anthropic's ToolUseBlock
tool_uses = []
tool_names = []
for tool_call in message["tool_calls"]:
tool_uses.append(
ToolUseBlock(
type="tool_use",
id=tool_call["id"],
name=tool_call["function"]["name"],
input=json.loads(tool_call["function"]["arguments"]),
else:
# New messages will be added here, manage role alternations
expected_role = "user" if len(processed_messages) % 2 == 0 else "assistant"

if "tool_calls" in message:
# Map the tool call options to Anthropic's ToolUseBlock
tool_uses = []
tool_names = []
for tool_call in message["tool_calls"]:
tool_uses.append(
ToolUseBlock(
type="tool_use",
id=tool_call["id"],
name=tool_call["function"]["name"],
input=json.loads(tool_call["function"]["arguments"]),
)
)
)
tool_names.append(tool_call["function"]["name"])

if has_tools:
processed_messages.append({"role": "assistant", "content": tool_uses})
tool_use_messages += 1
last_tool_use_index = len(processed_messages) - 1
else:
# Not using tools, so put in a plain text message
processed_messages.append(
{
"role": "assistant",
"content": f"Some internal function(s) that could be used: [{', '.join(tool_names)}]",
}
)
elif "tool_call_id" in message:
if has_tools:
# Map the tool usage call to tool_result for Anthropic
processed_messages.append(
{
"role": "user",
"content": [
{
"type": "tool_result",
"tool_use_id": message["tool_call_id"],
"content": message["content"],
}
],
if has_tools:
tool_use_messages += 1
tool_names.append(tool_call["function"]["name"])

if expected_role == "user":
# Insert an extra user message as we will append an assistant message
processed_messages.append(user_continue_message)

if has_tools:
processed_messages.append({"role": "assistant", "content": tool_uses})
last_tool_use_index = len(processed_messages) - 1
else:
# Not using tools, so put in a plain text message
processed_messages.append(
{
"role": "assistant",
"content": f"Some internal function(s) that could be used: [{', '.join(tool_names)}]",
}
)
elif "tool_call_id" in message:
if has_tools:
# Map the tool usage call to tool_result for Anthropic
tool_result = {
"type": "tool_result",
"tool_use_id": message["tool_call_id"],
"content": message["content"],
}
)
tool_result_messages += 1

# If the previous message also had a tool_result, add it to that
# Otherwise append a new message
if last_tool_result_index == len(processed_messages) - 1:
processed_messages[-1]["content"].append(tool_result)
else:
if expected_role == "assistant":
# Insert an extra assistant message as we will append a user message
processed_messages.append(assistant_continue_message)

processed_messages.append({"role": "user", "content": [tool_result]})
last_tool_result_index = len(processed_messages) - 1

tool_result_messages += 1
else:
# Not using tools, so put in a plain text message
processed_messages.append(
{"role": "user", "content": f"Running the function returned: {message['content']}"}
)
elif message["content"] == "":
# Ignoring empty messages
pass
else:
# Not using tools, so put in a plain text message
processed_messages.append(
{"role": "user", "content": f"Running the function returned: {message['content']}"}
)
elif message["content"] == "":
message["content"] = (
"I'm done. Please send TERMINATE" # TODO: Determine why we would be getting a blank response. Typically this is because 'assistant' is the last message role.
)
processed_messages.append(message)
else:
processed_messages.append(message)
if expected_role != message["role"]:
# Inserting the alternating continue message
processed_messages.append(
user_continue_message if expected_role == "user" else assistant_continue_message
)

# We'll drop the last tool_use if there's no tool_result (occurs if we finish the conversation before running the function)
if tool_use_messages != tool_result_messages:
# Too many tool_use messages, drop the last one as we haven't run it.
processed_messages.pop(last_tool_use_index)
processed_messages.append(message)

# Check for interleaving roles and correct, for Anthropic must be: user, assistant, user, etc.
for i, message in enumerate(processed_messages):
if message["role"] is not ("user" if i % 2 == 0 else "assistant"):
message["role"] = "user" if i % 2 == 0 else "assistant"
# We'll replace the last tool_use if there's no tool_result (occurs if we finish the conversation before running the function)
if has_tools and tool_use_messages != tool_result_messages:
processed_messages[last_tool_use_index] = assistant_continue_message

# Also remove name key from message as it is not supported
message.pop("name", None)
# name is not a valid field on messages
for message in processed_messages:
if "name" in message:
message.pop("name", None)

# Note: When using reflection_with_llm we may end up with an "assistant" message as the last message and that may cause a blank response
# So, if the last role is not user, add a 'user' continue message at the end
if processed_messages[-1]["role"] != "user":
# If the last role is not user, add a continue message at the end
continue_message = {"content": "continue", "role": "user"}
processed_messages.append(continue_message)
processed_messages.append(user_continue_message)

return processed_messages
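
The rewrite above enforces Anthropic's requirement that messages strictly alternate user/assistant, starting with user, by interleaving "Please continue." filler turns instead of mutating roles after the fact. A minimal sketch of the expected invariant, assuming the function is importable at module level and using a hypothetical conversation:

from autogen.oai.anthropic import oai_messages_to_anthropic_messages

# Hypothetical input: two consecutive assistant turns after a system message.
params = {
    "messages": [
        {"role": "system", "content": "You are terse."},
        {"role": "assistant", "content": "Hello."},
        {"role": "assistant", "content": "Anything else?"},
    ]
}
converted = oai_messages_to_anthropic_messages(params)
# The system message moves to params["system"]; filler turns keep the roles
# alternating and ensure the sequence both starts and ends with "user".
assert [m["role"] for m in converted] == ["user", "assistant", "user", "assistant", "user"]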

11 changes: 11 additions & 0 deletions autogen/oai/client.py
@@ -63,6 +63,13 @@
except ImportError as e:
mistral_import_exception = e

try:
from autogen.oai.together import TogetherClient

together_import_exception: Optional[ImportError] = None
except ImportError as e:
together_import_exception = e

logger = logging.getLogger(__name__)
if not logger.handlers:
# Add the console handler.
@@ -473,6 +480,10 @@ def _register_default_client(self, config: Dict[str, Any], openai_config: Dict[s
raise ImportError("Please install `mistralai` to use the Mistral.AI API.")
client = MistralAIClient(**openai_config)
self._clients.append(client)
elif api_type is not None and api_type.startswith("together"):
if together_import_exception:
raise ImportError("Please install `together` to use the Together.AI API.")
self._clients.append(TogetherClient(**config))
else:
client = OpenAI(**openai_config)
self._clients.append(OpenAIClient(client))
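
Downstream, routing to the new client is driven purely by api_type in the config list. A minimal sketch, assuming the usual AutoGen config-list convention and a hypothetical Together-hosted model id:

import autogen

config_list = [
    {
        "api_type": "together",  # matched by api_type.startswith("together") above
        "model": "meta-llama/Llama-3-8b-chat-hf",  # hypothetical model id
        "api_key": "<your Together.AI key>",
    }
]
wrapper = autogen.OpenAIWrapper(config_list=config_list)  # registers a TogetherClient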
2 changes: 1 addition & 1 deletion autogen/oai/gemini.py
@@ -253,7 +253,7 @@ def create(self, params: Dict) -> ChatCompletion:
response_oai = ChatCompletion(
id=str(random.randint(0, 1000)),
model=model_name,
created=int(time.time() * 1000),
created=int(time.time()),
object="chat.completion",
choices=choices,
usage=CompletionUsage(
2 changes: 1 addition & 1 deletion autogen/oai/mistral.py
@@ -175,7 +175,7 @@ def create(self, params: Dict[str, Any]) -> ChatCompletion:
response_oai = ChatCompletion(
id=mistral_response.id,
model=mistral_response.model,
created=int(time.time() * 1000),
created=int(time.time()),
object="chat.completion",
choices=choices,
usage=CompletionUsage(
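
anthropic.py, gemini.py, and mistral.py all receive the same one-line fix: the created field on an OpenAI-style ChatCompletion is a Unix timestamp in seconds, so the previous * 1000 produced milliseconds that read as a date far in the future. In short:

import time

created_wrong = int(time.time() * 1000)  # old: milliseconds, misread as seconds by consumers
created_right = int(time.time())  # new: seconds since the epoch, per the ChatCompletion schema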
(Diffs for the remaining 38 changed files not shown here.)
