Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Made the cost info easier to read #2356

Merged
merged 9 commits into the base branch from the contributor's branch on
Apr 15, 2024
10 changes: 6 additions & 4 deletions autogen/agentchat/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,12 @@ class ChatResult:
"""The chat history."""
summary: str = None
"""A summary obtained from the chat."""
cost: tuple = None # (dict, dict) - (total_cost, actual_cost_with_cache)
"""The cost of the chat. a tuple of (total_cost, total_actual_cost), where total_cost is a
dictionary of cost information, and total_actual_cost is a dictionary of information on
the actual incurred cost with cache."""
cost: Dict[str, dict] = None # keys: "usage_including_cached_inference", "usage_excluding_cached_inference"
"""The cost of the chat.
The value for each usage type is a dictionary containing cost information for that specific type.
- "usage_including_cached_inference": Cost information on the actual incurred cost with cache.
Hk669 marked this conversation as resolved.
Show resolved Hide resolved
- "usage_excluding_cached_inference": Cost information on the incurred cost without cache.
Hk669 marked this conversation as resolved.
Show resolved Hide resolved
"""
human_input: List[str] = None
"""A list of human input solicited during the chat."""

Expand Down
48 changes: 32 additions & 16 deletions autogen/agentchat/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,33 +26,46 @@ def consolidate_chat_info(chat_info, uniform_sender=None) -> None:
), "llm client must be set in either the recipient or sender when summary_method is reflection_with_llm."


def gather_usage_summary(agents: List[Agent]) -> Tuple[Dict[str, any], Dict[str, any]]:
def gather_usage_summary(agents: List[Agent]) -> Dict[Dict[str, Dict], Dict[str, Dict]]:
r"""Gather usage summary from all agents.

Args:
agents: (list): List of agents.

Returns:
tuple: (total_usage_summary, actual_usage_summary)
dictionary: A dictionary containing two keys:
- "usage_including_cached_inference": Usage summary including cached inference.
- "usage_excluding_cached_inference": Usage summary excluding cached inference.

Example:

```python
total_usage_summary = {
"total_cost": 0.0006090000000000001,
"gpt-35-turbo": {
"cost": 0.0006090000000000001,
"prompt_tokens": 242,
"completion_tokens": 123,
"total_tokens": 365
{
"usage_including_cached_inference" : {
"total_cost": 0.0006090000000000001,
"gpt-35-turbo": {
"cost": 0.0006090000000000001,
"prompt_tokens": 242,
"completion_tokens": 123,
"total_tokens": 365
},
},

"usage_excluding_cached_inference" : {
"total_cost": 0.0006090000000000001,
"gpt-35-turbo": {
"cost": 0.0006090000000000001,
"prompt_tokens": 242,
"completion_tokens": 123,
"total_tokens": 365
},
}
sonichi marked this conversation as resolved.
Show resolved Hide resolved
}
```

Note:

`actual_usage_summary` follows the same format.
If none of the agents incurred any cost (not having a client), then the total_usage_summary and actual_usage_summary will be `{'total_cost': 0}`.
If none of the agents incurred any cost (not having a client), then the usage_including_cached_inference and usage_excluding_cached_inference will be `{'total_cost': 0}`.
"""

def aggregate_summary(usage_summary: Dict[str, Any], agent_summary: Dict[str, Any]) -> None:
Expand All @@ -69,15 +82,18 @@ def aggregate_summary(usage_summary: Dict[str, Any], agent_summary: Dict[str, An
usage_summary[model]["completion_tokens"] += data.get("completion_tokens", 0)
usage_summary[model]["total_tokens"] += data.get("total_tokens", 0)

total_usage_summary = {"total_cost": 0}
actual_usage_summary = {"total_cost": 0}
usage_including_cached_inference = {"total_cost": 0}
usage_excluding_cached_inference = {"total_cost": 0}

for agent in agents:
if getattr(agent, "client", None):
aggregate_summary(total_usage_summary, agent.client.total_usage_summary)
aggregate_summary(actual_usage_summary, agent.client.actual_usage_summary)
aggregate_summary(usage_excluding_cached_inference, agent.client.total_usage_summary)
aggregate_summary(usage_including_cached_inference, agent.client.actual_usage_summary)

return total_usage_summary, actual_usage_summary
return {
"usage_including_cached_inference": usage_including_cached_inference,
"usage_excluding_cached_inference": usage_excluding_cached_inference,
}


def parse_tags_from_content(tag: str, content: Union[str, List[Dict[str, Any]]]) -> List[Dict[str, Dict[str, str]]]:
Expand Down
2 changes: 1 addition & 1 deletion autogen/code_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@


def content_str(content: Union[str, List[Union[UserMessageTextContentPart, UserMessageImageContentPart]], None]) -> str:
"""Converts the `content` field of an OpenAI merssage into a string format.
"""Converts the `content` field of an OpenAI message into a string format.
sonichi marked this conversation as resolved.
Show resolved Hide resolved

This function processes content that may be a string, a list of mixed text and image URLs, or None,
and converts it into a string. Text is directly appended to the result string, while image URLs are
Expand Down
2 changes: 1 addition & 1 deletion notebook/agentchat_agentoptimizer.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -459,7 +459,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
"version": "3.9.13"
}
},
"nbformat": 4,
Expand Down
1 change: 1 addition & 0 deletions notebook/agentchat_auto_feedback_from_code_execution.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -692,6 +692,7 @@
" file_content = \"No data found.\"\n",
" return \"Analyze the data and write a brief but engaging blog post. \\n Data: \\n\" + file_content\n",
"\n",
"\n",
"# followup of the previous question\n",
"chat_res = user_proxy.initiate_chat(\n",
" recipient=assistant,\n",
Expand Down
4 changes: 2 additions & 2 deletions notebook/agentchat_cost_token_tracking.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -494,8 +494,8 @@
}
],
"source": [
"total_usage_summary, actual_usage_summary = gather_usage_summary([assistant, ai_user_proxy, user_proxy])\n",
"total_usage_summary"
"usage_summary = gather_usage_summary([assistant, ai_user_proxy, user_proxy])\n",
"usage_summary[\"usage_including_cached_inference\"]"
]
}
],
Expand Down
1 change: 0 additions & 1 deletion notebook/agentchat_groupchat_stateflow.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@
")\n",
"\n",
"\n",
"\n",
"coder = autogen.AssistantAgent(\n",
" name=\"Retrieve_Action_1\",\n",
" llm_config=gpt4_config,\n",
Expand Down
1 change: 1 addition & 0 deletions notebook/agentchat_image_generation_capability.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,7 @@
" return content[\"text\"].rstrip().endswith(\"TERMINATE\")\n",
" return False\n",
"\n",
"\n",
"def critic_agent() -> autogen.ConversableAgent:\n",
" return autogen.ConversableAgent(\n",
" name=\"critic\",\n",
Expand Down
13 changes: 8 additions & 5 deletions test/agentchat/test_agent_usage.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,11 +62,11 @@ def test_gathering():
"gpt-4": {"cost": 0.3, "prompt_tokens": 100, "completion_tokens": 200, "total_tokens": 300},
}

total_usage, _ = gather_usage_summary([assistant1, assistant2, assistant3])
total_usage = gather_usage_summary([assistant1, assistant2, assistant3])

assert round(total_usage["total_cost"], 8) == 0.6
assert round(total_usage["gpt-35-turbo"]["cost"], 8) == 0.3
assert round(total_usage["gpt-4"]["cost"], 8) == 0.3
assert round(total_usage["usage_including_cached_inference"]["total_cost"], 8) == 0.6
assert round(total_usage["usage_including_cached_inference"]["gpt-35-turbo"]["cost"], 8) == 0.3
assert round(total_usage["usage_including_cached_inference"]["gpt-4"]["cost"], 8) == 0.3

# test when agent doesn't have client
user_proxy = UserProxyAgent(
Expand All @@ -77,7 +77,10 @@ def test_gathering():
default_auto_reply="That's all. Thank you.",
)

total_usage, acutal_usage = gather_usage_summary([user_proxy])
total_usage = gather_usage_summary([user_proxy])
total_usage_summary = total_usage["usage_including_cached_inference"]

print("Total usage summary:", total_usage_summary)


@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
Expand Down
Loading