Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Core] Fix update_usage_summary when response doesn't have usage attribute #1008

Merged
merged 15 commits into from
Jan 4, 2024
Merged
18 changes: 13 additions & 5 deletions autogen/oai/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,15 @@ def _update_usage_summary(self, response: ChatCompletion | Completion, use_cache

Usage is calculated whether or not a filter is passed.
"""
try:
usage = response.usage
assert usage is not None
usage.prompt_tokens = 0 if usage.prompt_tokens is None else usage.prompt_tokens
usage.completion_tokens = 0 if usage.completion_tokens is None else usage.completion_tokens
usage.total_tokens = 0 if usage.total_tokens is None else usage.total_tokens
except (AttributeError, AssertionError):
logger.debug("Usage attribute is not found in the response.", exc_info=1)
return

def update_usage(usage_summary):
if usage_summary is None:
Expand All @@ -360,12 +369,10 @@ def update_usage(usage_summary):

usage_summary[response.model] = {
"cost": usage_summary.get(response.model, {}).get("cost", 0) + response.cost,
"prompt_tokens": usage_summary.get(response.model, {}).get("prompt_tokens", 0)
+ response.usage.prompt_tokens,
"prompt_tokens": usage_summary.get(response.model, {}).get("prompt_tokens", 0) + usage.prompt_tokens,
"completion_tokens": usage_summary.get(response.model, {}).get("completion_tokens", 0)
+ response.usage.completion_tokens,
"total_tokens": usage_summary.get(response.model, {}).get("total_tokens", 0)
+ response.usage.total_tokens,
+ usage.completion_tokens,
"total_tokens": usage_summary.get(response.model, {}).get("total_tokens", 0) + usage.total_tokens,
}
return usage_summary

Expand Down Expand Up @@ -435,6 +442,7 @@ def cost(self, response: Union[ChatCompletion, Completion]) -> float:
model = response.model
if model not in oai_price1k:
# TODO: add logging to warn that the model is not found
logger.debug(f"Model {model} is not found. The cost will be 0.", exc_info=1)
return 0

n_input_tokens = response.usage.prompt_tokens
Expand Down
6 changes: 5 additions & 1 deletion notebook/agentchat_function_call_async.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -316,7 +316,11 @@
" llm_config=llm_config,\n",
")\n",
"groupchat = autogen.GroupChat(agents=[user_proxy, coder, markdownagent], messages=[], max_round=12)\n",
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config,\n",
"\n",
"llm_config_manager = llm_config.copy()\n",
"llm_config_manager.pop(\"functions\", None)\n",
"\n",
"manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config_manager,\n",
" is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n",
" )"
]
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
__version__ = version["__version__"]

install_requires = [
"openai~=1.3",
"openai<=1.4.0",
qingyun-wu marked this conversation as resolved.
Show resolved Hide resolved
"diskcache",
"termcolor",
"flaml",
Expand Down
Loading