
Commit 6bf33df

yiranwu0, qingyun-wu, and sonichi authored
[Core] Fix update_usage_summary when response doesn't have usage attribute (#1008)
* update cost for cache
* fix test
* fix bug when response doesn't have usage info
* update setup version
* update
* update
* Update setup.py

Co-authored-by: Chi Wang <[email protected]>

---------

Co-authored-by: Qingyun Wu <[email protected]>
Co-authored-by: Chi Wang <[email protected]>
1 parent 1508f53 commit 6bf33df
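In short: a completion served without token accounting (e.g. from the cache or a non-OpenAI backend) may carry no usage block, and the old code dereferenced response.usage unconditionally. A minimal sketch of the crash this commit prevents; the SimpleNamespace stand-in is illustrative, not part of the repo:

from types import SimpleNamespace

# Illustrative stand-in for a completion returned without token accounting.
response = SimpleNamespace(model="gpt-4", cost=0.0, usage=None)

try:
    # Pre-commit behavior: unconditional attribute access on a missing/None
    # usage block raised here and aborted the usage bookkeeping.
    prompt_tokens = response.usage.prompt_tokens
except AttributeError as e:
    print(f"crash the fix prevents: {e}")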

File tree: 2 files changed, +18 -6 lines


autogen/oai/client.py (+13 -5)
@@ -364,6 +364,15 @@ def _update_usage_summary(self, response: ChatCompletion | Completion, use_cache
 
         Usage is calculated no matter filter is passed or not.
         """
+        try:
+            usage = response.usage
+            assert usage is not None
+            usage.prompt_tokens = 0 if usage.prompt_tokens is None else usage.prompt_tokens
+            usage.completion_tokens = 0 if usage.completion_tokens is None else usage.completion_tokens
+            usage.total_tokens = 0 if usage.total_tokens is None else usage.total_tokens
+        except (AttributeError, AssertionError):
+            logger.debug("Usage attribute is not found in the response.", exc_info=1)
+            return
 
         def update_usage(usage_summary):
             if usage_summary is None:
@@ -373,12 +382,10 @@ def update_usage(usage_summary):
 
             usage_summary[response.model] = {
                 "cost": usage_summary.get(response.model, {}).get("cost", 0) + response.cost,
-                "prompt_tokens": usage_summary.get(response.model, {}).get("prompt_tokens", 0)
-                + response.usage.prompt_tokens,
+                "prompt_tokens": usage_summary.get(response.model, {}).get("prompt_tokens", 0) + usage.prompt_tokens,
                 "completion_tokens": usage_summary.get(response.model, {}).get("completion_tokens", 0)
-                + response.usage.completion_tokens,
-                "total_tokens": usage_summary.get(response.model, {}).get("total_tokens", 0)
-                + response.usage.total_tokens,
+                + usage.completion_tokens,
+                "total_tokens": usage_summary.get(response.model, {}).get("total_tokens", 0) + usage.total_tokens,
             }
             return usage_summary
 
@@ -448,6 +455,7 @@ def cost(self, response: Union[ChatCompletion, Completion]) -> float:
         model = response.model
         if model not in OAI_PRICE1K:
             # TODO: add logging to warn that the model is not found
+            logger.debug(f"Model {model} is not found. The cost will be 0.", exc_info=1)
             return 0
 
         n_input_tokens = response.usage.prompt_tokens
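Taken together, the first two hunks read response.usage once, normalize any None token counts to 0, and reuse the local usage variable inside update_usage, so a response with a missing or partial usage block now logs a debug message instead of raising; the third hunk adds a debug log when a model is absent from the OAI_PRICE1K price table. A self-contained sketch of the patched accumulation logic under those assumptions (dict and SimpleNamespace stand-ins, not the library's actual classes):

import logging
from types import SimpleNamespace

logger = logging.getLogger(__name__)

def update_usage_summary(summary, response):
    # Mirror of the patched guard: tolerate absent or partial usage info.
    try:
        usage = response.usage
        assert usage is not None
        usage.prompt_tokens = 0 if usage.prompt_tokens is None else usage.prompt_tokens
        usage.completion_tokens = 0 if usage.completion_tokens is None else usage.completion_tokens
        usage.total_tokens = 0 if usage.total_tokens is None else usage.total_tokens
    except (AttributeError, AssertionError):
        logger.debug("Usage attribute is not found in the response.", exc_info=1)
        return summary

    entry = summary.setdefault(
        response.model, {"cost": 0, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
    )
    entry["cost"] += response.cost
    entry["prompt_tokens"] += usage.prompt_tokens
    entry["completion_tokens"] += usage.completion_tokens
    entry["total_tokens"] += usage.total_tokens
    return summary

# Neither call raises: one response has a partial usage block, one has none.
ok = SimpleNamespace(model="gpt-4", cost=0.03,
                     usage=SimpleNamespace(prompt_tokens=100, completion_tokens=None, total_tokens=100))
bad = SimpleNamespace(model="gpt-4", cost=0.0)  # no .usage attribute at all

summary = update_usage_summary({}, ok)
summary = update_usage_summary(summary, bad)
print(summary)  # {'gpt-4': {'cost': 0.03, 'prompt_tokens': 100, 'completion_tokens': 0, 'total_tokens': 100}}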

notebook/agentchat_function_call_async.ipynb (+5 -1)
@@ -256,9 +256,13 @@
     "\n",
     "\n",
     "groupchat = autogen.GroupChat(agents=[user_proxy, coder, markdownagent], messages=[], max_round=12)\n",
+    "\n",
+    "llm_config_manager = llm_config.copy()\n",
+    "llm_config_manager.pop(\"functions\", None)\n",
+    "\n",
     "manager = autogen.GroupChatManager(\n",
     "    groupchat=groupchat,\n",
-    "    llm_config=llm_config,\n",
+    "    llm_config=llm_config_manager,\n",
     "    is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n",
     ")"
 ]
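The notebook hunk gives the GroupChatManager its own copy of the config with the "functions" list removed, presumably so the manager's internal speaker-selection completion does not advertise the agents' function schemas (and thus cannot itself return a function call). A minimal sketch of the pattern; the config contents are illustrative:

# Illustrative function-calling config shared by the worker agents.
llm_config = {
    "config_list": [{"model": "gpt-4"}],
    "functions": [{"name": "get_weather", "parameters": {"type": "object", "properties": {}}}],
}

# Manager-specific copy with the function schemas dropped. A shallow copy is
# enough here because only a top-level key is removed.
llm_config_manager = llm_config.copy()
llm_config_manager.pop("functions", None)

assert "functions" in llm_config and "functions" not in llm_config_manager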
