Skip to content

Commit c8e296a

Browse files
Lara Nonino
authored
Fix: added agent name to client.create calls to prevent runtime logging error (microsoft#47)
* Fix: added agent name to client.create calls to prevent runtime logging error (microsoft#3507) * reformat --------- Co-authored-by: Lara Nonino <[email protected]> Co-authored-by: Lara Nonino <[email protected]>
1 parent 5353f0e commit c8e296a

File tree

2 files changed

+4
-2
lines changed

2 files changed

+4
-2
lines changed

autogen/agentchat/contrib/multimodal_conversable_agent.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,9 @@ def generate_oai_reply(
117117
messages_with_b64_img = message_formatter_pil_to_b64(self._oai_system_message + messages)
118118

119119
# TODO: #1143 handle token limit exceeded error
120-
response = client.create(context=messages[-1].pop("context", None), messages=messages_with_b64_img)
120+
response = client.create(
121+
context=messages[-1].pop("context", None), messages=messages_with_b64_img, agent=self.name
122+
)
121123

122124
# TODO: line 301, line 271 is converting messages to dict. Can be removed after ChatCompletionMessage_to_dict is merged.
123125
extracted_response = client.extract_text_or_completion_object(response)[0]

autogen/agentchat/contrib/society_of_mind_agent.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ def _llm_response_preparer(self, prompt, messages):
131131
}
132132
)
133133

134-
response = self.client.create(context=None, messages=_messages, cache=self.client_cache)
134+
response = self.client.create(context=None, messages=_messages, cache=self.client_cache, agent=self.name)
135135
extracted_response = self.client.extract_text_or_completion_object(response)[0]
136136
if not isinstance(extracted_response, str):
137137
return str(extracted_response.model_dump(mode="dict"))

0 commit comments

Comments
 (0)