
Commit 39489ba

Rename fields in agent metadata (#336)
* Rename fields in agent metadata
* team one fixes
* another fix
1 parent 437dbef commit 39489ba

32 files changed: +94 −95 lines
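Every change in this commit is the same mechanical rename: reads of the agent metadata field `name` become reads of `type`. A minimal sketch of the renamed shape, assuming the metadata behaves like a plain mapping (the AgentMetadata TypedDict below is illustrative, not the library's definition; only the `type` and `description` keys are visible in this diff):

from typing import TypedDict

class AgentMetadata(TypedDict):
    # Illustrative shape: call sites in this diff read these two keys.
    type: str  # formerly "name"
    description: str

metadata: AgentMetadata = {"type": "coder", "description": "Writes Python code."}

# Before this commit, call sites read metadata["name"]; afterwards:
source = metadata["type"]

Any remaining old-style reads in a checkout can be located with a plain search such as grep -rn '\[.name.\]' python/ (the `.` matches either quote style).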

python/benchmarks/GAIA/Templates/TeamOne/scenario.py

+4 −4

@@ -29,7 +29,7 @@
 
 from agnext.components.models import AssistantMessage
 
-encoding = None
+encoding = None
 def count_token(value: str) -> int:
     # TODO:: Migrate to model_client.count_tokens
     global encoding
@@ -40,7 +40,7 @@ def count_token(value: str) -> int:
 async def response_preparer(task: str, source: str, client: ChatCompletionClient, transcript: List[LLMMessage]) -> str:
     messages: List[LLMMessage] = []
 
-    # copy them to this context
+    # copy them to this context
     for message in transcript:
         messages.append(
             UserMessage(
@@ -168,7 +168,7 @@ async def main() -> None:
 
     run_context = runtime.start()
 
-    actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, type=MultimodalWebSurfer)
+    actual_surfer = await runtime.try_get_underlying_agent_instance(web_surfer.id, type=MultimodalWebSurfer)
     await actual_surfer.init(model_client=client, downloads_folder=os.getcwd(), browser_channel="chromium")
 
     #await runtime.send_message(RequestReplyMessage(), user_proxy.id)
@@ -206,7 +206,7 @@ async def main() -> None:
     # Output the final answer
     actual_orchestrator = await runtime.try_get_underlying_agent_instance(orchestrator.id, type=LedgerOrchestrator)
     transcript: List[LLMMessage] = actual_orchestrator._chat_history  # type: ignore
-    print(await response_preparer(task=task, source=(await orchestrator.metadata)["name"], client=client, transcript=transcript))
+    print(await response_preparer(task=task, source=(await orchestrator.metadata)["type"], client=client, transcript=transcript))

python/benchmarks/HumanEval/Templates/TwoAgents/scenario.py

+2 −2

@@ -100,7 +100,7 @@ async def handle_user_message(
         )
         assert isinstance(response.content, str)
         self._session_memory[session_id].append(
-            AssistantMessage(content=response.content, source=self.metadata["name"])
+            AssistantMessage(content=response.content, source=self.metadata["type"])
         )
 
         await self.publish_message(
@@ -138,7 +138,7 @@ async def handle_code_execution_result(
         )
         assert isinstance(response.content, str)
         self._session_memory[message.session_id].append(
-            AssistantMessage(content=response.content, source=self.metadata["name"])
+            AssistantMessage(content=response.content, source=self.metadata["type"])
         )
 
         if "TERMINATE" in response.content:

python/docs/src/cookbook/type-routed-agent.md

+1 −1

@@ -56,7 +56,7 @@ class MyAgent(TypeRoutedAgent):
         await self.publish_message(
             TextMessage(
                 content=f"I received a message from {message.source}. Message received #{self._received_count}",
-                source=self.metadata["name"],
+                source=self.metadata["type"],
             )
         )
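For readers of the cookbook page, the pattern is that an agent stamps every outgoing message with its own metadata entry. A self-contained sketch of that pattern under the renamed field (the classes below are simplified stand-ins, not the library's TypeRoutedAgent or runtime):

from dataclasses import dataclass

@dataclass
class TextMessage:
    content: str
    source: str

class MyAgent:
    """Stand-in agent; the real TypeRoutedAgent receives its metadata from the runtime."""

    def __init__(self, agent_type: str) -> None:
        self.metadata = {"type": agent_type}  # formerly {"name": agent_type}
        self._received_count = 0
        self.published: list[TextMessage] = []

    def on_text_message(self, message: TextMessage) -> None:
        self._received_count += 1
        # After this commit, the agent identifies itself via metadata["type"].
        self.published.append(
            TextMessage(
                content=f"I received a message from {message.source}. Message received #{self._received_count}",
                source=self.metadata["type"],
            )
        )

agent = MyAgent("my_agent")
agent.on_text_message(TextMessage(content="hello", source="user"))
assert agent.published[0].source == "my_agent"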

python/samples/common/agents/_chat_completion_agent.py

+5 −5

@@ -175,7 +175,7 @@ async def _generate_response(
         # Get a response from the model.
         hisorical_messages = await self._memory.get_messages()
         response = await self._client.create(
-            self._system_messages + convert_messages_to_llm_messages(hisorical_messages, self.metadata["name"]),
+            self._system_messages + convert_messages_to_llm_messages(hisorical_messages, self.metadata["type"]),
             tools=self._tools,
             json_output=response_format == ResponseFormat.json_object,
         )
@@ -190,25 +190,25 @@ async def _generate_response(
         ):
             # Send a function call message to itself.
             response = await self.send_message(
-                message=FunctionCallMessage(content=response.content, source=self.metadata["name"]),
+                message=FunctionCallMessage(content=response.content, source=self.metadata["type"]),
                 recipient=self.id,
                 cancellation_token=cancellation_token,
             )
             # Make an assistant message from the response.
             hisorical_messages = await self._memory.get_messages()
             response = await self._client.create(
-                self._system_messages + convert_messages_to_llm_messages(hisorical_messages, self.metadata["name"]),
+                self._system_messages + convert_messages_to_llm_messages(hisorical_messages, self.metadata["type"]),
                 tools=self._tools,
                 json_output=response_format == ResponseFormat.json_object,
             )
 
         final_response: Message
         if isinstance(response.content, str):
             # If the response is a string, return a text message.
-            final_response = TextMessage(content=response.content, source=self.metadata["name"])
+            final_response = TextMessage(content=response.content, source=self.metadata["type"])
         elif isinstance(response.content, list) and all(isinstance(x, FunctionCall) for x in response.content):
             # If the response is a list of function calls, return a function call message.
-            final_response = FunctionCallMessage(content=response.content, source=self.metadata["name"])
+            final_response = FunctionCallMessage(content=response.content, source=self.metadata["type"])
         else:
             raise ValueError(f"Unexpected response: {response.content}")
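The last hunk above dispatches on the model response type before wrapping it in a message. A reduced sketch of that branch (the dataclasses are hypothetical stand-ins for the sample's message types; self_type stands for self.metadata["type"] after the rename):

from dataclasses import dataclass
from typing import List, Union

@dataclass
class FunctionCall:
    name: str
    arguments: str

@dataclass
class TextMessage:
    content: str
    source: str

@dataclass
class FunctionCallMessage:
    content: List[FunctionCall]
    source: str

def wrap_response(
    content: Union[str, List[FunctionCall]], self_type: str
) -> Union[TextMessage, FunctionCallMessage]:
    if isinstance(content, str):
        # A plain string becomes a text message.
        return TextMessage(content=content, source=self_type)
    if isinstance(content, list) and all(isinstance(x, FunctionCall) for x in content):
        # A list of function calls becomes a function call message.
        return FunctionCallMessage(content=content, source=self_type)
    raise ValueError(f"Unexpected response: {content}")

assert wrap_response("hi", "assistant").source == "assistant"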

python/samples/common/agents/_image_generation_agent.py

+2 −2

@@ -63,7 +63,7 @@ async def _generate_response(self, cancellation_token: CancellationToken) -> Mul
         messages = await self._memory.get_messages()
         if len(messages) == 0:
             return MultiModalMessage(
-                content=["I need more information to generate an image."], source=self.metadata["name"]
+                content=["I need more information to generate an image."], source=self.metadata["type"]
             )
         prompt = ""
         for m in messages:
@@ -74,5 +74,5 @@ async def _generate_response(self, cancellation_token: CancellationToken) -> Mul
         assert len(response.data) > 0 and response.data[0].b64_json is not None
         # Create a MultiModalMessage with the image.
         image = Image.from_base64(response.data[0].b64_json)
-        multi_modal_message = MultiModalMessage(content=[image], source=self.metadata["name"])
+        multi_modal_message = MultiModalMessage(content=[image], source=self.metadata["type"])
         return multi_modal_message

python/samples/common/agents/_oai_assistant.py

+1 −1

@@ -121,7 +121,7 @@ async def _generate_response(
             raise ValueError(f"Expected text content in the last message: {last_message_content}")
 
         # TODO: handle multiple text content.
-        return TextMessage(content=text_content[0].text.value, source=self.metadata["name"])
+        return TextMessage(content=text_content[0].text.value, source=self.metadata["type"])
 
     def save_state(self) -> Mapping[str, Any]:
         return {

python/samples/common/agents/_user_proxy.py

+1 −1

@@ -23,7 +23,7 @@ def __init__(self, description: str, user_input_prompt: str) -> None:
     async def on_publish_now(self, message: PublishNow, cancellation_token: CancellationToken) -> None:
        """Handle a publish now message. This method prompts the user for input, then publishes it."""
         user_input = await self.get_user_input(self._user_input_prompt)
-        await self.publish_message(TextMessage(content=user_input, source=self.metadata["name"]))
+        await self.publish_message(TextMessage(content=user_input, source=self.metadata["type"]))
 
     async def get_user_input(self, prompt: str) -> str:
         """Get user input from the console. Override this method to customize how user input is retrieved."""

python/samples/common/patterns/_group_chat_utils.py

+4 −4

@@ -23,11 +23,11 @@ async def select_speaker(memory: ChatMemory[Message], client: ChatCompletionClie
 
     # Construct agent roles.
     roles = "\n".join(
-        [f"{(await agent.metadata)['name']}: {(await agent.metadata)['description']}".strip() for agent in agents]
+        [f"{(await agent.metadata)['type']}: {(await agent.metadata)['description']}".strip() for agent in agents]
     )
 
     # Construct agent list.
-    participants = str([(await agent.metadata)["name"] for agent in agents])
+    participants = str([(await agent.metadata)["type"] for agent in agents])
 
     # Select the next speaker.
     select_speaker_prompt = f"""You are in a role play game. The following roles are available:
@@ -48,7 +48,7 @@ async def select_speaker(memory: ChatMemory[Message], client: ChatCompletionClie
     # Get the index of the selected agent by name
     agent_index = 0
     for i, agent in enumerate(agents):
-        if (await agent.metadata)["name"] == agent_name:
+        if (await agent.metadata)["type"] == agent_name:
             agent_index = i
             break
 
@@ -74,7 +74,7 @@ async def mentioned_agents(message_content: str, agents: List[AgentProxy]) -> Di
     for agent in agents:
         # Finds agent mentions, taking word boundaries into account,
         # accommodates escaping underscores and underscores as spaces
-        name = (await agent.metadata)["name"]
+        name = (await agent.metadata)["type"]
         regex = (
             r"(?<=\W)("
             + re.escape(name)
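The mentioned_agents hunk builds a word-boundary regex from each agent's metadata entry. A self-contained sketch of that matching logic keyed on the renamed field (synchronous, with plain metadata dicts standing in for AgentProxy; the underscore-as-space and escaped-underscore alternatives follow the comment in the diff and are an assumption beyond the lines shown):

import re

def mentioned_agents(message_content: str, agent_metadata: list[dict[str, str]]) -> dict[str, int]:
    """Count mentions of each agent type, respecting word boundaries."""
    mentions: dict[str, int] = {}
    for metadata in agent_metadata:
        name = metadata["type"]  # formerly metadata["name"]
        regex = (
            r"(?<=\W)("
            + re.escape(name)
            + "|"
            + re.escape(name.replace("_", " "))   # underscores written as spaces
            + "|"
            + re.escape(name.replace("_", r"\_"))  # escaped underscores
            + r")(?=\W)"
        )
        count = len(re.findall(regex, f" {message_content} "))  # pad so edge mentions match
        if count > 0:
            mentions[name] = count
    return mentions

assert mentioned_agents("Ask web_surfer to search.", [{"type": "web_surfer"}]) == {"web_surfer": 1}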

python/samples/common/patterns/_orchestrator_chat.py

+5 −5

@@ -73,7 +73,7 @@ async def on_text_message(
 
         # Send the task specs to the orchestrator and specialists.
         for agent in [*self._specialists, self._orchestrator]:
-            await (await self.send_message(TextMessage(content=task_specs, source=self.metadata["name"]), agent))
+            await (await self.send_message(TextMessage(content=task_specs, source=self.metadata["type"]), agent))
 
         # Inner loop.
         stalled_turns = 0
@@ -85,7 +85,7 @@ async def on_text_message(
             if data["is_request_satisfied"]["answer"]:
                 return TextMessage(
                     content=f"The task has been successfully addressed. {data['is_request_satisfied']['reason']}",
-                    source=self.metadata["name"],
+                    source=self.metadata["type"],
                 )
 
             # Update stalled turns.
@@ -111,7 +111,7 @@ async def on_text_message(
             if educated_guess["has_educated_guesses"]["answer"]:
                 return TextMessage(
                     content=f"The task is addressed with an educated guess. {educated_guess['has_educated_guesses']['reason']}",
-                    source=self.metadata["name"],
+                    source=self.metadata["type"],
                 )
 
             # Come up with a new plan.
@@ -129,7 +129,7 @@ async def on_text_message(
             for agent in [*self._specialists, self._orchestrator]:
                 _ = await (
                     await self.send_message(
-                        TextMessage(content=subtask, source=self.metadata["name"]),
+                        TextMessage(content=subtask, source=self.metadata["type"]),
                         agent,
                     )
                 )
@@ -161,7 +161,7 @@ async def on_text_message(
 
         return TextMessage(
             content="The task was not addressed. The maximum number of turns was reached.",
-            source=self.metadata["name"],
+            source=self.metadata["type"],
         )
 
     async def _prepare_task(self, task: str, sender: str) -> Tuple[str, str, str, str]:

python/samples/core/inner_outer_direct.py

+2 −2

@@ -27,7 +27,7 @@ def __init__(self) -> None:
 
     @message_handler()
     async def on_new_message(self, message: MessageType, cancellation_token: CancellationToken) -> MessageType:
-        return MessageType(body=f"Inner: {message.body}", sender=self.metadata["name"])
+        return MessageType(body=f"Inner: {message.body}", sender=self.metadata["type"])
 
 
 class Outer(TypeRoutedAgent):
@@ -40,7 +40,7 @@ async def on_new_message(self, message: MessageType, cancellation_token: Cancell
         inner_response = self.send_message(message, self._inner)
         inner_message = await inner_response
         assert isinstance(inner_message, MessageType)
-        return MessageType(body=f"Outer: {inner_message.body}", sender=self.metadata["name"])
+        return MessageType(body=f"Outer: {inner_message.body}", sender=self.metadata["type"])
 
 
 async def main() -> None:

python/samples/core/two_agents_pub_sub.py

+3 −3

@@ -63,13 +63,13 @@ async def handle_message(self, message: Message, cancellation_token: Cancellatio
             return
         llm_messages: List[LLMMessage] = []
         for m in self._memory[-10:]:
-            if m.source == self.metadata["name"]:
-                llm_messages.append(AssistantMessage(content=m.content, source=self.metadata["name"]))
+            if m.source == self.metadata["type"]:
+                llm_messages.append(AssistantMessage(content=m.content, source=self.metadata["type"]))
             else:
                 llm_messages.append(UserMessage(content=m.content, source=m.source))
         response = await self._model_client.create(self._system_messages + llm_messages)
         assert isinstance(response.content, str)
-        await self.publish_message(Message(content=response.content, source=self.metadata["name"]))
+        await self.publish_message(Message(content=response.content, source=self.metadata["type"]))
 
 
 async def main() -> None:
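This handler replays recent memory into the model context, mapping the agent's own prior turns to AssistantMessage and everything else to UserMessage, with identity now keyed on metadata["type"]. A minimal sketch of that split (the dataclasses are stand-ins for the library's message types):

from dataclasses import dataclass

@dataclass
class Message:
    content: str
    source: str

@dataclass
class AssistantMessage:
    content: str
    source: str

@dataclass
class UserMessage:
    content: str
    source: str

def to_llm_messages(memory: list[Message], self_type: str) -> list[object]:
    """Map the agent's own turns to AssistantMessage, others to UserMessage."""
    llm_messages: list[object] = []
    for m in memory[-10:]:  # only the most recent ten turns, as in the sample
        if m.source == self_type:  # compares against metadata["type"] after the rename
            llm_messages.append(AssistantMessage(content=m.content, source=self_type))
        else:
            llm_messages.append(UserMessage(content=m.content, source=m.source))
    return llm_messages

history = [Message("Hi", "user"), Message("Hello!", "chat_agent")]
converted = to_llm_messages(history, self_type="chat_agent")
assert isinstance(converted[0], UserMessage) and isinstance(converted[1], AssistantMessage)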

python/samples/demos/assistant.py

+1 −1

@@ -108,7 +108,7 @@ async def on_publish_now(self, message: PublishNow, cancellation_token: Cancella
             return
         else:
             # Publish user input and exit handler.
-            await self.publish_message(TextMessage(content=user_input, source=self.metadata["name"]))
+            await self.publish_message(TextMessage(content=user_input, source=self.metadata["type"]))
             return

python/samples/demos/chat_room.py

+3 −3

@@ -60,13 +60,13 @@ async def on_chat_room_message(self, message: TextMessage, cancellation_token: C
         # Get a response from the model.
         raw_response = await self._client.create(
             self._system_messages
-            + convert_messages_to_llm_messages(await self._memory.get_messages(), self_name=self.metadata["name"]),
+            + convert_messages_to_llm_messages(await self._memory.get_messages(), self_name=self.metadata["type"]),
             json_output=True,
         )
         assert isinstance(raw_response.content, str)
 
         # Save the response to memory.
-        await self._memory.add_message(TextMessage(source=self.metadata["name"], content=raw_response.content))
+        await self._memory.add_message(TextMessage(source=self.metadata["type"], content=raw_response.content))
 
         # Parse the response.
         data = json.loads(raw_response.content)
@@ -75,7 +75,7 @@ async def on_chat_room_message(self, message: TextMessage, cancellation_token: C
 
         # Publish the response if needed.
         if respond is True or str(respond).lower().strip() == "true":
-            await self.publish_message(TextMessage(source=self.metadata["name"], content=str(response)))
+            await self.publish_message(TextMessage(source=self.metadata["type"], content=str(response)))
 
 
 class ChatRoomUserAgent(TextualUserAgent):

python/samples/demos/illustrator_critics.py

+3 −3

@@ -86,9 +86,9 @@ async def illustrator_critics(runtime: AgentRuntime, app: TextualChatApp) -> Non
 
     app.welcoming_notice = f"""You are now in a group chat with the following agents:
 
-1. 🤖 {(await descriptor.metadata)['name']}: {(await descriptor.metadata).get('description')}
-2. 🤖 {(await illustrator.metadata)['name']}: {(await illustrator.metadata).get('description')}
-3. 🤖 {(await critic.metadata)['name']}: {(await critic.metadata).get('description')}
+1. 🤖 {(await descriptor.metadata)['type']}: {(await descriptor.metadata).get('description')}
+2. 🤖 {(await illustrator.metadata)['type']}: {(await illustrator.metadata).get('description')}
+3. 🤖 {(await critic.metadata)['type']}: {(await critic.metadata).get('description')}
 
 Provide a prompt for the illustrator to generate an image.
 """

python/samples/demos/utils.py

+1 −1

@@ -171,7 +171,7 @@ async def on_multi_modal_message(self, message: MultiModalMessage, cancellation_
         # Generate a ramdom file name.
         for content in message.content:
             if isinstance(content, Image):
-                filename = f"{self.metadata['name']}_{message.source}_{random.randbytes(16).hex()}.png"
+                filename = f"{self.metadata['type']}_{message.source}_{random.randbytes(16).hex()}.png"
                 content.image.save(filename)
         await self._app.post_runtime_message(message)

python/samples/patterns/coder_executor.py

+2 −2

@@ -97,7 +97,7 @@ async def handle_task(self, message: TaskMessage, cancellation_token: Cancellati
         response = await self._model_client.create(self._system_messages + self._session_memory[session_id])
         assert isinstance(response.content, str)
         self._session_memory[session_id].append(
-            AssistantMessage(content=response.content, source=self.metadata["name"])
+            AssistantMessage(content=response.content, source=self.metadata["type"])
         )
 
         # Publish the code execution task.
@@ -116,7 +116,7 @@ async def handle_code_execution_result(
         response = await self._model_client.create(self._system_messages + self._session_memory[message.session_id])
         assert isinstance(response.content, str)
         self._session_memory[message.session_id].append(
-            AssistantMessage(content=response.content, source=self.metadata["name"])
+            AssistantMessage(content=response.content, source=self.metadata["type"])
         )
 
         if "TERMINATE" in response.content:

python/samples/patterns/coder_reviewer.py

+2 −2

@@ -100,7 +100,7 @@ async def handle_code_review_task(self, message: CodeReviewTask, cancellation_to
         """
         # Generate a response using the chat completion API.
         response = await self._model_client.create(
-            self._system_messages + [UserMessage(content=prompt, source=self.metadata["name"])]
+            self._system_messages + [UserMessage(content=prompt, source=self.metadata["type"])]
         )
         assert isinstance(response.content, str)
         # TODO: use structured generation library e.g. guidance to ensure the response is in the expected format.
@@ -162,7 +162,7 @@ async def handle_code_writing_task(
         self._session_memory.setdefault(session_id, []).append(message)
         # Generate a response using the chat completion API.
         response = await self._model_client.create(
-            self._system_messages + [UserMessage(content=message.task, source=self.metadata["name"])]
+            self._system_messages + [UserMessage(content=message.task, source=self.metadata["type"])]
        )
         assert isinstance(response.content, str)
         # Extract the code block from the response.

python/samples/patterns/group_chat.py

+3 −3

@@ -97,13 +97,13 @@ async def handle_request_to_speak(self, message: RequestToSpeak, cancellation_to
             return
         llm_messages: List[LLMMessage] = []
         for m in self._memory[-10:]:
-            if m.source == self.metadata["name"]:
-                llm_messages.append(AssistantMessage(content=m.content, source=self.metadata["name"]))
+            if m.source == self.metadata["type"]:
+                llm_messages.append(AssistantMessage(content=m.content, source=self.metadata["type"]))
             else:
                 llm_messages.append(UserMessage(content=m.content, source=m.source))
         response = await self._model_client.create(self._system_messages + llm_messages)
         assert isinstance(response.content, str)
-        speach = Message(content=response.content, source=self.metadata["name"])
+        speach = Message(content=response.content, source=self.metadata["type"])
         self._memory.append(speach)
         await self.publish_message(speach)
