
Commit e9b5da5

Update RetrieveChat blog to fix some outdated APIs (microsoft#2273)
* Fix rag blog
* Update wording
1 parent 7ffec66 commit e9b5da5

2 files changed: +59 -65 lines changed


notebook/agentchat_groupchat_RAG.ipynb (+12 -17)
@@ -79,13 +79,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"llm_config = {\n",
-"    \"timeout\": 60,\n",
-"    \"temperature\": 0,\n",
-"    \"config_list\": config_list,\n",
-"}\n",
-"\n",
-"\n",
 "def termination_msg(x):\n",
 "    return isinstance(x, dict) and \"TERMINATE\" == str(x.get(\"content\", \"\"))[-9:].upper()\n",
 "\n",
@@ -117,31 +110,27 @@
 "    description=\"Assistant who has extra content retrieval power for solving difficult problems.\",\n",
 ")\n",
 "\n",
-"\n",
-"coder_llm_config = llm_config.copy()\n",
 "coder = AssistantAgent(\n",
 "    name=\"Senior_Python_Engineer\",\n",
 "    is_termination_msg=termination_msg,\n",
 "    system_message=\"You are a senior python engineer, you provide python code to answer questions. Reply `TERMINATE` in the end when everything is done.\",\n",
-"    llm_config={\"config_list\": config_list},\n",
+"    llm_config={\"config_list\": config_list, \"timeout\": 60, \"temperature\": 0},\n",
 "    description=\"Senior Python Engineer who can write code to solve problems and answer questions.\",\n",
 ")\n",
 "\n",
-"pm_llm_config = llm_config.copy()\n",
 "pm = autogen.AssistantAgent(\n",
 "    name=\"Product_Manager\",\n",
 "    is_termination_msg=termination_msg,\n",
 "    system_message=\"You are a product manager. Reply `TERMINATE` in the end when everything is done.\",\n",
-"    llm_config={\"config_list\": config_list},\n",
+"    llm_config={\"config_list\": config_list, \"timeout\": 60, \"temperature\": 0},\n",
 "    description=\"Product Manager who can design and plan the project.\",\n",
 ")\n",
 "\n",
-"reviewer_llm_config = llm_config.copy()\n",
 "reviewer = autogen.AssistantAgent(\n",
 "    name=\"Code_Reviewer\",\n",
 "    is_termination_msg=termination_msg,\n",
 "    system_message=\"You are a code reviewer. Reply `TERMINATE` in the end when everything is done.\",\n",
-"    llm_config={\"config_list\": config_list},\n",
+"    llm_config={\"config_list\": config_list, \"timeout\": 60, \"temperature\": 0},\n",
 "    description=\"Code Reviewer who can review the code.\",\n",
 ")\n",
 "\n",
@@ -161,7 +150,9 @@
 "    groupchat = autogen.GroupChat(\n",
 "        agents=[boss_aid, pm, coder, reviewer], messages=[], max_round=12, speaker_selection_method=\"round_robin\"\n",
 "    )\n",
-"    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n",
+"    manager = autogen.GroupChatManager(\n",
+"        groupchat=groupchat, llm_config={\"config_list\": config_list, \"timeout\": 60, \"temperature\": 0}\n",
+"    )\n",
 "\n",
 "    # Start chatting with boss_aid as this is the user proxy agent.\n",
 "    boss_aid.initiate_chat(\n",
@@ -181,7 +172,9 @@
 "        speaker_selection_method=\"auto\",\n",
 "        allow_repeat_speaker=False,\n",
 "    )\n",
-"    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n",
+"    manager = autogen.GroupChatManager(\n",
+"        groupchat=groupchat, llm_config={\"config_list\": config_list, \"timeout\": 60, \"temperature\": 0}\n",
+"    )\n",
 "\n",
 "    # Start chatting with the boss as this is the user proxy agent.\n",
 "    boss.initiate_chat(\n",
@@ -233,7 +226,9 @@
 "        allow_repeat_speaker=False,\n",
 "    )\n",
 "\n",
-"    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n",
+"    manager = autogen.GroupChatManager(\n",
+"        groupchat=groupchat, llm_config={\"config_list\": config_list, \"timeout\": 60, \"temperature\": 0}\n",
+"    )\n",
 "\n",
 "    # Start chatting with the boss as this is the user proxy agent.\n",
 "    boss.initiate_chat(\n",

website/blog/2023-10-18-RetrieveChat/index.mdx (+47 -48)
@@ -4,6 +4,8 @@ authors: thinkall
 tags: [LLM, RAG]
 ---

+*Last update: April 4, 2024; AutoGen version: v0.2.21*
+
 ![RAG Architecture](img/retrievechat-arch.png)

 **TL;DR:**
@@ -57,10 +59,17 @@ pip install "pyautogen[retrievechat]"
 RetrieveChat can handle various types of documents. By default, it can process
 plain text and PDF files, including formats such as 'txt', 'json', 'csv', 'tsv',
 'md', 'html', 'htm', 'rtf', 'rst', 'jsonl', 'log', 'xml', 'yaml', 'yml' and 'pdf'.
-If you install [unstructured](https://unstructured-io.github.io/unstructured/installation/full_installation.html)
-(`pip install "unstructured[all-docs]"`), additional document types such as 'docx',
+If you install [unstructured](https://unstructured-io.github.io/unstructured/installation/full_installation.html),
+additional document types such as 'docx',
 'doc', 'odt', 'pptx', 'ppt', 'xlsx', 'eml', 'msg', 'epub' will also be supported.

+- Install `unstructured` in ubuntu
+```bash
+sudo apt-get update
+sudo apt-get install -y tesseract-ocr poppler-utils
+pip install unstructured[all-docs]
+```
+
 You can find a list of all supported document types by using `autogen.retrieve_utils.TEXT_FORMATS`.

 1. Import Agents
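As a quick sanity check after installing the extras, the accepted formats can be printed directly. This is a small sketch alongside the diff, not part of the committed blog code:

```python
from autogen.retrieve_utils import TEXT_FORMATS

# Prints the document formats RetrieveChat accepts in the current environment.
print("Accepted file formats for `docs_path`:")
print(TEXT_FORMATS)
```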
@@ -90,7 +99,7 @@ ragproxyagent = RetrieveUserProxyAgent(
 3. Initialize Chat and ask a question
 ```python
 assistant.reset()
-ragproxyagent.initiate_chat(assistant, problem="What is autogen?")
+ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem="What is autogen?")
 ```

 Output is like:
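The key change in this hunk is that `initiate_chat` now takes the proxy agent's `message_generator` explicitly. A minimal sketch of step 3 under the new API, assuming `assistant` and `ragproxyagent` were built as in the blog's earlier steps:

```python
# Assumes `assistant` and `ragproxyagent` (a RetrieveUserProxyAgent) were
# constructed as in steps 1 and 2 of the blog post.
assistant.reset()

# New API: pass ragproxyagent.message_generator explicitly; the retrieval
# query is built from the `problem` keyword argument.
chat_result = ragproxyagent.initiate_chat(
    assistant,
    message=ragproxyagent.message_generator,
    problem="What is autogen?",
)
print(chat_result.summary)  # the returned ChatResult carries a summary of the run
```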
@@ -283,28 +292,6 @@ However, you may want to initialize the chat with another agent in some cases. T
 you'll need to call it from a function.

 ```python
-llm_config = {
-    "functions": [
-        {
-            "name": "retrieve_content",
-            "description": "retrieve content for code generation and question answering.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "message": {
-                        "type": "string",
-                        "description": "Refined message which keeps the original meaning and can be used to retrieve content for code generation and question answering.",
-                    }
-                },
-                "required": ["message"],
-            },
-        },
-    ],
-    "config_list": config_list,
-    "timeout": 60,
-    "seed": 42,
-}
-
 boss = autogen.UserProxyAgent(
     name="Boss",
     is_termination_msg=termination_msg,
@@ -328,46 +315,58 @@ coder = AssistantAgent(
     name="Senior_Python_Engineer",
     is_termination_msg=termination_msg,
     system_message="You are a senior python engineer. Reply `TERMINATE` in the end when everything is done.",
-    llm_config=llm_config,
+    llm_config={"config_list": config_list, "timeout": 60, "temperature": 0},
 )

 pm = autogen.AssistantAgent(
     name="Product_Manager",
     is_termination_msg=termination_msg,
     system_message="You are a product manager. Reply `TERMINATE` in the end when everything is done.",
-    llm_config=llm_config,
+    llm_config={"config_list": config_list, "timeout": 60, "temperature": 0},
 )

 reviewer = autogen.AssistantAgent(
     name="Code_Reviewer",
     is_termination_msg=termination_msg,
     system_message="You are a code reviewer. Reply `TERMINATE` in the end when everything is done.",
-    llm_config=llm_config,
+    llm_config={"config_list": config_list, "timeout": 60, "temperature": 0},
 )

-def retrieve_content(message, n_results=3):
-    boss_aid.n_results = n_results  # Set the number of results to be retrieved.
-    # Check if we need to update the context.
-    update_context_case1, update_context_case2 = boss_aid._check_update_context(message)
-    if (update_context_case1 or update_context_case2) and boss_aid.update_context:
-        boss_aid.problem = message if not hasattr(boss_aid, "problem") else boss_aid.problem
-        _, ret_msg = boss_aid._generate_retrieve_user_reply(message)
-    else:
-        _context = {"problem": message, "n_results": n_results}
-        ret_msg = boss_aid.message_generator(boss_aid, None, _context)
-    return ret_msg if ret_msg else message
-
-for agent in [boss, coder, pm, reviewer]:
-    # register functions for all agents.
-    agent.register_function(
-        function_map={
-            "retrieve_content": retrieve_content,
-        }
-    )
+def retrieve_content(
+    message: Annotated[
+        str,
+        "Refined message which keeps the original meaning and can be used to retrieve content for code generation and question answering.",
+    ],
+    n_results: Annotated[int, "number of results"] = 3,
+) -> str:
+    boss_aid.n_results = n_results  # Set the number of results to be retrieved.
+    # Check if we need to update the context.
+    update_context_case1, update_context_case2 = boss_aid._check_update_context(message)
+    if (update_context_case1 or update_context_case2) and boss_aid.update_context:
+        boss_aid.problem = message if not hasattr(boss_aid, "problem") else boss_aid.problem
+        _, ret_msg = boss_aid._generate_retrieve_user_reply(message)
+    else:
+        _context = {"problem": message, "n_results": n_results}
+        ret_msg = boss_aid.message_generator(boss_aid, None, _context)
+    return ret_msg if ret_msg else message
+
+for caller in [pm, coder, reviewer]:
+    d_retrieve_content = caller.register_for_llm(
+        description="retrieve content for code generation and question answering.", api_style="function"
+    )(retrieve_content)
+
+for executor in [boss, pm]:
+    executor.register_for_execution()(d_retrieve_content)

 groupchat = autogen.GroupChat(
-    agents=[boss, coder, pm, reviewer], messages=[], max_round=12
+    agents=[boss, pm, coder, reviewer],
+    messages=[],
+    max_round=12,
+    speaker_selection_method="round_robin",
+    allow_repeat_speaker=False,
 )
+
+llm_config = {"config_list": config_list, "timeout": 60, "temperature": 0}
 manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

 # Start chatting with the boss as this is the user proxy agent.
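This last hunk replaces the hand-written `functions` schema and `register_function` map with AutoGen's decorator-style registration: `register_for_llm` advertises the tool to the calling agents, and `register_for_execution` attaches the callable to the agents that run it. A stripped-down sketch of the same pattern, reusing the agents defined above and a hypothetical `word_count` helper in place of `retrieve_content`:

```python
from typing import Annotated

# Hypothetical toy function standing in for retrieve_content; the Annotated
# hints become the parameter descriptions in the generated function schema.
def word_count(text: Annotated[str, "Text whose words should be counted."]) -> str:
    return f"{len(text.split())} words"

# LLM-side registration: the callers' models see the function schema.
# api_style="function" keeps the legacy `functions` format, as in the blog.
for caller in [pm, coder, reviewer]:
    registered_fn = caller.register_for_llm(
        description="Count the words in a piece of text.", api_style="function"
    )(word_count)

# Execution-side registration: these agents actually run the call when asked.
for executor in [boss, pm]:
    executor.register_for_execution()(registered_fn)
```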
