
Commit 114b63c

BeibinLi and sonichi authored
Large Multimodal Models in AgentChat (microsoft#554)
* LMM code added
* LLaVA notebook update
* Test cases and notebook modified for OpenAI v1
* Move LMM into contrib, to resolve test and deploy issues. In the future, we can install pillow by default and then move the LMM agents back into agentchat
* LMM test setup update
* try...except... clause for LMM tests
* Disable patch for the LLaVA agent test, to resolve a dependency issue in the build
* Add LMM blog
* Change docstring for LMM agents
* Docstring update patch
* llava: insert reply at position 1 now, so it can still handle human_input_mode and max_consecutive_auto_reply
* Resolve comments: fix typos, blogs, and yml, and add OpenAIWrapper
* Signature typo fix for LMM agent: system_message
* Update LMM "content" to match the latest OpenAI release (reference: https://platform.openai.com/docs/guides/vision)
* Update LMM tests according to the latest OpenAI release
* Fully support GPT-4V now:
  1. Add a notebook for GPT-4V; the LLaVA notebook is also updated.
  2. img_utils updated.
  3. The GPT-4V formatter now returns base64 images with MIME type.
  4. Infer the MIME type directly from the base64 image content (when loading without a suffix).
  5. Test cases modified according to all the related changes.
* GPT-4V link updated in blog

---------

Co-authored-by: Chi Wang <[email protected]>
1 parent 3ed0355 commit 114b63c

17 files changed: +2107 −723 lines
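
Before the per-file diffs, here is a minimal usage sketch of the new LMM agent flow (illustrative, not part of the diff): the model name, API-key placeholder, and image URL are assumptions, and it presumes the lmm extra is installed (pip install -e .[lmm]).

# Minimal usage sketch (illustrative, not part of this commit's diff).
# The model name, API key, and image URL below are placeholders.
from autogen.agentchat import UserProxyAgent
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent

config_list_4v = [{"model": "gpt-4-vision-preview", "api_key": "<your-openai-api-key>"}]

image_agent = MultimodalConversableAgent(
    name="image-explainer",
    llm_config={"config_list": config_list_4v, "max_tokens": 300},
)
user_proxy = UserProxyAgent(
    name="user",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=0,
    code_execution_config=False,
)

# Images are referenced inline with an <img ...> tag; gpt4v_formatter (autogen/img_utils.py)
# expands the tag into the OpenAI vision "content" format (text parts + base64 image_url parts).
user_proxy.initiate_chat(
    image_agent,
    message="What is shown in this image? <img https://example.com/sample.png>",
)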

.github/workflows/contrib-lmm.yml

+60 (new file)

@@ -0,0 +1,60 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: ContribTests

on:
  pull_request:
    branches: ['main', 'dev/v0.2']
    paths:
      - 'autogen/img_utils.py'
      - 'autogen/agentchat/contrib/multimodal_conversable_agent.py'
      - 'autogen/agentchat/contrib/llava_agent.py'
      - 'test/test_img_utils.py'
      - 'test/agentchat/contrib/test_lmm.py'
      - 'test/agentchat/contrib/test_llava.py'
      - '.github/workflows/lmm-test.yml'
      - 'setup.py'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

jobs:
  LMMTest:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, macos-latest, windows-2019]
        python-version: ["3.8", "3.9", "3.10", "3.11"]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install packages and dependencies for all tests
        run: |
          python -m pip install --upgrade pip wheel
          pip install pytest
      - name: Install packages and dependencies for LMM
        run: |
          pip install -e .[lmm]
          pip uninstall -y openai
      - name: Test LMM and LLaVA
        run: |
          pytest test/test_img_utils.py test/agentchat/contrib/test_lmm.py test/agentchat/contrib/test_llava.py
      - name: Coverage
        if: matrix.python-version == '3.10'
        run: |
          pip install coverage>=5.3
          coverage run -a -m pytest test/test_img_utils.py test/agentchat/contrib/test_lmm.py test/agentchat/contrib/test_llava.py
          coverage xml
      - name: Upload coverage to Codecov
        if: matrix.python-version == '3.10'
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage.xml
          flags: unittests

autogen/agentchat/__init__.py

+2 −2

@@ -1,8 +1,8 @@
 from .agent import Agent
-from .conversable_agent import ConversableAgent
 from .assistant_agent import AssistantAgent
-from .user_proxy_agent import UserProxyAgent
+from .conversable_agent import ConversableAgent
 from .groupchat import GroupChat, GroupChatManager
+from .user_proxy_agent import UserProxyAgent
 
 __all__ = [
     "Agent",
autogen/agentchat/contrib/llava_agent.py

+178 (new file)

@@ -0,0 +1,178 @@
import json
import logging
import re
from typing import Any, Dict, List, Optional, Tuple, Union

import replicate
import requests

from autogen.agentchat.agent import Agent
from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent
from autogen.code_utils import content_str
from autogen.img_utils import get_image_data, llava_formater

try:
    from termcolor import colored
except ImportError:

    def colored(x, *args, **kwargs):
        return x


logger = logging.getLogger(__name__)

# We will override the following variables later.
SEP = "###"

DEFAULT_LLAVA_SYS_MSG = "You are an AI agent and you can view images."


class LLaVAAgent(MultimodalConversableAgent):
    def __init__(
        self,
        name: str,
        system_message: Optional[Tuple[str, List]] = DEFAULT_LLAVA_SYS_MSG,
        *args,
        **kwargs,
    ):
        """
        Args:
            name (str): agent name.
            system_message (str): system message for the ChatCompletion inference.
                Please override this attribute if you want to reprogram the agent.
            **kwargs (dict): Please refer to other kwargs in
                [ConversableAgent](../conversable_agent#__init__).
        """
        super().__init__(
            name,
            system_message=system_message,
            *args,
            **kwargs,
        )

        assert self.llm_config is not None, "llm_config must be provided."
        self.register_reply([Agent, None], reply_func=LLaVAAgent._image_reply, position=1)

    def _image_reply(self, messages=None, sender=None, config=None):
        # Note: we did not use "llm_config" yet.
        if all((messages is None, sender is None)):
            error_msg = f"Either {messages=} or {sender=} must be provided."
            logger.error(error_msg)
            raise AssertionError(error_msg)

        if messages is None:
            messages = self._oai_messages[sender]

        # The message formats for LLaVA and GPT differ, so we assemble the prompt manually here.
        images = []
        prompt = content_str(self.system_message) + "\n"
        for msg in messages:
            role = "Human" if msg["role"] == "user" else "Assistant"
            images += [d["image_url"]["url"] for d in msg["content"] if d["type"] == "image_url"]
            content_prompt = content_str(msg["content"])
            prompt += f"{SEP}{role}: {content_prompt}\n"
        prompt += "\n" + SEP + "Assistant: "
        # Keep only the raw base64 payload; the data-URL prefix is stripped.
        images = [re.sub("data:image/.+;base64,", "", im, count=1) for im in images]
        print(colored(prompt, "blue"))

        out = ""
        retry = 10
        while len(out) == 0 and retry > 0:
            # Image names will be inferred automatically from llava_call.
            out = llava_call_binary(
                prompt=prompt,
                images=images,
                config_list=self.llm_config["config_list"],
                temperature=self.llm_config.get("temperature", 0.5),
                max_new_tokens=self.llm_config.get("max_new_tokens", 2000),
            )
            retry -= 1

        assert out != "", "Empty response from LLaVA."

        return True, out


def _llava_call_binary_with_config(
    prompt: str, images: list, config: dict, max_new_tokens: int = 1000, temperature: float = 0.5, seed: int = 1
):
    if config["base_url"].find("0.0.0.0") >= 0 or config["base_url"].find("localhost") >= 0:
        llava_mode = "local"
    else:
        llava_mode = "remote"

    if llava_mode == "local":
        headers = {"User-Agent": "LLaVA Client"}
        pload = {
            "model": config["model"],
            "prompt": prompt,
            "max_new_tokens": max_new_tokens,
            "temperature": temperature,
            "stop": SEP,
            "images": images,
        }

        response = requests.post(
            config["base_url"].rstrip("/") + "/worker_generate_stream", headers=headers, json=pload, stream=False
        )

        for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
            if chunk:
                data = json.loads(chunk.decode("utf-8"))
                output = data["text"].split(SEP)[-1]
    elif llava_mode == "remote":
        # The Replicate version of the model only supports one image for now.
        img = "data:image/jpeg;base64," + images[0]
        response = replicate.run(
            config["base_url"], input={"image": img, "prompt": prompt.replace("<image>", " "), "seed": seed}
        )
        # The yorickvp/llava-13b model can stream output as it's running.
        # The predict method returns an iterator, and you can iterate over that output.
        output = ""
        for item in response:
            # https://replicate.com/yorickvp/llava-13b/versions/2facb4a474a0462c15041b78b1ad70952ea46b5ec6ad29583c0b29dbd4249591/api#output-schema
            output += item

    # Remove the prompt and surrounding whitespace.
    output = output.replace(prompt, "").strip()
    return output


def llava_call_binary(
    prompt: str, images: list, config_list: list, max_new_tokens: int = 1000, temperature: float = 0.5, seed: int = 1
):
    # TODO 1: add caching around the LLaVA call to save compute and cost
    # TODO 2: add `seed` to ensure reproducibility. The seed is not working now.
    for config in config_list:
        try:
            return _llava_call_binary_with_config(prompt, images, config, max_new_tokens, temperature, seed)
        except Exception as e:
            print(f"Error: {e}")
            continue


def llava_call(prompt: str, llm_config: dict) -> str:
    """
    Make a call to the LLaVA service to generate text based on a given prompt.
    """
    prompt, images = llava_formater(prompt, order_image_tokens=False)

    for im in images:
        if len(im) == 0:
            raise RuntimeError("An image is empty!")

    return llava_call_binary(
        prompt,
        images,
        config_list=llm_config["config_list"],
        max_new_tokens=llm_config.get("max_new_tokens", 2000),
        temperature=llm_config.get("temperature", 0.5),
        seed=llm_config.get("seed", None),
    )
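
For reference, a hedged sketch of driving this agent in the "remote" mode above (illustrative, not part of the diff): the base_url is the yorickvp/llava-13b Replicate version already cited in the code comments, a REPLICATE_API_TOKEN environment variable is assumed, and the other names and URLs are placeholders.

# Illustrative sketch (not part of this commit). Remote mode: base_url points at the
# yorickvp/llava-13b Replicate version referenced in the code comments above; the
# REPLICATE_API_TOKEN environment variable must be set. Names and the image URL are placeholders.
from autogen.agentchat import UserProxyAgent
from autogen.agentchat.contrib.llava_agent import LLaVAAgent

llava_config_list = [
    {
        "model": "llava-13b",  # informational in remote mode
        "api_key": "None",
        "base_url": "yorickvp/llava-13b:2facb4a474a0462c15041b78b1ad70952ea46b5ec6ad29583c0b29dbd4249591",
    }
]

image_agent = LLaVAAgent(
    name="llava",
    llm_config={"config_list": llava_config_list, "temperature": 0.5, "max_new_tokens": 500},
)
user_proxy = UserProxyAgent(
    name="user",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=0,
    code_execution_config=False,
)

user_proxy.initiate_chat(
    image_agent,
    message="Describe this image: <img https://example.com/photo.jpg>",
)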
autogen/agentchat/contrib/multimodal_conversable_agent.py

+107 (new file)

@@ -0,0 +1,107 @@
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from autogen import OpenAIWrapper
from autogen.agentchat import Agent, ConversableAgent
from autogen.code_utils import content_str
from autogen.img_utils import gpt4v_formatter

try:
    from termcolor import colored
except ImportError:

    def colored(x, *args, **kwargs):
        return x


DEFAULT_LMM_SYS_MSG = """You are a helpful AI assistant."""


class MultimodalConversableAgent(ConversableAgent):
    def __init__(
        self,
        name: str,
        system_message: Optional[Union[str, List]] = DEFAULT_LMM_SYS_MSG,
        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
        *args,
        **kwargs,
    ):
        """
        Args:
            name (str): agent name.
            system_message (str): system message for the OpenAIWrapper inference.
                Please override this attribute if you want to reprogram the agent.
            **kwargs (dict): Please refer to other kwargs in
                [ConversableAgent](../conversable_agent#__init__).
        """
        super().__init__(
            name,
            system_message,
            is_termination_msg=is_termination_msg,
            *args,
            **kwargs,
        )

        self.update_system_message(system_message)
        self._is_termination_msg = (
            is_termination_msg
            if is_termination_msg is not None
            else (lambda x: any([item["text"] == "TERMINATE" for item in x.get("content") if item["type"] == "text"]))
        )

    @property
    def system_message(self) -> List:
        """Return the system message."""
        return self._oai_system_message[0]["content"]

    def update_system_message(self, system_message: Union[Dict, List, str]):
        """Update the system message.

        Args:
            system_message (str): system message for the OpenAIWrapper inference.
        """
        self._oai_system_message[0]["content"] = self._message_to_dict(system_message)["content"]
        self._oai_system_message[0]["role"] = "system"

    @staticmethod
    def _message_to_dict(message: Union[Dict, List, str]):
        """Convert a message to a dictionary.

        The message can be a string, a list, or a dictionary. A string is formatted with
        gpt4v_formatter and put in the "content" field of the new dictionary.
        """
        if isinstance(message, str):
            return {"content": gpt4v_formatter(message)}
        if isinstance(message, list):
            return {"content": message}
        else:
            return message

    def _print_received_message(self, message: Union[Dict, str], sender: Agent):
        # Print the received message.
        print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True)
        if message.get("role") == "function":
            func_print = f"***** Response from calling function \"{message['name']}\" *****"
            print(colored(func_print, "green"), flush=True)
            print(content_str(message["content"]), flush=True)
            print(colored("*" * len(func_print), "green"), flush=True)
        else:
            content = message.get("content")
            if content is not None:
                if "context" in message:
                    content = OpenAIWrapper.instantiate(
                        content,
                        message["context"],
                        self.llm_config and self.llm_config.get("allow_format_str_template", False),
                    )
                print(content_str(content), flush=True)
            if "function_call" in message:
                func_print = f"***** Suggested function call: {message['function_call'].get('name', '(No function name found)')} *****"
                print(colored(func_print, "green"), flush=True)
                print(
                    "Arguments: \n",
                    message["function_call"].get("arguments", "(No arguments found)"),
                    flush=True,
                    sep="",
                )
                print(colored("*" * len(func_print), "green"), flush=True)
        print("\n", "-" * 80, flush=True, sep="")
