-
Notifications
You must be signed in to change notification settings - Fork 416
/
llm_vl_mix_text.py
89 lines (79 loc) · 2.73 KB
/
llm_vl_mix_text.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
"""An example of calling text and vl llm interfaces alternately"""
from qwen_agent.llm import get_chat_model
from qwen_agent.llm.schema import ContentItem, Message
def test():
    """Demo: alternate between a text LLM and a vision-language LLM on one history.

    Walks a single shared ``messages`` list through: VL image Q&A, a text-model
    chat turn, a simulated image-generation function call, and a final VL
    description of the generated image. Requires DashScope access; purely a
    side-effect demo (prints streamed chunks), returns None.
    """
    text_cfg = {'model': 'qwen-max', 'model_server': 'dashscope'}
    vl_cfg = {'model': 'qwen-vl-max', 'model_server': 'dashscope'}

    # Tool schema for the simulated image-generation function call below.
    functions = [{
        'name': 'image_gen',
        'name_for_human': 'AI绘画',
        'description': 'AI绘画(图像生成)服务,输入文本描述和图像分辨率,返回根据文本信息绘制的图片URL。',
        'parameters': {
            'type': 'object',
            'properties': {
                'prompt': {
                    'type': 'string',
                    'description': '详细描述了希望生成的图像具有什么内容,例如人物、环境、动作等细节描述,使用英文',
                },
            },
            'required': ['prompt'],
        },
        'args_format': '参数为json格式'
    }]

    def drain(stream):
        # Print every streamed chunk; the final chunk holds the full reply,
        # so return it for appending to the conversation history.
        for chunk in stream:
            print(chunk)
        return chunk

    # --- Round 1: vision-language model on an image ---
    vl_model = get_chat_model(vl_cfg)
    messages = [{
        'role':
            'user',
        'content': [{
            'text': '框出小狗并描述',
        }, {
            'image': 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg',
        }]
    }]
    messages.extend(drain(vl_model.chat(messages, stream=True)))

    # Follow-up turn, this time using the typed Message/ContentItem schema.
    messages.append(Message('user', [ContentItem(text='描述更详细一点')]))
    messages.extend(drain(vl_model.chat(messages, stream=True)))

    # --- Round 2: plain text model on the same history ---
    text_model = get_chat_model(text_cfg)
    messages.append({'role': 'user', 'content': '你是?'})
    messages.extend(drain(text_model.chat(messages, stream=True)))

    # Ask for an image so the model emits an image_gen function call.
    messages.append({'role': 'user', 'content': '画个可爱小猫'})
    messages.extend(drain(text_model.chat(messages, functions=functions, stream=True)))

    # Simulate the image_gen tool's result and let the model respond to it.
    messages.append({
        'role': 'function',
        'name': 'image_gen',
        'content': '![fig-001](https://seopic.699pic.com/photo/60098/4947.jpg_wh1200.jpg)'
    })
    messages.extend(drain(text_model.chat(messages, functions=functions, stream=True)))

    # --- Round 3: back to the VL model to describe the "generated" image ---
    messages.append({
        'role': 'user',
        'content': [{
            'text': '可以描述下这张图片吗?'
        }, {
            'image': 'https://seopic.699pic.com/photo/60098/4947.jpg_wh1200.jpg'
        }]
    })
    messages.extend(drain(vl_model.chat(messages, stream=True)))
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    test()