# coder_plot_chart_mixin_test.py
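# This test drives a Vicuna-backed LLM as a LangChain single-action agent that
# edits and runs Python through the code-editor tool mixin until it has plotted
# an example matplotlib chart.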

import re
from typing import List, Union

from langchain import LLMChain
from langchain.agents import (
    AgentExecutor,
    LLMSingleActionAgent,
    Tool,
    AgentOutputParser,
)
from langchain.prompts import StringPromptTemplate
from langchain.schema import AgentAction, AgentFinish

from code_it.langchain.python_langchain_tool_mixin import LangchainPythonToolMixin
from langchain_app.models.vicuna_request_llm import VicunaLLM

llm = VicunaLLM()
code_editor = LangchainPythonToolMixin()

tools = [
    code_editor.build_add_code_tool(),
    code_editor.build_run_tool(),
    code_editor.build_pip_install(),
]
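
# Each tool wraps one code-editor capability: appending code to the current
# source, running it, and pip-installing missing dependencies.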
template = """You're a programmer AI.
You are asked to code a certain task.
You have access to a Code Editor, that can be used through the following tools:
{tools}
You should ALWAYS think what to do next.
Use the following format:
Task: the input task you must implement
Current Source Code: Your current code state that you are editing
Thought: you should always think about what to code next
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: The result of your last action
... (this Thought/Action/Action Input/Source Code/Code Result can repeat N times)
Thought: I have finished the task
Task Completed: the task has been implemented
Example task:
Task: the input task you must implement
Thought: To start, we need to add the line of code to print 'hello world'
Action: CodeEditorAddCode
Action Input:
print("hello world") end of llm ouput
Observation:None
Thought: I have added the line of code to print 'hello world'. I should execute the code to test the output
Action: CodeEditorRunCode
Action Input:
Observation:Program Succeeded
Stdout:b'hello world\n'
Stderr:b''
Thought: The output is correct, it should be 'hello world'
Action: None
Action Input:
Output is correct
Observation:None is not a valid tool, try another one.
Thought: I have concluded that the output is correct
Task Completed: the task is completed.
REMEMBER: don't install the same package more than once
Now we begin with a real task!
Task: {input}
Source Code: {source_code}
{agent_scratchpad}
Thought:"""

# Set up a prompt template
class CodeEditorPromptTemplate(StringPromptTemplate):
    # The template to use
    template: str
    code_editor: LangchainPythonToolMixin
    tools: List[Tool]

    def format(self, **kwargs) -> str:
        # Get the intermediate steps (AgentAction, Observation tuples)
        # Format them in a particular way
        intermediate_steps = kwargs.pop("intermediate_steps")
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\nObservation: {observation}\nThought: "
        # Set the agent_scratchpad variable to that value
        kwargs["agent_scratchpad"] = thoughts
        kwargs["source_code"] = self.code_editor.display_code()
        kwargs["tools"] = "\n".join(
            [f"{tool.name}: {tool.description}" for tool in self.tools]
        )
        kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
        return self.template.format(**kwargs)

prompt = CodeEditorPromptTemplate(
    template=template,
    code_editor=code_editor,
    tools=tools,
    input_variables=["input", "intermediate_steps"],
)
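# Only "input" and "intermediate_steps" are declared as input variables;
# "source_code", "tools", "tool_names" and "agent_scratchpad" are filled in by
# format() above on every call.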

class CodeEditorOutputParser(AgentOutputParser):
    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
        print("llm output: ", llm_output, "end of llm output")
        # Check if the agent should finish
        if "Task Completed:" in llm_output:
            return AgentFinish(
                # Return values is generally always a dictionary with a single `output` key
                # It is not recommended to try anything else at the moment :)
                return_values={"output": llm_output},
                log=llm_output,
            )
        # Parse out the action and action input
        regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        match = re.search(regex, llm_output, re.DOTALL)
        if not match:
            raise ValueError(f"Could not parse LLM output: `{llm_output}`")
        action = match.group(1).strip()
        action_input = match.group(2)
        # Return the action and action input
        return AgentAction(
            tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
        )
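
# Illustrative completion this parser handles (not emitted by this test itself):
#
#   Thought: I need to add the plotting code
#   Action: CodeEditorAddCode
#   Action Input:
#   import matplotlib.pyplot as plt
#
# -> AgentAction(tool="CodeEditorAddCode",
#                tool_input='import matplotlib.pyplot as plt')
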
output_parser = CodeEditorOutputParser()
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]

agent = LLMSingleActionAgent(
    llm_chain=llm_chain,
    output_parser=output_parser,
    stop=["\nObservation:"],
    allowed_tools=tool_names,
)
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)
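
# The executor loops generate -> parse -> run tool, feeding each observation back
# through the scratchpad, until the parser returns AgentFinish ("Task Completed:").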
agent_executor.run(
    """
Your job is to plot an example chart using matplotlib. Create your own random data.
Run this code only when you're finished.
DO NOT add code and run it in a single step.
"""
)