Skip to content

Commit 593f3c5

Browse files
Merge pull request #23 from BillSchumacher/dev
Dev -> Master
2 parents b9ffc13 + 4daacf8 commit 593f3c5

17 files changed

+362
-439
lines changed

BULLETIN.md

+1-6
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,4 @@
11
Welcome to Auto-GPT! We'll keep you informed of the latest news and features by printing messages here.
22
If you don't wish to see this message, you can run Auto-GPT with the --skip-news flag
33

4-
# INCLUDED COMMAND 'send_tweet' IS DEPRECATED, AND WILL BE REMOVED IN THE NEXT STABLE RELEASE
5-
Base Twitter functionality (and more) is now covered by plugins: https://github.com/Significant-Gravitas/Auto-GPT-Plugins
6-
7-
## Changes to Docker configuration
8-
The workdir has been changed from /home/appuser to /app. Be sure to update any volume mounts accordingly.
9-
4+
# This repo does not track with upstream!! If there's functionality not included open a PR.

autogpt/agent/agent.py

+6-7
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,15 @@
33
from colorama import Fore, Style
44

55
from autogpt.app import execute_command, get_command
6-
from autogpt.chat import chat_with_ai, create_chat_message
6+
from autogpt.chat import chat_with_ai
77
from autogpt.config import Config
88
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
99
from autogpt.json_utils.utilities import validate_json
1010
from autogpt.llm_utils import create_chat_completion
1111
from autogpt.logs import logger, print_assistant_thoughts
1212
from autogpt.speech import say_text
1313
from autogpt.spinner import Spinner
14+
from autogpt.types.openai import Message
1415
from autogpt.utils import clean_input, send_chat_message_to_user
1516
from autogpt.workspace import Workspace
1617

@@ -337,12 +338,10 @@ def update_memory(self, assistant_reply, result, user_input):
337338

338339
def update_history(self, result):
339340
if result is not None:
340-
self.full_message_history.append(create_chat_message("system", result))
341+
self.full_message_history.append(Message("system", result))
341342
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
342343
return
343-
self.full_message_history.append(
344-
create_chat_message("system", "Unable to execute command")
345-
)
344+
self.full_message_history.append(Message("system", "Unable to execute command"))
346345
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
347346

348347
def _resolve_pathlike_command_args(self, command_args):
@@ -383,8 +382,8 @@ def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
383382
plan = thoughts.get("plan", "")
384383
thought = thoughts.get("thoughts", "")
385384
criticism = thoughts.get("criticism", "")
386-
feedback_thoughts = thought + reasoning + plan + criticism
385+
feedback_thoughts = f"{thought} {reasoning} {plan} {criticism}"
387386
return create_chat_completion(
388-
[{"role": "user", "content": feedback_prompt + feedback_thoughts}],
387+
[Message("user", f"{feedback_prompt} {feedback_thoughts}")],
389388
llm_model,
390389
)

autogpt/agent/agent_manager.py

+26-15
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,10 @@
11
"""Agent manager for managing GPT agents"""
22
from __future__ import annotations
33

4-
from typing import List
5-
64
from autogpt.config.config import Config
75
from autogpt.llm_utils import create_chat_completion
86
from autogpt.singleton import Singleton
9-
from autogpt.types.openai import Message
7+
from autogpt.types.openai import Message, ensure_messages
108

119

1210
class AgentManager(metaclass=Singleton):
@@ -20,7 +18,7 @@ def __init__(self):
2018
# Create new GPT agent
2119
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
2220

23-
def handle_preinstruction(self, messages: List[Message]) -> List[Message]:
21+
def handle_preinstruction(self, messages: list[Message]) -> list[Message]:
2422
"""Handle pre-instruction plugins
2523
2624
Args:
@@ -34,7 +32,7 @@ def handle_preinstruction(self, messages: List[Message]) -> List[Message]:
3432
continue
3533
if plugin_messages := plugin.pre_instruction(messages):
3634
messages.extend(iter(plugin_messages))
37-
return messages
35+
return ensure_messages(messages)
3836

3937
def handle_postinstruction(self, agent_reply) -> str:
4038
"""Handle post-instruction plugins
@@ -52,7 +50,7 @@ def handle_postinstruction(self, agent_reply) -> str:
5250
agent_reply = plugin.post_instruction(agent_reply)
5351
return agent_reply
5452

55-
def handle_oninstruction(self, messages: List[Message]) -> List[Message]:
53+
def handle_oninstruction(self, messages: list[Message]) -> list[Message]:
5654
"""Handle on-instruction plugins
5755
5856
Args:
@@ -70,7 +68,7 @@ def handle_oninstruction(self, messages: List[Message]) -> List[Message]:
7068
plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
7169

7270
if plugins_reply and plugins_reply != "":
73-
messages.append({"role": "assistant", "content": plugins_reply})
71+
messages.append(Message("assistant", plugins_reply))
7472
return messages
7573

7674
def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
@@ -84,19 +82,27 @@ def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
8482
Returns:
8583
The key of the new agent
8684
"""
87-
messages: List[Message] = [
88-
{"role": "system", "content": "You are an autonomous AI agent. You can ask me to do things. I will try my best to do them."},
89-
{"role": "user", "content": prompt},
85+
messages: list[Message] = [
86+
Message(
87+
"system",
88+
"You are an autonomous AI agent. You can ask me to do things."
89+
" I will try my best to do them.",
90+
),
91+
Message("user", prompt),
9092
]
9193
messages = self.handle_preinstruction(messages)
92-
token_limit = self.cfg.fast_token_limit if not self.cfg.use_fastchat else self.cfg.fastchat_token_limit
94+
token_limit = (
95+
self.cfg.fast_token_limit
96+
if not self.cfg.use_fastchat
97+
else self.cfg.fastchat_token_limit
98+
)
9399
agent_reply = create_chat_completion(
94100
model=model,
95101
messages=messages,
96102
max_tokens=token_limit,
97103
use_fastchat=self.cfg.use_fastchat,
98104
)
99-
messages.append({"role": "assistant", "content": agent_reply})
105+
messages.append(Message("assistant", agent_reply))
100106
messages = self.handle_oninstruction(messages)
101107
key = self.next_key
102108
self.next_key += 1
@@ -114,16 +120,21 @@ def message_agent(self, key: str | int, message: str) -> str:
114120
The agent's response
115121
"""
116122
task, messages, model = self.agents[int(key)]
117-
messages.append({"role": "user", "content": message})
123+
messages.append(Message("user", message))
118124
messages = self.handle_preinstruction(messages)
119-
token_limit = self.cfg.fast_token_limit if not self.cfg.use_fastchat else self.cfg.fastchat_token_limit
125+
126+
token_limit = (
127+
self.cfg.fast_token_limit
128+
if not self.cfg.use_fastchat
129+
else self.cfg.fastchat_token_limit
130+
)
120131
agent_reply = create_chat_completion(
121132
model=model,
122133
messages=messages,
123134
max_tokens=token_limit,
124135
use_fastchat=self.cfg.use_fastchat,
125136
)
126-
messages.append({"role": "assistant", "content": agent_reply})
137+
messages.append(Message("assistant", agent_reply))
127138
messages = self.handle_oninstruction(messages)
128139
return self.handle_postinstruction(agent_reply)
129140

autogpt/api_manager.py

+25-13
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,12 @@
11
from __future__ import annotations
22

33
import openai
4+
from tenacity import (
5+
retry,
6+
retry_if_not_exception_type,
7+
stop_after_attempt,
8+
wait_random_exponential,
9+
)
410

511
from autogpt.config import Config
612
from autogpt.logs import logger
@@ -21,6 +27,11 @@ def reset(self):
2127
self.total_cost = 0
2228
self.total_budget = 0.0
2329

30+
@retry(
31+
wait=wait_random_exponential(min=1, max=20),
32+
stop=stop_after_attempt(10),
33+
retry=retry_if_not_exception_type(openai.InvalidRequestError),
34+
)
2435
def create_chat_completion(
2536
self,
2637
messages: list, # type: ignore
@@ -31,13 +42,14 @@ def create_chat_completion(
3142
) -> str:
3243
"""
3344
Create a chat completion and update the cost.
45+
3446
Args:
35-
messages (list): The list of messages to send to the API.
36-
model (str): The model to use for the API call.
37-
temperature (float): The temperature to use for the API call.
38-
max_tokens (int): The maximum number of tokens for the API call.
47+
messages (list): The list of messages to send to the API.
48+
model (str): The model to use for the API call.
49+
temperature (float): The temperature to use for the API call.
50+
max_tokens (int): The maximum number of tokens for the API call.
3951
Returns:
40-
str: The AI's response.
52+
str: The AI's response.
4153
"""
4254
cfg = Config()
4355
if temperature is None:
@@ -70,9 +82,9 @@ def update_cost(self, prompt_tokens, completion_tokens, model):
7082
Update the total cost, prompt tokens, and completion tokens.
7183
7284
Args:
73-
prompt_tokens (int): The number of tokens used in the prompt.
74-
completion_tokens (int): The number of tokens used in the completion.
75-
model (str): The model used for the API call.
85+
prompt_tokens (int): The number of tokens used in the prompt.
86+
completion_tokens (int): The number of tokens used in the completion.
87+
model (str): The model used for the API call.
7688
"""
7789
self.total_prompt_tokens += prompt_tokens
7890
self.total_completion_tokens += completion_tokens
@@ -87,7 +99,7 @@ def set_total_budget(self, total_budget):
8799
Sets the total user-defined budget for API calls.
88100
89101
Args:
90-
total_budget (float): The total budget for API calls.
102+
total_budget (float): The total budget for API calls.
91103
"""
92104
self.total_budget = total_budget
93105

@@ -96,7 +108,7 @@ def get_total_prompt_tokens(self):
96108
Get the total number of prompt tokens.
97109
98110
Returns:
99-
int: The total number of prompt tokens.
111+
int: The total number of prompt tokens.
100112
"""
101113
return self.total_prompt_tokens
102114

@@ -105,7 +117,7 @@ def get_total_completion_tokens(self):
105117
Get the total number of completion tokens.
106118
107119
Returns:
108-
int: The total number of completion tokens.
120+
int: The total number of completion tokens.
109121
"""
110122
return self.total_completion_tokens
111123

@@ -114,7 +126,7 @@ def get_total_cost(self):
114126
Get the total cost of API calls.
115127
116128
Returns:
117-
float: The total cost of API calls.
129+
float: The total cost of API calls.
118130
"""
119131
return self.total_cost
120132

@@ -123,6 +135,6 @@ def get_total_budget(self):
123135
Get the total user-defined budget for API calls.
124136
125137
Returns:
126-
float: The total budget for API calls.
138+
float: The total budget for API calls.
127139
"""
128140
return self.total_budget

autogpt/app.py

+11-3
Original file line numberDiff line numberDiff line change
@@ -213,9 +213,17 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
213213
return f"Agent {name} created with key {key}. First response: {agent_response}"
214214

215215

216-
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
216+
@command("message_agent", "Message GPT Agent", '"key": "<int>", "message": "<message>"')
217217
def message_agent(key: str, message: str) -> str:
218-
"""Message an agent with a given key and message"""
218+
"""Message an agent with a given key and message
219+
220+
Args:
221+
key (str): The key of the agent, should be a number
222+
message (str): The message to send to the agent
223+
224+
Returns:
225+
str: The response of the agent
226+
"""
219227
# Check if the key is a valid integer
220228
if is_valid_int(key):
221229
agent_response = AGENT_MANAGER.message_agent(int(key), message)
@@ -233,7 +241,7 @@ def list_agents() -> str:
233241
"""List all agents
234242
235243
Returns:
236-
str: A list of all agents
244+
str: A list of all agents, ids (numbers)
237245
"""
238246
return "List of agents:\n" + "\n".join(
239247
[str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]

0 commit comments

Comments
 (0)