From b515b332dee377bd1eb2cad0bd29d2241a345a04 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 19:12:42 -0500
Subject: [PATCH 1/9] The unlooping and fixing of file execution.

---
 autogpt/chat.py                  |  7 ++++++-
 autogpt/commands/execute_code.py |  5 ++---
 autogpt/workspace/workspace.py   | 10 +++++++++-
 3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/autogpt/chat.py b/autogpt/chat.py
index 4b906a001555..7a6809e4af4a 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -1,4 +1,5 @@
 import time
+from random import choice, shuffle
 
 from openai.error import RateLimitError
 
@@ -84,7 +85,11 @@ def chat_with_ai(
             relevant_memory = (
                 ""
                 if len(full_message_history) == 0
-                else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
+                else shuffle(
+                    permanent_memory.get_relevant(
+                        str(shuffle(full_message_history[-5:])), 5
+                    )
+                )
             )
 
             logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
diff --git a/autogpt/commands/execute_code.py b/autogpt/commands/execute_code.py
index ca0586f44887..590f95cc0511 100644
--- a/autogpt/commands/execute_code.py
+++ b/autogpt/commands/execute_code.py
@@ -1,6 +1,7 @@
 """Execute code in a Docker container"""
 import os
 import subprocess
+from pathlib import Path
 
 import docker
 from docker.errors import ImageNotFound
@@ -40,7 +41,6 @@ def execute_python_file(filename: str) -> str:
 
     try:
         client = docker.from_env()
-
         # You can replace this with the desired Python image/version
         # You can find available Python images on Docker Hub:
         # https://hub.docker.com/_/python
@@ -60,10 +60,9 @@ def execute_python_file(filename: str) -> str:
                     print(f"{status}: {progress}")
                 elif status:
                     print(status)
-
         container = client.containers.run(
             image_name,
-            f"python {filename}",
+            f"python {Path(filename).relative_to(CFG.workspace_path)}",
             volumes={
                 CFG.workspace_path: {
                     "bind": "/workspace",
diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py
index 91d2140e45ea..c1050230a8d4 100644
--- a/autogpt/workspace/workspace.py
+++ b/autogpt/workspace/workspace.py
@@ -11,6 +11,8 @@
 
 from pathlib import Path
 
+from autogpt.logs import logger
+
 
 class Workspace:
     """A class that represents a workspace for an AutoGPT agent."""
@@ -112,7 +114,11 @@ def _sanitize_path(
         if root is None:
            return Path(relative_path).resolve()
 
-        root, relative_path = Path(root), Path(relative_path)
+        logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")
+
+        root, relative_path = Path(root).resolve(), Path(relative_path)
+
+        logger.debug(f"Resolved root as '{root}'")
 
         if relative_path.is_absolute():
             raise ValueError(
                 f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
@@ -121,6 +127,8 @@ def _sanitize_path(
 
         full_path = root.joinpath(relative_path).resolve()
 
+        logger.debug(f"Joined paths as '{full_path}'")
+
         if restrict_to_root and not full_path.is_relative_to(root):
             raise ValueError(
                 f"Attempted to access path '{full_path}' outside of workspace '{root}'."
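
A note on the execute_code and workspace changes in PATCH 1/9: the container command is now built from the script's path relative to the workspace (which is bind-mounted at /workspace inside the container), and _sanitize_path resolves both the root and the joined path before checking containment with is_relative_to. The sketch below is illustrative only, a standalone toy with a hypothetical workspace path rather than AutoGPT's actual Workspace API, showing the two pathlib idioms the patch relies on:

from pathlib import Path

workspace = Path("/tmp/auto_gpt_workspace").resolve()  # hypothetical workspace root

def container_command(host_file: Path) -> str:
    # The workspace dir is bind-mounted at /workspace inside the container, so
    # the command must use the path relative to the workspace root, not the
    # absolute host path.
    return f"python {host_file.relative_to(workspace)}"

def sanitize(relative_path: str) -> Path:
    # Resolve against the workspace root and refuse anything that escapes it,
    # mirroring what Workspace._sanitize_path does with is_relative_to().
    full_path = workspace.joinpath(relative_path).resolve()
    if not full_path.is_relative_to(workspace):
        raise ValueError(f"'{full_path}' is outside of workspace '{workspace}'")
    return full_path

print(container_command(workspace / "scripts" / "hello.py"))  # python scripts/hello.py
print(sanitize("scripts/hello.py"))  # absolute path inside the workspace
try:
    sanitize("../outside.py")
except ValueError as err:
    print(err)  # path traversal out of the workspace is rejected

Resolving the root first matters because is_relative_to compares path components lexically, so an unresolved (relative or symlinked) root could otherwise fail to match the fully resolved target path.
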
From 25aed0f28e7a32fbe5d34f359c65200d368e60d3 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 19:17:42 -0500
Subject: [PATCH 2/9] lint

---
 autogpt/workspace/workspace.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/autogpt/workspace/workspace.py b/autogpt/workspace/workspace.py
index 3284f0d67bf5..c1050230a8d4 100644
--- a/autogpt/workspace/workspace.py
+++ b/autogpt/workspace/workspace.py
@@ -120,7 +120,6 @@ def _sanitize_path(
 
         logger.debug(f"Resolved root as '{root}'")
 
-
         if relative_path.is_absolute():
             raise ValueError(
                 f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."

From ee6a46d9a6cf90d3cbcfc5a71e5a4336b492fe5d Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 21:27:28 -0500
Subject: [PATCH 3/9] Use static random seed during testing. remove unused import.

---
 autogpt/__init__.py | 8 ++++++++
 autogpt/chat.py     | 2 +-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/autogpt/__init__.py b/autogpt/__init__.py
index 5f5b20ef2311..909f8bf4b1db 100644
--- a/autogpt/__init__.py
+++ b/autogpt/__init__.py
@@ -1,5 +1,13 @@
+import os
+import random
+import sys
+
 from dotenv import load_dotenv
 
+if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
+    print("Setting random seed to 42")
+    random.seed(42)
+
 # Load the users .env file into environment variables
 load_dotenv(verbose=True, override=True)
 
diff --git a/autogpt/chat.py b/autogpt/chat.py
index 7a6809e4af4a..1c46d829723c 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -1,5 +1,5 @@
 import time
-from random import choice, shuffle
+from random import shuffle
 
 from openai.error import RateLimitError
 

From 1f36a255205594824f9e3678140823d84e93f031 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 21:51:09 -0500
Subject: [PATCH 4/9] Fix bug

---
 autogpt/chat.py | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/autogpt/chat.py b/autogpt/chat.py
index 1c46d829723c..3be180115789 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -81,16 +81,17 @@ def chat_with_ai(
             logger.debug(f"Token limit: {token_limit}")
             send_token_limit = token_limit - 1000
-
-            relevant_memory = (
-                ""
-                if len(full_message_history) == 0
-                else shuffle(
-                    permanent_memory.get_relevant(
-                        str(shuffle(full_message_history[-5:])), 5
-                    )
+            if len(full_message_history) == 0:
+                relevant_memory = ""
+            else:
+                recent_history = full_message_history[-5:]
+                shuffle(recent_history)
+                relevant_memories = permanent_memory.get_relevant(
+                    str(recent_history), 5
                 )
-            )
+                shuffle(relevant_memories)
+                relevant_memory = str(relevant_memories)
+
 
             logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")

From 07491d25b54b7da249170b7372afa23fee20b305 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 21:53:15 -0500
Subject: [PATCH 5/9] Actually fix bug.

---
 autogpt/chat.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/autogpt/chat.py b/autogpt/chat.py
index 3be180115789..e545eabb729a 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -89,7 +89,8 @@ def chat_with_ai(
                 relevant_memories = permanent_memory.get_relevant(
                     str(recent_history), 5
                 )
-                shuffle(relevant_memories)
+                if relevant_memories:
+                    shuffle(relevant_memories)
                 relevant_memory = str(relevant_memories)

From 50b33c529d10a09a2d458b00b7f8d2823dfa2923 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 21:56:43 -0500
Subject: [PATCH 6/9] lint

---
 autogpt/chat.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/autogpt/chat.py b/autogpt/chat.py
index e545eabb729a..469ec9bd38df 100644
--- a/autogpt/chat.py
+++ b/autogpt/chat.py
@@ -93,7 +93,6 @@ def chat_with_ai(
                     shuffle(relevant_memories)
                 relevant_memory = str(relevant_memories)
 
-
             logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
 
             (

From eeae701d2eff9e05d6c59fc3c00a7d9121a2aa6b Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 22:31:29 -0500
Subject: [PATCH 7/9] Unloop a bit more an fix json.

---
 autogpt/json_utils/json_fix_llm.py | 19 +++++++++++++++++++
 autogpt/prompts/prompt.py          |  3 +++
 2 files changed, 22 insertions(+)

diff --git a/autogpt/json_utils/json_fix_llm.py b/autogpt/json_utils/json_fix_llm.py
index 869aed125cfb..5bbf51acab91 100644
--- a/autogpt/json_utils/json_fix_llm.py
+++ b/autogpt/json_utils/json_fix_llm.py
@@ -91,14 +91,33 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
     Returns:
         str: The fixed JSON string.
     """
+    assistant_reply = assistant_reply.strip()
+    if assistant_reply.startswith("```json"):
+        assistant_reply = assistant_reply[7:]
+    if assistant_reply.endswith("```"):
+        assistant_reply = assistant_reply[:-3]
+    try:
+        return json.loads(assistant_reply)  # just check the validity
+    except json.JSONDecodeError:  # noqa: E722
+        pass
+
+    if assistant_reply.startswith("json "):
+        assistant_reply = assistant_reply[5:]
+        assistant_reply = assistant_reply.strip()
+    try:
+        return json.loads(assistant_reply)  # just check the validity
+    except json.JSONDecodeError:  # noqa: E722
+        pass
 
     # Parse and print Assistant response
     assistant_reply_json = fix_and_parse_json(assistant_reply)
+    logger.debug("Assistant reply JSON: %s", assistant_reply_json)
     if assistant_reply_json == {}:
         assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
             assistant_reply
         )
+    logger.debug("Assistant reply JSON 2: %s", assistant_reply_json)
 
     if assistant_reply_json != {}:
         return assistant_reply_json
diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py
index b20a1757762a..8c26dda2131b 100644
--- a/autogpt/prompts/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -70,6 +70,9 @@ def build_default_prompt_generator() -> PromptGenerator:
     prompt_generator.add_performance_evaluation(
         "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
         " the least number of steps."
+    )
+    prompt_generator.add_performance_evaluation(
+        "If you cannot think of a valid command to perform start or message an agent to determine the next command."
     )
     prompt_generator.add_performance_evaluation("Write all code to a file.")
     return prompt_generator

From 39660d924dea0bc16203b8cf47f94591da5380df Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 22:55:19 -0500
Subject: [PATCH 8/9] Fix another bug.

---
 autogpt/json_utils/json_fix_llm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/autogpt/json_utils/json_fix_llm.py b/autogpt/json_utils/json_fix_llm.py
index 5bbf51acab91..10317accb98f 100644
--- a/autogpt/json_utils/json_fix_llm.py
+++ b/autogpt/json_utils/json_fix_llm.py
@@ -111,13 +111,13 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
 
     # Parse and print Assistant response
     assistant_reply_json = fix_and_parse_json(assistant_reply)
-    logger.debug("Assistant reply JSON: %s", assistant_reply_json)
+    logger.debug("Assistant reply JSON: %s", str(assistant_reply_json))
     if assistant_reply_json == {}:
         assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
             assistant_reply
         )
-    logger.debug("Assistant reply JSON 2: %s", assistant_reply_json)
+    logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json))
 
     if assistant_reply_json != {}:
         return assistant_reply_json

From 409e1118d1475ff2f4598c8519620a3fe1eef1c6 Mon Sep 17 00:00:00 2001
From: BillSchumacher <34168009+BillSchumacher@users.noreply.github.com>
Date: Wed, 26 Apr 2023 23:02:49 -0500
Subject: [PATCH 9/9] lint.

---
 autogpt/prompts/prompt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autogpt/prompts/prompt.py b/autogpt/prompts/prompt.py
index 8c26dda2131b..1d169fe16d36 100644
--- a/autogpt/prompts/prompt.py
+++ b/autogpt/prompts/prompt.py
@@ -70,7 +70,7 @@ def build_default_prompt_generator() -> PromptGenerator:
     prompt_generator.add_performance_evaluation(
         "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
         " the least number of steps."
-    )
+    )
     prompt_generator.add_performance_evaluation(
         "If you cannot think of a valid command to perform start or message an agent to determine the next command."
     )
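
A closing note on the json_fix_llm changes in PATCH 7/9 and 8/9: before handing the reply to the heavier repair machinery, the function now strips a leading ```json fence, a trailing ```, and a stray "json " prefix, then simply tries json.loads. The snippet below is a minimal standalone sketch of that fallback (try_parse_reply is a hypothetical helper name, not the module's real API):

import json
from typing import Any, Optional

def try_parse_reply(assistant_reply: str) -> Optional[Any]:
    # Strip markdown code fences and a leading "json " tag that models often
    # wrap around otherwise-valid JSON, then attempt a plain parse.
    reply = assistant_reply.strip()
    if reply.startswith("```json"):
        reply = reply[len("```json"):]
    if reply.endswith("```"):
        reply = reply[: -len("```")]
    if reply.startswith("json "):
        reply = reply[len("json "):]
    try:
        return json.loads(reply.strip())
    except json.JSONDecodeError:
        return None  # caller can fall back to heavier repair strategies

print(try_parse_reply('```json\n{"command": {"name": "list_files", "args": {}}}\n```'))

Returning None on failure leaves the caller free to fall back to fix_and_parse_json and the outermost-bracket repair, which is the order the patched fix_json_using_multiple_techniques uses.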