Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

The unlooping and fixing of file execution. #3368

Merged
merged 14 commits into from
Apr 27, 2023
8 changes: 8 additions & 0 deletions autogpt/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
import os
import random
import sys

from dotenv import load_dotenv

if "pytest" in sys.argv or "pytest" in sys.modules or os.getenv("CI"):
print("Setting random seed to 42")
random.seed(42)

# Load the users .env file into environment variables
load_dotenv(verbose=True, override=True)

Expand Down
18 changes: 12 additions & 6 deletions autogpt/chat.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import time
from random import shuffle

from openai.error import RateLimitError

Expand Down Expand Up @@ -80,12 +81,17 @@ def chat_with_ai(

logger.debug(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000

relevant_memory = (
""
if len(full_message_history) == 0
else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
)
if len(full_message_history) == 0:
relevant_memory = ""
else:
recent_history = full_message_history[-5:]
shuffle(recent_history)
relevant_memories = permanent_memory.get_relevant(
str(recent_history), 5
)
if relevant_memories:
shuffle(relevant_memories)
relevant_memory = str(relevant_memories)

logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")

Expand Down
5 changes: 2 additions & 3 deletions autogpt/commands/execute_code.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""Execute code in a Docker container"""
import os
import subprocess
from pathlib import Path

import docker
from docker.errors import ImageNotFound
Expand Down Expand Up @@ -40,7 +41,6 @@ def execute_python_file(filename: str) -> str:

try:
client = docker.from_env()

# You can replace this with the desired Python image/version
# You can find available Python images on Docker Hub:
# https://hub.docker.com/_/python
Expand All @@ -60,10 +60,9 @@ def execute_python_file(filename: str) -> str:
print(f"{status}: {progress}")
elif status:
print(status)

container = client.containers.run(
image_name,
f"python {filename}",
f"python {Path(filename).relative_to(CFG.workspace_path)}",
volumes={
CFG.workspace_path: {
"bind": "/workspace",
Expand Down
19 changes: 19 additions & 0 deletions autogpt/json_utils/json_fix_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,14 +91,33 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
Returns:
str: The fixed JSON string.
"""
assistant_reply = assistant_reply.strip()
if assistant_reply.startswith("```json"):
assistant_reply = assistant_reply[7:]
if assistant_reply.endswith("```"):
assistant_reply = assistant_reply[:-3]
try:
return json.loads(assistant_reply) # just check the validity
except json.JSONDecodeError: # noqa: E722
pass

if assistant_reply.startswith("json "):
assistant_reply = assistant_reply[5:]
assistant_reply = assistant_reply.strip()
try:
return json.loads(assistant_reply) # just check the validity
except json.JSONDecodeError: # noqa: E722
pass

# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
logger.debug("Assistant reply JSON: %s", str(assistant_reply_json))
if assistant_reply_json == {}:
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply
)

logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json))
if assistant_reply_json != {}:
return assistant_reply_json

Expand Down
3 changes: 3 additions & 0 deletions autogpt/prompts/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,9 @@ def build_default_prompt_generator() -> PromptGenerator:
"Every command has a cost, so be smart and efficient. Aim to complete tasks in"
" the least number of steps."
)
prompt_generator.add_performance_evaluation(
"If you cannot think of a valid command to perform start or message an agent to determine the next command."
)
prompt_generator.add_performance_evaluation("Write all code to a file.")
return prompt_generator

Expand Down
8 changes: 8 additions & 0 deletions autogpt/workspace/workspace.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@

from pathlib import Path

from autogpt.logs import logger


class Workspace:
"""A class that represents a workspace for an AutoGPT agent."""
Expand Down Expand Up @@ -112,15 +114,21 @@ def _sanitize_path(
if root is None:
return Path(relative_path).resolve()

logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'")

root, relative_path = Path(root).resolve(), Path(relative_path)

logger.debug(f"Resolved root as '{root}'")

if relative_path.is_absolute():
raise ValueError(
f"Attempted to access absolute path '{relative_path}' in workspace '{root}'."
)

full_path = root.joinpath(relative_path).resolve()

logger.debug(f"Joined paths as '{full_path}'")

if restrict_to_root and not full_path.is_relative_to(root):
raise ValueError(
f"Attempted to access path '{full_path}' outside of workspace '{root}'."
Expand Down