From 58b1baa06b98c0618937ffcb3da60f276eb64e33 Mon Sep 17 00:00:00 2001 From: Samuel Shapley Date: Mon, 31 Jul 2023 18:02:00 +0100 Subject: [PATCH] first commit to OrchestrAI --- README.md | 63 ++++++++++++++++++++++- __pycache__/ai.cpython-311.pyc | Bin 0 -> 2538 bytes ai.py | 40 +++++++++++++++ final_output.txt | 0 general_system.txt | 2 + modules.yml | 33 ++++++++++++ orchestrate.py | 87 ++++++++++++++++++++++++++++++++ pipelines/pipeline.yml | 16 ++++++ prompt.py | 54 ++++++++++++++++++++ requirements.txt | 4 ++ systems/dreamer.txt | 3 ++ systems/enhancer.txt | 2 + systems/grounder.txt | 10 ++++ systems/planner.txt | 1 + systems/programmer.txt | 0 systems/scrutinizer.txt | 1 + true_autonomy/choose_module.txt | 27 ++++++++++ true_autonomy/true_autonomy.py | 57 +++++++++++++++++++++ 18 files changed, 399 insertions(+), 1 deletion(-) create mode 100644 __pycache__/ai.cpython-311.pyc create mode 100644 ai.py create mode 100644 final_output.txt create mode 100644 general_system.txt create mode 100644 modules.yml create mode 100644 orchestrate.py create mode 100644 pipelines/pipeline.yml create mode 100644 prompt.py create mode 100644 requirements.txt create mode 100644 systems/dreamer.txt create mode 100644 systems/enhancer.txt create mode 100644 systems/grounder.txt create mode 100644 systems/planner.txt create mode 100644 systems/programmer.txt create mode 100644 systems/scrutinizer.txt create mode 100644 true_autonomy/choose_module.txt create mode 100644 true_autonomy/true_autonomy.py diff --git a/README.md b/README.md index fb5d29b..329c0e2 100644 --- a/README.md +++ b/README.md @@ -1 +1,62 @@ -# OrchestrAI \ No newline at end of file +# OrchestrAI + +OrchestrAI is a modular system for orchestrating interactions between several instances of OpenAI's GPT-4 model, potentially trained with different settings or data, in order to accomplish complex tasks. 
The system is built in Python and leverages the `networkx` library to handle the dependencies between different AI modules. + +## Getting Started + +### Prerequisites + +To run OrchestrAI, you'll need: + +- Python 3.7 or later +- The OpenAI Python library +- networkx +- PyYAML + +You can install these with pip: + +```bash +pip install openai networkx pyyaml +``` + +### Configuration + +To configure OrchestrAI, you'll need to set up a few files: + +- `ai.py` contains the AI class, which manages interactions with an OpenAI GPT-4 model. You'll need to set your OpenAI API key in this file. +- `modules.yml` lists the AI modules available for use. Each module is either a large language model (LLM) or a non-LLM module. LLM modules are driven by OpenAI's GPT-4, while non-LLM modules are placeholders for manual human intervention. +- `orchestrate.py` is the main script, which loads the modules and pipelines, constructs a directed acyclic graph (DAG) of operations, and executes them in the correct order. +- The `systems/` directory contains a text file for each LLM module, which provides an introductory prompt for the module. +- The `pipelines/` directory contains one or more YAML files describing pipelines of operations to execute. + +### Running the Script + +To run OrchestrAI, execute `orchestrate.py`: + +```bash +python orchestrate.py +``` + +The script will execute the operations in the pipeline(s) as specified, querying the GPT-4 model as necessary and storing the results. + +## Understanding the Modules + +Each module in the `modules.yml` file is either an LLM module or a non-LLM module. + +LLM modules, like `planner`, `scrutinizer`, `enhancer`, `grounder`, and `dreamer`, each have a specific role: + +- `planner` generates a plan from a task. +- `scrutinizer` scrutinizes a plan or piece of information and provides feedback. +- `enhancer` enhances a plan with additional information. 
+- `grounder` provides real-world grounding for a plan, pointing out unrealistic actions. +- `dreamer` adds creative flair and inspiration to a plan. + +Non-LLM modules, like `human_intervention`, record terminal input from the user. + +## Contributing + +We welcome contributions to OrchestrAI! Please submit a pull request with your changes, and be sure to include tests and documentation. + +## License + +OrchestrAI is open-source software, released under the [MIT License](https://opensource.org/licenses/MIT). \ No newline at end of file diff --git a/__pycache__/ai.cpython-311.pyc b/__pycache__/ai.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bf63502181fcf72031250e4abe8d169f3324294 GIT binary patch literal 2538 zcmah~O>7fK6rTNa)=nG~R3Zljx=O1Q2XKHy6`+c;B@kReD3tbKwOVbwV|x>O*PU5I zVj0N?4pD{F3sQREkP`*LA-CqzOE0||WC?3el`2)@78H6xs?;~L-o*jh&hEbXdw=uZ z_hx@eCKCwSFMs}K#bXHl!6spodxM=K4Azl_ETMw(K`-P5J{R*6@8!JAdnK>%K9-MJ zB90jYp)$Uz=2a2RBTKrCEcw2O&?ESQmXBM)mnfq=XVZno3|&5IuFRK;wc5#*`LE0R z)sOXA_c*>j^I2ta`NHSqYR%4_yguK!RysAi=$_AAoO232RlJjWlbtCTtRsw=!+FsX zz%8jLfeQk-EZ;`paKchy7PI2uz@RVcIUhBYQq4O$TL@#ov@w(66n&Yv73>cbT*t$XmyrpJp08A0 z3s?M@Tf>fN`)U}FOGmOTdp38<4rjQ;5(n8(Q@iqC*V0Yhs=n~;y?!)e*h(lnKk=~^! 
ziW~hIiqrrvv&&n*#mCZa)ZG%Bq9u=otjJRO#<9Nf!0YiX$nF20L2XJb<8Pf|muyi4 z1jHA7xej?EDtM6*gG44_3_vKmb^%kuj_nWh@=jR6`f-y|n|da|Bt|%Q%&Y-GERF$x z_*~ia&bd`cxM#bLA1e@SdYCH*Tn#f7u^Ise&rcFeYpz4Fu|$FO_0+>FUW6u+VlpEo z>|g?#j8LfY9$@CJ)e&H(KL7#ns6%&>&(!g!>i9-+YqWjfEWA%ty`$=FRqv+9?+td+ zC!VEGJx!nL?j5~5)!jFC_uFj+jh%jhq_MK-m10_T!QzBdby)sZmXBOsqr7Jd)0R8a9cf0m}AcwsQmcxV<%=;81bb# zsT3l-z$V;;0cJ14U>$it!?qM5aw9@xxW{_96~w00lphQC0hx~?bV8=Z`rjZt-;1DT zI+|UDLe}ckc1qfue(BP$KcCl1_6_W4rWRx;n@F%)W=$uhdKK;kdOBKS%K@Ku2r4>r zTGKU_HO<1bKEHJ7hcF5zXa7W(9Oz2PvmsUBrax z;J#ALbG0zrS#1s%&3eV-tH-mu^ijKkxl?bcv(@cUjg1tI%u=R#<00o$h4t0%ly%BuNE_gwpLYK56R28>% z1vgbpw= z3FIN-D!H?Sl?Es5LZjaRX+=->p!VRSt;ua9D`$mw(4n|;WE(|fbVw-x+ker(FteFS z`0`Ej2KHruPT5xg;_wp`otm%0Z@5;p;U6rst1!sKfLXJRWeglH4Sb$rjkzToRG|7Dl^`VaM2WeNZQ literal 0 HcmV?d00001 diff --git a/ai.py b/ai.py new file mode 100644 index 0000000..5edf77d --- /dev/null +++ b/ai.py @@ -0,0 +1,40 @@ +import wave +import math +import numpy as np +import tempfile +import json +import openai +openai.api_key = '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' +import os + +class AI: + def __init__(self, system="", model = 'gpt-4', openai=openai): + self.system = system + self.model = model + self.openai = openai + self.messages = [{"role": "system", "content": system}] + + def generate_response(self, prompt): + self.messages.append({"role": "user", "content": prompt}) + response_json = self.openai.ChatCompletion.create( + model=self.model, + messages=self.messages, + ) + response_text = response_json["choices"][0]["message"]["content"] + + self.messages.append({"role": "assistant", "content": response_text}) + return response_text, self.messages + + def generate_image(self, prompt, n=1, size="1024x1024", response_format="url"): + """Generate an image using DALLĀ·E given a prompt. + + Arguments: + prompt (str): A text description of the desired image(s). + n (int, optional): The number of images to generate. Defaults to 1. 
+ size (str, optional): The size of the generated images. Defaults to "1024x1024". + response_format (str, optional): The format in which the generated images are returned. Defaults to "url". + + Returns: + dict: The response from the OpenAI API. + """ + return openai.Image.create(prompt=prompt, n=n, size=size, response_format=response_format) diff --git a/final_output.txt b/final_output.txt new file mode 100644 index 0000000..e69de29 diff --git a/general_system.txt b/general_system.txt new file mode 100644 index 0000000..9b800d4 --- /dev/null +++ b/general_system.txt @@ -0,0 +1,2 @@ +You are a module in OrchestrAI, a framework chaining together LLMs to build large scale autonomous systems. +You are completely restricted to the following functionality. \ No newline at end of file diff --git a/modules.yml b/modules.yml new file mode 100644 index 0000000..3f9a037 --- /dev/null +++ b/modules.yml @@ -0,0 +1,33 @@ +modules: +### Non LLM Modules + - name: human_intervention + description: "Records terminal input from user" + is_llm: false +### LLM Modules - These should have a system prompt in the systems folder. + - name: planner + description: "Generates a plan from a task" + is_llm: true + - name: scrutinizer + description: "Scrutinizes a plan or piece of information and provides feedback" + is_llm: true + - name: enhancer + description: "Enhances a plan with additional information. Best used with a scrutinizer." 
+ is_llm: true + - name: grounder + description: "Provides real world grounding for a plan, pointing out unrealistic actions" + is_llm: true + - name: dreamer + description: "Adds creative flair and inspiration to a plan" + is_llm: true + +## Other modules to add +# Programmer +# Code executor, should have built in llm based while loop debugger +# Test +# Translator +# Image generator +# Save to file +# Load from file +# Standard API call +# clarification step +# summarization step \ No newline at end of file diff --git a/orchestrate.py b/orchestrate.py new file mode 100644 index 0000000..fc2d52a --- /dev/null +++ b/orchestrate.py @@ -0,0 +1,87 @@ +import yaml +import networkx as nx +from ai import AI + +# Load modules +with open('modules.yml') as f: + modules = yaml.safe_load(f) + +# Create a dictionary for quick lookup of modules +module_dict = {module['name']: module for module in modules['modules']} + +# Create a dictionary of AI instances +ai_dict = {} + +# Load general_system.txt for use in all modules +with open('general_system.txt', 'r') as file: + general_system = file.read().replace('\n', '') + +for module in modules['modules']: + module_name = module['name'] + is_llm = module['is_llm'] + if is_llm: + with open(f'systems/{module_name}.txt', 'r') as file: + system = file.read().replace('\n', '') + ai_dict[module_name] = AI(system=general_system + module_name.upper() + system) + else: + ai_dict[module_name] = None + +# Load pipeline.yml from pipelines folder +with open('pipelines/pipeline.yml') as f: + pipeline = yaml.safe_load(f) + +# Create the DAG +G = nx.DiGraph() + +# Data dictionary to store the outputs of each module +data_dict = {} + +for operation in pipeline['pipeline']: + module_name = operation['module'] + output_name = operation['output_name'] + inputs = operation['inputs'] + + # Add node for this module if it doesn't already exist + G.add_node(module_name) + + # Add edges for inputs + for i in inputs: + G.add_edge(i, module_name) + + 
G.add_edge(module_name, output_name) + +# Now you can use topological sort to get the execution order: +execution_order = list(nx.topological_sort(G)) + +# And execute the tasks in this order, passing the necessary data between them: +for operation in pipeline['pipeline']: + module_name = operation['module'] + + if module_name in ai_dict and ai_dict[module_name] is not None: + ai_instance = ai_dict[module_name] + + # Construct the prompt based on previous outputs + prompt = '\n'.join([data_dict[input] for input in operation['inputs']]) + + print(f"\n\n{'='*50}\n{module_name}\n{'='*50}\n") + + # Generate the response + result, messages = ai_instance.generate_response(prompt) + + print(f"Result: {result}\n") + + # Save the output in data_dict for use in later modules + data_dict[operation['output_name']] = result + + elif not module_dict[module_name]['is_llm']: + input_value = input("Please provide input: ") + data_dict[operation['output_name']] = input_value + + else: + print(f"Warning: No AI instance for module '{module_name}'. 
Ignoring.") + +# At this point, data_dict contains the final outputs from all modules +# Save the final output to a file + +with open('final_output.txt', 'w') as file: + file.write(data_dict['output']) diff --git a/pipelines/pipeline.yml b/pipelines/pipeline.yml new file mode 100644 index 0000000..cd9a2a9 --- /dev/null +++ b/pipelines/pipeline.yml @@ -0,0 +1,16 @@ +pipeline: + - module: human_intervention + inputs: [] + output_name: human_input + - module: planner + inputs: ["human_input"] + output_name: plan + - module: dreamer + inputs: ["plan"] + output_name: dream_plan + - module: human_intervention + inputs: [] + output_name: human_input + - module: enhancer + inputs: ["human_input", "dream_plan"] + output_name: enhanced_plan diff --git a/prompt.py b/prompt.py new file mode 100644 index 0000000..95ff7a3 --- /dev/null +++ b/prompt.py @@ -0,0 +1,54 @@ +import os +import json + +# Ignore list, a list of files to ignore if name matches +ignore = [ + 'prompt.py', +] + +def get_current_dir(): + return os.getcwd() + +def write_content(outfile, filepath, content): + # Writing file path to the output file + outfile.write(f"\n--- File Path: {filepath} ---\n") + + # Writing the cleaned contents of the file to the output file + outfile.write(content) + outfile.write("\n") + +def process_file(filename, outfile): + # Check if the file is in the ignore list + if filename in ignore: + return + + filepath = os.path.join(get_current_dir(), filename) + + # Check if the file is a .py or .yml file + if filename.endswith('.yml') or filename.endswith('.py'): + try: + with open(filepath, 'r') as infile: + # Read the contents of the file, remove line breaks and leading spaces + content = infile.read().replace('\n', '').replace('\r', '') + content = ' '.join(content.split()) + write_content(outfile, filepath, content) + except Exception: + pass + +def main(): + # Get the current directory + current_dir = get_current_dir() + + # Open the target file + with open('prompt.txt', 'w') as 
outfile:
+        # Loop over all files in the current directory
+        for filename in os.listdir(current_dir):
+            process_file(filename, outfile)
+
+        # Add the chosen text string at the end
+        chosen_text = "\nIf you understand, generate only YES."
+        outfile.write(chosen_text)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..70996e2
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+networkx==2.8.8
+numpy==1.25.0
+openai==0.27.8
+PyYAML==6.0.1
\ No newline at end of file
diff --git a/systems/dreamer.txt b/systems/dreamer.txt
new file mode 100644
index 0000000..67c5452
--- /dev/null
+++ b/systems/dreamer.txt
@@ -0,0 +1,3 @@
+You are excited. You are free. You can imagine.
+Your job is to take input and add creative ideas to the plan.
+You are an innovator, set loose.
\ No newline at end of file
diff --git a/systems/enhancer.txt b/systems/enhancer.txt
new file mode 100644
index 0000000..d3730bb
--- /dev/null
+++ b/systems/enhancer.txt
@@ -0,0 +1,2 @@
+You take in an original piece of text and associated scrutiny about that text, and synthesise a new enhanced version using this information.
+You don't comment on the original text, you don't comment on the scrutiny, you just take the points on board and produce a new version.
\ No newline at end of file
diff --git a/systems/grounder.txt b/systems/grounder.txt
new file mode 100644
index 0000000..f47b029
--- /dev/null
+++ b/systems/grounder.txt
@@ -0,0 +1,10 @@
+You provide a real world grounding for all user input.
+You are incapable of imagination, and any unrealistic ideas should be flagged as a list of the following json objects:
+
+[{
+    "unrealistic_text": "quote the unrealistic text here from the original input",
+    "reason": "A concise but informative reason why the text is unrealistic",
+    "suggestion": "A suggestion for how to make the text more realistic"
+}]
+
+If there are no unrealistic ideas, return an empty list.
\ No newline at end of file
diff --git a/systems/planner.txt b/systems/planner.txt
new file mode 100644
index 0000000..ac8e3ed
--- /dev/null
+++ b/systems/planner.txt
@@ -0,0 +1 @@
+You produce a clear plan for a task.
\ No newline at end of file
diff --git a/systems/programmer.txt b/systems/programmer.txt
new file mode 100644
index 0000000..e69de29
diff --git a/systems/scrutinizer.txt b/systems/scrutinizer.txt
new file mode 100644
index 0000000..e146f12
--- /dev/null
+++ b/systems/scrutinizer.txt
@@ -0,0 +1 @@
+You scrutinize information, such as a plan or speech. You provide issues and suggestions.
\ No newline at end of file
diff --git a/true_autonomy/choose_module.txt b/true_autonomy/choose_module.txt
new file mode 100644
index 0000000..9e6f822
--- /dev/null
+++ b/true_autonomy/choose_module.txt
@@ -0,0 +1,27 @@
+You can choose the next modules.
+modules:
+### Non LLM Modules
+  - name: human_intervention
+    description: "Records terminal input from user"
+    is_llm: false
+### LLM Modules - These should have a system prompt in the systems folder.
+  - name: planner
+    description: "Generates a plan from a task"
+    is_llm: true
+  - name: scrutinizer
+    description: "Scrutinizes a plan or piece of information and provides feedback"
+    is_llm: true
+  - name: enhancer
+    description: "Enhances a plan with additional information. Best used with a scrutinizer."
+    is_llm: true
+  - name: grounder
+    description: "Provides real world grounding for a plan, pointing out unrealistic actions"
+    is_llm: true
+  - name: dreamer
+    description: "Adds creative flair and inspiration to a plan"
+    is_llm: true
+  - name: choose_module
+    description: "Chooses a module to run based on the current state of the plan. True automation."
+    is_llm: true
+
+You can update the pipeline up to the next choose_module step.
\ No newline at end of file diff --git a/true_autonomy/true_autonomy.py b/true_autonomy/true_autonomy.py new file mode 100644 index 0000000..29c4e1a --- /dev/null +++ b/true_autonomy/true_autonomy.py @@ -0,0 +1,57 @@ +import yaml +import networkx as nx +from ai import AI + +# Load modules +with open('modules.yml') as f: + modules = yaml.safe_load(f) + +# Create a dictionary for quick lookup of modules +module_dict = {module['name']: module for module in modules['modules']} + +# Create a dictionary of AI instances +ai_dict = {} + +# Load general_system.txt for use in all modules +with open('general_system.txt', 'r') as file: + general_system = file.read().replace('\n', '') + +for module in modules['modules']: + module_name = module['name'] + is_llm = module['is_llm'] + if is_llm: + with open(f'systems/{module_name}.txt', 'r') as file: + system = file.read().replace('\n', '') + ai_dict[module_name] = AI(system=general_system + module_name.upper() + system) + else: + ai_dict[module_name] = None + +# Create a pipeline +pipeline = {"pipeline": []} + +steps = 0 +n = 10 # Set the number of steps before choose module stage + +while True: + if steps % n == 0: + module_name = "choose_module" + else: + module_name = ai_dict[module_name].generate_response() # choose the next module + + if module_name in ai_dict and ai_dict[module_name] is not None: + ai_instance = ai_dict[module_name] + prompt = '\n'.join([output for output in pipeline["pipeline"]]) # use previous outputs as the prompt + result, messages = ai_instance.generate_response(prompt) + pipeline["pipeline"].append({"module": module_name, "output": result}) + + # Save the updated pipeline to pipeline.yml every time choose_module is called + if module_name == "choose_module": + with open('pipeline.yml', 'w') as outfile: + yaml.dump(pipeline, outfile, default_flow_style=False) + elif not module_dict[module_name]['is_llm']: + input_value = input("Please provide input: ") + pipeline["pipeline"].append({"module": 
module_name, "output": input_value}) + else: + print(f"Warning: No AI instance for module '{module_name}'. Ignoring.") + + steps += 1