Commit 8e03bc0
version 4
samshapley committed Aug 27, 2023
1 parent f2bd067 commit 8e03bc0
Showing 12 changed files with 261 additions and 125 deletions.
72 changes: 69 additions & 3 deletions agent.py
@@ -2,17 +2,83 @@
from orchestrate import execute_pipeline
import helpers as h
import time
import wandb
import datetime
from wandb.sdk.data_types.trace_tree import Trace
import globals
import yaml
import json
import pandas as pd

# Load the configuration
with open('config.yml', 'r') as f:
    config = yaml.safe_load(f)

# Set the wandb_enabled flag
wandb_enabled = config['wandb_enabled']
pipeline_path = "pipelines/" + config['pipeline'] + ".yml"

if wandb_enabled:
    wandb.init(project="OrchestrAI")
    wandb.config.wandb_enabled = wandb_enabled

def main():
    print("\033[95m ------- Welcome to OrchestrAI ------\033[00m")
    time.sleep(1)

    # Load the desired pipeline
    pipeline = h.load_pipeline(pipeline_path)

    # Get the name of the pipeline file
    file_name = pipeline_path.split("/")[-1].split(".")[0]

    if wandb_enabled:
        globals.agent_start_time_ms = round(datetime.datetime.now().timestamp() * 1000)

        # Create a root span for the pipeline
        root_span = Trace(
            name="Pipeline",
            kind="agent",
            start_time_ms=globals.agent_start_time_ms,
            metadata={"pipeline_name": file_name},
        )

        # The agent calls into an LLMChain
        globals.chain_span = Trace(
            name="LLMChain",
            kind="chain",
            start_time_ms=globals.agent_start_time_ms,
        )

        root_span.add_child(globals.chain_span)

    # Execute the pipeline
    try:
        execute_pipeline(pipeline)
    except Exception as e:
        print(f"An error occurred during pipeline execution: {e}")
    finally:
        if wandb_enabled:
            agent_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)
            root_span._span.end_time_ms = agent_end_time_ms

            # Log the root span to Weights & Biases
            root_span.log(name="pipeline_trace")

            # Open and load the JSON memory log
            with open('memory_log.json', 'r') as f:
                data = json.load(f)

            # Convert the JSON data to a pandas DataFrame
            df = pd.DataFrame(data)

            # Log the DataFrame to Weights & Biases, then finish the run
            wandb.log({"memory_log": wandb.Table(dataframe=df)})
            wandb.finish()

    print("\033[95m ------- OrchestrAI finished ------\033[00m")

if __name__ == "__main__":
    main()
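The final logging step assumes memory_log.json deserializes into something pandas can tabulate, most naturally a list of flat records, one per module call. A minimal sketch of that assumed shape (the field names here are illustrative, not taken from the repo):

import json
import pandas as pd

# Hypothetical memory_log.json contents: a list of flat records.
records = [
    {"module": "code_planner", "prompt": "Plan a CLI app", "response": "1. Parse args ..."},
    {"module": "code_writer", "prompt": "Write main.py", "response": "import argparse ..."},
]

with open('memory_log.json', 'w') as f:
    json.dump(records, f)

# Mirrors the load in main(): each record becomes one row of the wandb table.
with open('memory_log.json', 'r') as f:
    df = pd.DataFrame(json.load(f))
print(list(df.columns))  # ['module', 'prompt', 'response']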
123 changes: 90 additions & 33 deletions ai.py
@@ -1,47 +1,104 @@
import openai
import json
import yaml

# Load the configuration
with open('config.yml', 'r') as f:
    config = yaml.safe_load(f)

# Set the wandb_enabled flag
wandb_enabled = config['wandb_enabled']
openai.api_key = config['openai_api_key']

# Check if the API key works
try:
    openai.Model.list()
except openai.error.AuthenticationError:
    raise ValueError("Invalid OpenAI API key")

from wandb.sdk.data_types.trace_tree import Trace
import datetime
import helpers as h
import globals

class AI:
    def __init__(self, module_name, model='gpt-4', temperature=0.7, openai=openai):
        self.model = model
        self.openai = openai
        self.temperature = temperature
        self.module_name = module_name
        self.system = h.load_system_prompt(module_name)
        self.messages = [{"role": "system", "content": self.system}]

    def generate_response(self, prompt):
        self.messages.append({"role": "user", "content": prompt})

        try:
            response = self.openai.ChatCompletion.create(
                model=self.model,
                stream=True,
                messages=self.messages,
                temperature=self.temperature,
            )

            chat = []
            token_count = 0
            for chunk in response:
                delta = chunk["choices"][0]["delta"]
                msg = delta.get("content", "")
                print(msg, end="")
                chat.append(msg)
                token_count += len(msg.split())  # rough estimate of token usage

            print()

            response_text = "".join(chat)
            llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)  # logged in milliseconds
            status_code = "success"
            status_message = None
            token_usage = {"total_tokens": token_count}

        except Exception as e:
            llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)  # logged in milliseconds
            status_code = "error"
            status_message = str(e)
            response_text = ""
            token_usage = {}

        if wandb_enabled:
            # Calculate the runtime of the LLM call
            runtime = llm_end_time_ms - globals.agent_start_time_ms

            # Create a child span in wandb
            llm_span = Trace(
                name="child_span",
                kind="llm",  # kind can be "llm", "chain", "agent" or "tool"
                status_code=status_code,
                status_message=status_message,
                metadata={"temperature": self.temperature,
                          "token_usage": token_usage,
                          "runtime_ms": runtime,
                          "module_name": self.module_name,
                          "model_name": self.model},
                start_time_ms=globals.agent_start_time_ms,
                end_time_ms=llm_end_time_ms,
                inputs={"system_prompt": self.system, "query": prompt},
                outputs={"response": response_text}
            )

            # Add the child span to the chain span
            globals.chain_span.add_child(llm_span)

            # Record the chain span's inputs and outputs
            globals.chain_span.add_inputs_and_outputs(
                inputs={"query": prompt},
                outputs={"response": response_text})

            # Update the chain span's end time
            globals.chain_span._span.end_time_ms = llm_end_time_ms

            # Log the child span to wandb
            llm_span.log(name="pipeline_trace")

        self.messages.append({"role": "assistant", "content": response_text})
        return response_text, self.messages
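With the new constructor, callers select a module by name instead of passing a raw system prompt string; the prompt is looked up via helpers.load_system_prompt. A hedged usage sketch (the module name 'scripting' is illustrative and assumes a matching system prompt file exists):

from ai import AI

# The module name drives the system prompt lookup; 'scripting' is an
# illustrative name, not necessarily one shipped with the repo.
agent = AI(module_name='scripting', model='gpt-4', temperature=0.7)

# Streams the completion to stdout and returns the final text plus the
# accumulated message history.
response_text, messages = agent.generate_response("Summarise this repo in one line.")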
3 changes: 3 additions & 0 deletions config.yml
@@ -0,0 +1,3 @@
openai_api_key: 'OPENAI_API_KEY'
wandb_enabled: false
pipeline: "engineering_pipeline"
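For reference, both consumers of this file shown above resolve it the same way; a compact sketch of how the pipeline key becomes a file path:

import yaml

with open('config.yml', 'r') as f:
    config = yaml.safe_load(f)

# "engineering_pipeline" -> "pipelines/engineering_pipeline.yml"
pipeline_path = "pipelines/" + config['pipeline'] + ".yml"
print(pipeline_path)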
23 changes: 23 additions & 0 deletions helpers.py
@@ -3,6 +3,7 @@
import subprocess
import sys
import yaml
import matplotlib.pyplot as plt

def load_pipeline(file_path):
    """Load a pipeline configuration from a YAML file."""
@@ -108,3 +109,25 @@ def extract_codebase(directory='generated_code', ignore_list=[]):
            pass

    return "\n".join(result_content)

def visualize_pipeline(nx, G):
    # Increase distance between nodes by setting the k parameter
    pos = nx.spring_layout(G, seed=42, k=2)

    # Get module names as labels
    labels = nx.get_node_attributes(G, 'module')

    # Draw the nodes
    nx.draw_networkx_nodes(G, pos, node_color='skyblue', node_size=300)

    # Draw the edges with arrows
    nx.draw_networkx_edges(G, pos, arrowstyle="->", arrowsize=10, edge_color="gray")

    # Draw the node labels with reduced font size
    nx.draw_networkx_labels(G, pos, labels, font_size=8)

    # Draw the edge labels with reduced font size
    edge_labels = nx.get_edge_attributes(G, 'label')
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=6)

    plt.savefig("pipeline.png", dpi=300, bbox_inches='tight')
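A quirk worth noting: visualize_pipeline takes the networkx module itself as its first argument rather than importing it. A minimal call might look like this (node and edge labels are illustrative; the function reads the 'module' node attribute and 'label' edge attribute):

import networkx as nx
import helpers as h

# Build a toy two-step pipeline graph.
G = nx.DiGraph()
G.add_node(0, module="code_planner")
G.add_node(1, module="code_writer")
G.add_edge(0, 1, label="plan")

h.visualize_pipeline(nx, G)  # writes pipeline.png at 300 dpi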