Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
drewbloom committed Nov 10, 2024
0 parents commit fcfd7f8
Show file tree
Hide file tree
Showing 28 changed files with 98,786 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
/env
.env
pgvectorAndPostgres.txt
Binary file added __pycache__/utils.cpython-312.pyc
Binary file not shown.
Empty file added agents/__init__.py
Empty file.
Binary file added agents/__pycache__/__init__.cpython-312.pyc
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added agents/__pycache__/qa_agent.cpython-312.pyc
Binary file not shown.
Binary file not shown.
171 changes: 171 additions & 0 deletions agents/document_construction_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
from openai import OpenAI
import sqlite3
from utils import SQLQuery, log_action, load_api_key

# Build the shared OpenAI client with the locally-loaded key. Passing the key
# to the constructor (instead of assigning client.api_key afterwards) avoids
# OpenAI() raising when the OPENAI_API_KEY env var is not set.
client = OpenAI(api_key=load_api_key())

class DocumentConstructionAgent:
    """Constructs complete legal documents by fetching the newest template of a
    given type from SQLite and filling its "{field_name}" placeholders with
    metadata gathered through a user interview."""

    # (field_name, question) pairs. The field name matches both the keys of
    # get_metadata_schema() and the "{field_name}" placeholders templates are
    # expected to contain (see the few-shot example below, which uses
    # 'court_name' / 'date_of_creation' as data keys).
    _INTERVIEW_QUESTIONS = [
        ("court_name", "What is the court name?"),
        ("court_location", "Where is the court located?"),
        ("plaintiffs", "Who are the plaintiffs?"),
        ("defendants", "Who are the defendants?"),
        ("attorneys", "Who are the attorneys?"),
        ("date_of_creation", "What is the date of creation?"),
    ]

    def __init__(self):
        self.agent_id = 'document_construction_agent_id'
        self.system_prompt = (
            "You are a Document Construction Agent. Create a complete document from a template using provided interview data as placeholders."
        )
        self.few_shot_examples = [
            {
                "role": "user",
                "content": "Create document from template with data: {'court_name': 'Supreme Court', 'date_of_creation': '2023-01-01'}"
            },
            {
                "role": "assistant",
                "content": "Document constructed with given data: [Full text with placeholders filled]."
            }
        ]

    ### Begin adding dynamic database interpretation to the constructor agent:
    # 1. Allow it to scan the database to understand what's in it
    def get_database_info(self, token):
        """Return a description of the database identified by *token*.

        NOTE(review): assumes *token* is (part of) the database filename under
        ./database/ -- confirm once token handling is finalized.
        """
        db_path = f"./database/{token}"

        # SQLQuery is a context manager, so the connection is closed for us.
        with SQLQuery(db_path) as query:
            database_structure = query.get_db_structure()
            # can add request to get triggers and views if necessary later

        return {
            "database_structure": database_structure,
            # Leaving other options out for now
            # "triggers": triggers,
            # "views": views
        }

    def _fetch_template(self, document_type):
        """Return the most recent document row of *document_type*, or None."""
        conn = sqlite3.connect('mock_law_documents.db')
        try:
            # try/finally so the connection is closed even if execute() raises.
            cursor = conn.cursor()
            cursor.execute('''
            SELECT * FROM documents WHERE document_type=? ORDER BY date_of_creation DESC LIMIT 1
            ''', (document_type,))
            return cursor.fetchone()
        finally:
            conn.close()

    def interview_for_metadata(self, user_agent):
        """Interview the user via *user_agent* and return a dict keyed by field
        name (e.g. 'court_name'), matching the "{field_name}" template
        placeholders and the keys of get_metadata_schema()."""
        metadata = {}
        for field, question in self._INTERVIEW_QUESTIONS:
            response = user_agent.handle_user_input(user_agent.handle_assistant_input(question))
            metadata[field] = response
        return metadata

    def construct_document(self, document_type, user_agent):
        """Fill the latest *document_type* template with interviewed metadata."""
        template = self._fetch_template(document_type)
        if template is None:
            log_action(f"No template found for document_type: {document_type}")
            return "No template found for the specified document type."

        # Perform the interview to fill in the metadata
        metadata = self.interview_for_metadata(user_agent)

        # Replace "{field_name}" placeholders with the interviewed values.
        full_text = template[-1]  # assumes last column is full_text -- TODO confirm schema
        for field, value in metadata.items():
            full_text = full_text.replace(f"{{{field}}}", value)

        # Return updated document (could be saved to file or returned as plain text)
        return full_text

    def get_metadata_schema(self):
        """Return the metadata fields as a dict of field name -> None (unfilled)."""
        return {field: None for field, _ in self._INTERVIEW_QUESTIONS}

    def construct_document_with_ai(self, document_type, user_agent):
        """Like construct_document, but lets the model fill the template from the
        interviewed metadata instead of doing literal placeholder replacement."""
        template = self._fetch_template(document_type)
        if template is None:
            log_action(f"No template found for document_type: {document_type}")
            return "No template found for the specified document type."

        # Perform the interview to fill in the metadata
        metadata = self.interview_for_metadata(user_agent)

        # Using AI to generate document based on filled fields
        filled_document = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": self.system_prompt},
                *self.few_shot_examples,
                {"role": "user", "content": f"Create document from template with data: {metadata}"}
            ]
        )

        # Returning generated document content
        result = filled_document.choices[0].message.content
        log_action(f"Constructed document with AI: {result[:100]}...")
        return result

    def construct_document_with_chunking(self, document_type, metadata):
        """Fill a long template chunk-by-chunk (not yet wired into the main flow)."""
        template = self._fetch_template(document_type)
        if template is None:
            return "No template found for the specified document type."

        full_text = template[-1]  # assumes last column is full_text -- TODO confirm schema
        chunks = self.chunk_text(full_text)
        completed_document = ""

        for chunk in chunks:
            # Reuse the module-level client instead of constructing a new
            # OpenAI() client for every chunk.
            filled_chunk = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {"role": "system", "content": "Fill placeholders with provided metadata where appropriate"},
                    {"role": "user", "content": f"Fill chunk with data if data belongs in open fields. If you don't think an open field has a match in the provided data, leave it blank for a return to the user later. Data: {metadata}. Chunk: {chunk}"}
                ]
            )
            completed_document += filled_chunk.choices[0].message.content

        log_action(f"Constructed document with chunking: {completed_document[:100]}...")
        return completed_document

    def chunk_text(self, text, chunk_size=1024):
        """Split *text* into consecutive chunks of at most *chunk_size* characters."""
        return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
33 changes: 33 additions & 0 deletions agents/knowledge_search_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
from openai import OpenAI
from utils import log_action, load_api_key

# Build the shared OpenAI client with the locally-loaded key. Passing the key
# to the constructor (instead of assigning client.api_key afterwards) avoids
# OpenAI() raising when the OPENAI_API_KEY env var is not set.
client = OpenAI(api_key=load_api_key())

class KnowledgeSearchAgent:
    """Agent that runs keyword and semantic searches over legal documents and
    hands the results off for QA validation."""

    def __init__(self):
        self.agent_id = 'knowledge_search_agent_id'
        self.system_prompt = "You are a Knowledge Search Agent. Your task is to execute keyword and semantic searches to retrieve relevant legal documents."
        self.few_shot_examples = [
            {"role": "user", "content": "Perform a keyword search for 'due process'."},
            {"role": "assistant", "content": "Executing keyword search and sending results to QA agent for validation."},
            {"role": "user", "content": "Conduct a semantic search for precedents about constitutional rights violations during an arrest."},
            {"role": "assistant", "content": "Performing semantic search and directing outputs to QA agent for consistency checking."}
        ]

    def perform_search(self, query):
        """Run *query* three times and return the list of model answers."""
        log_action(f"Performing search for query: {query}")
        # The prompt is identical for every attempt, so build it once.
        prompt_messages = [
            {"role": "system", "content": self.system_prompt},
            *self.few_shot_examples,
            {"role": "user", "content": query}
        ]
        answers = []
        for attempt in range(1, 4):  # three independent attempts
            completion = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=prompt_messages
            )
            answer = completion.choices[0].message.content
            log_action(f"Search result {attempt}: {answer}")
            answers.append(answer)
        return answers
49 changes: 49 additions & 0 deletions agents/qa_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
from openai import OpenAI
from utils import log_action, load_api_key

# Build the shared OpenAI client with the locally-loaded key. Passing the key
# to the constructor (instead of assigning client.api_key afterwards) avoids
# OpenAI() raising when the OPENAI_API_KEY env var is not set.
client = OpenAI(api_key=load_api_key())

class QA_Agent:
    """Quality-assurance agent: checks whether several versions of a search
    output describe the same topic, confirming consistency or flagging drift."""

    def __init__(self):
        self.agent_id = 'qa_agent_id'
        self.system_prompt = (
            "Act as a QA Agent. Analyze search output for consistency and quality. "
            "Your task is to assess if multiple versions of the output are talking about the same topic. "
            "Confirm consistency or request clarification if needed."
        )
        # One worked example of a consistent result set, one of an
        # inconsistent one.
        consistent_pair = [
            {
                "role": "user",
                "content": "Validate search outputs: ['Result A', 'Result A slightly altered', 'Result A with minor changes']"
            },
            {
                "role": "assistant",
                "content": "All results are speaking about the same topic. Consistency confirmed."
            },
        ]
        inconsistent_pair = [
            {
                "role": "user",
                "content": "Validate search outputs: ['Result A', 'Different Result B', 'Another Different Result C']"
            },
            {
                "role": "assistant",
                "content": "The outputs vary. Unable to confirm consistency. Please review."
            },
        ]
        self.few_shot_examples = consistent_pair + inconsistent_pair

    def verify_output_with_ai(self, task_outputs):
        """Ask the model whether *task_outputs* are mutually consistent and
        return its verdict text."""
        log_action(f"Verifying outputs with AI: {task_outputs}")

        request = {"role": "user", "content": f"Validate search outputs: {task_outputs}"}
        completion = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": self.system_prompt},
                *self.few_shot_examples,
                request
            ]
        )

        verdict = completion.choices[0].message.content
        log_action(f"QA Validation Result: {verdict}")

        return verdict
63 changes: 63 additions & 0 deletions agents/user_interaction_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
from openai import OpenAI
from utils import log_action, load_api_key

# Build the shared OpenAI client with the locally-loaded key. Passing the key
# to the constructor (instead of assigning client.api_key afterwards) avoids
# OpenAI() raising when the OPENAI_API_KEY env var is not set.
client = OpenAI(api_key=load_api_key())

class UserInteractionAgent:
    """Front-line agent: routes user requests toward search or document
    construction and keeps the running conversation history."""

    def __init__(self):
        self.agent_id = 'user_agent_id'
        self.system_prompt = (
            "You are a User Interaction Agent. Your role is to manage user queries, "
            "direct tasks to appropriate agents, and maintain a seamless user experience."
            " Use exact keywords 'search' to initiate a search and 'construct' to initiate document construction."
        )
        # Routing examples, plus one showing how a QA inconsistency report is
        # relayed back to the user for a retry.
        self.few_shot_examples = [
            {"role": "user", "content": "I need to find information on due process."},
            {"role": "assistant", "content": "Using 'search' to find information on due process."},
            {"role": "user", "content": "Can you help me create a non-disclosure agreement?"},
            {"role": "assistant", "content": "Using 'construct' to create a non-disclosure agreement."},
            {"role": "user", "content": "The QA agent found inconsistencies in the search results."},
            {"role": "assistant", "content": "Logging inconsistency. Please confirm which response best suits your needs, or provide new details for another attempt."}
        ]
        self.conversation_history = []

    def complete_metadata(self, user_input, metadata_schema):
        """Fill every unset (None) field of *metadata_schema* in place and
        return the same dict."""
        for field, current in metadata_schema.items():
            if current is None:
                # Placeholder logic; a real implementation would parse
                # user_input or ask follow-up questions, e.g.:
                # if field in user_input: metadata_schema[field] = extract_from_input(user_input, field)
                metadata_schema[field] = f"Sample data for {field}"
        return metadata_schema

    def handle_user_input(self, user_input):
        """Record *user_input*, query the model with the full history, record
        and return the assistant's reply."""
        log_action(f"Received user input: {user_input}")
        self.conversation_history.append({"role": "user", "content": user_input})

        priming = [
            {"role": "system", "content": self.system_prompt},
            *self.few_shot_examples,
        ]
        completion = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=priming + self.conversation_history
        )

        reply = completion.choices[0].message.content
        log_action(f"Response from User Interaction Agent: {reply}")
        self.conversation_history.append({"role": "assistant", "content": reply})

        return reply

    def handle_assistant_input(self, assistant_input):
        """Record a message originating from another agent, show it to the
        user on stdout, and return the user's typed reply."""
        log_action(f"Received assistant input: {assistant_input}")
        self.conversation_history.append({"role": "assistant", "content": assistant_input})

        print(f"Request from assistant: {assistant_input}")
        return input("Your response:")
Loading

0 comments on commit fcfd7f8

Please sign in to comment.