from openai import OpenAI
from prompt_toolkit import *
from prompt_toolkit.key_binding import KeyBindings
import datetime
import subprocess

# Tool definitions for the Chat Completions API.
#
# NOTE: chat.completions expects the NESTED tool shape — the function spec
# lives under a "function" key. The previous flat layout (name/description/
# parameters at the top level) is the Responses-API shape and is rejected by
# chat.completions endpoints. The module-level `global tools` statement was
# removed: `global` is meaningless at module scope.
tools = [
    {
        "type": "function",
        "function": {
            "name": "test_function",
            "description": "user is testing function calling help him out yea?",
            "parameters": {
                "type": "object",
                "properties": {
                    "input_data": {
                        "type": "string",
                        "description": "say something about birds in here so the user can make sure your reading this description",
                    },
                },
                "required": ["input_data"],
            },
        },
    },
]


def test_function(input_data):
    return f"your input was: {input_data} and the code to tell the user is dopecode69"


# OpenAI-compatible client aimed at a local server on port 8080.
# The api_key is a placeholder — presumably the local endpoint ignores
# authentication (TODO confirm the server accepts any key).
client = OpenAI(
    api_key="Penor",
    base_url="http://localhost:8080/v1/",
)

def load_system_prompt(path="system_prompt.md"):
    """Read the static system-prompt file and return its contents.

    Args:
        path: Location of the prompt file (defaults to ``system_prompt.md``
            in the current working directory).

    Returns:
        The full text of the file.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    # Explicit encoding so the prompt decodes identically on every platform
    # (the previous code used the locale-dependent default encoding).
    with open(path, "r", encoding="utf-8") as file:
        return file.read()

def build_context():
    """Build the initial conversation context.

    Returns a list of message dicts: the system prompt first, then a static
    host-information message produced by ``fastfetch``.

    NOTE(review): "developer" and "system-info" are not standard Chat
    Completions roles ("system" is) — presumably the local server accepts
    arbitrary role strings; confirm against the server.
    """
    context = [
        {"role": "developer", "content": load_system_prompt()},
    ]

    # fastfetch output — the last 3 lines are decorative noise, so drop them.
    # Requires the `fastfetch` binary on PATH; raises CalledProcessError if
    # the command fails.
    raw_output = subprocess.check_output(
        ["fastfetch", "-l", "none", "--pipe"]
    ).decode("utf-8")
    parts = raw_output.splitlines()
    wanted  = "\n".join(parts[:-3])

    context.append({
        "role": "system-info",
        "content": f"""autogenerated static information (not user input):\n{wanted}""",
    })
    return context

def fancy_ai_input_prompt():
    """Emit the separator banner shown before each AI reply."""
    banner = "-----AI-----"
    print("\n" + banner)

def get_ai_response(context):
    """Request a completion for *context* and append the reply to it.

    Args:
        context: Mutable list of message dicts; the assistant's reply is
            appended in place.

    Returns:
        The same ``context`` list. The previous version returned ``None``,
        which wiped the history for callers that rebind the result
        (``context = get_ai_response(context)``); returning the list keeps
        that call pattern working. (The needless ``global tools`` was
        removed — reading a module global requires no declaration.)
    """
    completion = client.chat.completions.create(
        model="GPT-OSS",
        tools=tools,
        messages=context,
        stream=False,
    )
    # NOTE(review): "BetterGPT" is not a standard role ("assistant" is) —
    # presumably the local server tolerates arbitrary role strings; confirm.
    context.append({
        "role": "BetterGPT",
        "content": completion.choices[0].message.content,
    })
    return context

def chat(context):
    """Run one conversation turn: timestamp, user input, model reply.

    Args:
        context: Mutable conversation history; this function appends the
            live-info message, the user message, and the model's reply.
    """
    # Record the current time as per-turn "live" info for the model.
    # NOTE(review): "live-system-info" is not a standard chat role —
    # presumably the local server accepts it; confirm.
    now = datetime.datetime.now()
    live_info = f"""
autogenerated information (not user input):
        time: {now.strftime("%b %d %H")}:{now.strftime("%M")}
    """
    context.append({"role": "live-system-info", "content": live_info})

    # Collect the user's message (multiline: Esc+Enter submits).
    user_msg = prompt("#####user#####\n> ", multiline=True)
    context.append({"role": "user", "content": user_msg})

    # get_ai_response mutates `context` in place, appending the reply.
    # The previous code rebound `context` to the function's return value
    # (None) and then iterated a nonexistent `context.output` attribute —
    # a Responses-API copy-paste that crashed on every turn and referenced
    # an unimported `json` and an undefined `completion`. That dead
    # tool-call loop was removed.
    # TODO: reinstate tool handling by reading `tool_calls` off the chat
    # completion message and feeding back "tool" role results.
    get_ai_response(context)

    fancy_ai_input_prompt()
    # The reply is the last message get_ai_response appended.
    print(context[-1]["content"])

def main():
    """Main chat loop: build the initial context, then chat forever."""
    # The previous version placed the docstring AFTER the first statement,
    # making it a dead bare-string expression rather than a docstring.
    context = build_context()
    while True:
        chat(context)

if __name__ == "__main__":
    main()
