+
+
+ )
+}
\ No newline at end of file
diff --git a/openai-concurrent-functions/app.py b/openai-concurrent-functions/app.py
index 56fe74e15..0cb227f20 100644
--- a/openai-concurrent-functions/app.py
+++ b/openai-concurrent-functions/app.py
@@ -62,19 +62,19 @@ async def run_conversation(message: cl.Message):
message_history.append({"role": "user", "content": message.content})
# Step 1: send the conversation and available functions to the model
- msg = cl.Message(author="Assistant", content="")
- await msg.send()
+
response = await client.chat.completions.create(
- model="gpt-3.5-turbo-1106",
+ model="gpt-4o-mini",
messages=message_history,
tools=tools,
tool_choice="auto", # auto is default, but we'll be explicit
)
response_message = response.choices[0].message
- msg.content = response_message.content or ""
- await msg.update()
+ if response_message.content:
+ msg = cl.Message(author="Assistant", content=response_message.content)
+ await msg.send()
tool_calls = response_message.tool_calls
# Step 2: check if the model wanted to call a function
@@ -112,7 +112,7 @@ async def call_function(tool_call):
# Extend conversation with all function responses
message_history.extend(function_responses)
second_response = await client.chat.completions.create(
- model="gpt-3.5-turbo-1106",
+ model="gpt-4o-mini",
messages=message_history,
) # get a new response from the model where it can see the function response
second_message = second_response.choices[0].message
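Note on the change above: the empty placeholder message is replaced by a conditional send, so the UI no longer shows a blank assistant bubble while tool calls resolve. A minimal sketch of the resulting flow, assuming `client`, `tools`, and `message_history` from the surrounding app:

```python
import chainlit as cl

async def respond(client, tools, message_history):
    # Ask the model; it may answer in text, request tool calls, or both.
    response = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=message_history,
        tools=tools,
        tool_choice="auto",
    )
    response_message = response.choices[0].message
    # Only surface a message when the model produced text, so no empty
    # assistant bubble appears while tools are being resolved.
    if response_message.content:
        await cl.Message(author="Assistant", content=response_message.content).send()
    return response_message.tool_calls or []
```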
diff --git a/openai-concurrent-streaming/app.py b/openai-concurrent-streaming/app.py
index 49ed37a30..13fa97c95 100644
--- a/openai-concurrent-streaming/app.py
+++ b/openai-concurrent-streaming/app.py
@@ -7,7 +7,7 @@
openai_client = AsyncClient(api_key=os.environ.get("OPENAI_API_KEY"))
-model_name = "gpt-3.5-turbo"
+model_name = "gpt-4o-mini"
settings = {
"temperature": 0.3,
"max_tokens": 500,
diff --git a/openinterpreter/.env.example b/openinterpreter/.env.example
deleted file mode 100644
index 9847a1df1..000000000
--- a/openinterpreter/.env.example
+++ /dev/null
@@ -1 +0,0 @@
-OPENAI_API_KEY=
\ No newline at end of file
diff --git a/openinterpreter/README.md b/openinterpreter/README.md
deleted file mode 100644
index 3e060a463..000000000
--- a/openinterpreter/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: 'OpenInterpreter x Chainlit'
-tags: ['openinterpreter', 'chainlit']
----
-
-# Welcome to OpenInterpreter x Chainlit! 🚀🤖
-
-**OpenInterpreter**: https://github.com/KillianLucas/open-interpreter/
-
-**Chainlit**: https://github.com/chainlit/chainlit
-
-Open Interpreter lets LLMs run code (Python, JavaScript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running `interpreter` after installing.
-
-
-This Chainlit cookbook example lets you do the same from a web app, with additional UI features such as file uploads, downloads, images, and other UI elements.
-
-It combines Open Interpreter and Chainlit into a code-interpreter-like app.
-
-## High-Level Description
-
-The integration allows users to:
-1. Select an LLM of their choice.
-2. Optionally upload files to be used by the LLM.
-3. Interact with the LLM through a chat interface, sending and receiving messages that can include code execution requests.
-
-## Quickstart
-
-To get started with this integration, follow these steps:
-
-1. **Install OpenInterpreter**: Follow the installation instructions on the [OpenInterpreter GitHub page](https://github.com/KillianLucas/open-interpreter/).
-
-2. **Install Chainlit**: Chainlit can be installed via pip:
-```shell
-pip install chainlit
-```
-
-3. **Set Up Environment Variables**: Ensure that your OpenAI API key is set as an environment variable:
-```shell
-export OPENAI_API_KEY='your_api_key_here'
-```
-
-4. **Run the Application**: Navigate to the directory containing the `app.py` file and run:
-```shell
-chainlit run app.py
-```
-
-5. **Interact with the Web App**: Open your web browser to the address provided by Chainlit (usually `http://localhost:8000`) and start interacting with the application.
-
-## Code Definitions
-
-- `CustomStdout`: A class that overrides the standard output to redirect print statements to the Chainlit UI.
-- `CustomStdin`: A class that overrides the standard input to receive input from the Chainlit UI.
-- `@cl.on_chat_start`: A decorator that initializes the custom standard input/output classes and sets up the chat settings.
-- `@cl.on_settings_update`: A decorator that updates the selected LLM model based on user input.
-- `@cl.on_message`: A decorator that handles incoming messages, processes file uploads, and interacts with the OpenInterpreter.
-
-[Demo](openinterpreter-chainlit.mp4)
-
----
-
-Enjoy the power of LLMs in your browser with OpenInterpreter x Chainlit!
\ No newline at end of file
diff --git a/openinterpreter/app.py b/openinterpreter/app.py
deleted file mode 100644
index ef8e29b56..000000000
--- a/openinterpreter/app.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import interpreter
-import chainlit as cl
-from chainlit.input_widget import Select
-
-import sys, os
-
-interpreter.api_key = os.getenv("OPENAI_API_KEY")
-# interpreter.debug_mode=True
-
-
-# 1. Custom StdOut class to output prints to Chainlit UI
-# 2. Custom StdIn class to receive input from Chainlit UI
-# WARNING: Do not print inside these classes, or the overridden stdout will recurse into an infinite loop
-class CustomStdout:
- def __init__(self, original_stdout):
- self.original_stdout = original_stdout
-
- def write(self, data):
-        # React to the data being written: forward it to the Chainlit UI.
- # language = ""
- # if interpreter.active_block and type(interpreter.active_block).__name__ == "CodeBlock":
- # if interpreter.active_block.language:
- # language = interpreter.active_block.language
- if data != "\n" and data != "":
- # cl.run_sync(cl.Message(content=data, language=language).send())
- cl.run_sync(cl.Message(content=data).send())
- # Write the data to the original stdout (so it still gets displayed)
- self.original_stdout.write(data)
-
- def flush(self):
- # If needed, you can also implement flush
- self.original_stdout.flush()
-
-
-class CustomStdin:
- def __init__(self, original_stdin):
- self.original_stdin = original_stdin
-
- def readline(self):
- response_from_ui = cl.run_sync(cl.AskUserMessage(content="").send())
- return str(response_from_ui["content"])
-
- def flush(self):
- self.original_stdin.flush()
-
-
-@cl.on_chat_start
-async def start():
- sys.stdout = CustomStdout(sys.__stdout__)
- sys.stdin = CustomStdin(sys.__stdin__)
- settings = await cl.ChatSettings(
- [
- Select(
- id="model",
- label="OpenAI - Model",
- values=["gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-32k"],
- initial_index=0,
- ),
- ]
- ).send()
- interpreter.model = settings["model"]
-
-
-@cl.on_settings_update
-async def setup_agent(settings):
- interpreter.model = settings["model"]
-    await cl.Message(content=f"Selected OpenAI model {settings['model']}").send()
-
-
-@cl.on_message
-async def main(message: cl.Message):
- if message.elements:
- for element in message.elements:
- file_name = element.name
- content = element.content
-            # To also show the file contents, append: Content: {content.decode('utf-8')}
- await cl.Message(content=f"Uploaded file: {file_name}\n").send()
-
- # Save the file locally
- with open(file_name, "wb") as file:
- file.write(content)
- interpreter.load(
- [{"role": "assistant", "content": f"User uploaded file: {file_name}"}]
- )
- interpreter.chat(message.content)
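
The deleted app works by swapping `sys.stdout` so Open Interpreter's prints land in the Chainlit UI; the warning about infinite loops exists because any `print` inside `write()` would re-enter the override. A minimal, Chainlit-independent sketch of that guard (names here are illustrative, not from the original):

```python
import sys

class ForwardingStdout:
    def __init__(self, original, sink):
        self.original = original
        self.sink = sink  # e.g. a function that posts to a UI

    def write(self, data):
        if data.strip():
            # Call the sink directly; printing here would re-enter write()
            # through the overridden sys.stdout and recurse forever.
            self.sink(data)
        self.original.write(data)

    def flush(self):
        self.original.flush()

# Usage: forward prints to a list while still echoing to the terminal.
captured = []
sys.stdout = ForwardingStdout(sys.__stdout__, captured.append)
print("hello")  # echoed to the terminal and appended to `captured`
sys.stdout = sys.__stdout__
```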
diff --git a/openinterpreter/chainlit.md b/openinterpreter/chainlit.md
deleted file mode 100644
index 4a2a07342..000000000
--- a/openinterpreter/chainlit.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Welcome to OpenInterpreter x Chainlit! 🚀🤖
-
-**OpenInterpreter**: https://github.com/KillianLucas/open-interpreter/
-
-**Chainlit**: https://github.com/chainlit/chainlit
-
-Open Interpreter lets LLMs run code (Python, JavaScript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running `interpreter` after installing.
-
-
-This Chainlit cookbook example lets you do the same from a web app, with additional UI features such as file uploads, downloads, images, and other UI elements.
-
-It combines Open Interpreter and Chainlit into a code-interpreter-like app.
-
-## High-Level Description
-
-The integration allows users to:
-1. Select an LLM of their choice.
-2. Optionally upload files to be used by the LLM.
-3. Interact with the LLM through a chat interface, sending and receiving messages that can include code execution requests.
-
-## Quickstart
-
-To get started with this integration, follow these steps:
-
-1. **Install OpenInterpreter**: Follow the installation instructions on the [OpenInterpreter GitHub page](https://github.com/KillianLucas/open-interpreter/).
-
-2. **Install Chainlit**: Chainlit can be installed via pip:
-```shell
-pip install chainlit
-```
-
-3. **Set Up Environment Variables**: Ensure that your OpenAI API key is set as an environment variable:
-```shell
-export OPENAI_API_KEY='your_api_key_here'
-```
-
-4. **Run the Application**: Navigate to the directory containing the `app.py` file and run:
-```shell
-chainlit run app.py
-```
-
-5. **Interact with the Web App**: Open your web browser to the address provided by Chainlit (usually `http://localhost:8000`) and start interacting with the application.
-
-## Code Definitions
-
-- `CustomStdout`: A class that overrides the standard output to redirect print statements to the Chainlit UI.
-- `CustomStdin`: A class that overrides the standard input to receive input from the Chainlit UI.
-- `@cl.on_chat_start`: A decorator that initializes the custom standard input/output classes and sets up the chat settings.
-- `@cl.on_settings_update`: A decorator that updates the selected LLM model based on user input.
-- `@cl.on_message`: A decorator that handles incoming messages, processes file uploads, and interacts with the OpenInterpreter.
-
-[Demo](openinterpreter-chainlit.mp4)
-
----
-
-Enjoy the power of LLMs in your browser with OpenInterpreter x Chainlit!
\ No newline at end of file
diff --git a/openinterpreter/openinterpreter-chainlit.mp4 b/openinterpreter/openinterpreter-chainlit.mp4
deleted file mode 100644
index b51a0dd55..000000000
Binary files a/openinterpreter/openinterpreter-chainlit.mp4 and /dev/null differ
diff --git a/reflection-70b/app.py b/reflection-70b/app.py
deleted file mode 100644
index 020bba0c6..000000000
--- a/reflection-70b/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import requests
-import os
-import chainlit as cl
-from dotenv import load_dotenv
-
-# Load environment variables from .env file
-load_dotenv()
-
-
-def call_model(messages):
- # Model ID for production deployment
- model_id = os.getenv("MODEL_ID")
- # Read secrets from environment variables
- baseten_api_key = os.getenv("BASETEN_API_KEY")
- # Call model endpoint
- resp = requests.post(
- f"https://model-{model_id}.api.baseten.co/production/predict",
- headers={"Authorization": f"Api-Key {baseten_api_key}"},
- json={"messages": messages, "max_tokens": 1024, "temperature": 0.7},
- stream=True,
- )
-
- # Stream the generated tokens
- for content in resp.iter_content():
- yield content.decode("utf-8")
-
-
-@cl.set_starters
-async def set_starters():
- return [
- cl.Starter(
- label="Reflection-70B", message="how many R's are there in Strawberry?"
- )
- ]
-
-
-@cl.on_chat_start
-def init_history():
- system_prompt = "You are a world-class AI system, capable of complex reasoning and reflection. Reason through the query inside tags, and then provide your final response inside