From 8301c70e7a683218b4c15f5d4f7c5ebabf1f5d58 Mon Sep 17 00:00:00 2001
From: Jacolon Walker
Date: Sun, 24 Mar 2024 01:00:24 -0700
Subject: [PATCH] added Mixtral 8x7B Instruct model. Prepping to port framework (#1)

---
 .gitignore |  1 +
 app.py     | 11 ++++++++++-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 09dd1da..1b0aba6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 venv/*
+conversation*
diff --git a/app.py b/app.py
index 88f61a3..4aada18 100644
--- a/app.py
+++ b/app.py
@@ -1,9 +1,10 @@
 import streamlit as st
 from llama_cpp import Llama
+import json
 
 # Initialize the Llama model
 llm = Llama(
-    model_path="../llama.cpp/models/mistral-7B-v0.1/ggml-model-Q4_K_M.gguf",
+    model_path="../llama.cpp/models/mixtral-8x7B/ggml-model-Q4_K_M.gguf",
     n_ctx=4096,
     n_gpu_layers=-1,
     chat_format="chatml"
@@ -58,6 +59,7 @@ def export_conversation_history():
             file.write(line)
         st.success('Conversation exported successfully!')
 
+
 if st.button('Submit'):
     if user_query:
         st.session_state['conversation_history'].append({"sender": "Researcher", "message": user_query})
@@ -107,6 +109,13 @@ def export_conversation_history():
             display_debug_info()
     with col2:
         st.markdown("**Show Debug Information**")
+
+st.markdown("---")  # Horizontal line for visual separation
+st.markdown("## 🛠 Integration Controls")
+
+# Slack Integration Button
+if st.button("Slack"):
+    pass  # stub
 
 st.markdown("""---""")  # Horizontal line for visual separation
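
For context on how the reconfigured model is driven, the sketch below shows how a `Llama` instance created with `chat_format="chatml"` is typically queried through llama-cpp-python's `create_chat_completion` API. Only the constructor arguments come from the patch above; the system prompt, user prompt, and `max_tokens` value are illustrative assumptions.

```python
from llama_cpp import Llama

# Same constructor arguments as in the patch; the model path must point at a
# locally available Mixtral 8x7B GGUF quantization.
llm = Llama(
    model_path="../llama.cpp/models/mixtral-8x7B/ggml-model-Q4_K_M.gguf",
    n_ctx=4096,
    n_gpu_layers=-1,
    chat_format="chatml",
)

# create_chat_completion accepts OpenAI-style role/content messages and
# returns an OpenAI-style response dict.
response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful research assistant."},  # illustrative
        {"role": "user", "content": "Summarize the conversation so far."},        # illustrative
    ],
    max_tokens=512,
)
print(response["choices"][0]["message"]["content"])
```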
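The patch adds `import json` without a visible call site, and the new `.gitignore` entry (`conversation*`) suggests conversation dumps will be written locally. Below is a minimal sketch of what a JSON export of `st.session_state['conversation_history']` might look like, assuming the sender/message dict shape appended on Submit in `app.py`; the function name and file path are hypothetical.

```python
import json

import streamlit as st


def export_conversation_history_json(path: str = "conversation_history.json") -> None:
    """Hypothetical JSON variant of the existing text export."""
    # conversation_history is assumed to be a list of
    # {"sender": ..., "message": ...} dicts, matching the entries
    # appended when the Submit button is pressed.
    history = st.session_state.get("conversation_history", [])
    with open(path, "w", encoding="utf-8") as file:
        json.dump(history, file, indent=2, ensure_ascii=False)
    st.success("Conversation exported successfully!")
```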
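The Slack button in the new Integration Controls block is only a stub. One plausible direction is posting the conversation to a standard Slack incoming webhook; the `SLACK_WEBHOOK_URL` environment variable, the handler name, and the message format in this sketch are assumptions, not part of the patch.

```python
import os

import requests
import streamlit as st


def post_conversation_to_slack() -> None:
    """Hypothetical handler for the stubbed Slack button."""
    webhook_url = os.environ.get("SLACK_WEBHOOK_URL")  # assumed configuration
    if not webhook_url:
        st.error("SLACK_WEBHOOK_URL is not set.")
        return
    text = "\n".join(
        f"{entry['sender']}: {entry['message']}"
        for entry in st.session_state.get("conversation_history", [])
    )
    # Slack incoming webhooks accept a simple JSON payload with a "text" field.
    resp = requests.post(webhook_url, json={"text": text or "(empty conversation)"}, timeout=10)
    if resp.ok:
        st.success("Conversation posted to Slack.")
    else:
        st.error(f"Slack webhook returned {resp.status_code}.")
```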