Fix generate messages on error in gui #2380

Merged: 2 commits, Nov 19, 2024
9 changes: 3 additions & 6 deletions g4f/Provider/Copilot.py
@@ -93,13 +93,11 @@ def create_completion(
             if return_conversation:
                 yield Conversation(conversation_id, session.cookies.jar, access_token)
             prompt = format_prompt(messages)
-            if debug.logging:
-                print(f"Copilot: Created conversation: {conversation_id}")
+            debug.log(f"Copilot: Created conversation: {conversation_id}")
         else:
             conversation_id = conversation.conversation_id
             prompt = messages[-1]["content"]
-            if debug.logging:
-                print(f"Copilot: Use conversation: {conversation_id}")
+            debug.log(f"Copilot: Use conversation: {conversation_id}")

         images = []
         if image is not None:
@@ -143,8 +141,7 @@ async def get_access_token_and_cookies(cls, proxy: str = None):
         if not has_nodriver:
             raise MissingRequirementsError('Install "nodriver" package | pip install -U nodriver')
         user_data_dir = user_config_dir("g4f-nodriver") if has_platformdirs else None
-        if debug.logging:
-            print(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
+        debug.log(f"Copilot: Open nodriver with user_dir: {user_data_dir}")
         browser = await nodriver.start(
             user_data_dir=user_data_dir,
             browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
63 changes: 35 additions & 28 deletions g4f/Provider/needs_auth/OpenaiChat.py
@@ -282,8 +282,11 @@ async def get_generated_image(cls, session: StreamSession, headers: dict, element
         Raises:
             RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
         """
-        prompt = element["metadata"]["dalle"]["prompt"]
-        file_id = element["asset_pointer"].split("file-service://", 1)[1]
+        try:
+            prompt = element["metadata"]["dalle"]["prompt"]
+            file_id = element["asset_pointer"].split("file-service://", 1)[1]
+        except Exception as e:
+            raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
         try:
             async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
                 cls._update_request_args(session)
@@ -380,9 +383,8 @@ async def create_async_generator(
                 image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
             except Exception as e:
                 image_request = None
-                if debug.logging:
-                    print("OpenaiChat: Upload image failed")
-                    print(f"{e.__class__.__name__}: {e}")
+                debug.log("OpenaiChat: Upload image failed")
+                debug.log(f"{e.__class__.__name__}: {e}")
             model = cls.get_model(model)
             if conversation is None:
                 conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
@@ -419,11 +421,10 @@ async def create_async_generator(
                     user_agent=cls._headers["user-agent"],
                     proof_token=RequestConfig.proof_token
                 )
-                if debug.logging:
-                    print(
-                        'Arkose:', False if not need_arkose else RequestConfig.arkose_token[:12]+"...",
-                        'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
-                    )
+                [debug.log(text) for text in (
+                    f"Arkose: {'False' if not need_arkose else RequestConfig.arkose_token[:12]+'...'}",
+                    f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+                )]
                 ws = None
                 if need_arkose:
                     async with session.post(f"{cls.url}/backend-api/register-websocket", headers=cls._headers) as response:
@@ -444,6 +445,7 @@
                     }
                     if conversation.conversation_id is not None:
                         data["conversation_id"] = conversation.conversation_id
+                        debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
                     if action != "continue":
                         messages = messages if conversation_id is None else [messages[-1]]
                         data["messages"] = cls.create_messages(messages, image_request)
@@ -468,8 +470,7 @@
                         cls._update_request_args(session)
                         if response.status == 403 and max_retries > 0:
                             max_retries -= 1
-                            if debug.logging:
-                                print(f"Retry: Error {response.status}: {await response.text()}")
+                            debug.log(f"Retry: Error {response.status}: {await response.text()}")
                             await asyncio.sleep(5)
                             continue
                         await raise_for_status(response)
@@ -553,20 +554,27 @@ async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation
                elif isinstance(v, dict):
                    if fields.conversation_id is None:
                        fields.conversation_id = v.get("conversation_id")
-                       fields.message_id = v.get("message", {}).get("id")
-                       c = v.get("message", {}).get("content", {})
-                       if c.get("content_type") == "multimodal_text":
-                           generated_images = []
-                           for element in c.get("parts"):
-                               if element.get("content_type") == "image_asset_pointer":
-                                   generated_images.append(
-                                       cls.get_generated_image(session, cls._headers, element)
-                                   )
-                               elif element.get("content_type") == "text":
-                                   for part in element.get("parts", []):
-                                       yield part
-                           for image_response in await asyncio.gather(*generated_images):
-                               yield image_response
+                       debug.log(f"OpenaiChat: New conversation: {fields.conversation_id}")
+                   m = v.get("message", {})
+                   if m.get("author", {}).get("role") == "assistant":
+                       fields.message_id = v.get("message", {}).get("id")
+                       c = m.get("content", {})
+                       if c.get("content_type") == "multimodal_text":
+                           generated_images = []
+                           for element in c.get("parts"):
+                               if isinstance(element, str):
+                                   debug.log(f"No image or text: {line}")
+                               elif element.get("content_type") == "image_asset_pointer":
+                                   generated_images.append(
+                                       cls.get_generated_image(session, cls._headers, element)
+                                   )
+                               elif element.get("content_type") == "text":
+                                   for part in element.get("parts", []):
+                                       yield part
+                           for image_response in await asyncio.gather(*generated_images):
+                               yield image_response
+                   else:
+                       debug.log(f"OpenaiChat: {line}")
                   return
        if "error" in line and line.get("error"):
            raise RuntimeError(line.get("error"))
@@ -579,8 +587,7 @@ async def nodriver_auth(cls, proxy: str = None):
             user_data_dir = user_config_dir("g4f-nodriver")
         else:
             user_data_dir = None
-        if debug.logging:
-            print(f"Open nodriver with user_dir: {user_data_dir}")
+        debug.log(f"Open nodriver with user_dir: {user_data_dir}")
         browser = await nodriver.start(
             user_data_dir=user_data_dir,
             browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
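Note on the `iter_messages_line` rework: content is now only yielded for events whose `message.author.role` is `assistant`; everything else goes to `debug.log`. Roughly, with hypothetical, trimmed-down event payloads (not taken from the PR, just mirroring the keys the code reads):

```python
# Hypothetical, trimmed-down backend events as the new branches see them:
assistant_event = {
    "conversation_id": "conv-123",
    "message": {
        "id": "msg-2",
        "author": {"role": "assistant"},
        "content": {"content_type": "multimodal_text", "parts": [
            {"content_type": "text", "parts": ["Hello!"]},
        ]},
    },
}
tool_event = {
    "conversation_id": "conv-123",
    "message": {"id": "msg-1", "author": {"role": "tool"}, "content": {}},
}
# The first event yields "Hello!" to the caller; the second is only logged,
# so tool/system chatter no longer leaks into the generated answer.
```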
5 changes: 2 additions & 3 deletions g4f/Provider/openai/har_file.py
@@ -71,8 +71,7 @@ def readHAR():
                     if "openai-sentinel-turnstile-token" in v_headers:
                         RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
                 except Exception as e:
-                    if debug.logging:
-                        print(f"Read proof token: {e}")
+                    debug.log(f"Read proof token: {e}")
                 if arkose_url == v['request']['url']:
                     RequestConfig.arkose_request = parseHAREntry(v)
                 elif v['request']['url'] == start_url or v['request']['url'].startswith(conversation_url):
@@ -150,7 +149,7 @@ def getN() -> str:
     return base64.b64encode(timestamp.encode()).decode()

 async def get_request_config(proxy: str) -> RequestConfig:
-    if RequestConfig.arkose_request is None or RequestConfig.access_token is None:
+    if RequestConfig.access_token is None:
         readHAR()
     if RequestConfig.arkose_request is not None:
         RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)
9 changes: 4 additions & 5 deletions g4f/client/service.py
@@ -83,11 +83,10 @@ def get_model_and_provider(model : Union[Model, str],
     if not ignore_stream and not provider.supports_stream and stream:
         raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')

-    if debug.logging:
-        if model:
-            print(f'Using {provider.__name__} provider and {model} model')
-        else:
-            print(f'Using {provider.__name__} provider')
+    if model:
+        debug.log(f'Using {provider.__name__} provider and {model} model')
+    else:
+        debug.log(f'Using {provider.__name__} provider')

     debug.last_provider = provider
     debug.last_model = model
7 changes: 6 additions & 1 deletion g4f/debug.py
@@ -4,4 +4,9 @@
 version_check: bool = True
 last_provider: ProviderType = None
 last_model: str = None
-version: str = None
+version: str = None
+log_handler: callable = print
+
+def log(text):
+    if logging:
+        log_handler(text)
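The new `debug.log` helper centralizes the `if debug.logging:` guard that every provider previously repeated, and `log_handler` makes the output destination pluggable. A minimal sketch of redirecting logs into a buffer instead of stdout (the `collected` list is illustrative, not part of this PR):

```python
from g4f import debug

collected = []                        # illustrative buffer a frontend could drain
debug.logging = True                  # logging stays opt-in, as before
debug.log_handler = collected.append  # swap out the default print handler

debug.log("Copilot: Created conversation: abc123")
assert collected == ["Copilot: Created conversation: abc123"]
```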
5 changes: 5 additions & 0 deletions g4f/gui/client/index.html
@@ -170,6 +170,10 @@ <h3>Settings</h3>
                 <i class="fa-solid fa-download"></i>
                 <a href="" onclick="return false;">Export Conversations</a>
             </button>
+            <button id="showLog">
+                <i class="fa-solid fa-terminal"></i>
+                <a href="" onclick="return false;">Show log</a>
+            </button>
         </div>
     </div>
     <div class="conversation">
@@ -257,6 +261,7 @@ <h3>Settings</h3>
             </div>
         </div>
     </div>
+    <div class="log hidden"></div>
 </div>
 <div class="mobile-sidebar">
     <i class="fa-solid fa-bars"></i>
11 changes: 10 additions & 1 deletion g4f/gui/client/static/css/style.css
@@ -1051,13 +1051,22 @@ a:-webkit-any-link {
     padding: var(--inner-gap) var(--inner-gap) var(--inner-gap) 0;
 }

-.settings, .images {
+.settings, .log {
     width: 100%;
     display: flex;
     flex-direction: column;
     overflow: auto;
 }

+.log {
+    white-space: pre-wrap;
+    font-family: Consolas, "Andale Mono WT", "Andale Mono", "Lucida Console", "Lucida Sans Typewriter", "DejaVu Sans Mono", "Bitstream Vera Sans Mono", "Liberation Mono", "Nimbus Mono L", Monaco, "Courier New", Courier, monospace;
+}
+
+.log.hidden {
+    display: none;
+}
+
 .settings .paper {
     flex-direction: column;
     min-width: 400px;
45 changes: 33 additions & 12 deletions g4f/gui/client/static/js/chat.v1.js
@@ -19,13 +19,15 @@ const systemPrompt = document.getElementById("systemPrompt");
 const settings = document.querySelector(".settings");
 const chat = document.querySelector(".conversation");
 const album = document.querySelector(".images");
+const log_storage = document.querySelector(".log");

 const optionElements = document.querySelectorAll(".settings input, .settings textarea, #model, #model2, #provider")

 let provider_storage = {};
 let message_storage = {};
 let controller_storage = {};
-let content_storage = {}
+let content_storage = {};
+let error_storage = {};

 messageInput.addEventListener("blur", () => {
     window.scrollTo(0, 0);
@@ -256,14 +258,14 @@ const delete_conversations = async () => {
 const handle_ask = async () => {
     messageInput.style.height = "82px";
     messageInput.focus();
-    window.scrollTo(0, 0);
+    await scroll_to_bottom();

     let message = messageInput.value;
     if (message.length <= 0) {
         return;
     }
     messageInput.value = "";
-    count_input()
+    await count_input()
     await add_conversation(window.conversation_id, message);

     if ("text" in fileInput.dataset) {
@@ -396,9 +398,12 @@ async function add_message_chunk(message, message_index) {
     } else if (message.type == "message") {
         console.error(message.message)
     } else if (message.type == "error") {
-        window.error = message.error
+        error_storage[message_index] = message.error
         console.error(message.error);
         content_map.inner.innerHTML += `<p><strong>An error occured:</strong> ${message.error}</p>`;
+        let p = document.createElement("p");
+        p.innerText = message.error;
+        log_storage.appendChild(p);
     } else if (message.type == "preview") {
         content_map.inner.innerHTML = markdown_render(message.preview);
     } else if (message.type == "content") {
@@ -418,6 +423,10 @@ async function add_message_chunk(message, message_index) {
         content_map.inner.innerHTML = html;
         content_map.count.innerText = count_words_and_tokens(message_storage[message_index], provider_storage[message_index]?.model);
         highlight(content_map.inner);
+    } else if (message.type == "log") {
+        let p = document.createElement("p");
+        p.innerText = message.log;
+        log_storage.appendChild(p);
     }
     window.scrollTo(0, 0);
     if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
@@ -469,7 +478,6 @@ const ask_gpt = async (message_index = -1, message_id) => {
     `;

     controller_storage[message_index] = new AbortController();
-    let error = false;

     let content_el = document.getElementById(`gpt_${message_id}`)
     let content_map = content_storage[message_index] = {
@@ -478,8 +486,7 @@
         count: content_el.querySelector('.count'),
     }

-    message_box.scrollTop = message_box.scrollHeight;
-    window.scrollTo(0, 0);
+    await scroll_to_bottom();
     try {
         const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
         const file = input && input.files.length > 0 ? input.files[0] : null;
@@ -501,7 +508,7 @@
             auto_continue: auto_continue,
             api_key: api_key
         }, file, message_index);
-        if (!error) {
+        if (!error_storage[message_index]) {
             html = markdown_render(message_storage[message_index]);
             content_map.inner.innerHTML = html;
             highlight(content_map.inner);
@@ -513,26 +520,31 @@
     } catch (e) {
         console.error(e);
         if (e.name != "AbortError") {
-            error = true;
+            error_storage[message_index] = true;
             content_map.inner.innerHTML += `<p><strong>An error occured:</strong> ${e}</p>`;
         }
     }
     delete controller_storage[message_index];
-    if (!error && message_storage[message_index]) {
+    if (!error_storage[message_index] && message_storage[message_index]) {
         const message_provider = message_index in provider_storage ? provider_storage[message_index] : null;
         await add_message(window.conversation_id, "assistant", message_storage[message_index], message_provider);
         await safe_load_conversation(window.conversation_id);
     } else {
         let cursorDiv = message_box.querySelector(".cursor");
         if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
     }
-    window.scrollTo(0, 0);
-    message_box.scrollTop = message_box.scrollHeight;
+    await scroll_to_bottom();
     await remove_cancel_button();
     await register_message_buttons();
     await load_conversations();
     regenerate.classList.remove("regenerate-hidden");
 };
+
+async function scroll_to_bottom() {
+    window.scrollTo(0, 0);
+    message_box.scrollTop = message_box.scrollHeight;
+}
+
 const clear_conversations = async () => {
     const elements = box_conversations.childNodes;
     let index = elements.length;
@@ -631,6 +643,7 @@ const set_conversation = async (conversation_id) => {
     await load_conversation(conversation_id);
     load_conversations();
     hide_sidebar();
+    log_storage.classList.add("hidden");
 };

 const new_conversation = async () => {
@@ -643,6 +656,7 @@ const new_conversation = async () => {
     }
     load_conversations();
     hide_sidebar();
+    log_storage.classList.add("hidden");
     say_hello();
 };
@@ -941,6 +955,7 @@ function open_settings() {
         settings.classList.add("hidden");
         chat.classList.remove("hidden");
     }
+    log_storage.classList.add("hidden");
 }

 function open_album() {
@@ -1472,3 +1487,9 @@ if (SpeechRecognition) {
     }
     });
 }
+
+document.getElementById("showLog").addEventListener("click", ()=> {
+    log_storage.classList.remove("hidden");
+    settings.classList.add("hidden");
+    log_storage.scrollTop = log_storage.scrollHeight;
+});
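Taken together with the `g4f/debug.py` change, these GUI hooks suggest the intended flow: the backend captures `debug.log` output via `log_handler` and streams it to the browser as `{"type": "log"}` chunks, which `add_message_chunk` appends to the new `.log` panel, while errors are both rendered in the chat and mirrored into the log. A minimal server-side sketch, assuming a generator-based streaming endpoint (the `iter_response` name and wiring are illustrative, not taken from this PR):

```python
import json
from g4f import debug

def iter_response(create_completion, **kwargs):
    """Yield newline-delimited JSON chunks, interleaving captured debug logs."""
    logs = []
    debug.logging = True
    debug.log_handler = logs.append            # capture instead of printing
    try:
        for chunk in create_completion(**kwargs):
            while logs:                        # flush pending log lines first
                yield json.dumps({"type": "log", "log": logs.pop(0)}) + "\n"
            yield json.dumps({"type": "content", "content": str(chunk)}) + "\n"
    except Exception as e:
        # surfaced in the chat and mirrored into the log panel by the frontend
        yield json.dumps({"type": "error", "error": f"{e.__class__.__name__}: {e}"}) + "\n"
```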