diff --git a/.gitattributes b/.gitattributes
index 379dafb..e7c5a3b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +1,2 @@
-/static/**/image*.png export-ignore
\ No newline at end of file
+/static/**/image*.png export-ignore
+/.github/FUNDING.yml export-ignore
\ No newline at end of file
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..976dafa
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,13 @@
+# These are supported funding model platforms
+
+github: [yaroslavyaroslav] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+# patreon: # Replace with a single Patreon username
+# open_collective: # Replace with a single Open Collective username
+# ko_fi: # Replace with a single Ko-fi username
+# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+# liberapay: # Replace with a single Liberapay username
+# issuehunt: # Replace with a single IssueHunt username
+# otechie: # Replace with a single Otechie username
+# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
+# custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/buffer.py b/buffer.py
new file mode 100644
index 0000000..5f70168
--- /dev/null
+++ b/buffer.py
@@ -0,0 +1,35 @@
+from typing import Optional
+
+class SublimeBuffer():
+
+    def __init__(self, view) -> None:
+        self.view = view
+
+    def prompt_completion(self, mode: str, completion: str, placeholder: Optional[str] = None):
+        # Escape literal dollars so they survive snippet interpolation.
+        completion = completion.replace("$", "\\$")
+        if mode == 'insertion':
+            result = self.view.find(placeholder, 0, 1)
+            if result:
+                self.view.sel().clear()
+                self.view.sel().add(result)
+                # Replace the placeholder with the specified replacement text
+                self.view.run_command("insert_snippet", {"contents": completion})
+            return
+
+        elif mode == 'completion':
+            region = self.view.sel()[0]
+            if region.a <= region.b:
+                region.a = region.b
+            else:
+                region.b = region.a
+
+            self.view.sel().clear()
+            self.view.sel().add(region)
+            # Insert the completion at the caret position
+            self.view.run_command("insert_snippet", {"contents": completion})
+            return
+
+        elif mode == 'edition': # it's just replacing all given text for now.
+            region = self.view.sel()[0]
+            self.view.run_command("insert_snippet", {"contents": completion})
+            return
\ No newline at end of file
diff --git a/errors/OpenAIException.py b/errors/OpenAIException.py
new file mode 100644
index 0000000..d237f15
--- /dev/null
+++ b/errors/OpenAIException.py
@@ -0,0 +1,22 @@
+from sublime import error_message
+from logging import exception
+
+class OpenAIException(Exception):
+    """Exception raised for errors in the input.
+
+    Attributes:
+        message -- explanation of the error
+    """
+
+    def __init__(self, message: str):
+        self.message = message
+        super().__init__(self.message)
+
+class ContextLengthExceededException(OpenAIException): ...
+
+class UnknownException(OpenAIException): ...
+
+
+def present_error(title: str, error: OpenAIException):
+    exception(f"{title}: {error.message}")
+    error_message(f"{title}\n{error.message}")
\ No newline at end of file
diff --git a/messages.json b/messages.json
index 94d92a7..7ab140d 100644
--- a/messages.json
+++ b/messages.json
@@ -2,5 +2,6 @@
     "install": "README.md",
     "2.0.0": "messages/2.0.0.txt",
     "2.0.3": "messages/2.0.3.txt",
-    "2.0.4": "messages/2.0.4.txt"
+    "2.0.4": "messages/2.0.4.txt",
+    "2.1.0": "messages/2.1.0.txt"
 }
\ No newline at end of file
diff --git a/messages/2.0.0.txt b/messages/2.0.0.txt
index 8842c5c..eca291a 100644
--- a/messages/2.0.0.txt
+++ b/messages/2.0.0.txt
@@ -15,7 +15,7 @@ ChatGPT mode works the following way:
 4. If you would like to fetch chat history to another window manually, you can do that by running the `OpenAI: Refresh Chat` command.
 5. When you're done or want to start all over you should run the `OpenAI: Reset Chat History` command, which deletes the chat cache.
 
-> You can bind both of the most usable commands `OpenAI: New Message` and `OpenAI: Show output panel`, to do that please follow `Settings`->`Package Control`->`OpenAI completion`->`Key Bindings`.
+> You can bind both of the most usable commands `OpenAI: New Message` and `OpenAI: Show output panel`; to do that please follow `Settings` -> `Package Control` -> `OpenAI completion` -> `Key Bindings`.
 
 > As for now there's just a single history instance. I guess this limitation would disappear sometime, but highly likely it wouldn't be soon.
diff --git a/messages/2.1.0.txt b/messages/2.1.0.txt
new file mode 100644
index 0000000..e5a14d6
--- /dev/null
+++ b/messages/2.1.0.txt
@@ -0,0 +1,17 @@
+=> 2.1.0
+
+## Features
+
+- Completion streaming support.
+- Drop the 2 farthest replies from the plugin's chat cache.
+
+### Completion streaming support
+
+Yep, you've heard it right. That new cool shiny way that you see in the original OpenAI Chat now comes to Sublime. Embrace, behold and all that stuff. Jokes aside: this is the thing that finally makes GPT-4 completion workable, by easing its most significant tradeoff, the long answering time. I mean, GPT-4's answering time is still the same, but now you start seeing the response up to 20 seconds earlier, which matters a lot in terms of UX.
+
+### Drop the 2 farthest replies from the plugin's chat cache
+
+Now if you hit the context window limit, you're asked whether or not you wish to delete the 2 farthest messages (1 of yours and 1 from the assistant) to shorten the chat history. If you agree, the plugin drops them and resends the rest of the chat history to the OpenAI servers once again. This thing is recursive and will keep spitting the popup in your face until the chat history fits within the given model's context window again. On cancel it does nothing, as expected.
+
+PS: As usual, if you have any issues feel free to open an issue [here](https://github.com/yaroslavyaroslav/OpenAI-sublime-text/issues).
+PS2: If you feel happy with this plugin you can drop me some coins to pay my OpenAI bills on Ethereum here (including L2 chains): 0x60843b4026Ff630b36835a8b78561eDD559ab208.
\ No newline at end of file
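The trim-and-retry behavior described in those release notes is small enough to sketch in full. A minimal, self-contained model of it, assuming only that the chat history is a list of message dicts; `ask_user` and `send` are illustrative stand-ins for `sublime.ok_cancel_dialog` and the real network call:

```python
from typing import Callable, Dict, List

Message = Dict[str, str]

def trim_and_retry(history: List[Message],
                   ask_user: Callable[[str], bool],
                   send: Callable[[List[Message]], None],
                   error_message: str) -> None:
    # Ask before dropping the two farthest messages (one user/assistant pair).
    if not ask_user(f"Delete the two farthest messages?\n\n{error_message}"):
        return  # cancel does nothing, as promised above
    del history[:2]
    # Resend the shortened history; if the context window is still exceeded,
    # the caller lands back here -- hence the recursive popup behavior.
    send(history)
```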
diff --git a/openAI.sublime-settings b/openAI.sublime-settings
index 88e0c32..a71c9ae 100644
--- a/openAI.sublime-settings
+++ b/openAI.sublime-settings
@@ -17,25 +17,41 @@
     // ____Affects only chat completion mode___
     "chat_model": "gpt-3.5-turbo",
 
-    // Controls randomness: Lowering results in less random completions.
-    // As the temperature approaches zero, the model will become deterministic and repetitive.
+    // The ChatGPT model knows how to play a role, lol.
+    // It can act as a different kind of person. Recently in this plugin it was acting
+    // as a code assistant. With this setting you're able to set it up more precisely.
+    // E.g. "You are a (rust|python|js|whatever) developer assistant", "You are an English tutor" and so on.
+    "assistant_role": "You are a senior code assistant",
+
+    // What sampling temperature to use, between 0 and 2.
+    // Higher values like 0.8 will make the output more random,
+    // while lower values like 0.2 will make it more focused and deterministic.
+    //
+    // OpenAI generally recommend altering this or top_p but not both.
     "temperature": 0.7,
 
-    // The maximum number of tokens to generate.
-    // Requests can use up to 2,048 or 4,000 tokens shared between prompt and completion.
-    // The exact limit varies by model.
+    // The maximum number of tokens to generate in the completion.
+    // The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
     // (One token is roughly 4 characters for normal English text)
     // Does not affect editing mode.
     "max_tokens": 256,
 
-    // Controls diversity via nucleus sampling:
-    // 0.5 means half of all likelihood-weighted options are considered.
+    // An alternative to sampling with temperature, called nucleus sampling,
+    // where the model considers the results of the tokens with `top_p` probability mass.
+    // So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+    // OpenAI generally recommend altering this or temperature but not both.
     "top_p": 1,
 
-    // Controls the minimum height of the debugger output panels in lines.
+    // Number between -2.0 and 2.0.
+    // Positive values penalize new tokens based on their existing frequency in the text so far,
+    // decreasing the model's likelihood to repeat the same line verbatim.
+    // docs: https://platform.openai.com/docs/api-reference/parameter-details
    "frequency_penalty": 0,
 
-    // Some new features are locked behind this flag.
+    // Number between -2.0 and 2.0.
+    // Positive values penalize new tokens based on whether they appear in the text so far,
+    // increasing the model's likelihood to talk about new topics.
+    // docs: https://platform.openai.com/docs/api-reference/parameter-details
    "presence_penalty": 0,
 
     // Placeholder for insert mode. You should to put it where you want the suggestion to be inserted.
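For a sense of how these settings travel, here is roughly what a chat completion request body looks like once `prepare_payload` (further down in this diff) has assembled it. A sketch using this file's defaults; the wire format is OpenAI's chat completions API:

```python
import json

# Values mirror openAI.sublime-settings above.
settings = {
    "chat_model": "gpt-3.5-turbo",
    "assistant_role": "You are a senior code assistant",
    "temperature": 0.7,  # tune this or top_p, not both
    "max_tokens": 256,   # prompt tokens + max_tokens must fit the context length
    "top_p": 1,
}

payload = json.dumps({
    "model": settings["chat_model"],
    "messages": [{"role": "system", "content": settings["assistant_role"]}],
    "temperature": settings["temperature"],
    "max_tokens": settings["max_tokens"],
    "top_p": settings["top_p"],
})
print(payload)
```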
diff --git a/openai.py b/openai.py
index c1d4cba..2d3302e 100644
--- a/openai.py
+++ b/openai.py
@@ -28,7 +28,7 @@ def run(self, edit, **kwargs):
 
         text = self.view.substr(region)
 
-        # Cheching that user select some text
+        # Checking that the user selected some text
         try:
             if region.__len__() < settings.get("minimum_selection_length"):
                 if mode != 'chat_completion' and mode != 'reset_chat_history' and mode != 'refresh_output_panel':
@@ -57,11 +57,8 @@ def run(self, edit, **kwargs):
         elif mode == 'refresh_output_panel':
             from .outputpanel import SharedOutputPanelListener
             window = sublime.active_window()
-            listner = SharedOutputPanelListener()
-            listner.refresh_output_panel(
-                window=window,
-                markdown=settings.get('markdown'),
-            )
+            listner = SharedOutputPanelListener(markdown=settings.get('markdown'))
+            listner.refresh_output_panel(window=window)
             listner.show_panel(window=window)
         else: # mode 'chat_completion', always in panel
             sublime.active_window().show_input_panel("Question: ", "", functools.partial(self.on_input, "region", "text", self.view, mode), None, None)
diff --git a/openai_network_client.py b/openai_network_client.py
new file mode 100644
index 0000000..bc2af35
--- /dev/null
+++ b/openai_network_client.py
@@ -0,0 +1,104 @@
+from http.client import HTTPSConnection, HTTPResponse
+from typing import Optional, List
+import logging
+import sublime
+import json
+from .errors.OpenAIException import ContextLengthExceededException, UnknownException, present_error
+from .cacher import Cacher
+
+class NetworkClient():
+    mode = ""
+
+    def __init__(self, settings: sublime.Settings) -> None:
+        self.settings = settings
+        self.headers = {
+            'Content-Type': "application/json",
+            'Authorization': f'Bearer {self.settings.get("token")}',
+            'cache-control': "no-cache",
+        }
+
+        # Tunnel through a proxy when one is configured, otherwise connect directly.
+        proxy_settings = self.settings.get('proxy')
+        if isinstance(proxy_settings, dict):
+            address = proxy_settings.get('address')
+            port = proxy_settings.get('port')
+            if address and len(address) > 0 and port:
+                self.connection = HTTPSConnection(
+                    host=address,
+                    port=port
+                )
+                self.connection.set_tunnel("api.openai.com")
+            else:
+                self.connection = HTTPSConnection("api.openai.com")
+        else:
+            self.connection = HTTPSConnection("api.openai.com")
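The proxy branch above relies on CONNECT tunneling from the standard library; stripped of the plugin context it is just this (the proxy host and port are placeholders):

```python
from http.client import HTTPSConnection

# Connect to the proxy itself, then tunnel TLS through it to the API host.
connection = HTTPSConnection(host="proxy.example.com", port=3128)
connection.set_tunnel("api.openai.com")

# Subsequent requests behave as if connected to api.openai.com directly:
# connection.request("POST", "/v1/chat/completions", body, headers)
```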
self.settings.get("presence_penalty") + }) + + elif mode == 'chat_completion': + return json.dumps({ + # Todo add uniq name for each output panel (e.g. each window) + "messages": [ + {"role": "system", "content": role}, + *Cacher().read_all() + ], + "model": self.settings.get('chat_model'), + "temperature": self.settings.get("temperature"), + "max_tokens": self.settings.get("max_tokens"), + "top_p": self.settings.get("top_p"), + "stream": True + }) + else: raise Exception("Undefined mode") + + def prepare_request(self, gateway, json_payload): + self.connection.request(method="POST", url=gateway, body=json_payload, headers=self.headers) + + def execute_response(self) -> Optional[HTTPResponse]: + return self._execute_network_request() + + def _execute_network_request(self) -> Optional[HTTPResponse]: + response = self.connection.getresponse() + # handle 400-499 client errors and 500-599 server errors + if 400 <= response.status < 600: + error_object = response.read().decode('utf-8') + error_data = json.loads(error_object) + if error_data.get('error', {}).get('code') == 'context_length_exceeded': + raise ContextLengthExceededException(error_data['error']['message']) + # raise custom exception for 'context_length_exceeded' error + # if error_data.get('error', {}).get('code') == 'context_length_exceeded': + # raise ContextLengthExceeded(error_data['error']['message']) + code = error_data.get('error', {}).get('code') or error_data.get('error', {}).get('type') + unknown_error = UnknownException(error_data.get('error', {}).get('message')) + present_error(title=code, error=unknown_error) + return response diff --git a/openai_worker.py b/openai_worker.py index bf3d0f8..7b441d1 100644 --- a/openai_worker.py +++ b/openai_worker.py @@ -1,15 +1,16 @@ -import sublime, sublime_plugin -import http.client +import sublime import threading from .cacher import Cacher -from .outputpanel import get_number_of_lines, SharedOutputPanelListener +from typing import List +from .openai_network_client import NetworkClient +from .buffer import SublimeBuffer +from .errors.OpenAIException import ContextLengthExceededException, UnknownException, present_error import json import logging +import re class OpenAIWorker(threading.Thread): - message = {} - def __init__(self, region, text, view, mode, command): self.region = region self.text = text @@ -17,207 +18,126 @@ def __init__(self, region, text, view, mode, command): self.mode = mode self.command = command # optional self.message = {"role": "user", "content": self.command, 'name': 'OpenAI_completion'} - settings = sublime.load_settings("openAI.sublime-settings") - self.settings = settings - self.proxy = settings.get('proxy')['address'] - self.port = settings.get('proxy')['port'] + self.settings = sublime.load_settings("openAI.sublime-settings") + self.provider = NetworkClient(settings=self.settings) + + self.buffer_manager = SublimeBuffer(self.view) super(OpenAIWorker, self).__init__() + def update_output_panel(self, text_chunk: str): + from .outputpanel import SharedOutputPanelListener + window = sublime.active_window() + markdown_setting = self.settings.get('markdown') + if not isinstance(markdown_setting, bool): + markdown_setting = True + + listner = SharedOutputPanelListener(markdown=markdown_setting) + listner.show_panel(window=window) + listner.update_output_panel( + text=text_chunk, + window=window + ) + def prompt_completion(self, completion): - completion = completion.replace("$", "\$") - if self.mode == 'insertion': - result = 
diff --git a/openai_worker.py b/openai_worker.py
index bf3d0f8..7b441d1 100644
--- a/openai_worker.py
+++ b/openai_worker.py
@@ -1,15 +1,16 @@
-import sublime, sublime_plugin
-import http.client
+import sublime
 import threading
 from .cacher import Cacher
-from .outputpanel import get_number_of_lines, SharedOutputPanelListener
+from typing import List
+from .openai_network_client import NetworkClient
+from .buffer import SublimeBuffer
+from .errors.OpenAIException import ContextLengthExceededException, UnknownException, present_error
 import json
 import logging
+import re
 
 class OpenAIWorker(threading.Thread):
-    message = {}
-
     def __init__(self, region, text, view, mode, command):
         self.region = region
         self.text = text
@@ -17,207 +18,126 @@ def __init__(self, region, text, view, mode, command):
         self.mode = mode
         self.command = command # optional
         self.message = {"role": "user", "content": self.command, 'name': 'OpenAI_completion'}
-        settings = sublime.load_settings("openAI.sublime-settings")
-        self.settings = settings
-        self.proxy = settings.get('proxy')['address']
-        self.port = settings.get('proxy')['port']
+        self.settings = sublime.load_settings("openAI.sublime-settings")
+        self.provider = NetworkClient(settings=self.settings)
+
+        self.buffer_manager = SublimeBuffer(self.view)
         super(OpenAIWorker, self).__init__()
 
+    def update_output_panel(self, text_chunk: str):
+        from .outputpanel import SharedOutputPanelListener
+        window = sublime.active_window()
+        markdown_setting = self.settings.get('markdown')
+        if not isinstance(markdown_setting, bool):
+            markdown_setting = True
+
+        listner = SharedOutputPanelListener(markdown=markdown_setting)
+        listner.show_panel(window=window)
+        listner.update_output_panel(
+            text=text_chunk,
+            window=window
+        )
+
     def prompt_completion(self, completion):
-        completion = completion.replace("$", "\$")
-        if self.mode == 'insertion':
-            result = self.view.find(self.settings.get('placeholder'), 0, 1)
-            if result:
-                self.view.sel().clear()
-                self.view.sel().add(result)
-                # Replace the placeholder with the specified replacement text
-                self.view.run_command("insert_snippet", {"contents": completion})
+        placeholder = self.settings.get('placeholder')
+        if not isinstance(placeholder, str):
+            placeholder = "[insert]"
+        self.buffer_manager.prompt_completion(
+            mode=self.mode,
+            completion=completion,
+            placeholder=placeholder
+        )
+
+    def handle_chat_completion_response(self):
+        response = self.provider.execute_response()
+
+        if response is None or response.status != 200:
             return
 
-        if self.mode == 'chat_completion':
-            from .outputpanel import SharedOutputPanelListener
-            window = sublime.active_window()
-            ## FIXME: This setting applies only in one way none -> markdown
-            listner = SharedOutputPanelListener()
-            listner.refresh_output_panel(
-                window=window,
-                markdown=self.settings.get('markdown'),
-            )
-            listner.show_panel(window=window)
-
-        if self.mode == 'completion':
-            region = self.view.sel()[0]
-            if region.a <= region.b:
-                region.a = region.b
-            else:
-                region.b = region.a
-
-            self.view.sel().clear()
-            self.view.sel().add(region)
-            # Replace the placeholder with the specified replacement text
-            self.view.run_command("insert_snippet", {"contents": completion})
-            return
-
-        if self.mode == 'edition': # it's just replacing all given text for now.
-            region = self.view.sel()[0]
-            self.view.run_command("insert_snippet", {"contents": completion})
-            return
+        decoder = json.JSONDecoder()
+
+        full_response_content = {"role": "", "content": ""}
+
+        self.update_output_panel("\n\n## Answer\n\n")
+
+        for chunk in response:
+            chunk_str = chunk.decode('utf-8')
+
+            # Check for SSE data
+            if chunk_str.startswith("data:") and not re.search(r"\[DONE\]$", chunk_str):
+                chunk_str = chunk_str[len("data:"):].strip()
+
+                try:
+                    response = decoder.decode(chunk_str)
+                except ValueError as ex:
+                    sublime.error_message(f"Server Error: {str(ex)}")
+                    logging.exception("Exception: " + str(ex))
+                    continue
+
+                if 'delta' in response['choices'][0]:
+                    delta = response['choices'][0]['delta']
+                    if 'role' in delta:
+                        full_response_content['role'] = delta['role']
+                    elif 'content' in delta:
+                        full_response_content['content'] += delta['content']
+                        self.update_output_panel(delta['content'])
+
+        self.provider.connection.close()
+        Cacher().append_to_cache([full_response_content])
 
+    def handle_ordinary_response(self):
+        response = self.provider.execute_response()
+        if response is None or response.status != 200:
+            return
+        data = response.read()
+        data_decoded = data.decode('utf-8')
+        self.provider.connection.close()
+        completion = json.loads(data_decoded)['choices'][0]['text']
+        completion = completion.strip() # Remove leading and trailing spaces
+        self.prompt_completion(completion)
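The streaming loop above consumes server-sent events: each chunk is a `data: {json}` line, and the stream terminates with `data: [DONE]`. The same parsing, reduced to a sketch over a canned stream instead of a live `HTTPResponse`:

```python
import json

stream = [
    b'data: {"choices": [{"delta": {"role": "assistant"}}]}\n',
    b'data: {"choices": [{"delta": {"content": "Hello"}}]}\n',
    b'data: [DONE]\n',
]

content = ""
for chunk in stream:
    line = chunk.decode('utf-8').strip()
    if not line.startswith("data:") or line.endswith("[DONE]"):
        continue
    delta = json.loads(line[len("data:"):].strip())['choices'][0]['delta']
    content += delta.get('content', '')  # role-only deltas contribute nothing
print(content)  # -> Hello
```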
-    def exec_net_request(self, connect: http.client.HTTPSConnection):
-        # TODO: Add status bar "loading" status, to make it obvious, that we're waiting the server response.
-        try:
-            res = connect.getresponse()
-            data = res.read()
-            status = res.status
-            data_decoded = data.decode('utf-8')
-            connect.close()
-            response = json.loads(data_decoded)
+    def handle_response(self):
+        try:
+            if self.mode == "chat_completion": self.handle_chat_completion_response()
+            else: self.handle_ordinary_response()
+        except ContextLengthExceededException as error:
             if self.mode == 'chat_completion':
-                Cacher().append_to_cache([response['choices'][0]['message']])
-                completion = ""
-                print(f"token number: {response['usage']['total_tokens']}")
+                # Ask the user whether to delete the first dialog pair and retry.
+                do_delete = sublime.ok_cancel_dialog(msg=f'Delete the two farthest messages?\n\n{error.message}', ok_title="Delete")
+                if do_delete:
+                    Cacher().drop_first(2)
+                    assistant_role = self.settings.get('assistant_role')
+                    if not isinstance(assistant_role, str):
+                        raise ValueError("The assistant_role setting must be a string.")
+                    payload = self.provider.prepare_payload(mode=self.mode, role=assistant_role)
+                    self.provider.prepare_request(gateway="/v1/chat/completions", json_payload=payload)
+                    self.handle_response()
             else:
-                completion = json.loads(data_decoded)['choices'][0]['text']
-
-            completion = completion.strip() # Remove leading and trailing spaces
-            self.prompt_completion(completion)
+                present_error(title="OpenAI error", error=error)
         except KeyError:
-            # TODO: Add status bar user notification for this action.
             if self.mode == 'chat_completion' and response['error']['code'] == 'context_length_exceeded':
-                Cacher().drop_first(4)
-                self.chat_complete()
+                Cacher().drop_first(2)
             else:
                 sublime.error_message("Exception\n" + "The OpenAI response could not be decoded. There could be a problem on their side. Please look in the console for additional error info.")
-                logging.exception("Exception: " + str(data_decoded))
+                logging.exception("Exception: the OpenAI response could not be decoded")
             return
         except Exception as ex:
-            sublime.error_message(f"Server Error: {str(status)}\n{ex}")
+            sublime.error_message(f"Server Error: {str(ex)}")
             logging.exception("Exception: " + str(data_decoded))
             return
 
-    def create_connection(self) -> http.client.HTTPSConnection:
-        if len(self.proxy) > 0:
-            connection = http.client.HTTPSConnection(host=self.proxy, port=self.port)
-            connection.set_tunnel("api.openai.com")
-            return connection
-        else:
-            return http.client.HTTPSConnection("api.openai.com")
-
-    def chat_complete(self):
-        cacher = Cacher()
-
-        conn = self.create_connection()
-
-        payload = {
-            # Todo add uniq name for each output panel (e.g. each window)
-            "messages": [
-                {"role": "system", "content": "You are a code assistant."},
-                *cacher.read_all()
-            ],
-            "model": self.settings.get('chat_model'),
-            "temperature": self.settings.get("temperature"),
-            "max_tokens": self.settings.get("max_tokens"),
-            "top_p": self.settings.get("top_p"),
-        }
-
-        json_payload = json.dumps(payload)
-        token = self.settings.get('token')
-
-        headers = {
-            'Content-Type': "application/json",
-            'Authorization': f'Bearer {token}',
-            'cache-control': "no-cache",
-        }
-        conn.request("POST", "/v1/chat/completions", json_payload, headers)
-        self.exec_net_request(connect=conn)
-
-    def complete(self):
-        conn = self.create_connection()
-
-        payload = {
-            "prompt": self.text,
-            "model": self.settings.get("model"),
-            "temperature": self.settings.get("temperature"),
-            "max_tokens": self.settings.get("max_tokens"),
-            "top_p": self.settings.get("top_p"),
-            "frequency_penalty": self.settings.get("frequency_penalty"),
-            "presence_penalty": self.settings.get("presence_penalty")
-        }
-        json_payload = json.dumps(payload)
-
-        token = self.settings.get('token')
-
-        headers = {
-            'Content-Type': "application/json",
-            'Authorization': 'Bearer {}'.format(token),
-            'cache-control': "no-cache",
-        }
-        conn.request("POST", "/v1/completions", json_payload, headers)
-        self.exec_net_request(connect=conn)
-
-    def insert(self):
-        conn = self.create_connection()
-        parts = self.text.split(self.settings.get('placeholder'))
-        try:
-            if not len(parts) == 2:
-                raise AssertionError("There is no placeholder '" + self.settings.get('placeholder') + "' within the selected text. There should be exactly one.")
-        except Exception as ex:
-            sublime.error_message("Exception\n" + str(ex))
-            logging.exception("Exception: " + str(ex))
-            return
-
-        payload = {
-            "model": self.settings.get("model"),
-            "prompt": parts[0],
-            "suffix": parts[1],
-            "temperature": self.settings.get("temperature"),
-            "max_tokens": self.settings.get("max_tokens"),
-            "top_p": self.settings.get("top_p"),
-            "frequency_penalty": self.settings.get("frequency_penalty"),
-            "presence_penalty": self.settings.get("presence_penalty")
-        }
-        json_payload = json.dumps(payload)
-
-        token = self.settings.get('token')
-
-        headers = {
-            'Content-Type': "application/json",
-            'Authorization': 'Bearer {}'.format(token),
-            'cache-control': "no-cache",
-        }
-        conn.request("POST", "/v1/completions", json_payload, headers)
-        self.exec_net_request(connect=conn)
-
-    def edit_f(self):
-        conn = self.create_connection()
-        payload = {
-            "model": self.settings.get('edit_model'),
-            "input": self.text,
-            "instruction": self.command,
-            "temperature": self.settings.get("temperature"),
-            "top_p": self.settings.get("top_p"),
-        }
-        json_payload = json.dumps(payload)
-
-        token = self.settings.get('token')
-
-        headers = {
-            'Content-Type': "application/json",
-            'Authorization': 'Bearer {}'.format(token),
-            'cache-control': "no-cache",
-        }
-        conn.request("POST", "/v1/edits", json_payload, headers)
-        self.exec_net_request(connect=conn)
-
     def run(self):
         try:
             # FIXME: It's better to have such check locally, but it's pretty complicated with all those different modes and models
             # if (self.settings.get("max_tokens") + len(self.text)) > 4000:
             #     raise AssertionError("OpenAI accepts max. 4000 tokens, so the selected text and the max_tokens setting must be lower than 4000.")
-            if not self.settings.has("token"):
-                raise AssertionError("No token provided, you have to set the OpenAI token into the settings to make things work.")
             token = self.settings.get('token')
+            if not isinstance(token, str):
+                raise AssertionError("The token must be a string.")
             if len(token) < 10:
                 raise AssertionError("No token provided, you have to set the OpenAI token into the settings to make things work.")
         except Exception as ex:
@@ -225,9 +145,41 @@ def run(self):
             logging.exception("Exception: " + str(ex))
             return
 
-        if self.mode == 'insertion': self.insert()
-        if self.mode == 'edition': self.edit_f()
-        if self.mode == 'completion': self.complete()
-        if self.mode == 'chat_completion':
-            Cacher().append_to_cache([self.message])
-            self.chat_complete()
+        if self.mode == 'insertion':
+            placeholder = self.settings.get('placeholder')
+            if not isinstance(placeholder, str):
+                raise AssertionError("The placeholder must be a string.")
+            parts: List[str] = self.text.split(placeholder)
+            try:
+                if not len(parts) == 2:
+                    raise AssertionError("There is no placeholder '" + placeholder + "' within the selected text. There should be exactly one.")
+            except Exception as ex:
+                sublime.error_message("Exception\n" + str(ex))
+                logging.exception("Exception: " + str(ex))
+                return
+            payload = self.provider.prepare_payload(mode=self.mode, parts=parts)
+            self.provider.prepare_request(gateway="/v1/completions", json_payload=payload)
+            self.handle_response()
+
+        elif self.mode == 'edition':
+            payload = self.provider.prepare_payload(mode=self.mode, text=self.text, command=self.command)
+            self.provider.prepare_request(gateway="/v1/edits", json_payload=payload)
+            self.handle_response()
+
+        elif self.mode == 'completion':
+            payload = self.provider.prepare_payload(mode=self.mode, text=self.text)
+            self.provider.prepare_request(gateway="/v1/completions", json_payload=payload)
+            self.handle_response()
+
+        elif self.mode == 'chat_completion':
+            cacher = Cacher()
+            cacher.append_to_cache([self.message])
+            self.update_output_panel("\n\n## Question\n\n")
+            self.update_output_panel(cacher.read_all()[-1]["content"])
+
+            assistant_role = self.settings.get('assistant_role')
+            if not isinstance(assistant_role, str):
+                raise ValueError("The assistant_role setting must be a string.")
+
+            payload = self.provider.prepare_payload(mode=self.mode, role=assistant_role)
+            self.provider.prepare_request(gateway="/v1/chat/completions", json_payload=payload)
+            self.handle_response()
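The insertion-mode contract enforced in `run` is worth seeing in isolation: the selection must contain the placeholder exactly once, so splitting on it yields exactly the prompt/suffix pair that `/v1/completions` expects. A standalone sketch of that check:

```python
from typing import Tuple

def split_on_placeholder(text: str, placeholder: str) -> Tuple[str, str]:
    parts = text.split(placeholder)
    if len(parts) != 2:
        raise AssertionError(
            f"There is no placeholder '{placeholder}' within the selected text. "
            "There should be exactly one."
        )
    return parts[0], parts[1]  # prompt and suffix

print(split_on_placeholder("def add(a, b):\n    [insert]\n", "[insert]"))
```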
diff --git a/outputpanel.py b/outputpanel.py
index 2b697b1..3b071ef 100644
--- a/outputpanel.py
+++ b/outputpanel.py
@@ -1,25 +1,48 @@
-import sublime
-import sublime_plugin
+from sublime import Window, View
+from sublime_plugin import EventListener
 from .cacher import Cacher
 
-class SharedOutputPanelListener(sublime_plugin.EventListener):
+class SharedOutputPanelListener(EventListener):
     OUTPUT_PANEL_NAME = "OpenAI Chat"
 
-    def get_output_panel(self, window: sublime.Window):
-        return window.find_output_panel(self.OUTPUT_PANEL_NAME) if window.find_output_panel(self.OUTPUT_PANEL_NAME) != None else window.create_output_panel(self.OUTPUT_PANEL_NAME)
+    def __init__(self, markdown: bool = True) -> None:
+        self.markdown: bool = markdown
+        super().__init__()
 
-    def refresh_output_panel(self, window, markdown: bool):
-        output_panel = self.get_output_panel(window=window)
+    def __get_output_panel__(self, window: Window) -> View:
+        output_panel = window.find_output_panel(self.OUTPUT_PANEL_NAME) or window.create_output_panel(self.OUTPUT_PANEL_NAME)
+        if self.markdown: output_panel.set_syntax_file("Packages/Markdown/MultiMarkdown.sublime-syntax")
+        return output_panel
+
+    def __scroll_to_text_point__(self, output_panel: View, num_lines: int):
+        point = output_panel.text_point(num_lines, 0)
+        # FIXME: make me scrollable while printing, in addition to following the bottom edge if not scrolled.
+        output_panel.show_at_center(point)
+
+    ## FIXME: This one should allow scrolling while updating, yet it should follow the text if it's not scrolled.
+    def update_output_panel(self, text: str, window: Window):
+        output_panel = self.__get_output_panel__(window=window)
         output_panel.set_read_only(False)
-        self.clear_output_panel(window)
+        output_panel.run_command('append', {'characters': text})
+        output_panel.set_read_only(True)
+        num_lines: int = get_number_of_lines(output_panel)
 
-        if markdown: output_panel.set_syntax_file("Packages/Markdown/MultiMarkdown.sublime-syntax")
+        self.__scroll_to_text_point__(
+            output_panel=output_panel,
+            num_lines=num_lines
+        )
+
+    def refresh_output_panel(self, window):
+        output_panel = self.__get_output_panel__(window=window)
+        output_panel.set_read_only(False)
+        self.clear_output_panel(window)
 
         for line in Cacher().read_all():
             if line['role'] == 'user':
                 output_panel.run_command('append', {'characters': f'\n\n## Question\n\n'})
             elif line['role'] == 'assistant':
-                ## This one left here as there're could be loooong questions.
+                ## This one is placed here as there could be loooong questions.
                 output_panel.run_command('append', {'characters': '\n\n## Answer\n\n'})
 
             output_panel.run_command('append', {'characters': line['content']})
@@ -30,18 +53,19 @@ def refresh_output_panel(self, window, markdown: bool):
 
         ## Hardcoded to -10 lines from the end, just completely randrom number.
         ## TODO: Here's some complex scrolling logic based on the content (## Answer) required.
-        point = output_panel.text_point(num_lines - 10, 0)
-
-        output_panel.show_at_center(point)
+        self.__scroll_to_text_point__(
+            output_panel=output_panel,
+            num_lines=num_lines - 10
+        )
 
     def clear_output_panel(self, window):
-        output_panel = self.get_output_panel(window=window)
+        output_panel = self.__get_output_panel__(window=window)
         output_panel.run_command("select_all")
         output_panel.run_command("right_delete")
 
     def show_panel(self, window):
         window.run_command("show_panel", {"panel": f"output.{self.OUTPUT_PANEL_NAME}"})
 
-def get_number_of_lines(view):
+def get_number_of_lines(view: View) -> int:
     last_line_num = view.rowcol(view.size())[0] + 1
     return last_line_num
\ No newline at end of file
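Finally, `get_number_of_lines` is the core of the scrolling logic: `rowcol(size())` returns the zero-based row of the buffer's end, so adding one gives a line count, and `text_point` maps a row back to an offset for `show_at_center`. A plain-string analogue of the counting:

```python
def get_number_of_lines(text: str) -> int:
    # Mirrors view.rowcol(view.size())[0] + 1: the row after the last newline,
    # converted from zero-based to a count.
    return text.count("\n") + 1

print(get_number_of_lines("## Question\n\nhello\n"))  # -> 4
```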