Showing 12 changed files with 400 additions and 218 deletions.
.gitattributes
@@ -1 +1,2 @@
 /static/**/image*.png export-ignore
+/.github/FUNDING.yml export-ignore
.github/FUNDING.yml
@@ -0,0 +1,13 @@
# These are supported funding model platforms

github: [yaroslavyaroslav] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username
# ko_fi: # Replace with a single Ko-fi username
# tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
# community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
# liberapay: # Replace with a single Liberapay username
# issuehunt: # Replace with a single IssueHunt username
# otechie: # Replace with a single Otechie username
# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
# custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
@@ -0,0 +1,35 @@
from typing import Optional


class SublimeBuffer():

    def __init__(self, view) -> None:
        self.view = view

    def prompt_completion(self, mode: str, completion: str, placeholder: Optional[str] = None):
        # Escape literal dollar signs so they are not parsed as snippet fields.
        completion = completion.replace("$", "\\$")
        if mode == 'insertion':
            result = self.view.find(placeholder, 0, 1)
            if result:
                self.view.sel().clear()
                self.view.sel().add(result)
                # Replace the placeholder with the specified replacement text.
                self.view.run_command("insert_snippet", {"contents": completion})
            return

        elif mode == 'completion':
            # Collapse the selection to its trailing edge so the completion is
            # appended after the caret rather than replacing the selection.
            region = self.view.sel()[0]
            if region.a <= region.b:
                region.a = region.b
            else:
                region.b = region.a

            self.view.sel().clear()
            self.view.sel().add(region)
            self.view.run_command("insert_snippet", {"contents": completion})
            return

        elif mode == 'edition':  # For now this simply replaces the selected text.
            self.view.run_command("insert_snippet", {"contents": completion})
            return
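For orientation, here is a minimal sketch of how SublimeBuffer might be driven from a Sublime Text command. The command class and the completion string are illustrative only, not part of this commit:

import sublime_plugin

class ExampleAppendCompletionCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        buffer = SublimeBuffer(self.view)
        # 'completion' mode collapses the selection and inserts after the caret.
        buffer.prompt_completion(mode='completion', completion='Hello from the model')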
errors/OpenAIException.py
@@ -0,0 +1,22 @@
from logging import exception
from sublime import error_message


class OpenAIException(Exception):
    """Exception raised for errors in the input.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message: str):
        self.message = message
        super().__init__(self.message)


class ContextLengthExceededException(OpenAIException): ...


class UnknownException(OpenAIException): ...


def present_error(title: str, error: OpenAIException):
    # Log the error with a stack trace, then show it to the user in a dialog.
    exception(f"{title}: {error.message}")
    error_message(f"{title}\n{error.message}")
@@ -0,0 +1,17 @@
=> 2.1.0

## Features

- Completion streaming support.
- Drop the two oldest replies from the plugin's cached dialogue.
### Completion streaming support

Yep, you heard it right: that cool, shiny streaming output you see in the official OpenAI chat now comes to Sublime. Embrace, behold, and all that. Jokes aside, this is the thing that makes GPT-4 completion actually workable, because it removes its most significant tradeoff: long answering time. GPT-4's total answering time is still the same, but you now start seeing the response up to 20 seconds earlier, which matters a great deal for UX.
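Under the hood the request is sent with "stream": true, and chunks are rendered as they arrive. A rough sketch of consuming such a server-sent-events stream (an illustration of the mechanism, not the plugin's exact code):

import json

def read_stream(response):
    # 'response' is an http.client.HTTPResponse for a request sent with "stream": true.
    for raw_line in response:
        line = raw_line.decode('utf-8').strip()
        if not line.startswith('data: '):
            continue
        payload = line[len('data: '):]
        if payload == '[DONE]':  # the API signals the end of the stream
            break
        delta = json.loads(payload)['choices'][0].get('delta', {})
        if 'content' in delta:
            yield delta['content']  # append each chunk to the view immediately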
### Drop the two oldest replies from the plugin's cached dialogue

Now, when you hit the context window limit, you are asked whether you wish to delete the two oldest messages (one of yours and one from the assistant) to shorten the chat history. If you confirm, the plugin drops them and resends the rest of the chat history to the OpenAI servers. This is recursive: the popup will keep appearing until the chat history fits within the given model's context window again. On cancel it does nothing, as expected.
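Conceptually, the recovery loop looks roughly like this (a sketch; the dialog text and Cacher.drop_first are illustrative names, not necessarily the plugin's real API):

import sublime

def send_with_truncation(client, cacher, build_payload):
    while True:
        try:
            client.prepare_request(gateway="/v1/chat/completions", json_payload=build_payload())
            return client.execute_response()
        except ContextLengthExceededException:
            if not sublime.ok_cancel_dialog("Context limit reached. Drop the 2 oldest messages and retry?"):
                return None  # cancel: do nothing, as expected
            cacher.drop_first(2)  # drop the oldest user/assistant pair, then retry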
PS: As usual, if you run into any issues, feel free to open one [here](https://github.com/yaroslavyaroslav/OpenAI-sublime-text/issues).

PS2: If you're happy with this plugin, you can drop me some coins to help pay my OpenAI bills on Ethereum here (including L2 chains): 0x60843b4026Ff630b36835a8b78561eDD559ab208.
@@ -0,0 +1,104 @@
from http.client import HTTPSConnection, HTTPResponse
from typing import Optional, List
import sublime
import json
from .errors.OpenAIException import ContextLengthExceededException, UnknownException, present_error
from .cacher import Cacher


class NetworkClient():
    mode = ""

    def __init__(self, settings: sublime.Settings) -> None:
        self.settings = settings
        self.headers = {
            'Content-Type': "application/json",
            'Authorization': f'Bearer {self.settings.get("token")}',
            'cache-control': "no-cache",
        }

        # Tunnel through an HTTPS proxy when one is configured; otherwise
        # connect to api.openai.com directly.
        proxy_settings = self.settings.get('proxy')
        address = proxy_settings.get('address') if isinstance(proxy_settings, dict) else None
        port = proxy_settings.get('port') if isinstance(proxy_settings, dict) else None
        if address and port:
            self.connection = HTTPSConnection(host=address, port=port)
            self.connection.set_tunnel("api.openai.com")
        else:
            self.connection = HTTPSConnection("api.openai.com")

    def prepare_payload(self, mode: str, text: Optional[str] = None, command: Optional[str] = None,
                        role: Optional[str] = None, parts: Optional[List[str]] = None) -> str:
        self.mode = mode
        if mode == 'insertion':
            # Fall back to an explicit "wrong input" prompt when the text could
            # not be split into a prompt/suffix pair around the placeholder.
            prompt, suffix = ((parts[0], parts[1]) if parts and len(parts) >= 2
                              else ("Print out that input text is wrong", "Print out that input text is wrong"))
            return json.dumps({
                "model": self.settings.get("model"),
                "prompt": prompt,
                "suffix": suffix,
                "temperature": self.settings.get("temperature"),
                "max_tokens": self.settings.get("max_tokens"),
                "top_p": self.settings.get("top_p"),
                "frequency_penalty": self.settings.get("frequency_penalty"),
                "presence_penalty": self.settings.get("presence_penalty")
            })

        elif mode == 'edition':
            return json.dumps({
                "model": self.settings.get('edit_model'),
                "input": text,
                "instruction": command,
                "temperature": self.settings.get("temperature"),
                "top_p": self.settings.get("top_p"),
            })

        elif mode == 'completion':
            return json.dumps({
                "prompt": text,
                "model": self.settings.get("model"),
                "temperature": self.settings.get("temperature"),
                "max_tokens": self.settings.get("max_tokens"),
                "top_p": self.settings.get("top_p"),
                "frequency_penalty": self.settings.get("frequency_penalty"),
                "presence_penalty": self.settings.get("presence_penalty")
            })

        elif mode == 'chat_completion':
            return json.dumps({
                # TODO: add a unique name for each output panel (e.g. per window).
                "messages": [
                    {"role": "system", "content": role},
                    *Cacher().read_all()
                ],
                "model": self.settings.get('chat_model'),
                "temperature": self.settings.get("temperature"),
                "max_tokens": self.settings.get("max_tokens"),
                "top_p": self.settings.get("top_p"),
                "stream": True
            })
        else:
            raise Exception("Undefined mode")

    def prepare_request(self, gateway, json_payload):
        self.connection.request(method="POST", url=gateway, body=json_payload, headers=self.headers)

    def execute_response(self) -> Optional[HTTPResponse]:
        return self._execute_network_request()

    def _execute_network_request(self) -> Optional[HTTPResponse]:
        response = self.connection.getresponse()
        # Handle 4xx client errors and 5xx server errors.
        if 400 <= response.status < 600:
            error_object = response.read().decode('utf-8')
            error_data = json.loads(error_object)
            # Raise a dedicated exception for the 'context_length_exceeded' error
            # so callers can offer to shorten the history and retry.
            if error_data.get('error', {}).get('code') == 'context_length_exceeded':
                raise ContextLengthExceededException(error_data['error']['message'])
            # Everything else is surfaced to the user as an unknown error.
            code = error_data.get('error', {}).get('code') or error_data.get('error', {}).get('type')
            unknown_error = UnknownException(error_data.get('error', {}).get('message'))
            present_error(title=code, error=unknown_error)
        return response
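Putting the pieces together, a chat-completion round trip with this client would look roughly like this (a sketch; the settings file name and the gateway path are assumptions, not taken from this diff):

settings = sublime.load_settings("openAI.sublime-settings")  # assumed settings file name
client = NetworkClient(settings)

payload = client.prepare_payload(mode='chat_completion', role="You are a helpful assistant.")
client.prepare_request(gateway="/v1/chat/completions", json_payload=payload)  # assumed gateway
response = client.execute_response()  # may raise ContextLengthExceededException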