-
-
Notifications
You must be signed in to change notification settings - Fork 18
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
18 changed files
with
633 additions
and
216 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,46 @@ | ||
import sublime | ||
import os | ||
from . import jl_utility as jl | ||
|
||
|
||
class Cacher():
    """Persists the plugin's chat history as a JSON Lines cache file.

    The cache lives in Sublime's cache directory under "OpenAI completion".
    """

    def __init__(self) -> None:
        cache_dir = sublime.cache_path()
        plugin_cache_dir = os.path.join(cache_dir, "OpenAI completion")
        if not os.path.exists(plugin_cache_dir):
            os.makedirs(plugin_cache_dir)

        # Create the file path to store the data
        self.history_file = os.path.join(plugin_cache_dir, "chat_history.jl")

    def read_all(self):
        """Return every stored JSON object as a list (whole chat history)."""
        return list(jl.reader(self.history_file))

    def append_to_cache(self, cache_lines):
        """Append each object in *cache_lines* to the history file.

        Fix: the previous implementation sent only ``cache_lines[0]`` to the
        writer coroutine, silently dropping the rest of the batch (the
        commented-out loop showed the original intent). It also never closed
        the writer, leaving the file handle open until garbage collection.
        """
        writer = jl.writer(self.history_file)
        next(writer)  # prime the coroutine so it reaches its first `yield`
        try:
            for line in cache_lines:
                writer.send(line)
        finally:
            # Raises GeneratorExit inside the coroutine, which exits its
            # `with open(...)` block and closes the file deterministically.
            writer.close()

    def drop_first(self, number=4):
        """Remove the first *number* lines (default 4) from the history file."""
        # Read all lines from the JSON Lines file
        with open(self.history_file, "r") as file:
            lines = file.readlines()

        # Remove the specified number of lines from the beginning
        lines = lines[number:]

        # Write the remaining lines back to the cache file
        with open(self.history_file, "w") as file:
            file.writelines(lines)

    def drop_all(self):
        """Erase the entire chat history."""
        with open(self.history_file, "w") as _:
            pass  # Truncate the file by opening it in 'w' mode and doing nothing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,38 @@ | ||
import json | ||
from typing import Iterator, Generator | ||
|
||
|
||
def reader(fname: str) -> Iterator[dict]:
    """Lazily yield one decoded JSON object per line of the file *fname*."""
    with open(fname) as source:
        for raw_line in source:
            yield json.loads(raw_line.strip())
|
||
|
||
def writer(fname: str, mode: str = 'a') -> Generator[None, dict, None]:
    """Coroutine that writes each object sent to it to *fname* as a JSON line.

    Prime it with ``next()`` before the first ``send()``; ``close()`` it to
    release the underlying file handle.
    """
    with open(fname, mode) as sink:
        while True:
            record = yield
            sink.write(json.dumps(record, ensure_ascii=False) + "\n")
|
||
|
||
# if __name__ == "__main__": | ||
# # Read employees from employees.jl | ||
# reader = jl_reader("employees.jl") | ||
|
||
# # Create a new JSON Lines writer for output.jl | ||
# writer = jl_writer("output.jl") | ||
# next(writer) | ||
|
||
# for employee in reader: | ||
# id = employee["id"] | ||
# name = employee["name"] | ||
# dept = employee["department"] | ||
# print(f"#{id} - {name} ({dept})") | ||
|
||
# # Write the employee data to output.jl | ||
# writer.send(employee) | ||
|
||
# # Close the writer | ||
# writer.close() |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
{ | ||
"2.0.0": "messages/2.0.0.txt" | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
=> 2.0.0 | ||
|
||
# Features summary | ||
- ChatGPT mode support. | ||
- [Multi]Markdown syntax with syntax highlight support (ChatGPT mode only). | ||
- Proxy support. | ||
- GPT-4 support. | ||
|
||
## ChatGPT mode | ||
|
||
ChatGPT mode works the following way: | ||
1. Run the `OpenAI: New Message` command | ||
2. Wait until OpenAI performs a response (be VERY patient in the case of the GPT-4 model it's way slower than you could imagine). | ||
3. On response, the plugin opens the `OpenAI completion` output panel with the whole log of your chat in [any] active Window. | ||
4. If you would like to fetch chat history to another window manually, you can do that by running the `OpenAI: Refresh Chat` command. | ||
5. When you're done or want to start all over you should run the `OpenAI: Reset Chat History` command, which deletes the chat cache. | ||
|
||
> You can bind both of the most usable commands `OpenAI: New Message` and `OpenAI: Show output panel`, to do that please follow `Settings`->`Package Control`->`OpenAI completion`->`Key Bindings`. | ||
|
||
> As for now there's just a single history instance. I guess this limitation would disappear sometime, but highly likely it wouldn't be soon. | ||
|
||
## [Multi]Markdown syntax with syntax highlight support (ChatGPT mode only). | ||
|
||
ChatGPT output panel supports markdown syntax highlight. It should just work (if it's not please report an issue). | ||
|
||
Although it's highly recommended to install the [`MultimarkdownEditing`](https://sublimetext-markdown.github.io/MarkdownEditing/) to apply syntax highlighting for code snippets provided by ChatGPT. `OpenAI completion` should just pick it up implicitly for the output panel content. | ||
|
||
## Proxy support | ||
|
||
That's it. Now you can set up a proxy for this plugin. | ||
You can set it up by overriding the proxy property in the `OpenAI completion` settings as follows: | ||
|
||
```json | ||
// Proxy setting | ||
"proxy": { | ||
// Proxy address | ||
"address": "127.0.0.1", | ||
|
||
// Proxy port | ||
"port": 9898 | ||
} | ||
``` | ||
|
||
## GPT-4 support | ||
|
||
It should just work, just set the `chat_model` setting to `GPT-4`. Please be patient while working with it. (1) It's **very** slow and an answer would appear only after it finishes its prompt. It could take up to 10 seconds easily. | ||
|
||
## Disclaimer | ||
|
||
Unfortunately, this version hasn't been covered by comprehensive testing, so there may be bugs. Please report them, and I'll be happy to release a patch. |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,48 @@ | ||
# Features summary | ||
- ChatGPT mode support. | ||
- [Multi]Markdown syntax with syntax highlight support (ChatGPT mode only). | ||
- Proxy support. | ||
- GPT-4 support. | ||
|
||
## ChatGPT mode | ||
|
||
ChatGPT mode works the following way: | ||
1. Run the `OpenAI: New Message` command | ||
2. Wait until OpenAI performs a response (be VERY patient in the case of the GPT-4 model it's way slower than you could imagine). | ||
3. On response, the plugin opens the `OpenAI completion` output panel with the whole log of your chat in [any] active Window. | ||
4. If you would like to fetch chat history to another window manually, you can do that by running the `OpenAI: Refresh Chat` command. | ||
5. When you're done or want to start all over you should run the `OpenAI: Reset Chat History` command, which deletes the chat cache. | ||
|
||
> You can bind both of the most usable commands `OpenAI: New Message` and `OpenAI: Show output panel`, to do that please follow `Settings`->`Package Control`->`OpenAI completion`->`Key Bindings`. | ||
|
||
> As for now there's just a single history instance. I guess this limitation would disappear sometime, but highly likely it wouldn't be soon. | ||
|
||
## [Multi]Markdown syntax with syntax highlight support (ChatGPT mode only). | ||
|
||
ChatGPT output panel supports markdown syntax highlight. It should just work (if it's not please report an issue). | ||
|
||
Although it's highly recommended to install the [`MultimarkdownEditing`](https://sublimetext-markdown.github.io/MarkdownEditing/) to apply syntax highlighting for code snippets provided by ChatGPT. `OpenAI completion` should just pick it up implicitly for the output panel content. | ||
|
||
## Proxy support | ||
|
||
That's it. Now you can set up a proxy for this plugin. | ||
You can set it up by overriding the proxy property in the `OpenAI completion` settings as follows: | ||
|
||
```json | ||
// Proxy setting | ||
"proxy": { | ||
// Proxy address | ||
"address": "127.0.0.1", | ||
|
||
// Proxy port | ||
"port": 9898 | ||
} | ||
``` | ||
|
||
## GPT-4 support | ||
|
||
It should just work, just set the `chat_model` setting to `GPT-4`. Please be patient while working with it. (1) It's **very** slow and an answer would appear only after it finishes its prompt. It could take up to 10 seconds easily. | ||
|
||
## Disclaimer | ||
|
||
Unfortunately, this version hasn't been covered by comprehensive testing, so there may be bugs. Please report them, and I'll be happy to release a patch. |
Oops, something went wrong.