Skip to content

Commit

Permalink
Merge branch 'develop'
Browse files Browse the repository at this point in the history
  • Loading branch information
yaroslavyaroslav committed Sep 23, 2024
2 parents aa8dd8f + 0655a19 commit 76be79b
Show file tree
Hide file tree
Showing 20 changed files with 328 additions and 360 deletions.
2 changes: 2 additions & 0 deletions .ruff.toml
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
line-length = 100

[format]
quote-style = "single"
4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,10 @@ The OpenAI Completion plugin has a settings file where you can set your OpenAI A

If you're here, it means that a model you're using with ollama is giving poor responses. This is because the `temperature` property of a model, which is 1, somewhat [doubles](https://github.com/ollama/ollama/blob/69be940bf6d2816f61c79facfa336183bc882720/openai/openai.go#L454) on ollama's side, so it becomes 2, which is a little bit too much for a good model's response. So, to make things work, you have to set the temperature to 1.

### Advertisement disabling

To disable the advertisement, add the `"advertisement": false` line to the settings of any assistant for which you wish it to be disabled.

## Key bindings

You can bind keys for a given plugin command in `Preferences` -> `Package Settings` -> `OpenAI` -> `Key Bindings`. For example you can bind "New Message" command like this:
Expand Down
33 changes: 19 additions & 14 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,25 @@
import sys

# clear modules cache if package is reloaded (after update?)
prefix = __package__ + ".plugins" # don't clear the base package
for module_name in [
module_name
for module_name in sys.modules
if module_name.startswith(prefix)
]:
prefix = __package__ + '.plugins' # type: ignore # don't clear the base package
for module_name in [module_name for module_name in sys.modules if module_name.startswith(prefix)]:
del sys.modules[module_name]
del prefix

from .plugins.openai import Openai
from .plugins.active_view_event import ActiveViewEventListener
from .plugins.openai_panel import OpenaiPanelCommand
from .plugins.stop_worker_execution import StopOpenaiExecutionCommand
from .plugins.worker_running_context import OpenaiWorkerRunningContext
from .plugins.settings_reloader import ReloadSettingsListener
from .plugins.output_panel import SharedOutputPanelListener, AIChatViewEventListener
from .plugins.buffer import TextStreamAtCommand, ReplaceRegionCommand, EraseRegionCommand
from .plugins.active_view_event import ActiveViewEventListener # noqa: E402, F401
from .plugins.ai_chat_event import AIChatViewEventListener # noqa: E402, F401
from .plugins.buffer import ( # noqa: E402, F401
EraseRegionCommand,
ReplaceRegionCommand,
TextStreamAtCommand,
)
from .plugins.openai import Openai # noqa: E402, F401
from .plugins.openai_panel import OpenaiPanelCommand # noqa: E402, F401
from .plugins.output_panel import SharedOutputPanelListener # noqa: E402, F401
from .plugins.settings_reloader import ReloadSettingsListener # noqa: E402, F401
from .plugins.stop_worker_execution import ( # noqa: E402
StopOpenaiExecutionCommand, # noqa: F401
)
from .plugins.worker_running_context import ( # noqa: E402,
OpenaiWorkerRunningContext, # noqa: F401
)
32 changes: 14 additions & 18 deletions plugins/active_view_event.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
from typing import Any, Dict, List, Optional
from __future__ import annotations

import logging
from typing import Any, Dict, List

import sublime
from sublime import View
from sublime_plugin import EventListener
import logging

from .cacher import Cacher
from .status_bar import StatusBarMode


logger = logging.getLogger(__name__)


Expand All @@ -17,21 +19,19 @@ class ActiveViewEventListener(EventListener):
def on_activated(self, view: View):
## FIXME: This is might be wrong, settings of view should be get not for an active view, but for a given window project view.
## It could be correct btw, as if a window with a specific settings gets active — it updated exact it status bar.
self.project_settings = (
sublime.active_window().active_view().settings().get('ai_assistant', None)
)
self.project_settings: Dict[str, str] | None = (
sublime.active_window().active_view().settings().get('ai_assistant')
) # type: ignore

# Logging disabled becuase it's too spammy. Uncomment in case of necessity.
# logger.debug(
# "project_settings exists: %s", "YES" if self.project_settings else "NO"
# )

cache_prefix = self.project_settings.get('cache_prefix') if self.project_settings else None

# Initialize Cacher with proper default handling for missing cache_prefix
self.cacher = (
Cacher(name=self.project_settings['cache_prefix'])
if self.project_settings
else Cacher()
)
self.cacher = Cacher(name=cache_prefix)

# logger.debug("cacher.history_file: %s", self.cacher.history_file)
# logger.debug("cacher.current_model_file: %s", self.cacher.current_model_file)
Expand All @@ -44,9 +44,7 @@ def on_activated(self, view: View):

settings = sublime.load_settings('openAI.sublime-settings')

status_hint_options: List[str] = (
settings.get('status_hint', []) if settings else []
)
status_hint_options: List[str] = settings.get('status_hint', []) if settings else [] # type: ignore

# logger.debug("status_hint_options: %s", status_hint_options)

Expand All @@ -56,7 +54,7 @@ def on_activated(self, view: View):
def update_status_bar(
self,
view: View,
assistant: Optional[Dict[str, Any]],
assistant: Dict[str, Any] | None,
status_hint_options: List[str],
):
if not assistant:
Expand All @@ -69,9 +67,7 @@ def update_status_bar(
if {'name', 'prompt_mode', 'chat_model'} <= assistant.keys():
statuses: List[str] = []
for key in ['name', 'prompt_mode', 'chat_model']:
lookup_key = (
key if key != 'name' else 'name_'
) # name is a reserved keyword
lookup_key = key if key != 'name' else 'name_' # name is a reserved keyword
if StatusBarMode[lookup_key].value in status_hint_options:
if key == 'chat_model':
statuses.append(assistant[key].upper())
Expand Down
44 changes: 44 additions & 0 deletions plugins/ai_chat_event.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from __future__ import annotations

from typing import Dict

from sublime import Window
from sublime_plugin import ViewEventListener

from .cacher import Cacher


class AIChatViewEventListener(ViewEventListener):
    """Keeps the 'AI Chat' tab's status bar updated with token-usage counts."""

    @classmethod
    def is_applicable(cls, settings) -> bool:
        # Attach only to views using one of the two syntaxes the chat is rendered in.
        syntax = settings.get('syntax')
        return syntax in (
            'Packages/Markdown/MultiMarkdown.sublime-syntax',
            'Packages/Markdown/PlainText.sublime-syntax',
        )

    def on_activated(self) -> None:
        # Refresh the status message whenever this view gains focus.
        self.update_status_message(self.view.window())  # type: ignore

    def update_status_message(self, window: Window) -> None:
        # Per-project settings may carry a cache prefix that namespaces the Cacher files.
        project_settings: Dict[str, str] | None = window.active_view().settings().get('ai_assistant')  # type: ignore

        prefix = project_settings.get('cache_prefix') if project_settings else None

        cacher = Cacher(name=prefix)
        if self.is_ai_chat_tab_active(window):
            message = self.get_status_message(cacher=cacher)
            view = window.active_view()
            if view and view.name() == 'AI Chat':
                view.set_status('ai_chat_status', message)

    def is_ai_chat_tab_active(self, window: Window) -> bool:
        # True only when the focused view is the dedicated 'AI Chat' tab.
        view = window.active_view()
        if view is None:
            return False
        return view.name() == 'AI Chat'

    def get_status_message(self, cacher: Cacher) -> str:
        # Missing/empty token cache counts as zero usage.
        counts = cacher.read_tokens_count()
        prompt_tokens = counts['prompt_tokens'] if counts else 0
        completion_tokens = counts['completion_tokens'] if counts else 0
        total_tokens = prompt_tokens + completion_tokens

        return f'[⬆️: {prompt_tokens:,} + ⬇️: {completion_tokens:,} = {total_tokens:,}]'
14 changes: 8 additions & 6 deletions plugins/assistant_settings.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from __future__ import annotations

from dataclasses import dataclass
from enum import Enum
from typing import Optional
from typing import Dict, Any


class PromptMode(Enum):
Expand All @@ -14,20 +16,20 @@ class PromptMode(Enum):
class AssistantSettings:
name: str
prompt_mode: PromptMode
url: Optional[str]
token: Optional[str]
url: str | None
token: str | None
chat_model: str
assistant_role: str
temperature: int
max_tokens: int
top_p: int
frequency_penalty: int
presence_penalty: int
placeholder: Optional[str]
placeholder: str | None
advertisement: bool


DEFAULT_ASSISTANT_SETTINGS = {
DEFAULT_ASSISTANT_SETTINGS: Dict[str, Any] = {
'placeholder': None,
'url': None,
'token': None,
Expand All @@ -36,7 +38,7 @@ class AssistantSettings:
'top_p': 1,
'frequency_penalty': 0,
'presence_penalty': 0,
'advertisement': True
'advertisement': True,
}


Expand Down
19 changes: 12 additions & 7 deletions plugins/buffer.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,34 @@
from sublime import Edit, Region, View
from sublime_plugin import TextCommand

class TextStreamer():

class TextStreamer:
def __init__(self, view: View) -> None:
self.view = view

def update_completion(self, completion: str):
## Till this line selection has to be cleared and the carret should be placed in to a desired starting point.
## So begin() and end() sould be the very same carret offset.
start_of_selection = self.view.sel()[0].begin() ## begin() because if we point an end there — it'll start to reverse prompting.
self.view.run_command("text_stream_at", {"position": start_of_selection, "text": completion})
## begin() because if we point an end there — it'll start to reverse prompting.
start_of_selection = self.view.sel()[0].begin()
self.view.run_command('text_stream_at', {'position': start_of_selection, 'text': completion})
return

def delete_selected_region(self, region: Region):
json_reg = {'a': region.begin(), 'b': region.end()}
self.view.run_command("erase_region", {"region": json_reg})
self.view.run_command('erase_region', {'region': json_reg})


class TextStreamAtCommand(TextCommand):
def run(self, edit: Edit, position: int, text: str):
def run(self, edit: Edit, position: int, text: str): # type: ignore
_ = self.view.insert(edit=edit, pt=position, text=text)


class ReplaceRegionCommand(TextCommand):
def run(self, edit: Edit, region, text: str):
def run(self, edit: Edit, region, text: str): # type: ignore
self.view.replace(edit=edit, region=Region(region['a'], region['b']), text=text)


class EraseRegionCommand(TextCommand):
def run(self, edit: Edit, region):
def run(self, edit: Edit, region): # type: ignore
self.view.erase(edit=edit, region=Region(region['a'], region['b']))
34 changes: 16 additions & 18 deletions plugins/cacher.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,17 @@
import sublime
import os
from . import jl_utility as jl
from __future__ import annotations

import json
import os
from json.decoder import JSONDecodeError
from typing import List, Dict, Iterator, Any, Optional
from typing import Any, Dict, Iterator, List

import sublime

from . import jl_utility as jl


class Cacher:
def __init__(self, name: str = '') -> None:
def __init__(self, name: str | None = None) -> None:
cache_dir = sublime.cache_path()
plugin_cache_dir = os.path.join(cache_dir, 'OpenAI completion')
if not os.path.exists(plugin_cache_dir):
Expand All @@ -16,21 +20,15 @@ def __init__(self, name: str = '') -> None:
# Create the file path to store the data
self.history_file = os.path.join(
plugin_cache_dir,
'{file_name}chat_history.jl'.format(
file_name=name + '_' if len(name) > 0 else ''
),
'{file_name}chat_history.jl'.format(file_name=name + '_' if name else ''),
)
self.current_model_file = os.path.join(
plugin_cache_dir,
'{file_name}current_assistant.json'.format(
file_name=name + '_' if len(name) > 0 else ''
),
'{file_name}current_assistant.json'.format(file_name=name + '_' if name else ''),
)
self.tokens_count_file = os.path.join(
plugin_cache_dir,
'{file_name}tokens_count.json'.format(
file_name=name + '_' if len(name) > 0 else ''
),
'{file_name}tokens_count.json'.format(file_name=name + '_' if name else ''),
)

def check_and_create(self, path: str):
Expand Down Expand Up @@ -61,11 +59,11 @@ def reset_tokens_count(self):
with open(self.tokens_count_file, 'w') as _:
pass # Truncate the file by opening it in 'w' mode and doing nothing

def read_tokens_count(self) -> Optional[Dict[str, int]]:
def read_tokens_count(self) -> Dict[str, int] | None:
self.check_and_create(self.tokens_count_file)
with open(self.tokens_count_file, 'r') as file:
try:
data: Optional[Dict[str, int]] = json.load(file)
data: Dict[str, int] | None = json.load(file)
except JSONDecodeError:
data = {'prompt_tokens': 0, 'completion_tokens': 0, 'total_tokens': 0}
return data
Expand All @@ -75,11 +73,11 @@ def save_model(self, data: Dict[str, Any]):
with open(self.current_model_file, 'w') as file:
json.dump(data, file)

def read_model(self) -> Optional[Dict[str, Any]]:
def read_model(self) -> Dict[str, Any] | None:
self.check_and_create(self.current_model_file)
with open(self.current_model_file, 'r') as file:
try:
data: Optional[Dict[str, Any]] = json.load(file)
data: Dict[str, Any] | None = json.load(file)
except JSONDecodeError:
# TODO: Handle this state, but keep in mind
# that it's completely legal to being a file empty for some (yet unspecified) state
Expand Down
3 changes: 2 additions & 1 deletion plugins/errors/OpenAIException.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from sublime import error_message
from logging import exception

from sublime import error_message


class OpenAIException(Exception):
"""Exception raised for errors in the input.
Expand Down
13 changes: 6 additions & 7 deletions plugins/image_handler.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import logging
import os
import re
from urllib.parse import urlparse

import sublime
import logging

logger = logging.getLogger(__name__)

Expand All @@ -11,9 +12,7 @@ class ImageValidator:
@staticmethod
def get_valid_image_input(text: str) -> str:
"""Check if the input text contains valid image URLs or file paths; return the original string if valid."""
clipboard_content = (
sublime.get_clipboard().strip() if sublime.get_clipboard() else text.strip()
)
clipboard_content = sublime.get_clipboard().strip() if sublime.get_clipboard() else text.strip()

# Split the content by spaces or newlines
potential_images = re.split(r'\n', clipboard_content)
Expand Down Expand Up @@ -41,12 +40,12 @@ def is_valid_url(text: str) -> bool:
result = urlparse(text)
# Ensure the URL scheme is HTTP/HTTPS and it has a valid image extension
return all(
[result.scheme in ('http', 'https'), result.netloc]
[result.scheme in ('http', 'https'), result.netloc] # type: ignore
) and re.match(r'.*\.(jpg|jpeg|png)$', text)
except:
except: # noqa: E722
return False

@staticmethod
def is_local_image(text: str) -> bool:
"""Check if the text is a valid local file path pointing to an image."""
return os.path.isfile(text) and re.match(r'.*\.(jpg|jpeg|png)$', text)
return os.path.isfile(text) and re.match(r'.*\.(jpg|jpeg|png)$', text) # type: ignore
Loading

0 comments on commit 76be79b

Please sign in to comment.