diff --git a/extensions/void/package-lock.json b/extensions/void/package-lock.json
index 2f9292773..4ac8becb0 100644
--- a/extensions/void/package-lock.json
+++ b/extensions/void/package-lock.json
@@ -9,6 +9,7 @@
       "version": "0.0.1",
       "dependencies": {
         "@anthropic-ai/sdk": "^0.27.1",
+        "ollama": "^0.5.9",
         "openai": "^4.57.0"
       },
       "devDependencies": {
@@ -32,7 +33,6 @@
         "eslint-plugin-react-hooks": "^4.6.2",
         "globals": "^15.9.0",
         "marked": "^14.1.0",
-        "ollama": "^0.5.8",
         "postcss": "^8.4.41",
         "react": "^18.3.1",
         "react-dom": "^18.3.1",
@@ -5973,8 +5973,7 @@
       "version": "0.5.9",
      "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.9.tgz",
       "integrity": "sha512-F/KZuDRC+ZsVCuMvcOYuQ6zj42/idzCkkuknGyyGVmNStMZ/sU3jQpvhnl4SyC0+zBzLiKNZJnJeuPFuieWZvQ==",
-      "dev": true,
       "license": "MIT",
       "dependencies": {
         "whatwg-fetch": "^3.6.20"
       }
@@ -8181,9 +8180,8 @@
     "node_modules/whatwg-fetch": {
       "version": "3.6.20",
       "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
       "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==",
-      "dev": true,
       "license": "MIT"
     },
     "node_modules/whatwg-url": {
       "version": "5.0.0",
diff --git a/extensions/void/package.json b/extensions/void/package.json
index 0f9105b2f..fd233e42c 100644
--- a/extensions/void/package.json
+++ b/extensions/void/package.json
@@ -55,10 +55,15 @@
					"default": "",
					"description": "Greptile - Github PAT (gives Greptile access to your repo)"
				},
-				"void.ollamaSettings": {
+				"void.ollamaSettings.endpoint": {
					"type": "string",
					"default": "",
-					"description": "Ollama settings (coming soon...)"
+					"description": "Ollama Endpoint - Local API server can be started with `OLLAMA_ORIGINS=\"vscode-webview://*\" ollama serve`"
+				},
+				"void.ollamaSettings.model": {
+					"type": "string",
+					"default": "",
+					"description": "Ollama model to use"
				}
			}
		},
@@ -153,7 +158,6 @@
		"eslint-plugin-react-hooks": "^4.6.2",
		"globals": "^15.9.0",
		"marked": "^14.1.0",
-		"ollama": "^0.5.8",
		"postcss": "^8.4.41",
		"react": "^18.3.1",
		"react-dom": "^18.3.1",
@@ -165,6 +169,7 @@
	},
	"dependencies": {
		"@anthropic-ai/sdk": "^0.27.1",
+		"ollama": "^0.5.9",
		"openai": "^4.57.0"
	}
}
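Note: the old catch-all `void.ollamaSettings` stub is replaced by two concrete settings, `void.ollamaSettings.endpoint` and `void.ollamaSettings.model`, and the `ollama` package moves from devDependencies to dependencies (hence the lockfile losing `"dev": true` on `ollama` and `whatwg-fetch`). A minimal sketch of reading the new settings on the extension side, mirroring the `getApiConfig` change in `extension.ts` further down; the `get<string>` type argument is an assumption, the PR calls `get` untyped:

import * as vscode from 'vscode';

// Read the two new Ollama settings; both default to '' per the contribution above.
const cfg = vscode.workspace.getConfiguration('void');
const ollamaEndpoint = cfg.get<string>('ollamaSettings.endpoint') ?? '';
const ollamaModel = cfg.get<string>('ollamaSettings.model') ?? '';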
diff --git a/extensions/void/src/SidebarWebviewProvider.ts b/extensions/void/src/SidebarWebviewProvider.ts
index fe01c8001..d5a699927 100644
--- a/extensions/void/src/SidebarWebviewProvider.ts
+++ b/extensions/void/src/SidebarWebviewProvider.ts
@@ -14,21 +14,62 @@ function getNonce() {
 export class SidebarWebviewProvider implements vscode.WebviewViewProvider {
 	public static readonly viewId = 'void.viewnumberone';
 
-	public webview: Promise<vscode.Webview> // used to send messages to the webview
+	public webview: Promise<vscode.Webview> // used to send messages to the webview, resolved by _res in resolveWebviewView
+	private _res: (c: vscode.Webview) => void // used to resolve the webview
 	private readonly _extensionUri: vscode.Uri
-	private _res: (c: vscode.Webview) => void // used to resolve the webview
+
+	private _webviewView?: vscode.WebviewView; // only used inside onDidChangeConfiguration
 
 	constructor(context: vscode.ExtensionContext) {
-		// const extensionPath = context.extensionPath // the directory where the extension is installed, might be useful later, not sure for what though... was included in webviewProvider code
+		// const extensionPath = context.extensionPath // the directory where the extension is installed, might be useful later... was included in webviewProvider code
 		this._extensionUri = context.extensionUri
 
 		let temp_res: typeof this._res | undefined = undefined
 		this.webview = new Promise((res, rej) => { temp_res = res })
 		if (!temp_res) throw new Error("sidebar provider: resolver was undefined")
 		this._res = temp_res
+
+		vscode.workspace.onDidChangeConfiguration(event => {
+			if (event.affectsConfiguration('void.ollamaSettings.endpoint')) {
+				if (this._webviewView) {
+					this.updateWebviewHTML(this._webviewView.webview);
+				}
+			}
+		});
 	}
 
+	private updateWebviewHTML(webview: vscode.Webview) {
+		const allowed_urls = ['https://api.anthropic.com', 'https://api.openai.com', 'https://api.greptile.com'];
+		const ollamaEndpoint: string | undefined = vscode.workspace.getConfiguration('void').get('ollamaSettings.endpoint');
+		if (ollamaEndpoint)
+			allowed_urls.push(ollamaEndpoint);
+
+		const scriptUri = webview.asWebviewUri(vscode.Uri.joinPath(this._extensionUri, 'dist/sidebar/index.js'));
+		const stylesUri = webview.asWebviewUri(vscode.Uri.joinPath(this._extensionUri, 'dist/sidebar/styles.css'));
+		const rootUri = webview.asWebviewUri(vscode.Uri.joinPath(this._extensionUri));
+		const nonce = getNonce();
+
+		const webviewHTML = `<!DOCTYPE html>
+			<html lang="en">
+			<head>
+				<meta charset="UTF-8">
+				<meta name="viewport" content="width=device-width, initial-scale=1.0">
+				<title>Custom View</title>
+				<meta http-equiv="Content-Security-Policy" content="default-src 'none'; script-src 'nonce-${nonce}'; style-src ${webview.cspSource}; connect-src ${allowed_urls.join(' ')};">
+				<link href="${stylesUri}" rel="stylesheet">
+			</head>
+			<body>
+				<div id="root"></div>
+				<script nonce="${nonce}" src="${scriptUri}"></script>
+			</body>
+			</html>`;
+
+		webview.html = webviewHTML;
+	}
+
 
 	// called internally by vscode
 	resolveWebviewView(
 		webviewView: vscode.WebviewView,
 		context: vscode.WebviewViewResolveContext<unknown>,
 		token: vscode.CancellationToken,
 	) {
 
-		const webview = webviewView.webview
+		const webview = webviewView.webview;
 
 		webview.options = { enableScripts: true, localResourceRoots: [this._extensionUri] };
 
-		// This allows us to use React in vscode
-		// when you run `npm run build`, we take the React code in the `sidebar` folder
-		// and compile it into `dist/sidebar/index.js` and `dist/sidebar/styles.css`
-		// we render that code here
-		const rootPath = this._extensionUri;
-		const scriptUri = webview.asWebviewUri(vscode.Uri.joinPath(rootPath, 'dist/sidebar/index.js'));
-		const stylesUri = webview.asWebviewUri(vscode.Uri.joinPath(rootPath, 'dist/sidebar/styles.css'));
-		const rootUri = webview.asWebviewUri(vscode.Uri.joinPath(rootPath));
-
-		const nonce = getNonce(); // only scripts with the nonce are allowed to run, this is a recommended security measure
-
-		const allowed_urls = ['https://api.anthropic.com', 'https://api.openai.com', 'https://api.greptile.com']
-		webview.html = `<!DOCTYPE html>
-			<html lang="en">
-			<head>
-				<meta charset="UTF-8">
-				<meta name="viewport" content="width=device-width, initial-scale=1.0">
-				<title>Custom View</title>
-				<meta http-equiv="Content-Security-Policy" content="default-src 'none'; script-src 'nonce-${nonce}'; style-src ${webview.cspSource}; connect-src ${allowed_urls.join(' ')};">
-				<link href="${stylesUri}" rel="stylesheet">
-			</head>
-			<body>
-				<div id="root"></div>
-				<script nonce="${nonce}" src="${scriptUri}"></script>
-			</body>
-			</html>`;
-
+		this.updateWebviewHTML(webview);
 
+		// resolve webview and _webviewView
 		this._res(webview);
+		this._webviewView = webviewView;
 	}
 }
diff --git a/extensions/void/src/common/sendLLMMessage.ts b/extensions/void/src/common/sendLLMMessage.ts
index 9e47a80a1..1d9a9a78a 100644
--- a/extensions/void/src/common/sendLLMMessage.ts
+++ b/extensions/void/src/common/sendLLMMessage.ts
@@ -1,7 +1,8 @@
 import Anthropic from '@anthropic-ai/sdk';
 import OpenAI from 'openai';
+import { Ollama } from 'ollama/browser'
+import { getVSCodeAPI } from '../sidebar/getVscodeApi';
 
-// import ollama from 'ollama'
 
 export type ApiConfig = {
 	anthropic: {
@@ -22,7 +23,8 @@
 		}
 	},
 	ollama: {
-		// TODO
+		endpoint: string,
+		model: string
 	},
 	whichApi: string
 }
@@ -103,11 +105,13 @@
 
 // OpenAI
 const sendOpenAIMsg: SendLLMMessageFnTypeInternal = ({ messages, onText, onFinalMessage, apiConfig }) => {
 
-	let did_abort = false
+	let didAbort = false
 	let fullText = ''
 
 	// if abort is called, onFinalMessage is NOT called, and no later onTexts are called either
-	let abort: () => void = () => { did_abort = true }
+	let abort: () => void = () => {
+		didAbort = true;
+	};
 
 	const openai = new OpenAI({ apiKey: apiConfig.openai.apikey, dangerouslyAllowBrowser: true });
 
@@ -118,13 +122,13 @@
 		})
 		.then(async response => {
 			abort = () => {
-				// response.controller.abort() // this isn't needed now, to keep consistency with claude will leave it commented
-				did_abort = true;
+				// response.controller.abort()
+				didAbort = true;
 			}
 			// when receive text
 			try {
 				for await (const chunk of response) {
-					if (did_abort) return;
+					if (didAbort) return;
 					const newText = chunk.choices[0]?.delta?.content || '';
 					fullText += newText;
 					onText(newText, fullText);
 				}
@@ -136,8 +140,50 @@
+				onFinalMessage(fullText);
 			}
 			// when error/fail
 			catch (error) {
 				console.error('Error in OpenAI stream:', error);
 				onFinalMessage(fullText);
 			}
-			// when we get the final message on this stream
-			onFinalMessage(fullText)
 		})
 	return { abort };
 };
+
+
+
+// Ollama
+export const sendOllamaMsg: SendLLMMessageFnTypeInternal = ({ messages, onText, onFinalMessage, apiConfig }) => {
+
+	let didAbort = false
+	let fullText = ""
+
+	// if abort is called, onFinalMessage is NOT called, and no later onTexts are called either
+	let abort = () => {
+		didAbort = true;
+	};
+
+	const ollama = new Ollama({ host: apiConfig.ollama.endpoint })
+
+	ollama.chat({
+		model: apiConfig.ollama.model,
+		messages: messages,
+		stream: true,
+	})
+		.then(async stream => {
+			abort = () => {
+				// ollama.abort()
+				didAbort = true
+			}
+			// iterate through the stream
+			try {
+				for await (const chunk of stream) {
+					if (didAbort) return;
+					const newText = chunk.message.content;
+					fullText += newText;
+					onText(newText, fullText);
+				}
+				onFinalMessage(fullText);
+			}
+			// when error/fail
+			catch (error) {
+				console.error('Error:', error);
+				onFinalMessage(fullText);
+			}
+		})
+	return { abort };
+};
@@ -150,11 +196,11 @@
 
 const sendGreptileMsg: SendLLMMessageFnTypeInternal = ({ messages, onText, onFinalMessage, apiConfig }) => {
 
-	let did_abort = false
+	let didAbort = false
 	let fullText = ''
 
 	// if abort is called, onFinalMessage is NOT called, and no later onTexts are called either
-	let abort: () => void = () => { did_abort = true }
+	let abort: () => void = () => { didAbort = true }
 
 	fetch('https://api.greptile.com/v2/query', {
@@ -178,7 +224,7 @@
 		})
 		// TODO make this actually stream, right now it just sends one message at the end
 		.then(async responseArr => {
-			if (did_abort)
+			if (didAbort)
 				return
 
 			for (let response of responseArr) {
@@ -213,74 +259,26 @@
 
 	return { abort }
 
-
-
 }
 
+
 export const sendLLMMessage: SendLLMMessageFnTypeExternal = ({ messages, onText, onFinalMessage, apiConfig }) => {
 	if (!apiConfig) return { abort: () => { } }
 
-	const whichApi = apiConfig.whichApi
-
-	if (whichApi === 'anthropic') {
-		return sendClaudeMsg({ messages, onText, onFinalMessage, apiConfig })
-	}
-	else if (whichApi === 'openai') {
-		return sendOpenAIMsg({ messages, onText, onFinalMessage, apiConfig })
-	}
-	else if (whichApi === 'greptile') {
-		return sendGreptileMsg({ messages, onText, onFinalMessage, apiConfig })
-	}
-	else if (whichApi === 'ollama') {
-		return sendClaudeMsg({ messages, onText, onFinalMessage, apiConfig }) // TODO
-	}
-	else {
-		console.error(`Error: whichApi was ${whichApi}, which is not recognized!`)
-		return sendClaudeMsg({ messages, onText, onFinalMessage, apiConfig }) // TODO
-	}
+	switch (apiConfig.whichApi) {
+		case 'anthropic':
+			return sendClaudeMsg({ messages, onText, onFinalMessage, apiConfig });
+		case 'openai':
+			return sendOpenAIMsg({ messages, onText, onFinalMessage, apiConfig });
+		case 'greptile':
+			return sendGreptileMsg({ messages, onText, onFinalMessage, apiConfig });
+		case 'ollama':
+			return sendOllamaMsg({ messages, onText, onFinalMessage, apiConfig });
+		default:
+			console.error(`Error: whichApi was ${apiConfig.whichApi}, which is not recognized!`);
+			return { abort: () => { } }
+			//return sendClaudeMsg({ messages, onText, onFinalMessage, apiConfig }); // TODO
+	}
 }
-
-
-// Ollama
-// const sendOllamaMsg: sendMsgFnType = ({ messages, onText, onFinalMessage }) => {
-
-// 	let did_abort = false
-// 	let fullText = ''
-
-// 	// if abort is called, onFinalMessage is NOT called, and no later onTexts are called either
-// 	let abort: () => void = () => {
-// 		did_abort = true
-// 	}
-
-// 	ollama.chat({ model: 'llama3.1', messages: messages, stream: true })
-// 		.then(async response => {
-
-// 			abort = () => {
-// 				// response.abort() // this isn't needed now, to keep consistency with claude will leave it commented for now
-// 				did_abort = true;
-// 			}
-
-// 			// when receive text
-// 			try {
-// 				for await (const part of response) {
-// 					if (did_abort) return
-// 					let newText = part.message.content
-// 					fullText += newText
-// 					onText(newText, fullText)
-// 				}
-// 			}
-// 			// when error/fail
-// 			catch (e) {
-// 				onFinalMessage(fullText)
-// 				return
-// 			}
-
-// 			// when we get the final message on this stream
-// 			onFinalMessage(fullText)
-// 		})
-
-// 	return { abort };
-// };
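For reference, a usage sketch of the refactored `sendLLMMessage` entry point above with the new `'ollama'` branch selected. The `ApiConfig` shape comes from this diff; the import path, endpoint, model name, message shape (the OpenAI/Ollama chat format used throughout the file), and the cast that skips the other provider keys are illustrative only:

import { sendLLMMessage, ApiConfig } from './common/sendLLMMessage';

const apiConfig = {
	ollama: { endpoint: 'http://localhost:11434', model: 'llama3.1' }, // example values
	whichApi: 'ollama',
} as ApiConfig; // anthropic/openai/greptile keys omitted here for brevity

const { abort } = sendLLMMessage({
	messages: [{ role: 'user', content: 'Hello!' }],
	onText: (newText, fullText) => { /* render the growing message */ },
	onFinalMessage: (fullText) => { /* commit the finished message */ },
	apiConfig,
});

// Per the comments in sendLLMMessage.ts: after abort(), no further onText
// calls are made and onFinalMessage is never called.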
'' 
	}
	return apiConfig
}
 
+
 export function activate(context: vscode.ExtensionContext) {
 
 	// 1. Mount the chat sidebar
@@ -112,7 +114,8 @@ export function activate(context: vscode.ExtensionContext) {
 			// send contents to webview
 			webview.postMessage({ type: 'files', files, } satisfies WebviewMessage)
 
-		} else if (m.type === 'applyCode') {
+		}
+		else if (m.type === 'applyCode') {
 
 			const editor = vscode.window.activeTextEditor
 			if (!editor) {
@@ -132,7 +135,6 @@ export function activate(context: vscode.ExtensionContext) {
 
 		}
 		else {
-			console.error('unrecognized command', m.type, m)
 		}
 	})
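A note on why `updateWebviewHTML` pushes the endpoint into `allowed_urls`: the sidebar runs in a webview, so a local Ollama server has to be reachable on both sides of the fence. The webview's Content-Security-Policy must list the endpoint under `connect-src`, and Ollama itself must accept the webview origin, which is what the `OLLAMA_ORIGINS="vscode-webview://*" ollama serve` hint in the setting description is for. A sketch of the gating; the endpoint value is an example, and the CSP in the actual template also covers scripts and styles:

// Illustrative: how connect-src gates webview network access.
const allowed_urls = [
	'https://api.anthropic.com',
	'https://api.openai.com',
	'https://api.greptile.com',
	'http://localhost:11434', // an example void.ollamaSettings.endpoint
];
const connectSrc = `connect-src ${allowed_urls.join(' ')};`;
// Any fetch from the webview to a host not listed here is blocked by the
// browser before it leaves the page, which is why the endpoint must be added
// before the HTML (and its CSP meta tag) is regenerated.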
diff --git a/extensions/void/src/sidebar/Sidebar.tsx b/extensions/void/src/sidebar/Sidebar.tsx
index 4cba1ccb3..539b06be9 100644
--- a/extensions/void/src/sidebar/Sidebar.tsx
+++ b/extensions/void/src/sidebar/Sidebar.tsx
@@ -266,7 +266,7 @@ const Sidebar = () => {
 					{!selection?.selectionStr ? null
-						: (<…
-						…)}
+						: (<…
+						…)}
@@ … @@
 …: { [c in WebviewMessage['type']]: ((m: any) => void)[] } = {
 	"requestFiles": [],
 	"files": [],
 	"apiConfig": [],
-	"getApiConfig": []
+	"getApiConfig": [],
 }
 
 // use this function to await responses
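The final hunk touches the webview-side registry of message callbacks keyed by `WebviewMessage['type']`, and the trailing `// use this function to await responses` comment refers to a helper defined just below the hunk. A hypothetical one-shot awaiter over such a registry could look like this; the names and types here are illustrative, not taken from the PR:

type WebviewMessageType = 'requestFiles' | 'files' | 'apiConfig' | 'getApiConfig';

const onMessageFns: { [t in WebviewMessageType]: ((m: any) => void)[] } = {
	requestFiles: [], files: [], apiConfig: [], getApiConfig: [],
};

// Resolve once with the next message of the given type; the dispatcher is
// assumed to drop the callback after calling it.
const awaitVSCodeResponse = (type: WebviewMessageType) =>
	new Promise<any>((resolve) => { onMessageFns[type].push(resolve); });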