From 0084befac392fce328a739b8afb584c5bbf7ccd0 Mon Sep 17 00:00:00 2001 From: Olasunkanmi Oyinlola Date: Tue, 28 Jan 2025 09:13:03 +0800 Subject: [PATCH] Improve type safety and reusability in BaseEmitter Introduce LocalStorageManager for secure data storage --- src/agents/orchestrator.ts | 2 +- src/commands/event-generator.ts | 82 ++++++++++---- src/emitter/agent-emitter.ts | 6 +- src/emitter/emitter.ts | 22 +++- src/extension.ts | 114 ++++++++++++++++---- src/infrastructure/storage/local-storage.ts | 29 +++++ src/memory/base.ts | 11 +- src/providers/anthropic.ts | 47 ++++++-- src/providers/gemini.ts | 35 ++++-- src/providers/groq.ts | 35 ++++-- src/services/generative-ai-model-manager.ts | 3 + src/utils/utils.ts | 19 +++- 12 files changed, 321 insertions(+), 84 deletions(-) create mode 100644 src/infrastructure/storage/local-storage.ts diff --git a/src/agents/orchestrator.ts b/src/agents/orchestrator.ts index 8b16714..3ac8f1d 100644 --- a/src/agents/orchestrator.ts +++ b/src/agents/orchestrator.ts @@ -7,7 +7,7 @@ export class Orchestrator implements vscode.Disposable { constructor(private readonly aiAgent: BaseAiAgent) { this.disposables.push( - this.aiAgent.onStatus(this.handleStatus.bind(this)), + this.aiAgent.onStatusChange(this.handleStatus.bind(this)), this.aiAgent.onError(this.handleError.bind(this)), ); } diff --git a/src/commands/event-generator.ts b/src/commands/event-generator.ts index b0b57b9..1de1ace 100644 --- a/src/commands/event-generator.ts +++ b/src/commands/event-generator.ts @@ -3,7 +3,11 @@ import Anthropic from "@anthropic-ai/sdk"; import { GenerativeModel, GoogleGenerativeAI } from "@google/generative-ai"; import Groq from "groq-sdk"; import * as vscode from "vscode"; -import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant"; +import { + APP_CONFIG, + COMMON, + generativeAiModels, +} from "../application/constant"; import { AnthropicWebViewProvider } from "../providers/anthropic"; import { GeminiWebViewProvider } from "../providers/gemini"; import { GroqWebViewProvider } from "../providers/groq"; @@ -38,7 +42,7 @@ export abstract class EventGenerator implements IEventGenerator { constructor( private readonly action: string, _context: vscode.ExtensionContext, - errorMessage?: string + errorMessage?: string, ) { this.context = _context; this.error = errorMessage; @@ -68,13 +72,15 @@ export abstract class EventGenerator implements IEventGenerator { return getConfigValue(configKey); } - protected createModel(): { generativeAi: string; model: any; modelName: string } | undefined { + protected createModel(): + | { generativeAi: string; model: any; modelName: string } + | undefined { try { let model; let modelName = ""; if (!this.generativeAi) { vscodeErrorMessage( - "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name" + "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name", ); } if (this.generativeAi === generativeAiModels.GROQ) { @@ -82,7 +88,7 @@ export abstract class EventGenerator implements IEventGenerator { modelName = this.groqModel; if (!apiKey || !modelName) { vscodeErrorMessage( - "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name" + "Configuration not found. Go to settings, search for Your coding buddy. 
Fill up the model and model name", ); } model = this.createGroqModel(apiKey); @@ -108,7 +114,9 @@ export abstract class EventGenerator implements IEventGenerator { return { generativeAi: this.generativeAi, model, modelName }; } catch (error) { console.error("Error creating model:", error); - vscode.window.showErrorMessage("An error occurred while creating the model. Please try again."); + vscode.window.showErrorMessage( + "An error occurred while creating the model. Please try again.", + ); } } @@ -145,7 +153,9 @@ export abstract class EventGenerator implements IEventGenerator { return new Groq({ apiKey }); } - protected async generateModelResponse(text: string): Promise { + protected async generateModelResponse( + text: string, + ): Promise { try { const activeModel = this.createModel(); if (!activeModel) { @@ -182,7 +192,7 @@ export abstract class EventGenerator implements IEventGenerator { if (!response) { throw new Error( - "Could not generate response. Check your settings, ensure the API keys and Model Name is added properly." + "Could not generate response. Check your settings, ensure the API keys and Model Name is added properly.", ); } if (this.action.includes("chart")) { @@ -191,7 +201,9 @@ export abstract class EventGenerator implements IEventGenerator { return response; } catch (error) { console.error("Error generating response:", error); - vscode.window.showErrorMessage("An error occurred while generating the response. Please try again."); + vscode.window.showErrorMessage( + "An error occurred while generating the response. Please try again.", + ); } } @@ -202,12 +214,19 @@ export abstract class EventGenerator implements IEventGenerator { return inputString; } - async generateGeminiResponse(model: any, text: string): Promise { + async generateGeminiResponse( + model: any, + text: string, + ): Promise { const result = await model.generateContent(text); return result ? await result.response.text() : undefined; } - private async anthropicResponse(model: Anthropic, generativeAiModel: string, userPrompt: string) { + private async anthropicResponse( + model: Anthropic, + generativeAiModel: string, + userPrompt: string, + ) { try { const response = await model.messages.create({ model: generativeAiModel, @@ -218,14 +237,22 @@ export abstract class EventGenerator implements IEventGenerator { return response.content[0].text; } catch (error) { console.error("Error generating response:", error); - vscode.window.showErrorMessage("An error occurred while generating the response. Please try again."); + vscode.window.showErrorMessage( + "An error occurred while generating the response. Please try again.", + ); return; } } - private async groqResponse(model: Groq, prompt: string, generativeAiModel: string): Promise { + private async groqResponse( + model: Groq, + prompt: string, + generativeAiModel: string, + ): Promise { try { - const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : []; + const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) + ? Memory.get(COMMON.GROQ_CHAT_HISTORY) + : []; const params = { messages: [ ...chatHistory, @@ -237,11 +264,14 @@ export abstract class EventGenerator implements IEventGenerator { model: generativeAiModel, }; - const completion: Groq.Chat.ChatCompletion = await model.chat.completions.create(params); + const completion: Groq.Chat.ChatCompletion = + await model.chat.completions.create(params); return completion.choices[0]?.message?.content ?? 
undefined; } catch (error) { console.error("Error generating response:", error); - vscode.window.showErrorMessage("An error occurred while generating the response. Please try again."); + vscode.window.showErrorMessage( + "An error occurred while generating the response. Please try again.", + ); return; } } @@ -250,7 +280,9 @@ export abstract class EventGenerator implements IEventGenerator { abstract createPrompt(text?: string): any; - async generateResponse(message?: string): Promise { + async generateResponse( + message?: string, + ): Promise { this.showInformationMessage(); let prompt; const selectedCode = this.getSelectedWindowArea(); @@ -262,7 +294,9 @@ export abstract class EventGenerator implements IEventGenerator { if (message && selectedCode) { prompt = await this.createPrompt(`${message} \n ${selectedCode}`); } else { - message ? (prompt = await this.createPrompt(message)) : (prompt = await this.createPrompt(selectedCode)); + message + ? (prompt = await this.createPrompt(message)) + : (prompt = await this.createPrompt(selectedCode)); } if (!prompt) { @@ -345,19 +379,25 @@ export abstract class EventGenerator implements IEventGenerator { placeHolder: "Enter instructions for CodeBuddy", ignoreFocusOut: true, validateInput: (text) => { - return text === "" ? "Enter instructions for CodeBuddy or press Escape to close chat box" : null; + return text === "" + ? "Enter instructions for CodeBuddy or press Escape to close chat box" + : null; }, }); return userPrompt; } catch (error) { - vscode.window.showInformationMessage(`Error occured while getting user prompt`); + vscode.window.showInformationMessage( + `Error occured while getting user prompt`, + ); console.log(error); } } async execute(message?: string): Promise { let prompt: string | undefined; - const response = (await this.generateResponse(prompt ? prompt : message)) as string; + const response = (await this.generateResponse( + prompt ? prompt : message, + )) as string; if (!response) { vscode.window.showErrorMessage("model not reponding, try again later"); return; diff --git a/src/emitter/agent-emitter.ts b/src/emitter/agent-emitter.ts index 55a4562..542ccd9 100644 --- a/src/emitter/agent-emitter.ts +++ b/src/emitter/agent-emitter.ts @@ -8,7 +8,7 @@ import { import * as vscode from "vscode"; export class AgentEventEmitter extends BaseEmitter { - onStatus: vscode.Event = this.createEvent("onStatus"); + onStatusChange: vscode.Event = this.createEvent("onStatus"); onError: vscode.Event = this.createEvent("onError"); public emitError(message: string, code: string) { @@ -28,4 +28,8 @@ export class AgentEventEmitter extends BaseEmitter { timestamp: Date.now(), }); } + + public dispose(): void { + super.dispose(); + } } diff --git a/src/emitter/emitter.ts b/src/emitter/emitter.ts index 9b1eba4..bcb97d2 100644 --- a/src/emitter/emitter.ts +++ b/src/emitter/emitter.ts @@ -1,6 +1,6 @@ import * as vscode from "vscode"; import { Logger } from "../infrastructure/logger/logger"; -export class BaseEmitter { +export class BaseEmitter> { protected logger: Logger; constructor() { this.logger = new Logger("BaseEmitter"); @@ -8,12 +8,20 @@ export class BaseEmitter { private readonly emitters: Map> = new Map(); + /** + * Creates a new event for the given event name, reusing an existing emitter if one is already registered. + * @param name The name of the event to create. + * @returns The event that was created or retrieved. 
+ */ protected createEvent( name: K, ): vscode.Event { try { - const emitter = new vscode.EventEmitter(); - this.emitters.set(name, emitter); + let emitter = this.emitters.get(name); + if (!emitter) { + emitter = new vscode.EventEmitter(); + this.emitters.set(name, emitter); + } return emitter.event; } catch (error) { this.logger.error("Error generating embeddings", error); @@ -21,6 +29,11 @@ export class BaseEmitter { } } + /** + * Emits the given event with the provided data, if an emitter exists for the event name. + * @param name The name of the event to emit. + * @param data The data to emit with the event. + */ protected emit(name: K, data: EventMap[K]): void { try { const emitter = this.emitters.get(name); @@ -31,6 +44,9 @@ export class BaseEmitter { } } + /** + * Disposes of all stored event emitters, freeing up any system resources they were using. + */ public dispose(): void { this.emitters.forEach((emitter) => emitter.dispose()); } diff --git a/src/extension.ts b/src/extension.ts index a40fd7f..7ccce97 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -1,5 +1,10 @@ import * as vscode from "vscode"; -import { APP_CONFIG, generativeAiModels, OLA_ACTIONS, USER_MESSAGE } from "./application/constant"; +import { + APP_CONFIG, + generativeAiModels, + OLA_ACTIONS, + USER_MESSAGE, +} from "./application/constant"; import { Comments } from "./commands/comment"; import { ExplainCode } from "./commands/explain"; import { FixError } from "./commands/fixError"; @@ -22,14 +27,28 @@ import { FileUploader } from "./services/file-uploader"; import { setUpGenerativeAiModel } from "./services/generative-ai-model-manager"; import { getConfigValue } from "./utils/utils"; import { Memory } from "./memory/base"; +import { AgentEventEmitter } from "./emitter/agent-emitter"; -const { geminiKey, geminiModel, groqApiKey, groqModel, anthropicApiKey, anthropicModel, grokApiKey, grokModel } = - APP_CONFIG; +const { + geminiKey, + geminiModel, + groqApiKey, + groqModel, + anthropicApiKey, + anthropicModel, + grokApiKey, + grokModel, +} = APP_CONFIG; const connectDB = async () => { - await dbManager.connect("file:/Users/olasunkanmi/Documents/Github/codebuddy/patterns/dev.db"); + await dbManager.connect( + "file:/Users/olasunkanmi/Documents/Github/codebuddy/patterns/dev.db", + ); }; +let quickFixCodeAction: vscode.Disposable; +let agentEventEmmitter: AgentEventEmitter; + export async function activate(context: vscode.ExtensionContext) { try { Memory.getInstance(); @@ -65,19 +84,52 @@ export async function activate(context: vscode.ExtensionContext) { generateCodeChart, inlineChat, } = OLA_ACTIONS; - const getComment = new Comments(`${USER_MESSAGE} generates the code comments...`, context); - const getInLineChat = new InLineChat(`${USER_MESSAGE} generates a response...`, context); - const generateOptimizeCode = new OptimizeCode(`${USER_MESSAGE} optimizes the code...`, context); - const generateRefactoredCode = new RefactorCode(`${USER_MESSAGE} refactors the code...`, context); - const explainCode = new ExplainCode(`${USER_MESSAGE} explains the code...`, context); - const generateReview = new ReviewCode(`${USER_MESSAGE} reviews the code...`, context); - const codeChartGenerator = new CodeChartGenerator(`${USER_MESSAGE} creates the code chart...`, context); + const getComment = new Comments( + `${USER_MESSAGE} generates the code comments...`, + context, + ); + const getInLineChat = new InLineChat( + `${USER_MESSAGE} generates a response...`, + context, + ); + const generateOptimizeCode = new OptimizeCode( + 
`${USER_MESSAGE} optimizes the code...`, + context, + ); + const generateRefactoredCode = new RefactorCode( + `${USER_MESSAGE} refactors the code...`, + context, + ); + const explainCode = new ExplainCode( + `${USER_MESSAGE} explains the code...`, + context, + ); + const generateReview = new ReviewCode( + `${USER_MESSAGE} reviews the code...`, + context, + ); + const codeChartGenerator = new CodeChartGenerator( + `${USER_MESSAGE} creates the code chart...`, + context, + ); const codePattern = fileUpload; - const knowledgeBase = new ReadFromKnowledgeBase(`${USER_MESSAGE} generate your code pattern...`, context); - const generateCommitMessage = new GenerateCommitMessage(`${USER_MESSAGE} generates a commit message...`, context); - const generateInterviewQuestions = new InterviewMe(`${USER_MESSAGE} generates interview questions...`, context); + const knowledgeBase = new ReadFromKnowledgeBase( + `${USER_MESSAGE} generate your code pattern...`, + context, + ); + const generateCommitMessage = new GenerateCommitMessage( + `${USER_MESSAGE} generates a commit message...`, + context, + ); + const generateInterviewQuestions = new InterviewMe( + `${USER_MESSAGE} generates interview questions...`, + context, + ); - const generateUnitTests = new GenerateUnitTest(`${USER_MESSAGE} generates unit tests...`, context); + const generateUnitTests = new GenerateUnitTest( + `${USER_MESSAGE} generates unit tests...`, + context, + ); const actionMap = { [comment]: () => getComment.execute(), @@ -87,7 +139,11 @@ export async function activate(context: vscode.ExtensionContext) { [interviewMe]: () => generateInterviewQuestions.execute(), [generateUnitTest]: () => generateUnitTests.execute(), [fix]: (errorMessage: string) => - new FixError(`${USER_MESSAGE} finds a solution to the error...`, context, errorMessage).execute(errorMessage), + new FixError( + `${USER_MESSAGE} finds a solution to the error...`, + context, + errorMessage, + ).execute(errorMessage), [explain]: () => explainCode.execute(), [pattern]: () => codePattern.uploadFileHandler(), [knowledge]: () => knowledgeBase.execute(), @@ -96,18 +152,20 @@ export async function activate(context: vscode.ExtensionContext) { [inlineChat]: () => getInLineChat.execute(), }; - const subscriptions: vscode.Disposable[] = Object.entries(actionMap).map(([action, handler]) => - vscode.commands.registerCommand(action, handler) + const subscriptions: vscode.Disposable[] = Object.entries(actionMap).map( + ([action, handler]) => vscode.commands.registerCommand(action, handler), ); const selectedGenerativeAiModel = getConfigValue("generativeAi.option"); const quickFix = new CodeActionsProvider(); - const quickFixCodeAction: vscode.Disposable = vscode.languages.registerCodeActionsProvider( + quickFixCodeAction = vscode.languages.registerCodeActionsProvider( { scheme: "file", language: "*" }, - quickFix + quickFix, ); + agentEventEmmitter = new AgentEventEmitter(); + const modelConfigurations: { [key: string]: { key: string; @@ -139,15 +197,27 @@ export async function activate(context: vscode.ExtensionContext) { if (selectedGenerativeAiModel in modelConfigurations) { const modelConfig = modelConfigurations[selectedGenerativeAiModel]; const { key, model, webviewProviderClass } = modelConfig; - setUpGenerativeAiModel(context, model, key, webviewProviderClass, subscriptions, quickFixCodeAction); + setUpGenerativeAiModel( + context, + model, + key, + webviewProviderClass, + subscriptions, + quickFixCodeAction, + agentEventEmmitter, + ); } } catch (error) { Memory.clear(); - 
vscode.window.showErrorMessage("An Error occured while setting up generative AI model"); + vscode.window.showErrorMessage( + "An Error occured while setting up generative AI model", + ); console.log(error); } } export function deactivate(context: vscode.ExtensionContext) { + quickFixCodeAction.dispose(); + agentEventEmmitter.dispose(); context.subscriptions.forEach((subscription) => subscription.dispose()); } diff --git a/src/infrastructure/storage/local-storage.ts b/src/infrastructure/storage/local-storage.ts new file mode 100644 index 0000000..786a658 --- /dev/null +++ b/src/infrastructure/storage/local-storage.ts @@ -0,0 +1,29 @@ +import * as vscode from "vscode"; +import { Logger } from "../logger/logger"; + +export class LocalStorageManager { + private readonly localStorage: vscode.SecretStorage; + private readonly logger: Logger; + constructor(context: vscode.ExtensionContext) { + this.localStorage = context.secrets; + this.logger = new Logger("localStorageManager"); + this.localStorage.onDidChange(this.handleOnChange.bind(this)); + } + + async add(key: string, value: string): Promise { + await this.localStorage.store(key, value); + } + + async get(key: string): Promise { + return await this.localStorage.get(key); + } + + async delete(key: string) { + await this.localStorage.delete(key); + } + + async handleOnChange(event: vscode.SecretStorageChangeEvent) { + const value = await this.localStorage.get(event.key); + this.logger.info(`Key: ${event.key}, Value: ${value}`); + } +} diff --git a/src/memory/base.ts b/src/memory/base.ts index e359411..63b8fba 100644 --- a/src/memory/base.ts +++ b/src/memory/base.ts @@ -1,12 +1,7 @@ import { MEMORY_CACHE_OPTIONS } from "../application/constant"; -interface ICacheEntry { - value: any; - expiry: number; -} - export class Memory { - private static bank: Map; + private static bank: Map; private static instance: Memory; constructor() { @@ -20,7 +15,7 @@ export class Memory { return Memory.instance; } - static set(key: string, value: any): Map { + static set(key: string, value: any): Map { const expiry = Date.now() + MEMORY_CACHE_OPTIONS.sessionTTL; return Memory.bank.set(key, { value, expiry }); } @@ -45,7 +40,7 @@ export class Memory { return Array.from(Memory.bank.keys()); } - static values(): ICacheEntry[] { + static values(): any[] { return Array.from(Memory.bank.values()); } diff --git a/src/providers/anthropic.ts b/src/providers/anthropic.ts index 0a2f372..c5a3dc0 100644 --- a/src/providers/anthropic.ts +++ b/src/providers/anthropic.ts @@ -1,8 +1,16 @@ import * as vscode from "vscode"; import { BaseWebViewProvider } from "./base"; -import { COMMON, generativeAiModels, GROQ_CONFIG } from "../application/constant"; +import { + COMMON, + generativeAiModels, + GROQ_CONFIG, +} from "../application/constant"; import Anthropic from "@anthropic-ai/sdk"; -import { createAnthropicClient, getGenerativeAiModel, getXGroKBaseURL } from "../utils/utils"; +import { + createAnthropicClient, + getGenerativeAiModel, + getXGroKBaseURL, +} from "../utils/utils"; import { Memory } from "../memory/base"; type Role = "user" | "assistant"; @@ -18,12 +26,15 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext, - protected baseUrl?: string + protected baseUrl?: string, ) { super(extensionUri, apiKey, generativeAiModel, context); } - public async sendResponse(response: string, currentChat: string): Promise { + public async sendResponse( + response: string, + currentChat: 
string, + ): Promise { try { const type = currentChat === "bot" ? "bot-response" : "user-input"; if (currentChat === "bot") { @@ -39,8 +50,13 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { } if (this.chatHistory.length === 2) { - const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) : []; - Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]); + const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) + ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) + : []; + Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [ + ...chatHistory, + ...this.chatHistory, + ]); } return await this.currentWebView?.webview.postMessage({ type, @@ -51,14 +67,23 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { } } - async generateResponse(message: string, apiKey?: string, name?: string): Promise { + async generateResponse( + message: string, + apiKey?: string, + name?: string, + ): Promise { try { const { max_tokens } = GROQ_CONFIG; if (getGenerativeAiModel() === generativeAiModels.GROK) { this.baseUrl = getXGroKBaseURL(); } - const anthropic: Anthropic = createAnthropicClient(this.apiKey, this.baseUrl); - let chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) : []; + const anthropic: Anthropic = createAnthropicClient( + this.apiKey, + this.baseUrl, + ); + let chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) + ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) + : []; if (chatHistory?.length) { chatHistory = [...chatHistory, { role: "user", content: message }]; @@ -83,7 +108,9 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { } catch (error) { console.error(error); Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, []); - vscode.window.showErrorMessage("Model not responding, please resend your question"); + vscode.window.showErrorMessage( + "Model not responding, please resend your question", + ); } } } diff --git a/src/providers/gemini.ts b/src/providers/gemini.ts index 6f2d336..bc8e44a 100644 --- a/src/providers/gemini.ts +++ b/src/providers/gemini.ts @@ -12,11 +12,19 @@ export interface IHistory { export class GeminiWebViewProvider extends BaseWebViewProvider { chatHistory: IHistory[] = []; - constructor(extensionUri: vscode.Uri, apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext) { + constructor( + extensionUri: vscode.Uri, + apiKey: string, + generativeAiModel: string, + context: vscode.ExtensionContext, + ) { super(extensionUri, apiKey, generativeAiModel, context); } - async sendResponse(response: string, currentChat: string): Promise { + async sendResponse( + response: string, + currentChat: string, + ): Promise { try { const type = currentChat === "bot" ? "bot-response" : "user-input"; if (currentChat === "bot") { @@ -31,8 +39,13 @@ export class GeminiWebViewProvider extends BaseWebViewProvider { }); } if (this.chatHistory.length === 2) { - const chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) ? Memory.get(COMMON.GEMINI_CHAT_HISTORY) : []; - Memory.set(COMMON.GEMINI_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]); + const chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) + ? 
Memory.get(COMMON.GEMINI_CHAT_HISTORY) + : []; + Memory.set(COMMON.GEMINI_CHAT_HISTORY, [ + ...chatHistory, + ...this.chatHistory, + ]); } return await this.currentWebView?.webview.postMessage({ type, @@ -44,11 +57,17 @@ export class GeminiWebViewProvider extends BaseWebViewProvider { } } - async generateResponse(apiKey: string, name: string, message: string): Promise { + async generateResponse( + apiKey: string, + name: string, + message: string, + ): Promise { try { const genAi = new GoogleGenerativeAI(apiKey); const model = genAi.getGenerativeModel({ model: name }); - let chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) ? Memory.get(COMMON.GEMINI_CHAT_HISTORY) : []; + let chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) + ? Memory.get(COMMON.GEMINI_CHAT_HISTORY) + : []; if (chatHistory?.length) { chatHistory = [ @@ -89,7 +108,9 @@ export class GeminiWebViewProvider extends BaseWebViewProvider { return response.text(); } catch (error) { Memory.set(COMMON.GEMINI_CHAT_HISTORY, []); - vscode.window.showErrorMessage("Model not responding, please resend your question"); + vscode.window.showErrorMessage( + "Model not responding, please resend your question", + ); console.error(error); return; } diff --git a/src/providers/groq.ts b/src/providers/groq.ts index 06e2033..df1b9da 100644 --- a/src/providers/groq.ts +++ b/src/providers/groq.ts @@ -12,11 +12,19 @@ export interface IHistory { export class GroqWebViewProvider extends BaseWebViewProvider { chatHistory: IHistory[] = []; - constructor(extensionUri: vscode.Uri, apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext) { + constructor( + extensionUri: vscode.Uri, + apiKey: string, + generativeAiModel: string, + context: vscode.ExtensionContext, + ) { super(extensionUri, apiKey, generativeAiModel, context); } - public async sendResponse(response: string, currentChat: string): Promise { + public async sendResponse( + response: string, + currentChat: string, + ): Promise { try { const type = currentChat === "bot" ? "bot-response" : "user-input"; if (currentChat === "bot") { @@ -31,8 +39,13 @@ export class GroqWebViewProvider extends BaseWebViewProvider { }); } if (this.chatHistory.length === 2) { - const chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : []; - Memory.set(COMMON.GROQ_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]); + const chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) + ? Memory.get(COMMON.GROQ_CHAT_HISTORY) + : []; + Memory.set(COMMON.GROQ_CHAT_HISTORY, [ + ...chatHistory, + ...this.chatHistory, + ]); } // Once the agent task is done, map the memory into the llm brain. // Send the final answer to the webview here. @@ -45,14 +58,20 @@ export class GroqWebViewProvider extends BaseWebViewProvider { } } - async generateResponse(message: string, apiKey?: string, name?: string): Promise { + async generateResponse( + message: string, + apiKey?: string, + name?: string, + ): Promise { try { const { temperature, max_tokens, top_p, stop } = GROQ_CONFIG; const groq = new Groq({ apiKey: this.apiKey, }); - let chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : []; + let chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) + ? 
Memory.get(COMMON.GROQ_CHAT_HISTORY) + : []; if (chatHistory?.length) { chatHistory = [...chatHistory, { role: "user", content: message }]; @@ -80,7 +99,9 @@ export class GroqWebViewProvider extends BaseWebViewProvider { } catch (error) { console.error(error); Memory.set(COMMON.GROQ_CHAT_HISTORY, []); - vscode.window.showErrorMessage("Model not responding, please resend your question"); + vscode.window.showErrorMessage( + "Model not responding, please resend your question", + ); return; } } diff --git a/src/services/generative-ai-model-manager.ts b/src/services/generative-ai-model-manager.ts index d05ea29..68bf3f7 100644 --- a/src/services/generative-ai-model-manager.ts +++ b/src/services/generative-ai-model-manager.ts @@ -1,6 +1,7 @@ import * as vscode from "vscode"; import { getConfigValue } from "../utils/utils"; import { ChatManager } from "./chat-manager"; +import { AgentEventEmitter } from "../emitter/agent-emitter"; export const setUpGenerativeAiModel = ( context: vscode.ExtensionContext, @@ -9,6 +10,7 @@ export const setUpGenerativeAiModel = ( webViewProviderClass: any, subscriptions: vscode.Disposable[], quickFixCodeAction: vscode.Disposable, + agentEventEmmitter: AgentEventEmitter, ) => { try { const apiKey = getConfigValue(key); @@ -34,6 +36,7 @@ export const setUpGenerativeAiModel = ( quickFixCodeAction, registerWebViewProvider, chatWithCodeBuddy, + agentEventEmmitter, ); } catch (error) { vscode.window.showErrorMessage( diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 8da246e..4d94741 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -1,6 +1,10 @@ import * as markdownit from "markdown-it"; import * as vscode from "vscode"; -import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant"; +import { + APP_CONFIG, + COMMON, + generativeAiModels, +} from "../application/constant"; import Anthropic from "@anthropic-ai/sdk"; import { Memory } from "../memory/base"; @@ -14,7 +18,9 @@ export const formatText = (text?: string): string => { return ""; }; -export const getConfigValue: GetConfigValueType = (key: string): T | undefined => { +export const getConfigValue: GetConfigValueType = ( + key: string, +): T | undefined => { return vscode.workspace.getConfiguration().get(key); }; @@ -66,7 +72,11 @@ export const getGenerativeAiModel = (): string | undefined => { return getConfigValue("generativeAi.option"); }; -export function getUri(webview: vscode.Webview, extensionUri: vscode.Uri, pathList: string[]) { +export function getUri( + webview: vscode.Webview, + extensionUri: vscode.Uri, + pathList: string[], +) { return webview.asWebviewUri(vscode.Uri.joinPath(extensionUri, ...pathList)); } @@ -75,7 +85,8 @@ export function getUri(webview: vscode.Webview, extensionUri: vscode.Uri, pathLi // and ensure script integrity when using Content Security Policy (CSP) export const getNonce = () => { let text = ""; - const possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; + const possible = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; for (let i = 0; i < 32; i++) { text += possible.charAt(Math.floor(Math.random() * possible.length)); }
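
Notes accompanying the patch (not part of the diff):

The "type safety" half of the commit message hinges on the generic parameters of `BaseEmitter`, `createEvent`, and the `vscode.Event` fields, which the angle-bracket-stripped hunks above only hint at. The sketch below is a compact restatement of the presumed typed surface, not a quote of the source: the payload interfaces (`IStatusPayload`, `IErrorPayload`), the event-map name (`IAgentEventMap`), and the `Typed*` class names are placeholders chosen to avoid clashing with the real classes, and the logger/try-catch from the real `BaseEmitter` is omitted.

```ts
import * as vscode from "vscode";

// Assumed payload shapes -- the real interfaces live in the emitter's own
// interface module under names this sketch does not know.
interface IStatusPayload {
  state: string;
  message: string;
  timestamp: number;
}
interface IErrorPayload {
  message: string;
  code: string;
  timestamp: number;
}

// Event map: event name -> payload type. The base emitter is generic over this map.
interface IAgentEventMap {
  onStatus: IStatusPayload;
  onError: IErrorPayload;
}

// Compact restatement of the typed base class (logging and try/catch omitted).
class TypedBaseEmitter<EventMap extends Record<string, any>> {
  private readonly emitters = new Map<keyof EventMap, vscode.EventEmitter<any>>();

  // Reuses an existing emitter when one is already registered for `name`,
  // matching the lookup-before-create logic added to createEvent in the patch.
  protected createEvent<K extends keyof EventMap>(name: K): vscode.Event<EventMap[K]> {
    let emitter = this.emitters.get(name);
    if (!emitter) {
      emitter = new vscode.EventEmitter<EventMap[K]>();
      this.emitters.set(name, emitter);
    }
    return emitter.event;
  }

  protected emit<K extends keyof EventMap>(name: K, data: EventMap[K]): void {
    this.emitters.get(name)?.fire(data);
  }

  public dispose(): void {
    this.emitters.forEach((emitter) => emitter.dispose());
  }
}

// A subclass now gets compile-time checking of event names and payloads.
// The public event is onStatusChange while the internal key stays "onStatus",
// mirroring the rename that Orchestrator consumes.
class TypedAgentEventEmitter extends TypedBaseEmitter<IAgentEventMap> {
  onStatusChange = this.createEvent("onStatus"); // vscode.Event<IStatusPayload>
  onError = this.createEvent("onError"); // vscode.Event<IErrorPayload>

  emitStatus(state: string, message: string): void {
    this.emit("onStatus", { state, message, timestamp: Date.now() });
  }

  emitError(message: string, code: string): void {
    this.emit("onError", { message, code, timestamp: Date.now() });
  }
}
```

With this shape, `this.emit("onStatus", ...)` rejects a payload that does not match the map entry, which is the reusability/type-safety payoff the commit message claims.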
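
On the consumer side, the rename to `onStatusChange` and the new module-level disposables in `extension.ts` suggest wiring along these lines. This is only a sketch: `wireAgentEvents` is a hypothetical helper, and the handler bodies stand in for whatever `Orchestrator` actually does with status and error events.

```ts
import * as vscode from "vscode";
import { AgentEventEmitter } from "./emitter/agent-emitter";

// Hypothetical helper, shown only to illustrate the subscription/disposal flow;
// in the patch itself the subscriptions live in Orchestrator and extension.ts.
export function wireAgentEvents(context: vscode.ExtensionContext): AgentEventEmitter {
  const agentEvents = new AgentEventEmitter();

  // onStatusChange is the renamed public event (Orchestrator now subscribes to it
  // instead of the old onStatus property).
  const statusSub = agentEvents.onStatusChange((event) => {
    vscode.window.setStatusBarMessage(`CodeBuddy: ${JSON.stringify(event)}`, 3000);
  });

  const errorSub = agentEvents.onError((event) => {
    vscode.window.showErrorMessage(`CodeBuddy agent error: ${JSON.stringify(event)}`);
  });

  // Registering the listeners and the emitter itself lets VS Code dispose them on
  // shutdown, complementing the explicit dispose() calls added to deactivate().
  context.subscriptions.push(statusSub, errorSub, agentEvents);
  return agentEvents;
}
```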
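
For the second half of the commit message, `LocalStorageManager` wraps VS Code's `SecretStorage` (`context.secrets`), which is backed by the operating system's credential store rather than `settings.json`. The patch does not yet call the class from `activate()`, so the helper below is purely illustrative; the function name and the `codebuddy.apiKey` key are made up for the example.

```ts
import * as vscode from "vscode";
import { LocalStorageManager } from "./infrastructure/storage/local-storage";

// Hypothetical helper, callable from activate(); the key name is illustrative only.
export async function storeAndReadApiKey(
  context: vscode.ExtensionContext,
  apiKey: string,
): Promise<string | undefined> {
  const storage = new LocalStorageManager(context);

  // Stored via context.secrets, so the value never lands in plain-text settings.
  await storage.add("codebuddy.apiKey", apiKey);

  // Later reads go through the same manager; undefined means nothing stored yet.
  return storage.get("codebuddy.apiKey");
}
```

One caveat worth noting: `handleOnChange` currently logs the stored value whenever a secret changes, which undercuts the point of `SecretStorage` for real API keys; logging only `event.key` would be safer.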
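
Finally, the generic parameters of `getConfigValue` in `utils.ts` are not visible in the hunks above, so the shape shown here is an assumption rather than a quote of the source: a single type parameter threaded through to `WorkspaceConfiguration.get`.

```ts
import * as vscode from "vscode";

// Presumed declaration of GetConfigValueType; the concrete signature is an assumption.
type GetConfigValueType = <T>(key: string) => T | undefined;

export const getConfigValue: GetConfigValueType = <T>(key: string): T | undefined => {
  return vscode.workspace.getConfiguration().get<T>(key);
};

// Call sites can pin the expected type explicitly or rely on inference:
const selectedGenerativeAiModel = getConfigValue<string>("generativeAi.option");
```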