diff --git a/src/agents/base.ts b/src/agents/base.ts
index 1ba6281..509057e 100644
--- a/src/agents/base.ts
+++ b/src/agents/base.ts
@@ -1,7 +1,10 @@
 import * as vscode from "vscode";
 import { AgentEventEmitter } from "../emitter/agent-emitter";
-export class BaseAiAgent extends AgentEventEmitter implements vscode.Disposable {
+export class BaseAiAgent
+  extends AgentEventEmitter
+  implements vscode.Disposable
+{
   constructor() {
     super();
   }
@@ -10,7 +13,10 @@ export class BaseAiAgent extends AgentEventEmitter implements vscode.Disposable
     try {
       this.emitStatus("processing", input);
     } catch (error) {
-      this.emitError(error instanceof Error ? error.message : "Unknown Error", "process failed");
+      this.emitError(
+        error instanceof Error ? error.message : "Unknown Error",
+        "process failed",
+      );
     } finally {
       this.emitStatus("completed", "Processing complete");
     }
diff --git a/src/agents/orchestrator.ts b/src/agents/orchestrator.ts
index 3d6dd20..8b16714 100644
--- a/src/agents/orchestrator.ts
+++ b/src/agents/orchestrator.ts
@@ -8,7 +8,7 @@ export class Orchestrator implements vscode.Disposable {
   constructor(private readonly aiAgent: BaseAiAgent) {
     this.disposables.push(
       this.aiAgent.onStatus(this.handleStatus.bind(this)),
-      this.aiAgent.onError(this.handleError.bind(this))
+      this.aiAgent.onError(this.handleError.bind(this)),
     );
   }
diff --git a/src/commands/event-generator.ts b/src/commands/event-generator.ts
index 4edb28a..b0b57b9 100644
--- a/src/commands/event-generator.ts
+++ b/src/commands/event-generator.ts
@@ -3,15 +3,10 @@ import Anthropic from "@anthropic-ai/sdk";
 import { GenerativeModel, GoogleGenerativeAI } from "@google/generative-ai";
 import Groq from "groq-sdk";
 import * as vscode from "vscode";
-import {
-  APP_CONFIG,
-  COMMON,
-  generativeAiModels,
-} from "../application/constant";
+import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant";
 import { AnthropicWebViewProvider } from "../providers/anthropic";
 import { GeminiWebViewProvider } from "../providers/gemini";
 import { GroqWebViewProvider } from "../providers/groq";
-import { Brain } from "../services/brain";
 import {
   createAnthropicClient,
   getConfigValue,
@@ -19,6 +14,7 @@ import {
   getXGroKBaseURL,
   vscodeErrorMessage,
 } from "../utils/utils";
+import { Memory } from "../memory/base";
 
 interface IEventGenerator {
   getApplicationConfig(configKey: string): string | undefined;
@@ -42,7 +38,7 @@ export abstract class EventGenerator implements IEventGenerator {
   constructor(
     private readonly action: string,
     _context: vscode.ExtensionContext,
-    errorMessage?: string,
+    errorMessage?: string
   ) {
     this.context = _context;
     this.error = errorMessage;
@@ -72,15 +68,13 @@ export abstract class EventGenerator implements IEventGenerator {
     return getConfigValue(configKey);
   }
 
-  protected createModel():
-    | { generativeAi: string; model: any; modelName: string }
-    | undefined {
+  protected createModel(): { generativeAi: string; model: any; modelName: string } | undefined {
     try {
       let model;
       let modelName = "";
       if (!this.generativeAi) {
         vscodeErrorMessage(
-          "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name",
+          "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name"
         );
       }
       if (this.generativeAi === generativeAiModels.GROQ) {
         const apiKey = this.groqApiKey;
         modelName = this.groqModel;
         if (!apiKey || !modelName) {
           vscodeErrorMessage(
-            "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name",
+            "Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name"
           );
         }
         model = this.createGroqModel(apiKey);
@@ -114,9 +108,7 @@
       return { generativeAi: this.generativeAi, model, modelName };
     } catch (error) {
       console.error("Error creating model:", error);
-      vscode.window.showErrorMessage(
-        "An error occurred while creating the model. Please try again.",
-      );
+      vscode.window.showErrorMessage("An error occurred while creating the model. Please try again.");
     }
   }
@@ -153,9 +145,7 @@
     return new Groq({ apiKey });
   }
 
-  protected async generateModelResponse(
-    text: string,
-  ): Promise {
+  protected async generateModelResponse(text: string): Promise {
     try {
       const activeModel = this.createModel();
       if (!activeModel) {
@@ -192,7 +182,7 @@
       if (!response) {
         throw new Error(
-          "Could not generate response. Check your settings, ensure the API keys and Model Name is added properly.",
+          "Could not generate response. Check your settings, ensure the API keys and Model Name is added properly."
         );
       }
       if (this.action.includes("chart")) {
@@ -201,9 +191,7 @@
       return response;
     } catch (error) {
       console.error("Error generating response:", error);
-      vscode.window.showErrorMessage(
-        "An error occurred while generating the response. Please try again.",
-      );
+      vscode.window.showErrorMessage("An error occurred while generating the response. Please try again.");
     }
   }
@@ -214,19 +202,12 @@
     return inputString;
   }
 
-  async generateGeminiResponse(
-    model: any,
-    text: string,
-  ): Promise {
+  async generateGeminiResponse(model: any, text: string): Promise {
     const result = await model.generateContent(text);
     return result ? await result.response.text() : undefined;
   }
 
-  private async anthropicResponse(
-    model: Anthropic,
-    generativeAiModel: string,
-    userPrompt: string,
-  ) {
+  private async anthropicResponse(model: Anthropic, generativeAiModel: string, userPrompt: string) {
     try {
       const response = await model.messages.create({
         model: generativeAiModel,
@@ -237,22 +218,14 @@
       return response.content[0].text;
     } catch (error) {
       console.error("Error generating response:", error);
-      vscode.window.showErrorMessage(
-        "An error occurred while generating the response. Please try again.",
-      );
+      vscode.window.showErrorMessage("An error occurred while generating the response. Please try again.");
       return;
     }
   }
 
-  private async groqResponse(
-    model: Groq,
-    prompt: string,
-    generativeAiModel: string,
-  ): Promise {
+  private async groqResponse(model: Groq, prompt: string, generativeAiModel: string): Promise {
     try {
-      const chatHistory = Brain.has(COMMON.ANTHROPIC_CHAT_HISTORY)
-        ? Brain.get(COMMON.GROQ_CHAT_HISTORY)
-        : [];
+      const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : [];
       const params = {
         messages: [
           ...chatHistory,
@@ -264,14 +237,11 @@
         model: generativeAiModel,
       };
 
-      const completion: Groq.Chat.ChatCompletion =
-        await model.chat.completions.create(params);
+      const completion: Groq.Chat.ChatCompletion = await model.chat.completions.create(params);
       return completion.choices[0]?.message?.content ?? undefined;
     } catch (error) {
       console.error("Error generating response:", error);
-      vscode.window.showErrorMessage(
-        "An error occurred while generating the response. Please try again.",
-      );
+      vscode.window.showErrorMessage("An error occurred while generating the response. Please try again.");
       return;
     }
   }
@@ -280,9 +250,7 @@
   abstract createPrompt(text?: string): any;
 
-  async generateResponse(
-    message?: string,
-  ): Promise {
+  async generateResponse(message?: string): Promise {
     this.showInformationMessage();
     let prompt;
     const selectedCode = this.getSelectedWindowArea();
@@ -294,9 +262,7 @@
     if (message && selectedCode) {
       prompt = await this.createPrompt(`${message} \n ${selectedCode}`);
     } else {
-      message
-        ? (prompt = await this.createPrompt(message))
-        : (prompt = await this.createPrompt(selectedCode));
+      message ? (prompt = await this.createPrompt(message)) : (prompt = await this.createPrompt(selectedCode));
     }
 
     if (!prompt) {
@@ -312,7 +278,7 @@
     switch (model) {
       case generativeAiModels.GEMINI:
         chatHistory = getLatestChatHistory(COMMON.GEMINI_CHAT_HISTORY);
-        Brain.set(COMMON.GEMINI_CHAT_HISTORY, [
+        Memory.set(COMMON.GEMINI_CHAT_HISTORY, [
           ...chatHistory,
           {
             role: "user",
@@ -326,7 +292,7 @@
         break;
       case generativeAiModels.GROQ:
         chatHistory = getLatestChatHistory(COMMON.GROQ_CHAT_HISTORY);
-        Brain.set(COMMON.GROQ_CHAT_HISTORY, [
+        Memory.set(COMMON.GROQ_CHAT_HISTORY, [
          ...chatHistory,
          {
            role: "user",
@@ -340,7 +306,7 @@
        break;
      case generativeAiModels.ANTHROPIC:
        chatHistory = getLatestChatHistory(COMMON.ANTHROPIC_CHAT_HISTORY);
-        Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
+        Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
          ...chatHistory,
          {
            role: "user",
@@ -354,7 +320,7 @@
        break;
      case generativeAiModels.GROK:
        chatHistory = getLatestChatHistory(COMMON.ANTHROPIC_CHAT_HISTORY);
-        Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
+        Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
          ...chatHistory,
          {
            role: "user",
@@ -379,25 +345,19 @@
       placeHolder: "Enter instructions for CodeBuddy",
       ignoreFocusOut: true,
       validateInput: (text) => {
-        return text === ""
-          ? "Enter instructions for CodeBuddy or press Escape to close chat box"
-          : null;
+        return text === "" ? "Enter instructions for CodeBuddy or press Escape to close chat box" : null;
       },
     });
     return userPrompt;
   } catch (error) {
-      vscode.window.showInformationMessage(
-        `Error occured while getting user prompt`,
-      );
+      vscode.window.showInformationMessage(`Error occured while getting user prompt`);
       console.log(error);
     }
   }
 
   async execute(message?: string): Promise {
     let prompt: string | undefined;
-    const response = (await this.generateResponse(
-      prompt ? prompt : message,
-    )) as string;
+    const response = (await this.generateResponse(prompt ? prompt : message)) as string;
     if (!response) {
       vscode.window.showErrorMessage("model not reponding, try again later");
       return;
diff --git a/src/emitter/agent-emitter.ts b/src/emitter/agent-emitter.ts
index e365f42..55a4562 100644
--- a/src/emitter/agent-emitter.ts
+++ b/src/emitter/agent-emitter.ts
@@ -1,5 +1,10 @@
 import { BaseEmitter } from "./emitter";
-import { EventState, IAgentEventMap, IErrorEvent, IStatusEvent } from "./interface";
+import {
+  EventState,
+  IAgentEventMap,
+  IErrorEvent,
+  IStatusEvent,
+} from "./interface";
 import * as vscode from "vscode";
 
 export class AgentEventEmitter extends BaseEmitter {
diff --git a/src/emitter/emitter.ts b/src/emitter/emitter.ts
index 68d88c8..9b1eba4 100644
--- a/src/emitter/emitter.ts
+++ b/src/emitter/emitter.ts
@@ -5,9 +5,12 @@ export class BaseEmitter {
   constructor() {
     this.logger = new Logger("BaseEmitter");
   }
-  private readonly emitters: Map> = new Map();
+  private readonly emitters: Map> =
+    new Map();
 
-  protected createEvent(name: K): vscode.Event {
+  protected createEvent(
+    name: K,
+  ): vscode.Event {
     try {
       const emitter = new vscode.EventEmitter();
       this.emitters.set(name, emitter);
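// Illustrative sketch (not part of the patch): how the typed emitter surface above is
// consumed, mirroring how Orchestrator wires itself to BaseAiAgent. It assumes
// BaseAiAgent stays directly constructible; the exact IStatusEvent/IErrorEvent payload
// shapes live in ./emitter/interface and are not shown in this patch, so the handlers
// below just log the whole event.
import * as vscode from "vscode";
import { BaseAiAgent } from "./agents/base";

const agent = new BaseAiAgent();
const subscriptions: vscode.Disposable[] = [
  agent.onStatus((event) => console.log("agent status:", event)),
  agent.onError((event) => console.error("agent error:", event)),
];

// Each onStatus/onError call returns a vscode.Disposable, so teardown is just
// disposing whatever was collected above (as Orchestrator does with its disposables).
subscriptions.forEach((subscription) => subscription.dispose());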
diff --git a/src/extension.ts b/src/extension.ts
index 4403839..a40fd7f 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -1,11 +1,5 @@
 import * as vscode from "vscode";
-import {
-  APP_CONFIG,
-  generativeAiModels,
-  OLA_ACTIONS,
-  USER_MESSAGE,
-} from "./application/constant";
-import { getConfigValue } from "./utils/utils";
+import { APP_CONFIG, generativeAiModels, OLA_ACTIONS, USER_MESSAGE } from "./application/constant";
 import { Comments } from "./commands/comment";
 import { ExplainCode } from "./commands/explain";
 import { FixError } from "./commands/fixError";
@@ -26,28 +20,19 @@ import { GroqWebViewProvider } from "./providers/groq";
 import { CodeIndexingService } from "./services/code-indexing";
 import { FileUploader } from "./services/file-uploader";
 import { setUpGenerativeAiModel } from "./services/generative-ai-model-manager";
-import { Brain } from "./services/brain";
+import { getConfigValue } from "./utils/utils";
+import { Memory } from "./memory/base";
 
-const {
-  geminiKey,
-  geminiModel,
-  groqApiKey,
-  groqModel,
-  anthropicApiKey,
-  anthropicModel,
-  grokApiKey,
-  grokModel,
-} = APP_CONFIG;
+const { geminiKey, geminiModel, groqApiKey, groqModel, anthropicApiKey, anthropicModel, grokApiKey, grokModel } =
+  APP_CONFIG;
 
 const connectDB = async () => {
-  await dbManager.connect(
-    "file:/Users/olasunkanmi/Documents/Github/codebuddy/patterns/dev.db",
-  );
+  await dbManager.connect("file:/Users/olasunkanmi/Documents/Github/codebuddy/patterns/dev.db");
 };
 
 export async function activate(context: vscode.ExtensionContext) {
   try {
-    Brain.getInstance();
+    Memory.getInstance();
     // await connectDB();
     // const x = CodeRepository.getInstance();
     // const apiKey = getGeminiAPIKey();
@@ -80,52 +65,19 @@ export async function activate(context: vscode.ExtensionContext) {
       generateCodeChart,
       inlineChat,
     } = OLA_ACTIONS;
-    const getComment = new Comments(
-      `${USER_MESSAGE} generates the code comments...`,
-      context,
-    );
-    const getInLineChat = new InLineChat(
-      `${USER_MESSAGE} generates a response...`,
-      context,
-    );
-    const generateOptimizeCode = new OptimizeCode(
-      `${USER_MESSAGE} optimizes the code...`,
-      context,
-    );
-    const generateRefactoredCode = new RefactorCode(
-      `${USER_MESSAGE} refactors the code...`,
-      context,
-    );
-    const explainCode = new ExplainCode(
-      `${USER_MESSAGE} explains the code...`,
-      context,
-    );
-    const generateReview = new ReviewCode(
-      `${USER_MESSAGE} reviews the code...`,
-      context,
-    );
-    const codeChartGenerator = new CodeChartGenerator(
-      `${USER_MESSAGE} creates the code chart...`,
-      context,
-    );
+    const getComment = new Comments(`${USER_MESSAGE} generates the code comments...`, context);
+    const getInLineChat = new InLineChat(`${USER_MESSAGE} generates a response...`, context);
+    const generateOptimizeCode = new OptimizeCode(`${USER_MESSAGE} optimizes the code...`, context);
+    const generateRefactoredCode = new RefactorCode(`${USER_MESSAGE} refactors the code...`, context);
+    const explainCode = new ExplainCode(`${USER_MESSAGE} explains the code...`, context);
+    const generateReview = new ReviewCode(`${USER_MESSAGE} reviews the code...`, context);
+    const codeChartGenerator = new CodeChartGenerator(`${USER_MESSAGE} creates the code chart...`, context);
     const codePattern = fileUpload;
-    const knowledgeBase = new ReadFromKnowledgeBase(
-      `${USER_MESSAGE} generate your code pattern...`,
-      context,
-    );
-    const generateCommitMessage = new GenerateCommitMessage(
-      `${USER_MESSAGE} generates a commit message...`,
-      context,
-    );
-    const generateInterviewQuestions = new InterviewMe(
-      `${USER_MESSAGE} generates interview questions...`,
-      context,
-    );
+    const knowledgeBase = new ReadFromKnowledgeBase(`${USER_MESSAGE} generate your code pattern...`, context);
+    const generateCommitMessage = new GenerateCommitMessage(`${USER_MESSAGE} generates a commit message...`, context);
+    const generateInterviewQuestions = new InterviewMe(`${USER_MESSAGE} generates interview questions...`, context);
 
-    const generateUnitTests = new GenerateUnitTest(
-      `${USER_MESSAGE} generates unit tests...`,
-      context,
-    );
+    const generateUnitTests = new GenerateUnitTest(`${USER_MESSAGE} generates unit tests...`, context);
 
     const actionMap = {
       [comment]: () => getComment.execute(),
@@ -135,11 +87,7 @@ export async function activate(context: vscode.ExtensionContext) {
       [interviewMe]: () => generateInterviewQuestions.execute(),
       [generateUnitTest]: () => generateUnitTests.execute(),
       [fix]: (errorMessage: string) =>
-        new FixError(
-          `${USER_MESSAGE} finds a solution to the error...`,
-          context,
-          errorMessage,
-        ).execute(errorMessage),
+        new FixError(`${USER_MESSAGE} finds a solution to the error...`, context, errorMessage).execute(errorMessage),
       [explain]: () => explainCode.execute(),
       [pattern]: () => codePattern.uploadFileHandler(),
       [knowledge]: () => knowledgeBase.execute(),
@@ -148,18 +96,17 @@ export async function activate(context: vscode.ExtensionContext) {
       [inlineChat]: () => getInLineChat.execute(),
     };
 
-    const subscriptions: vscode.Disposable[] = Object.entries(actionMap).map(
-      ([action, handler]) => vscode.commands.registerCommand(action, handler),
+    const subscriptions: vscode.Disposable[] = Object.entries(actionMap).map(([action, handler]) =>
+      vscode.commands.registerCommand(action, handler)
     );
 
     const selectedGenerativeAiModel = getConfigValue("generativeAi.option");
 
     const quickFix = new CodeActionsProvider();
-    const quickFixCodeAction: vscode.Disposable =
-      vscode.languages.registerCodeActionsProvider(
-        { scheme: "file", language: "*" },
-        quickFix,
-      );
+    const quickFixCodeAction: vscode.Disposable = vscode.languages.registerCodeActionsProvider(
+      { scheme: "file", language: "*" },
+      quickFix
+    );
 
     const modelConfigurations: {
       [key: string]: {
@@ -192,20 +139,11 @@ export async function activate(context: vscode.ExtensionContext) {
     if (selectedGenerativeAiModel in modelConfigurations) {
       const modelConfig = modelConfigurations[selectedGenerativeAiModel];
       const { key, model, webviewProviderClass } = modelConfig;
-      setUpGenerativeAiModel(
-        context,
-        model,
-        key,
-        webviewProviderClass,
-        subscriptions,
-        quickFixCodeAction,
-      );
+      setUpGenerativeAiModel(context, model, key, webviewProviderClass, subscriptions, quickFixCodeAction);
     }
   } catch (error) {
-    Brain.clear();
-    vscode.window.showErrorMessage(
-      "An Error occured while setting up generative AI model",
-    );
+    Memory.clear();
+    vscode.window.showErrorMessage("An Error occured while setting up generative AI model");
     console.log(error);
   }
 }
diff --git a/src/llms/gemini/gemini.ts b/src/llms/gemini/gemini.ts
index fd260c1..9b3b28d 100644
--- a/src/llms/gemini/gemini.ts
+++ b/src/llms/gemini/gemini.ts
@@ -44,10 +44,11 @@ export class GeminiLLM extends BaseLLM {
   private getModel(): GenerativeModel {
     try {
-      const model: GenerativeModel | undefined = this.generativeAi.getGenerativeModel({
-        model: this.config.model,
-        tools: this.config.tools,
-      });
+      const model: GenerativeModel | undefined =
+        this.generativeAi.getGenerativeModel({
+          model: this.config.model,
+          tools: this.config.tools,
+        });
       if (!model) {
         throw new Error(`Error retrieving model ${this.config.model}`);
       }
diff --git a/src/memory/base.ts b/src/memory/base.ts
index e69de29..e359411 100644
--- a/src/memory/base.ts
+++ b/src/memory/base.ts
@@ -0,0 +1,67 @@
+import { MEMORY_CACHE_OPTIONS } from "../application/constant";
+
+interface ICacheEntry {
+  value: any;
+  expiry: number;
+}
+
+export class Memory {
+  private static bank: Map;
+  private static instance: Memory;
+
+  constructor() {
+    Memory.bank = new Map();
+  }
+
+  public static getInstance(): Memory {
+    if (!Memory.instance) {
+      return (Memory.instance = new Memory());
+    }
+    return Memory.instance;
+  }
+
+  static set(key: string, value: any): Map {
+    const expiry = Date.now() + MEMORY_CACHE_OPTIONS.sessionTTL;
+    return Memory.bank.set(key, { value, expiry });
+  }
+
+  static get(key: string): any {
+    const entry = Memory.bank.get(key);
+    if (entry && Date.now() < entry.expiry) {
+      return entry.value;
+    }
+    return undefined;
+  }
+
+  static delete(key: string): boolean | undefined {
+    const cached = Memory.get(key);
+    if (cached) {
+      return Memory.bank.delete(key);
+    }
+    return undefined;
+  }
+
+  static keys(): string[] {
+    return Array.from(Memory.bank.keys());
+  }
+
+  static values(): ICacheEntry[] {
+    return Array.from(Memory.bank.values());
+  }
+
+  static has(key: string): boolean {
+    return Memory.bank.has(key);
+  }
+
+  static clear(): void {
+    return Memory.bank.clear();
+  }
+
+  static createSnapShot(): Memory {
+    return this.instance;
+  }
+
+  static loadSnapShot(snapShot: Memory): void {
+    Object.assign(this, snapShot);
+  }
+}
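// Illustrative sketch (not part of the patch): basic use of the new Memory cache that
// replaces Brain. The key shown (COMMON.GROQ_CHAT_HISTORY) and MEMORY_CACHE_OPTIONS.sessionTTL
// both come from ../application/constant; the TTL value itself is not shown in this patch.
import { Memory } from "./memory/base";
import { COMMON } from "./application/constant";

Memory.getInstance(); // initialise the static bank once, as activate() now does

Memory.set(COMMON.GROQ_CHAT_HISTORY, [{ role: "user", content: "hello" }]);
const history = Memory.get(COMMON.GROQ_CHAT_HISTORY) ?? []; // undefined once the TTL elapses

// Note on the code above: get() does not evict expired entries and delete() goes through
// get() first, so an expired entry is reported as absent but stays in the bank until clear().
Memory.has(COMMON.GROQ_CHAT_HISTORY); // true while the entry is physically in the bank
Memory.clear();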
from "../utils/utils"; +import { Memory } from "../memory/base"; type Role = "user" | "assistant"; export interface IHistory { @@ -26,15 +18,12 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext, - protected baseUrl?: string, + protected baseUrl?: string ) { super(extensionUri, apiKey, generativeAiModel, context); } - public async sendResponse( - response: string, - currentChat: string, - ): Promise { + public async sendResponse(response: string, currentChat: string): Promise { try { const type = currentChat === "bot" ? "bot-response" : "user-input"; if (currentChat === "bot") { @@ -50,13 +39,8 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { } if (this.chatHistory.length === 2) { - const chatHistory = Brain.has(COMMON.ANTHROPIC_CHAT_HISTORY) - ? Brain.get(COMMON.ANTHROPIC_CHAT_HISTORY) - : []; - Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, [ - ...chatHistory, - ...this.chatHistory, - ]); + const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) : []; + Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]); } return await this.currentWebView?.webview.postMessage({ type, @@ -67,23 +51,14 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { } } - async generateResponse( - apiKey = undefined, - name = undefined, - message: string, - ): Promise { + async generateResponse(message: string, apiKey?: string, name?: string): Promise { try { const { max_tokens } = GROQ_CONFIG; if (getGenerativeAiModel() === generativeAiModels.GROK) { this.baseUrl = getXGroKBaseURL(); } - const anthropic: Anthropic = createAnthropicClient( - this.apiKey, - this.baseUrl, - ); - let chatHistory = Brain.has(COMMON.ANTHROPIC_CHAT_HISTORY) - ? Brain.get(COMMON.ANTHROPIC_CHAT_HISTORY) - : []; + const anthropic: Anthropic = createAnthropicClient(this.apiKey, this.baseUrl); + let chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? 
Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) : []; if (chatHistory?.length) { chatHistory = [...chatHistory, { role: "user", content: message }]; @@ -107,10 +82,8 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider { return response; } catch (error) { console.error(error); - Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, []); - vscode.window.showErrorMessage( - "Model not responding, please resend your question", - ); + Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, []); + vscode.window.showErrorMessage("Model not responding, please resend your question"); } } } diff --git a/src/providers/gemini.ts b/src/providers/gemini.ts index 1a4dfdc..6f2d336 100644 --- a/src/providers/gemini.ts +++ b/src/providers/gemini.ts @@ -2,7 +2,7 @@ import { GoogleGenerativeAI } from "@google/generative-ai"; import * as vscode from "vscode"; import { COMMON } from "../application/constant"; import { BaseWebViewProvider } from "./base"; -import { Brain } from "../services/brain"; +import { Memory } from "../memory/base"; type Role = "function" | "user" | "model"; export interface IHistory { @@ -12,19 +12,11 @@ export interface IHistory { export class GeminiWebViewProvider extends BaseWebViewProvider { chatHistory: IHistory[] = []; - constructor( - extensionUri: vscode.Uri, - apiKey: string, - generativeAiModel: string, - context: vscode.ExtensionContext, - ) { + constructor(extensionUri: vscode.Uri, apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext) { super(extensionUri, apiKey, generativeAiModel, context); } - async sendResponse( - response: string, - currentChat: string, - ): Promise { + async sendResponse(response: string, currentChat: string): Promise { try { const type = currentChat === "bot" ? "bot-response" : "user-input"; if (currentChat === "bot") { @@ -39,35 +31,24 @@ export class GeminiWebViewProvider extends BaseWebViewProvider { }); } if (this.chatHistory.length === 2) { - const chatHistory = Brain.has(COMMON.GEMINI_CHAT_HISTORY) - ? Brain.get(COMMON.GEMINI_CHAT_HISTORY) - : []; - Brain.set(COMMON.GEMINI_CHAT_HISTORY, [ - ...chatHistory, - ...this.chatHistory, - ]); + const chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) ? Memory.get(COMMON.GEMINI_CHAT_HISTORY) : []; + Memory.set(COMMON.GEMINI_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]); } return await this.currentWebView?.webview.postMessage({ type, message: response, }); } catch (error) { - Brain.set(COMMON.GEMINI_CHAT_HISTORY, []); + Memory.set(COMMON.GEMINI_CHAT_HISTORY, []); console.error(error); } } - async generateResponse( - apiKey: string, - name: string, - message: string, - ): Promise { + async generateResponse(apiKey: string, name: string, message: string): Promise { try { const genAi = new GoogleGenerativeAI(apiKey); const model = genAi.getGenerativeModel({ model: name }); - let chatHistory = Brain.has(COMMON.GEMINI_CHAT_HISTORY) - ? Brain.get(COMMON.GEMINI_CHAT_HISTORY) - : []; + let chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) ? 
Memory.get(COMMON.GEMINI_CHAT_HISTORY) : []; if (chatHistory?.length) { chatHistory = [ @@ -107,10 +88,8 @@ export class GeminiWebViewProvider extends BaseWebViewProvider { const response = result.response; return response.text(); } catch (error) { - Brain.set(COMMON.GEMINI_CHAT_HISTORY, []); - vscode.window.showErrorMessage( - "Model not responding, please resend your question", - ); + Memory.set(COMMON.GEMINI_CHAT_HISTORY, []); + vscode.window.showErrorMessage("Model not responding, please resend your question"); console.error(error); return; } diff --git a/src/providers/groq.ts b/src/providers/groq.ts index 9dc365d..06e2033 100644 --- a/src/providers/groq.ts +++ b/src/providers/groq.ts @@ -2,7 +2,7 @@ import * as vscode from "vscode"; import { BaseWebViewProvider } from "./base"; import Groq from "groq-sdk"; import { COMMON, GROQ_CONFIG } from "../application/constant"; -import { Brain } from "../services/brain"; +import { Memory } from "../memory/base"; type Role = "user" | "system"; export interface IHistory { @@ -12,19 +12,11 @@ export interface IHistory { export class GroqWebViewProvider extends BaseWebViewProvider { chatHistory: IHistory[] = []; - constructor( - extensionUri: vscode.Uri, - apiKey: string, - generativeAiModel: string, - context: vscode.ExtensionContext, - ) { + constructor(extensionUri: vscode.Uri, apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext) { super(extensionUri, apiKey, generativeAiModel, context); } - public async sendResponse( - response: string, - currentChat: string, - ): Promise { + public async sendResponse(response: string, currentChat: string): Promise { try { const type = currentChat === "bot" ? "bot-response" : "user-input"; if (currentChat === "bot") { @@ -39,13 +31,8 @@ export class GroqWebViewProvider extends BaseWebViewProvider { }); } if (this.chatHistory.length === 2) { - const chatHistory = Brain.has(COMMON.GROQ_CHAT_HISTORY) - ? Brain.get(COMMON.GROQ_CHAT_HISTORY) - : []; - Brain.set(COMMON.GROQ_CHAT_HISTORY, [ - ...chatHistory, - ...this.chatHistory, - ]); + const chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : []; + Memory.set(COMMON.GROQ_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]); } // Once the agent task is done, map the memory into the llm brain. // Send the final answer to the webview here. @@ -58,20 +45,14 @@ export class GroqWebViewProvider extends BaseWebViewProvider { } } - async generateResponse( - apiKey = undefined, - name = undefined, - message: string, - ): Promise { + async generateResponse(message: string, apiKey?: string, name?: string): Promise { try { const { temperature, max_tokens, top_p, stop } = GROQ_CONFIG; const groq = new Groq({ apiKey: this.apiKey, }); - let chatHistory = Brain.has(COMMON.GROQ_CHAT_HISTORY) - ? Brain.get(COMMON.GROQ_CHAT_HISTORY) - : []; + let chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : []; if (chatHistory?.length) { chatHistory = [...chatHistory, { role: "user", content: message }]; @@ -98,10 +79,8 @@ export class GroqWebViewProvider extends BaseWebViewProvider { return response ?? 
undefined; } catch (error) { console.error(error); - Brain.set(COMMON.GROQ_CHAT_HISTORY, []); - vscode.window.showErrorMessage( - "Model not responding, please resend your question", - ); + Memory.set(COMMON.GROQ_CHAT_HISTORY, []); + vscode.window.showErrorMessage("Model not responding, please resend your question"); return; } } diff --git a/src/services/brain.ts b/src/services/brain.ts deleted file mode 100644 index 1d2b276..0000000 --- a/src/services/brain.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { MEMORY_CACHE_OPTIONS } from "../application/constant"; - -interface ICacheEntry { - value: any; - expiry: number; -} - -export class Brain { - private static memoryBank: Map; - private static instance: Brain; - - constructor() { - Brain.memoryBank = new Map(); - } - - public static getInstance(): Brain { - if (!Brain.instance) { - return (Brain.instance = new Brain()); - } - return Brain.instance; - } - - static set(key: string, value: any): Map { - const expiry = Date.now() + MEMORY_CACHE_OPTIONS.sessionTTL; - return Brain.memoryBank.set(key, { value, expiry }); - } - - static get(key: string): any { - const entry = Brain.memoryBank.get(key); - if (entry && Date.now() < entry.expiry) { - return entry.value; - } - return undefined; - } - - static delete(key: string): boolean | undefined { - const cached = Brain.get(key); - if (cached) { - return Brain.memoryBank.delete(key); - } - return undefined; - } - - static keys(): string[] { - return Array.from(Brain.memoryBank.keys()); - } - - static values(): ICacheEntry[] { - return Array.from(Brain.memoryBank.values()); - } - - static has(key: string): boolean { - return Brain.memoryBank.has(key); - } - - static clear(): void { - return Brain.memoryBank.clear(); - } -} diff --git a/src/services/chat-manager.ts b/src/services/chat-manager.ts index c5dc165..91f2203 100644 --- a/src/services/chat-manager.ts +++ b/src/services/chat-manager.ts @@ -140,11 +140,7 @@ export class ChatManager { groqAiConfigurations.model, this._context, ); - return await chatViewProvider.generateResponse( - undefined, - undefined, - message, - ); + return await chatViewProvider.generateResponse(message); } if (generativeAi === generativeAiModels.GEMINI) { const geminiConfigurations = this.handleAiProvider( @@ -169,11 +165,7 @@ export class ChatManager { const anthropicWebViewProvider = this.getAnthropicWebViewProvider( anthropicConfigurations, ); - return await anthropicWebViewProvider.generateResponse( - undefined, - undefined, - message, - ); + return await anthropicWebViewProvider.generateResponse(message); } if (generativeAi === generativeAiModels.GROK) { @@ -182,11 +174,7 @@ export class ChatManager { ); const anthropicWebViewProvider = this.getAnthropicWebViewProvider(grokConfigurations); - return await anthropicWebViewProvider.generateResponse( - undefined, - undefined, - message, - ); + return await anthropicWebViewProvider.generateResponse(message); } } catch (error) { const model = getConfigValue("generativeAi.option"); diff --git a/src/utils/utils.ts b/src/utils/utils.ts index faaba2c..8da246e 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -1,12 +1,8 @@ import * as markdownit from "markdown-it"; import * as vscode from "vscode"; -import { Brain } from "../services/brain"; -import { - APP_CONFIG, - COMMON, - generativeAiModels, -} from "../application/constant"; +import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant"; import Anthropic from "@anthropic-ai/sdk"; +import { Memory } from "../memory/base"; type GetConfigValueType 
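// Illustrative sketch (not part of the patch): the reworked call path. GroqWebViewProvider
// and AnthropicWebViewProvider now take the prompt first with apiKey/name optional, so
// ChatManager passes just the message instead of undefined placeholders. The function and
// argument values below are placeholders for what ChatManager resolves from configuration.
import * as vscode from "vscode";
import { GroqWebViewProvider } from "./providers/groq";

async function askGroq(
  extensionUri: vscode.Uri,
  apiKey: string,
  model: string,
  context: vscode.ExtensionContext,
  message: string,
) {
  const provider = new GroqWebViewProvider(extensionUri, apiKey, model, context);
  // before this patch: provider.generateResponse(undefined, undefined, message)
  return provider.generateResponse(message);
}
// GeminiWebViewProvider keeps its original (apiKey, name, message) order in this patch.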
diff --git a/src/utils/utils.ts b/src/utils/utils.ts
index faaba2c..8da246e 100644
--- a/src/utils/utils.ts
+++ b/src/utils/utils.ts
@@ -1,12 +1,8 @@
 import * as markdownit from "markdown-it";
 import * as vscode from "vscode";
-import { Brain } from "../services/brain";
-import {
-  APP_CONFIG,
-  COMMON,
-  generativeAiModels,
-} from "../application/constant";
+import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant";
 import Anthropic from "@anthropic-ai/sdk";
+import { Memory } from "../memory/base";
 
 type GetConfigValueType = (key: string) => T | undefined;
@@ -18,9 +14,7 @@ export const formatText = (text?: string): string => {
   return "";
 };
 
-export const getConfigValue: GetConfigValueType = (
-  key: string,
-): T | undefined => {
+export const getConfigValue: GetConfigValueType = (key: string): T | undefined => {
   return vscode.workspace.getConfiguration().get(key);
 };
 
@@ -29,7 +23,7 @@ export const vscodeErrorMessage = (error: string, metaData?: any) => {
 };
 
 export const getLatestChatHistory = (key: string) => {
-  let chatHistory = Brain.has(key) ? Brain.get(key) : [];
+  let chatHistory = Memory.has(key) ? Memory.get(key) : [];
   if (chatHistory?.length > 3) {
     chatHistory = chatHistory.slice(-3);
   }
@@ -39,13 +33,13 @@ export const getLatestChatHistory = (key: string) => {
 export const resetChatHistory = (model: string) => {
   switch (model) {
     case generativeAiModels.ANTHROPIC:
-      Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, []);
+      Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, []);
       break;
     case generativeAiModels.GEMINI:
-      Brain.set(COMMON.GEMINI_CHAT_HISTORY, []);
+      Memory.set(COMMON.GEMINI_CHAT_HISTORY, []);
       break;
     case generativeAiModels.GROQ:
-      Brain.set(COMMON.GROQ_CHAT_HISTORY, []);
+      Memory.set(COMMON.GROQ_CHAT_HISTORY, []);
       break;
     default:
       break;
@@ -72,11 +66,7 @@ export const getGenerativeAiModel = (): string | undefined => {
   return getConfigValue("generativeAi.option");
 };
 
-export function getUri(
-  webview: vscode.Webview,
-  extensionUri: vscode.Uri,
-  pathList: string[],
-) {
+export function getUri(webview: vscode.Webview, extensionUri: vscode.Uri, pathList: string[]) {
   return webview.asWebviewUri(vscode.Uri.joinPath(extensionUri, ...pathList));
 }
 
@@ -85,8 +75,7 @@ export function getUri(
 // and ensure script integrity when using Content Security Policy (CSP)
 export const getNonce = () => {
   let text = "";
-  const possible =
-    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+  const possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
   for (let i = 0; i < 32; i++) {
     text += possible.charAt(Math.floor(Math.random() * possible.length));
   }
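// Illustrative sketch (not part of the patch): the Memory-backed history helpers above.
// getLatestChatHistory returns at most the last three stored turns and resetChatHistory
// writes an empty history for the chosen model. Import paths are shown relative to src/;
// the history entry shape matches the Groq provider's { role, content } records.
import { Memory } from "./memory/base";
import { COMMON, generativeAiModels } from "./application/constant";
import { getLatestChatHistory, resetChatHistory } from "./utils/utils";

Memory.getInstance();
Memory.set(COMMON.GROQ_CHAT_HISTORY, [
  { role: "user", content: "q1" },
  { role: "system", content: "a1" },
  { role: "user", content: "q2" },
  { role: "system", content: "a2" },
]);

const recent = getLatestChatHistory(COMMON.GROQ_CHAT_HISTORY);
console.log(recent.length); // 3 - only the most recent three entries are kept
resetChatHistory(generativeAiModels.GROQ); // Groq history becomes []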