Skip to content

Commit

Permalink
add snapshots to memory
Browse files Browse the repository at this point in the history
Olasunkanmi Oyinlola authored and Olasunkanmi Oyinlola committed Jan 27, 2025
1 parent b87c1be commit 2ae80e4
Showing 14 changed files with 189 additions and 360 deletions.
10 changes: 8 additions & 2 deletions src/agents/base.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
import * as vscode from "vscode";
import { AgentEventEmitter } from "../emitter/agent-emitter";

export class BaseAiAgent extends AgentEventEmitter implements vscode.Disposable {
export class BaseAiAgent
extends AgentEventEmitter
implements vscode.Disposable
{
constructor() {
super();
}
@@ -10,7 +13,10 @@ export class BaseAiAgent extends AgentEventEmitter implements vscode.Disposable
try {
this.emitStatus("processing", input);
} catch (error) {
this.emitError(error instanceof Error ? error.message : "Unknown Error", "process failed");
this.emitError(
error instanceof Error ? error.message : "Unknown Error",
"process failed",
);
} finally {
this.emitStatus("completed", "Processing complete");
}
2 changes: 1 addition & 1 deletion src/agents/orchestrator.ts
Original file line number Diff line number Diff line change
@@ -8,7 +8,7 @@ export class Orchestrator implements vscode.Disposable {
constructor(private readonly aiAgent: BaseAiAgent) {
this.disposables.push(
this.aiAgent.onStatus(this.handleStatus.bind(this)),
this.aiAgent.onError(this.handleError.bind(this))
this.aiAgent.onError(this.handleError.bind(this)),
);
}

92 changes: 26 additions & 66 deletions src/commands/event-generator.ts
Original file line number Diff line number Diff line change
@@ -3,22 +3,18 @@ import Anthropic from "@anthropic-ai/sdk";
import { GenerativeModel, GoogleGenerativeAI } from "@google/generative-ai";
import Groq from "groq-sdk";
import * as vscode from "vscode";
import {
APP_CONFIG,
COMMON,
generativeAiModels,
} from "../application/constant";
import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant";
import { AnthropicWebViewProvider } from "../providers/anthropic";
import { GeminiWebViewProvider } from "../providers/gemini";
import { GroqWebViewProvider } from "../providers/groq";
import { Brain } from "../services/brain";
import {
createAnthropicClient,
getConfigValue,
getLatestChatHistory,
getXGroKBaseURL,
vscodeErrorMessage,
} from "../utils/utils";
import { Memory } from "../memory/base";

interface IEventGenerator {
getApplicationConfig(configKey: string): string | undefined;
@@ -42,7 +38,7 @@ export abstract class EventGenerator implements IEventGenerator {
constructor(
private readonly action: string,
_context: vscode.ExtensionContext,
errorMessage?: string,
errorMessage?: string
) {
this.context = _context;
this.error = errorMessage;
@@ -72,23 +68,21 @@ export abstract class EventGenerator implements IEventGenerator {
return getConfigValue(configKey);
}

protected createModel():
| { generativeAi: string; model: any; modelName: string }
| undefined {
protected createModel(): { generativeAi: string; model: any; modelName: string } | undefined {
try {
let model;
let modelName = "";
if (!this.generativeAi) {
vscodeErrorMessage(
"Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name",
"Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name"
);
}
if (this.generativeAi === generativeAiModels.GROQ) {
const apiKey = this.groqApiKey;
modelName = this.groqModel;
if (!apiKey || !modelName) {
vscodeErrorMessage(
"Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name",
"Configuration not found. Go to settings, search for Your coding buddy. Fill up the model and model name"
);
}
model = this.createGroqModel(apiKey);
@@ -114,9 +108,7 @@ export abstract class EventGenerator implements IEventGenerator {
return { generativeAi: this.generativeAi, model, modelName };
} catch (error) {
console.error("Error creating model:", error);
vscode.window.showErrorMessage(
"An error occurred while creating the model. Please try again.",
);
vscode.window.showErrorMessage("An error occurred while creating the model. Please try again.");
}
}

@@ -153,9 +145,7 @@ export abstract class EventGenerator implements IEventGenerator {
return new Groq({ apiKey });
}

protected async generateModelResponse(
text: string,
): Promise<string | Anthropic.Messages.Message | undefined> {
protected async generateModelResponse(text: string): Promise<string | Anthropic.Messages.Message | undefined> {
try {
const activeModel = this.createModel();
if (!activeModel) {
@@ -192,7 +182,7 @@ export abstract class EventGenerator implements IEventGenerator {

if (!response) {
throw new Error(
"Could not generate response. Check your settings, ensure the API keys and Model Name is added properly.",
"Could not generate response. Check your settings, ensure the API keys and Model Name is added properly."
);
}
if (this.action.includes("chart")) {
@@ -201,9 +191,7 @@ export abstract class EventGenerator implements IEventGenerator {
return response;
} catch (error) {
console.error("Error generating response:", error);
vscode.window.showErrorMessage(
"An error occurred while generating the response. Please try again.",
);
vscode.window.showErrorMessage("An error occurred while generating the response. Please try again.");
}
}

@@ -214,19 +202,12 @@ export abstract class EventGenerator implements IEventGenerator {
return inputString;
}

async generateGeminiResponse(
model: any,
text: string,
): Promise<string | undefined> {
async generateGeminiResponse(model: any, text: string): Promise<string | undefined> {
const result = await model.generateContent(text);
return result ? await result.response.text() : undefined;
}

private async anthropicResponse(
model: Anthropic,
generativeAiModel: string,
userPrompt: string,
) {
private async anthropicResponse(model: Anthropic, generativeAiModel: string, userPrompt: string) {
try {
const response = await model.messages.create({
model: generativeAiModel,
@@ -237,22 +218,14 @@ export abstract class EventGenerator implements IEventGenerator {
return response.content[0].text;
} catch (error) {
console.error("Error generating response:", error);
vscode.window.showErrorMessage(
"An error occurred while generating the response. Please try again.",
);
vscode.window.showErrorMessage("An error occurred while generating the response. Please try again.");
return;
}
}

private async groqResponse(
model: Groq,
prompt: string,
generativeAiModel: string,
): Promise<string | undefined> {
private async groqResponse(model: Groq, prompt: string, generativeAiModel: string): Promise<string | undefined> {
try {
const chatHistory = Brain.has(COMMON.ANTHROPIC_CHAT_HISTORY)
? Brain.get(COMMON.GROQ_CHAT_HISTORY)
: [];
const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : [];
const params = {
messages: [
...chatHistory,
@@ -264,14 +237,11 @@ export abstract class EventGenerator implements IEventGenerator {
model: generativeAiModel,
};

const completion: Groq.Chat.ChatCompletion =
await model.chat.completions.create(params);
const completion: Groq.Chat.ChatCompletion = await model.chat.completions.create(params);
return completion.choices[0]?.message?.content ?? undefined;
} catch (error) {
console.error("Error generating response:", error);
vscode.window.showErrorMessage(
"An error occurred while generating the response. Please try again.",
);
vscode.window.showErrorMessage("An error occurred while generating the response. Please try again.");
return;
}
}
@@ -280,9 +250,7 @@ export abstract class EventGenerator implements IEventGenerator {

abstract createPrompt(text?: string): any;

async generateResponse(
message?: string,
): Promise<string | Anthropic.Messages.Message | undefined> {
async generateResponse(message?: string): Promise<string | Anthropic.Messages.Message | undefined> {
this.showInformationMessage();
let prompt;
const selectedCode = this.getSelectedWindowArea();
@@ -294,9 +262,7 @@ export abstract class EventGenerator implements IEventGenerator {
if (message && selectedCode) {
prompt = await this.createPrompt(`${message} \n ${selectedCode}`);
} else {
message
? (prompt = await this.createPrompt(message))
: (prompt = await this.createPrompt(selectedCode));
message ? (prompt = await this.createPrompt(message)) : (prompt = await this.createPrompt(selectedCode));
}

if (!prompt) {
@@ -312,7 +278,7 @@ export abstract class EventGenerator implements IEventGenerator {
switch (model) {
case generativeAiModels.GEMINI:
chatHistory = getLatestChatHistory(COMMON.GEMINI_CHAT_HISTORY);
Brain.set(COMMON.GEMINI_CHAT_HISTORY, [
Memory.set(COMMON.GEMINI_CHAT_HISTORY, [
...chatHistory,
{
role: "user",
@@ -326,7 +292,7 @@ export abstract class EventGenerator implements IEventGenerator {
break;
case generativeAiModels.GROQ:
chatHistory = getLatestChatHistory(COMMON.GROQ_CHAT_HISTORY);
Brain.set(COMMON.GROQ_CHAT_HISTORY, [
Memory.set(COMMON.GROQ_CHAT_HISTORY, [
...chatHistory,
{
role: "user",
@@ -340,7 +306,7 @@ export abstract class EventGenerator implements IEventGenerator {
break;
case generativeAiModels.ANTHROPIC:
chatHistory = getLatestChatHistory(COMMON.ANTHROPIC_CHAT_HISTORY);
Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
...chatHistory,
{
role: "user",
@@ -354,7 +320,7 @@ export abstract class EventGenerator implements IEventGenerator {
break;
case generativeAiModels.GROK:
chatHistory = getLatestChatHistory(COMMON.ANTHROPIC_CHAT_HISTORY);
Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
...chatHistory,
{
role: "user",
@@ -379,25 +345,19 @@ export abstract class EventGenerator implements IEventGenerator {
placeHolder: "Enter instructions for CodeBuddy",
ignoreFocusOut: true,
validateInput: (text) => {
return text === ""
? "Enter instructions for CodeBuddy or press Escape to close chat box"
: null;
return text === "" ? "Enter instructions for CodeBuddy or press Escape to close chat box" : null;
},
});
return userPrompt;
} catch (error) {
vscode.window.showInformationMessage(
`Error occured while getting user prompt`,
);
vscode.window.showInformationMessage(`Error occured while getting user prompt`);
console.log(error);
}
}

async execute(message?: string): Promise<void> {
let prompt: string | undefined;
const response = (await this.generateResponse(
prompt ? prompt : message,
)) as string;
const response = (await this.generateResponse(prompt ? prompt : message)) as string;
if (!response) {
vscode.window.showErrorMessage("model not reponding, try again later");
return;
7 changes: 6 additions & 1 deletion src/emitter/agent-emitter.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,10 @@
import { BaseEmitter } from "./emitter";
import { EventState, IAgentEventMap, IErrorEvent, IStatusEvent } from "./interface";
import {
EventState,
IAgentEventMap,
IErrorEvent,
IStatusEvent,
} from "./interface";
import * as vscode from "vscode";

export class AgentEventEmitter extends BaseEmitter<IAgentEventMap> {
7 changes: 5 additions & 2 deletions src/emitter/emitter.ts
Original file line number Diff line number Diff line change
@@ -5,9 +5,12 @@ export class BaseEmitter<EventMap> {
constructor() {
this.logger = new Logger("BaseEmitter");
}
private readonly emitters: Map<keyof EventMap, vscode.EventEmitter<any>> = new Map();
private readonly emitters: Map<keyof EventMap, vscode.EventEmitter<any>> =
new Map();

protected createEvent<K extends keyof EventMap>(name: K): vscode.Event<EventMap[K]> {
protected createEvent<K extends keyof EventMap>(
name: K,
): vscode.Event<EventMap[K]> {
try {
const emitter = new vscode.EventEmitter<EventMap[K]>();
this.emitters.set(name, emitter);
118 changes: 28 additions & 90 deletions src/extension.ts
Original file line number Diff line number Diff line change
@@ -1,11 +1,5 @@
import * as vscode from "vscode";
import {
APP_CONFIG,
generativeAiModels,
OLA_ACTIONS,
USER_MESSAGE,
} from "./application/constant";
import { getConfigValue } from "./utils/utils";
import { APP_CONFIG, generativeAiModels, OLA_ACTIONS, USER_MESSAGE } from "./application/constant";
import { Comments } from "./commands/comment";
import { ExplainCode } from "./commands/explain";
import { FixError } from "./commands/fixError";
@@ -26,28 +20,19 @@ import { GroqWebViewProvider } from "./providers/groq";
import { CodeIndexingService } from "./services/code-indexing";
import { FileUploader } from "./services/file-uploader";
import { setUpGenerativeAiModel } from "./services/generative-ai-model-manager";
import { Brain } from "./services/brain";
import { getConfigValue } from "./utils/utils";
import { Memory } from "./memory/base";

const {
geminiKey,
geminiModel,
groqApiKey,
groqModel,
anthropicApiKey,
anthropicModel,
grokApiKey,
grokModel,
} = APP_CONFIG;
const { geminiKey, geminiModel, groqApiKey, groqModel, anthropicApiKey, anthropicModel, grokApiKey, grokModel } =
APP_CONFIG;

const connectDB = async () => {
await dbManager.connect(
"file:/Users/olasunkanmi/Documents/Github/codebuddy/patterns/dev.db",
);
await dbManager.connect("file:/Users/olasunkanmi/Documents/Github/codebuddy/patterns/dev.db");
};

export async function activate(context: vscode.ExtensionContext) {
try {
Brain.getInstance();
Memory.getInstance();
// await connectDB();
// const x = CodeRepository.getInstance();
// const apiKey = getGeminiAPIKey();
@@ -80,52 +65,19 @@ export async function activate(context: vscode.ExtensionContext) {
generateCodeChart,
inlineChat,
} = OLA_ACTIONS;
const getComment = new Comments(
`${USER_MESSAGE} generates the code comments...`,
context,
);
const getInLineChat = new InLineChat(
`${USER_MESSAGE} generates a response...`,
context,
);
const generateOptimizeCode = new OptimizeCode(
`${USER_MESSAGE} optimizes the code...`,
context,
);
const generateRefactoredCode = new RefactorCode(
`${USER_MESSAGE} refactors the code...`,
context,
);
const explainCode = new ExplainCode(
`${USER_MESSAGE} explains the code...`,
context,
);
const generateReview = new ReviewCode(
`${USER_MESSAGE} reviews the code...`,
context,
);
const codeChartGenerator = new CodeChartGenerator(
`${USER_MESSAGE} creates the code chart...`,
context,
);
const getComment = new Comments(`${USER_MESSAGE} generates the code comments...`, context);
const getInLineChat = new InLineChat(`${USER_MESSAGE} generates a response...`, context);
const generateOptimizeCode = new OptimizeCode(`${USER_MESSAGE} optimizes the code...`, context);
const generateRefactoredCode = new RefactorCode(`${USER_MESSAGE} refactors the code...`, context);
const explainCode = new ExplainCode(`${USER_MESSAGE} explains the code...`, context);
const generateReview = new ReviewCode(`${USER_MESSAGE} reviews the code...`, context);
const codeChartGenerator = new CodeChartGenerator(`${USER_MESSAGE} creates the code chart...`, context);
const codePattern = fileUpload;
const knowledgeBase = new ReadFromKnowledgeBase(
`${USER_MESSAGE} generate your code pattern...`,
context,
);
const generateCommitMessage = new GenerateCommitMessage(
`${USER_MESSAGE} generates a commit message...`,
context,
);
const generateInterviewQuestions = new InterviewMe(
`${USER_MESSAGE} generates interview questions...`,
context,
);
const knowledgeBase = new ReadFromKnowledgeBase(`${USER_MESSAGE} generate your code pattern...`, context);
const generateCommitMessage = new GenerateCommitMessage(`${USER_MESSAGE} generates a commit message...`, context);
const generateInterviewQuestions = new InterviewMe(`${USER_MESSAGE} generates interview questions...`, context);

const generateUnitTests = new GenerateUnitTest(
`${USER_MESSAGE} generates unit tests...`,
context,
);
const generateUnitTests = new GenerateUnitTest(`${USER_MESSAGE} generates unit tests...`, context);

const actionMap = {
[comment]: () => getComment.execute(),
@@ -135,11 +87,7 @@ export async function activate(context: vscode.ExtensionContext) {
[interviewMe]: () => generateInterviewQuestions.execute(),
[generateUnitTest]: () => generateUnitTests.execute(),
[fix]: (errorMessage: string) =>
new FixError(
`${USER_MESSAGE} finds a solution to the error...`,
context,
errorMessage,
).execute(errorMessage),
new FixError(`${USER_MESSAGE} finds a solution to the error...`, context, errorMessage).execute(errorMessage),
[explain]: () => explainCode.execute(),
[pattern]: () => codePattern.uploadFileHandler(),
[knowledge]: () => knowledgeBase.execute(),
@@ -148,18 +96,17 @@ export async function activate(context: vscode.ExtensionContext) {
[inlineChat]: () => getInLineChat.execute(),
};

const subscriptions: vscode.Disposable[] = Object.entries(actionMap).map(
([action, handler]) => vscode.commands.registerCommand(action, handler),
const subscriptions: vscode.Disposable[] = Object.entries(actionMap).map(([action, handler]) =>
vscode.commands.registerCommand(action, handler)
);

const selectedGenerativeAiModel = getConfigValue("generativeAi.option");

const quickFix = new CodeActionsProvider();
const quickFixCodeAction: vscode.Disposable =
vscode.languages.registerCodeActionsProvider(
{ scheme: "file", language: "*" },
quickFix,
);
const quickFixCodeAction: vscode.Disposable = vscode.languages.registerCodeActionsProvider(
{ scheme: "file", language: "*" },
quickFix
);

const modelConfigurations: {
[key: string]: {
@@ -192,20 +139,11 @@ export async function activate(context: vscode.ExtensionContext) {
if (selectedGenerativeAiModel in modelConfigurations) {
const modelConfig = modelConfigurations[selectedGenerativeAiModel];
const { key, model, webviewProviderClass } = modelConfig;
setUpGenerativeAiModel(
context,
model,
key,
webviewProviderClass,
subscriptions,
quickFixCodeAction,
);
setUpGenerativeAiModel(context, model, key, webviewProviderClass, subscriptions, quickFixCodeAction);
}
} catch (error) {
Brain.clear();
vscode.window.showErrorMessage(
"An Error occured while setting up generative AI model",
);
Memory.clear();
vscode.window.showErrorMessage("An Error occured while setting up generative AI model");
console.log(error);
}
}
9 changes: 5 additions & 4 deletions src/llms/gemini/gemini.ts
Original file line number Diff line number Diff line change
@@ -44,10 +44,11 @@ export class GeminiLLM extends BaseLLM<GeminiModelResponseType> {

private getModel(): GenerativeModel {
try {
const model: GenerativeModel | undefined = this.generativeAi.getGenerativeModel({
model: this.config.model,
tools: this.config.tools,
});
const model: GenerativeModel | undefined =
this.generativeAi.getGenerativeModel({
model: this.config.model,
tools: this.config.tools,
});
if (!model) {
throw new Error(`Error retrieving model ${this.config.model}`);
}
67 changes: 67 additions & 0 deletions src/memory/base.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import { MEMORY_CACHE_OPTIONS } from "../application/constant";

/** A single cached value together with its absolute expiry timestamp (ms epoch). */
interface ICacheEntry {
  value: any;
  expiry: number;
}

/**
 * Process-wide, TTL-based key/value cache backed by a single static Map.
 * All read/write accessors are static; instances only exist to satisfy the
 * singleton pattern and to carry snapshot state (see createSnapShot).
 */
export class Memory {
  // Shared backing store for the whole process.
  private static bank: Map<string, ICacheEntry>;
  private static instance: Memory;

  // Detached copy of the bank captured by createSnapShot(); set only on
  // snapshot instances, never on the live singleton.
  private snapshotBank?: Map<string, ICacheEntry>;

  constructor() {
    // Initialize the shared bank exactly once: constructing additional
    // Memory instances (e.g. snapshot holders) must not wipe the live cache.
    // (The original reassigned Memory.bank on every construction.)
    if (!Memory.bank) {
      Memory.bank = new Map();
    }
  }

  /** Returns the process-wide singleton, creating it on first use. */
  public static getInstance(): Memory {
    if (!Memory.instance) {
      Memory.instance = new Memory();
    }
    return Memory.instance;
  }

  /** Stores `value` under `key` with a session-scoped TTL. */
  static set(key: string, value: any): Map<string, ICacheEntry> {
    const expiry = Date.now() + MEMORY_CACHE_OPTIONS.sessionTTL;
    return Memory.bank.set(key, { value, expiry });
  }

  /**
   * Returns the cached value, or undefined when absent or expired.
   * Expired entries are evicted lazily here so they cannot accumulate
   * forever (the original left them in the map indefinitely).
   */
  static get(key: string): any {
    const entry = Memory.bank.get(key);
    if (!entry) {
      return undefined;
    }
    if (Date.now() >= entry.expiry) {
      Memory.bank.delete(key); // lazy eviction of a stale entry
      return undefined;
    }
    return entry.value;
  }

  /**
   * Deletes `key` when a live (non-expired, truthy) value exists and reports
   * whether the map held it; returns undefined otherwise.
   */
  static delete(key: string): boolean | undefined {
    const cached = Memory.get(key);
    if (cached) {
      return Memory.bank.delete(key);
    }
    return undefined;
  }

  /** All keys currently stored, including not-yet-evicted expired ones. */
  static keys(): string[] {
    return Array.from(Memory.bank.keys());
  }

  /** All raw entries currently stored, including not-yet-evicted expired ones. */
  static values(): ICacheEntry[] {
    return Array.from(Memory.bank.values());
  }

  /**
   * Whether `key` is present in the bank. NOTE: may return true for an
   * expired entry that get() would report as undefined — matches the
   * has()-then-get() usage pattern elsewhere in the codebase.
   */
  static has(key: string): boolean {
    return Memory.bank.has(key);
  }

  /** Removes every entry from the cache. */
  static clear(): void {
    Memory.bank.clear();
  }

  /**
   * Captures a point-in-time copy of the cache. Each entry is shallow-copied
   * into a fresh Map held on the returned instance, so later mutations of the
   * live cache do not alter the snapshot. (The original returned the live
   * singleton itself, which never behaved as a snapshot.)
   */
  static createSnapShot(): Memory {
    const snapshot = new Memory();
    snapshot.snapshotBank = new Map(
      Array.from(Memory.bank.entries(), ([key, entry]) => [key, { ...entry }]),
    );
    return snapshot;
  }

  /**
   * Restores the cache from a snapshot produced by createSnapShot(). The
   * original `Object.assign(this, snapShot)` copied nothing because the bank
   * is static and snapshot instances had no own properties; here we rebuild
   * the bank from the snapshot's stored entries.
   */
  static loadSnapShot(snapShot: Memory): void {
    if (snapShot?.snapshotBank) {
      Memory.bank = new Map(
        Array.from(snapShot.snapshotBank.entries(), ([key, entry]) => [
          key,
          { ...entry },
        ]),
      );
    }
  }
}
51 changes: 12 additions & 39 deletions src/providers/anthropic.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,9 @@
import * as vscode from "vscode";
import { BaseWebViewProvider } from "./base";
import {
COMMON,
generativeAiModels,
GROQ_CONFIG,
} from "../application/constant";
import { COMMON, generativeAiModels, GROQ_CONFIG } from "../application/constant";
import Anthropic from "@anthropic-ai/sdk";
import { Brain } from "../services/brain";
import {
createAnthropicClient,
getGenerativeAiModel,
getXGroKBaseURL,
} from "../utils/utils";
import { createAnthropicClient, getGenerativeAiModel, getXGroKBaseURL } from "../utils/utils";
import { Memory } from "../memory/base";

type Role = "user" | "assistant";
export interface IHistory {
@@ -26,15 +18,12 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider {
apiKey: string,
generativeAiModel: string,
context: vscode.ExtensionContext,
protected baseUrl?: string,
protected baseUrl?: string
) {
super(extensionUri, apiKey, generativeAiModel, context);
}

public async sendResponse(
response: string,
currentChat: string,
): Promise<boolean | undefined> {
public async sendResponse(response: string, currentChat: string): Promise<boolean | undefined> {
try {
const type = currentChat === "bot" ? "bot-response" : "user-input";
if (currentChat === "bot") {
@@ -50,13 +39,8 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider {
}

if (this.chatHistory.length === 2) {
const chatHistory = Brain.has(COMMON.ANTHROPIC_CHAT_HISTORY)
? Brain.get(COMMON.ANTHROPIC_CHAT_HISTORY)
: [];
Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, [
...chatHistory,
...this.chatHistory,
]);
const chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) : [];
Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]);
}
return await this.currentWebView?.webview.postMessage({
type,
@@ -67,23 +51,14 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider {
}
}

async generateResponse(
apiKey = undefined,
name = undefined,
message: string,
): Promise<string | undefined> {
async generateResponse(message: string, apiKey?: string, name?: string): Promise<string | undefined> {
try {
const { max_tokens } = GROQ_CONFIG;
if (getGenerativeAiModel() === generativeAiModels.GROK) {
this.baseUrl = getXGroKBaseURL();
}
const anthropic: Anthropic = createAnthropicClient(
this.apiKey,
this.baseUrl,
);
let chatHistory = Brain.has(COMMON.ANTHROPIC_CHAT_HISTORY)
? Brain.get(COMMON.ANTHROPIC_CHAT_HISTORY)
: [];
const anthropic: Anthropic = createAnthropicClient(this.apiKey, this.baseUrl);
let chatHistory = Memory.has(COMMON.ANTHROPIC_CHAT_HISTORY) ? Memory.get(COMMON.ANTHROPIC_CHAT_HISTORY) : [];

if (chatHistory?.length) {
chatHistory = [...chatHistory, { role: "user", content: message }];
@@ -107,10 +82,8 @@ export class AnthropicWebViewProvider extends BaseWebViewProvider {
return response;
} catch (error) {
console.error(error);
Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, []);
vscode.window.showErrorMessage(
"Model not responding, please resend your question",
);
Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, []);
vscode.window.showErrorMessage("Model not responding, please resend your question");
}
}
}
41 changes: 10 additions & 31 deletions src/providers/gemini.ts
Original file line number Diff line number Diff line change
@@ -2,7 +2,7 @@ import { GoogleGenerativeAI } from "@google/generative-ai";
import * as vscode from "vscode";
import { COMMON } from "../application/constant";
import { BaseWebViewProvider } from "./base";
import { Brain } from "../services/brain";
import { Memory } from "../memory/base";

type Role = "function" | "user" | "model";
export interface IHistory {
@@ -12,19 +12,11 @@ export interface IHistory {

export class GeminiWebViewProvider extends BaseWebViewProvider {
chatHistory: IHistory[] = [];
constructor(
extensionUri: vscode.Uri,
apiKey: string,
generativeAiModel: string,
context: vscode.ExtensionContext,
) {
constructor(extensionUri: vscode.Uri, apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext) {
super(extensionUri, apiKey, generativeAiModel, context);
}

async sendResponse(
response: string,
currentChat: string,
): Promise<boolean | undefined> {
async sendResponse(response: string, currentChat: string): Promise<boolean | undefined> {
try {
const type = currentChat === "bot" ? "bot-response" : "user-input";
if (currentChat === "bot") {
@@ -39,35 +31,24 @@ export class GeminiWebViewProvider extends BaseWebViewProvider {
});
}
if (this.chatHistory.length === 2) {
const chatHistory = Brain.has(COMMON.GEMINI_CHAT_HISTORY)
? Brain.get(COMMON.GEMINI_CHAT_HISTORY)
: [];
Brain.set(COMMON.GEMINI_CHAT_HISTORY, [
...chatHistory,
...this.chatHistory,
]);
const chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) ? Memory.get(COMMON.GEMINI_CHAT_HISTORY) : [];
Memory.set(COMMON.GEMINI_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]);
}
return await this.currentWebView?.webview.postMessage({
type,
message: response,
});
} catch (error) {
Brain.set(COMMON.GEMINI_CHAT_HISTORY, []);
Memory.set(COMMON.GEMINI_CHAT_HISTORY, []);
console.error(error);
}
}

async generateResponse(
apiKey: string,
name: string,
message: string,
): Promise<string | undefined> {
async generateResponse(apiKey: string, name: string, message: string): Promise<string | undefined> {
try {
const genAi = new GoogleGenerativeAI(apiKey);
const model = genAi.getGenerativeModel({ model: name });
let chatHistory = Brain.has(COMMON.GEMINI_CHAT_HISTORY)
? Brain.get(COMMON.GEMINI_CHAT_HISTORY)
: [];
let chatHistory = Memory.has(COMMON.GEMINI_CHAT_HISTORY) ? Memory.get(COMMON.GEMINI_CHAT_HISTORY) : [];

if (chatHistory?.length) {
chatHistory = [
@@ -107,10 +88,8 @@ export class GeminiWebViewProvider extends BaseWebViewProvider {
const response = result.response;
return response.text();
} catch (error) {
Brain.set(COMMON.GEMINI_CHAT_HISTORY, []);
vscode.window.showErrorMessage(
"Model not responding, please resend your question",
);
Memory.set(COMMON.GEMINI_CHAT_HISTORY, []);
vscode.window.showErrorMessage("Model not responding, please resend your question");
console.error(error);
return;
}
39 changes: 9 additions & 30 deletions src/providers/groq.ts
Original file line number Diff line number Diff line change
@@ -2,7 +2,7 @@ import * as vscode from "vscode";
import { BaseWebViewProvider } from "./base";
import Groq from "groq-sdk";
import { COMMON, GROQ_CONFIG } from "../application/constant";
import { Brain } from "../services/brain";
import { Memory } from "../memory/base";

type Role = "user" | "system";
export interface IHistory {
@@ -12,19 +12,11 @@ export interface IHistory {

export class GroqWebViewProvider extends BaseWebViewProvider {
chatHistory: IHistory[] = [];
constructor(
extensionUri: vscode.Uri,
apiKey: string,
generativeAiModel: string,
context: vscode.ExtensionContext,
) {
constructor(extensionUri: vscode.Uri, apiKey: string, generativeAiModel: string, context: vscode.ExtensionContext) {
super(extensionUri, apiKey, generativeAiModel, context);
}

public async sendResponse(
response: string,
currentChat: string,
): Promise<boolean | undefined> {
public async sendResponse(response: string, currentChat: string): Promise<boolean | undefined> {
try {
const type = currentChat === "bot" ? "bot-response" : "user-input";
if (currentChat === "bot") {
@@ -39,13 +31,8 @@ export class GroqWebViewProvider extends BaseWebViewProvider {
});
}
if (this.chatHistory.length === 2) {
const chatHistory = Brain.has(COMMON.GROQ_CHAT_HISTORY)
? Brain.get(COMMON.GROQ_CHAT_HISTORY)
: [];
Brain.set(COMMON.GROQ_CHAT_HISTORY, [
...chatHistory,
...this.chatHistory,
]);
const chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : [];
Memory.set(COMMON.GROQ_CHAT_HISTORY, [...chatHistory, ...this.chatHistory]);
}
// Once the agent task is done, map the memory into the llm brain.
// Send the final answer to the webview here.
@@ -58,20 +45,14 @@ export class GroqWebViewProvider extends BaseWebViewProvider {
}
}

async generateResponse(
apiKey = undefined,
name = undefined,
message: string,
): Promise<string | undefined> {
async generateResponse(message: string, apiKey?: string, name?: string): Promise<string | undefined> {
try {
const { temperature, max_tokens, top_p, stop } = GROQ_CONFIG;
const groq = new Groq({
apiKey: this.apiKey,
});

let chatHistory = Brain.has(COMMON.GROQ_CHAT_HISTORY)
? Brain.get(COMMON.GROQ_CHAT_HISTORY)
: [];
let chatHistory = Memory.has(COMMON.GROQ_CHAT_HISTORY) ? Memory.get(COMMON.GROQ_CHAT_HISTORY) : [];

if (chatHistory?.length) {
chatHistory = [...chatHistory, { role: "user", content: message }];
@@ -98,10 +79,8 @@ export class GroqWebViewProvider extends BaseWebViewProvider {
return response ?? undefined;
} catch (error) {
console.error(error);
Brain.set(COMMON.GROQ_CHAT_HISTORY, []);
vscode.window.showErrorMessage(
"Model not responding, please resend your question",
);
Memory.set(COMMON.GROQ_CHAT_HISTORY, []);
vscode.window.showErrorMessage("Model not responding, please resend your question");
return;
}
}
59 changes: 0 additions & 59 deletions src/services/brain.ts

This file was deleted.

18 changes: 3 additions & 15 deletions src/services/chat-manager.ts
Original file line number Diff line number Diff line change
@@ -140,11 +140,7 @@ export class ChatManager {
groqAiConfigurations.model,
this._context,
);
return await chatViewProvider.generateResponse(
undefined,
undefined,
message,
);
return await chatViewProvider.generateResponse(message);
}
if (generativeAi === generativeAiModels.GEMINI) {
const geminiConfigurations = this.handleAiProvider(
@@ -169,11 +165,7 @@ export class ChatManager {
const anthropicWebViewProvider = this.getAnthropicWebViewProvider(
anthropicConfigurations,
);
return await anthropicWebViewProvider.generateResponse(
undefined,
undefined,
message,
);
return await anthropicWebViewProvider.generateResponse(message);
}

if (generativeAi === generativeAiModels.GROK) {
@@ -182,11 +174,7 @@ export class ChatManager {
);
const anthropicWebViewProvider =
this.getAnthropicWebViewProvider(grokConfigurations);
return await anthropicWebViewProvider.generateResponse(
undefined,
undefined,
message,
);
return await anthropicWebViewProvider.generateResponse(message);
}
} catch (error) {
const model = getConfigValue("generativeAi.option");
29 changes: 9 additions & 20 deletions src/utils/utils.ts
Original file line number Diff line number Diff line change
@@ -1,12 +1,8 @@
import * as markdownit from "markdown-it";
import * as vscode from "vscode";
import { Brain } from "../services/brain";
import {
APP_CONFIG,
COMMON,
generativeAiModels,
} from "../application/constant";
import { APP_CONFIG, COMMON, generativeAiModels } from "../application/constant";
import Anthropic from "@anthropic-ai/sdk";
import { Memory } from "../memory/base";

type GetConfigValueType<T> = (key: string) => T | undefined;

@@ -18,9 +14,7 @@ export const formatText = (text?: string): string => {
return "";
};

export const getConfigValue: GetConfigValueType<any> = <T>(
key: string,
): T | undefined => {
export const getConfigValue: GetConfigValueType<any> = <T>(key: string): T | undefined => {
return vscode.workspace.getConfiguration().get<T>(key);
};

@@ -29,7 +23,7 @@ export const vscodeErrorMessage = (error: string, metaData?: any) => {
};

export const getLatestChatHistory = (key: string) => {
let chatHistory = Brain.has(key) ? Brain.get(key) : [];
let chatHistory = Memory.has(key) ? Memory.get(key) : [];
if (chatHistory?.length > 3) {
chatHistory = chatHistory.slice(-3);
}
@@ -39,13 +33,13 @@ export const getLatestChatHistory = (key: string) => {
export const resetChatHistory = (model: string) => {
switch (model) {
case generativeAiModels.ANTHROPIC:
Brain.set(COMMON.ANTHROPIC_CHAT_HISTORY, []);
Memory.set(COMMON.ANTHROPIC_CHAT_HISTORY, []);
break;
case generativeAiModels.GEMINI:
Brain.set(COMMON.GEMINI_CHAT_HISTORY, []);
Memory.set(COMMON.GEMINI_CHAT_HISTORY, []);
break;
case generativeAiModels.GROQ:
Brain.set(COMMON.GROQ_CHAT_HISTORY, []);
Memory.set(COMMON.GROQ_CHAT_HISTORY, []);
break;
default:
break;
@@ -72,11 +66,7 @@ export const getGenerativeAiModel = (): string | undefined => {
return getConfigValue("generativeAi.option");
};

export function getUri(
webview: vscode.Webview,
extensionUri: vscode.Uri,
pathList: string[],
) {
export function getUri(webview: vscode.Webview, extensionUri: vscode.Uri, pathList: string[]) {
return webview.asWebviewUri(vscode.Uri.joinPath(extensionUri, ...pathList));
}

@@ -85,8 +75,7 @@ export function getUri(
// and ensure script integrity when using Content Security Policy (CSP)
export const getNonce = () => {
let text = "";
const possible =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
const possible = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
for (let i = 0; i < 32; i++) {
text += possible.charAt(Math.floor(Math.random() * possible.length));
}

0 comments on commit 2ae80e4

Please sign in to comment.