diff --git a/.changeset/five-eggs-brush.md b/.changeset/five-eggs-brush.md
new file mode 100644
index 000000000..2e2e1a556
--- /dev/null
+++ b/.changeset/five-eggs-brush.md
@@ -0,0 +1,99 @@
+---
+"@voltagent/postgres": patch
+---
+
+feat: add PostgreSQLVectorAdapter for semantic search with vanilla PostgreSQL
+
+## What Changed for You
+
+The `@voltagent/postgres` package now includes `PostgreSQLVectorAdapter` for storing and querying vector embeddings in vanilla PostgreSQL (no extensions required). This enables semantic search over conversation history, letting agents retrieve contextually relevant messages based on meaning rather than keywords alone.
+
+## New: PostgreSQLVectorAdapter
+
+```typescript
+import { Agent, Memory, AiSdkEmbeddingAdapter } from "@voltagent/core";
+import { PostgreSQLMemoryAdapter, PostgreSQLVectorAdapter } from "@voltagent/postgres";
+import { openai } from "@ai-sdk/openai";
+
+const memory = new Memory({
+  storage: new PostgreSQLMemoryAdapter({
+    connectionString: process.env.DATABASE_URL,
+  }),
+  embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")),
+  vector: new PostgreSQLVectorAdapter({
+    connection: process.env.DATABASE_URL,
+  }),
+});
+
+const agent = new Agent({
+  name: "Assistant",
+  instructions: "You are a helpful assistant with semantic memory",
+  model: openai("gpt-4o-mini"),
+  memory,
+});
+
+// Semantic search is enabled automatically when userId + conversationId are provided
+const result = await agent.generateText("What did we discuss about the project?", {
+  userId: "user-123",
+  conversationId: "conv-456",
+});
+```
+
+## Key Features
+
+- **No Extensions Required**: Works with vanilla PostgreSQL (no pgvector needed)
+- **BYTEA Storage**: Vectors are stored as binary data using PostgreSQL's native BYTEA type
+- **In-Memory Similarity**: Cosine similarity is computed in application memory, giving exact (non-approximate) results
+- **Automatic Setup**: Creates the vector table and its indexes automatically
+- **Configurable**: Customize the table prefix, vector dimensions, cache size, and retry behavior
+- **Production Ready**: Connection pooling, exponential backoff, and LRU caching
+
+## Configuration Options
+
+```typescript
+const vectorAdapter = new PostgreSQLVectorAdapter({
+  connection: process.env.DATABASE_URL,
+
+  // Optional: customize the table prefix (default: "voltagent_vector"); the table is created as "<prefix>_vectors"
+  tablePrefix: "custom_vector",
+
+  // Optional: maximum vector dimensions (default: 1536, matching text-embedding-3-small)
+  maxVectorDimensions: 1536,
+
+  // Optional: LRU cache size (default: 100)
+  cacheSize: 100,
+
+  // Optional: connection pool size (default: 10)
+  maxConnections: 10,
+});
+```
+
+## How It Works
+
+1. **Embedding Generation**: Messages are converted to vector embeddings using your chosen embedding model
+2. **Binary Storage**: Vectors are serialized to binary (BYTEA) and stored in PostgreSQL
+3. **In-Memory Similarity**: On search, stored vectors are loaded and cosine similarity is computed in memory
+4. **Context Merging**: Relevant messages are merged into the conversation context automatically
+
+## Why This Matters
+
+- **Better Context Retrieval**: Find relevant past conversations even when the wording differs
+- **Unified Storage**: Keep vectors and messages in the same PostgreSQL database
+- **Zero Extensions**: Works with any PostgreSQL instance (12+); no extension installation needed
+- **Cost Effective**: No separate vector database needed (Pinecone, Weaviate, etc.)
+- **Familiar Tools**: Use standard PostgreSQL management and monitoring tools +- **Framework Parity**: Same `VectorStorageAdapter` interface as other providers + +## Performance Notes + +This adapter loads all vectors into memory for similarity computation, which works well for: + +- **Small to medium datasets** (< 10,000 vectors) +- **Development and prototyping** +- **Applications where extension installation is not possible** + +For large-scale production workloads with millions of vectors, consider specialized vector databases or PostgreSQL with pgvector extension for database-level similarity operations. + +## Migration Notes + +Existing PostgreSQL memory adapters continue to work without changes. Vector storage is optional and only activates when you configure both `embedding` and `vector` in the Memory constructor. diff --git a/.changeset/real-olives-wave.md b/.changeset/real-olives-wave.md new file mode 100644 index 000000000..85b86b270 --- /dev/null +++ b/.changeset/real-olives-wave.md @@ -0,0 +1,149 @@ +--- +"@voltagent/voltagent-memory": patch +"@voltagent/core": patch +--- + +feat: introduce managed memory - ready-made cloud storage for VoltAgent + +## What Changed for You + +VoltAgent now offers a managed memory solution that eliminates the need to run your own database infrastructure. The new `@voltagent/voltagent-memory` package provides a `ManagedMemoryAdapter` that connects to VoltOps Managed Memory service, perfect for pilots, demos, and production workloads. + +## New Package: @voltagent/voltagent-memory + +### Automatic Setup (Recommended) + +Get your credentials from [console.voltagent.dev/memory/managed-memory](https://console.voltagent.dev/memory/managed-memory) and set environment variables: + +```bash +# .env +VOLTAGENT_PUBLIC_KEY=pk_... +VOLTAGENT_SECRET_KEY=sk_... 
+``` + +```typescript +import { Agent, Memory } from "@voltagent/core"; +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { openai } from "@ai-sdk/openai"; + +// Adapter automatically uses VoltOps credentials from environment +const agent = new Agent({ + name: "Assistant", + instructions: "You are a helpful assistant", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + }), + }), +}); + +// Use like any other agent - memory is automatically persisted +const result = await agent.generateText("Hello!", { + userId: "user-123", + conversationId: "conv-456", +}); +``` + +### Manual Setup + +Pass a `VoltOpsClient` instance explicitly: + +```typescript +import { Agent, Memory, VoltOpsClient } from "@voltagent/core"; +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { openai } from "@ai-sdk/openai"; + +const voltOpsClient = new VoltOpsClient({ + publicKey: process.env.VOLTAGENT_PUBLIC_KEY!, + secretKey: process.env.VOLTAGENT_SECRET_KEY!, +}); + +const agent = new Agent({ + name: "Assistant", + instructions: "You are a helpful assistant", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + voltOpsClient, // explicit client + }), + }), +}); +``` + +### Vector Storage (Optional) + +Enable semantic search with `ManagedMemoryVectorAdapter`: + +```typescript +import { ManagedMemoryAdapter, ManagedMemoryVectorAdapter } from "@voltagent/voltagent-memory"; +import { AiSdkEmbeddingAdapter, Memory } from "@voltagent/core"; +import { openai } from "@ai-sdk/openai"; + +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new ManagedMemoryVectorAdapter({ + databaseName: "production-memory", + }), +}); +``` + +## Key Features + +- **Zero Infrastructure**: No need to provision or manage databases +- **Quick Setup**: Create a managed memory database in under 3 minutes from VoltOps Console +- **Framework Parity**: Works identically to local Postgres, LibSQL, or Supabase adapters +- **Production Ready**: Managed infrastructure with reliability guardrails +- **Multi-Region**: Available in US (Virginia) and EU (Germany) + +## Getting Started + +1. **Install the package**: + +```bash +npm install @voltagent/voltagent-memory +# or +pnpm add @voltagent/voltagent-memory +``` + +2. **Create a managed database**: + - Navigate to [console.voltagent.dev/memory/managed-memory](https://console.voltagent.dev/memory/managed-memory) + - Click **Create Database** + - Enter a name and select region (US or EU) + - Copy your VoltOps API keys from Settings + +3. **Configure environment variables**: + +```bash +VOLTAGENT_PUBLIC_KEY=pk_... +VOLTAGENT_SECRET_KEY=sk_... +``` + +4. 
**Use the adapter**: + +```typescript +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { Memory } from "@voltagent/core"; + +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "your-database-name", + }), +}); +``` + +## Why This Matters + +- **Faster Prototyping**: Launch pilots without database setup +- **Reduced Complexity**: No infrastructure management overhead +- **Consistent Experience**: Same StorageAdapter interface across all memory providers +- **Scalable Path**: Start with managed memory, migrate to self-hosted when needed +- **Multi-Region Support**: Deploy close to your users in US or EU + +## Migration Notes + +Existing agents using local storage adapters (InMemory, LibSQL, Postgres, Supabase) continue to work unchanged. Managed memory is an optional addition that provides a cloud-hosted alternative for teams who prefer not to manage their own database infrastructure. diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e222f231..8f136360b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3992,8 +3992,8 @@ // 1. Initialize VoltOps client const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); // 2. Create agent with VoltOps prompts @@ -4031,8 +4031,8 @@ import { Agent, VoltAgent, VoltOpsClient } from "@voltagent/core"; const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, observability: true, // Enable observability - default is true prompts: true, // Enable prompt management - default is true }); @@ -4056,8 +4056,8 @@ agents: { myAgent }, - telemetryExporter: new VoltAgentExporter({ + voltOpsClient: new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, - baseUrl: "https://api.voltagent.dev", }), }); @@ -4087,8 +4087,8 @@ ```typescript const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, baseUrl: "https://api.voltagent.dev", // Default observability: true, // Enable observability export - default is true prompts: false, // Observability only - default is true diff --git a/examples/with-dynamic-prompts/.env.example b/examples/with-dynamic-prompts/.env.example index 1d5734ec0..4fd3384ab 100644 --- a/examples/with-dynamic-prompts/.env.example +++ b/examples/with-dynamic-prompts/.env.example @@ -1,4 +1,4 @@ OPENAI_API_KEY=your_openai_api_key_here -VOLTOPS_PUBLIC_KEY=your_voltops_public_key -VOLTOPS_SECRET_KEY=your_voltops_secret_key \ No newline at end of file +VOLTAGENT_PUBLIC_KEY=your_VOLTAGENT_PUBLIC_KEY +VOLTAGENT_SECRET_KEY=your_VOLTAGENT_SECRET_KEY \ No newline at end of file diff --git a/examples/with-dynamic-prompts/src/index.ts b/examples/with-dynamic-prompts/src/index.ts index 685e838e3..372a4cecb 100644 --- a/examples/with-dynamic-prompts/src/index.ts +++ b/examples/with-dynamic-prompts/src/index.ts @@ -10,8 +10,8 @@ const logger = createPinoLogger({ }); const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: 
process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); const supportAgent = new Agent({ diff --git a/examples/with-viteval/.env.example b/examples/with-viteval/.env.example index a0077a057..0e81594a7 100644 --- a/examples/with-viteval/.env.example +++ b/examples/with-viteval/.env.example @@ -1,4 +1,4 @@ OPENAI_API_KEY=sk-svcacct-your_openai_api_key_here -VOLTOPS_PUBLIC_KEY=pk_your_public_key_here -VOLTOPS_SECRET_KEY=sk_your_secret_key_here +VOLTAGENT_PUBLIC_KEY=pk_your_public_key_here +VOLTAGENT_SECRET_KEY=sk_your_secret_key_here diff --git a/examples/with-viteval/src/voltagent.ts b/examples/with-viteval/src/voltagent.ts index 232a88d23..53a5117f8 100644 --- a/examples/with-viteval/src/voltagent.ts +++ b/examples/with-viteval/src/voltagent.ts @@ -10,8 +10,8 @@ import { scienceAgent } from "#/agents/science"; import { supervisorAgent } from "#/agents/supervisor"; const env = cleanEnv(process.env, { - VOLTOPS_PUBLIC_KEY: str(), - VOLTOPS_SECRET_KEY: str(), + VOLTAGENT_PUBLIC_KEY: str(), + VOLTAGENT_SECRET_KEY: str(), }); export const voltagent = new VoltAgent({ @@ -24,8 +24,8 @@ export const voltagent = new VoltAgent({ science: scienceAgent, }, voltOpsClient: new VoltOpsClient({ - publicKey: env.VOLTOPS_PUBLIC_KEY, - secretKey: env.VOLTOPS_SECRET_KEY, + publicKey: env.VOLTAGENT_PUBLIC_KEY, + secretKey: env.VOLTAGENT_SECRET_KEY, }), server: honoServer(), }); diff --git a/examples/with-voltagent-managed-memory/.env.example b/examples/with-voltagent-managed-memory/.env.example new file mode 100644 index 000000000..08babc7fd --- /dev/null +++ b/examples/with-voltagent-managed-memory/.env.example @@ -0,0 +1,6 @@ +VOLTAGENT_PUBLIC_KEY=pk_xxxxxxxxxxxxxxxxxxxxx +VOLTAGENT_SECRET_KEY=sk_xxxxxxxxxxxxxxxxxxxxx +VOLTOPS_API_URL=https://api.voltagent.dev +MANAGED_MEMORY_DB_NAME=production-memory +PORT=3141 +LOG_LEVEL=info diff --git a/examples/with-voltagent-managed-memory/README.md b/examples/with-voltagent-managed-memory/README.md new file mode 100644 index 000000000..6f6e7dbea --- /dev/null +++ b/examples/with-voltagent-managed-memory/README.md @@ -0,0 +1,53 @@ +# VoltAgent Example – Managed Memory via VoltOps + +This example shows how to run VoltAgent with the hosted _VoltAgent Managed Memory_ service. The agent persists conversations in VoltOps without needing direct PostgreSQL credentials. + +## Prerequisites + +1. **VoltOps Project Keys** – create or reuse a VoltOps project and copy the **public** and **secret** API keys. +2. **Managed Memory Database** – create a managed database from VoltOps Console or the VoltOps API and note its name (e.g. `production-memory`). +3. **Node.js 20+** and **pnpm** (or your preferred package manager). + +## Setup + +```bash +cd examples/with-voltagent-managed-memory +cp .env.example .env +# fill in VOLTAGENT_PUBLIC_KEY, VOLTAGENT_SECRET_KEY and MANAGED_MEMORY_DB_NAME +pnpm install +pnpm dev +``` + +The agent boots on [`http://localhost:3141`](http://localhost:3141) and stores memory remotely. Logs include the connection metadata returned by VoltOps. + +## Environment variables + +| Variable | Description | +| ------------------------ | ------------------------------------------------------------------ | +| `VOLTAGENT_PUBLIC_KEY` | VoltOps project public key (starts with `pk_`). | +| `VOLTAGENT_SECRET_KEY` | VoltOps project secret key (starts with `sk_`). | +| `VOLTOPS_API_URL` | VoltOps API base URL. Leave blank for `https://api.voltagent.dev`. 
| +| `MANAGED_MEMORY_DB_NAME` | The managed memory database name (e.g. `production-memory`). | +| `PORT` | Optional local port (defaults to `3141`). | +| `LOG_LEVEL` | Optional logger level (`info`, `debug`, etc.). | + +## How it works + +```ts +const voltOpsClient = new VoltOpsClient({ publicKey, secretKey, baseUrl }); +const managedMemory = new ManagedMemoryAdapter({ + databaseName: process.env.MANAGED_MEMORY_DB_NAME, + voltOpsClient, +}); + +const agent = new Agent({ + model: openai("gpt-4o-mini"), + memory: new Memory({ storage: managedMemory }), +}); +``` + +The adapter calls VoltOps REST endpoints for every memory operation (`getMessages`, `createConversation`, etc.), so your application only needs VoltOps credentials. + +--- + +Need a ready-made database or rotation policies? Head to [VoltOps Console](https://console.voltagent.dev/) and open the **Managed Memory** tab. diff --git a/examples/sdk-trace-example/package.json b/examples/with-voltagent-managed-memory/package.json similarity index 58% rename from examples/sdk-trace-example/package.json rename to examples/with-voltagent-managed-memory/package.json index 41d6cfbc3..0e8cd7851 100644 --- a/examples/sdk-trace-example/package.json +++ b/examples/with-voltagent-managed-memory/package.json @@ -1,10 +1,14 @@ { - "name": "voltagent-example-sdk-trace", + "name": "voltagent-example-with-voltagent-managed-memory", "author": "", "dependencies": { + "@ai-sdk/openai": "^2.0.2", + "@voltagent/core": "^1.1.23", "@voltagent/logger": "^1.0.2", - "@voltagent/sdk": "^0.1.6", - "@voltagent/server-hono": "^1.0.15" + "@voltagent/server-hono": "^1.0.15", + "@voltagent/voltagent-memory": "^0.1.0", + "ai": "^5.0.12", + "zod": "^3.25.76" }, "devDependencies": { "@types/node": "^24.2.1", @@ -14,8 +18,7 @@ "keywords": [ "agent", "ai", - "sdk", - "trace", + "managed-memory", "voltagent" ], "license": "MIT", @@ -24,7 +27,7 @@ "build": "tsc", "dev": "tsx watch --env-file=.env ./src", "start": "node dist/index.js", - "trace": "tsx --env-file=.env ./src/index.ts" + "volt": "volt" }, "type": "module" } diff --git a/examples/with-voltagent-managed-memory/src/index.ts b/examples/with-voltagent-managed-memory/src/index.ts new file mode 100644 index 000000000..206f6138c --- /dev/null +++ b/examples/with-voltagent-managed-memory/src/index.ts @@ -0,0 +1,49 @@ +import { openai } from "@ai-sdk/openai"; +import { Agent, AiSdkEmbeddingAdapter, Memory, VoltAgent, VoltOpsClient } from "@voltagent/core"; +import { createPinoLogger } from "@voltagent/logger"; +import { honoServer } from "@voltagent/server-hono"; +import { ManagedMemoryAdapter, ManagedMemoryVectorAdapter } from "@voltagent/voltagent-memory"; + +/* const voltOpsClient = new VoltOpsClient({ + baseUrl: process.env.VOLTOPS_API_URL, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, +}); + */ +const managedMemory = new ManagedMemoryAdapter({ + databaseName: "us-test", +}); + +const managedVector = new ManagedMemoryVectorAdapter({ + databaseName: "us-test", +}); + +const agent = new Agent({ + name: "Managed Memory Agent", + instructions: + "A helpful assistant that stores conversations in VoltAgent Managed Memory via VoltOps.", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: managedMemory, + vector: managedVector, + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + }), +}); + +const logger = createPinoLogger({ + name: "with-voltagent-managed-memory", + level: "info", +}); + +new VoltAgent({ + agents: { + managed: agent, 
+ }, + logger, + server: honoServer({ port: Number(process.env.PORT || 3141) }), +}); + +(async () => { + const connection = await managedMemory.getConnectionInfo(); + logger.info("VoltAgent managed memory ready", { connection }); +})(); diff --git a/examples/with-voltagent-managed-memory/tsconfig.json b/examples/with-voltagent-managed-memory/tsconfig.json new file mode 100644 index 000000000..04700a4c5 --- /dev/null +++ b/examples/with-voltagent-managed-memory/tsconfig.json @@ -0,0 +1,16 @@ +{ + "compilerOptions": { + "target": "es2018", + "module": "esnext", + "moduleResolution": "node", + "lib": ["esnext", "dom"], + "declaration": true, + "outDir": "dist", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true + }, + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "dist"] +} diff --git a/examples/with-whatsapp/package.json b/examples/with-whatsapp/package.json index 862fd491e..f8684109b 100644 --- a/examples/with-whatsapp/package.json +++ b/examples/with-whatsapp/package.json @@ -2,15 +2,15 @@ "name": "voltagent-example-with-whatsapp", "author": "", "dependencies": { - "@ai-sdk/openai": "^2.0.34", - "@supabase/supabase-js": "^2.57.4", + "@ai-sdk/openai": "^2.0.2", + "@supabase/supabase-js": "^2.49.4", "@voltagent/cli": "^0.1.11", "@voltagent/core": "^1.1.23", "@voltagent/libsql": "^1.0.7", "@voltagent/logger": "^1.0.2", "@voltagent/server-hono": "^1.0.15", - "ai": "^5.0.51", - "dotenv": "^16.4.7", + "ai": "^5.0.12", + "dotenv": "^16.4.5", "zod": "^3.25.76" }, "devDependencies": { @@ -32,4 +32,4 @@ "volt": "volt" }, "type": "module" -} \ No newline at end of file +} diff --git a/examples/with-workflow/.env.example b/examples/with-workflow/.env.example index 1d5734ec0..4fd3384ab 100644 --- a/examples/with-workflow/.env.example +++ b/examples/with-workflow/.env.example @@ -1,4 +1,4 @@ OPENAI_API_KEY=your_openai_api_key_here -VOLTOPS_PUBLIC_KEY=your_voltops_public_key -VOLTOPS_SECRET_KEY=your_voltops_secret_key \ No newline at end of file +VOLTAGENT_PUBLIC_KEY=your_VOLTAGENT_PUBLIC_KEY +VOLTAGENT_SECRET_KEY=your_VOLTAGENT_SECRET_KEY \ No newline at end of file diff --git a/packages/core/CHANGELOG.md b/packages/core/CHANGELOG.md index 1dbcb75ca..9caba6c05 100644 --- a/packages/core/CHANGELOG.md +++ b/packages/core/CHANGELOG.md @@ -4109,8 +4109,8 @@ // 1. Initialize VoltOps client const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); // 2. 
Create agent with VoltOps prompts @@ -4148,8 +4148,8 @@ import { Agent, VoltAgent, VoltOpsClient } from "@voltagent/core"; const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, observability: true, // Enable observability - default is true prompts: true, // Enable prompt management - default is true }); @@ -4173,8 +4173,8 @@ agents: { myAgent }, - telemetryExporter: new VoltAgentExporter({ + voltOpsClient: new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, - baseUrl: "https://api.voltagent.dev", }), }); @@ -4204,8 +4204,8 @@ ```typescript const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, baseUrl: "https://api.voltagent.dev", // Default observability: true, // Enable observability export - default is true prompts: false, // Observability only - default is true diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index ea3115b6b..b39004823 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -167,3 +167,25 @@ export { createAsyncIterableStream, type AsyncIterableStream } from "@voltagent/ // Convenience re-exports from ai-sdk so apps need only @voltagent/core export { stepCountIs, hasToolCall } from "ai"; export type { StopWhen } from "./ai-types"; + +export type { + ManagedMemoryStatus, + ManagedMemoryConnectionInfo, + ManagedMemoryDatabaseSummary, + ManagedMemoryCredentialSummary, + ManagedMemoryCredentialListResult, + ManagedMemoryCredentialCreateResult, + ManagedMemoryAddMessageInput, + ManagedMemoryAddMessagesInput, + ManagedMemoryGetMessagesInput, + ManagedMemoryClearMessagesInput, + ManagedMemoryUpdateConversationInput, + ManagedMemoryWorkingMemoryInput, + ManagedMemorySetWorkingMemoryInput, + ManagedMemoryWorkflowStateUpdateInput, + ManagedMemoryMessagesClient, + ManagedMemoryConversationsClient, + ManagedMemoryWorkingMemoryClient, + ManagedMemoryWorkflowStatesClient, + ManagedMemoryVoltOpsClient, +} from "./voltops/types"; diff --git a/packages/core/src/voltops/client.ts b/packages/core/src/voltops/client.ts index 51d3b6952..65467fd09 100644 --- a/packages/core/src/voltops/client.ts +++ b/packages/core/src/voltops/client.ts @@ -5,14 +5,40 @@ * Replaces the old telemetryExporter approach with a comprehensive solution. 
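+ * It also exposes managed memory REST helpers (messages, conversations, working memory, workflow states, vectors)
+ * that the @voltagent/voltagent-memory adapters build on.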
*/ +import { safeStringify } from "@voltagent/internal"; +import type { UIMessage } from "ai"; import { type Logger, LoggerProxy } from "../logger"; import { LogEvents } from "../logger/events"; import { ResourceType, buildLogContext, buildVoltOpsLogMessage } from "../logger/message-builder"; +import type { SearchResult, VectorItem } from "../memory/adapters/vector/types"; +import type { + Conversation, + ConversationQueryOptions, + CreateConversationInput, + GetMessagesOptions, + WorkflowStateEntry, +} from "../memory/types"; import { AgentRegistry } from "../registries/agent-registry"; // VoltAgentExporter removed - migrated to OpenTelemetry import { VoltOpsPromptManagerImpl } from "./prompt-manager"; import type { VoltOpsClient as IVoltOpsClient, + ManagedMemoryAddMessageInput, + ManagedMemoryAddMessagesInput, + ManagedMemoryClearMessagesInput, + ManagedMemoryCredentialCreateResult, + ManagedMemoryCredentialListResult, + ManagedMemoryDatabaseSummary, + ManagedMemoryDeleteVectorsInput, + ManagedMemoryGetMessagesInput, + ManagedMemorySearchVectorsInput, + ManagedMemorySetWorkingMemoryInput, + ManagedMemoryStoreVectorInput, + ManagedMemoryStoreVectorsBatchInput, + ManagedMemoryUpdateConversationInput, + ManagedMemoryVoltOpsClient, + ManagedMemoryWorkflowStateUpdateInput, + ManagedMemoryWorkingMemoryInput, PromptHelper, PromptReference, VoltOpsClientOptions, @@ -27,8 +53,13 @@ export class VoltOpsClient implements IVoltOpsClient { public readonly options: VoltOpsClientOptions & { baseUrl: string }; // observability removed - now handled by VoltAgentObservability public readonly prompts?: VoltOpsPromptManager; + public readonly managedMemory: ManagedMemoryVoltOpsClient; private readonly logger: Logger; + private get fetchImpl(): typeof fetch { + return this.options.fetch ?? fetch; + } + constructor(options: VoltOpsClientOptions) { // Merge promptCache options properly to preserve defaults const defaultPromptCache = { @@ -49,6 +80,7 @@ export class VoltOpsClient implements IVoltOpsClient { }; this.logger = new LoggerProxy({ component: "voltops-client" }); + this.managedMemory = this.createManagedMemoryClient(); // Check if keys are valid (not empty and have correct prefixes) const hasValidKeys = @@ -169,6 +201,545 @@ export class VoltOpsClient implements IVoltOpsClient { return this.prompts; } + private async request(method: string, endpoint: string, body?: unknown): Promise { + const url = `${this.options.baseUrl.replace(/\/$/, "")}${endpoint}`; + const headers: Record = { + "Content-Type": "application/json", + "X-Public-Key": this.options.publicKey || "", + "X-Secret-Key": this.options.secretKey || "", + }; + + try { + const response = await this.fetchImpl(url, { + method, + headers, + body: body !== undefined ? 
safeStringify(body) : undefined, + }); + + const payload = await response.json().catch(() => ({})); + + if (!response.ok) { + throw new Error(payload?.message || `VoltOps request failed (${response.status})`); + } + + return payload as T; + } catch (error) { + this.logger.error("VoltOps request failed", { endpoint, method, error }); + throw error; + } + } + + private buildQueryString(params: Record): string { + const searchParams = new URLSearchParams(); + + for (const [key, value] of Object.entries(params)) { + if (value === undefined || value === null) { + continue; + } + + if (Array.isArray(value)) { + if (value.length === 0) { + continue; + } + searchParams.set(key, value.join(",")); + } else if (value instanceof Date) { + searchParams.set(key, value.toISOString()); + } else { + searchParams.set(key, String(value)); + } + } + + const query = searchParams.toString(); + return query ? `?${query}` : ""; + } + + private createManagedMemoryClient(): ManagedMemoryVoltOpsClient { + return { + messages: { + add: (databaseId, input) => this.addManagedMemoryMessage(databaseId, input), + addBatch: (databaseId, input) => this.addManagedMemoryMessages(databaseId, input), + list: (databaseId, input) => this.getManagedMemoryMessages(databaseId, input), + clear: (databaseId, input) => this.clearManagedMemoryMessages(databaseId, input), + }, + conversations: { + create: (databaseId, input) => this.createManagedMemoryConversation(databaseId, input), + get: (databaseId, conversationId) => + this.getManagedMemoryConversation(databaseId, conversationId), + query: (databaseId, options) => this.queryManagedMemoryConversations(databaseId, options), + update: (databaseId, input) => this.updateManagedMemoryConversation(databaseId, input), + delete: (databaseId, conversationId) => + this.deleteManagedMemoryConversation(databaseId, conversationId), + }, + workingMemory: { + get: (databaseId, input) => this.getManagedMemoryWorkingMemory(databaseId, input), + set: (databaseId, input) => this.setManagedMemoryWorkingMemory(databaseId, input), + delete: (databaseId, input) => this.deleteManagedMemoryWorkingMemory(databaseId, input), + }, + workflowStates: { + get: (databaseId, executionId) => + this.getManagedMemoryWorkflowState(databaseId, executionId), + set: (databaseId, executionId, state) => + this.setManagedMemoryWorkflowState(databaseId, executionId, state), + update: (databaseId, input) => this.updateManagedMemoryWorkflowState(databaseId, input), + listSuspended: (databaseId, workflowId) => + this.getManagedMemorySuspendedWorkflowStates(databaseId, workflowId), + }, + vectors: { + store: (databaseId, input) => this.storeManagedMemoryVector(databaseId, input), + storeBatch: (databaseId, input) => this.storeManagedMemoryVectors(databaseId, input), + search: (databaseId, input) => this.searchManagedMemoryVectors(databaseId, input), + get: (databaseId, vectorId) => this.getManagedMemoryVector(databaseId, vectorId), + delete: (databaseId, vectorId) => this.deleteManagedMemoryVector(databaseId, vectorId), + deleteBatch: (databaseId, input) => this.deleteManagedMemoryVectors(databaseId, input), + clear: (databaseId) => this.clearManagedMemoryVectors(databaseId), + count: (databaseId) => this.countManagedMemoryVectors(databaseId), + }, + }; + } + + public async listManagedMemoryDatabases(): Promise { + const payload = await this.request<{ + success: boolean; + data: { databases: ManagedMemoryDatabaseSummary[] }; + }>("GET", "/managed-memory/projects/databases"); + + if (!payload?.success) { + throw new Error("Failed 
to fetch managed memory databases from VoltOps"); + } + + return payload.data?.databases ?? []; + } + + public async listManagedMemoryCredentials( + databaseId: string, + ): Promise { + const payload = await this.request<{ + success: boolean; + data: ManagedMemoryCredentialListResult; + }>("GET", `/managed-memory/projects/databases/${databaseId}/credentials`); + + if (!payload?.success || !payload.data) { + throw new Error("Failed to fetch managed memory credentials from VoltOps"); + } + + return payload.data; + } + + public async createManagedMemoryCredential( + databaseId: string, + input: { name?: string } = {}, + ): Promise { + const payload = await this.request<{ + success: boolean; + data: ManagedMemoryCredentialCreateResult; + }>("POST", `/managed-memory/projects/databases/${databaseId}/credentials`, input); + + if (!payload?.success || !payload.data) { + throw new Error("Failed to create managed memory credential via VoltOps"); + } + + return payload.data; + } + + private async addManagedMemoryMessage( + databaseId: string, + input: ManagedMemoryAddMessageInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "POST", + `/managed-memory/projects/databases/${databaseId}/messages`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to add managed memory message via VoltOps"); + } + } + + private async addManagedMemoryMessages( + databaseId: string, + input: ManagedMemoryAddMessagesInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "POST", + `/managed-memory/projects/databases/${databaseId}/messages/batch`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to add managed memory messages via VoltOps"); + } + } + + private async getManagedMemoryMessages( + databaseId: string, + input: ManagedMemoryGetMessagesInput, + ): Promise { + const options: GetMessagesOptions | undefined = input.options; + const query = this.buildQueryString({ + conversationId: input.conversationId, + userId: input.userId, + limit: options?.limit, + before: options?.before, + after: options?.after, + roles: options?.roles, + }); + + const payload = await this.request<{ + success: boolean; + data?: { messages?: UIMessage[] }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/messages${query}`); + + if (!payload?.success) { + throw new Error("Failed to fetch managed memory messages via VoltOps"); + } + + return payload.data?.messages ?? 
[]; + } + + private async clearManagedMemoryMessages( + databaseId: string, + input: ManagedMemoryClearMessagesInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "DELETE", + `/managed-memory/projects/databases/${databaseId}/messages`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to clear managed memory messages via VoltOps"); + } + } + + private async storeManagedMemoryVector( + databaseId: string, + input: ManagedMemoryStoreVectorInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "POST", + `/managed-memory/projects/databases/${databaseId}/vectors`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to store managed memory vector via VoltOps"); + } + } + + private async storeManagedMemoryVectors( + databaseId: string, + input: ManagedMemoryStoreVectorsBatchInput, + ): Promise { + if (!input.items || input.items.length === 0) { + return; + } + + const payload = await this.request<{ success: boolean }>( + "POST", + `/managed-memory/projects/databases/${databaseId}/vectors/batch`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to store managed memory vectors via VoltOps"); + } + } + + private async searchManagedMemoryVectors( + databaseId: string, + input: ManagedMemorySearchVectorsInput, + ): Promise { + const payload = await this.request<{ + success: boolean; + data?: { results?: SearchResult[] }; + }>("POST", `/managed-memory/projects/databases/${databaseId}/vectors/search`, input); + + if (!payload?.success) { + throw new Error("Failed to search managed memory vectors via VoltOps"); + } + + return payload.data?.results ?? []; + } + + private async getManagedMemoryVector( + databaseId: string, + vectorId: string, + ): Promise { + const payload = await this.request<{ + success: boolean; + data?: { vector?: VectorItem | null }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/vectors/${vectorId}`); + + if (!payload?.success) { + throw new Error("Failed to fetch managed memory vector via VoltOps"); + } + + return payload.data?.vector ?? 
null; + } + + private async deleteManagedMemoryVector(databaseId: string, vectorId: string): Promise { + const payload = await this.request<{ success: boolean }>( + "DELETE", + `/managed-memory/projects/databases/${databaseId}/vectors/${vectorId}`, + ); + + if (!payload?.success) { + throw new Error("Failed to delete managed memory vector via VoltOps"); + } + } + + private async deleteManagedMemoryVectors( + databaseId: string, + input: ManagedMemoryDeleteVectorsInput, + ): Promise { + if (!input.ids || input.ids.length === 0) { + return; + } + + const payload = await this.request<{ success: boolean }>( + "POST", + `/managed-memory/projects/databases/${databaseId}/vectors/delete`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to delete managed memory vectors via VoltOps"); + } + } + + private async clearManagedMemoryVectors(databaseId: string): Promise { + const payload = await this.request<{ success: boolean }>( + "POST", + `/managed-memory/projects/databases/${databaseId}/vectors/clear`, + ); + + if (!payload?.success) { + throw new Error("Failed to clear managed memory vectors via VoltOps"); + } + } + + private async countManagedMemoryVectors(databaseId: string): Promise { + const payload = await this.request<{ + success: boolean; + data?: { count?: number }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/vectors/count`); + + if (!payload?.success || typeof payload.data?.count !== "number") { + throw new Error("Failed to count managed memory vectors via VoltOps"); + } + + return payload.data.count; + } + + private async createManagedMemoryConversation( + databaseId: string, + input: CreateConversationInput, + ): Promise { + const payload = await this.request<{ + success: boolean; + data?: { conversation?: Conversation }; + }>("POST", `/managed-memory/projects/databases/${databaseId}/conversations`, { input }); + + if (!payload?.success || !payload.data?.conversation) { + throw new Error("Failed to create managed memory conversation via VoltOps"); + } + + return payload.data.conversation; + } + + private async getManagedMemoryConversation( + databaseId: string, + conversationId: string, + ): Promise { + const payload = await this.request<{ + success: boolean; + data?: { conversation?: Conversation | null }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/conversations/${conversationId}`); + + if (!payload?.success) { + throw new Error("Failed to fetch managed memory conversation via VoltOps"); + } + + return payload.data?.conversation ?? null; + } + + private async queryManagedMemoryConversations( + databaseId: string, + options: ConversationQueryOptions = {}, + ): Promise { + const query = this.buildQueryString({ + userId: options.userId, + resourceId: options.resourceId, + limit: options.limit, + offset: options.offset, + orderBy: options.orderBy, + orderDirection: options.orderDirection, + }); + + const payload = await this.request<{ + success: boolean; + data?: { conversations?: Conversation[] }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/conversations${query}`); + + if (!payload?.success) { + throw new Error("Failed to query managed memory conversations via VoltOps"); + } + + return payload.data?.conversations ?? 
[]; + } + + private async updateManagedMemoryConversation( + databaseId: string, + input: ManagedMemoryUpdateConversationInput, + ): Promise { + const payload = await this.request<{ + success: boolean; + data?: { conversation?: Conversation }; + }>( + "PATCH", + `/managed-memory/projects/databases/${databaseId}/conversations/${input.conversationId}`, + { updates: input.updates }, + ); + + if (!payload?.success || !payload.data?.conversation) { + throw new Error("Failed to update managed memory conversation via VoltOps"); + } + + return payload.data.conversation; + } + + private async deleteManagedMemoryConversation( + databaseId: string, + conversationId: string, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "DELETE", + `/managed-memory/projects/databases/${databaseId}/conversations/${conversationId}`, + ); + + if (!payload?.success) { + throw new Error("Failed to delete managed memory conversation via VoltOps"); + } + } + + private async getManagedMemoryWorkingMemory( + databaseId: string, + input: ManagedMemoryWorkingMemoryInput, + ): Promise { + const query = this.buildQueryString({ + scope: input.scope, + conversationId: input.conversationId, + userId: input.userId, + }); + + const payload = await this.request<{ + success: boolean; + data?: { content?: string | null }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/working-memory${query}`); + + if (!payload?.success) { + throw new Error("Failed to fetch managed memory working memory via VoltOps"); + } + + return payload.data?.content ?? null; + } + + private async setManagedMemoryWorkingMemory( + databaseId: string, + input: ManagedMemorySetWorkingMemoryInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "PUT", + `/managed-memory/projects/databases/${databaseId}/working-memory`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to set managed memory working memory via VoltOps"); + } + } + + private async deleteManagedMemoryWorkingMemory( + databaseId: string, + input: ManagedMemoryWorkingMemoryInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "DELETE", + `/managed-memory/projects/databases/${databaseId}/working-memory`, + input, + ); + + if (!payload?.success) { + throw new Error("Failed to delete managed memory working memory via VoltOps"); + } + } + + private async getManagedMemoryWorkflowState( + databaseId: string, + executionId: string, + ): Promise { + const payload = await this.request<{ + success: boolean; + data?: { workflowState?: WorkflowStateEntry | null }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/workflow-states/${executionId}`); + + if (!payload?.success) { + throw new Error("Failed to fetch managed memory workflow state via VoltOps"); + } + + return payload.data?.workflowState ?? 
null; + } + + private async setManagedMemoryWorkflowState( + databaseId: string, + executionId: string, + state: WorkflowStateEntry, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "PUT", + `/managed-memory/projects/databases/${databaseId}/workflow-states/${executionId}`, + { state }, + ); + + if (!payload?.success) { + throw new Error("Failed to set managed memory workflow state via VoltOps"); + } + } + + private async updateManagedMemoryWorkflowState( + databaseId: string, + input: ManagedMemoryWorkflowStateUpdateInput, + ): Promise { + const payload = await this.request<{ success: boolean }>( + "PATCH", + `/managed-memory/projects/databases/${databaseId}/workflow-states/${input.executionId}`, + { updates: input.updates }, + ); + + if (!payload?.success) { + throw new Error("Failed to update managed memory workflow state via VoltOps"); + } + } + + private async getManagedMemorySuspendedWorkflowStates( + databaseId: string, + workflowId: string, + ): Promise { + const query = this.buildQueryString({ workflowId }); + + const payload = await this.request<{ + success: boolean; + data?: { workflowStates?: WorkflowStateEntry[] }; + }>("GET", `/managed-memory/projects/databases/${databaseId}/workflow-states${query}`); + + if (!payload?.success) { + throw new Error("Failed to fetch suspended managed memory workflow states via VoltOps"); + } + + return payload.data?.workflowStates ?? []; + } + /** * Static method to create prompt helper with priority-based fallback * Priority: Agent VoltOpsClient > Global VoltOpsClient > Fallback instructions diff --git a/packages/core/src/voltops/types.ts b/packages/core/src/voltops/types.ts index cb7c6a14a..f26195bf5 100644 --- a/packages/core/src/voltops/types.ts +++ b/packages/core/src/voltops/types.ts @@ -5,7 +5,19 @@ * prompt management, telemetry, and API interactions. 
*/ +export type ManagedMemoryStatus = "provisioning" | "ready" | "failed"; + +import type { UIMessage } from "ai"; import type { BaseMessage } from "../agent/providers/base/types"; +import type { SearchResult, VectorItem } from "../memory/adapters/vector/types"; +import type { + Conversation, + ConversationQueryOptions, + CreateConversationInput, + GetMessagesOptions, + WorkflowStateEntry, + WorkingMemoryScope, +} from "../memory/types"; // VoltAgentExporter removed - migrated to OpenTelemetry /** @@ -68,7 +80,7 @@ export type VoltOpsClientOptions = { * * @example * ```typescript - * publicKey: process.env.VOLTOPS_PUBLIC_KEY + * publicKey: process.env.VOLTAGENT_PUBLIC_KEY * ``` * * @@ -86,7 +98,7 @@ export type VoltOpsClientOptions = { * * @example * ```typescript - * secretKey: process.env.VOLTOPS_SECRET_KEY + * secretKey: process.env.VOLTAGENT_SECRET_KEY * ``` * * @@ -187,6 +199,21 @@ export interface VoltOpsClient { /** Create a prompt helper for agent instructions */ createPromptHelper(agentId: string, historyEntryId?: string): PromptHelper; + /** List managed memory databases available to the project */ + listManagedMemoryDatabases(): Promise; + + /** List credentials for a managed memory database */ + listManagedMemoryCredentials(databaseId: string): Promise; + + /** Create a credential for a managed memory database */ + createManagedMemoryCredential( + databaseId: string, + input?: { name?: string }, + ): Promise; + + /** Managed memory storage operations */ + managedMemory: ManagedMemoryVoltOpsClient; + // Backward compatibility methods removed - migrated to OpenTelemetry } @@ -233,3 +260,161 @@ export interface PromptContent { }; }; } + +export interface ManagedMemoryConnectionInfo { + host: string; + port: number; + database: string; + schema: string; + tablePrefix: string; + ssl: boolean; +} + +export interface ManagedMemoryDatabaseSummary { + id: string; + organization_id: string; + name: string; + region: string; + schema_name: string; + table_prefix: string; + status: ManagedMemoryStatus; + last_error?: string | null; + metadata?: Record | null; + created_at: string; + updated_at: string; + connection: ManagedMemoryConnectionInfo; +} + +export interface ManagedMemoryCredentialSummary { + id: string; + name: string; + role: string; + username: string; + secret: string | null; + expiresAt: string | null; + isRevoked: boolean; + createdAt: string; + updatedAt: string; +} + +export interface ManagedMemoryCredentialListResult { + connection: ManagedMemoryConnectionInfo; + credentials: ManagedMemoryCredentialSummary[]; +} + +export interface ManagedMemoryCredentialCreateResult { + connection: ManagedMemoryConnectionInfo; + credential: ManagedMemoryCredentialSummary; +} + +export interface ManagedMemoryAddMessageInput { + conversationId: string; + userId: string; + message: UIMessage; +} + +export interface ManagedMemoryAddMessagesInput { + conversationId: string; + userId: string; + messages: UIMessage[]; +} + +export interface ManagedMemoryGetMessagesInput { + conversationId: string; + userId: string; + options?: GetMessagesOptions; +} + +export interface ManagedMemoryClearMessagesInput { + userId: string; + conversationId?: string; +} + +export interface ManagedMemoryStoreVectorInput { + id: string; + vector: number[]; + metadata?: Record; + content?: string; +} + +export interface ManagedMemoryStoreVectorsBatchInput { + items: ManagedMemoryStoreVectorInput[]; +} + +export interface ManagedMemorySearchVectorsInput { + vector: number[]; + limit?: number; + threshold?: number; + 
filter?: Record; +} + +export interface ManagedMemoryDeleteVectorsInput { + ids: string[]; +} + +export interface ManagedMemoryUpdateConversationInput { + conversationId: string; + updates: Partial>; +} + +export interface ManagedMemoryWorkingMemoryInput { + scope: WorkingMemoryScope; + conversationId?: string; + userId?: string; +} + +export interface ManagedMemorySetWorkingMemoryInput extends ManagedMemoryWorkingMemoryInput { + content: string; +} + +export interface ManagedMemoryWorkflowStateUpdateInput { + executionId: string; + updates: Partial; +} + +export interface ManagedMemoryMessagesClient { + add(databaseId: string, input: ManagedMemoryAddMessageInput): Promise; + addBatch(databaseId: string, input: ManagedMemoryAddMessagesInput): Promise; + list(databaseId: string, input: ManagedMemoryGetMessagesInput): Promise; + clear(databaseId: string, input: ManagedMemoryClearMessagesInput): Promise; +} + +export interface ManagedMemoryConversationsClient { + create(databaseId: string, input: CreateConversationInput): Promise; + get(databaseId: string, conversationId: string): Promise; + query(databaseId: string, options: ConversationQueryOptions): Promise; + update(databaseId: string, input: ManagedMemoryUpdateConversationInput): Promise; + delete(databaseId: string, conversationId: string): Promise; +} + +export interface ManagedMemoryWorkingMemoryClient { + get(databaseId: string, input: ManagedMemoryWorkingMemoryInput): Promise; + set(databaseId: string, input: ManagedMemorySetWorkingMemoryInput): Promise; + delete(databaseId: string, input: ManagedMemoryWorkingMemoryInput): Promise; +} + +export interface ManagedMemoryWorkflowStatesClient { + get(databaseId: string, executionId: string): Promise; + set(databaseId: string, executionId: string, state: WorkflowStateEntry): Promise; + update(databaseId: string, input: ManagedMemoryWorkflowStateUpdateInput): Promise; + listSuspended(databaseId: string, workflowId: string): Promise; +} + +export interface ManagedMemoryVectorsClient { + store(databaseId: string, input: ManagedMemoryStoreVectorInput): Promise; + storeBatch(databaseId: string, input: ManagedMemoryStoreVectorsBatchInput): Promise; + search(databaseId: string, input: ManagedMemorySearchVectorsInput): Promise; + get(databaseId: string, vectorId: string): Promise; + delete(databaseId: string, vectorId: string): Promise; + deleteBatch(databaseId: string, input: ManagedMemoryDeleteVectorsInput): Promise; + clear(databaseId: string): Promise; + count(databaseId: string): Promise; +} + +export interface ManagedMemoryVoltOpsClient { + messages: ManagedMemoryMessagesClient; + conversations: ManagedMemoryConversationsClient; + workingMemory: ManagedMemoryWorkingMemoryClient; + workflowStates: ManagedMemoryWorkflowStatesClient; + vectors: ManagedMemoryVectorsClient; +} diff --git a/packages/postgres/src/index.ts b/packages/postgres/src/index.ts index 5eb2ed742..aadeb1cdc 100644 --- a/packages/postgres/src/index.ts +++ b/packages/postgres/src/index.ts @@ -8,3 +8,7 @@ // Export Memory Adapter export { PostgreSQLMemoryAdapter } from "./memory-adapter"; export type { PostgreSQLMemoryOptions } from "./memory-adapter"; + +// Export Vector Adapter +export { PostgreSQLVectorAdapter } from "./vector-adapter"; +export type { PostgresVectorAdapterOptions } from "./vector-adapter"; diff --git a/packages/postgres/src/vector-adapter.spec.ts b/packages/postgres/src/vector-adapter.spec.ts new file mode 100644 index 000000000..469148373 --- /dev/null +++ b/packages/postgres/src/vector-adapter.spec.ts @@ 
-0,0 +1,155 @@ +import { Pool } from "pg"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { PostgreSQLVectorAdapter } from "./vector-adapter"; + +vi.mock("pg", () => ({ + Pool: vi.fn(), +})); + +describe.sequential("PostgreSQLVectorAdapter", () => { + const mockQuery = vi.fn(); + const mockRelease = vi.fn(); + const mockConnect = vi.fn(); + const mockEnd = vi.fn(); + + let adapter: PostgreSQLVectorAdapter; + + type QueryStep = { rows?: any[]; error?: Error }; + let queue: QueryStep[]; + + const enqueue = (rows: any[] = []) => { + queue.push({ rows }); + }; + + const enqueueScalar = (value: unknown) => { + queue.push({ rows: [{ count: value }] }); + }; + + const setupInitializationQueue = () => { + // BEGIN, CREATE TABLE, CREATE INDEX (2x), COMMIT + enqueue(); + enqueue(); + enqueue(); + enqueue(); + enqueue(); + }; + + beforeEach(() => { + queue = []; + mockQuery.mockReset(); + mockRelease.mockReset(); + mockConnect.mockReset(); + mockEnd.mockReset(); + + mockQuery.mockImplementation(() => { + const step = queue.shift(); + if (!step) { + throw new Error("No query queued"); + } + if (step.error) { + return Promise.reject(step.error); + } + return Promise.resolve({ rows: step.rows ?? [] }); + }); + + const mockClient = { + query: mockQuery, + release: mockRelease, + }; + + mockConnect.mockResolvedValue(mockClient); + + vi.mocked(Pool).mockImplementation( + () => + ({ + connect: mockConnect, + end: mockEnd, + }) as unknown as Pool, + ); + + setupInitializationQueue(); + + adapter = new PostgreSQLVectorAdapter({ + connection: { + host: "localhost", + port: 5432, + database: "test", + user: "test", + password: "test", + }, + tablePrefix: "test_vectors", + }); + }); + + afterEach(async () => { + await adapter.close(); + expect(mockEnd).toHaveBeenCalled(); + }); + + it("initializes schema on first use", async () => { + enqueueScalar(0); // count query result + + await adapter.count(); + + const executedSql = mockQuery.mock.calls.map((call) => String(call[0])); + expect(executedSql).toEqual( + expect.arrayContaining([ + expect.stringContaining("CREATE TABLE IF NOT EXISTS test_vectors_vectors"), + expect.stringContaining("CREATE INDEX IF NOT EXISTS idx_test_vectors_vectors_created"), + expect.stringContaining("CREATE INDEX IF NOT EXISTS idx_test_vectors_vectors_dimensions"), + ]), + ); + }); + + it("stores vectors with metadata", async () => { + enqueue(); // store insert + + await adapter.store("vec-1", [0.1, 0.9], { topic: "test" }); + + const [, params] = mockQuery.mock.calls[mockQuery.mock.calls.length - 1]; + expect(mockQuery.mock.calls[mockQuery.mock.calls.length - 1][0]).toContain( + "INSERT INTO test_vectors_vectors", + ); + expect(params[0]).toBe("vec-1"); + expect(params[1]).toBeInstanceOf(Buffer); + expect(params[2]).toBe(2); + expect(JSON.parse(params[3])).toEqual({ topic: "test" }); + }); + + it("performs cosine-similarity search", async () => { + enqueue(); // storeBatch BEGIN + enqueue(); // INSERT + enqueue(); // COMMIT + + await adapter.storeBatch([ + { + id: "vec-1", + vector: [1, 0], + metadata: { label: "a" }, + content: "hello", + }, + ]); + + const buffer = Buffer.allocUnsafe(8); + buffer.writeFloatLE(1, 0); + buffer.writeFloatLE(0, 4); + + enqueue([ + { + id: "vec-1", + vector: buffer, + dimensions: 2, + metadata: { label: "a" }, + content: "hello", + }, + ]); + + const results = await adapter.search([1, 0], { limit: 1 }); + + expect(results).toHaveLength(1); + expect(results[0].id).toBe("vec-1"); + expect(results[0].score).toBeCloseTo(1); 
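+    // The adapter maps cosine similarity s to score (s + 1) / 2, so an identical vector scores ~1 (see search()).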
+ expect(results[0].metadata).toEqual({ label: "a" }); + expect(results[0].content).toBe("hello"); + }); +}); diff --git a/packages/postgres/src/vector-adapter.ts b/packages/postgres/src/vector-adapter.ts new file mode 100644 index 000000000..2fc1c8672 --- /dev/null +++ b/packages/postgres/src/vector-adapter.ts @@ -0,0 +1,554 @@ +/** + * PostgreSQL Vector Adapter + * Provides vector storage and cosine-similarity search using PostgreSQL. + */ + +import { + type SearchResult, + type VectorAdapter, + type VectorItem, + type VectorSearchOptions, + cosineSimilarity, +} from "@voltagent/core"; +import { safeStringify } from "@voltagent/internal"; +import { Pool, type PoolClient } from "pg"; + +/** + * Configuration options for the PostgreSQL vector adapter. + */ +export interface PostgresVectorAdapterOptions { + /** + * PostgreSQL connection configuration (string URL or object form). + */ + connection: + | { + host: string; + port: number; + database: string; + user: string; + password: string; + ssl?: boolean; + } + | string; + + /** Table prefix for vector storage tables. Defaults to `voltagent_vector`. */ + tablePrefix?: string; + + /** Maximum allowed vector dimensions. Defaults to 1536. */ + maxVectorDimensions?: number; + + /** LRU cache size for vectors already fetched. Defaults to 100. */ + cacheSize?: number; + + /** Batch size for bulk operations. Defaults to 100. */ + batchSize?: number; + + /** Maximum retry attempts for database operations. Defaults to 3. */ + maxRetries?: number; + + /** Initial retry delay (ms) for exponential backoff. Defaults to 100. */ + retryDelayMs?: number; + + /** Enable verbose logging. */ + debug?: boolean; + + /** Optional search path to set for each connection. */ + searchPath?: string; + + /** Maximum number of pooled connections. Defaults to 10. */ + maxConnections?: number; +} + +interface CachedVectorItem extends VectorItem { + /** Cached vectors keep immutable copies of values for safety. */ + vector: number[]; +} + +/** + * PostgreSQL-backed persistent vector store with cosine similarity search. + */ +export class PostgreSQLVectorAdapter implements VectorAdapter { + private readonly pool: Pool; + private readonly tablePrefix: string; + private readonly maxVectorDimensions: number; + private readonly cacheSize: number; + private readonly batchSize: number; + private readonly maxRetries: number; + private readonly retryDelayMs: number; + private readonly debug: boolean; + private readonly searchPath?: string; + private readonly maxConnections: number; + + private initialized = false; + private initPromise: Promise | null = null; + private dimensions: number | null = null; + private readonly vectorCache = new Map(); + + constructor(options: PostgresVectorAdapterOptions) { + this.tablePrefix = options.tablePrefix ?? "voltagent_vector"; + this.maxVectorDimensions = options.maxVectorDimensions ?? 1536; + this.cacheSize = options.cacheSize ?? 100; + this.batchSize = options.batchSize ?? 100; + this.maxRetries = options.maxRetries ?? 3; + this.retryDelayMs = options.retryDelayMs ?? 100; + this.debug = options.debug ?? false; + this.searchPath = options.searchPath; + this.maxConnections = options.maxConnections ?? 10; + + this.pool = new Pool({ + ...(typeof options.connection === "string" + ? { connectionString: options.connection } + : options.connection), + max: this.maxConnections, + }); + + // Kick off initialization eagerly so that the first operation is fast. 
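+    // If eager setup fails, the promise is cleared below so initialization can be attempted again by a later operation.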
+ this.initPromise = this.initializeInternal(); + void this.initPromise.catch((error) => { + this.log("Vector adapter initialization failed", error); + this.initPromise = null; + }); + } + + /** + * Close the underlying pool. + */ + async close(): Promise { + await this.pool.end(); + } + + /** + * Store or update a single vector. + */ + async store(id: string, vector: number[], metadata?: Record): Promise { + await this.ensureInitialized(); + this.validateVector(vector); + + const serializedVector = this.serializeVector(vector); + const metadataJson = metadata ? safeStringify(metadata) : null; + const dimensions = vector.length; + + await this.executeWithRetry(async () => { + await this.withClient(async (client) => { + await client.query( + `INSERT INTO ${this.vectorTable()} (id, vector, dimensions, metadata, updated_at) + VALUES ($1, $2, $3, $4, NOW()) + ON CONFLICT (id) DO UPDATE + SET vector = EXCLUDED.vector, + dimensions = EXCLUDED.dimensions, + metadata = EXCLUDED.metadata, + updated_at = NOW()`, + [id, serializedVector, dimensions, metadataJson], + ); + }); + }, `store vector ${id}`); + + this.setCacheItem({ id, vector: [...vector], metadata }); + } + + /** + * Store multiple vectors within a single transaction. + */ + async storeBatch(items: VectorItem[]): Promise { + await this.ensureInitialized(); + if (items.length === 0) { + return; + } + + await this.executeWithRetry(async () => { + await this.withClient(async (client) => { + await client.query("BEGIN"); + try { + const effectiveBatchSize = Math.max(1, this.batchSize); + for (let start = 0; start < items.length; start += effectiveBatchSize) { + const batch = items.slice(start, start + effectiveBatchSize); + + for (const item of batch) { + this.validateVector(item.vector); + const serializedVector = this.serializeVector(item.vector); + const metadataJson = item.metadata ? safeStringify(item.metadata) : null; + await client.query( + `INSERT INTO ${this.vectorTable()} (id, vector, dimensions, metadata, content, updated_at) + VALUES ($1, $2, $3, $4, $5, NOW()) + ON CONFLICT (id) DO UPDATE + SET vector = EXCLUDED.vector, + dimensions = EXCLUDED.dimensions, + metadata = EXCLUDED.metadata, + content = EXCLUDED.content, + updated_at = NOW()`, + [item.id, serializedVector, item.vector.length, metadataJson, item.content ?? null], + ); + this.setCacheItem({ + id: item.id, + vector: [...item.vector], + metadata: item.metadata, + content: item.content, + }); + } + } + + await client.query("COMMIT"); + } catch (error) { + await client.query("ROLLBACK"); + throw error; + } + }); + }, `storeBatch ${items.length} vectors`); + } + + /** + * Search vectors using cosine similarity computed in memory. + */ + async search(queryVector: number[], options?: VectorSearchOptions): Promise { + await this.ensureInitialized(); + const { limit = 10, threshold = 0, filter } = options ?? {}; + + if (this.dimensions !== null && queryVector.length !== this.dimensions) { + throw new Error( + `Vector dimension mismatch. Expected ${this.dimensions}, got ${queryVector.length}`, + ); + } + + const rows = await this.executeWithRetry(async () => { + return await this.withClient(async (client) => { + const result = await client.query( + `SELECT id, vector, dimensions, metadata, content FROM ${this.vectorTable()} + ${this.dimensions !== null ? "WHERE dimensions = $1" : ""}`, + this.dimensions !== null ? 
[this.dimensions] : [], + ); + return result.rows; + }); + }, "search vectors"); + + const results: SearchResult[] = []; + + for (const row of rows) { + const id = row.id as string; + const buffer = row.vector as Buffer | null; + if (!buffer) continue; + + const vector = this.deserializeVector(buffer); + + const metadata = this.parseMetadata(row.metadata); + if (filter && !this.matchesFilter(metadata, filter)) { + continue; + } + + const similarity = cosineSimilarity(queryVector, vector); + const score = (similarity + 1) / 2; + + if (score >= threshold) { + results.push({ + id, + vector, + metadata, + content: row.content ?? undefined, + score, + distance: 1 - similarity, + }); + } + } + + results.sort((a, b) => b.score - a.score); + return results.slice(0, limit); + } + + async delete(id: string): Promise { + await this.ensureInitialized(); + + await this.executeWithRetry(async () => { + await this.withClient(async (client) => { + await client.query(`DELETE FROM ${this.vectorTable()} WHERE id = $1`, [id]); + }); + }, `delete vector ${id}`); + + this.vectorCache.delete(id); + } + + async deleteBatch(ids: string[]): Promise { + await this.ensureInitialized(); + if (ids.length === 0) return; + + await this.executeWithRetry(async () => { + await this.withClient(async (client) => { + await client.query(`DELETE FROM ${this.vectorTable()} WHERE id = ANY($1::text[])`, [ids]); + }); + }, `deleteBatch ${ids.length} vectors`); + + for (const id of ids) { + this.vectorCache.delete(id); + } + } + + async clear(): Promise { + await this.ensureInitialized(); + + await this.executeWithRetry(async () => { + await this.withClient(async (client) => { + await client.query(`DELETE FROM ${this.vectorTable()}`); + }); + }, "clear all vectors"); + + this.vectorCache.clear(); + this.dimensions = null; + } + + async count(): Promise { + await this.ensureInitialized(); + + const result = await this.executeWithRetry(async () => { + return await this.withClient(async (client) => { + const response = await client.query<{ count: string | number }>( + `SELECT COUNT(*) AS count FROM ${this.vectorTable()}`, + ); + return response.rows[0]?.count ?? 0; + }); + }, "count vectors"); + + if (typeof result === "string") { + return Number.parseInt(result, 10) || 0; + } + return result as number; + } + + async get(id: string): Promise { + await this.ensureInitialized(); + + if (this.vectorCache.has(id)) { + const cached = this.vectorCache.get(id); + if (cached) { + // Move entry to the end to maintain LRU ordering. + this.vectorCache.delete(id); + this.vectorCache.set(id, cached); + return { + id, + vector: [...cached.vector], + metadata: cached.metadata ? { ...cached.metadata } : undefined, + content: cached.content, + }; + } + } + + const row = await this.executeWithRetry(async () => { + return await this.withClient(async (client) => { + const result = await client.query( + `SELECT id, vector, metadata, content FROM ${this.vectorTable()} WHERE id = $1`, + [id], + ); + return result.rows[0]; + }); + }, `get vector ${id}`); + + if (!row) { + return null; + } + + const buffer = row.vector as Buffer | null; + if (!buffer) { + return null; + } + + const metadata = this.parseMetadata(row.metadata); + const vectorItem: VectorItem = { + id, + vector: this.deserializeVector(buffer), + metadata, + content: row.content ?? 
undefined, + }; + + this.setCacheItem({ ...vectorItem, vector: [...vectorItem.vector] }); + return vectorItem; + } + + // --------------------------------------------------------------------------- + // Helpers + // --------------------------------------------------------------------------- + + private vectorTable(): string { + return `${this.tablePrefix}_vectors`; + } + + private async ensureInitialized(): Promise { + if (this.initialized) { + return; + } + + if (!this.initPromise) { + this.initPromise = this.initializeInternal(); + this.initPromise.catch((error) => { + this.log("Vector adapter initialization failed", error); + this.initPromise = null; + }); + } + + await this.initPromise; + } + + private async initializeInternal(): Promise { + await this.executeWithRetry(async () => { + await this.withClient(async (client) => { + await client.query("BEGIN"); + try { + await client.query(` + CREATE TABLE IF NOT EXISTS ${this.vectorTable()} ( + id TEXT PRIMARY KEY, + vector BYTEA NOT NULL, + dimensions INTEGER NOT NULL, + metadata JSONB, + content TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT timezone('utc'::text, now()), + updated_at TIMESTAMPTZ NOT NULL DEFAULT timezone('utc'::text, now()) + )`); + + await client.query( + `CREATE INDEX IF NOT EXISTS idx_${this.tablePrefix}_vectors_created ON ${this.vectorTable()}(created_at)`, + ); + await client.query( + `CREATE INDEX IF NOT EXISTS idx_${this.tablePrefix}_vectors_dimensions ON ${this.vectorTable()}(dimensions)`, + ); + + await client.query("COMMIT"); + this.initialized = true; + } catch (error) { + await client.query("ROLLBACK"); + throw error; + } + }); + }, "initialize postgres vector adapter"); + } + + private async withClient(handler: (client: PoolClient) => Promise): Promise { + const client = await this.pool.connect(); + try { + if (this.searchPath) { + await client.query(`SET search_path TO "${this.searchPath}"`); + } + return await handler(client); + } finally { + client.release(); + } + } + + private async executeWithRetry(fn: () => Promise, context: string): Promise { + let attempt = 0; + let delay = this.retryDelayMs; + let lastError: unknown; + + while (attempt < this.maxRetries) { + try { + attempt += 1; + return await fn(); + } catch (error) { + lastError = error; + this.log(`Operation failed (attempt ${attempt}): ${context}`, error as Error); + if (attempt >= this.maxRetries) { + break; + } + await new Promise((resolve) => setTimeout(resolve, delay)); + delay *= 2; // exponential backoff + } + } + + this.log(`Operation failed after ${this.maxRetries} attempts: ${context}`, lastError); + throw lastError instanceof Error ? lastError : new Error(String(lastError)); + } + + private validateVector(vector: number[]): void { + if (!Array.isArray(vector) || vector.length === 0) { + throw new Error("Vector must be a non-empty array"); + } + + if (vector.length > this.maxVectorDimensions) { + throw new Error( + `Vector dimensions (${vector.length}) exceed maximum (${this.maxVectorDimensions})`, + ); + } + + if (this.dimensions === null) { + this.dimensions = vector.length; + } else if (vector.length !== this.dimensions) { + throw new Error( + `Vector dimension mismatch. 
Expected ${this.dimensions}, got ${vector.length}`, + ); + } + } + + private serializeVector(vector: number[]): Buffer { + const buffer = Buffer.allocUnsafe(vector.length * 4); + for (let i = 0; i < vector.length; i++) { + buffer.writeFloatLE(vector[i], i * 4); + } + return buffer; + } + + private deserializeVector(buffer: Buffer): number[] { + const vector: number[] = new Array(buffer.length / 4); + for (let i = 0; i < vector.length; i++) { + vector[i] = buffer.readFloatLE(i * 4); + } + return vector; + } + + private parseMetadata(raw: unknown): Record | undefined { + if (!raw) { + return undefined; + } + if (typeof raw === "string") { + try { + return JSON.parse(raw) as Record; + } catch (error) { + this.log("Failed to parse metadata JSON", error); + return undefined; + } + } + if (typeof raw === "object") { + return raw as Record; + } + return undefined; + } + + private matchesFilter( + metadata: Record | undefined, + filter: Record, + ): boolean { + if (!metadata) { + return false; + } + for (const [key, value] of Object.entries(filter)) { + if (metadata[key] !== value) { + return false; + } + } + return true; + } + + private setCacheItem(item: CachedVectorItem): void { + if (this.cacheSize === 0) { + return; + } + + if (this.vectorCache.has(item.id)) { + this.vectorCache.delete(item.id); + } + + this.vectorCache.set(item.id, item); + + if (this.vectorCache.size > this.cacheSize) { + const [firstKey] = this.vectorCache.keys(); + if (firstKey) { + this.vectorCache.delete(firstKey); + } + } + } + + private log(message: string, error?: unknown): void { + if (!this.debug) { + return; + } + if (error) { + console.warn(`[PostgreSQLVectorAdapter] ${message}`, error); + } else { + console.debug(`[PostgreSQLVectorAdapter] ${message}`); + } + } +} diff --git a/packages/voltagent-memory/package.json b/packages/voltagent-memory/package.json new file mode 100644 index 000000000..49ad2ee31 --- /dev/null +++ b/packages/voltagent-memory/package.json @@ -0,0 +1,49 @@ +{ + "name": "@voltagent/voltagent-memory", + "description": "VoltAgent managed memory adapter backed by VoltOps", + "version": "0.1.0", + "dependencies": { + "@voltagent/internal": "^0.0.11" + }, + "devDependencies": { + "@types/node": "^24.2.1", + "@vitest/coverage-v8": "^3.2.4", + "@voltagent/core": "^1.1.13", + "ai": "^5.0.12", + "tsup": "^8.5.0", + "typescript": "^5.8.2", + "vitest": "^3.2.4" + }, + "exports": { + ".": { + "import": { + "types": "./dist/index.d.mts", + "default": "./dist/index.mjs" + }, + "require": { + "types": "./dist/index.d.ts", + "default": "./dist/index.js" + } + } + }, + "files": [ + "dist" + ], + "license": "MIT", + "main": "dist/index.js", + "module": "dist/index.mjs", + "peerDependencies": { + "@voltagent/core": "^1.0.0", + "ai": "^5.0.0" + }, + "scripts": { + "attw": "attw --pack", + "build": "tsup", + "dev": "tsup --watch", + "lint": "biome check .", + "lint:fix": "biome check . 
--write", + "publint": "publint --strict", + "test": "vitest" + }, + "types": "dist/index.d.ts" +} diff --git a/packages/voltagent-memory/src/index.ts b/packages/voltagent-memory/src/index.ts new file mode 100644 index 000000000..8b9e41591 --- /dev/null +++ b/packages/voltagent-memory/src/index.ts @@ -0,0 +1,650 @@ +import { + AgentRegistry, + type Conversation, + type ConversationQueryOptions, + type CreateConversationInput, + type GetMessagesOptions, + type SearchResult, + type StorageAdapter, + type VectorAdapter, + type VectorItem, + type VoltOpsClient, + type WorkflowStateEntry, + type WorkingMemoryScope, +} from "@voltagent/core"; +import type { + ManagedMemoryConnectionInfo, + ManagedMemoryCredentialCreateResult, + ManagedMemoryCredentialListResult, + ManagedMemoryDatabaseSummary, +} from "@voltagent/core"; +import { safeStringify } from "@voltagent/internal"; +import type { UIMessage } from "ai"; + +export interface ManagedMemoryAdapterOptions { + databaseId?: string; + databaseName?: string; + voltOpsClient?: VoltOpsClient; + debug?: boolean; +} + +export class ManagedMemoryAdapter implements StorageAdapter { + private readonly options: ManagedMemoryAdapterOptions; + private voltOpsClient?: VoltOpsClient; + private readonly debug: boolean; + private initializationPromise: Promise | null = null; + private initCheckInterval?: NodeJS.Timeout; + private initializationAttempts = 0; + private readonly maxInitializationAttempts = 50; + private database?: ManagedMemoryDatabaseSummary; + private connection?: ManagedMemoryConnectionInfo; + + constructor(options: ManagedMemoryAdapterOptions) { + this.options = options; + this.debug = options.debug ?? false; + this.voltOpsClient = this.resolveVoltOpsClient(options); + + if (this.voltOpsClient?.hasValidKeys()) { + this.initializationPromise = this.initialize(); + } else { + this.startInitializationCheck(); + } + } + + private resolveVoltOpsClient(options: ManagedMemoryAdapterOptions): VoltOpsClient | undefined { + if (options.voltOpsClient) { + return options.voltOpsClient; + } + + const registryClient = AgentRegistry.getInstance().getGlobalVoltOpsClient(); + if (registryClient?.hasValidKeys()) { + return registryClient; + } + + return undefined; + } + + private async initialize(): Promise { + if (!this.voltOpsClient) { + throw new Error( + "VoltOps client is not available for managed memory initialization. " + + "Set VOLTAGENT_PUBLIC_KEY and VOLTAGENT_SECRET_KEY environment variables, " + + "or pass voltOpsClient explicitly. " + + "See: https://voltagent.dev/docs/agents/memory/managed-memory", + ); + } + + this.log("Loading managed memory databases via VoltOps API"); + const databases = await this.voltOpsClient.listManagedMemoryDatabases(); + + if (!Array.isArray(databases) || databases.length === 0) { + throw new Error("No managed memory databases found for the provided VoltOps credentials."); + } + + const targetDatabase = this.findTargetDatabase(databases); + if (!targetDatabase) { + throw new Error( + `Unable to locate managed memory database. Provided databaseId=${this.options.databaseId ?? "-"}, databaseName=${ + this.options.databaseName ?? 
"-" + }.`, + ); + } + + this.database = targetDatabase; + this.connection = targetDatabase.connection; + + this.log( + "Managed memory adapter initialized", + safeStringify({ + databaseId: targetDatabase.id, + databaseName: targetDatabase.name, + region: targetDatabase.region, + }), + ); + } + + private findTargetDatabase( + databases: ManagedMemoryDatabaseSummary[], + ): ManagedMemoryDatabaseSummary | undefined { + if (this.options.databaseId) { + const match = databases.find((db) => db.id === this.options.databaseId); + if (match) { + return match; + } + } + + if (this.options.databaseName) { + const needle = this.options.databaseName.toLowerCase(); + const match = databases.find((db) => db.name.toLowerCase() === needle); + if (match) { + return match; + } + } + + return undefined; + } + + private async ensureInitialized(): Promise { + if (!this.voltOpsClient) { + this.startInitializationCheck(); + } + + if (!this.initializationPromise && this.voltOpsClient?.hasValidKeys() && !this.database) { + this.initializationPromise = this.initialize(); + } + + if (this.initializationPromise) { + await this.initializationPromise.catch((error) => { + this.log("Managed memory initialization failed", String(error)); + throw error; + }); + this.initializationPromise = null; + } + } + + private async withClientContext( + handler: (context: { + client: VoltOpsClient; + database: ManagedMemoryDatabaseSummary; + }) => Promise, + ): Promise { + await this.ensureInitialized(); + + if (!this.voltOpsClient) { + throw new Error("VoltOps client is not available to execute managed memory operation"); + } + + if (!this.database) { + throw new Error("Managed memory database metadata is unavailable"); + } + + return handler({ client: this.voltOpsClient, database: this.database }); + } + + private log(message: string, context?: string): void { + if (this.debug) { + console.log("[ManagedMemoryAdapter]", message, context ?? 
""); + } + } + + addMessage(message: UIMessage, userId: string, conversationId: string): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Executing managed memory addMessage", safeStringify({ userId, conversationId })); + await client.managedMemory.messages.add(database.id, { message, userId, conversationId }); + }).then(() => undefined); + } + + addMessages(messages: UIMessage[], userId: string, conversationId: string): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log( + "Executing managed memory addMessages", + safeStringify({ count: messages.length, userId, conversationId }), + ); + await client.managedMemory.messages.addBatch(database.id, { + messages, + userId, + conversationId, + }); + }).then(() => undefined); + } + + getMessages( + userId: string, + conversationId: string, + options?: GetMessagesOptions, + ): Promise { + return this.withClientContext(({ client, database }) => { + this.log( + "Fetching managed memory messages", + safeStringify({ userId, conversationId, options }), + ); + return client.managedMemory.messages.list(database.id, { userId, conversationId, options }); + }); + } + + clearMessages(userId: string, conversationId?: string): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Clearing managed memory messages", safeStringify({ userId, conversationId })); + await client.managedMemory.messages.clear(database.id, { userId, conversationId }); + }).then(() => undefined); + } + + createConversation(input: CreateConversationInput): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Creating managed memory conversation", safeStringify({ conversationId: input.id })); + return client.managedMemory.conversations.create(database.id, input); + }); + } + + getConversation(id: string): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Fetching managed memory conversation", safeStringify({ id })); + return client.managedMemory.conversations.get(database.id, id); + }); + } + + getConversations(resourceId: string): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Listing managed memory conversations by resource", safeStringify({ resourceId })); + return client.managedMemory.conversations.query(database.id, { resourceId }); + }); + } + + getConversationsByUserId( + userId: string, + options?: Omit, + ): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Listing managed memory conversations by user", safeStringify({ userId, options })); + return client.managedMemory.conversations.query(database.id, { ...options, userId }); + }); + } + + queryConversations(options: ConversationQueryOptions): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Querying managed memory conversations", safeStringify(options)); + return client.managedMemory.conversations.query(database.id, options); + }); + } + + updateConversation( + id: string, + updates: Partial>, + ): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Updating managed memory conversation", safeStringify({ id, updates })); + return client.managedMemory.conversations.update(database.id, { + conversationId: id, + updates, + }); + }); + } + + deleteConversation(id: string): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Deleting managed memory conversation", safeStringify({ id })); + await 
client.managedMemory.conversations.delete(database.id, id); + }).then(() => undefined); + } + + getWorkingMemory(params: { + conversationId?: string; + userId?: string; + scope: WorkingMemoryScope; + }): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Fetching managed memory working memory", safeStringify(params)); + return client.managedMemory.workingMemory.get(database.id, params); + }); + } + + setWorkingMemory(params: { + conversationId?: string; + userId?: string; + content: string; + scope: WorkingMemoryScope; + }): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log( + "Setting managed memory working memory", + safeStringify({ ...params, content: params.content.length }), + ); + await client.managedMemory.workingMemory.set(database.id, params); + }).then(() => undefined); + } + + deleteWorkingMemory(params: { + conversationId?: string; + userId?: string; + scope: WorkingMemoryScope; + }): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Deleting managed memory working memory", safeStringify(params)); + await client.managedMemory.workingMemory.delete(database.id, params); + }).then(() => undefined); + } + + getWorkflowState(executionId: string): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Fetching managed memory workflow state", safeStringify({ executionId })); + return client.managedMemory.workflowStates.get(database.id, executionId); + }); + } + + setWorkflowState(executionId: string, state: WorkflowStateEntry): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Setting managed memory workflow state", safeStringify({ executionId })); + await client.managedMemory.workflowStates.set(database.id, executionId, state); + }).then(() => undefined); + } + + updateWorkflowState(executionId: string, updates: Partial): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Updating managed memory workflow state", safeStringify({ executionId, updates })); + await client.managedMemory.workflowStates.update(database.id, { executionId, updates }); + }).then(() => undefined); + } + + getSuspendedWorkflowStates(workflowId: string): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Fetching suspended workflow states", safeStringify({ workflowId })); + return client.managedMemory.workflowStates.listSuspended(database.id, workflowId); + }); + } + + private startInitializationCheck(): void { + if (this.initCheckInterval || this.voltOpsClient?.hasValidKeys()) { + return; + } + + this.initCheckInterval = setInterval(() => { + this.initializationAttempts++; + + if (!this.voltOpsClient) { + const registryClient = AgentRegistry.getInstance().getGlobalVoltOpsClient(); + if (registryClient?.hasValidKeys()) { + this.voltOpsClient = registryClient; + this.initializationPromise = this.initialize(); + } + } + + const initialized = Boolean(this.database); + const exhausted = this.initializationAttempts >= this.maxInitializationAttempts; + + if (initialized || exhausted) { + if (this.initCheckInterval) { + clearInterval(this.initCheckInterval); + this.initCheckInterval = undefined; + } + + if (exhausted && !this.voltOpsClient) { + this.log("VoltOps client not available after waiting for managed memory initialization"); + } + } + }, 100); + } + + async getConnectionInfo(): Promise { + await this.ensureInitialized(); + return this.connection; + } + + async 
getDatabaseMetadata(): Promise { + await this.ensureInitialized(); + return this.database; + } + + async listCredentials(): Promise { + await this.ensureInitialized(); + if (!this.database) { + throw new Error("Managed memory database metadata is unavailable"); + } + if (!this.voltOpsClient) { + throw new Error("VoltOps client is not available to list managed memory credentials"); + } + + return this.voltOpsClient.listManagedMemoryCredentials(this.database.id); + } + + async createCredential(name?: string): Promise { + await this.ensureInitialized(); + if (!this.database) { + throw new Error("Managed memory database metadata is unavailable"); + } + if (!this.voltOpsClient) { + throw new Error("VoltOps client is not available to create managed memory credential"); + } + + return this.voltOpsClient.createManagedMemoryCredential(this.database.id, { name }); + } +} + +export class ManagedMemoryVectorAdapter implements VectorAdapter { + private readonly options: ManagedMemoryAdapterOptions; + private voltOpsClient?: VoltOpsClient; + private readonly debug: boolean; + private initializationPromise: Promise | null = null; + private initCheckInterval?: NodeJS.Timeout; + private initializationAttempts = 0; + private readonly maxInitializationAttempts = 50; + private database?: ManagedMemoryDatabaseSummary; + + constructor(options: ManagedMemoryAdapterOptions) { + this.options = options; + this.debug = options.debug ?? false; + this.voltOpsClient = options.voltOpsClient ?? this.resolveVoltOpsClient(options); + + if (this.voltOpsClient?.hasValidKeys()) { + this.initializationPromise = this.initialize(); + } else { + this.startInitializationCheck(); + } + } + + private resolveVoltOpsClient(options: ManagedMemoryAdapterOptions): VoltOpsClient | undefined { + if (options.voltOpsClient) { + return options.voltOpsClient; + } + + const registryClient = AgentRegistry.getInstance().getGlobalVoltOpsClient(); + if (registryClient?.hasValidKeys()) { + return registryClient; + } + + return undefined; + } + + private async initialize(): Promise { + if (!this.voltOpsClient) { + throw new Error( + "VoltOps client is not available for managed memory initialization. " + + "Set VOLTAGENT_PUBLIC_KEY and VOLTAGENT_SECRET_KEY environment variables, " + + "or pass voltOpsClient explicitly. " + + "See: https://voltagent.dev/docs/agents/memory/managed-memory", + ); + } + + this.log("Loading managed memory databases via VoltOps API"); + const databases = await this.voltOpsClient.listManagedMemoryDatabases(); + + if (!Array.isArray(databases) || databases.length === 0) { + throw new Error("No managed memory databases found for the provided VoltOps credentials."); + } + + const targetDatabase = this.findTargetDatabase(databases); + if (!targetDatabase) { + throw new Error( + `Unable to locate managed memory database. Provided databaseId=${this.options.databaseId ?? "-"}, databaseName=${ + this.options.databaseName ?? 
"-" + }.`, + ); + } + + this.database = targetDatabase; + + this.log( + "Managed memory vector adapter initialized", + safeStringify({ + databaseId: targetDatabase.id, + databaseName: targetDatabase.name, + region: targetDatabase.region, + }), + ); + } + + private findTargetDatabase( + databases: ManagedMemoryDatabaseSummary[], + ): ManagedMemoryDatabaseSummary | undefined { + if (this.options.databaseId) { + const match = databases.find((db) => db.id === this.options.databaseId); + if (match) { + return match; + } + } + + if (this.options.databaseName) { + const needle = this.options.databaseName.toLowerCase(); + const match = databases.find((db) => db.name.toLowerCase() === needle); + if (match) { + return match; + } + } + + return undefined; + } + + private async ensureInitialized(): Promise { + if (!this.voltOpsClient) { + this.startInitializationCheck(); + } + + if (!this.initializationPromise && this.voltOpsClient?.hasValidKeys() && !this.database) { + this.initializationPromise = this.initialize(); + } + + if (this.initializationPromise) { + await this.initializationPromise.catch((error) => { + this.log("Managed memory vector initialization failed", String(error)); + throw error; + }); + this.initializationPromise = null; + } + + if (!this.database) { + throw new Error("ManagedMemoryVectorAdapter failed to initialize managed database metadata"); + } + } + + private startInitializationCheck(): void { + if (this.initCheckInterval || this.voltOpsClient?.hasValidKeys()) { + return; + } + + this.initCheckInterval = setInterval(() => { + this.initializationAttempts++; + + if (!this.voltOpsClient) { + const registryClient = AgentRegistry.getInstance().getGlobalVoltOpsClient(); + if (registryClient?.hasValidKeys()) { + this.voltOpsClient = registryClient; + this.initializationPromise = this.initialize(); + } + } + + const initialized = Boolean(this.database); + const exhausted = this.initializationAttempts >= this.maxInitializationAttempts; + + if (initialized || exhausted) { + if (this.initCheckInterval) { + clearInterval(this.initCheckInterval); + this.initCheckInterval = undefined; + } + + if (exhausted && !this.voltOpsClient) { + this.log("VoltOps client not available after waiting for managed memory initialization"); + } + } + }, 100); + } + + private async withClientContext( + handler: (context: { + client: VoltOpsClient; + database: ManagedMemoryDatabaseSummary; + }) => Promise, + ): Promise { + await this.ensureInitialized(); + + if (!this.voltOpsClient) { + throw new Error("VoltOps client is not available to execute managed memory operation"); + } + + if (!this.database) { + throw new Error("Managed memory database metadata is unavailable"); + } + + return handler({ client: this.voltOpsClient, database: this.database }); + } + + private log(message: string, context?: string): void { + if (this.debug) { + console.log("[ManagedMemoryVectorAdapter]", message, context ?? 
""); + } + } + + store(id: string, vector: number[], metadata?: Record): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Storing managed memory vector", safeStringify({ id })); + await client.managedMemory.vectors.store(database.id, { + id, + vector, + metadata, + }); + }).then(() => undefined); + } + + storeBatch(items: VectorItem[]): Promise { + if (items.length === 0) { + return Promise.resolve(); + } + + return this.withClientContext(async ({ client, database }) => { + this.log("Storing managed memory vectors batch", safeStringify({ count: items.length })); + await client.managedMemory.vectors.storeBatch(database.id, { items }); + }).then(() => undefined); + } + + search( + vector: number[], + options?: { limit?: number; filter?: Record; threshold?: number }, + ): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Searching managed memory vectors", safeStringify({ limit: options?.limit })); + return client.managedMemory.vectors.search(database.id, { + vector, + limit: options?.limit, + threshold: options?.threshold, + filter: options?.filter, + }); + }); + } + + delete(id: string): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Deleting managed memory vector", safeStringify({ id })); + await client.managedMemory.vectors.delete(database.id, id); + }).then(() => undefined); + } + + deleteBatch(ids: string[]): Promise { + if (ids.length === 0) { + return Promise.resolve(); + } + + return this.withClientContext(async ({ client, database }) => { + this.log("Deleting managed memory vector batch", safeStringify({ count: ids.length })); + await client.managedMemory.vectors.deleteBatch(database.id, { ids }); + }).then(() => undefined); + } + + clear(): Promise { + return this.withClientContext(async ({ client, database }) => { + this.log("Clearing managed memory vectors"); + await client.managedMemory.vectors.clear(database.id); + }).then(() => undefined); + } + + count(): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Counting managed memory vectors"); + return client.managedMemory.vectors.count(database.id); + }); + } + + get(id: string): Promise { + return this.withClientContext(({ client, database }) => { + this.log("Fetching managed memory vector", safeStringify({ id })); + return client.managedMemory.vectors.get(database.id, id); + }); + } +} diff --git a/packages/voltagent-memory/tsconfig.json b/packages/voltagent-memory/tsconfig.json new file mode 100644 index 000000000..fb0c6c836 --- /dev/null +++ b/packages/voltagent-memory/tsconfig.json @@ -0,0 +1,30 @@ +{ + "compilerOptions": { + "target": "es2018", + "lib": ["dom", "dom.iterable", "esnext"], + "module": "esnext", + "moduleResolution": "node", + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "outDir": "./dist", + "rootDir": "./", + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "strictFunctionTypes": true, + "strictBindCallApply": true, + "strictPropertyInitialization": true, + "noImplicitThis": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "types": ["node", "vitest/globals"] + }, + "include": ["src/**/*.ts", "__tests__/**/*.ts"], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/voltagent-memory/tsup.config.ts b/packages/voltagent-memory/tsup.config.ts 
new file mode 100644 index 000000000..0819104fd --- /dev/null +++ b/packages/voltagent-memory/tsup.config.ts @@ -0,0 +1,19 @@ +import { defineConfig } from "tsup"; +import { markAsExternalPlugin } from "../shared/tsup-plugins/mark-as-external"; + +export default defineConfig({ + entry: ["src/index.ts"], + format: ["cjs", "esm"], + splitting: false, + sourcemap: true, + clean: false, + target: "es2022", + outDir: "dist", + minify: false, + dts: true, + esbuildPlugins: [markAsExternalPlugin], + esbuildOptions(options) { + options.keepNames = true; + return options; + }, +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 47a40713f..fbabc503e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -182,28 +182,6 @@ importers: specifier: ^5.8.2 version: 5.9.2 - examples/sdk-trace-example: - dependencies: - '@voltagent/logger': - specifier: ^1.0.2 - version: link:../../packages/logger - '@voltagent/sdk': - specifier: ^0.1.6 - version: 0.1.6(@voltagent/logger@packages+logger)(zod@3.25.76) - '@voltagent/server-hono': - specifier: ^1.0.15 - version: link:../../packages/server-hono - devDependencies: - '@types/node': - specifier: ^24.2.1 - version: 24.2.1 - tsx: - specifier: ^4.19.3 - version: 4.20.4 - typescript: - specifier: ^5.8.2 - version: 5.9.2 - examples/with-a2a-server: dependencies: '@ai-sdk/openai': @@ -1798,6 +1776,61 @@ importers: specifier: ^0.5.3 version: 0.5.3(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@types/react@19.1.10)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5) + examples/with-viteval/dist: + dependencies: + '@ai-sdk/openai': + specifier: ^2.0.2 + version: 2.0.17(zod@3.25.76) + '@voltagent/cli': + specifier: ^0.1.10 + version: link:../../../packages/cli + '@voltagent/core': + specifier: ^1.1.5 + version: link:../../../packages/core + '@voltagent/libsql': + specifier: ^1.0.2 + version: link:../../../packages/libsql + '@voltagent/logger': + specifier: ^1.0.1 + version: link:../../../packages/logger + '@voltagent/server-hono': + specifier: ^1.0.4 + version: link:../../../packages/server-hono + ai: + specifier: ^5.0.12 + version: 5.0.19(zod@3.25.76) + consola: + specifier: ^3.4.2 + version: 3.4.2 + envalid: + specifier: ^8.1.0 + version: 8.1.0 + yargs: + specifier: ^18.0.0 + version: 18.0.0 + zod: + specifier: ^3.25.76 + version: 3.25.76 + devDependencies: + '@tsconfig/node24': + specifier: ^24.0.1 + version: 24.0.1 + '@types/yargs': + specifier: ^17.0.33 + version: 17.0.33 + dotenv: + specifier: ^16.4.5 + version: 16.6.1 + tsx: + specifier: ^4.19.3 + version: 4.20.4 + typescript: + specifier: ^5.8.2 + version: 5.9.2 + viteval: + specifier: ^0.1.7 + version: 0.1.9(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5) + examples/with-voice-elevenlabs: dependencies: '@ai-sdk/openai': @@ -1967,13 +2000,47 @@ importers: specifier: ^5.8.2 version: 5.9.2 + examples/with-voltagent-managed-memory: + dependencies: + '@ai-sdk/openai': + specifier: ^2.0.2 + version: 2.0.17(zod@3.25.76) + '@voltagent/core': + specifier: ^1.1.23 + version: link:../../packages/core + '@voltagent/logger': + specifier: ^1.0.2 + version: link:../../packages/logger + '@voltagent/server-hono': + specifier: ^1.0.15 + version: link:../../packages/server-hono + '@voltagent/voltagent-memory': + specifier: ^0.1.0 + version: link:../../packages/voltagent-memory + ai: + specifier: ^5.0.12 + version: 5.0.19(zod@3.25.76) + zod: + specifier: 
^3.25.76 + version: 3.25.76 + devDependencies: + '@types/node': + specifier: ^24.2.1 + version: 24.2.1 + tsx: + specifier: ^4.19.3 + version: 4.20.4 + typescript: + specifier: ^5.8.2 + version: 5.9.2 + examples/with-whatsapp: dependencies: '@ai-sdk/openai': - specifier: ^2.0.34 + specifier: ^2.0.2 version: 2.0.42(zod@3.25.76) '@supabase/supabase-js': - specifier: ^2.57.4 + specifier: ^2.49.4 version: 2.58.0 '@voltagent/cli': specifier: ^0.1.11 @@ -1991,10 +2058,10 @@ importers: specifier: ^1.0.15 version: link:../../packages/server-hono ai: - specifier: ^5.0.51 + specifier: ^5.0.12 version: 5.0.59(zod@3.25.76) dotenv: - specifier: ^16.4.7 + specifier: ^16.4.5 version: 16.6.1 zod: specifier: ^3.25.76 @@ -2794,7 +2861,7 @@ importers: version: 1.59.0 openai: specifier: ^4.91.0 - version: 4.104.0(zod@3.25.76) + version: 4.104.0(zod@4.1.11) devDependencies: '@types/node': specifier: ^24.2.1 @@ -2815,6 +2882,34 @@ importers: specifier: ^3.2.4 version: 3.2.4(@types/node@24.2.1)(@vitest/ui@1.6.1)(jsdom@22.1.0) + packages/voltagent-memory: + dependencies: + '@voltagent/internal': + specifier: ^0.0.11 + version: link:../internal + devDependencies: + '@types/node': + specifier: ^24.2.1 + version: 24.2.1 + '@vitest/coverage-v8': + specifier: ^3.2.4 + version: 3.2.4(vitest@3.2.4) + '@voltagent/core': + specifier: ^1.1.13 + version: link:../core + ai: + specifier: ^5.0.12 + version: 5.0.19(zod@3.25.76) + tsup: + specifier: ^8.5.0 + version: 8.5.0(@swc/core@1.5.29)(typescript@5.9.2) + typescript: + specifier: ^5.8.2 + version: 5.9.2 + vitest: + specifier: ^3.2.4 + version: 3.2.4(@types/node@24.2.1)(@vitest/ui@1.6.1)(jsdom@22.1.0) + packages: /@a2a-js/sdk@0.2.5: @@ -3872,7 +3967,6 @@ packages: hasBin: true dependencies: '@babel/types': 7.28.4 - dev: true /@babel/plugin-bugfix-firefox-class-in-computed-class-key@7.27.1(@babel/core@7.28.0): resolution: {integrity: sha512-QPG3C9cCVRQLxAVwmefEmwdTanECuUBMQZ/ym5kiw3XKCGA7qkuQLcjWWHcrD/GKbn/WmJwaezfuuAOcyKlRPA==} @@ -4916,9 +5010,9 @@ packages: '@babel/code-frame': 7.27.1 '@babel/generator': 7.28.0 '@babel/helper-globals': 7.28.0 - '@babel/parser': 7.28.0 + '@babel/parser': 7.28.4 '@babel/template': 7.27.2 - '@babel/types': 7.28.2 + '@babel/types': 7.28.4 debug: 4.4.1(supports-color@10.2.2) transitivePeerDependencies: - supports-color @@ -4936,7 +5030,6 @@ packages: dependencies: '@babel/helper-string-parser': 7.27.1 '@babel/helper-validator-identifier': 7.27.1 - dev: true /@balena/dockerignore@1.0.2: resolution: {integrity: sha512-wMue2Sy4GAVTk6Ic4tJVcnfdau+gx2EnG7S+uAEe+TWJFqE4YoWN4/H8MSLj4eYJKxGg26lZwboEniNiNwZQ6Q==} @@ -6703,21 +6796,6 @@ packages: hono: 4.9.1 dev: false - /@hono/node-ws@1.2.0(@hono/node-server@1.18.2)(hono@4.9.1): - resolution: {integrity: sha512-OBPQ8OSHBw29mj00wT/xGYtB6HY54j0fNSdVZ7gZM3TUeq0So11GXaWtFf1xWxQNfumKIsj0wRuLKWfVsO5GgQ==} - engines: {node: '>=18.14.1'} - peerDependencies: - '@hono/node-server': ^1.11.1 - hono: ^4.6.0 - dependencies: - '@hono/node-server': 1.18.2(hono@4.9.1) - hono: 4.9.1 - ws: 8.18.3 - transitivePeerDependencies: - - bufferutil - - utf-8-validate - dev: false - /@hono/swagger-ui@0.5.2(hono@4.9.1): resolution: {integrity: sha512-7wxLKdb8h7JTdZ+K8DJNE3KXQMIpJejkBTQjrYlUWF28Z1PGOKw6kUykARe5NTfueIN37jbyG/sBYsbzXzG53A==} peerDependencies: @@ -12624,7 +12702,7 @@ packages: dependencies: '@babel/core': 7.28.0 '@babel/generator': 7.28.0 - '@babel/parser': 7.28.0 + '@babel/parser': 7.28.4 '@babel/preset-typescript': 7.27.1(@babel/core@7.28.0) ansis: 4.1.0 diff: 8.0.2 @@ -13428,7 +13506,7 @@ packages: 
istanbul-lib-report: 3.0.1 istanbul-lib-source-maps: 5.0.6 istanbul-reports: 3.1.7 - magic-string: 0.30.17 + magic-string: 0.30.19 magicast: 0.3.5 std-env: 3.9.0 test-exclude: 7.0.1 @@ -13525,6 +13603,75 @@ packages: tinyrainbow: 2.0.0 dev: true + /@viteval/cli@0.2.4(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5): + resolution: {integrity: sha512-if8W31fZc5XKZp8qEjwc05x+pbLpT90ULmApeJHtdsSmWOr8Km0f81nQL7xpGNRJmKTNPq5iXmBshc/wDkpZHQ==} + hasBin: true + dependencies: + '@opentf/cli-pbar': 0.7.2 + '@viteval/core': 0.1.8(@types/node@24.2.1)(jiti@2.5.1)(tsx@4.20.4) + '@viteval/internal': 0.0.4 + '@viteval/ui': 0.0.1(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@vitejs/plugin-react@4.7.0)(vite@6.3.5) + c12: 3.2.0(magicast@0.3.5) + chalk: 5.6.2 + consola: 3.4.2 + find-up: 7.0.0 + glob: 11.0.3 + jiti: 2.5.1 + ora: 8.2.0 + ts-pattern: 5.8.0 + type-fest: 4.41.0 + yargs: 18.0.0 + transitivePeerDependencies: + - '@azure/app-configuration' + - '@azure/cosmos' + - '@azure/data-tables' + - '@azure/identity' + - '@azure/keyvault-secrets' + - '@azure/storage-blob' + - '@capacitor/preferences' + - '@deno/kv' + - '@electric-sql/pglite' + - '@libsql/client' + - '@netlify/blobs' + - '@planetscale/database' + - '@rsbuild/core' + - '@tanstack/query-core' + - '@tanstack/react-query' + - '@tanstack/router-core' + - '@types/node' + - '@types/react' + - '@upstash/redis' + - '@vercel/blob' + - '@vercel/functions' + - '@vercel/kv' + - '@vitejs/plugin-react' + - aws4fetch + - better-sqlite3 + - drizzle-orm + - encoding + - idb-keyval + - less + - lightningcss + - magicast + - mysql2 + - rolldown + - sass + - sass-embedded + - sqlite3 + - stylus + - sugarss + - supports-color + - terser + - tsx + - uploadthing + - vite + - vite-plugin-solid + - webpack + - ws + - xml2js + - yaml + dev: true + /@viteval/cli@0.5.3(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@types/react@19.1.10)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5): resolution: {integrity: sha512-S6PUAaUNAHJ4xEb7Pkev1wAyi0dG8OaT/EHmN24gQOJmxvbW5JSXEYbcpCqDxB0LkuRfiKhxcOM+6m/7NWePCw==} hasBin: true @@ -13596,6 +13743,41 @@ packages: - yaml dev: true + /@viteval/core@0.1.8(@types/node@24.2.1)(jiti@2.5.1)(tsx@4.20.4): + resolution: {integrity: sha512-DPYn5Tj1ex3ktD+7p1CzEjl9e2J16z3RVYmE1Ci8S+ja2XdE5lDfqwJEKNaAyO9NCCVEJn3H1GE+DAFEZ8UzoA==} + dependencies: + '@vitest/runner': 3.2.4 + '@viteval/internal': 0.0.4 + ajv: 8.17.1 + autoevals: 0.0.130 + chalk: 5.6.2 + compute-cosine-similarity: 1.1.0 + find-up: 7.0.0 + js-levenshtein: 1.1.6 + js-yaml: 4.1.0 + linear-sum-assignment: 1.0.7 + mustache: 4.2.0 + openai: 4.104.0(zod@4.1.11) + ts-pattern: 5.8.0 + vite-node: 3.2.4(@types/node@24.2.1)(jiti@2.5.1)(tsx@4.20.4) + zod: 4.1.11 + transitivePeerDependencies: + - '@types/node' + - encoding + - jiti + - less + - lightningcss + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + - tsx + - ws + - yaml + dev: true + /@viteval/core@0.5.3(@types/node@24.2.1)(jiti@2.5.1)(tsx@4.20.4): resolution: {integrity: sha512-v3+Z2Icx0QeTqbIPBlc7JcSjIzAwg1BMGPVhS3n9dC0yAf0w1sBMnfSjwefXE2exbRXpAWzMzc44wXyg17Rabg==} dependencies: @@ -13631,10 +13813,73 @@ packages: - yaml dev: true + /@viteval/internal@0.0.4: + resolution: {integrity: sha512-T1Gi3fJGUGsiZYD9ngA/bmZ4mwb4AjnPnbKdLQduG82XP0D2qBy4Rhp9HGLTieZxkIdZir1Fyii0N2sX5I/Vtg==} + dev: 
true + /@viteval/internal@0.5.3: resolution: {integrity: sha512-7Lq4nY/Km5GFUd8DmvvcoMhqxUcrTQhMorbZzk66UfkU3pp1g/FD5ImES1G1ll7QL/3Y0xEav+O/kDR5BJxtWQ==} dev: true + /@viteval/ui@0.0.1(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@vitejs/plugin-react@4.7.0)(vite@6.3.5): + resolution: {integrity: sha512-keEv64lqtKww0Ny+Ywhdz4ZAQV9nGvHN0Ci+fkr/HGnLvHiikhuAzYSW/zkB8HhO7fElBfpCx/Lv8lT1RRGzJw==} + dependencies: + '@iconify-json/mdi': 1.2.3 + '@iconify/react': 6.0.2(react@19.1.1) + '@radix-ui/react-slot': 1.2.3(@types/react@19.1.10)(react@19.1.1) + '@tanstack/react-router': 1.131.44(react-dom@19.1.1)(react@19.1.1) + '@tanstack/react-router-ssr-query': 1.131.44(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/react-router@1.131.44)(@tanstack/router-core@1.131.44)(react-dom@19.1.1)(react@19.1.1) + '@tanstack/react-start': 1.131.44(@tanstack/react-router@1.131.44)(@vitejs/plugin-react@4.7.0)(react-dom@19.1.1)(react@19.1.1)(vite@6.3.5) + '@tanstack/router-plugin': 1.131.44(@tanstack/react-router@1.131.44)(vite@6.3.5) + '@viteval/internal': 0.0.4 + class-variance-authority: 0.7.1 + clsx: 2.1.1 + lucide-react: 0.542.0(react@19.1.1) + react: 19.1.1 + react-dom: 19.1.1(react@19.1.1) + tailwind-merge: 3.3.1 + tailwindcss: 4.1.13 + tw-animate-css: 1.3.8 + zod: 4.1.11 + transitivePeerDependencies: + - '@azure/app-configuration' + - '@azure/cosmos' + - '@azure/data-tables' + - '@azure/identity' + - '@azure/keyvault-secrets' + - '@azure/storage-blob' + - '@capacitor/preferences' + - '@deno/kv' + - '@electric-sql/pglite' + - '@libsql/client' + - '@netlify/blobs' + - '@planetscale/database' + - '@rsbuild/core' + - '@tanstack/query-core' + - '@tanstack/react-query' + - '@tanstack/router-core' + - '@types/react' + - '@upstash/redis' + - '@vercel/blob' + - '@vercel/functions' + - '@vercel/kv' + - '@vitejs/plugin-react' + - aws4fetch + - better-sqlite3 + - drizzle-orm + - encoding + - idb-keyval + - mysql2 + - rolldown + - sqlite3 + - supports-color + - uploadthing + - vite + - vite-plugin-solid + - webpack + - xml2js + dev: true + /@viteval/ui@0.5.3(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/react@19.1.10)(@vitejs/plugin-react@4.7.0)(vite@6.3.5): resolution: {integrity: sha512-58R8Da35CdhpdWclZT/N5HgKrInkZs+eXtt8qIrLNO8Gn6EmMqJNvNWsXqtM8YHC8R5bf8LbplTSF9g8N2BClw==} dependencies: @@ -13705,39 +13950,6 @@ packages: - xml2js dev: true - /@voltagent/core@0.1.86(@voltagent/logger@packages+logger)(zod@3.25.76): - resolution: {integrity: sha512-sQW3n9QcLlRwkJWuoKlIqXfqu24A03H+LsssSMwzQeEZlBBixMG7KnPaFCB7HzsZRNV/Fr1W8tNfl9cbexevpw==} - peerDependencies: - '@voltagent/logger': ^0.1.0 - zod: ^3.25.0 - peerDependenciesMeta: - '@voltagent/logger': - optional: true - dependencies: - '@hono/node-server': 1.18.2(hono@4.9.1) - '@hono/node-ws': 1.2.0(@hono/node-server@1.18.2)(hono@4.9.1) - '@hono/swagger-ui': 0.5.2(hono@4.9.1) - '@hono/zod-openapi': 0.19.10(hono@4.9.1)(zod@3.25.76) - '@libsql/client': 0.15.10 - '@modelcontextprotocol/sdk': 1.17.2 - '@opentelemetry/api': 1.9.0 - '@opentelemetry/sdk-trace-base': 2.1.0(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-node': 2.0.1(@opentelemetry/api@1.9.0) - '@types/ws': 8.18.1 - '@voltagent/internal': 0.0.9 - '@voltagent/logger': link:packages/logger - hono: 4.9.1 - ts-pattern: 5.8.0 - uuid: 9.0.1 - ws: 8.18.3 - zod: 3.25.76 - zod-from-json-schema: 0.0.5 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - dev: false - 
/@voltagent/internal@0.0.9: resolution: {integrity: sha512-Kaa2jW60VsfYVotuXC81LmNOJ07Lf1yq36vMteNKKa5seIsKkJ75PvIbMp52eEZ/ky/oBXrs94UXrQNqXBJ80Q==} dev: false @@ -13750,18 +13962,6 @@ packages: pino-pretty: 13.1.1 dev: false - /@voltagent/sdk@0.1.6(@voltagent/logger@packages+logger)(zod@3.25.76): - resolution: {integrity: sha512-ofyk36gaoF4unwEJIAKTWKjq9LaD1QUCrChGIPdChy8c81MsTPQdihNMT2GwIJeQMGCFAd30ybTBZ6ZvlI7oLQ==} - dependencies: - '@voltagent/core': 0.1.86(@voltagent/logger@packages+logger)(zod@3.25.76) - transitivePeerDependencies: - - '@voltagent/logger' - - bufferutil - - supports-color - - utf-8-validate - - zod - dev: false - /@vue/compiler-core@3.5.22: resolution: {integrity: sha512-jQ0pFPmZwTEiRNSb+i9Ow/I/cHv2tXYqsnHKKyCQ08irI2kdF5qmYedmF8si8mA7zepUFmJ2hqzS8CQmNOWOkQ==} dependencies: @@ -14421,6 +14621,23 @@ packages: when-exit: 2.1.4 dev: true + /autoevals@0.0.130: + resolution: {integrity: sha512-JS0T/YCEH13AAOGiWWGJDkIPP8LsDmRBYr3EazTukHxvd0nidOW7fGj0qVPFx2bARrSNO9AfCR6xoTP/5m3Bmw==} + dependencies: + ajv: 8.17.1 + compute-cosine-similarity: 1.1.0 + js-levenshtein: 1.1.6 + js-yaml: 4.1.0 + linear-sum-assignment: 1.0.7 + mustache: 4.2.0 + openai: 4.104.0(zod@3.25.76) + zod: 3.25.76 + zod-to-json-schema: 3.24.6(zod@3.25.76) + transitivePeerDependencies: + - encoding + - ws + dev: true + /autoevals@0.0.131: resolution: {integrity: sha512-F+3lraja+Ms7n1M2cpWl65N7AYx4sPocRW454H5HlSGabYMfuFOUxw8IXmEYDkQ38BxtZ0Wd5ZAQj9RF59YJWw==} dependencies: @@ -14465,9 +14682,9 @@ packages: resolution: {integrity: sha512-DV5bdJZTzZ0zn0DC24v3jD7Mnidh6xhKa4GfKCbq3sfW8kaWhDdZjP3i81geA8T33tdYqWKw4D3fVv0CwEgKVA==} dependencies: '@babel/core': 7.28.0 - '@babel/parser': 7.28.0 + '@babel/parser': 7.28.4 '@babel/traverse': 7.28.0 - '@babel/types': 7.28.2 + '@babel/types': 7.28.4 transitivePeerDependencies: - supports-color dev: true @@ -14521,7 +14738,7 @@ packages: engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} dependencies: '@babel/template': 7.27.2 - '@babel/types': 7.28.2 + '@babel/types': 7.28.4 '@types/babel__core': 7.20.5 '@types/babel__traverse': 7.28.0 dev: true @@ -19809,7 +20026,7 @@ packages: engines: {node: '>=8'} dependencies: '@babel/core': 7.28.0 - '@babel/parser': 7.28.0 + '@babel/parser': 7.28.4 '@istanbuljs/schema': 0.1.3 istanbul-lib-coverage: 3.2.2 semver: 6.3.1 @@ -19822,7 +20039,7 @@ packages: engines: {node: '>=10'} dependencies: '@babel/core': 7.28.0 - '@babel/parser': 7.28.0 + '@babel/parser': 7.28.4 '@istanbuljs/schema': 0.1.3 istanbul-lib-coverage: 3.2.2 semver: 7.7.2 @@ -20214,7 +20431,7 @@ packages: '@babel/generator': 7.28.0 '@babel/plugin-syntax-jsx': 7.27.1(@babel/core@7.28.0) '@babel/plugin-syntax-typescript': 7.27.1(@babel/core@7.28.0) - '@babel/types': 7.28.2 + '@babel/types': 7.28.4 '@jest/expect-utils': 29.7.0 '@jest/transform': 29.7.0 '@jest/types': 29.6.3 @@ -21437,8 +21654,8 @@ packages: /magicast@0.3.5: resolution: {integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==} dependencies: - '@babel/parser': 7.28.0 - '@babel/types': 7.28.2 + '@babel/parser': 7.28.4 + '@babel/types': 7.28.4 source-map-js: 1.2.1 dev: true @@ -22749,7 +22966,7 @@ packages: resolution: {integrity: sha512-3VW/8JpPqPvnJvseXowjZcirPisssnBuDikk6JIZ8jQzF7KJQX52iPFX4RYYxLycYH7IbMRSPUOga/esVjy5Yg==} engines: {node: '>=18'} dependencies: - '@babel/parser': 7.28.0 + '@babel/parser': 7.28.4 dev: true /node-stream-zip@1.15.0: @@ -23373,6 +23590,29 @@ packages: transitivePeerDependencies: - encoding + /openai@4.104.0(zod@4.1.11): + resolution: 
{integrity: sha512-p99EFNsA/yX6UhVO93f5kJsDRLAg+CTA2RBqdHK4RtK8u5IJw32Hyb2dTGKbnnFmnuoBv5r7Z2CURI9sGZpSuA==} + hasBin: true + peerDependencies: + ws: ^8.18.0 + zod: ^3.23.8 + peerDependenciesMeta: + ws: + optional: true + zod: + optional: true + dependencies: + '@types/node': 18.19.122 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + zod: 4.1.11 + transitivePeerDependencies: + - encoding + /openai@5.20.3(zod@3.25.76): resolution: {integrity: sha512-8V0KgAcPFppDIP8uMBOkhRrhDBuxNQYQxb9IovP4NN4VyaYGISAzYexyYYuAwVul2HB75Wpib0xDboYJqRMNow==} hasBin: true @@ -28119,6 +28359,64 @@ packages: - yaml dev: true + /viteval@0.1.9(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5): + resolution: {integrity: sha512-Auc7RGngDD7FU2XNPk6OCEe2gqChejhDLg57l/GSCknnafm1DyomyxAf0glSO0nE1go52raZodMWQevhRbrrwA==} + hasBin: true + dependencies: + '@viteval/cli': 0.2.4(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5) + '@viteval/core': 0.1.8(@types/node@24.2.1)(jiti@2.5.1)(tsx@4.20.4) + transitivePeerDependencies: + - '@azure/app-configuration' + - '@azure/cosmos' + - '@azure/data-tables' + - '@azure/identity' + - '@azure/keyvault-secrets' + - '@azure/storage-blob' + - '@capacitor/preferences' + - '@deno/kv' + - '@electric-sql/pglite' + - '@libsql/client' + - '@netlify/blobs' + - '@planetscale/database' + - '@rsbuild/core' + - '@tanstack/query-core' + - '@tanstack/react-query' + - '@tanstack/router-core' + - '@types/node' + - '@types/react' + - '@upstash/redis' + - '@vercel/blob' + - '@vercel/functions' + - '@vercel/kv' + - '@vitejs/plugin-react' + - aws4fetch + - better-sqlite3 + - drizzle-orm + - encoding + - idb-keyval + - jiti + - less + - lightningcss + - magicast + - mysql2 + - rolldown + - sass + - sass-embedded + - sqlite3 + - stylus + - sugarss + - supports-color + - terser + - tsx + - uploadthing + - vite + - vite-plugin-solid + - webpack + - ws + - xml2js + - yaml + dev: true + /viteval@0.5.3(@tanstack/query-core@5.89.0)(@tanstack/react-query@5.89.0)(@tanstack/router-core@1.131.44)(@types/node@24.2.1)(@types/react@19.1.10)(@vitejs/plugin-react@4.7.0)(tsx@4.20.4)(vite@6.3.5): resolution: {integrity: sha512-phDrceVUtOje90Oy0v0jeSuAC1FxGrho34KGUntUs9ZG5nJe+CZt59YykasOPdLv0HA5oQgRAkOY2xUvwmaRag==} hasBin: true diff --git a/website/docs/agents/memory.md b/website/docs/agents/memory.md index 095e5f71c..e6c982423 100644 --- a/website/docs/agents/memory.md +++ b/website/docs/agents/memory.md @@ -5,86 +5,63 @@ slug: /agents/memory # Agent Memory -Memory enables agents to remember past interactions and maintain conversation context. This guide shows how to configure memory for your agents. +VoltAgent's `Memory` class stores conversation history and enables agents to maintain context across interactions. Supports persistent storage, semantic search, and working memory. 
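+
+A fully configured `Memory` instance combines all three. The following is a minimal sketch assuming the LibSQL adapters and an OpenAI embedding model (and that `LibSQLVectorAdapter` accepts the same `url` option as `LibSQLMemoryAdapter`); any storage/vector/embedding combination from the providers below works the same way:
+
+```typescript
+import { Memory, AiSdkEmbeddingAdapter } from "@voltagent/core";
+import { LibSQLMemoryAdapter, LibSQLVectorAdapter } from "@voltagent/libsql";
+import { openai } from "@ai-sdk/openai";
+
+const memory = new Memory({
+  // Persistent conversation storage (survives restarts)
+  storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }),
+  // Semantic search requires both an embedding adapter and a vector store
+  embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")),
+  vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), // assumed `url` option
+  // Compact working-memory context (conversation-scoped here)
+  workingMemory: { enabled: true, scope: "conversation" },
+});
+```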
-## Default Behavior +## Storage Providers -Agents automatically use in-memory storage by default, storing conversations in application memory: +| Provider | Package | Persistence | Use Case | +| ------------------ | ----------------------------- | ---------------------- | -------------------------------- | +| **InMemory** | `@voltagent/core` | None (RAM only) | Development, testing | +| **Managed Memory** | `@voltagent/voltagent-memory` | VoltOps-hosted | Production-ready, zero-setup | +| **LibSQL** | `@voltagent/libsql` | Local SQLite or remote | Self-hosted, edge deployments | +| **Postgres** | `@voltagent/postgres` | Self-hosted Postgres | Existing Postgres infrastructure | +| **Supabase** | `@voltagent/supabase` | Supabase | Supabase-based applications | + +## Core Features + +- **Conversation Storage** - Messages stored per `userId` and `conversationId` +- **Semantic Search** - Retrieve past messages by similarity (requires embedding + vector adapters) +- **Working Memory** - Compact context storage (Markdown template, JSON schema, or free-form) +- **Workflow State** - Suspendable workflow checkpoint storage + +## Quick Start ```typescript -import { Agent } from "@voltagent/core"; +import { Agent, Memory } from "@voltagent/core"; +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; import { openai } from "@ai-sdk/openai"; +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "my-app-memory", + }), +}); + const agent = new Agent({ name: "Assistant", - instructions: "You are a helpful assistant", - model: openai("gpt-4o"), - // Memory is automatically enabled with in-memory storage + model: openai("gpt-4o-mini"), + memory, }); -``` -## Using Memory - -To maintain conversation context, provide `userId` and `conversationId`: - -```typescript // First message -const response1 = await agent.generateText("My name is Sarah", { +await agent.generateText("My name is Sarah", { userId: "user-123", conversationId: "chat-001", }); -// Follow-up message - agent remembers the name -const response2 = await agent.generateText("What's my name?", { +// Agent remembers context +await agent.generateText("What's my name?", { userId: "user-123", - conversationId: "chat-001", // Same conversation ID -}); -console.log(response2.text); // "Your name is Sarah" -``` - -## Persistent Storage - -For conversations that survive application restarts, configure `Memory` with a persistent adapter such as LibSQL: - -```typescript -import { Agent, Memory } from "@voltagent/core"; -import { LibSQLMemoryAdapter } from "@voltagent/libsql"; // npm install @voltagent/libsql -import { openai } from "@ai-sdk/openai"; - -const agent = new Agent({ - name: "Persistent Assistant", - instructions: "You are a helpful assistant", - model: openai("gpt-4o"), - memory: new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - }), -}); -``` - -## Disabling Memory - -For stateless agents that don't need conversation history: - -```typescript -const agent = new Agent({ - name: "Stateless Assistant", - instructions: "You provide one-off responses", - model: openai("gpt-4o"), - memory: false, // Disable memory completely + conversationId: "chat-001", }); ``` -## Available Memory Providers - -- **[InMemoryStorageAdapter](./memory/in-memory.md)** (Default) - Built into `@voltagent/core` -- **[LibSQLMemoryAdapter](./memory/libsql.md)** - Install: `npm install @voltagent/libsql` -- **[PostgreSQLMemoryAdapter](./memory/postgres.md)** - Install: `npm install 
@voltagent/postgres` -- **[SupabaseMemoryAdapter](./memory/supabase.md)** - Install: `npm install @voltagent/supabase` - -## Learn More +## Complete Documentation -For detailed information about memory configuration, providers, and advanced usage: +For detailed configuration, provider setup, and advanced features: -- **[Memory Overview](./memory/overview.md)** - Complete memory documentation -- **[Memory Providers](./memory/overview.md#memory-providers)** - Detailed provider comparison -- **[Custom Providers](./memory/overview.md#implementing-custom-memory-providers)** - Build your own storage +- **[Memory Overview](./memory/overview.md)** - Full memory system documentation +- **[Managed Memory](./memory/managed-memory.md)** - Production-ready hosted storage +- **[Semantic Search](./memory/semantic-search.md)** - Vector-based message retrieval +- **[Working Memory](./memory/working-memory.md)** - Compact context management +- **[Storage Adapters](./memory/in-memory.md)** - Provider-specific guides diff --git a/website/docs/agents/memory/in-memory.md b/website/docs/agents/memory/in-memory.md index 57b9013d8..cc5c8aef7 100644 --- a/website/docs/agents/memory/in-memory.md +++ b/website/docs/agents/memory/in-memory.md @@ -5,70 +5,149 @@ slug: /agents/memory/in-memory # In-Memory Storage -VoltAgent's core package (`@voltagent/core`) includes `InMemoryStorageAdapter`, a simple storage adapter (for the `Memory` class) that stores conversation history in application memory. +`InMemoryStorageAdapter` stores conversation history in application memory. Data is lost when the application restarts. -## Overview +## Default Behavior -- **Use Case:** Development, testing, demos, or any scenario where persistent memory across application restarts is not required. -- **Pros:** Zero external dependencies, extremely fast, easy to use. -- **Cons:** All stored data (conversation history, agent state) is **lost** when the application stops or restarts. -- **Availability:** Included directly in `@voltagent/core`. +Agents use in-memory storage by default when no `memory` option is provided: -## Configuration +```ts +import { Agent } from "@voltagent/core"; +import { openai } from "@ai-sdk/openai"; + +// Uses InMemoryStorageAdapter automatically +const agent = new Agent({ + name: "Assistant", + instructions: "Help users with questions.", + model: openai("gpt-4o-mini"), +}); +``` + +## Explicit Configuration -By default, agents use in-memory storage without any configuration. To customize (e.g., storage limits), configure it explicitly. +Configure storage limits explicitly: -```typescript +```ts import { Agent, Memory, InMemoryStorageAdapter } from "@voltagent/core"; import { openai } from "@ai-sdk/openai"; -// Optional: Configure in-memory storage explicitly const memory = new Memory({ - storage: new InMemoryStorageAdapter({ storageLimit: 100 }), + storage: new InMemoryStorageAdapter({ + storageLimit: 100, // max messages per userId/conversationId (default: 100) + }), }); const agent = new Agent({ - name: "Ephemeral Agent", - instructions: "An agent using in-memory storage (history resets on restart).", - model: openai("gpt-4o"), - memory, // Optional; default is also in-memory + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, }); - -// Interactions with this agent will use the in-memory store. -// await agent.generateText("Remember this info.", { userId: "user1", conversationId: "conv1" }); -// // If the app restarts here, the above message is lost. 
-// await agent.generateText("Do you remember?", { userId: "user1", conversationId: "conv1" }); ``` -**Configuration Options (InMemoryStorageAdapter):** +## Features -- `storageLimit` (number, optional): The maximum number of messages to retain per unique `userId`/`conversationId`. Oldest messages are pruned when exceeded. Defaults to `100`. +### Conversation Storage -## Working Memory +- Messages stored per `userId` and `conversationId` +- Oldest messages pruned when `storageLimit` exceeded +- All `StorageAdapter` methods supported -`InMemoryStorageAdapter` implements working memory storage for both conversation and user scopes using in‑process metadata fields. Enable it via `Memory({ workingMemory: { enabled: true, ... } })`. See: [Working Memory](./working-memory.md). +### Working Memory -## Semantic Search (Embeddings + Vectors) +Supports both conversation and user-scoped working memory: -The in-memory storage can be combined with `AiSdkEmbeddingAdapter` and `InMemoryVectorAdapter` to enable semantic retrieval in development: +```ts +const memory = new Memory({ + storage: new InMemoryStorageAdapter(), + workingMemory: { + enabled: true, + scope: "conversation", // or "user" + }, +}); +``` + +See [Working Memory](./working-memory.md) for configuration details. + +### Semantic Search (Development) + +Combine with `InMemoryVectorAdapter` for semantic search during development: ```ts -import { Memory, AiSdkEmbeddingAdapter, InMemoryVectorAdapter } from "@voltagent/core"; -import { InMemoryStorageAdapter } from "@voltagent/core"; +import { + Memory, + AiSdkEmbeddingAdapter, + InMemoryVectorAdapter, + InMemoryStorageAdapter, +} from "@voltagent/core"; import { openai } from "@ai-sdk/openai"; const memory = new Memory({ - storage: new InMemoryStorageAdapter({ storageLimit: 100 }), + storage: new InMemoryStorageAdapter(), embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), vector: new InMemoryVectorAdapter(), }); ``` -## When to Use +Both storage and vectors are lost on restart. For persistent vectors, use `LibSQLVectorAdapter`. + +## Use Cases + +### Development & Testing + +Test agent logic without database setup: + +```ts +import { Agent, Memory, InMemoryStorageAdapter } from "@voltagent/core"; +import { openai } from "@ai-sdk/openai"; + +const testAgent = new Agent({ + name: "Test Assistant", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: new InMemoryStorageAdapter({ storageLimit: 50 }), + }), +}); + +// Test conversations without persistence +await testAgent.generateText("Test message", { + userId: "test-user", + conversationId: "test-conversation", +}); +``` + +### Stateless Deployments + +Serverless functions or ephemeral containers where persistence isn't needed: + +```ts +// Cloud function handler +export async function handler(event) { + const agent = new Agent({ + name: "Serverless Assistant", + model: openai("gpt-4o-mini"), + // Default in-memory storage + }); + + return await agent.generateText(event.message, { + userId: event.userId, + conversationId: event.sessionId, + }); +} +``` + +### Demos & Examples + +Quick prototypes without infrastructure dependencies. + +## Limitations + +- **No persistence** - All data lost on restart +- **Memory usage** - Large message counts consume application memory +- **Not for production** - Use persistent adapters for production applications -- **Development & Testing:** Quickly test agent logic without setting up a database. 
-- **Stateless Use Cases:** When conversation history is not needed between sessions or application runs. -- **Demos & Examples:** Simple setup for showcasing agent capabilities. -- **Caching Layers:** Could potentially be used as a short-term cache in more complex memory strategies (though not its primary design). +## Learn More -Avoid using `InMemoryStorage` in production environments where conversation history needs to be persistent. +- **[Managed Memory](./managed-memory.md)** - Production-ready hosted memory with zero setup +- **[LibSQL / SQLite](./libsql.md)** - Self-hosted SQLite or edge deployments +- **[PostgreSQL](./postgres.md)** - Self-hosted Postgres adapter +- **[Supabase](./supabase.md)** - Supabase integration diff --git a/website/docs/agents/memory/libsql.md b/website/docs/agents/memory/libsql.md index a1271317f..e75160472 100644 --- a/website/docs/agents/memory/libsql.md +++ b/website/docs/agents/memory/libsql.md @@ -1,260 +1,184 @@ --- -title: LibSQL / Turso / SQLite Memory +title: LibSQL / SQLite Memory slug: /agents/memory/libsql --- -# LibSQL / Turso / SQLite Memory +# LibSQL / SQLite Memory -VoltAgent provides a separate package (`@voltagent/libsql`) that includes `LibSQLMemoryAdapter`, a storage adapter for the `Memory` class using [LibSQL](https://github.com/tursodatabase/libsql) for persistent storage. +`LibSQLMemoryAdapter` stores conversations in LibSQL, supporting local SQLite files, remote instances, and self-hosted `sqld`. -This provider is versatile and can connect to: - -- **Local SQLite files:** Ideal for development, testing, and simple deployments. -- **[Turso](https://turso.tech/):** A distributed database platform built on LibSQL, offering a globally available edge database. -- **Self-hosted `sqld` instances:** If you run your own LibSQL server. 
- -## Setup - -Install the LibSQL package: +## Installation ```bash npm install @voltagent/libsql -# or -yarn add @voltagent/libsql -# or -pnpm add @voltagent/libsql ``` -If you plan to use Turso, you might need the Turso CLI for setup: `npm install -g @tursodatabase/cli` - ## Configuration -Initialize `LibSQLMemoryAdapter` and pass it to the `Memory` instance used by your `Agent`: - -```typescript +```ts import { Agent, Memory } from "@voltagent/core"; import { LibSQLMemoryAdapter } from "@voltagent/libsql"; import { openai } from "@ai-sdk/openai"; -import { createPinoLogger } from "@voltagent/logger"; -// Create logger -const logger = createPinoLogger({ - name: "my-app", - level: "info", +// Local SQLite +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ + url: "file:./.voltagent/memory.db", // or ":memory:" for ephemeral + }), }); -// Configure LibSQL memory adapter -const memoryStorage = new LibSQLMemoryAdapter({ - // Required: Connection URL - url: process.env.DATABASE_URL || "file:./.voltagent/memory.db", // Example: Env var for Turso, fallback to local file - - // Required for Turso / Remote sqld (if not using TLS or auth is needed) - authToken: process.env.DATABASE_AUTH_TOKEN, - - // Optional: Logger for debugging - logger: logger.child({ component: "libsql" }), - - // Optional: Prefix for database table names - tablePrefix: "my_agent_memory", // Defaults to 'voltagent_memory' - - // Optional: Storage limit (max number of messages per user/conversation) - // storageLimit: 100, // Defaults to 100 +// Remote instance +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ + url: "libsql://your-database.example.io", + authToken: process.env.LIBSQL_AUTH_TOKEN, + }), }); const agent = new Agent({ - name: "LibSQL Memory Agent", - instructions: "An agent using LibSQL for memory.", - model: openai("gpt-4o"), - memory: new Memory({ storage: memoryStorage }), + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, }); ``` -**Configuration Options:** - -- `url` (string, required): The connection URL for your LibSQL database. - - **Local SQLite:** Use `file:`. - - `file:memory.db`: Creates/uses `memory.db` in the current working directory. - - `file:.voltagent/memory.db`: Creates/uses `memory.db` inside a `.voltagent` subdirectory (created automatically if it doesn't exist). - - `file:/path/to/your/database.db`: Absolute path. - - **Turso:** Use the `libsql://your-database-name-username.turso.io` URL provided in your Turso dashboard. - - **Remote `sqld`:** Use the appropriate `libsql://` or `http(s)://` URL for your server. -- `authToken` (string, optional): Required for authenticated connections to Turso or remote `sqld` instances. -- `tablePrefix` (string, optional): A prefix added to all database tables created by this provider (e.g., `my_prefix_messages`, `my_prefix_conversations`). Defaults to `voltagent_memory`. -- `storageLimit` (number, optional): The maximum number of messages to retain per user/conversation thread. Older messages are automatically pruned when the limit is exceeded. Defaults to `100`. -- `logger` (Logger, optional): A logger instance for debugging output. Typically created with `logger.child({ component: "libsql" })`. 
- -## Conversation Management - -The LibSQL provider includes enhanced support for managing conversations across multiple users: - -```typescript -// Get conversations for a specific user -const conversations = await memoryStorage.getConversationsByUserId("user-123", { - limit: 50, - orderBy: "updated_at", - orderDirection: "DESC", -}); +### Configuration Options -// Complex queries (filters, sort, pagination) -const recentConversations = await memoryStorage.queryConversations({ - userId: "user-123", - limit: 10, - orderBy: "updated_at", - orderDirection: "DESC", -}); +| Option | Type | Description | +| -------------- | -------- | ---------------------------------------------------------- | +| `url` | `string` | Connection URL (`file:`, `libsql://`, or `:memory:`) | +| `authToken` | `string` | Auth token for remote instances (optional for local files) | +| `tablePrefix` | `string` | Table name prefix (default: `voltagent_memory`) | +| `storageLimit` | `number` | Max messages per conversation (default: `100`) | +| `logger` | `Logger` | Optional logger for debugging | -// Pagination via limit/offset -const page1 = await memoryStorage.queryConversations({ userId: "user-123", limit: 20, offset: 0 }); -const page2 = await memoryStorage.queryConversations({ userId: "user-123", limit: 20, offset: 20 }); +### URL Formats -// Get a conversation by ID -const conversation = await memoryStorage.getConversation("conversation-id"); +- **Local SQLite**: `file:./.voltagent/memory.db` or `file:memory.db` +- **In-memory (testing)**: `:memory:` or `file::memory:` +- **Remote instance** (e.g., Turso): `libsql://your-database.example.io` +- **Self-hosted sqld**: `libsql://your-server.com` or `https://your-server.com` -// Create and update conversations -const newConversation = await memoryStorage.createConversation({ - id: "conversation-id", - resourceId: "app-resource-1", - userId: "user-123", - title: "New Chat Session", - metadata: { source: "web-app" }, -}); - -await memoryStorage.updateConversation("conversation-id", { - title: "Updated Title", -}); -``` +## Features -## Querying Conversations +### Automatic Schema Creation -The LibSQL storage provides powerful conversation querying capabilities with filtering, pagination, and sorting options: +Tables are created automatically on first use: -```typescript -// Query with multiple filters -const workConversations = await memoryStorage.queryConversations({ - userId: "user-123", - resourceId: "work-agent", - limit: 25, - offset: 0, - orderBy: "created_at", - orderDirection: "DESC", -}); - -// Get all conversations for a user -const userConversations = await memoryStorage.queryConversations({ - userId: "user-123", - limit: 50, -}); - -// Get conversations for a specific resource -const resourceConversations = await memoryStorage.queryConversations({ - resourceId: "chatbot-v1", - limit: 100, - orderBy: "updated_at", -}); - -// Admin view - get all conversations -const allConversations = await memoryStorage.queryConversations({ - limit: 200, - orderBy: "created_at", - orderDirection: "ASC", -}); -``` +- `${tablePrefix}_users` +- `${tablePrefix}_conversations` +- `${tablePrefix}_messages` +- `${tablePrefix}_workflow_states` -**Query Options:** +Schema migrations run automatically when updating VoltAgent versions. 
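+
+If several agents share one database, the `tablePrefix` option above namespaces these tables per agent. A minimal sketch (the `support_agent` prefix is only illustrative):
+
+```ts
+import { Memory } from "@voltagent/core";
+import { LibSQLMemoryAdapter } from "@voltagent/libsql";
+
+// On first use this creates support_agent_users, support_agent_conversations,
+// support_agent_messages, and support_agent_workflow_states.
+const memory = new Memory({
+  storage: new LibSQLMemoryAdapter({
+    url: "file:./.voltagent/memory.db",
+    tablePrefix: "support_agent",
+  }),
+});
+```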
-- `userId` (optional): Filter conversations by specific user -- `resourceId` (optional): Filter conversations by specific resource -- `limit` (optional): Maximum number of conversations to return (default: 50) -- `offset` (optional): Number of conversations to skip for pagination (default: 0) -- `orderBy` (optional): Field to sort by: 'created_at', 'updated_at', or 'title' (default: 'updated_at') -- `orderDirection` (optional): Sort direction: 'ASC' or 'DESC' (default: 'DESC') +### Conversation Storage -## Getting Conversation Messages +- Messages stored per `userId` and `conversationId` +- Oldest messages pruned when `storageLimit` exceeded +- All `StorageAdapter` methods supported -Retrieve messages for a specific conversation: +### Working Memory -```typescript -// Get recent messages (chronological order) -const messages = await memoryStorage.getMessages("user-123", "conversation-456", { - limit: 50, -}); +Supports both conversation and user-scoped working memory: -// Use time-based pagination when needed -const older = await memoryStorage.getMessages("user-123", "conversation-456", { - before: new Date("2024-01-01T00:00:00Z"), - limit: 50, +```ts +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + workingMemory: { + enabled: true, + scope: "conversation", // or "user" + template: `# Context\n- Key facts:\n`, + }, }); ``` -**Message Query Options:** - -- `limit` (optional): Maximum number of messages to return (default: 100) -- `before` (optional): Only messages created before this date -- `after` (optional): Only messages created after this date -- `roles` (optional): Filter by roles, e.g., `["user", "assistant"]` +See [Working Memory](./working-memory.md) for configuration details. -Messages are returned in chronological order (oldest first) for natural conversation flow. +### Vector Storage (Optional) -## Automatic Table Creation +Use `LibSQLVectorAdapter` for persistent vector storage: -`LibSQLMemoryAdapter` **automatically creates** the necessary tables with your configured prefix: - -- `${tablePrefix}_users` -- `${tablePrefix}_conversations` -- `${tablePrefix}_messages` -- `${tablePrefix}_workflow_states` - -This simplifies setup, especially for local development using SQLite files. +```ts +import { Memory, AiSdkEmbeddingAdapter } from "@voltagent/core"; +import { LibSQLMemoryAdapter, LibSQLVectorAdapter } from "@voltagent/libsql"; +import { openai } from "@ai-sdk/openai"; -The provider also **automatically migrates** existing databases to new schemas when you update VoltAgent, ensuring backward compatibility. +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), +}); +``` -## Working Memory +**Vector adapter options:** -`LibSQLMemoryAdapter` implements working memory operations used by `Memory`: +- Local file: Same URL as storage adapter +- In-memory: `:memory:` (vectors lost on restart) +- Turso: Same URL + auth token as storage adapter -- Conversation-scoped working memory is stored under `conversations.metadata.workingMemory`. -- User-scoped working memory is stored under `users.metadata.workingMemory`. +See [Semantic Search](./semantic-search.md) for usage. -Enable via `Memory({ workingMemory: { enabled: true, template | schema, scope } })`. See: [Working Memory](./working-memory.md). 
+**Use cases:** -Programmatic APIs (via `Memory`): +- Local development with SQLite files +- Edge deployments with remote instances +- Serverless environments +- Simple deployments without separate database servers -- `getWorkingMemory({ conversationId?, userId? })` -- `updateWorkingMemory({ conversationId?, userId?, content })` -- `clearWorkingMemory({ conversationId?, userId? })` +For production-ready zero-setup hosting, see [Managed Memory](./managed-memory.md). -## Semantic Search (Embeddings + Vectors) +## Examples -Vector search is configured on `Memory` independently of the storage adapter. To persist vectors with LibSQL, use `LibSQLVectorAdapter`: +### Development with Local SQLite ```ts -import { Memory, AiSdkEmbeddingAdapter } from "@voltagent/core"; -import { LibSQLMemoryAdapter, LibSQLVectorAdapter } from "@voltagent/libsql"; +import { Agent, Memory } from "@voltagent/core"; +import { LibSQLMemoryAdapter } from "@voltagent/libsql"; import { openai } from "@ai-sdk/openai"; -const memory = new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), - vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), +const agent = new Agent({ + name: "Dev Assistant", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: new LibSQLMemoryAdapter({ + url: "file:./.voltagent/dev-memory.db", + }), + }), }); ``` -Tips: +### Remote Instance -- For tests/ephemeral runs use `url: ":memory:"` (or `"file::memory:"`). -- Do not pass `mode=memory` in the URL; LibSQL client doesn’t support it; use `:memory:`. -- For production persistence use a file path (e.g., `file:./.voltagent/memory.db`) or a remote Turso URL. +```ts +import { Agent, Memory } from "@voltagent/core"; +import { LibSQLMemoryAdapter } from "@voltagent/libsql"; +import { openai } from "@ai-sdk/openai"; -Use with agent calls by passing `semanticMemory` options. See: [Semantic Search](./semantic-search.md). +const agent = new Agent({ + name: "Assistant", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: new LibSQLMemoryAdapter({ + url: process.env.LIBSQL_DATABASE_URL!, + authToken: process.env.LIBSQL_AUTH_TOKEN!, + }), + }), +}); +``` -## Notes +### Testing with In-Memory Database -- LibSQL is ideal for local development (SQLite file) and Turso deployments. -- Default memory is in-memory; configure LibSQL explicitly for persistence. +```ts +const testMemory = new Memory({ + storage: new LibSQLMemoryAdapter({ + url: ":memory:", // ephemeral + }), +}); +``` -## Use Cases +## Learn More -- **Local Development & Testing:** Quickly set up persistent memory using a local SQLite file without needing external database services. -- **Serverless & Edge Functions:** SQLite databases (via LibSQL) can often be used effectively in serverless environments. -- **Turso Integration:** Leverage Turso's distributed edge database for low-latency memory access for globally distributed applications. -- **Simple Deployments:** Suitable for applications where managing a separate database server is overkill. 
+- **[Managed Memory](./managed-memory.md)** - Production-ready hosted memory with zero setup +- **[Working Memory](./working-memory.md)** - Maintain compact context +- **[Semantic Search](./semantic-search.md)** - Vector search with LibSQLVectorAdapter diff --git a/website/docs/agents/memory/managed-memory.md b/website/docs/agents/memory/managed-memory.md new file mode 100644 index 000000000..b2bd8c9cb --- /dev/null +++ b/website/docs/agents/memory/managed-memory.md @@ -0,0 +1,274 @@ +--- +title: Managed Memory +slug: /agents/memory/managed-memory +--- + +# Managed Memory + +VoltOps Managed Memory is a production-ready hosted memory service for VoltAgent. Create a database through the VoltOps Console and connect using API credentials - no infrastructure provisioning or schema management required. + +## Availability + +- **US Region**: Virginia (us-east-1) +- **EU Region**: Germany (eu-central-1) + +## Installation + +```bash +npm install @voltagent/voltagent-memory +``` + +## Configuration + +### Automatic Setup (Recommended) + +Get your credentials from [console.voltagent.dev/memory/managed-memory](https://console.voltagent.dev/memory/managed-memory) and set environment variables: + +```bash +# .env +VOLTAGENT_PUBLIC_KEY=pk_... +VOLTAGENT_SECRET_KEY=sk_... +``` + +```ts +import { Agent, Memory } from "@voltagent/core"; +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { openai } from "@ai-sdk/openai"; + +// Adapter automatically uses VoltOps credentials from environment +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + }), +}); + +const agent = new Agent({ + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, +}); +``` + +The adapter checks `AgentRegistry` for a global `VoltOpsClient` instance configured from environment variables. This is the simplest setup - no need to instantiate `VoltOpsClient` manually. + +### Manual Setup + +Pass a `VoltOpsClient` instance explicitly: + +```ts +import { Agent, Memory, VoltOpsClient } from "@voltagent/core"; +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { openai } from "@ai-sdk/openai"; + +const voltOpsClient = new VoltOpsClient({ + publicKey: process.env.VOLTAGENT_PUBLIC_KEY!, + secretKey: process.env.VOLTAGENT_SECRET_KEY!, + // baseUrl: "https://api.voltagent.dev", // optional +}); + +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + voltOpsClient, // explicit client + }), +}); + +const agent = new Agent({ + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, +}); +``` + +Use manual setup when: + +- Running multiple adapters with different credentials +- Dynamically changing credentials at runtime +- Testing with mock clients + +### Configuration Options + +| Option | Type | Required | Description | +| --------------- | --------------- | -------- | ---------------------------------------------------------------- | +| `databaseName` | `string` | Yes\* | Database name from VoltOps Console | +| `databaseId` | `string` | Yes\* | Database ID from VoltOps API (alternative to `databaseName`) | +| `voltOpsClient` | `VoltOpsClient` | No | Explicit VoltOps client (optional if credentials in environment) | +| `debug` | `boolean` | No | Enable debug logging (default: `false`) | + +\*Either `databaseName` or `databaseId` is required. + +## Creating a Database + +1. 
Navigate to [console.voltagent.dev/memory/managed-memory](https://console.voltagent.dev/memory/managed-memory) +2. Click **Create Database** +3. Enter a name and select region (US or EU) +4. Copy the database credentials + +To get your VoltOps API keys, go to **Settings** in the console to find your public and secret keys. + +## Features + +### Conversation Storage + +All `StorageAdapter` methods are supported: + +- Message persistence (`addMessage`, `addMessages`, `getMessages`, `clearMessages`) +- Conversation management (`createConversation`, `getConversation`, `updateConversation`, `deleteConversation`) +- Working memory (`getWorkingMemory`, `setWorkingMemory`, `deleteWorkingMemory`) +- Workflow state (`getWorkflowState`, `setWorkflowState`, `updateWorkflowState`) + +### Vector Storage (Optional) + +Enable semantic search with `ManagedMemoryVectorAdapter`: + +```ts +import { ManagedMemoryAdapter, ManagedMemoryVectorAdapter } from "@voltagent/voltagent-memory"; +import { AiSdkEmbeddingAdapter, Memory } from "@voltagent/core"; +import { openai } from "@ai-sdk/openai"; + +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new ManagedMemoryVectorAdapter({ + databaseName: "production-memory", + }), +}); +``` + +Both adapters resolve credentials the same way (environment or explicit client). See [Semantic Search](./semantic-search.md) for usage. + +## Migration from Self-Hosted + +### Export from LibSQL + +```ts +import { LibSQLMemoryAdapter } from "@voltagent/libsql"; + +const localAdapter = new LibSQLMemoryAdapter({ + url: "file:./.voltagent/memory.db", +}); + +// Export conversations for a user +const conversations = await localAdapter.getConversationsByUserId("user-123"); + +for (const conv of conversations) { + const messages = await localAdapter.getMessages("user-123", conv.id); + console.log(`Conversation ${conv.id}: ${messages.length} messages`); +} +``` + +### Import to Managed Memory + +```ts +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { Memory } from "@voltagent/core"; + +const managedMemory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + }), +}); + +// Import conversation +await managedMemory.createConversation({ + id: conv.id, + userId: conv.userId, + resourceId: conv.resourceId, + title: conv.title, + metadata: conv.metadata, +}); + +// Import messages +await managedMemory.addMessages(messages, conv.userId, conv.id); +``` + +Bulk import/export APIs are planned for future releases. + +## Use Cases + +### Development & Prototyping + +```ts +// No database setup required for pilots +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "dev-memory", + }), +}); +``` + +### Multi-Region Deployment + +```ts +// US database for North American users +const usMemory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "memory-us", + }), +}); + +// EU database for European users +const euMemory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "memory-eu", + }), +}); +``` + +### Team Collaboration + +Multiple developers connect to the same managed database using shared VoltOps credentials. Create separate databases for staging and production. 
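+
+A minimal sketch of that staging/production split, assuming `memory-staging` and `memory-production` were already created in the VoltOps Console:
+
+```ts
+import { Memory } from "@voltagent/core";
+import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory";
+
+// Credentials come from VOLTAGENT_PUBLIC_KEY / VOLTAGENT_SECRET_KEY as in
+// the automatic setup above; only the database name differs per environment.
+const memory = new Memory({
+  storage: new ManagedMemoryAdapter({
+    databaseName: process.env.NODE_ENV === "production" ? "memory-production" : "memory-staging",
+  }),
+});
+```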
+ +## Limitations + +- **Regional latency**: Choose the region closest to your application servers +- **Storage quotas**: Check VoltOps Console for plan-specific limits +- **Credential rotation**: Update environment variables when rotating credentials + +## Comparison with Self-Hosted + +| Feature | Managed Memory | Self-Hosted | +| ------------------- | ----------------- | ------------------------- | +| Setup time | < 3 minutes | Hours | +| Schema management | Automatic | Manual migrations | +| Regional hosting | US & EU | DIY | +| Credential rotation | API-based | Manual | +| Monitoring | Console dashboard | DIY | +| Cost | Usage-based | Infrastructure + ops time | + +Production-ready for teams without database infrastructure or during rapid deployment. + +## Troubleshooting + +### Adapter initialization failed + +``` +Error: Unable to locate managed memory database +``` + +**Solution**: Verify `databaseName` matches the name in VoltOps Console, or use `databaseId` instead. + +### VoltOps client not available + +``` +Error: VoltOps client is not available for managed memory initialization +``` + +**Solution**: Ensure `VOLTAGENT_PUBLIC_KEY` and `VOLTAGENT_SECRET_KEY` environment variables are set, or pass `voltOpsClient` explicitly. + +### Enable debug logging + +```ts +const adapter = new ManagedMemoryAdapter({ + databaseName: "production-memory", + debug: true, // Logs all API calls and metadata +}); +``` + +## Learn More + +- **[Semantic Search](./semantic-search.md)** - Enable vector search with managed memory +- **[Working Memory](./working-memory.md)** - Configure working memory with managed storage +- **[PostgreSQL](./postgres.md)** - Self-hosted Postgres alternative diff --git a/website/docs/agents/memory/overview.md b/website/docs/agents/memory/overview.md index 4eea3819e..93f7e0201 100644 --- a/website/docs/agents/memory/overview.md +++ b/website/docs/agents/memory/overview.md @@ -5,288 +5,122 @@ slug: /agents/memory/overview # Memory Overview -Conversational AI agents often need to remember past interactions to maintain context, understand user preferences, and provide more coherent and personalized responses. Without memory, each interaction would be treated in isolation, leading to repetitive questions and unnatural conversations. +VoltAgent's `Memory` class stores conversation history and optional semantic search vectors. Agents retrieve past messages before generating responses and persist new interactions after completion. -VoltAgent provides a unified `Memory` class with pluggable storage adapters. It stores and retrieves conversation history, and optionally supports embedding-powered semantic search and structured working memory. +## Storage Providers -## Why Use Memory? +| Provider | Package | Persistence | Use Case | +| ------------------ | ----------------------------- | ---------------------- | -------------------------------- | +| **InMemory** | `@voltagent/core` | None (RAM only) | Development, testing | +| **Managed Memory** | `@voltagent/voltagent-memory` | VoltOps-hosted | Production-ready, zero-setup | +| **LibSQL** | `@voltagent/libsql` | Local SQLite or remote | Self-hosted, edge deployments | +| **Postgres** | `@voltagent/postgres` | Self-hosted Postgres | Existing Postgres infrastructure | +| **Supabase** | `@voltagent/supabase` | Supabase | Supabase-based applications | -- **Context Preservation:** Enables agents to recall previous messages in a conversation, understanding follow-up questions and references. 
-- **Personalization:** Allows agents to remember user-specific details (like name, preferences, past requests) for a tailored experience. -- **Coherence:** Ensures conversations flow naturally without the agent constantly losing track of the topic. -- **Long-Term State:** Can be used to store summaries or key information extracted from conversations over extended periods. +## Core Features -## Default Memory Behavior +### Conversation Storage -By default, agents use in-memory storage (no persistence) with zero configuration. If you don't provide a `memory` option, VoltAgent falls back to an in-memory adapter that: +- Messages stored per `userId` and `conversationId` +- Auto-creates conversations on first message +- Configurable message limits (oldest pruned first) -1. Stores conversation history in application memory. -2. Maintains context during the application runtime. -3. Loses data when the application restarts (suitable for development and stateless deployments). +### Semantic Search (Optional) -For persistent storage across restarts, configure `Memory` with a storage adapter such as `LibSQLMemoryAdapter`, `PostgreSQLMemoryAdapter`, or `SupabaseMemoryAdapter`. See the specific adapter docs for details. +- Requires `embedding` + `vector` adapters +- Auto-embeds messages on save +- Retrieves similar past messages by content, not recency +- Merges semantic results with recent messages -## Disabling Memory +### Working Memory (Optional) -You can completely disable memory persistence and retrieval by setting the `memory` property to `false` in the `Agent` constructor: +- Stores compact context across conversation turns +- Three formats: Markdown template, JSON schema (Zod), or free-form +- Two scopes: `conversation` (default) or `user` +- Agent exposes tools: `get_working_memory`, `update_working_memory`, `clear_working_memory` -```ts -const agent = new Agent({ - name: "Stateless Assistant", - instructions: "This agent has no memory.", - model: openai("gpt-4o"), - memory: false, // disable memory entirely -}); -``` - -When memory is disabled, the agent won't store or retrieve any conversation history, making it stateless for each interaction. - -## Separate Conversation and History Memory +### Workflow State -VoltAgent manages conversation memory via the `memory` option. Observability (execution logs) is handled via OpenTelemetry and VoltOps integrations, and is not tied to conversation storage. +- Suspendable workflow checkpoint storage +- Tracks execution state, context, suspension metadata -## Working Memory +## Agent Configuration -Working memory lets the agent persist concise, important context across turns (conversation-scoped by default, optionally user-scoped). Configuration is part of the `Memory` constructor via `workingMemory`. 
- -Supported modes: - -- Template (Markdown): `workingMemory: { enabled: true, template: string }` -- JSON schema (Zod): `workingMemory: { enabled: true, schema: z.object({...}) }` -- Free-form: `workingMemory: { enabled: true }` - -Scope: - -- `scope?: 'conversation' | 'user'` (defaults to `conversation`) - -Example (template-based, conversation-scoped): +Agents accept a `memory` option: ```ts import { Agent, Memory } from "@voltagent/core"; -import { LibSQLMemoryAdapter } from "@voltagent/libsql"; import { openai } from "@ai-sdk/openai"; -const memory = new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - workingMemory: { - enabled: true, - template: ` -# Profile -- Name: -- Role: - -# Goals -- - -# Preferences -- -`, - // scope: 'conversation' // default - }, -}); - -const agent = new Agent({ +// Default: in-memory storage (no persistence) +const agent1 = new Agent({ name: "Assistant", - instructions: "Use working memory to maintain key facts.", model: openai("gpt-4o-mini"), - memory, + // memory: undefined // implicit default }); -// When the agent runs with user/conversation IDs, it appends -// working-memory instructions to the system prompt before the LLM call -const res = await agent.generateText("Let's plan this week", { - userId: "u1", - conversationId: "c1", +// Disable memory entirely +const agent2 = new Agent({ + name: "Stateless", + model: openai("gpt-4o-mini"), + memory: false, }); -``` -Example (JSON schema, user-scoped): - -```ts -import { z } from "zod"; -import { Agent, Memory } from "@voltagent/core"; +// Persistent storage import { LibSQLMemoryAdapter } from "@voltagent/libsql"; -import { openai } from "@ai-sdk/openai"; -const workingSchema = z.object({ - userProfile: z - .object({ - name: z.string().optional(), - timezone: z.string().optional(), - }) - .optional(), - tasks: z.array(z.string()).optional(), -}); - -const memory = new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - workingMemory: { - enabled: true, - scope: "user", - schema: workingSchema, - }, +const agent3 = new Agent({ + name: "Persistent", + model: openai("gpt-4o-mini"), + memory: new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + }), }); - -const agent = new Agent({ name: "Planner", model: openai("gpt-4o-mini"), memory }); ``` -Programmatic API: - -- `memory.getWorkingMemory({ conversationId?, userId? }) → Promise` -- `memory.updateWorkingMemory({ conversationId?, userId?, content })` where `content` is a string or an object matching the schema when configured (validated internally). Stores as string (Markdown or JSON) under the hood. -- `memory.clearWorkingMemory({ conversationId?, userId? })` -- `memory.getWorkingMemoryFormat() → 'markdown' | 'json' | null` -- `memory.getWorkingMemoryTemplate() → string | null` -- `memory.getWorkingMemorySchema() → z.ZodObject | null` - -Tools registered when working memory is configured: - -- `get_working_memory()` → returns the current content string -- `update_working_memory(content)` → updates content (typed to schema if configured) -- `clear_working_memory()` → clears content - -Agent prompt integration: - -- On each call with `userId` and `conversationId`, the agent appends a working-memory instruction block to the system prompt (including template/schema and current content if present). 
+## Usage with User and Conversation IDs -## Semantic Search (Embeddings + Vectors) - -To enable semantic retrieval of past messages, configure both an embedding adapter and a vector adapter. Memory embeds text parts of messages and stores vectors with metadata. - -## Message Persistence Pipeline - -VoltAgent batches every step into a single assistant response (tool call, tool result, follow-up text) before writing to memory. Saves are debounced for performance, and the agent flushes the queue when a request finishes—even on errors. The most recent step stays recorded if the loop stops midway, so conversation history remains consistent across restarts. - -Adapters: - -- `AiSdkEmbeddingAdapter` (wraps ai-sdk embedding models) -- `InMemoryVectorAdapter` (lightweight dev vector store) -- `LibSQLVectorAdapter` from `@voltagent/libsql` (persistent vectors via LibSQL/Turso/SQLite) - -Example (dev vector store): +Provide `userId` and `conversationId` in generation calls to scope memory: ```ts -import { Agent, Memory, AiSdkEmbeddingAdapter, InMemoryVectorAdapter } from "@voltagent/core"; -import { LibSQLMemoryAdapter } from "@voltagent/libsql"; -import { openai } from "@ai-sdk/openai"; - -const memory = new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), - vector: new InMemoryVectorAdapter(), - enableCache: true, // optional embedding cache -}); - -const agent = new Agent({ name: "Helper", model: openai("gpt-4o-mini"), memory }); - -// Enable semantic search per-call (defaults shown; enabled auto when vectors present) -const out = await agent.generateText("What did I say about pricing last week?", { - userId: "u1", - conversationId: "c1", - semanticMemory: { - enabled: true, - semanticLimit: 5, - semanticThreshold: 0.7, - mergeStrategy: "append", // default ('prepend' | 'append' | 'interleave') - }, -}); -``` - -Example (persistent vectors with LibSQL): - -```ts -import { Agent, Memory, AiSdkEmbeddingAdapter } from "@voltagent/core"; -import { LibSQLMemoryAdapter, LibSQLVectorAdapter } from "@voltagent/libsql"; -import { openai } from "@ai-sdk/openai"; - -const memory = new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), - vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), +const response = await agent.generateText("What did we discuss yesterday?", { + userId: "user-123", + conversationId: "thread-abc", }); - -// For ephemeral tests, use in‑memory DB: -// new LibSQLVectorAdapter({ url: ":memory:" }) // or "file::memory:" ``` -How it works: - -- On save, Memory embeds text parts of messages and stores vectors with metadata `{ messageId, conversationId, userId, role, createdAt }` and ID pattern `msg_${conversationId}_${message.id}`. -- On read with semantic search enabled, Memory searches similar messages and merges them with recent messages using the configured strategy. - -Programmatic search: - -- `memory.hasVectorSupport()` → boolean -- `memory.searchSimilar(query, { limit?, threshold?, filter? }) → Promise` - -## Memory Providers - -VoltAgent achieves persistence via swappable storage adapters you pass to `new Memory({ storage: ... 
})`: - -- **[`LibSQLMemoryAdapter`](./libsql.md):** From `@voltagent/libsql` (LibSQL/Turso/SQLite) -- **[`PostgreSQLMemoryAdapter`](./postgres.md):** From `@voltagent/postgres` -- **[`SupabaseMemoryAdapter`](./supabase.md):** From `@voltagent/supabase` -- **[`InMemoryStorageAdapter`](./in-memory.md):** Default in-memory adapter (no persistence) - -Optional components: - -- Embeddings via `AiSdkEmbeddingAdapter` (choose any ai-sdk embedding model) -- Vector store via `InMemoryVectorAdapter` (or custom) - -## How Memory Works with Agents - -When you configure an `Agent` with a memory provider instance (or use the default), VoltAgent's internal `MemoryManager` performs the following steps: - -1. **Retrieval:** Before generating a response (e.g., during `agent.generateText()`), the manager fetches relevant conversation history or state from the memory provider based on the provided `userId` and `conversationId`. -2. **Injection:** This retrieved context is typically formatted and added to the prompt sent to the LLM, giving it the necessary background information. -3. **Saving:** After an interaction completes, the new messages (user input and agent response) are saved back to the memory provider, associated with the same `userId` and `conversationId`. +**Behavior:** -These steps run whenever you call the agent's core interaction methods (`generateText`, `streamText`, `generateObject`, `streamObject`). - -## User and Conversation Identification - -To separate conversations for different users or different chat sessions within the same application, you **must** provide `userId` and `conversationId` in the options when calling agent methods directly in your code. If you are interacting with the agent via the [Core API](../../api/overview.md), you can pass these same identifiers within the `options` object in your request body. See the [API examples](../../api/endpoints/agents.md#generate-text) for details on the API usage. - -When calling agent methods directly: - -```ts -const response = await agent.generateText("Hello, how can you help me?", { - userId: "user-123", // Identifies the specific user - conversationId: "chat-session-xyz", // Identifies this specific conversation thread -}); -``` - -These identifiers work consistently across all agent generation methods (`generateText`, `streamText`, `generateObject`, `streamObject`). 
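+
+A short sketch of the first two behaviors, with hypothetical IDs and assuming `agent` is configured with memory as above: reusing the same `conversationId` keeps the thread, while passing only `userId` starts a fresh conversation on each call.
+
+```ts
+// Same conversationId across calls: the agent sees the earlier messages.
+await agent.generateText("My order number is 1234.", {
+  userId: "user-123",
+  conversationId: "support-thread-1",
+});
+await agent.generateText("Has it shipped yet?", {
+  userId: "user-123",
+  conversationId: "support-thread-1",
+});
+
+// Only userId: a new conversationId is generated, so this call starts with a clean context.
+await agent.generateText("Unrelated one-off question", { userId: "user-123" });
+```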
+- **Both provided**: Retrieves history for that specific thread +- **Only userId**: New `conversationId` generated per call (fresh context each time) +- **Neither provided**: Uses default user ID with new conversation ID ## Examples -### Default (in-memory) +### Managed Memory (Zero Setup) ```ts -import { Agent } from "@voltagent/core"; +import { Agent, Memory } from "@voltagent/core"; +import { ManagedMemoryAdapter } from "@voltagent/voltagent-memory"; +import { VoltOpsClient } from "@voltagent/core"; import { openai } from "@ai-sdk/openai"; -const agent = new Agent({ - name: "My Assistant", - instructions: "Uses default in-memory storage.", - model: openai("gpt-4o-mini"), +const voltOpsClient = new VoltOpsClient({ + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); -``` -### Persistent (LibSQL) - -```ts -import { Agent, Memory } from "@voltagent/core"; -import { LibSQLMemoryAdapter } from "@voltagent/libsql"; -import { openai } from "@ai-sdk/openai"; +const memory = new Memory({ + storage: new ManagedMemoryAdapter({ + databaseName: "production-memory", + voltOpsClient, + }), +}); const agent = new Agent({ - name: "Persistent Assistant", - instructions: "Uses LibSQL for memory.", + name: "Assistant", model: openai("gpt-4o-mini"), - memory: new Memory({ - storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - }), + memory, }); ``` @@ -296,203 +130,82 @@ const agent = new Agent({ import { Agent, Memory, AiSdkEmbeddingAdapter, InMemoryVectorAdapter } from "@voltagent/core"; import { LibSQLMemoryAdapter } from "@voltagent/libsql"; import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; const memory = new Memory({ storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), vector: new InMemoryVectorAdapter(), - workingMemory: { enabled: true }, + workingMemory: { + enabled: true, + scope: "conversation", + schema: z.object({ + preferences: z.array(z.string()).optional(), + goals: z.array(z.string()).optional(), + }), + }, }); const agent = new Agent({ - name: "Smart Memory Assistant", - instructions: "Retrieves with semantic search and tracks working memory.", + name: "Smart Assistant", model: openai("gpt-4o-mini"), memory, }); -``` - -### How User and Conversation IDs Work - -- **`userId`**: A unique string identifying the end-user. Memory entries are segregated per user. If omitted, it defaults to the string `"default"`. -- **`conversationId`**: A unique string identifying a specific conversation thread for a user. This allows a single user to have multiple parallel conversations. - - **If provided:** The agent retrieves and saves messages associated with this specific thread. - - **If omitted:** A **new random UUID is generated for each request**, effectively starting a new, separate conversation every time. This is useful for one-off tasks or ensuring a clean slate for each interaction when context isn't needed. - -**Key Behaviors:** - -1. **Context Retrieval**: Before calling the LLM, the `MemoryManager` retrieves previous messages associated with the given `userId` and `conversationId` from the memory provider. -2. **Message Storage**: After the interaction, new user input and agent responses are stored using the same `userId` and `conversationId`. -3. **Continuity**: Providing the same `userId` and `conversationId` across multiple requests keeps the context of that specific thread. -4. 
**New Conversations**: Omitting `conversationId` guarantees a fresh conversation context for each request. - -```ts -// To start a NEW conversation each time (or for single-turn interactions): -// Omit conversationId; VoltAgent generates a new one for each call. -const response1 = await agent.generateText("Help with account setup", { userId: "user-123" }); -const response2 = await agent.generateText("Question about billing issue", { userId: "user-123" }); // Starts another new conversation - -// To MAINTAIN a continuous conversation across requests: -// Always provide the SAME conversationId. -const SUPPORT_THREAD_ID = "case-987-abc"; -const responseA = await agent.generateText("My router is not working.", { - userId: "user-456", - conversationId: SUPPORT_THREAD_ID, -}); -// Agent remembers the router issue for the next call with the same ID -const responseB = await agent.generateText("I tried restarting it, still no luck.", { - userId: "user-456", - conversationId: SUPPORT_THREAD_ID, -}); -``` - -## Context Management -When interacting with an agent that has memory enabled, the `MemoryManager` retrieves recent messages for the given `userId` and `conversationId` and includes them as context in the prompt sent to the LLM. - -```ts -// The agent retrieves history for user-123/chat-session-xyz -// and includes up to N recent messages (determined by the provider/manager) in the LLM prompt. -const response = await agent.generateText("What was the first thing I asked you?", { +// Enable semantic search per call +const result = await agent.generateText("What preferences did I mention?", { userId: "user-123", - conversationId: "chat-session-xyz", - // contextLimit: 10, // Note: contextLimit is typically managed by MemoryOptions now + conversationId: "thread-abc", + semanticMemory: { + enabled: true, + semanticLimit: 5, + semanticThreshold: 0.7, + }, }); ``` -How many messages are retrieved is often determined by the `storageLimit` configured on the Memory Provider or internal logic within the `MemoryManager`. This is crucial for: - -1. **Coherence**: Providing the LLM with enough history to understand the ongoing conversation. -2. **Cost/Performance**: Limiting the context size to manage LLM token usage (cost) and potentially reduce latency. -3. **Relevance**: Ensuring the context is relevant without overwhelming the LLM with excessive or old information. - -## Implementing Custom Memory Providers - -To use a custom database or storage system, implement the `StorageAdapter` interface (`@voltagent/core` → `memory/types`). Observability is separate. The adapter only persists conversation messages, working memory, and workflow state for suspension/resume. Embedding and vector search are separate adapters. - -Required methods (summary): - -- Messages: `addMessage`, `addMessages`, `getMessages`, `clearMessages` -- Conversations: `createConversation`, `getConversation`, `getConversations`, `getConversationsByUserId`, `queryConversations`, `updateConversation`, `deleteConversation` -- Working memory: `getWorkingMemory`, `setWorkingMemory`, `deleteWorkingMemory` -- Workflow state: `getWorkflowState`, `setWorkflowState`, `updateWorkflowState`, `getSuspendedWorkflowStates` - -Implementation notes: - -- Store `UIMessage` values as data. Return messages in chronological order (oldest first). -- Support `storageLimit`. When the limit is exceeded, prune the oldest messages. -- Working memory content is a string. 
When a schema is configured, `Memory` converts the provided object to a JSON string before calling the adapter. +## Custom Adapters -Skeleton: +Implement the `StorageAdapter` interface to use custom databases: ```ts -import type { - StorageAdapter, - UIMessage, - Conversation, - CreateConversationInput, - ConversationQueryOptions, - WorkflowStateEntry, - WorkingMemoryScope, -} from "@voltagent/core"; +import type { StorageAdapter, UIMessage, Conversation } from "@voltagent/core"; export class MyStorageAdapter implements StorageAdapter { - // Messages - async addMessage(msg: UIMessage, userId: string, conversationId: string): Promise {} - async addMessages(msgs: UIMessage[], userId: string, conversationId: string): Promise {} + async addMessage(msg: UIMessage, userId: string, conversationId: string): Promise { + // Store message in your database + } + async getMessages( userId: string, conversationId: string, - options?: { limit?: number; before?: Date; after?: Date; roles?: string[] } + options?: { limit?: number } ): Promise { + // Retrieve messages in chronological order (oldest first) return []; } - async clearMessages(userId: string, conversationId?: string): Promise {} - // Conversations async createConversation(input: CreateConversationInput): Promise { - throw new Error("Not implemented"); - } - async getConversation(id: string): Promise { - return null; - } - async getConversations(resourceId: string): Promise { - return []; - } - async getConversationsByUserId( - userId: string, - options?: Omit - ): Promise { - return []; - } - async queryConversations(options: ConversationQueryOptions): Promise { - return []; - } - async updateConversation( - id: string, - updates: Partial> - ): Promise { - throw new Error("Not implemented"); - } - async deleteConversation(id: string): Promise {} - - // Working memory - async getWorkingMemory(params: { - conversationId?: string; - userId?: string; - scope: WorkingMemoryScope; - }): Promise { - return null; - } - async setWorkingMemory(params: { - conversationId?: string; - userId?: string; - content: string; - scope: WorkingMemoryScope; - }): Promise {} - async deleteWorkingMemory(params: { - conversationId?: string; - userId?: string; - scope: WorkingMemoryScope; - }): Promise {} - - // Workflow state - async getWorkflowState(id: string): Promise { - return null; - } - async setWorkflowState(id: string, state: WorkflowStateEntry): Promise {} - async updateWorkflowState(id: string, updates: Partial): Promise {} - async getSuspendedWorkflowStates(workflowId: string): Promise { - return []; + // Create conversation record } -} - -// Usage example -import { Agent, Memory } from "@voltagent/core"; -import { openai } from "@ai-sdk/openai"; - -const memory = new Memory({ - storage: new MyStorageAdapter(), -}); -const agent = new Agent({ - name: "Helper", - model: openai("gpt-4o-mini"), - memory, -}); + // ... implement remaining StorageAdapter methods +} ``` -## Best Practices +Required methods: -1. **Choose the Right Adapter**: Use `InMemoryStorageAdapter` for development/testing or stateless deployments. Use `LibSQLMemoryAdapter` from `@voltagent/libsql` (local/Turso) or a database-backed adapter (like `PostgreSQLMemoryAdapter` in `@voltagent/postgres` or `SupabaseMemoryAdapter` in `@voltagent/supabase`) for production persistence. -2. **User Privacy**: Be mindful of storing conversation data. 
Implement clear data retention policies and provide mechanisms for users to manage or delete their history (e.g., using `deleteConversation` or custom logic) if required by privacy regulations. -3. **Context Management**: While `contextLimit` is less directly used now, be aware of the `storageLimit` on your memory provider, as this often dictates the maximum history retrieved. -4. **Memory Efficiency**: For high-volume applications using persistent storage, monitor database size and performance. Consider setting appropriate `storageLimit` values on your memory provider to prevent unbounded growth and ensure efficient retrieval. -5. **Error Handling**: Wrap agent interactions in `try...catch` blocks, as memory operations (especially with external databases) can potentially fail. -6. **Use `userId` and `conversationId`**: Always provide these identifiers in production applications to correctly scope memory and maintain context for individual users and conversation threads. +- Messages: `addMessage`, `addMessages`, `getMessages`, `clearMessages` +- Conversations: `createConversation`, `getConversation`, `getConversations`, `getConversationsByUserId`, `queryConversations`, `updateConversation`, `deleteConversation` +- Working memory: `getWorkingMemory`, `setWorkingMemory`, `deleteWorkingMemory` +- Workflow state: `getWorkflowState`, `setWorkflowState`, `updateWorkflowState`, `getSuspendedWorkflowStates` -Explore the specific documentation for each provider to learn more: +## Learn More -- **[LibSQL / Turso / SQLite](./libsql.md)** -- **[In-Memory Storage](./in-memory.md)** -- **[Supabase](./supabase.md)** +- **[Managed Memory](./managed-memory.md)** - Production-ready hosted memory with zero setup +- **[Semantic Search](./semantic-search.md)** - Retrieve messages by similarity +- **[Working Memory](./working-memory.md)** - Maintain compact context across turns +- **[LibSQL / SQLite](./libsql.md)** - Self-hosted SQLite or edge deployments +- **[PostgreSQL](./postgres.md)** - Self-hosted Postgres adapter +- **[Supabase](./supabase.md)** - Supabase integration +- **[In-Memory Storage](./in-memory.md)** - Default ephemeral storage diff --git a/website/docs/agents/memory/postgres.md b/website/docs/agents/memory/postgres.md index 9b3d13001..17d0b58b7 100644 --- a/website/docs/agents/memory/postgres.md +++ b/website/docs/agents/memory/postgres.md @@ -8,15 +8,9 @@ import TabItem from '@theme/TabItem'; # PostgreSQL Memory -The `@voltagent/postgres` package provides a `PostgreSQLMemoryAdapter` storage adapter for the `Memory` class, using PostgreSQL for persistent conversation storage. +`PostgreSQLMemoryAdapter` stores conversations in PostgreSQL for production applications with existing Postgres infrastructure. -This is ideal for production applications requiring enterprise-grade database storage, complex queries, or integration with existing PostgreSQL infrastructure. - -## Setup - -### Install Package - -First, install the necessary packages: +## Installation @@ -42,249 +36,207 @@ pnpm add @voltagent/postgres -### Database Requirements - -You need a PostgreSQL server (version 12 or higher recommended). The provider **automatically creates** all necessary tables and indexes when initialized, so no manual SQL setup is required. 
- -### Credentials - -You'll need your PostgreSQL connection details: - -- **Host:** Your PostgreSQL server hostname -- **Port:** Usually 5432 -- **Database:** Database name -- **User:** Database username -- **Password:** Database password - -Store these credentials securely, typically as environment variables or use a connection string format. - ## Configuration -Import `PostgreSQLMemoryAdapter` and initialize it with your credentials, then pass it to `new Memory({ storage: ... })`: - -```typescript +```ts import { Agent, Memory } from "@voltagent/core"; import { PostgreSQLMemoryAdapter } from "@voltagent/postgres"; import { openai } from "@ai-sdk/openai"; -// Using connection string (recommended) -const storage = new PostgreSQLMemoryAdapter({ - connection: process.env.DATABASE_URL || "postgresql://postgres:password@localhost:5432/mydb", - // Optional: Adjust connection pool size - maxConnections: 10, - // Optional: Specify a custom base table name prefix - tablePrefix: "voltagent_memory", // Defaults to 'voltagent_memory' - // Optional: Storage limit (max number of messages per user/conversation) - storageLimit: 100, // Defaults to 100 +// Using connection string +const memory = new Memory({ + storage: new PostgreSQLMemoryAdapter({ + connection: process.env.DATABASE_URL!, + // or: "postgresql://user:password@localhost:5432/mydb" + }), }); -// Alternative: Using connection object -const storage = new PostgreSQLMemoryAdapter({ - connection: { - host: process.env.DB_HOST || "localhost", - port: parseInt(process.env.DB_PORT || "5432"), - database: process.env.DB_NAME || "mydb", - user: process.env.DB_USER || "postgres", - password: process.env.DB_PASSWORD, - ssl: process.env.NODE_ENV === "production", // Enable SSL for production - }, - maxConnections: 10, - tablePrefix: "voltagent_memory", - storageLimit: 100, +// Using connection object +const memory = new Memory({ + storage: new PostgreSQLMemoryAdapter({ + connection: { + host: process.env.DB_HOST || "localhost", + port: parseInt(process.env.DB_PORT || "5432"), + database: process.env.DB_NAME!, + user: process.env.DB_USER!, + password: process.env.DB_PASSWORD!, + ssl: process.env.NODE_ENV === "production", + }, + }), }); const agent = new Agent({ - name: "PostgreSQL Memory Agent", - instructions: "An agent using PostgreSQL for memory.", - model: openai("gpt-4o"), - memory: new Memory({ storage }), + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, }); ``` -**Configuration Options:** - -- `connection` (string or object, required): Database connection details. - - **Connection string:** `"postgresql://user:password@host:port/database"` - - **Connection object:** `{ host, port, database, user, password, ssl }` -- `maxConnections` (number, optional): Maximum connections in the pool. Defaults to `10`. -- `tablePrefix` (string, optional): Prefix for database table names. Defaults to `voltagent_memory`. -- `storageLimit` (number, optional): Maximum messages to retain per conversation. Defaults to `100`. -- `debug` (boolean, optional): Enable debug logging. Defaults to `false`. 
+### Configuration Options -## Conversation Management +| Option | Type | Description | +| ---------------- | ------------------ | -------------------------------------------------------------------------------------- | +| `connection` | `string \| object` | Connection string or object with `host`, `port`, `database`, `user`, `password`, `ssl` | +| `maxConnections` | `number` | Connection pool size (default: `10`) | +| `tablePrefix` | `string` | Table name prefix (default: `voltagent_memory`) | +| `storageLimit` | `number` | Max messages per conversation (default: `100`) | +| `debug` | `boolean` | Enable debug logging (default: `false`) | -### Get User's Conversations +## Features -```typescript -// Get recent conversations for a user -const conversations = await storage.getConversationsByUserId("user-123", { - limit: 50, - orderBy: "updated_at", - orderDirection: "DESC", -}); - -// Display in sidebar like ChatGPT -conversations.forEach((conv) => { - console.log(`${conv.title} - ${conv.updatedAt}`); -}); -``` +### Automatic Schema Creation -### Pagination and Sorting +Tables are created automatically on first use: -```typescript -// Recent chats with sorting -const recentChats = await storage.queryConversations({ - userId: "user-123", - limit: 20, - orderBy: "updated_at", - orderDirection: "DESC", -}); - -// Offset-based pagination -const page1 = await storage.queryConversations({ userId: "user-123", limit: 10, offset: 0 }); -const page2 = await storage.queryConversations({ userId: "user-123", limit: 10, offset: 10 }); -``` +- `${tablePrefix}_users` +- `${tablePrefix}_conversations` +- `${tablePrefix}_messages` +- `${tablePrefix}_workflow_states` -## Querying Conversations +PostgreSQL version 12 or higher recommended. -The PostgreSQL storage provides powerful conversation querying capabilities with filtering, pagination, and sorting options: +### Conversation Storage -```typescript -// Query with multiple filters -const workConversations = await storage.queryConversations({ - userId: "user-123", - resourceId: "work-agent", - limit: 25, - offset: 0, - orderBy: "created_at", - orderDirection: "DESC", -}); +- Messages stored per `userId` and `conversationId` +- Oldest messages pruned when `storageLimit` exceeded +- All `StorageAdapter` methods supported +- Supports complex queries with filtering, pagination, and sorting -// Get all conversations for a user -const userConversations = await storage.queryConversations({ - userId: "user-123", - limit: 50, -}); +### Working Memory -// Get conversations for a specific resource -const resourceConversations = await storage.queryConversations({ - resourceId: "chatbot-v1", - limit: 100, - orderBy: "updated_at", -}); +Supports both conversation and user-scoped working memory: -// Admin view - get all conversations -const allConversations = await storage.queryConversations({ - limit: 200, - orderBy: "created_at", - orderDirection: "ASC", +```ts +const memory = new Memory({ + storage: new PostgreSQLMemoryAdapter({ + connection: process.env.DATABASE_URL!, + }), + workingMemory: { + enabled: true, + scope: "user", // or "conversation" + schema: z.object({ + preferences: z.array(z.string()).optional(), + }), + }, }); ``` -**Query Options:** +Storage: + +- Conversation scope: `conversations.metadata.workingMemory` +- User scope: `${tablePrefix}_users.metadata.workingMemory` -- `userId` (optional): Filter conversations by specific user -- `resourceId` (optional): Filter conversations by specific resource -- `limit` (optional): Maximum number of conversations 
to return (default: 50) -- `offset` (optional): Number of conversations to skip for pagination (default: 0) -- `orderBy` (optional): Field to sort by: 'created_at', 'updated_at', or 'title' (default: 'updated_at') -- `orderDirection` (optional): Sort direction: 'ASC' or 'DESC' (default: 'DESC') +See [Working Memory](./working-memory.md) for configuration details. -## Getting Conversation Messages +### PostgreSQL Vector Storage -Retrieve messages for a specific conversation: +Store vector embeddings directly in PostgreSQL for semantic search (no extensions required): -```typescript -// Get recent messages (chronological order) -const messages = await storage.getMessages("user-123", "conversation-456", { limit: 50 }); +```ts +import { Memory, AiSdkEmbeddingAdapter } from "@voltagent/core"; +import { PostgreSQLMemoryAdapter, PostgresVectorAdapter } from "@voltagent/postgres"; +import { openai } from "@ai-sdk/openai"; -// Time-based pagination -const older = await storage.getMessages("user-123", "conversation-456", { - before: new Date("2024-01-01T00:00:00Z"), - limit: 50, +const memory = new Memory({ + storage: new PostgreSQLMemoryAdapter({ + connection: process.env.DATABASE_URL!, + }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new PostgresVectorAdapter({ + connection: process.env.DATABASE_URL!, + }), }); ``` -**Message Query Options:** - -- `limit` (optional): Maximum number of messages to return (default: 100) -- `before` (optional): Only messages created before this date -- `after` (optional): Only messages created after this date -- `roles` (optional): Filter by roles, e.g., `["user", "assistant"]` - -Messages are returned in chronological order (oldest first) for natural conversation flow. - -## Automatic Table Creation - -`PostgreSQLMemoryAdapter` **automatically creates** the necessary tables (with your `tablePrefix`) and indexes if they don't already exist: - -- `${tablePrefix}_users` -- `${tablePrefix}_conversations` -- `${tablePrefix}_messages` -- `${tablePrefix}_workflow_states` - -This simplifies setup for both development and production. +**How It Works:** -## Production Considerations +- **BYTEA Storage**: Vectors are stored as binary data using PostgreSQL's native `BYTEA` type +- **In-Memory Similarity**: Cosine similarity is computed in-memory after loading vectors +- **No Extensions**: Works with vanilla PostgreSQL 12+ without any extensions +- **Automatic Setup**: Creates tables and indexes automatically on first use -For production applications, consider: - -- **SSL Connections:** Enable SSL by setting `ssl: true` in your connection configuration. -- **Connection Pooling:** Adjust `maxConnections` based on your application's concurrent usage. -- **Environment Variables:** Store database credentials securely using environment variables. -- **Database Backups:** Implement regular backup strategies for your PostgreSQL database. +**Configuration Options:** -## Use Cases +```ts +const vectorAdapter = new PostgresVectorAdapter({ + connection: process.env.DATABASE_URL!, -- **Production Applications:** Enterprise-grade applications requiring robust, scalable database storage. -- **Existing PostgreSQL Infrastructure:** Applications already using PostgreSQL for other data. -- **Complex Queries:** Scenarios requiring advanced SQL capabilities or data analytics. -- **High Availability:** Applications requiring database replication and failover capabilities. 
-- **Team Collaboration:** Multi-user applications where conversation data needs to be shared or analyzed. + // Optional: customize table name (default: "voltagent_vector") + tablePrefix: "custom_vector", -## Error Handling + // Optional: LRU cache size for frequently accessed vectors (default: 100) + cacheSize: 100, -```typescript -try { - await storage.addMessage(message, userId, conversationId); -} catch (error) { - if (error.message.includes("foreign key constraint")) { - console.error("Conversation does not exist"); - } else { - console.error("Database error:", error); - } -} + // Optional: max vector dimensions (default: 1536) + maxVectorDimensions: 1536, +}); ``` -## Working Memory +**Benefits:** -`PostgreSQLMemoryAdapter` implements working memory operations used by `Memory`: +- **Unified Storage**: Keep messages and vectors in the same database +- **No Extensions**: Works with any PostgreSQL instance (managed or self-hosted) +- **Simple Setup**: No extension installation or special configuration needed +- **Standard Tools**: Use familiar PostgreSQL monitoring and backup tools -- Conversation-scoped working memory is stored under `conversations.metadata.workingMemory`. -- User-scoped working memory is stored in the `${tablePrefix}_users` table `metadata.workingMemory` field. +**Performance Considerations:** -Enable via `Memory({ workingMemory: { enabled: true, template | schema, scope } })`. See: [Working Memory](./working-memory.md). +This adapter loads all vectors into memory for similarity computation, which works well for: -Programmatic APIs (via `Memory`): +- Small to medium datasets (< 10,000 vectors) +- Development and prototyping +- Environments where extension installation is restricted -- `getWorkingMemory({ conversationId?, userId? })` -- `updateWorkingMemory({ conversationId?, userId?, content })` -- `clearWorkingMemory({ conversationId?, userId? })` +For large-scale production with millions of vectors, consider specialized vector databases or use `InMemoryVectorAdapter` with periodic persistence. -## Semantic Search (Embeddings + Vectors) +See [Semantic Search](./semantic-search.md) for detailed configuration and usage. -Vector search is configured on `Memory` independently of the storage adapter. To enable semantic retrieval with PostgreSQL storage, attach an embedding adapter and a vector adapter (e.g., in-memory for development): +## Production Setup ```ts -import { Memory, AiSdkEmbeddingAdapter, InMemoryVectorAdapter } from "@voltagent/core"; +import { Agent, Memory } from "@voltagent/core"; import { PostgreSQLMemoryAdapter } from "@voltagent/postgres"; import { openai } from "@ai-sdk/openai"; const memory = new Memory({ - storage: new PostgreSQLMemoryAdapter({ connection: process.env.DATABASE_URL! }), - embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), - vector: new InMemoryVectorAdapter(), + storage: new PostgreSQLMemoryAdapter({ + connection: { + host: process.env.DB_HOST!, + port: parseInt(process.env.DB_PORT || "5432"), + database: process.env.DB_NAME!, + user: process.env.DB_USER!, + password: process.env.DB_PASSWORD!, + ssl: true, // Enable SSL for production + }, + maxConnections: 20, // Adjust based on load + storageLimit: 200, // Retain more history + }), +}); + +const agent = new Agent({ + name: "Production Assistant", + model: openai("gpt-4o-mini"), + memory, }); ``` -Use with agent calls by passing `semanticMemory` options. See: [Semantic Search](./semantic-search.md). 
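Memory operations against an external database can fail at runtime (dropped connections, missing conversation rows), so production code should wrap agent calls. A minimal sketch, assuming the PostgreSQL adapter shown above and that errors surface as standard `Error` objects; the exact messages depend on the Postgres driver and are only illustrative:

```ts
import { Agent, Memory } from "@voltagent/core";
import { PostgreSQLMemoryAdapter } from "@voltagent/postgres";
import { openai } from "@ai-sdk/openai";

const memory = new Memory({
  storage: new PostgreSQLMemoryAdapter({ connection: process.env.DATABASE_URL! }),
});

const agent = new Agent({
  name: "Assistant",
  model: openai("gpt-4o-mini"),
  memory,
});

try {
  // result.text assumed per ai-sdk conventions
  const result = await agent.generateText("Summarize our last discussion.", {
    userId: "user-123",
    conversationId: "conv-456",
  });
  console.log(result.text);
} catch (error) {
  // Error shapes depend on the driver; inspect the message defensively.
  if (error instanceof Error && error.message.includes("foreign key constraint")) {
    console.error("Conversation record is missing");
  } else {
    console.error("Database or model error:", error);
  }
}
```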
+### Security + +- Use SSL connections in production (`ssl: true`) +- Store credentials in environment variables +- Implement regular database backups +- Adjust `maxConnections` based on concurrent usage + +**Use cases:** + +- Applications with existing Postgres infrastructure +- High-availability requirements (replication, failover) +- Complex SQL queries or analytics on conversation data + +For production-ready zero-setup hosting, see [Managed Memory](./managed-memory.md). + +## Learn More + +- **[Managed Memory](./managed-memory.md)** - Production-ready hosted memory with zero setup +- **[Working Memory](./working-memory.md)** - Maintain compact context +- **[Semantic Search](./semantic-search.md)** - Vector search configuration diff --git a/website/docs/agents/memory/semantic-search.md b/website/docs/agents/memory/semantic-search.md index 7f9ab6d97..61486d4eb 100644 --- a/website/docs/agents/memory/semantic-search.md +++ b/website/docs/agents/memory/semantic-search.md @@ -5,12 +5,7 @@ slug: /agents/memory/semantic-search # Semantic Search -Semantic search retrieves past messages by similarity. It requires: - -- An embedding adapter to create vectors from text -- A vector adapter to store and search vectors - -The core provides `AiSdkEmbeddingAdapter` and `InMemoryVectorAdapter`. For persistent vectors, use `LibSQLVectorAdapter` from `@voltagent/libsql`. +Semantic search retrieves past messages by similarity rather than recency. It requires an embedding adapter (text to vectors) and a vector adapter (storage and search). ## Configuration @@ -22,72 +17,208 @@ import { openai } from "@ai-sdk/openai"; const memory = new Memory({ storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), - // Option A (dev): - // vector: new InMemoryVectorAdapter(), - // Option B (persistent vectors): - vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), - enableCache: true, // optional cache for embeddings + vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), // or InMemoryVectorAdapter() for dev + enableCache: true, // optional embedding cache }); -const agent = new Agent({ name: "assistant", model: openai("gpt-4o-mini"), memory }); +const agent = new Agent({ + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, +}); ``` -### Call Options +### Available Adapters + +**Embedding:** + +- `AiSdkEmbeddingAdapter` - Wraps any AI SDK embedding model + +**Vector Storage:** + +- `InMemoryVectorAdapter` (`@voltagent/core`) - Development only +- `LibSQLVectorAdapter` (`@voltagent/libsql`) - Persistent vectors in SQLite/LibSQL/Turso +- `PostgresVectorAdapter` (`@voltagent/postgres`) - Persistent vectors in Postgres with pgvector +- `ManagedMemoryVectorAdapter` (`@voltagent/voltagent-memory`) - VoltOps-hosted vectors -Enable semantic search per call using `semanticMemory` (defaults shown below): +## Usage + +Enable semantic search per generation call: ```ts -const out = await agent.generateText("What did we decide about pricing?", { - userId: "u1", - conversationId: "c1", +const result = await agent.generateText("What pricing model did we discuss?", { + userId: "user-123", + conversationId: "thread-abc", semanticMemory: { - enabled: true, // default: true when vector support is present - semanticLimit: 5, // default - semanticThreshold: 0.7, // default - mergeStrategy: "append", // default ('prepend' | 'append' | 'interleave') + enabled: true, // default: auto-enabled when 
vector support is present + semanticLimit: 5, // number of similar messages to retrieve + semanticThreshold: 0.7, // minimum similarity score (0-1) + mergeStrategy: "append", // "prepend" | "append" | "interleave" }, }); ``` -## Behavior +### Default Behavior + +When `embedding` and `vector` adapters are configured: + +- Semantic search auto-enables for calls with `userId` and `conversationId` +- Default `semanticLimit`: 5 messages +- Default `semanticThreshold`: 0.7 +- Default `mergeStrategy`: `"append"` (recent messages first, then similar messages) + +### Merge Strategies -When vectors are configured, `Memory` embeds text parts of messages and stores them with IDs: +- **`append`** (default): `[recent messages] + [similar messages]` - preserves chronological order +- **`prepend`**: `[similar messages] + [recent messages]` - emphasizes relevance +- **`interleave`**: Alternates between similar and recent messages -- `msg_${conversationId}_${message.id}` +## How It Works -Each vector has metadata: +### On Message Save -- `messageId`, `conversationId`, `userId`, `role`, `createdAt` +When saving messages, the `Memory` class: -On read with semantic search enabled: +1. Extracts text content from `UIMessage.parts` +2. Generates embeddings via the embedding adapter +3. Stores vectors with metadata: + - ID: `msg_${conversationId}_${message.id}` + - Metadata: `{ messageId, conversationId, userId, role, createdAt }` -1. Embed the current query. -2. Search similar vectors with `limit` and `threshold`. -3. Load the matching messages. -4. Merge with recent messages using `mergeStrategy`. +### On Message Retrieval -## Programmatic Search +When semantic search is enabled: -`Memory` also exposes a direct API: +1. Embed the current query +2. Search for similar vectors using the vector adapter +3. Retrieve matching messages by ID +4. Merge with recent messages using the configured strategy +5. Remove duplicates (messages in both sets) -- `hasVectorSupport() → boolean` -- `searchSimilar(query: string, { limit?, threshold?, filter? }) → Promise` +## Programmatic API -The in‑memory vector adapter uses cosine similarity and supports a metadata `filter` on stored items. +Direct search without agent generation: -LibSQL vector adapter persists vectors as BLOBs with metadata and supports `limit`, `threshold`, and metadata `filter`. For tests, prefer `url: ":memory:"` (or `"file::memory:"`); for production, use a file path (e.g., `file:./.voltagent/memory.db`) or a remote Turso URL. +```ts +// Check if vectors are configured +const hasVectors = memory.hasVectorSupport(); // boolean + +// Search similar messages +const results = await memory.searchSimilar("pricing discussion", { + limit: 10, + threshold: 0.8, + filter: { userId: "user-123", conversationId: "thread-abc" }, +}); + +for (const result of results) { + console.log(result.id, result.score, result.metadata); +} +``` -Note on defaults: +## Embedding Cache -- Semantic memory auto‑enables when you pass `userId` and `conversationId` and your `Memory` has both an embedding and a vector adapter. -- Default merge strategy is `append` to preserve recency first and attach semantically similar messages afterwards. 
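To make the merge strategies described above concrete, here is an illustrative sketch of how recent and semantically similar messages might be combined and de-duplicated. It mirrors the documented behavior only; it is not the library's internal implementation:

```ts
type Strategy = "prepend" | "append" | "interleave";

// Combine recent history with semantically similar messages, then drop duplicates by id.
function mergeMessages<T extends { id: string }>(recent: T[], similar: T[], strategy: Strategy): T[] {
  let merged: T[] = [];
  if (strategy === "append") {
    merged = [...recent, ...similar]; // recent first, similar appended
  } else if (strategy === "prepend") {
    merged = [...similar, ...recent]; // similar first, recent afterwards
  } else {
    // interleave: alternate between similar and recent messages
    const max = Math.max(recent.length, similar.length);
    for (let i = 0; i < max; i++) {
      if (similar[i]) merged.push(similar[i]);
      if (recent[i]) merged.push(recent[i]);
    }
  }
  const seen = new Set<string>();
  const deduped: T[] = [];
  for (const message of merged) {
    if (!seen.has(message.id)) {
      seen.add(message.id);
      deduped.push(message);
    }
  }
  return deduped;
}
```

In practice you only pick the strategy via the `mergeStrategy` option in `semanticMemory`; the merging itself is handled by `Memory`.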
+Enable caching to avoid re-embedding identical text: + +```ts +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), + enableCache: true, // enable cache + cacheSize: 1000, // max entries (default: 1000) + cacheTTL: 3600000, // TTL in ms (default: 1 hour) +}); +``` + +The cache stores `text → vector` mappings in memory with LRU eviction. + +## Vector Adapters + +### InMemoryVectorAdapter + +```ts +import { InMemoryVectorAdapter } from "@voltagent/core"; + +const vector = new InMemoryVectorAdapter(); +``` -## Embedding Details +- Uses cosine similarity +- Supports metadata filtering +- Lost on restart (use for development only) -`AiSdkEmbeddingAdapter` wraps ai‑sdk embedding models. It supports: +### LibSQLVectorAdapter + +```ts +import { LibSQLVectorAdapter } from "@voltagent/libsql"; + +// Local SQLite +const vector = new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }); + +// In-memory (testing) +const vector = new LibSQLVectorAdapter({ url: ":memory:" }); + +// Turso +const vector = new LibSQLVectorAdapter({ + url: "libsql://your-db.turso.io", + authToken: process.env.TURSO_AUTH_TOKEN, +}); +``` + +- Stores vectors as BLOBs +- Supports metadata filtering and thresholds +- Persistent across restarts + +### ManagedMemoryVectorAdapter + +```ts +import { ManagedMemoryVectorAdapter } from "@voltagent/voltagent-memory"; + +const vector = new ManagedMemoryVectorAdapter({ + databaseName: "production-memory", + // voltOpsClient optional (auto-resolves from environment) +}); +``` + +- VoltOps-hosted vectors with zero setup +- See [Managed Memory](./managed-memory.md) for configuration + +## Example: Full Semantic Search Setup + +```ts +import { Agent, Memory, AiSdkEmbeddingAdapter } from "@voltagent/core"; +import { LibSQLMemoryAdapter, LibSQLVectorAdapter } from "@voltagent/libsql"; +import { openai } from "@ai-sdk/openai"; + +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new LibSQLVectorAdapter({ url: "file:./.voltagent/memory.db" }), + enableCache: true, +}); + +const agent = new Agent({ + name: "Research Assistant", + instructions: "Help users recall past discussions and find relevant information.", + model: openai("gpt-4o-mini"), + memory, +}); + +// Semantic search automatically enabled +const result = await agent.generateText( + "What did we decide about the API authentication approach?", + { + userId: "user-123", + conversationId: "project-alpha", + semanticMemory: { + semanticLimit: 10, + semanticThreshold: 0.75, + }, + } +); +``` -- Single and batch embedding -- Optional normalization (`normalize: boolean`) -- Basic batching (`maxBatchSize`) and a simple in‑process cache (`enableCache`, `cacheSize`, `cacheTTL` on `Memory`) +## Learn More -The embedding dimensions are inferred after the first call. 
+ +- **[Working Memory](./working-memory.md)** - Maintain compact context across turns +- **[Managed Memory](./managed-memory.md)** - Zero-setup vector storage +- **[LibSQL / Turso](./libsql.md)** - Self-hosted vector storage diff --git a/website/docs/agents/memory/supabase.md index b871ae08b..3ac5a6b60 100644 --- a/website/docs/agents/memory/supabase.md +++ b/website/docs/agents/memory/supabase.md @@ -8,15 +8,9 @@ import TabItem from '@theme/TabItem'; # Supabase Memory -The `@voltagent/supabase` package provides a `SupabaseMemoryAdapter` storage adapter for the `Memory` class that uses a [Supabase](https://supabase.com) project (PostgreSQL database) for persistent storage of conversation memory. +`SupabaseMemoryAdapter` stores conversations in Supabase Postgres for applications already using Supabase. -This is a good choice if your application is already built on Supabase or if you require a robust, scalable PostgreSQL backend with managed features like authentication, real-time subscriptions, and storage. - -## Setup - -### Install Package - -First, install the necessary packages: +## Installation @@ -42,21 +36,15 @@ pnpm add @voltagent/supabase @supabase/supabase-js -### Database Setup +## Database Setup -Run the SQL below in the Supabase SQL Editor. Replace the `voltagent_memory` prefix if you configure a different `tableName`. +Run this SQL in your Supabase SQL Editor (adjust the table prefix if needed):
-Fresh installation SQL +Schema SQL (click to expand) ```sql --- Base table names (change prefix if needed) --- conversations: voltagent_memory_conversations --- messages: voltagent_memory_messages --- users: voltagent_memory_users --- workflow: voltagent_memory_workflow_states - --- Users table (for user‑scoped working memory) +-- Users table (for user-scoped working memory) CREATE TABLE IF NOT EXISTS voltagent_memory_users ( id TEXT PRIMARY KEY, metadata JSONB, @@ -124,291 +112,149 @@ CREATE INDEX IF NOT EXISTS idx_voltagent_memory_workflow_states_status
-
-Migration from older schema to current +## Credentials -```sql --- Tables --- conversations: voltagent_memory_conversations --- messages: voltagent_memory_messages --- users: voltagent_memory_users --- workflow: voltagent_memory_workflow_states - --- 1) Ensure conversations has required columns -ALTER TABLE voltagent_memory_conversations - ADD COLUMN IF NOT EXISTS user_id TEXT NOT NULL DEFAULT 'default', - ADD COLUMN IF NOT EXISTS resource_id TEXT NOT NULL DEFAULT ''; - --- 2) Add V2 columns to messages -ALTER TABLE voltagent_memory_messages - ADD COLUMN IF NOT EXISTS parts JSONB, - ADD COLUMN IF NOT EXISTS metadata JSONB, - ADD COLUMN IF NOT EXISTS format_version INTEGER DEFAULT 2, - ADD COLUMN IF NOT EXISTS user_id TEXT NOT NULL DEFAULT 'default'; - --- 3) Make legacy columns nullable if present -ALTER TABLE voltagent_memory_messages - ALTER COLUMN content DROP NOT NULL, - ALTER COLUMN type DROP NOT NULL; - --- 4) Create users table for user‑scoped working memory -CREATE TABLE IF NOT EXISTS voltagent_memory_users ( - id TEXT PRIMARY KEY, - metadata JSONB, - created_at TIMESTAMPTZ NOT NULL DEFAULT timezone('utc'::text, now()), - updated_at TIMESTAMPTZ NOT NULL DEFAULT timezone('utc'::text, now()) -); +Get your Supabase credentials: --- 5) Create workflow states table -CREATE TABLE IF NOT EXISTS voltagent_memory_workflow_states ( - id TEXT PRIMARY KEY, - workflow_id TEXT NOT NULL, - workflow_name TEXT NOT NULL, - status TEXT NOT NULL, - suspension JSONB, - user_id TEXT, - conversation_id TEXT, - metadata JSONB, - created_at TIMESTAMPTZ NOT NULL, - updated_at TIMESTAMPTZ NOT NULL -); +1. Go to [Supabase Dashboard](https://app.supabase.com) +2. Open your project +3. Go to **Project Settings** → **API** +4. Copy **Project URL** and **anon key** --- 6) Indexes -CREATE INDEX IF NOT EXISTS idx_voltagent_memory_conversations_user_id - ON voltagent_memory_conversations(user_id); - -CREATE INDEX IF NOT EXISTS idx_voltagent_memory_conversations_resource_id - ON voltagent_memory_conversations(resource_id); - -CREATE INDEX IF NOT EXISTS idx_voltagent_memory_messages_conversation_id - ON voltagent_memory_messages(conversation_id); - -CREATE INDEX IF NOT EXISTS idx_voltagent_memory_messages_created_at - ON voltagent_memory_messages(created_at); - -CREATE INDEX IF NOT EXISTS idx_voltagent_memory_workflow_states_workflow_id - ON voltagent_memory_workflow_states(workflow_id); - -CREATE INDEX IF NOT EXISTS idx_voltagent_memory_workflow_states_status - ON voltagent_memory_workflow_states(status); -``` - -
- -### Credentials - -You will need your Supabase project's URL and `anon` key. - -1. Navigate to your project in the [Supabase Dashboard](https://app.supabase.com). -2. Go to **Project Settings** (the gear icon). -3. Select the **API** section. -4. Find your **Project URL** and the **Project API key** labelled `anon` (public). - -Store these credentials securely, typically as environment variables (e.g., `SUPABASE_URL` and `SUPABASE_KEY`). +Store as environment variables: `SUPABASE_URL` and `SUPABASE_KEY` ## Configuration -Import `SupabaseMemoryAdapter` and initialize it with your credentials: - -```typescript +```ts import { Agent, Memory } from "@voltagent/core"; import { SupabaseMemoryAdapter } from "@voltagent/supabase"; -import { createPinoLogger } from "@voltagent/logger"; import { openai } from "@ai-sdk/openai"; -// Get credentials from environment variables -const supabaseUrl = process.env.SUPABASE_URL; -const supabaseKey = process.env.SUPABASE_KEY; - -if (!supabaseUrl || !supabaseKey) { - throw new Error("Supabase URL and Key must be provided via environment variables."); -} - -// Initialize Supabase memory adapter -const storage = new SupabaseMemoryAdapter({ - supabaseUrl, - supabaseKey, - // Optional: Specify a custom base table name prefix - // This MUST match the prefix used in your SQL setup if customized. - tableName: "voltagent_memory", // Defaults to 'voltagent_memory' - // Optional: Limit the number of messages stored per conversation - storageLimit: 100, // Defaults to 100 - // Optional: Enable verbose debug logging from the memory provider - debug: true, // Defaults to false - // Optional: Custom logger for structured logging - logger: createPinoLogger({ name: "memory-supabase" }), +// Using URL and key +const memory = new Memory({ + storage: new SupabaseMemoryAdapter({ + supabaseUrl: process.env.SUPABASE_URL!, + supabaseKey: process.env.SUPABASE_KEY!, + }), }); -// Alternative: Use existing Supabase client +// Using existing Supabase client import { createClient } from "@supabase/supabase-js"; -const supabaseClient = createClient(supabaseUrl, supabaseKey); -const storage = new SupabaseMemoryAdapter({ - client: supabaseClient, - tableName: "voltagent_memory", // Optional - storageLimit: 150, // Optional: Custom storage limit - debug: false, // Optional: Debug logging - logger: createPinoLogger({ name: "memory-supabase" }), // Optional: Custom logger +const supabaseClient = createClient(process.env.SUPABASE_URL!, process.env.SUPABASE_KEY!); + +const memory = new Memory({ + storage: new SupabaseMemoryAdapter({ + client: supabaseClient, + }), }); const agent = new Agent({ - name: "Supabase Memory Agent", - instructions: "An agent using Supabase for memory.", - model: openai("gpt-4o"), - memory: new Memory({ storage }), + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, }); ``` -**Configuration Options:** - -When using Supabase URL and key: - -- `supabaseUrl` (string, required): Your Supabase project URL. -- `supabaseKey` (string, required): Your Supabase project `anon` key (or a service role key if used in a secure backend environment, though `anon` key with appropriate RLS policies is often sufficient). -- `tableName` (string, optional): A prefix for the database table names. Defaults to `voltagent_memory`. If you change this, ensure your SQL table creation script uses the same prefix. -- `storageLimit` (number, optional): The maximum number of messages to retain per conversation. 
When the limit is reached, the oldest messages are automatically deleted to make room for new ones. Defaults to `100`. -- `debug` (boolean, optional): Enables detailed logging from the `SupabaseMemory` provider to the console, useful for understanding memory operations during development. Defaults to `false`. -- `logger` (Logger, optional): Custom logger instance for structured logging. Supports any logger that implements the standard logger interface (e.g., Pino, Winston). When provided, this overrides the `debug` option. +### Configuration Options -When using an existing Supabase client: +| Option | Type | Description | +| -------------- | ---------------- | ----------------------------------------------------- | +| `supabaseUrl` | `string` | Supabase project URL (required if not using `client`) | +| `supabaseKey` | `string` | Supabase anon key (required if not using `client`) | +| `client` | `SupabaseClient` | Existing Supabase client (alternative to URL/key) | +| `tableName` | `string` | Table name prefix (default: `voltagent_memory`) | +| `storageLimit` | `number` | Max messages per conversation (default: `100`) | +| `debug` | `boolean` | Enable debug logging (default: `false`) | +| `logger` | `Logger` | Optional logger for structured logging | -- `client` (SupabaseClient, required when not using supabaseUrl/supabaseKey): An existing Supabase client instance. The constructor validates that this is a proper SupabaseClient instance. -- `tableName` (string, optional): Table name prefix when using existing client. -- `storageLimit` (number, optional): Storage limit when using existing client. Defaults to `100`. -- `debug` (boolean, optional): Debug logging when using existing client. Defaults to `false`. -- `logger` (Logger, optional): Custom logger instance for structured logging. +**Note**: Table prefix must match the SQL schema. If you use a custom `tableName`, update the SQL accordingly. -## Conversation Management +## Features -The Supabase provider supports conversation management similar to other storage providers: +- Messages stored per `userId` and `conversationId` +- Oldest messages pruned when `storageLimit` exceeded +- Supports complex queries with filtering, pagination, and sorting -```typescript -// Get conversations for a specific user -const conversations = await memory.getConversationsByUserId("user-123", { - limit: 50, - orderBy: "updated_at", - orderDirection: "DESC", -}); - -// Create and update conversations -const newConversation = await memory.createConversation({ - id: "conversation-id", - resourceId: "app-resource-1", - userId: "user-123", - title: "New Chat Session", - metadata: { source: "web-app" }, -}); +### Working Memory -await memory.updateConversation("conversation-id", { - title: "Updated Title", +```ts +const memory = new Memory({ + storage: new SupabaseMemoryAdapter({ + supabaseUrl: process.env.SUPABASE_URL!, + supabaseKey: process.env.SUPABASE_KEY!, + }), + workingMemory: { + enabled: true, + scope: "conversation", // or "user" + }, }); ``` -## Querying Conversations - -The Supabase storage provides conversation querying capabilities with filtering, pagination, and sorting options: +See [Working Memory](./working-memory.md). 
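The Features list above mentions filtering, pagination, and sorting. A minimal sketch of querying conversations and messages directly on the adapter, assuming it exposes the `StorageAdapter` query methods listed in the custom-adapter section; option names follow the earlier adapter docs and should be verified against the current typings:

```ts
import { SupabaseMemoryAdapter } from "@voltagent/supabase";

const storage = new SupabaseMemoryAdapter({
  supabaseUrl: process.env.SUPABASE_URL!,
  supabaseKey: process.env.SUPABASE_KEY!,
});

// Recent conversations for a user, e.g. for a chat-history sidebar
const conversations = await storage.getConversationsByUserId("user-123", {
  limit: 20,
  orderBy: "updated_at",
  orderDirection: "DESC",
});

// Messages for one conversation, oldest first
const messages = await storage.getMessages("user-123", conversations[0]?.id ?? "conv-1", {
  limit: 50,
});

console.log(conversations.length, messages.length);
```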
-```typescript -// Query with multiple filters -const workConversations = await memory.queryConversations({ - userId: "user-123", - resourceId: "work-agent", - limit: 25, - offset: 0, - orderBy: "created_at", - orderDirection: "DESC", -}); - -// Get all conversations for a user -const userConversations = await memory.queryConversations({ - userId: "user-123", - limit: 50, -}); +### Semantic Search -// Get conversations for a specific resource -const resourceConversations = await memory.queryConversations({ - resourceId: "chatbot-v1", - limit: 100, - orderBy: "updated_at", -}); +```ts +import { Memory, AiSdkEmbeddingAdapter, InMemoryVectorAdapter } from "@voltagent/core"; +import { SupabaseMemoryAdapter } from "@voltagent/supabase"; +import { openai } from "@ai-sdk/openai"; -// Admin view - get all conversations -const allConversations = await memory.queryConversations({ - limit: 200, - orderBy: "created_at", - orderDirection: "ASC", +const memory = new Memory({ + storage: new SupabaseMemoryAdapter({ + supabaseUrl: process.env.SUPABASE_URL!, + supabaseKey: process.env.SUPABASE_KEY!, + }), + embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), + vector: new InMemoryVectorAdapter(), // or pgvector adapter }); ``` -**Query Options:** +See [Semantic Search](./semantic-search.md). -- `userId` (optional): Filter conversations by specific user -- `resourceId` (optional): Filter conversations by specific resource -- `limit` (optional): Maximum number of conversations to return (default: 50) -- `offset` (optional): Number of conversations to skip for pagination (default: 0) -- `orderBy` (optional): Field to sort by: 'created_at', 'updated_at', or 'title' (default: 'updated_at') -- `orderDirection` (optional): Sort direction: 'ASC' or 'DESC' (default: 'DESC') +## Production Setup -## Getting Conversation Messages +```ts +import { Agent, Memory } from "@voltagent/core"; +import { SupabaseMemoryAdapter } from "@voltagent/supabase"; +import { createClient } from "@supabase/supabase-js"; +import { openai } from "@ai-sdk/openai"; -Retrieve messages for a specific conversation: +const supabaseClient = createClient( + process.env.SUPABASE_URL!, + process.env.SUPABASE_KEY! // or service_role key for backend +); -```typescript -// Get recent messages (chronological order) -const messages = await memory.getMessages("user-123", "conversation-456", { limit: 50 }); +const memory = new Memory({ + storage: new SupabaseMemoryAdapter({ + client: supabaseClient, + storageLimit: 200, + }), +}); -// Time-based pagination -const older = await memory.getMessages("user-123", "conversation-456", { - before: new Date("2024-01-01T00:00:00Z"), - limit: 50, +const agent = new Agent({ + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, }); ``` -**Message Query Options:** - -- `limit` (optional): Maximum number of messages to return (default: 100) -- `before` (optional): Only messages created before this date -- `after` (optional): Only messages created after this date -- `roles` (optional): Filter by roles, e.g., `["user", "assistant"]` - -Messages are returned in chronological order (oldest first) for natural conversation flow. - -## Use Cases - -- Applications already using Supabase for backend services. -- Projects requiring a scalable, managed PostgreSQL database. -- Scenarios where leveraging Supabase features like Auth, Realtime, or Storage alongside agent memory is beneficial. -- Production environments where robust data management and security policies (RLS) are essential. 
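The semantic search snippet above uses `InMemoryVectorAdapter`, which is lost on restart. Because Supabase is managed Postgres, it should also be possible to persist vectors by pointing the extension-free `PostgresVectorAdapter` at your project's direct Postgres connection string. A sketch only; the `SUPABASE_DB_URL` variable name and this pairing are assumptions, so verify against your project settings:

```ts
import { Memory, AiSdkEmbeddingAdapter } from "@voltagent/core";
import { SupabaseMemoryAdapter } from "@voltagent/supabase";
import { PostgresVectorAdapter } from "@voltagent/postgres";
import { openai } from "@ai-sdk/openai";

const memory = new Memory({
  storage: new SupabaseMemoryAdapter({
    supabaseUrl: process.env.SUPABASE_URL!,
    supabaseKey: process.env.SUPABASE_KEY!,
  }),
  embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")),
  // Supabase exposes a standard Postgres connection string; reusing the
  // extension-free PostgresVectorAdapter against it keeps vectors persistent.
  vector: new PostgresVectorAdapter({
    connection: process.env.SUPABASE_DB_URL!, // direct connection string from the Supabase dashboard (assumed name)
  }),
});
```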
- -## Working Memory +**Security:** -`SupabaseMemoryAdapter` implements working memory operations used by `Memory`: +- Use `anon` key with Row Level Security (RLS) policies +- Use `service_role` key only in secure backend environments +- Store credentials in environment variables -- Conversation-scoped working memory is stored under `conversations.metadata.workingMemory`. -- User-scoped working memory is stored in the `${tableName}_users` table `metadata.workingMemory` field. +**Use cases:** -Enable via `Memory({ workingMemory: { enabled: true, template | schema, scope } })`. See: [Working Memory](./working-memory.md). +- Applications already using Supabase +- Projects leveraging Supabase Auth, Realtime, or Storage +- Environments requiring RLS policies -Programmatic APIs (via `Memory`): +For production-ready zero-setup hosting, see [Managed Memory](./managed-memory.md). -- `getWorkingMemory({ conversationId?, userId? })` -- `updateWorkingMemory({ conversationId?, userId?, content })` -- `clearWorkingMemory({ conversationId?, userId? })` - -## Semantic Search (Embeddings + Vectors) - -Vector search is configured on `Memory` independently of the storage adapter. To enable semantic retrieval with Supabase storage, attach an embedding adapter and a vector adapter: - -```ts -import { Memory, AiSdkEmbeddingAdapter, InMemoryVectorAdapter } from "@voltagent/core"; -import { SupabaseMemoryAdapter } from "@voltagent/supabase"; -import { openai } from "@ai-sdk/openai"; - -const memory = new Memory({ - storage: new SupabaseMemoryAdapter({ supabaseUrl, supabaseKey }), - embedding: new AiSdkEmbeddingAdapter(openai.embedding("text-embedding-3-small")), - vector: new InMemoryVectorAdapter(), -}); -``` +## Learn More -Use with agent calls by passing `semanticMemory` options. See: [Semantic Search](./semantic-search.md). +- **[Managed Memory](./managed-memory.md)** - Production-ready hosted memory with zero setup +- **[Working Memory](./working-memory.md)** - Maintain compact context +- **[Semantic Search](./semantic-search.md)** - Vector search configuration diff --git a/website/docs/agents/memory/working-memory.md b/website/docs/agents/memory/working-memory.md index b1a0288f8..10721a7ae 100644 --- a/website/docs/agents/memory/working-memory.md +++ b/website/docs/agents/memory/working-memory.md @@ -5,94 +5,297 @@ slug: /agents/memory/working-memory # Working Memory -Working memory stores compact context across turns. It can be conversation‑scoped or user‑scoped. It is configured on the `Memory` instance. +Working memory stores compact context across conversation turns. Unlike full message history, it tracks key facts, preferences, or goals that persist throughout interactions. ## Configuration -Use one of three modes: +Three formats supported: -- Template (Markdown): `template: string` -- JSON schema (Zod): `schema: z.object({ ... })` -- Free‑form: no template or schema +1. **Markdown template** - Structured text with sections +2. **JSON schema** - Validated structured data using Zod +3. **Free-form** - Unstructured text -Scope defaults to `conversation`. Set `scope: 'user'` to store context at the user level. 
+Scope options: + +- **`conversation`** (default) - Context per conversation thread +- **`user`** - Context shared across all user conversations + +### Markdown Template ```ts import { Agent, Memory } from "@voltagent/core"; import { LibSQLMemoryAdapter } from "@voltagent/libsql"; import { openai } from "@ai-sdk/openai"; -import { z } from "zod"; -// Template (Markdown), conversation scope -const memoryWithTemplate = new Memory({ +const memory = new Memory({ storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), workingMemory: { enabled: true, + scope: "conversation", // default template: ` -# Profile +# User Profile - Name: - Role: +- Timezone: -# Goals +# Current Goals +- + +# Preferences - `, }, }); -// JSON schema, user scope -const workingSchema = z.object({ - userProfile: z - .object({ name: z.string().optional(), timezone: z.string().optional() }) - .optional(), - tasks: z.array(z.string()).optional(), +const agent = new Agent({ + name: "Assistant", + model: openai("gpt-4o-mini"), + memory, }); +``` + +### JSON Schema + +```ts +import { z } from "zod"; -const memoryWithSchema = new Memory({ +const memory = new Memory({ storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), - workingMemory: { enabled: true, scope: "user", schema: workingSchema }, + workingMemory: { + enabled: true, + scope: "user", // persist across conversations + schema: z.object({ + profile: z + .object({ + name: z.string().optional(), + role: z.string().optional(), + timezone: z.string().optional(), + }) + .optional(), + preferences: z.array(z.string()).optional(), + goals: z.array(z.string()).optional(), + }), + }, }); +``` -const agent = new Agent({ - name: "assistant", - model: openai("gpt-4o-mini"), - memory: memoryWithTemplate, +### Free-Form + +```ts +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + workingMemory: { + enabled: true, // no template or schema + }, }); ``` -## Built‑in Tools +## Agent Integration -When working memory is enabled, the agent exposes three tools: +When working memory is enabled, the agent: -- `get_working_memory()` → returns the current content string -- `update_working_memory(content)` → updates content; when a schema is configured, `content` is validated -- `clear_working_memory()` → clears content +1. **Adds instructions to system prompt** - Includes current working memory content and usage guidelines +2. **Exposes tools automatically**: + - `get_working_memory()` - Retrieve current content + - `update_working_memory(content)` - Update content (validated against schema if configured) + - `clear_working_memory()` - Clear content -## Prompt Integration +The agent manages working memory proactively based on conversation flow. -When you call the agent with `userId` and `conversationId`, the agent appends a working‑memory instruction block to the system prompt. The block includes: +## Update Modes -- Usage guidelines -- The template or a note about the JSON schema -- The current working memory content (if any) +Two modes available: -This logic is implemented in `Memory.getWorkingMemoryInstructions()` and used by the agent before generation. 
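A short round trip showing the agent capturing details through its working-memory tools and the application reading them back afterwards. A minimal sketch; the agent decides when to call `update_working_memory`, so the stored content is not guaranteed to contain any particular fact:

```ts
import { Agent, Memory } from "@voltagent/core";
import { LibSQLMemoryAdapter } from "@voltagent/libsql";
import { openai } from "@ai-sdk/openai";

const memory = new Memory({
  storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }),
  workingMemory: { enabled: true, scope: "conversation" },
});

const agent = new Agent({
  name: "Assistant",
  model: openai("gpt-4o-mini"),
  memory,
});

// The agent may call update_working_memory while answering this turn.
await agent.generateText("My name is Alice and I prefer short, casual answers.", {
  userId: "user-123",
  conversationId: "conv-1",
});

// Read back whatever was stored: a string (Markdown or JSON) depending on configuration.
const stored = await memory.getWorkingMemory({
  userId: "user-123",
  conversationId: "conv-1",
});
console.log(stored);
```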
+### Append Mode (Default) + +Safely merges new information with existing content: + +```ts +// Existing working memory: +// { profile: { name: "Alice" }, goals: ["Learn TypeScript"] } + +// Agent calls update_working_memory with: +// { goals: ["Build an API"] } + +// Result (merged): +// { profile: { name: "Alice" }, goals: ["Learn TypeScript", "Build an API"] } +``` + +**For JSON schemas:** + +- Objects: Deep merge +- Arrays: Deduplicated concatenation +- Primitives: Overwrite + +**For Markdown:** + +- New content appended with separator + +### Replace Mode + +Overwrites all existing content: + +```ts +await memory.updateWorkingMemory({ + conversationId: "thread-123", + userId: "user-456", + content: { profile: { name: "Bob" } }, // existing data lost + options: { mode: "replace" }, +}); +``` + +Use replace mode only when intentionally resetting all context. ## Programmatic API -Methods on `Memory`: +Direct memory access without agent tools: + +```ts +// Get current working memory +const content = await memory.getWorkingMemory({ + conversationId: "thread-123", + userId: "user-456", +}); +console.log(content); // string (JSON or Markdown) + +// Update (default: append mode) +await memory.updateWorkingMemory({ + conversationId: "thread-123", + userId: "user-456", + content: { goals: ["Complete onboarding"] }, // object or string +}); + +// Update (replace mode) +await memory.updateWorkingMemory({ + conversationId: "thread-123", + userId: "user-456", + content: "Fresh context", + options: { mode: "replace" }, +}); + +// Clear +await memory.clearWorkingMemory({ + conversationId: "thread-123", + userId: "user-456", +}); + +// Introspect configuration +const format = memory.getWorkingMemoryFormat(); // "markdown" | "json" | null +const template = memory.getWorkingMemoryTemplate(); // string | null +const schema = memory.getWorkingMemorySchema(); // ZodObject | null +``` + +## Storage Implementation + +Working memory is stored differently per scope: + +**Conversation scope:** + +- Stored in `conversations.metadata.workingMemory` field +- Isolated per conversation thread + +**User scope:** + +- Stored in `${tablePrefix}_users.metadata.workingMemory` field (adapter-specific) +- Shared across all user conversations + +All official adapters (LibSQL, Postgres, Supabase, Managed Memory) support both scopes. + +## Example: User-Scoped Preferences + +```ts +import { Agent, Memory } from "@voltagent/core"; +import { LibSQLMemoryAdapter } from "@voltagent/libsql"; +import { openai } from "@ai-sdk/openai"; +import { z } from "zod"; + +const userPreferencesSchema = z.object({ + name: z.string().optional(), + timezone: z.string().optional(), + communicationStyle: z.enum(["formal", "casual"]).optional(), + interests: z.array(z.string()).optional(), +}); + +const memory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + workingMemory: { + enabled: true, + scope: "user", // persist across all conversations + schema: userPreferencesSchema, + }, +}); + +const agent = new Agent({ + name: "Personal Assistant", + instructions: "Adapt responses based on user preferences stored in working memory.", + model: openai("gpt-4o-mini"), + memory, +}); -- `getWorkingMemory({ conversationId?, userId? }) → Promise` -- `updateWorkingMemory({ conversationId?, userId?, content }) → Promise` -- `clearWorkingMemory({ conversationId?, userId? 
}) → Promise` -- `getWorkingMemoryFormat() → 'markdown' | 'json' | null` -- `getWorkingMemoryTemplate() → string | null` -- `getWorkingMemorySchema() → z.ZodObject | null` +// First conversation +await agent.generateText("I prefer casual communication and I'm into AI and music.", { + userId: "user-123", + conversationId: "conv-1", +}); + +// Different conversation - agent remembers user preferences +await agent.generateText("What should I learn next?", { + userId: "user-123", + conversationId: "conv-2", // different thread +}); +``` + +## Example: Conversation-Scoped Goals + +```ts +const projectMemory = new Memory({ + storage: new LibSQLMemoryAdapter({ url: "file:./.voltagent/memory.db" }), + workingMemory: { + enabled: true, + scope: "conversation", // isolated per project + template: ` +# Project Context +- Name: +- Deadline: +- Tech Stack: + +# Current Sprint +- Goals: +- Blockers: +`, + }, +}); + +const agent = new Agent({ + name: "Project Manager", + instructions: "Track project context and help with sprint planning.", + model: openai("gpt-4o-mini"), + memory: projectMemory, +}); + +// Each project gets its own working memory +await agent.generateText("Let's plan the e-commerce project using Next.js.", { + userId: "user-123", + conversationId: "project-ecommerce", +}); + +await agent.generateText("For the analytics dashboard, we'll use React and D3.", { + userId: "user-123", + conversationId: "project-analytics", +}); +``` -## Storage Notes +## Best Practices -The adapter decides where to store working memory. The official adapters use metadata fields: +1. **Use JSON schemas for structured data** - Ensures type safety and validation +2. **Use Markdown templates for narrative context** - Better for summaries, notes, observations +3. **Default to append mode** - Safer than replace; preserves existing data +4. **User scope for preferences** - Name, timezone, communication style +5. **Conversation scope for session data** - Goals, tasks, project details +6. **Keep it compact** - Working memory supplements message history, not replaces it -- Conversation scope: `conversations.metadata.workingMemory` -- User scope: `${tablePrefix}_users.metadata.workingMemory` +## Learn More -See the provider pages for details. +- **[Semantic Search](./semantic-search.md)** - Retrieve relevant past messages +- **[Managed Memory](./managed-memory.md)** - Zero-setup working memory storage +- **[LibSQL / Turso](./libsql.md)** - Self-hosted working memory storage diff --git a/website/docs/agents/overview.md b/website/docs/agents/overview.md index 5d99a10ea..780e3c88a 100644 --- a/website/docs/agents/overview.md +++ b/website/docs/agents/overview.md @@ -696,8 +696,8 @@ const dynamicAgent = new Agent({ // Option 3: VoltOps Management (enterprise-grade) const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); const managedAgent = new Agent({ diff --git a/website/docs/agents/prompts.md b/website/docs/agents/prompts.md index 8bf18068f..a0fe4f418 100644 --- a/website/docs/agents/prompts.md +++ b/website/docs/agents/prompts.md @@ -201,8 +201,8 @@ VoltOps provides a complete prompt management platform with version control, tea 4. 
**Add to your .env file**: ```bash -VOLTOPS_PUBLIC_KEY=pk_your_public_key_here -VOLTOPS_SECRET_KEY=sk_your_secret_key_here +VOLTAGENT_PUBLIC_KEY=pk_your_public_key_here +VOLTAGENT_SECRET_KEY=sk_your_secret_key_here ``` ### Step 2: Create Your First Prompt @@ -254,8 +254,8 @@ import { Agent, VoltAgent, VoltOpsClient } from "@voltagent/core"; import { openai } from "@ai-sdk/openai"; const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, prompts: true, promptCache: { enabled: true, @@ -323,8 +323,8 @@ You can also access prompts directly from the VoltOpsClient outside of agent ins ```typescript // Direct access for testing or utility functions const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); // Get prompt directly @@ -759,8 +759,8 @@ return await prompts.getPrompt({ Error: Authentication failed // Solution: Verify environment variables -console.log("Public Key:", process.env.VOLTOPS_PUBLIC_KEY?.substring(0, 8) + "..."); -console.log("Secret Key:", process.env.VOLTOPS_SECRET_KEY ? "Set" : "Missing"); +console.log("Public Key:", process.env.VOLTAGENT_PUBLIC_KEY?.substring(0, 8) + "..."); +console.log("Secret Key:", process.env.VOLTAGENT_SECRET_KEY ? "Set" : "Missing"); ``` ### Debug Tips @@ -770,8 +770,8 @@ console.log("Secret Key:", process.env.VOLTOPS_SECRET_KEY ? "Set" : "Missing"); ```typescript // Test VoltOps connection outside of agent const voltOpsClient = new VoltOpsClient({ - publicKey: process.env.VOLTOPS_PUBLIC_KEY, - secretKey: process.env.VOLTOPS_SECRET_KEY, + publicKey: process.env.VOLTAGENT_PUBLIC_KEY, + secretKey: process.env.VOLTAGENT_SECRET_KEY, }); const promptManager = voltOpsClient.prompts; diff --git a/website/plugins/fetch-tweets.js b/website/plugins/fetch-tweets.js index e8cc89ed1..913f93ba1 100644 --- a/website/plugins/fetch-tweets.js +++ b/website/plugins/fetch-tweets.js @@ -118,25 +118,6 @@ const sanitizeVideo = (video) => { }); }; -const sanitizeCard = (card) => { - if (!card) { - return undefined; - } - - const thumbnail = card.binding_values?.thumbnail_image_large; - if (!thumbnail) { - return undefined; - } - - return compactObject({ - binding_values: { - thumbnail_image_large: thumbnail, - }, - name: card.name, - url: card.url, - }); -}; - const sanitizeTweet = (tweet) => { if (!tweet) { return undefined; @@ -156,7 +137,6 @@ const sanitizeTweet = (tweet) => { user: sanitizeUser(tweet.user), photos: sanitizeArray(tweet.photos, sanitizePhoto), video: sanitizeVideo(tweet.video), - card: sanitizeCard(tweet.card), in_reply_to_screen_name: tweet.in_reply_to_screen_name, in_reply_to_status_id_str: tweet.in_reply_to_status_id_str, }) || {}; diff --git a/website/sidebars.ts b/website/sidebars.ts index fef5c9067..2e0f4721b 100644 --- a/website/sidebars.ts +++ b/website/sidebars.ts @@ -86,6 +86,7 @@ const sidebars: SidebarsConfig = { label: "Storage Adapters", items: [ "agents/memory/in-memory", + "agents/memory/managed-memory", "agents/memory/libsql", "agents/memory/postgres", "agents/memory/supabase", diff --git a/website/src/data/tweets.json b/website/src/data/tweets.json index e7b8be324..e9828bd82 100644 --- a/website/src/data/tweets.json +++ b/website/src/data/tweets.json @@ -393,7 +393,7 @@ 9 ], "id_str": "19107253", - 
"name": "Tanny Siwik", + "name": "Tom Siwik", "screen_name": "tomsiwik" } ], @@ -592,7 +592,7 @@ "image_value": { "height": 315, "width": 600, - "url": "https://pbs.twimg.com/card_img/1970565739817115652/HD5si5MR?format=jpg&name=600x600" + "url": "https://pbs.twimg.com/card_img/1973103689876512772/gxD_gRLF?format=jpg&name=600x600" }, "type": "IMAGE" }