Skip to content

Commit 6e126cd

Browse files
committed
feat: upgrade memory system with unified management and config integration
- Upgrade to comprehensive `MemoryManager` with short-term/long-term memory
- Add automatic memory migration and intelligent storage decisions
- Implement semantic search and multi-weight ranking capabilities
- Introduce `ConfigLoader` for centralized YAML configuration
- Integrate upgraded memory system into `SlaverAgent`
- Support persistence, statistics, and state management
- Update `requirements.txt` and `config.yaml` for enhanced features
1 parent c79c9b9 commit 6e126cd

File tree

9 files changed

+1397
-3
lines changed

9 files changed

+1397
-3
lines changed

requirements.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,3 +92,6 @@ websockets==15.0.1
9292
Werkzeug==3.1.3
9393
wsproto==1.2.0
9494
zipp==3.23.0
95+
# Memory module dependencies
96+
mem0ai>=0.1.115
97+
qdrant-client>=1.7.0

slaver/agents/slaver_agent.py

Lines changed: 104 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
from rich.panel import Panel
1212
from rich.text import Text
1313
from tools.memory import ActionStep, AgentMemory, SceneMemory
14+
from tools.memory import MemoryManager
1415
from tools.monitoring import AgentLogger, LogLevel, Monitor
1516

1617
logger = getLogger(__name__)
@@ -40,6 +41,7 @@ def __init__(
4041
verbosity_level: LogLevel = LogLevel.INFO,
4142
step_callbacks: Optional[List[Callable]] = None,
4243
log_file: Optional[str] = None,
44+
config: Optional[Dict[str, Any]] = None,
4345
):
4446
self.tools = tools
4547
self.model = model
@@ -50,8 +52,9 @@ def __init__(
5052
self.max_steps = max_steps
5153
self.step_number = 0
5254
self.state = {}
53-
self.memory = AgentMemory()
54-
self.scene = SceneMemory(collaborator)
55+
self.memory = AgentMemory() # Maintain backward compatibility
56+
self.memory_manager = MemoryManager(config=config) # New memory manager with config
57+
self.scene = SceneMemory(collaborator) # Scene memory for environment tracking
5558
self.logger = AgentLogger(level=verbosity_level, log_file=log_file)
5659
self.monitor = Monitor(self.model, self.logger)
5760
self.step_callbacks = step_callbacks if step_callbacks is not None else []
@@ -85,6 +88,16 @@ async def run(
8588

8689
if reset:
8790
self.memory.reset()
91+
# Record task start to new memory system
92+
await self.memory_manager.add_message(
93+
role="system",
94+
content=f"Starting task: {task}",
95+
metadata={
96+
"task_type": "task_start",
97+
"robot_name": self.robot_name,
98+
"step_number": self.step_number
99+
}
100+
)
88101
self.step_number = 1
89102

90103
self.logger.log_task(
@@ -103,12 +116,34 @@ async def run(
103116
)
104117
answer = await self.step(step)
105118
if answer == "final_answer":
119+
# Record task completion to new memory system
120+
await self.memory_manager.add_message(
121+
role="system",
122+
content=f"Task completed: {self.task}",
123+
metadata={
124+
"task_type": "task_complete",
125+
"robot_name": self.robot_name,
126+
"steps_taken": self.step_number,
127+
"message_type": "task_completion"
128+
}
129+
)
106130
return "Mission accomplished"
107131

108132
self.collaborator.record_agent_status(self.robot_name, answer)
109133
step.end_time = time.time()
110134
self.step_number += 1
111135

136+
# Record task failure to new memory system
137+
await self.memory_manager.add_message(
138+
role="system",
139+
content=f"Task failed: {self.task} - Maximum steps reached",
140+
metadata={
141+
"task_type": "task_failed",
142+
"robot_name": self.robot_name,
143+
"steps_taken": self.step_number,
144+
"message_type": "task_failure"
145+
}
146+
)
112147
return "Maximum number of attempts reached, Mission not completed"
113148

114149
def step(self) -> Optional[Any]:
@@ -208,6 +243,19 @@ async def step(self, memory_step: ActionStep) -> Union[None, Any]:
208243
stop_sequences=["Observation:"],
209244
)
210245
memory_step.model_output_message = model_message
246+
247+
# Record model output to new memory system
248+
await self.memory_manager.add_message(
249+
role="assistant",
250+
content=model_message.content or str(model_message.raw),
251+
metadata={
252+
"step_number": self.step_number,
253+
"robot_name": self.robot_name,
254+
"model_path": self.model_path,
255+
"message_type": "model_output"
256+
}
257+
)
258+
211259
self.logger.log_markdown(
212260
content=(
213261
model_message.content
@@ -230,5 +278,58 @@ async def step(self, memory_step: ActionStep) -> Union[None, Any]:
230278
return "final_answer"
231279
else:
232280
self.tool_call.append(current_call)
281+
282+
# Record tool call to new memory system
283+
await self.memory_manager.add_message(
284+
role="assistant",
285+
content=f"Calling tool: {tool_name}",
286+
metadata={
287+
"step_number": self.step_number,
288+
"tool_name": tool_name,
289+
"tool_arguments": tool_arguments,
290+
"robot_name": self.robot_name,
291+
"message_type": "tool_call"
292+
}
293+
)
233294

234-
return await self._execute_tool_call(tool_name, tool_arguments, memory_step)
295+
observation = await self._execute_tool_call(tool_name, tool_arguments, memory_step)
296+
297+
# Record tool execution result to new memory system
298+
await self.memory_manager.add_message(
299+
role="system",
300+
content=f"Tool {tool_name} execution result: {observation}",
301+
metadata={
302+
"step_number": self.step_number,
303+
"tool_name": tool_name,
304+
"observation_type": "tool_result",
305+
"robot_name": self.robot_name,
306+
"message_type": "observation"
307+
}
308+
)
309+
310+
return observation
311+
312+
# Convenience methods for new memory system
313+
async def search_memory(self, query: str, limit: int = 10):
    """Search messages in memory.

    Thin async delegator to ``MemoryManager.search_messages``; returns at
    most *limit* results (result type is defined by the memory manager).
    """
    return await self.memory_manager.search_messages(query, limit)
316+
317+
async def get_memory_stats(self):
    """Get memory statistics.

    Thin async delegator to ``MemoryManager.get_memory_stats``; the shape
    of the returned statistics is defined by the memory manager.
    """
    return await self.memory_manager.get_memory_stats()
320+
321+
async def record_important_info(self, thinking: str, content: list, **kwargs):
    """Record important information to long-term memory.

    Thin async delegator to ``MemoryManager.record_important_info``;
    extra keyword arguments are passed through unchanged.
    """
    return await self.memory_manager.record_important_info(thinking, content, **kwargs)
324+
325+
async def retrieve_important_info(self, keywords: list, limit: int = 5):
    """Retrieve important information from long-term memory.

    Thin async delegator to ``MemoryManager.retrieve_important_info``;
    returns at most *limit* items matching *keywords*.
    """
    return await self.memory_manager.retrieve_important_info(keywords, limit)
328+
329+
def save_memory_state(self, filepath: str):
    """Save memory state.

    Synchronous delegator to ``MemoryManager.save_state``; persists the
    memory manager's state to *filepath*.
    """
    self.memory_manager.save_state(filepath)
332+
333+
def load_memory_state(self, filepath: str):
    """Load memory state.

    Synchronous delegator to ``MemoryManager.load_state``; restores the
    memory manager's state from *filepath*.
    """
    self.memory_manager.load_state(filepath)

slaver/config.yaml

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,5 +54,64 @@ robot:
5454
call_type: remote
5555
path: "http://127.0.0.1:8000"
5656

57+
# Memory System Configuration
58+
memory:
59+
# Short-term memory configuration
60+
short_term:
61+
# Maximum number of messages to store in short-term memory
62+
max_size: 1000
63+
# Auto-migrate to long-term memory when short-term exceeds this count
64+
auto_migrate_threshold: 100
65+
# Auto-migrate to long-term memory after this many hours
66+
migration_age_hours: 24
67+
68+
# Long-term memory configuration
69+
long_term:
70+
# Agent identifier
71+
agent_name: "roboos_agent"
72+
# User identifier
73+
user_name: "default_user"
74+
# Run identifier
75+
run_name: "default_run"
76+
77+
# Vector store configuration
78+
vector_store:
79+
# Vector store provider (qdrant, chroma, etc.)
80+
provider: "qdrant"
81+
config:
82+
# Store vectors on disk for persistence
83+
on_disk: true
84+
# Collection name for storing vectors
85+
collection_name: "roboos_memory"
86+
# Qdrant server configuration
87+
host: "localhost"
88+
port: 6333
89+
# API key for Qdrant (if required)
90+
api_key: null
91+
92+
# mem0 configuration
93+
mem0:
94+
# Embedding model configuration
95+
embedder:
96+
provider: "huggingface"
97+
config:
98+
model: "sentence-transformers/all-MiniLM-L6-v2"
99+
100+
# LLM model configuration for memory operations
101+
llm:
102+
provider: "openai"
103+
config:
104+
# Use local vLLM service with OpenAI-compatible API
105+
model: "RoboBrain2.0-7B"
106+
api_key: "EMPTY" # vLLM doesn't require real API key
107+
base_url: "http://localhost:4567/v1"
108+
109+
# Memory retrieval configuration
110+
retrieval:
111+
# Number of memories to retrieve by default
112+
default_limit: 5
113+
# Similarity threshold for memory retrieval
114+
similarity_threshold: 0.7
115+
57116
# Output reasoning context, time cost and other information
58117
profiling: true

slaver/tools/config_loader.py

Lines changed: 160 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,160 @@
1+
#!/usr/bin/env python3
2+
# -*- coding: utf-8 -*-
3+
"""
4+
Configuration Loader
5+
6+
Utility for loading and managing configuration from config.yaml file.
7+
"""
8+
9+
import os
10+
import yaml
11+
from typing import Any, Dict, Optional
12+
from logging import getLogger
13+
14+
logger = getLogger(__name__)
15+
16+
17+
class ConfigLoader:
    """Configuration loader for RoboOS system.

    Loads and caches the YAML configuration file, exposing accessors for
    each top-level section (memory, model, tool, collaborator, robot).
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize configuration loader.

        Args:
            config_path: Path to config.yaml file. If None, uses the default
                path (``../config.yaml`` relative to this file).
        """
        if config_path is None:
            # Default config path relative to this file
            current_dir = os.path.dirname(os.path.abspath(__file__))
            config_path = os.path.join(current_dir, '..', 'config.yaml')

        self.config_path = config_path
        # Cached parsed configuration; None until load_config() first succeeds.
        self._config: Optional[Dict[str, Any]] = None

    def load_config(self) -> Dict[str, Any]:
        """Load configuration from YAML file (cached after the first call).

        Returns:
            Configuration dictionary. An empty/blank YAML document yields
            an empty dict rather than None.

        Raises:
            FileNotFoundError: If config file doesn't exist
            yaml.YAMLError: If config file is invalid YAML
        """
        if self._config is not None:
            return self._config

        if not os.path.exists(self.config_path):
            raise FileNotFoundError(f"Config file not found: {self.config_path}")

        try:
            with open(self.config_path, 'r', encoding='utf-8') as f:
                # yaml.safe_load returns None for an empty document; normalize
                # to {} so callers always get a dict and the cache stays valid
                # (a None result would defeat the `is not None` cache check).
                self._config = yaml.safe_load(f) or {}

            logger.info(f"Configuration loaded from {self.config_path}")
            return self._config

        except yaml.YAMLError as e:
            logger.error(f"Failed to parse config file {self.config_path}: {e}")
            raise
        except Exception as e:
            logger.error(f"Failed to load config file {self.config_path}: {e}")
            raise

    def _get_section(self, key: str) -> Dict[str, Any]:
        """Return a top-level configuration section, or {} if absent."""
        return self.load_config().get(key, {})

    def get_memory_config(self) -> Dict[str, Any]:
        """Get memory system configuration.

        Returns:
            Memory configuration dictionary
        """
        return self._get_section("memory")

    def get_short_term_config(self) -> Dict[str, Any]:
        """Get short-term memory configuration.

        Returns:
            Short-term memory configuration dictionary
        """
        return self.get_memory_config().get("short_term", {})

    def get_long_term_config(self) -> Dict[str, Any]:
        """Get long-term memory configuration.

        Returns:
            Long-term memory configuration dictionary
        """
        return self.get_memory_config().get("long_term", {})

    def get_model_config(self) -> Dict[str, Any]:
        """Get model configuration.

        Returns:
            Model configuration dictionary
        """
        return self._get_section("model")

    def get_tool_config(self) -> Dict[str, Any]:
        """Get tool configuration.

        Returns:
            Tool configuration dictionary
        """
        return self._get_section("tool")

    def get_collaborator_config(self) -> Dict[str, Any]:
        """Get collaborator configuration.

        Returns:
            Collaborator configuration dictionary
        """
        return self._get_section("collaborator")

    def get_robot_config(self) -> Dict[str, Any]:
        """Get robot configuration.

        Returns:
            Robot configuration dictionary
        """
        return self._get_section("robot")

    def reload_config(self) -> Dict[str, Any]:
        """Reload configuration from file, dropping the cached copy.

        Returns:
            Updated configuration dictionary
        """
        self._config = None
        return self.load_config()
135+
136+
137+
def load_config(config_path: Optional[str] = None) -> Dict[str, Any]:
    """Convenience function to load configuration.

    Args:
        config_path: Path to config.yaml file; None selects the default path.

    Returns:
        Configuration dictionary
    """
    return ConfigLoader(config_path).load_config()
148+
149+
150+
def get_memory_config(config_path: Optional[str] = None) -> Dict[str, Any]:
    """Convenience function to get memory configuration.

    Args:
        config_path: Path to config.yaml file; None selects the default path.

    Returns:
        Memory configuration dictionary
    """
    return ConfigLoader(config_path).get_memory_config()

0 commit comments

Comments
 (0)