diff --git a/README.md b/README.md
index 39701efd..f6d59e64 100644
--- a/README.md
+++ b/README.md
@@ -759,12 +759,19 @@ The Mem0 Memory Tool supports three different backend configurations:
 | OPENSEARCH_HOST | OpenSearch Host URL | None | OpenSearch |
 | AWS_REGION | AWS Region for OpenSearch | us-west-2 | OpenSearch |
 | DEV | Enable development mode (bypasses confirmations) | false | All modes |
+| MEM0_LLM_PROVIDER | LLM provider for memory processing | aws_bedrock | All modes |
+| MEM0_LLM_MODEL | LLM model for memory processing | anthropic.claude-3-5-haiku-20241022-v1:0 | All modes |
+| MEM0_LLM_TEMPERATURE | LLM temperature (0.0-2.0) | 0.1 | All modes |
+| MEM0_LLM_MAX_TOKENS | LLM maximum tokens | 2000 | All modes |
+| MEM0_EMBEDDER_PROVIDER | Embedder provider for vector embeddings | aws_bedrock | All modes |
+| MEM0_EMBEDDER_MODEL | Embedder model for vector embeddings | amazon.titan-embed-text-v2:0 | All modes |
+
 **Note**:
 - If `MEM0_API_KEY` is set, the tool will use the Mem0 Platform
 - If `OPENSEARCH_HOST` is set, the tool will use OpenSearch
 - If neither is set, the tool will default to FAISS (requires `faiss-cpu` package)
-
+- LLM configuration applies to all backend modes and allows customization of the language model used for memory processing
 
 #### Memory Tool
 
 | Environment Variable | Description | Default |
diff --git a/src/strands_tools/mem0_memory.py b/src/strands_tools/mem0_memory.py
index 5840deaa..2294bf08 100644
--- a/src/strands_tools/mem0_memory.py
+++ b/src/strands_tools/mem0_memory.py
@@ -150,13 +150,16 @@ class Mem0ServiceClient:
     """Client for interacting with Mem0 service."""
 
     DEFAULT_CONFIG = {
-        "embedder": {"provider": "aws_bedrock", "config": {"model": "amazon.titan-embed-text-v2:0"}},
+        "embedder": {
+            "provider": os.environ.get("MEM0_EMBEDDER_PROVIDER", "aws_bedrock"),
+            "config": {"model": os.environ.get("MEM0_EMBEDDER_MODEL", "amazon.titan-embed-text-v2:0")},
+        },
         "llm": {
-            "provider": "aws_bedrock",
+            "provider": os.environ.get("MEM0_LLM_PROVIDER", "aws_bedrock"),
             "config": {
-                "model": "anthropic.claude-3-5-haiku-20241022-v1:0",
-                "temperature": 0.1,
-                "max_tokens": 2000,
+                "model": os.environ.get("MEM0_LLM_MODEL", "anthropic.claude-3-5-haiku-20241022-v1:0"),
+                "temperature": float(os.environ.get("MEM0_LLM_TEMPERATURE", 0.1)),
+                "max_tokens": int(os.environ.get("MEM0_LLM_MAX_TOKENS", 2000)),
             },
         },
         "vector_store": {
diff --git a/tests/test_mem0.py b/tests/test_mem0.py
index f9a4210a..a3d4a0a5 100644
--- a/tests/test_mem0.py
+++ b/tests/test_mem0.py
@@ -54,7 +54,18 @@ def extract_result_text(result):
     return str(result)
 
 
-@patch.dict(os.environ, {"OPENSEARCH_HOST": "test.opensearch.amazonaws.com"})
+@patch.dict(
+    os.environ,
+    {
+        "MEM0_LLM_PROVIDER": "openai",
+        "MEM0_LLM_MODEL": "gpt-4o",
+        "MEM0_LLM_TEMPERATURE": "0.2",
+        "MEM0_LLM_MAX_TOKENS": "4000",
+        "MEM0_EMBEDDER_PROVIDER": "openai",
+        "MEM0_EMBEDDER_MODEL": "text-embedding-3-large",
+        "OPENSEARCH_HOST": "test.opensearch.amazonaws.com",
+    },
+)
 @patch("strands_tools.mem0_memory.Mem0Memory")
 @patch("strands_tools.mem0_memory.boto3.Session")
 def test_store_memory(mock_boto3_session, mock_mem0_memory, mock_tool):
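Usage sketch (not part of the patch): a minimal example of how the new overrides behave, assuming the package providing `strands_tools` is installed and the environment variables are set before the module is first imported, since `DEFAULT_CONFIG` calls `os.environ.get` at class-definition (import) time. The provider and model values below are illustrative, not requirements.

```python
import os

# Illustrative override values; any provider/model pair supported by mem0 works.
# Set these BEFORE importing strands_tools.mem0_memory, because DEFAULT_CONFIG
# reads the environment when the class body is executed at import time.
os.environ["MEM0_LLM_PROVIDER"] = "openai"
os.environ["MEM0_LLM_MODEL"] = "gpt-4o"
os.environ["MEM0_LLM_TEMPERATURE"] = "0.2"   # parsed with float()
os.environ["MEM0_LLM_MAX_TOKENS"] = "4000"   # parsed with int()
os.environ["MEM0_EMBEDDER_PROVIDER"] = "openai"
os.environ["MEM0_EMBEDDER_MODEL"] = "text-embedding-3-large"

from strands_tools.mem0_memory import Mem0ServiceClient

print(Mem0ServiceClient.DEFAULT_CONFIG["llm"])
# {'provider': 'openai', 'config': {'model': 'gpt-4o', 'temperature': 0.2, 'max_tokens': 4000}}
```

One design note on the patch itself: passing the numeric defaults (`0.1`, `2000`) straight to `os.environ.get` is safe because the default is returned unchanged when the key is absent, and `float()`/`int()` accept both numbers and numeric strings, so both the unset and the override paths parse cleanly.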