--- a/tests/test_reasoning/test_reasoning.py
+++ b/tests/test_reasoning/test_reasoning.py
@@ -1,10 +1,9 @@
-# tests/test_reasoning/test_reasoning.py
-
 from unittest.mock import Mock
 
 from mesa_llm.reasoning.reasoning import (
     Observation,
     Plan,
+    Reasoning,
 )
 
 
@@ -39,3 +38,53 @@ def test_plan_creation(self):
         assert plan.step == 1
         assert plan.llm_plan == mock_llm_response
         assert plan.ttl == 3
+
+
+class TestReasoningBase:
+    """Tests for the abstract Reasoning base class."""
+
+    def test_execute_tool_call_generates_plan(self):
+        """Test that the base execute_tool_call method produces a Plan."""
+        # 1. Set up a mock agent with all necessary components
+        mock_agent = Mock()
+        mock_agent.model.steps = 5
+
+        # Mock the LLM and its response
+        mock_llm_response = Mock()
+        mock_llm_response.choices = [Mock()]
+        mock_llm_response.choices[0].message = "Final LLM message"
+        mock_agent.llm.generate.return_value = mock_llm_response
+
+        # Mock the Tool Manager
+        mock_agent.tool_manager.get_all_tools_schema.return_value = [
+            {"schema": "example"}
+        ]
+
+        # 2. Instantiate a concrete implementation of Reasoning to test the base method
+        class ConcreteReasoning(Reasoning):
+            def plan(self, prompt, obs=None, ttl=1, selected_tools=None):
+                pass  # Not needed for this test
+
+        reasoning = ConcreteReasoning(agent=mock_agent)
+
+        # 3. Call the method we want to test
+        chaining_message = "Execute the plan."
+        result_plan = reasoning.execute_tool_call(
+            chaining_message, selected_tools=["tool1"]
+        )
+
+        # 4. Assert the results
+        # Assert that the LLM was called with the correct parameters
+        mock_agent.llm.generate.assert_called_once_with(
+            prompt=chaining_message,
+            tool_schema=[{"schema": "example"}],
+            tool_choice="required",
+        )
+        # Assert that the tool manager was asked for the correct schema
+        mock_agent.tool_manager.get_all_tools_schema.assert_called_once_with(
+            selected_tools=["tool1"]
+        )
+        # Assert that the output is a correctly formed Plan object
+        assert isinstance(result_plan, Plan)
+        assert result_plan.step == 5
+        assert result_plan.llm_plan == "Final LLM message"
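
For context, a minimal sketch of the base execute_tool_call behavior this test pins down, reconstructed purely from the mocks and assertions above. It is not the actual mesa_llm source: the Reasoning.__init__ signature and the exact Plan constructor arguments shown here are assumptions.

from abc import ABC, abstractmethod

# Sketch only: Plan here is the dataclass imported at the top of the test
# file; its exact constructor fields are assumed from the assertions.
class Reasoning(ABC):
    def __init__(self, agent):
        self.agent = agent

    @abstractmethod
    def plan(self, prompt, obs=None, ttl=1, selected_tools=None):
        """Produce a Plan for the agent; implemented by subclasses."""

    def execute_tool_call(self, chaining_message, selected_tools=None):
        # Ask the tool manager for the schema of the selected tools only
        tool_schema = self.agent.tool_manager.get_all_tools_schema(
            selected_tools=selected_tools
        )
        # Force the LLM to respond with a tool call
        response = self.agent.llm.generate(
            prompt=chaining_message,
            tool_schema=tool_schema,
            tool_choice="required",
        )
        # Wrap the first choice's message in a Plan stamped with the
        # current model step (assumed constructor arguments)
        return Plan(
            step=self.agent.model.steps,
            llm_plan=response.choices[0].message,
        )

This is why the test asserts result_plan.step == 5 (the mocked model step) and result_plan.llm_plan == "Final LLM message" (the mocked first choice's message): the base method only glues the tool manager, the LLM call, and the Plan wrapper together.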