diff --git a/autogen/oai/ollama.py b/autogen/oai/ollama.py
index cf154f635da9..636d152ae85c 100644
--- a/autogen/oai/ollama.py
+++ b/autogen/oai/ollama.py
@@ -127,7 +127,7 @@ def parse_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
 
         if "num_predict" in params:
             # Maximum number of tokens to predict, note: -1 is infinite, -2 is fill context, 128 is default
-            ollama_params["num_predict"] = validate_parameter(params, "num_predict", int, False, 128, None, None)
+            options_dict["num_predict"] = validate_parameter(params, "num_predict", int, False, 128, None, None)
 
         if "repeat_penalty" in params:
             options_dict["repeat_penalty"] = validate_parameter(
@@ -138,15 +138,15 @@ def parse_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
             options_dict["seed"] = validate_parameter(params, "seed", int, False, 42, None, None)
 
         if "temperature" in params:
-            ollama_params["temperature"] = validate_parameter(
+            options_dict["temperature"] = validate_parameter(
                 params, "temperature", (int, float), False, 0.8, None, None
             )
 
         if "top_k" in params:
-            ollama_params["top_k"] = validate_parameter(params, "top_k", int, False, 40, None, None)
+            options_dict["top_k"] = validate_parameter(params, "top_k", int, False, 40, None, None)
 
         if "top_p" in params:
-            ollama_params["top_p"] = validate_parameter(params, "top_p", (int, float), False, 0.9, None, None)
+            options_dict["top_p"] = validate_parameter(params, "top_p", (int, float), False, 0.9, None, None)
 
         if self._native_tool_calls and self._tools_in_conversation and not self._should_hide_tools:
             ollama_params["tools"] = params["tools"]
diff --git a/setup.py b/setup.py
index fcce7d5e9dcd..65f5cbe8f029 100644
--- a/setup.py
+++ b/setup.py
@@ -105,7 +105,7 @@
     "mistral": ["mistralai>=1.0.1"],
     "groq": ["groq>=0.9.0"],
    "cohere": ["cohere>=5.5.8"],
-    "ollama": ["ollama>=0.3.1", "fix_busted_json>=0.0.18"],
+    "ollama": ["ollama>=0.3.3", "fix_busted_json>=0.0.18"],
     "bedrock": ["boto3>=1.34.149"],
 }
 
diff --git a/test/oai/test_ollama.py b/test/oai/test_ollama.py
index 729e1b95d816..5491a04d7bec 100644
--- a/test/oai/test_ollama.py
+++ b/test/oai/test_ollama.py
@@ -65,13 +65,13 @@ def test_parsing_params(ollama_client):
     }
     expected_params = {
         "model": "llama3.1:8b",
-        "temperature": 0.8,
-        "num_predict": 128,
-        "top_k": 40,
-        "top_p": 0.9,
         "options": {
             "repeat_penalty": 1.1,
             "seed": 42,
+            "temperature": 0.8,
+            "num_predict": 128,
+            "top_k": 40,
+            "top_p": 0.9,
         },
         "stream": False,
     }