-
-
Notifications
You must be signed in to change notification settings - Fork 6.6k
feat(ovhcloud): Add support of responses API #22902
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| """OVHCloud Responses API support""" |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,113 @@ | ||
| """ | ||
| Support for OVHcloud AI Endpoints `/v1/responses` endpoint. | ||
|
|
||
| Our unified API follows the OpenAI standard. | ||
| More information on our website: https://oai.endpoints.kepler.ai.cloud.ovh.net/doc/gpt-oss-20b/openapi.json | ||
| """ | ||
| from typing import Optional | ||
| import litellm | ||
| from litellm._logging import verbose_logger | ||
| from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig | ||
| from litellm.secret_managers.main import get_secret_str | ||
| from litellm.types.router import GenericLiteLLMParams | ||
| from litellm.types.utils import LlmProviders | ||
| from litellm.utils import get_model_info | ||
|
|
||
class OVHCloudResponsesAPIConfig(OpenAIResponsesAPIConfig):
    """
    Configuration for OVHcloud AI Endpoints Responses API.

    Inherits from OpenAIResponsesAPIConfig since OVHcloud's Responses API follows
    the OpenAI specification.

    Reference: https://oai.endpoints.kepler.ai.cloud.ovh.net/doc/gpt-oss-20b/openapi.json
    """

    @property
    def custom_llm_provider(self) -> LlmProviders:
        """Identify this config as the OVHCLOUD provider."""
        return LlmProviders.OVHCLOUD

    def get_supported_openai_params(self, model: str) -> list:
        """
        Get supported OpenAI params, filtering tool-related params for models
        that don't support function calling.

        Details about function calling support can be found here:
        https://help.ovhcloud.com/csm/en-gb-public-cloud-ai-endpoints-function-calling?id=kb_article_view&sysparm_article=KB0071907
        """
        supported_params = super().get_supported_openai_params(model)

        supports_function_calling: Optional[bool] = None
        try:
            model_info = get_model_info(model, custom_llm_provider="ovhcloud")
            supports_function_calling = model_info.get(
                "supports_function_calling", False
            )
        except Exception as e:
            # Model metadata may be unavailable (e.g. custom model names);
            # fall through and treat the model as not supporting tools.
            verbose_logger.debug(f"Error getting supported OpenAI params: {e}")

        if supports_function_calling is not True:
            verbose_logger.debug(
                "You can see our models supporting function_calling in our catalog: https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/ "
            )
            # Build a filtered copy instead of calling .remove() on the list
            # returned by super() — mutating it in place would corrupt any
            # shared/cached list the parent class might return.
            supported_params = [
                param
                for param in supported_params
                if param not in ("tools", "tool_choice")
            ]

        return supported_params

    def validate_environment(
        self, headers: dict, model: str, litellm_params: Optional[GenericLiteLLMParams]
    ) -> dict:
        """
        Validate environment and set up headers for OVHcloud API.

        Resolution order for the API key: explicit litellm_params.api_key,
        the global litellm.api_key, litellm.ovhcloud_key, then the
        OVHCLOUD_API_KEY secret/environment variable.

        Raises:
            ValueError: if no API key can be resolved from any source.
        """
        litellm_params = litellm_params or GenericLiteLLMParams()
        api_key = (
            litellm_params.api_key
            or litellm.api_key
            or litellm.ovhcloud_key
            or get_secret_str("OVHCLOUD_API_KEY")
        )

        if not api_key:
            raise ValueError(
                "OVHcloud AI Endpoints API key is required. Set OVHCLOUD_API_KEY environment variable or pass api_key parameter."
            )

        headers.update(
            {
                "Authorization": f"Bearer {api_key}",
            }
        )
        return headers

    def get_complete_url(
        self,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Get the complete URL for OVHcloud AI Endpoints Responses API endpoint.

        Resolution order for the base URL: explicit api_base argument, the
        global litellm.api_base, the OVHCLOUD_API_BASE secret/environment
        variable, then the public OVHcloud default endpoint.

        Returns:
            str: The full URL for the OVHcloud AI Endpoints /v1/responses endpoint
        """
        api_base = (
            api_base
            or litellm.api_base
            or get_secret_str("OVHCLOUD_API_BASE")
            or "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1"
        )

        # Remove trailing slashes
        api_base = api_base.rstrip("/")

        # Avoid double-appending /responses
        if not api_base.endswith("/responses"):
            return f"{api_base}/responses"
        return api_base
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,99 @@ | ||
| """ | ||
| Tests for OVHCloud Responses API transformation | ||
| Tests the OVHCloudResponsesAPIConfig class that handles OVHCloud-specific | ||
| transformations for the Responses API. | ||
| Source: litellm/llms/ovhcloud/responses/transformation.py | ||
| """ | ||
| import os | ||
| import sys | ||
|
|
||
| sys.path.insert(0, os.path.abspath("../../../../..")) | ||
|
|
||
| import pytest | ||
|
|
||
| from litellm.llms.ovhcloud.responses.transformation import OVHCloudResponsesAPIConfig | ||
| from litellm.types.router import GenericLiteLLMParams | ||
| from litellm.types.utils import LlmProviders | ||
| from litellm.utils import ProviderConfigManager | ||
|
|
||
|
|
||
class TestOVHCloudResponsesAPITransformation:
    """Test OVHCloud Responses API configuration and transformations"""

    def test_ovhcloud_provider_config_registration(self):
        """Test that OVHCloud provider returns OVHCloudResponsesAPIConfig"""
        config = ProviderConfigManager.get_provider_responses_api_config(
            model="ovhcloud/gpt-oss-120b",
            provider=LlmProviders.OVHCLOUD,
        )

        assert config is not None, "Config should not be None for OVHCloud provider"
        assert isinstance(
            config, OVHCloudResponsesAPIConfig
        ), f"Expected OVHCloudResponsesAPIConfig, got {type(config)}"
        assert (
            config.custom_llm_provider == LlmProviders.OVHCLOUD
        ), "custom_llm_provider should be OVHCLOUD"

    def test_ovhcloud_responses_endpoint_url(self):
        """Test that get_complete_url returns correct OVHCloud endpoint"""
        config = OVHCloudResponsesAPIConfig()

        # Test with default OVHCloud API base
        url = config.get_complete_url(api_base=None, litellm_params={})
        assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/responses", f"Expected OVHCloud responses endpoint, got {url}"

        # Test with custom api_base
        custom_url = config.get_complete_url(
            api_base="https://custom.ovhcloud.example.com/v1",
            litellm_params={}
        )
        assert custom_url == "https://custom.ovhcloud.example.com/v1/responses", f"Expected custom endpoint, got {custom_url}"

        # Test with trailing slash
        url_with_slash = config.get_complete_url(
            api_base="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/",
            litellm_params={}
        )
        assert url_with_slash == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/responses", "Should handle trailing slash"

    def test_validate_environment_with_api_key(self):
        """Test that validate_environment sets Authorization header correctly"""
        config = OVHCloudResponsesAPIConfig()

        headers = {}
        litellm_params = GenericLiteLLMParams(api_key="test-api-key-123")

        result = config.validate_environment(
            headers=headers,
            model="ovhcloud/gpt-oss-120b",
            litellm_params=litellm_params
        )

        assert "Authorization" in result
        assert result["Authorization"] == "Bearer test-api-key-123"

    def test_validate_environment_missing_api_key(self):
        """Test that validate_environment raises error when API key is missing.

        Global fallbacks (litellm.api_key, litellm.ovhcloud_key, the
        OVHCLOUD_API_KEY env var via get_secret_str) are patched to None so
        the test is deterministic regardless of ambient state — otherwise a
        key leaked from another test or the developer's environment would
        prevent the ValueError from being raised.
        """
        # Local imports keep this isolation fix self-contained to the test.
        from unittest.mock import patch

        import litellm

        config = OVHCloudResponsesAPIConfig()

        headers = {}

        with (
            patch.object(litellm, "api_key", None),
            patch.object(litellm, "ovhcloud_key", None),
            patch(
                "litellm.llms.ovhcloud.responses.transformation.get_secret_str",
                return_value=None,
            ),
        ):
            with pytest.raises(ValueError, match="OVHcloud AI Endpoints API key is required"):
                config.validate_environment(
                    headers=headers,
                    model="ovhcloud/gpt-oss-120b",
                    litellm_params=None,
                )

    def test_supported_params_includes_openai_params(self):
        """Test that get_supported_openai_params includes standard OpenAI params"""
        config = OVHCloudResponsesAPIConfig()
        supported = config.get_supported_openai_params("ovhcloud/gpt-oss-120b")

        # OVHCloud follows OpenAI spec, so should support standard params
        assert "model" in supported, "model should be supported"
        assert "input" in supported, "input should be supported"
        assert "temperature" in supported, "temperature should be supported"
Uh oh!
There was an error while loading. Please reload this page.