Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions litellm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1449,6 +1449,7 @@ def set_global_gitlab_config(config: Dict[str, Any]) -> None:
from .llms.perplexity.responses.transformation import PerplexityResponsesConfig as PerplexityResponsesConfig
from .llms.databricks.responses.transformation import DatabricksResponsesAPIConfig as DatabricksResponsesAPIConfig
from .llms.openrouter.responses.transformation import OpenRouterResponsesAPIConfig as OpenRouterResponsesAPIConfig
from .llms.ovhcloud.responses.transformation import OVHCloudResponsesAPIConfig as OVHCloudResponsesAPIConfig
from .llms.gemini.interactions.transformation import GoogleAIStudioInteractionsConfig as GoogleAIStudioInteractionsConfig
from .llms.openai.chat.o_series_transformation import OpenAIOSeriesConfig as OpenAIOSeriesConfig, OpenAIOSeriesConfig as OpenAIO1Config
from .llms.anthropic.skills.transformation import AnthropicSkillsConfig as AnthropicSkillsConfig
Expand Down
5 changes: 5 additions & 0 deletions litellm/_lazy_imports_registry.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,7 @@
"PerplexityResponsesConfig",
"DatabricksResponsesAPIConfig",
"OpenRouterResponsesAPIConfig",
"OVHCloudResponsesAPIConfig",
"GoogleAIStudioInteractionsConfig",
"OpenAIOSeriesConfig",
"AnthropicSkillsConfig",
Expand Down Expand Up @@ -930,6 +931,10 @@
".llms.openrouter.responses.transformation",
"OpenRouterResponsesAPIConfig",
),
"OVHCloudResponsesAPIConfig": (
".llms.ovhcloud.responses.transformation",
"OVHCloudResponsesAPIConfig",
),
"GoogleAIStudioInteractionsConfig": (
".llms.gemini.interactions.transformation",
"GoogleAIStudioInteractionsConfig",
Expand Down
1 change: 1 addition & 0 deletions litellm/llms/ovhcloud/responses/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""OVHCloud Responses API support"""
113 changes: 113 additions & 0 deletions litellm/llms/ovhcloud/responses/transformation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
"""
Support for OVHcloud AI Endpoints `/v1/responses` endpoint.

Our unified API follows the OpenAI standard.
More information on our website: https://oai.endpoints.kepler.ai.cloud.ovh.net/doc/gpt-oss-20b/openapi.json
"""
from typing import Optional
import litellm
from litellm._logging import verbose_logger
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import LlmProviders
from litellm.utils import get_model_info

class OVHCloudResponsesAPIConfig(OpenAIResponsesAPIConfig):
    """
    Configuration for OVHCloud AI Endpoints Responses API.

    Inherits from OpenAIResponsesAPIConfig since OVHCloud's Responses API follows
    the OpenAI specification.

    Reference: https://oai.endpoints.kepler.ai.cloud.ovh.net/doc/gpt-oss-20b/openapi.json
    """

    @property
    def custom_llm_provider(self) -> LlmProviders:
        """Identify this config as belonging to the OVHCloud provider."""
        return LlmProviders.OVHCLOUD

    def get_supported_openai_params(self, model: str) -> list:
        """
        Get supported OpenAI params, filtering tool-related params for models
        that don't support function calling.

        Details about function calling support can be found here:
        https://help.ovhcloud.com/csm/en-gb-public-cloud-ai-endpoints-function-calling?id=kb_article_view&sysparm_article=KB0071907
        """
        supported_params = super().get_supported_openai_params(model)

        supports_function_calling: Optional[bool] = None
        try:
            model_info = get_model_info(model, custom_llm_provider="ovhcloud")
            supports_function_calling = model_info.get(
                "supports_function_calling", False
            )
        except Exception as e:
            # Unknown/unregistered model: leave the flag as None so the
            # conservative branch below strips the tool params.
            verbose_logger.debug(f"Error getting supported OpenAI params: {e}")

        if supports_function_calling is not True:
            verbose_logger.debug(
                "You can see our models supporting function_calling in our catalog: https://www.ovhcloud.com/en/public-cloud/ai-endpoints/catalog/ "
            )
            # Remove tool-related params for models that don't support function calling
            supported_params = [
                param
                for param in supported_params
                if param not in ("tools", "tool_choice")
            ]

        return supported_params

    def validate_environment(
        self, headers: dict, model: str, litellm_params: Optional[GenericLiteLLMParams]
    ) -> dict:
        """
        Validate environment and set up headers for OVHCloud API.

        Uses OVHCLOUD_API_KEY from environment or litellm_params.

        Raises:
            ValueError: if no API key can be resolved from params, globals,
                or the environment.
        """
        litellm_params = litellm_params or GenericLiteLLMParams()
        # Resolution order: explicit per-call key, global litellm key,
        # provider-specific global, then the environment/secret manager.
        api_key = (
            litellm_params.api_key
            or litellm.api_key
            or litellm.ovhcloud_key
            or get_secret_str("OVHCLOUD_API_KEY")
        )

        if not api_key:
            raise ValueError(
                "OVHcloud AI Endpoints API key is required. Set OVHCLOUD_API_KEY environment variable or pass api_key parameter."
            )

        headers.update(
            {
                "Authorization": f"Bearer {api_key}",
            }
        )
        return headers

    def get_complete_url(
        self,
        api_base: Optional[str],
        litellm_params: dict,
    ) -> str:
        """
        Get the complete URL for OVHcloud AI Endpoints Responses API endpoint.

        Returns:
            str: The full URL for the OVHcloud AI Endpoints /v1/responses endpoint
        """
        api_base = (
            api_base
            or litellm.api_base
            or get_secret_str("OVHCLOUD_API_BASE")
            or "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1"
        )

        # Remove trailing slashes
        api_base = api_base.rstrip("/")

        # Avoid double-appending /responses
        if not api_base.endswith("/responses"):
            return f"{api_base}/responses"
        return api_base
2 changes: 2 additions & 0 deletions litellm/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -8353,6 +8353,8 @@ def get_provider_responses_api_config(
return litellm.OpenRouterResponsesAPIConfig()
elif litellm.LlmProviders.HOSTED_VLLM == provider:
return litellm.HostedVLLMResponsesAPIConfig()
elif litellm.LlmProviders.OVHCLOUD == provider:
return litellm.OVHCloudResponsesAPIConfig()
return None

@staticmethod
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
"""
Tests for OVHCloud Responses API transformation
Tests the OVHCloudResponsesAPIConfig class that handles OVHCloud-specific
transformations for the Responses API.
Source: litellm/llms/ovhcloud/responses/transformation.py
"""
import os
import sys

sys.path.insert(0, os.path.abspath("../../../../.."))

import pytest

from litellm.llms.ovhcloud.responses.transformation import OVHCloudResponsesAPIConfig
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import LlmProviders
from litellm.utils import ProviderConfigManager


class TestOVHCloudResponsesAPITransformation:
"""Test OVHCloud Responses API configuration and transformations"""

def test_ovhcloud_provider_config_registration(self):
"""Test that OVHCloud provider returns OVHCloudResponsesAPIConfig"""
config = ProviderConfigManager.get_provider_responses_api_config(
model="ovhcloud/gpt-oss-120b",
provider=LlmProviders.OVHCLOUD,
)

assert config is not None, "Config should not be None for OVHCloud provider"
assert isinstance(
config, OVHCloudResponsesAPIConfig
), f"Expected OVHCloudResponsesAPIConfig, got {type(config)}"
assert (
config.custom_llm_provider == LlmProviders.OVHCLOUD
), "custom_llm_provider should be OVHCLOUD"

def test_ovhcloud_responses_endpoint_url(self):
"""Test that get_complete_url returns correct OVHCloud endpoint"""
config = OVHCloudResponsesAPIConfig()

# Test with default OVHCloud API base
url = config.get_complete_url(api_base=None, litellm_params={})
assert url == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/responses", f"Expected OVHCloud responses endpoint, got {url}"

# Test with custom api_base
custom_url = config.get_complete_url(
api_base="https://custom.ovhcloud.example.com/v1",
litellm_params={}
)
assert custom_url == "https://custom.ovhcloud.example.com/v1/responses", f"Expected custom endpoint, got {custom_url}"

# Test with trailing slash
url_with_slash = config.get_complete_url(
api_base="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/",
litellm_params={}
)
assert url_with_slash == "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/responses", "Should handle trailing slash"

def test_validate_environment_with_api_key(self):
"""Test that validate_environment sets Authorization header correctly"""
config = OVHCloudResponsesAPIConfig()

headers = {}
litellm_params = GenericLiteLLMParams(api_key="test-api-key-123")

result = config.validate_environment(
headers=headers,
model="ovhcloud/gpt-oss-120b",
litellm_params=litellm_params
)

assert "Authorization" in result
assert result["Authorization"] == "Bearer test-api-key-123"

def test_validate_environment_missing_api_key(self):
"""Test that validate_environment raises error when API key is missing"""
config = OVHCloudResponsesAPIConfig()

headers = {}

with pytest.raises(ValueError, match="OVHcloud AI Endpoints API key is required"):
config.validate_environment(
headers=headers,
model="ovhcloud/gpt-oss-120b",
litellm_params=None
Comment on lines +73 to +88
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Test may not raise due to uncleared global state

test_validate_environment_missing_api_key relies on litellm.api_key, litellm.ovhcloud_key, and the OVHCLOUD_API_KEY environment variable all being falsy. If any other test in the suite has set litellm.api_key (a common global that many providers fall back to), this test will silently pass through validate_environment without hitting the raise ValueError and pytest will fail with DID NOT RAISE.

The fix is to mock/patch the global keys and the secret getter for the duration of this test:

from unittest.mock import patch

def test_validate_environment_missing_api_key(self):
    """Test that validate_environment raises error when API key is missing"""
    config = OVHCloudResponsesAPIConfig()
    headers = {}

    with (
        patch.object(litellm, "api_key", None),
        patch.object(litellm, "ovhcloud_key", None),
        patch("litellm.llms.ovhcloud.responses.transformation.get_secret_str", return_value=None),
    ):
        with pytest.raises(ValueError, match="OVHcloud AI Endpoints API key is required"):
            config.validate_environment(
                headers=headers,
                model="ovhcloud/gpt-oss-120b",
                litellm_params=None,
            )

)

def test_supported_params_includes_openai_params(self):
"""Test that get_supported_openai_params includes standard OpenAI params"""
config = OVHCloudResponsesAPIConfig()
supported = config.get_supported_openai_params("ovhcloud/gpt-oss-120b")

# OVHCloud follows OpenAI spec, so should support standard params
assert "model" in supported, "model should be supported"
assert "input" in supported, "input should be supported"
assert "temperature" in supported, "temperature should be supported"
Loading