diff --git a/docs/my-website/docs/image_generation.md b/docs/my-website/docs/image_generation.md index b4eaef36521..7f27f48f910 100644 --- a/docs/my-website/docs/image_generation.md +++ b/docs/my-website/docs/image_generation.md @@ -15,7 +15,7 @@ import TabItem from '@theme/TabItem'; | Fallbacks | ✅ | Works between supported models | | Loadbalancing | ✅ | Works between supported models | | Guardrails | ✅ | Applies to input prompts (non-streaming only) | -| Supported Providers | OpenAI, Azure, Google AI Studio, Vertex AI, AWS Bedrock, Recraft, Xinference, Nscale | | +| Supported Providers | OpenAI, Azure, Google AI Studio, Vertex AI, AWS Bedrock, Recraft, OpenRouter, Xinference, Nscale | | ## Quick Start @@ -238,6 +238,27 @@ print(response) See Recraft usage with LiteLLM [here](./providers/recraft.md#image-generation) +## OpenRouter Image Generation Models + +Use this for image generation models available through OpenRouter (e.g., Google Gemini image generation models) + +#### Usage + +```python showLineNumbers +from litellm import image_generation +import os + +os.environ['OPENROUTER_API_KEY'] = "your-api-key" + +response = image_generation( + model="openrouter/google/gemini-2.5-flash-image", + prompt="A beautiful sunset over a calm ocean", + size="1024x1024", + quality="high", +) +print(response) +``` + ## OpenAI Compatible Image Generation Models Use this for calling `/image_generation` endpoints on OpenAI Compatible Servers, example https://github.com/xorbitsai/inference @@ -301,5 +322,6 @@ print(f"response: {response}") | Vertex AI | [Vertex AI Image Generation →](./providers/vertex_image) | | AWS Bedrock | [Bedrock Image Generation →](./providers/bedrock) | | Recraft | [Recraft Image Generation →](./providers/recraft#image-generation) | +| OpenRouter | [OpenRouter Image Generation →](./providers/openrouter#image-generation) | | Xinference | [Xinference Image Generation →](./providers/xinference#image-generation) | | Nscale | [Nscale Image Generation 
→](./providers/nscale#image-generation) | \ No newline at end of file diff --git a/docs/my-website/docs/providers/openrouter.md b/docs/my-website/docs/providers/openrouter.md index a1ed6c4466e..38eb998c98b 100644 --- a/docs/my-website/docs/providers/openrouter.md +++ b/docs/my-website/docs/providers/openrouter.md @@ -93,3 +93,120 @@ response = embedding( ) print(response) ``` + +## Image Generation + +OpenRouter supports image generation through select models like Google Gemini image generation models. LiteLLM transforms standard image generation requests to OpenRouter's chat completion format. + +### Supported Parameters + +- `size`: Maps to OpenRouter's `aspect_ratio` format + - `1024x1024` → `1:1` (square) + - `1536x1024` → `3:2` (landscape) + - `1024x1536` → `2:3` (portrait) + - `1792x1024` → `16:9` (wide landscape) + - `1024x1792` → `9:16` (tall portrait) + +- `quality`: Maps to OpenRouter's `image_size` format (Gemini models) + - `low` or `standard` → `1K` + - `medium` → `2K` + - `high` or `hd` → `4K` + +- `n`: Number of images to generate + +### Usage + +```python +from litellm import image_generation +import os + +os.environ["OPENROUTER_API_KEY"] = "your-api-key" + +# Basic image generation +response = image_generation( + model="openrouter/google/gemini-2.5-flash-image", + prompt="A beautiful sunset over a calm ocean", +) +print(response) +``` + +### Advanced Usage with Parameters + +```python +from litellm import image_generation +import os + +os.environ["OPENROUTER_API_KEY"] = "your-api-key" + +# Generate high-quality landscape image +response = image_generation( + model="openrouter/google/gemini-2.5-flash-image", + prompt="A serene mountain landscape with a lake", + size="1536x1024", # Landscape format + quality="high", # High quality (4K) +) + +# Access the generated image +image_data = response.data[0] +if image_data.b64_json: + # Base64 encoded image + print(f"Generated base64 image: {image_data.b64_json[:50]}...") +elif image_data.url: + # Image URL 
+ print(f"Generated image URL: {image_data.url}") +``` + +### Using OpenRouter-Specific Parameters + +You can also pass OpenRouter-specific parameters directly using `image_config`: + +```python +from litellm import image_generation +import os + +os.environ["OPENROUTER_API_KEY"] = "your-api-key" + +response = image_generation( + model="openrouter/google/gemini-2.5-flash-image", + prompt="A futuristic cityscape at night", + image_config={ + "aspect_ratio": "16:9", # OpenRouter native format + "image_size": "4K" # OpenRouter native format + } +) +print(response) +``` + +### Response Format + +The response follows the standard LiteLLM ImageResponse format: + +```python +{ + "created": 1703658209, + "data": [{ + "b64_json": "iVBORw0KGgoAAAANSUhEUgAA...", # Base64 encoded image + "url": None, + "revised_prompt": None + }], + "usage": { + "input_tokens": 10, + "output_tokens": 1290, + "total_tokens": 1300 + } +} +``` + +### Cost Tracking + +OpenRouter provides cost information in the response, which LiteLLM automatically tracks: + +```python +response = image_generation( + model="openrouter/google/gemini-2.5-flash-image", + prompt="A cute baby sea otter", +) + +# Cost is available in the response metadata +print(f"Request cost: ${response._hidden_params['additional_headers']['llm_provider-x-litellm-response-cost']}") +``` diff --git a/litellm/images/main.py b/litellm/images/main.py index cf588cbcf0f..1b09c20d350 100644 --- a/litellm/images/main.py +++ b/litellm/images/main.py @@ -404,6 +404,7 @@ def image_generation( # noqa: PLR0915 litellm.LlmProviders.STABILITY, litellm.LlmProviders.RUNWAYML, litellm.LlmProviders.VERTEX_AI, + litellm.LlmProviders.OPENROUTER ): if image_generation_config is None: raise ValueError( diff --git a/litellm/llms/openrouter/image_generation/__init__.py b/litellm/llms/openrouter/image_generation/__init__.py new file mode 100644 index 00000000000..f2d06439d40 --- /dev/null +++ b/litellm/llms/openrouter/image_generation/__init__.py @@ -0,0 +1,13 
"""OpenRouter image generation configuration package.

Exposes the provider config class plus the factory used by
``litellm.utils.get_provider_image_generation_config``.
"""

from litellm.llms.base_llm.image_generation.transformation import (
    BaseImageGenerationConfig,
)

from .transformation import OpenRouterImageGenerationConfig

# Both names are public: the config class itself and the factory that
# litellm/utils.py imports to resolve the provider config.
__all__ = [
    "OpenRouterImageGenerationConfig",
    "get_openrouter_image_generation_config",
]


def get_openrouter_image_generation_config(model: str) -> BaseImageGenerationConfig:
    """Return the image-generation config for an OpenRouter model.

    Args:
        model: Model name (currently unused — one config serves all
            OpenRouter image models; kept for interface parity with the
            other provider factories).

    Returns:
        A fresh ``OpenRouterImageGenerationConfig`` instance.
    """
    return OpenRouterImageGenerationConfig()
class OpenRouterImageGenerationConfig(BaseImageGenerationConfig):
    """
    Image generation for OpenRouter, layered on its chat-completions API.

    OpenRouter has no dedicated ``/images/generations`` route: image-capable
    models (e.g. ``google/gemini-2.5-flash-image``) take a normal chat request
    and return generated images inside ``choices[*].message.images``. This
    config therefore:

    * maps OpenAI-style params (``size``/``quality``/``n``) onto OpenRouter's
      ``image_config`` (``aspect_ratio``/``image_size``),
    * builds a chat-completion request body from the prompt, and
    * extracts images plus usage/cost data from the chat-completion response.
    """

    # Default endpoint when no api_base override is supplied.
    DEFAULT_BASE_URL = "https://openrouter.ai/api/v1/chat/completions"

    def get_supported_openai_params(
        self, model: str
    ) -> List[OpenAIImageGenerationOptionalParams]:
        """Return the OpenAI image-generation params this provider accepts."""
        return [
            "size",
            "quality",
            "n",
        ]

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        """Map OpenAI image params onto OpenRouter's ``image_config`` format.

        ``size`` -> ``image_config.aspect_ratio``; ``quality`` ->
        ``image_config.image_size``; other supported params (``n``) pass
        through unchanged. Unsupported params pass through only when
        ``drop_params`` is False.
        """
        supported_params = self.get_supported_openai_params(model)

        for key, value in non_default_params.items():
            if key not in supported_params:
                # Unknown param: forward it unless the caller asked us to drop.
                if not drop_params:
                    optional_params[key] = value
                continue
            if key == "size":
                optional_params.setdefault("image_config", {})[
                    "aspect_ratio"
                ] = self._map_size_to_aspect_ratio(value)
            elif key == "quality":
                image_size = self._map_quality_to_image_size(value)
                # Unknown quality strings map to None and are silently dropped
                # rather than sending an invalid value upstream.
                if image_size:
                    optional_params.setdefault("image_config", {})[
                        "image_size"
                    ] = image_size
            else:
                # Remaining supported params (e.g. "n") pass straight through.
                optional_params[key] = value

        return optional_params

    def _map_size_to_aspect_ratio(self, size: str) -> str:
        """Translate an OpenAI ``size`` string to an OpenRouter aspect ratio.

        Exact ratios are used where they exist; otherwise the closest
        OpenRouter-supported ratio is chosen (e.g. 1792x1024 ≈ 16:9).
        Unknown sizes fall back to the square default ``1:1``.
        """
        size_to_aspect_ratio = {
            # Square formats
            "256x256": "1:1",
            "512x512": "1:1",
            "1024x1024": "1:1",
            # Landscape formats
            "1536x1024": "3:2",  # 1.5:1 ratio, exact match for 3:2
            "1792x1024": "16:9",  # 1.75:1 ratio, closest to 16:9
            # Portrait formats
            "1024x1536": "2:3",  # 0.67:1 ratio, exact match for 2:3
            "1024x1792": "9:16",  # 0.57:1 ratio, closest to 9:16
            # Default
            "auto": "1:1",
        }
        return size_to_aspect_ratio.get(size, "1:1")

    def _map_quality_to_image_size(self, quality: str) -> Optional[str]:
        """Translate an OpenAI ``quality`` string to an OpenRouter image size.

        ``low``/``standard``/``auto`` -> ``1K``, ``medium`` -> ``2K``,
        ``high``/``hd`` -> ``4K``. Returns None for unrecognized values so the
        caller can skip the param entirely.
        """
        quality_to_image_size = {
            "low": "1K",
            "standard": "1K",
            "medium": "2K",
            "high": "4K",
            "hd": "4K",
            # Auto defaults to standard resolution
            "auto": "1K",
        }
        return quality_to_image_size.get(quality)

    def _set_usage_and_cost(
        self,
        model_response: ImageResponse,
        response_json: dict,
        model: str,
    ) -> None:
        """Populate usage and cost fields from an OpenRouter response.

        Args:
            model_response: ImageResponse object to populate in place.
            response_json: Parsed JSON response from OpenRouter.
            model: Fallback model name when the response omits one.
        """
        # `or {}` guards against an explicit null in the JSON payload, which
        # `.get(key, {})` alone would not catch.
        usage_data = response_json.get("usage") or {}
        if usage_data:
            prompt_tokens = usage_data.get("prompt_tokens", 0)
            total_tokens = usage_data.get("total_tokens", 0)

            completion_tokens_details = (
                usage_data.get("completion_tokens_details") or {}
            )
            image_tokens = completion_tokens_details.get("image_tokens", 0)

            model_response.usage = ImageUsage(
                input_tokens=prompt_tokens,
                input_tokens_details=ImageUsageInputTokensDetails(
                    image_tokens=0,  # Input doesn't contain images for generation
                    text_tokens=prompt_tokens,
                ),
                output_tokens=image_tokens,
                total_tokens=total_tokens,
            )

        if not hasattr(model_response, "_hidden_params"):
            model_response._hidden_params = {}

        # OpenRouter includes the charged cost directly in the usage block.
        cost = usage_data.get("cost")
        if cost is not None:
            if "additional_headers" not in model_response._hidden_params:
                model_response._hidden_params["additional_headers"] = {}
            model_response._hidden_params["additional_headers"][
                "llm_provider-x-litellm-response-cost"
            ] = float(cost)

        cost_details = usage_data.get("cost_details") or {}
        if cost_details:
            if "response_cost_details" not in model_response._hidden_params:
                model_response._hidden_params["response_cost_details"] = {}
            model_response._hidden_params["response_cost_details"].update(
                cost_details
            )

        model_response._hidden_params["model"] = response_json.get("model", model)

    def get_complete_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        optional_params: dict,
        litellm_params: dict,
        stream: Optional[bool] = None,
    ) -> str:
        """Build the request URL (OpenRouter's chat-completions endpoint).

        A custom ``api_base`` has ``/chat/completions`` appended unless it
        already ends with that path; otherwise the default OpenRouter URL
        is used.
        """
        if api_base:
            if api_base.endswith("/chat/completions"):
                return api_base
            return api_base.rstrip("/") + "/chat/completions"
        return self.DEFAULT_BASE_URL

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """Attach the Bearer auth header, resolving the API key in order:
        explicit arg -> ``litellm.api_key`` -> ``OPENROUTER_API_KEY`` secret.

        NOTE(review): if none of the sources yields a key, the header is sent
        as "Bearer None" and OpenRouter returns a 401 — consider raising a
        clear error here instead; confirm against the other provider configs.
        """
        api_key = (
            api_key
            or litellm.api_key
            or get_secret_str("OPENROUTER_API_KEY")
        )
        headers.update(
            {
                "Authorization": f"Bearer {api_key}",
            }
        )
        return headers

    def transform_image_generation_request(
        self,
        model: str,
        prompt: str,
        optional_params: dict,
        litellm_params: dict,
        headers: dict,
    ) -> dict:
        """Build an OpenRouter chat-completion body from an image prompt.

        Args:
            model: The model name.
            prompt: The image generation prompt (becomes the user message).
            optional_params: Extra params (e.g. ``image_config``, ``n``)
                forwarded verbatim into the body.
            litellm_params: LiteLLM parameters (unused here).
            headers: Request headers (unused here).

        Returns:
            dict: Chat-completion request body.
        """
        request_body: dict = {
            "model": model,
            "messages": [
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
        }

        # Forward caller-supplied params, but never let them clobber the
        # fields this method owns; "modalities" is reserved for OpenRouter.
        for key, value in optional_params.items():
            if key not in ("model", "messages", "modalities"):
                request_body[key] = value

        return request_body

    @staticmethod
    def _parse_image_url(image_url: str) -> ImageObject:
        """Convert one response image URL into an ImageObject.

        ``data:`` URLs (format ``data:image/png;base64,<payload>``) are split
        into their base64 payload; anything else is treated as a plain URL.
        """
        if image_url.startswith("data:"):
            parts = image_url.split(",", 1)
            b64_data = parts[1] if len(parts) > 1 else None
            return ImageObject(b64_json=b64_data, url=None, revised_prompt=None)
        return ImageObject(b64_json=None, url=image_url, revised_prompt=None)

    def transform_image_generation_response(
        self,
        model: str,
        raw_response: httpx.Response,
        model_response: ImageResponse,
        logging_obj: LiteLLMLoggingObj,
        request_data: dict,
        optional_params: dict,
        litellm_params: dict,
        encoding: Any,
        api_key: Optional[str] = None,
        json_mode: Optional[bool] = None,
    ) -> ImageResponse:
        """Convert an OpenRouter chat response into an ImageResponse.

        Extracts every image from ``choices[*].message.images`` and maps
        usage/cost information via ``_set_usage_and_cost``.

        Raises:
            OpenRouterException: if the body is not valid JSON (original
                status code preserved) or if extraction fails (status 500).
        """
        try:
            response_json = raw_response.json()
        except Exception as e:
            raise OpenRouterException(
                message=f"Error parsing OpenRouter response: {str(e)}",
                status_code=raw_response.status_code,
                headers=raw_response.headers,
            )

        if not model_response.data:
            model_response.data = []

        try:
            for choice in response_json.get("choices", []):
                message = choice.get("message", {})
                # `or []` tolerates an explicit null "images" field, which
                # chat responses without generated images may contain.
                for image_data in message.get("images") or []:
                    image_url = image_data.get("image_url", {}).get("url")
                    if image_url:
                        model_response.data.append(
                            self._parse_image_url(image_url)
                        )

            self._set_usage_and_cost(model_response, response_json, model)

            return model_response

        except Exception as e:
            raise OpenRouterException(
                message=f"Error transforming OpenRouter image generation response: {str(e)}",
                status_code=500,
                headers={},
            )

    def get_error_class(
        self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
    ) -> BaseLLMException:
        """Return the provider-specific exception class for error responses."""
        return OpenRouterException(
            message=error_message,
            status_code=status_code,
            headers=headers,
        )
class TestOpenRouterImageGenerationTransformation:
    """Behavior tests for OpenRouterImageGenerationConfig."""

    def setup_method(self):
        """Create a fresh config and shared fixtures before each test."""
        self.config = OpenRouterImageGenerationConfig()
        self.model = "google/gemini-2.5-flash-image"
        self.logging_obj = MagicMock()

    # ------------------------------------------------------------------
    # Test-local helpers (reduce call-site boilerplate)
    # ------------------------------------------------------------------

    def _raw_response(self, payload, status_code=200):
        """Return a MagicMock mimicking an httpx.Response with a JSON body."""
        response = MagicMock()
        response.json.return_value = payload
        response.status_code = status_code
        response.headers = {}
        return response

    def _transform_response(self, raw_response):
        """Run the response transformation with standard arguments."""
        return self.config.transform_image_generation_response(
            model=self.model,
            raw_response=raw_response,
            model_response=ImageResponse(data=[]),
            logging_obj=self.logging_obj,
            request_data={},
            optional_params={},
            litellm_params={},
            encoding=None,
        )

    def _map_params(self, non_default_params, drop_params=False):
        """Run map_openai_params against an empty optional_params dict."""
        return self.config.map_openai_params(
            non_default_params=non_default_params,
            optional_params={},
            model=self.model,
            drop_params=drop_params,
        )

    # ------------------------------------------------------------------
    # Supported params / mapping helpers
    # ------------------------------------------------------------------

    def test_get_supported_openai_params(self):
        """The config supports exactly size, quality and n."""
        supported = self.config.get_supported_openai_params(self.model)
        assert "size" in supported
        assert "quality" in supported
        assert "n" in supported
        assert len(supported) == 3

    def test_map_size_to_aspect_ratio_square(self):
        """All square sizes map to 1:1."""
        for size in ("256x256", "512x512", "1024x1024"):
            assert self.config._map_size_to_aspect_ratio(size) == "1:1"

    def test_map_size_to_aspect_ratio_landscape(self):
        """Landscape sizes map to their closest wide ratios."""
        assert self.config._map_size_to_aspect_ratio("1536x1024") == "3:2"
        assert self.config._map_size_to_aspect_ratio("1792x1024") == "16:9"

    def test_map_size_to_aspect_ratio_portrait(self):
        """Portrait sizes map to their closest tall ratios."""
        assert self.config._map_size_to_aspect_ratio("1024x1536") == "2:3"
        assert self.config._map_size_to_aspect_ratio("1024x1792") == "9:16"

    def test_map_size_to_aspect_ratio_auto(self):
        """'auto' falls back to the square default."""
        assert self.config._map_size_to_aspect_ratio("auto") == "1:1"

    def test_map_size_to_aspect_ratio_unknown(self):
        """Unrecognized sizes fall back to 1:1."""
        assert self.config._map_size_to_aspect_ratio("999x999") == "1:1"

    def test_map_quality_to_image_size_low(self):
        """Low-tier quality values resolve to 1K."""
        for quality in ("low", "standard", "auto"):
            assert self.config._map_quality_to_image_size(quality) == "1K"

    def test_map_quality_to_image_size_medium(self):
        """'medium' resolves to 2K."""
        assert self.config._map_quality_to_image_size("medium") == "2K"

    def test_map_quality_to_image_size_high(self):
        """High-tier quality values resolve to 4K."""
        for quality in ("high", "hd"):
            assert self.config._map_quality_to_image_size(quality) == "4K"

    def test_map_quality_to_image_size_unknown(self):
        """Unrecognized quality returns None so the param is dropped."""
        assert self.config._map_quality_to_image_size("unknown") is None

    # ------------------------------------------------------------------
    # map_openai_params
    # ------------------------------------------------------------------

    def test_map_openai_params_size_only(self):
        """size alone populates image_config.aspect_ratio."""
        result = self._map_params({"size": "1024x1024"})
        assert "image_config" in result
        assert result["image_config"]["aspect_ratio"] == "1:1"

    def test_map_openai_params_quality_only(self):
        """quality alone populates image_config.image_size."""
        result = self._map_params({"quality": "high"})
        assert "image_config" in result
        assert result["image_config"]["image_size"] == "4K"

    def test_map_openai_params_size_and_quality(self):
        """size and quality both land in the same image_config dict."""
        result = self._map_params({"size": "1792x1024", "quality": "hd"})
        assert "image_config" in result
        assert result["image_config"]["aspect_ratio"] == "16:9"
        assert result["image_config"]["image_size"] == "4K"

    def test_map_openai_params_with_n_parameter(self):
        """n passes through at the top level alongside image_config."""
        result = self._map_params({"size": "1024x1024", "n": 2})
        assert "image_config" in result
        assert result["image_config"]["aspect_ratio"] == "1:1"
        assert result["n"] == 2

    def test_map_openai_params_unsupported_param_drop_false(self):
        """Unsupported params pass through when drop_params=False."""
        result = self._map_params({"size": "1024x1024", "unsupported_param": "value"})
        assert "image_config" in result
        assert result["unsupported_param"] == "value"

    def test_map_openai_params_unsupported_param_drop_true(self):
        """Unsupported params are dropped when drop_params=True."""
        result = self._map_params(
            {"size": "1024x1024", "unsupported_param": "value"}, drop_params=True
        )
        assert "image_config" in result
        assert "unsupported_param" not in result

    # ------------------------------------------------------------------
    # URL construction and environment validation
    # ------------------------------------------------------------------

    def test_get_complete_url_default(self):
        """No api_base yields the default OpenRouter endpoint."""
        url = self.config.get_complete_url(
            api_base=None,
            api_key="test_key",
            model=self.model,
            optional_params={},
            litellm_params={},
        )
        assert url == "https://openrouter.ai/api/v1/chat/completions"

    def test_get_complete_url_with_custom_base(self):
        """A custom api_base gets /chat/completions appended."""
        custom_base = "https://custom.openrouter.ai/api/v1"
        url = self.config.get_complete_url(
            api_base=custom_base,
            api_key="test_key",
            model=self.model,
            optional_params={},
            litellm_params={},
        )
        assert url == f"{custom_base}/chat/completions"

    def test_get_complete_url_with_base_already_complete(self):
        """/chat/completions is never duplicated."""
        custom_base = "https://custom.openrouter.ai/api/v1/chat/completions"
        url = self.config.get_complete_url(
            api_base=custom_base,
            api_key="test_key",
            model=self.model,
            optional_params={},
            litellm_params={},
        )
        assert url == custom_base

    @patch("litellm.llms.openrouter.image_generation.transformation.get_secret_str")
    def test_validate_environment_with_api_key(self, mock_get_secret):
        """An explicit api_key becomes the Bearer token; no secret lookup."""
        result = self.config.validate_environment(
            headers={},
            model=self.model,
            messages=[],
            optional_params={},
            litellm_params={},
            api_key="test_api_key",
        )
        assert result["Authorization"] == "Bearer test_api_key"
        mock_get_secret.assert_not_called()

    @patch("litellm.llms.openrouter.image_generation.transformation.get_secret_str")
    def test_validate_environment_with_secret_key(self, mock_get_secret):
        """With api_key=None the OPENROUTER_API_KEY secret is used."""
        mock_get_secret.return_value = "secret_api_key"
        result = self.config.validate_environment(
            headers={},
            model=self.model,
            messages=[],
            optional_params={},
            litellm_params={},
            api_key=None,
        )
        assert result["Authorization"] == "Bearer secret_api_key"
        mock_get_secret.assert_called_once_with("OPENROUTER_API_KEY")

    # ------------------------------------------------------------------
    # Request transformation
    # ------------------------------------------------------------------

    def test_transform_image_generation_request_basic(self):
        """The prompt becomes a single user message; no extras are added."""
        prompt = "A beautiful sunset over mountains"
        body = self.config.transform_image_generation_request(
            model=self.model,
            prompt=prompt,
            optional_params={},
            litellm_params={},
            headers={},
        )
        assert body["model"] == self.model
        assert body["messages"] == [{"role": "user", "content": prompt}]
        assert "modalities" not in body  # modalities should not be added by default

    def test_transform_image_generation_request_with_image_config(self):
        """image_config and n are forwarded into the request body."""
        prompt = "A beautiful sunset"
        body = self.config.transform_image_generation_request(
            model=self.model,
            prompt=prompt,
            optional_params={
                "image_config": {"aspect_ratio": "16:9", "image_size": "4K"},
                "n": 2,
            },
            litellm_params={},
            headers={},
        )
        assert body["model"] == self.model
        assert body["messages"] == [{"role": "user", "content": prompt}]
        assert body["image_config"]["aspect_ratio"] == "16:9"
        assert body["image_config"]["image_size"] == "4K"
        assert body["n"] == 2

    # ------------------------------------------------------------------
    # Response transformation
    # ------------------------------------------------------------------

    def test_transform_image_generation_response_with_base64_images(self):
        """data: URLs are split into their base64 payloads."""
        payload = {
            "choices": [
                {
                    "message": {
                        "content": "Here is your image!",
                        "role": "assistant",
                        "images": [
                            {
                                "image_url": {
                                    "url": "data:image/png;base64,iVBORw0KGgoAAAANS"
                                },
                                "index": 0,
                                "type": "image_url",
                            }
                        ],
                    }
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 1300,
                "total_tokens": 1310,
                "completion_tokens_details": {"image_tokens": 1290},
                "cost": 0.0387243,
            },
            "model": "google/gemini-2.5-flash-image",
        }

        result = self._transform_response(self._raw_response(payload))

        assert len(result.data) == 1
        assert result.data[0].b64_json == "iVBORw0KGgoAAAANS"
        assert result.data[0].url is None

    def test_transform_image_generation_response_with_url_images(self):
        """Plain URLs are kept as-is in the url field."""
        payload = {
            "choices": [
                {
                    "message": {
                        "content": "Here is your image!",
                        "role": "assistant",
                        "images": [
                            {
                                "image_url": {"url": "https://example.com/image.png"},
                                "index": 0,
                                "type": "image_url",
                            }
                        ],
                    }
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 1300,
                "total_tokens": 1310,
            },
            "model": "google/gemini-2.5-flash-image",
        }

        result = self._transform_response(self._raw_response(payload))

        assert len(result.data) == 1
        assert result.data[0].url == "https://example.com/image.png"
        assert result.data[0].b64_json is None

    def test_transform_image_generation_response_with_usage_and_cost(self):
        """Usage tokens, cost header, cost details, and model are populated."""
        payload = {
            "choices": [
                {
                    "message": {
                        "content": "Here is your image!",
                        "role": "assistant",
                        "images": [
                            {
                                "image_url": {"url": "data:image/png;base64,abc123"},
                                "index": 0,
                                "type": "image_url",
                            }
                        ],
                    }
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 1300,
                "total_tokens": 1310,
                "completion_tokens_details": {"image_tokens": 1290},
                "cost": 0.0387243,
                "cost_details": {"input_cost": 0.001, "output_cost": 0.037},
            },
            "model": "google/gemini-2.5-flash-image",
        }

        result = self._transform_response(self._raw_response(payload))

        # Usage
        assert result.usage is not None
        assert result.usage.input_tokens == 10
        assert result.usage.output_tokens == 1290
        assert result.usage.total_tokens == 1310
        assert result.usage.input_tokens_details.text_tokens == 10
        assert result.usage.input_tokens_details.image_tokens == 0

        # Cost
        assert hasattr(result, "_hidden_params")
        assert "additional_headers" in result._hidden_params
        assert (
            result._hidden_params["additional_headers"][
                "llm_provider-x-litellm-response-cost"
            ]
            == 0.0387243
        )

        # Cost details
        assert "response_cost_details" in result._hidden_params
        assert result._hidden_params["response_cost_details"]["input_cost"] == 0.001
        assert result._hidden_params["response_cost_details"]["output_cost"] == 0.037

        # Model
        assert result._hidden_params["model"] == "google/gemini-2.5-flash-image"

    def test_transform_image_generation_response_multiple_images(self):
        """Every image in the message is extracted, in order."""
        payload = {
            "choices": [
                {
                    "message": {
                        "content": "Here are your images!",
                        "role": "assistant",
                        "images": [
                            {
                                "image_url": {"url": "data:image/png;base64,image1data"},
                                "index": 0,
                                "type": "image_url",
                            },
                            {
                                "image_url": {"url": "data:image/png;base64,image2data"},
                                "index": 1,
                                "type": "image_url",
                            },
                        ],
                    }
                }
            ],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 2600,
                "total_tokens": 2610,
            },
            "model": "google/gemini-2.5-flash-image",
        }

        result = self._transform_response(self._raw_response(payload))

        assert len(result.data) == 2
        assert result.data[0].b64_json == "image1data"
        assert result.data[1].b64_json == "image2data"

    def test_transform_image_generation_response_json_error(self):
        """An unparsable body raises OpenRouterException with the HTTP status."""
        raw = self._raw_response({}, status_code=500)
        raw.json.side_effect = json.JSONDecodeError("Invalid JSON", "", 0)

        with pytest.raises(OpenRouterException) as exc_info:
            self._transform_response(raw)

        assert "Error parsing OpenRouter response" in str(exc_info.value)
        assert exc_info.value.status_code == 500

    def test_transform_image_generation_response_transformation_error(self):
        """A malformed images field raises a wrapped OpenRouterException."""
        payload = {
            "choices": [
                {
                    "message": {
                        "content": "Here is your image!",
                        "role": "assistant",
                        "images": "invalid_format",  # Invalid format
                    }
                }
            ]
        }

        with pytest.raises(OpenRouterException) as exc_info:
            self._transform_response(self._raw_response(payload))

        assert "Error transforming OpenRouter image generation response" in str(
            exc_info.value
        )

    def test_get_error_class(self):
        """get_error_class wraps errors in OpenRouterException."""
        error = self.config.get_error_class(
            error_message="Test error",
            status_code=400,
            headers={"Content-Type": "application/json"},
        )

        assert isinstance(error, OpenRouterException)
        assert "Test error" in str(error)
        assert error.status_code == 400