Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 54 additions & 0 deletions .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -839,6 +839,52 @@ jobs:
paths:
- guardrails_coverage.xml
- guardrails_coverage

# CircleCI job: runs the unified Google generate-content endpoint test suite
# (tests/unified_google_tests) on a Python 3.11 Docker image, then publishes
# JUnit results and coverage artifacts for downstream aggregation.
google_generate_content_endpoint_testing:
docker:
- image: cimg/python:3.11
auth:
# DockerHub credentials come from the CircleCI project/context environment.
username: ${DOCKERHUB_USERNAME}
password: ${DOCKERHUB_PASSWORD}
working_directory: ~/project

steps:
- checkout
- setup_google_dns
- run:
name: Install Dependencies
# Test-only dependencies are pinned to exact versions so this job is
# reproducible independently of requirements.txt drift.
command: |
python -m pip install --upgrade pip
python -m pip install -r requirements.txt
pip install "pytest==7.3.1"
pip install "pytest-retry==1.6.3"
pip install "pytest-cov==5.0.0"
pip install "pytest-asyncio==0.21.1"
pip install "respx==0.22.0"
pip install "pydantic==2.10.2"
# Run pytest and generate JUnit XML report
- run:
name: Run tests
# -x stops on first failure; --junitxml feeds the store_test_results step;
# --cov writes coverage.xml/.coverage which are renamed below.
command: |
pwd
ls
python -m pytest -vv tests/unified_google_tests --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5
no_output_timeout: 120m
- run:
name: Rename the coverage files
# Rename so this job's coverage does not collide with coverage files
# produced by sibling jobs when they are merged in the shared workspace.
command: |
mv coverage.xml google_generate_content_endpoint_coverage.xml
mv .coverage google_generate_content_endpoint_coverage

# Store test results
- store_test_results:
path: test-results
- persist_to_workspace:
root: .
paths:
- google_generate_content_endpoint_coverage.xml
- google_generate_content_endpoint_coverage

llm_responses_api_testing:
docker:
- image: cimg/python:3.11
Expand Down Expand Up @@ -3001,6 +3047,12 @@ workflows:
only:
- main
- /litellm_.*/
- google_generate_content_endpoint_testing:
filters:
branches:
only:
- main
- /litellm_.*/
- llm_responses_api_testing:
filters:
branches:
Expand Down Expand Up @@ -3047,6 +3099,7 @@ workflows:
requires:
- llm_translation_testing
- mcp_testing
- google_generate_content_endpoint_testing
- guardrails_testing
- llm_responses_api_testing
- litellm_mapped_tests
Expand Down Expand Up @@ -3106,6 +3159,7 @@ workflows:
- test_bad_database_url
- llm_translation_testing
- mcp_testing
- google_generate_content_endpoint_testing
- llm_responses_api_testing
- litellm_mapped_tests
- batches_testing
Expand Down
16 changes: 16 additions & 0 deletions litellm/google_genai/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,14 @@
GenerateContentConfigDict,
GenerateContentContentListUnionDict,
GenerateContentResponse,
ToolConfigDict,
)
else:
GenerateContentConfigDict = Any
GenerateContentContentListUnionDict = Any
GenerateContentResponse = Any
ToolConfigDict = Any


####### ENVIRONMENT VARIABLES ###################
# Initialize any necessary instances or variables here
Expand Down Expand Up @@ -83,6 +86,7 @@ def setup_generate_content_call(
config: Optional[GenerateContentConfigDict] = None,
custom_llm_provider: Optional[str] = None,
stream: bool = False,
tools: Optional[ToolConfigDict] = None,
**kwargs,
) -> GenerateContentSetupResult:
"""
Expand Down Expand Up @@ -166,6 +170,7 @@ def setup_generate_content_call(
generate_content_provider_config.transform_generate_content_request(
model=model,
contents=contents,
tools=tools,
generate_content_config_dict=generate_content_config_dict,
)
)
Expand Down Expand Up @@ -200,6 +205,7 @@ async def agenerate_content(
model: str,
contents: GenerateContentContentListUnionDict,
config: Optional[GenerateContentConfigDict] = None,
tools: Optional[ToolConfigDict] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Optional[Dict[str, Any]] = None,
Expand Down Expand Up @@ -235,6 +241,7 @@ async def agenerate_content(
extra_body=extra_body,
timeout=timeout,
custom_llm_provider=custom_llm_provider,
tools=tools,
**kwargs,
)

Expand Down Expand Up @@ -263,6 +270,7 @@ def generate_content(
model: str,
contents: GenerateContentContentListUnionDict,
config: Optional[GenerateContentConfigDict] = None,
tools: Optional[ToolConfigDict] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Optional[Dict[str, Any]] = None,
Expand Down Expand Up @@ -296,6 +304,7 @@ def generate_content(
config=config,
custom_llm_provider=custom_llm_provider,
stream=False,
tools=tools,
**kwargs,
)

Expand All @@ -316,6 +325,7 @@ def generate_content(
response = base_llm_http_handler.generate_content_handler(
model=setup_result.model,
contents=contents,
tools=tools,
generate_content_provider_config=setup_result.generate_content_provider_config,
generate_content_config_dict=setup_result.generate_content_config_dict,
custom_llm_provider=setup_result.custom_llm_provider,
Expand Down Expand Up @@ -346,6 +356,7 @@ async def agenerate_content_stream(
model: str,
contents: GenerateContentContentListUnionDict,
config: Optional[GenerateContentConfigDict] = None,
tools: Optional[ToolConfigDict] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Optional[Dict[str, Any]] = None,
Expand Down Expand Up @@ -377,6 +388,7 @@ async def agenerate_content_stream(
"config": config,
"custom_llm_provider": custom_llm_provider,
"stream": True,
"tools": tools,
**kwargs,
}
)
Expand All @@ -402,6 +414,7 @@ async def agenerate_content_stream(
contents=contents,
generate_content_provider_config=setup_result.generate_content_provider_config,
generate_content_config_dict=setup_result.generate_content_config_dict,
tools=tools,
custom_llm_provider=setup_result.custom_llm_provider,
litellm_params=setup_result.litellm_params,
logging_obj=setup_result.litellm_logging_obj,
Expand Down Expand Up @@ -429,6 +442,7 @@ def generate_content_stream(
model: str,
contents: GenerateContentContentListUnionDict,
config: Optional[GenerateContentConfigDict] = None,
tools: Optional[ToolConfigDict] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Optional[Dict[str, Any]] = None,
Expand All @@ -454,6 +468,7 @@ def generate_content_stream(
config=config,
custom_llm_provider=custom_llm_provider,
stream=True,
tools=tools,
**kwargs,
)

Expand All @@ -476,6 +491,7 @@ def generate_content_stream(
contents=contents,
generate_content_provider_config=setup_result.generate_content_provider_config,
generate_content_config_dict=setup_result.generate_content_config_dict,
tools=tools,
custom_llm_provider=setup_result.custom_llm_provider,
litellm_params=setup_result.litellm_params,
logging_obj=setup_result.litellm_logging_obj,
Expand Down
4 changes: 4 additions & 0 deletions litellm/llms/base_llm/google_genai/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,14 @@
GenerateContentConfigDict,
GenerateContentContentListUnionDict,
GenerateContentResponse,
ToolConfigDict,
)
else:
GenerateContentConfigDict = Any
GenerateContentContentListUnionDict = Any
GenerateContentResponse = Any
LiteLLMLoggingObj = Any
ToolConfigDict = Any

from litellm.types.router import GenericLiteLLMParams

Expand Down Expand Up @@ -145,6 +147,7 @@ def transform_generate_content_request(
self,
model: str,
contents: GenerateContentContentListUnionDict,
tools: Optional[ToolConfigDict],
generate_content_config_dict: Dict,
) -> dict:
"""
Expand All @@ -153,6 +156,7 @@ def transform_generate_content_request(
Args:
model: The model name
contents: Input contents
tools: Tools
generate_content_request_params: Request parameters
litellm_params: LiteLLM parameters
headers: Request headers
Expand Down
5 changes: 5 additions & 0 deletions litellm/llms/custom_httpx/llm_http_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -3196,6 +3196,7 @@ def generate_content_handler(
contents: Any,
generate_content_provider_config: BaseGoogleGenAIGenerateContentConfig,
generate_content_config_dict: Dict,
tools: Any,
custom_llm_provider: str,
litellm_params: GenericLiteLLMParams,
logging_obj: LiteLLMLoggingObj,
Expand All @@ -3221,6 +3222,7 @@ def generate_content_handler(
contents=contents,
generate_content_provider_config=generate_content_provider_config,
generate_content_config_dict=generate_content_config_dict,
tools=tools,
custom_llm_provider=custom_llm_provider,
litellm_params=litellm_params,
logging_obj=logging_obj,
Expand Down Expand Up @@ -3256,6 +3258,7 @@ def generate_content_handler(
data = generate_content_provider_config.transform_generate_content_request(
model=model,
contents=contents,
tools=tools,
generate_content_config_dict=generate_content_config_dict,
)

Expand Down Expand Up @@ -3317,6 +3320,7 @@ async def async_generate_content_handler(
contents: Any,
generate_content_provider_config: BaseGoogleGenAIGenerateContentConfig,
generate_content_config_dict: Dict,
tools: Any,
custom_llm_provider: str,
litellm_params: GenericLiteLLMParams,
logging_obj: LiteLLMLoggingObj,
Expand Down Expand Up @@ -3360,6 +3364,7 @@ async def async_generate_content_handler(
data = generate_content_provider_config.transform_generate_content_request(
model=model,
contents=contents,
tools=tools,
generate_content_config_dict=generate_content_config_dict,
)

Expand Down
5 changes: 5 additions & 0 deletions litellm/llms/gemini/google_genai/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,14 @@
GenerateContentConfigDict,
GenerateContentContentListUnionDict,
GenerateContentResponse,
ToolConfigDict,
)
else:
GenerateContentConfigDict = Any
GenerateContentContentListUnionDict = Any
GenerateContentResponse = Any
ToolConfigDict = Any

from ..common_utils import get_api_key_from_env

class GoogleGenAIConfig(BaseGoogleGenAIGenerateContentConfig, VertexLLM):
Expand Down Expand Up @@ -258,6 +261,7 @@ def transform_generate_content_request(
self,
model: str,
contents: GenerateContentContentListUnionDict,
tools: Optional[ToolConfigDict],
generate_content_config_dict: Dict,
) -> dict:
from litellm.types.google_genai.main import (
Expand All @@ -267,6 +271,7 @@ def transform_generate_content_request(
typed_generate_content_request = GenerateContentRequestDict(
model=model,
contents=contents,
tools=tools,
generationConfig=GenerateContentConfigDict(**generate_content_config_dict),
)

Expand Down
25 changes: 24 additions & 1 deletion litellm/llms/vertex_ai/google_genai/transformation.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,39 @@
"""
Transformation for Calling Google models in their native format.
"""
from typing import Literal
from typing import Literal, Optional, Union

from litellm.llms.gemini.google_genai.transformation import GoogleGenAIConfig
from litellm.types.router import GenericLiteLLMParams


class VertexAIGoogleGenAIConfig(GoogleGenAIConfig):
    """
    Configuration for calling Google models in their native format.

    Identical to the Gemini config except that the provider is reported as
    ``vertex_ai`` and authentication uses an ``Authorization: Bearer`` header.
    """

    HEADER_NAME = "Authorization"
    BEARER_PREFIX = "Bearer"

    @property
    def custom_llm_provider(self) -> Literal["gemini", "vertex_ai"]:
        return "vertex_ai"

    def validate_environment(
        self,
        api_key: Optional[str],
        headers: Optional[dict],
        model: str,
        litellm_params: Optional[Union[GenericLiteLLMParams, dict]],
    ) -> dict:
        """
        Build the request headers for a Vertex AI generate-content call.

        Starts from a JSON content type, attaches a bearer token when an
        API key is provided, and lets caller-supplied ``headers`` override
        any default entry.
        """
        request_headers: dict = {"Content-Type": "application/json"}

        if api_key is not None:
            request_headers[self.HEADER_NAME] = f"{self.BEARER_PREFIX} {api_key}"

        # Caller-supplied headers take precedence over the defaults above.
        if headers is not None:
            request_headers.update(headers)

        return request_headers

8 changes: 3 additions & 5 deletions litellm/proxy/proxy_config.yaml
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
model_list:
- model_name: openai/*
- model_name: vertex_ai/*
litellm_params:
model: openai/*

model: vertex_ai/*

litellm_settings:
success_callback: ["mlflow"]
failure_callback: ["mlflow"]
callbacks: ["datadog_llm_observability"]
2 changes: 2 additions & 0 deletions litellm/types/google_genai/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,11 @@
GenerateContentContentListUnionDict = _genai_types.ContentListUnionDict
GenerateContentConfigDict = _genai_types.GenerateContentConfigDict
GenerateContentRequestParametersDict = _genai_types._GenerateContentParametersDict
ToolConfigDict = _genai_types.ToolConfigDict

class GenerateContentRequestDict(GenerateContentRequestParametersDict): # type: ignore[misc]
generationConfig: Optional[Any]
tools: Optional[ToolConfigDict]


class GenerateContentResponse(GoogleGenAIGenerateContentResponse, BaseLiteLLMOpenAIResponseObject): # type: ignore[misc]
Expand Down
Loading
Loading