diff --git a/litellm/llms/vertex_ai/gemini/transformation.py b/litellm/llms/vertex_ai/gemini/transformation.py
index 6a5ac92816b..5d397297891 100644
--- a/litellm/llms/vertex_ai/gemini/transformation.py
+++ b/litellm/llms/vertex_ai/gemini/transformation.py
@@ -533,11 +533,12 @@ def _pop_and_merge_extra_body(data: RequestBody, optional_params: dict) -> None:
     """Pop extra_body from optional_params and shallow-merge into data, deep-merging dict values."""
     extra_body: Optional[dict] = optional_params.pop("extra_body", None)
     if extra_body is not None:
+        data_dict: dict = data  # type: ignore[assignment]
         for k, v in extra_body.items():
-            if k in data and isinstance(data[k], dict) and isinstance(v, dict):
-                data[k].update(v)
+            if k in data_dict and isinstance(data_dict[k], dict) and isinstance(v, dict):
+                data_dict[k].update(v)
             else:
-                data[k] = v
+                data_dict[k] = v
 
 
 def _transform_request_body(
diff --git a/litellm/proxy/_experimental/mcp_server/server.py b/litellm/proxy/_experimental/mcp_server/server.py
index ec31652aa54..ba107a9dd10 100644
--- a/litellm/proxy/_experimental/mcp_server/server.py
+++ b/litellm/proxy/_experimental/mcp_server/server.py
@@ -2029,7 +2029,7 @@ async def handle_streamable_http_mcp(
     # Inject masked debug headers when client sends x-litellm-mcp-debug: true
     _debug_headers = MCPDebug.maybe_build_debug_headers(
         raw_headers=raw_headers,
-        scope=scope,
+        scope=dict(scope),
         mcp_servers=mcp_servers,
         mcp_auth_header=mcp_auth_header,
         mcp_server_auth_headers=mcp_server_auth_headers,
diff --git a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py
index fb587975ed5..56b513554a8 100644
--- a/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py
+++ b/litellm/proxy/pass_through_endpoints/pass_through_endpoints.py
@@ -1193,14 +1193,11 @@ async def endpoint_func(  # type: ignore
         final_query_params.update(query_params)
 
         # When a caller (e.g. bedrock_proxy_route) supplies a pre-built
         # body, use it instead of the body parsed from the raw request.
+        final_custom_body: Optional[dict] = None
         if custom_body is not None:
             final_custom_body = custom_body
-        else:
-            final_custom_body = (
-                custom_body_data
-                if isinstance(custom_body_data, dict) or custom_body_data is None
-                else None
-            )
+        elif isinstance(custom_body_data, dict):
+            final_custom_body = custom_body_data
 
         return await pass_through_request(  # type: ignore
             request=request,
diff --git a/litellm/proxy/vector_store_endpoints/endpoints.py b/litellm/proxy/vector_store_endpoints/endpoints.py
index 0775e05f4fa..30cabd3eeff 100644
--- a/litellm/proxy/vector_store_endpoints/endpoints.py
+++ b/litellm/proxy/vector_store_endpoints/endpoints.py
@@ -1,4 +1,4 @@
-from typing import Dict, Optional
+from typing import Any, Dict, Optional
 
 from fastapi import APIRouter, Depends, HTTPException, Request, Response
 
@@ -230,7 +230,7 @@ async def vector_store_create(
     )
 
     # Get managed vector stores hook
-    managed_vector_stores = proxy_logging_obj.get_proxy_hook("managed_vector_stores")
+    managed_vector_stores: Any = proxy_logging_obj.get_proxy_hook("managed_vector_stores")
     if managed_vector_stores is None:
         raise HTTPException(
             status_code=500,
diff --git a/litellm/responses/litellm_completion_transformation/transformation.py b/litellm/responses/litellm_completion_transformation/transformation.py
index 08e31c59662..900f56fea26 100644
--- a/litellm/responses/litellm_completion_transformation/transformation.py
+++ b/litellm/responses/litellm_completion_transformation/transformation.py
@@ -1500,7 +1500,7 @@ def transform_chat_completion_response_to_responses_api_response(
         previous_response_id=getattr(
             chat_completion_response, "previous_response_id", None
         ),
-        reasoning=Reasoning(),
+        reasoning=dict(Reasoning()),
         status=LiteLLMCompletionResponsesConfig._map_chat_completion_finish_reason_to_responses_status(
             finish_reason
         ),
@@ -1516,7 +1516,7 @@ def transform_chat_completion_response_to_responses_api_response(
     # Surface provider-specific fields (generic passthrough from any provider)
     provider_fields = responses_api_response._hidden_params.get("provider_specific_fields")
     if provider_fields:
-        responses_api_response.provider_specific_fields = provider_fields
+        setattr(responses_api_response, "provider_specific_fields", provider_fields)
 
     return responses_api_response
 
diff --git a/litellm/types/proxy/guardrails/guardrail_hooks/zscaler_ai_guard.py b/litellm/types/proxy/guardrails/guardrail_hooks/zscaler_ai_guard.py
index 7cbdf751e1b..f522f5b470a 100644
--- a/litellm/types/proxy/guardrails/guardrail_hooks/zscaler_ai_guard.py
+++ b/litellm/types/proxy/guardrails/guardrail_hooks/zscaler_ai_guard.py
@@ -106,6 +106,7 @@ def validate_endpoint_configuration(self) -> "ZscalerAIGuardConfigModel":
         )
 
         # Check for configuration issues
+        assert api_base is not None  # always set via env default above
         is_resolve_policy = api_base.endswith("/resolve-and-execute-policy")
         is_execute_policy = api_base.endswith("/execute-policy") and not is_resolve_policy