Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 78 additions & 1 deletion litellm/proxy/management_endpoints/common_utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from typing import Any, Dict, Optional, Union
from typing import Any, Dict, Optional, Union, TYPE_CHECKING

from litellm._logging import verbose_proxy_logger
from litellm.caching import DualCache
from litellm.proxy._types import (
KeyRequestBase,
LiteLLM_ManagementEndpoint_MetadataFields,
Expand All @@ -11,6 +13,9 @@
)
from litellm.proxy.utils import _premium_user_check

if TYPE_CHECKING:
from litellm.proxy.utils import PrismaClient, ProxyLogging


def _user_has_admin_view(user_api_key_dict: UserAPIKeyAuth) -> bool:
return (
Expand All @@ -31,6 +36,78 @@ def _is_user_team_admin(
return False


async def _user_has_admin_privileges(
    user_api_key_dict: UserAPIKeyAuth,
    prisma_client: Optional["PrismaClient"] = None,
    user_api_key_cache: Optional["DualCache"] = None,
    proxy_logging_obj: Optional["ProxyLogging"] = None,
) -> bool:
    """
    Check if user has admin privileges (proxy admin, team admin, or org admin).

    Args:
        user_api_key_dict: User API key authentication object
        prisma_client: Prisma client for database operations
        user_api_key_cache: Cache for user API keys
        proxy_logging_obj: Proxy logging object

    Returns:
        True if user is proxy admin, team admin for any team, or org admin
        for any organization. False otherwise — including when the lookup
        errors out, so a failed check never grants elevated access.
    """
    # Proxy admins are unconditionally privileged; no DB lookup needed.
    if user_api_key_dict.user_role == LitellmUserRoles.PROXY_ADMIN:
        return True

    # Team/org admin status requires a DB lookup keyed by user_id; without
    # either, fail closed.
    if prisma_client is None or user_api_key_dict.user_id is None:
        return False

    # Imported at call time — presumably to avoid a circular import between
    # this module and litellm.proxy.auth.auth_checks (TODO confirm).
    from litellm.proxy.auth.auth_checks import get_user_object

    try:
        user_obj = await get_user_object(
            user_id=user_api_key_dict.user_id,
            prisma_client=prisma_client,
            # DualCache is already imported at module level; use a fresh
            # (empty) cache when the caller did not supply one.
            user_api_key_cache=user_api_key_cache or DualCache(),
            user_id_upsert=False,
            proxy_logging_obj=proxy_logging_obj,
        )

        if user_obj is None:
            return False

        # Org admin for any organization counts as admin.
        if user_obj.organization_memberships is not None:
            for membership in user_obj.organization_memberships:
                if membership.user_role == LitellmUserRoles.ORG_ADMIN.value:
                    return True

        # Team admin for any of the user's teams counts as admin.
        if user_obj.teams:
            # Fetch all teams the user belongs to in a single query.
            teams = await prisma_client.db.litellm_teamtable.find_many(
                where={"team_id": {"in": user_obj.teams}}
            )

            for team in teams:
                team_obj = LiteLLM_TeamTable(**team.model_dump())
                if _is_user_team_admin(
                    user_api_key_dict=user_api_key_dict, team_obj=team_obj
                ):
                    return True

    except Exception as e:
        # Fail closed: any error during the privilege check denies access.
        verbose_proxy_logger.debug(
            f"Error checking admin privileges for user {user_api_key_dict.user_id}: {e}"
        )
        return False

    return False


def _set_object_metadata_field(
object_data: Union[
LiteLLM_TeamTable,
Expand Down
72 changes: 72 additions & 0 deletions litellm/proxy/proxy_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -5070,6 +5070,7 @@ async def model_list(
only_model_access_groups: Optional[bool] = False,
include_metadata: Optional[bool] = False,
fallback_type: Optional[str] = None,
scope: Optional[str] = None,
):
"""
Use `/model/info` - to get detailed model information, example - pricing, mode, etc.
Expand All @@ -5080,14 +5081,85 @@ async def model_list(
- include_metadata: Include additional metadata in the response with fallback information
- fallback_type: Type of fallbacks to include ("general", "context_window", "content_policy")
Defaults to "general" when include_metadata=true
- scope: Optional scope parameter. Currently only accepts "expand".
When scope=expand is passed, proxy admins, team admins, and org admins
will receive all proxy models as if they are a proxy admin.
"""
global llm_model_list, general_settings, llm_router, prisma_client, user_api_key_cache, proxy_logging_obj

from litellm.proxy.management_endpoints.common_utils import (
_user_has_admin_privileges,
)
from litellm.proxy.utils import (
create_model_info_response,
get_available_models_for_user,
)

# Validate scope parameter if provided
if scope is not None and scope != "expand":
raise HTTPException(
status_code=400,
detail=f"Invalid scope parameter. Only 'expand' is currently supported. Received: {scope}",
)

# Check if scope=expand is requested and user has admin privileges
should_expand_scope = False
if scope == "expand":
should_expand_scope = await _user_has_admin_privileges(
user_api_key_dict=user_api_key_dict,
prisma_client=prisma_client,
user_api_key_cache=user_api_key_cache,
proxy_logging_obj=proxy_logging_obj,
)

# If scope=expand and user has admin privileges, return all proxy models
if should_expand_scope:
# Get all proxy models as if user is a proxy admin
if llm_router is None:
proxy_model_list = []
model_access_groups = {}
else:
proxy_model_list = llm_router.get_model_names()
model_access_groups = llm_router.get_model_access_groups()

# Include model access groups if requested
if include_model_access_groups:
proxy_model_list = list(set(proxy_model_list + list(model_access_groups.keys())))

# Get complete model list including wildcard routes if requested
from litellm.proxy.auth.model_checks import get_complete_model_list

all_models = get_complete_model_list(
key_models=[],
team_models=[],
proxy_model_list=proxy_model_list,
user_model=None,
infer_model_from_keys=False,
return_wildcard_routes=return_wildcard_routes or False,
llm_router=llm_router,
model_access_groups=model_access_groups,
include_model_access_groups=include_model_access_groups or False,
only_model_access_groups=only_model_access_groups or False,
)

# Build response data with all proxy models
model_data = []
for model in all_models:
model_info = create_model_info_response(
model_id=model,
provider="openai",
include_metadata=include_metadata or False,
fallback_type=fallback_type,
llm_router=llm_router,
)
model_data.append(model_info)

return dict(
data=model_data,
object="list",
)

# Otherwise, use the normal behavior (current implementation)
# Get available models for the user
all_models = await get_available_models_for_user(
user_api_key_dict=user_api_key_dict,
Expand Down
Loading
Loading