23 changes: 23 additions & 0 deletions llama_stack/providers/utils/inference/litellm_openai_mixin.py
@@ -73,6 +73,15 @@ def __init__(
provider_data_api_key_field: str,
openai_compat_api_base: str | None = None,
):
"""
Initialize the LiteLLMOpenAIMixin.

:param model_entries: The model entries to register.
:param api_key_from_config: The API key to use from the config.
:param provider_data_api_key_field: The field in the provider data that contains the API key.
:param litellm_provider_name: The name of the provider, used for model lookups.
:param openai_compat_api_base: The base URL for OpenAI compatibility, or None if not using OpenAI compatibility.
"""
ModelRegistryHelper.__init__(self, model_entries)

self.litellm_provider_name = litellm_provider_name
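
A minimal sketch of how a concrete adapter might call this constructor; the adapter class, MODEL_ENTRIES, provider name, and config fields below are illustrative assumptions, not part of this diff:

class MyProviderInferenceAdapter(LiteLLMOpenAIMixin):
    def __init__(self, config):
        LiteLLMOpenAIMixin.__init__(
            self,
            model_entries=MODEL_ENTRIES,  # hypothetical static entries for the model registry
            litellm_provider_name="my_provider",  # key litellm uses for model lookups
            api_key_from_config=config.api_key,
            provider_data_api_key_field="my_provider_api_key",
        )

Passing keyword arguments keeps the call readable and independent of parameter order.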
@@ -428,3 +437,17 @@ async def batch_chat_completion(
logprobs: LogProbConfig | None = None,
):
raise NotImplementedError("Batch chat completion is not supported for OpenAI Compat")

async def check_model_availability(self, model: str) -> bool:
"""
Check if a specific model is available via LiteLLM for the current
provider (self.litellm_provider_name).

:param model: The model identifier to check.
:return: True if the model is available dynamically, False otherwise.
"""
# litellm.models_by_provider maps each provider name to the model names litellm knows about.
if self.litellm_provider_name not in litellm.models_by_provider:
    logger.error(f"Provider {self.litellm_provider_name} is not registered in litellm.")
    return False

return model in litellm.models_by_provider[self.litellm_provider_name]
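
A rough usage sketch, assuming an adapter instance built as in the earlier example; the helper name and registration step are hypothetical. Since check_model_availability consults litellm's static models_by_provider mapping, this is a local lookup rather than a network call:

async def register_if_available(adapter: LiteLLMOpenAIMixin, model_id: str) -> bool:
    if not await adapter.check_model_availability(model_id):
        logger.warning(f"Model {model_id} is not known to litellm for provider {adapter.litellm_provider_name}")
        return False
    # proceed with registration / routing for model_id
    return True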