diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 4ac4159558f..18589fa009c 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -23676,6 +23676,20 @@ "output_cost_per_token": 6.5e-07, "supports_tool_choice": true }, + "openrouter/moonshotai/kimi-k2.5": { + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 6e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://openrouter.ai/moonshotai/kimi-k2.5", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, "openrouter/nousresearch/nous-hermes-llama2-13b": { "input_cost_per_token": 2e-07, "litellm_provider": "openrouter", diff --git a/tests/test_litellm/test_utils.py b/tests/test_litellm/test_utils.py index f6c24d19df5..6a79fd0823b 100644 --- a/tests/test_litellm/test_utils.py +++ b/tests/test_litellm/test_utils.py @@ -2543,6 +2543,48 @@ def test_model_info_for_vertex_ai_deepseek_model(): print("vertex deepseek model info", model_info) + +def test_model_info_for_openrouter_kimi_k2_5(): + """ + Test that openrouter/moonshotai/kimi-k2.5 model info is correctly configured + in model_prices_and_context_window.json.
+ + Model properties from OpenRouter API: + - context_length: 262144 + - pricing: prompt=$0.0000006, completion=$0.000003, input_cache_read=$0.0000001 + - modality: text+image->text (supports vision) + - supports: tool_choice, tools (function calling) + """ + import json + from pathlib import Path + + # Load directly from the local JSON file + json_path = Path(__file__).parents[2] / "model_prices_and_context_window.json" + with open(json_path) as f: + model_cost = json.load(f) + + model_info = model_cost.get("openrouter/moonshotai/kimi-k2.5") + assert model_info is not None, "Model not found in model_prices_and_context_window.json" + assert model_info["litellm_provider"] == "openrouter" + assert model_info["mode"] == "chat" + + # Verify context window + assert model_info["max_input_tokens"] == 262144 + assert model_info["max_output_tokens"] == 262144 + assert model_info["max_tokens"] == 262144 + + # Verify pricing + assert model_info["input_cost_per_token"] == 6e-07 + assert model_info["output_cost_per_token"] == 3e-06 + assert model_info["cache_read_input_token_cost"] == 1e-07 + + # Verify capabilities + assert model_info["supports_vision"] is True + assert model_info["supports_function_calling"] is True + assert model_info["supports_tool_choice"] is True + + print("openrouter kimi-k2.5 model info", model_info) + + class TestGetValidModelsWithCLI: """Test get_valid_models function as used in CLI token usage"""