From 0778810c4025bf80a3966f3d37cedcb73a327664 Mon Sep 17 00:00:00 2001
From: "Shin (LiteLLM AI)"
Date: Sat, 31 Jan 2026 20:48:00 +0000
Subject: [PATCH] fix(test): correct prompt_tokens in test_string_cost_values

The test had prompt_tokens=1000 but the sum of token details was 1150
(text=700 + audio=100 + cached=200 + cache_creation=150). This triggered
the double-counting detection logic which recalculated text_tokens to 550,
causing the assertion to fail.

Fixed by setting prompt_tokens=1150 to match the sum of details.
---
 .../llm_cost_calc/test_llm_cost_calc_utils.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/tests/test_litellm/litellm_core_utils/llm_cost_calc/test_llm_cost_calc_utils.py b/tests/test_litellm/litellm_core_utils/llm_cost_calc/test_llm_cost_calc_utils.py
index 5ba78d9eed1..9e70f3e08d5 100644
--- a/tests/test_litellm/litellm_core_utils/llm_cost_calc/test_llm_cost_calc_utils.py
+++ b/tests/test_litellm/litellm_core_utils/llm_cost_calc/test_llm_cost_calc_utils.py
@@ -313,10 +313,12 @@ def test_string_cost_values():
     }
 
     # Test usage with various token types
+    # Note: prompt_tokens must equal sum of details to avoid double-counting adjustment
+    # text_tokens(700) + audio_tokens(100) + cached_tokens(200) + cache_creation_tokens(150) = 1150
     usage = Usage(
-        prompt_tokens=1000,
+        prompt_tokens=1150,
         completion_tokens=500,
-        total_tokens=1500,
+        total_tokens=1650,
         prompt_tokens_details=PromptTokensDetailsWrapper(
             audio_tokens=100, cached_tokens=200, text_tokens=700, image_tokens=None, cache_creation_tokens=150
         ),