@@ -58,6 +58,39 @@ def test_update_config(model, model_id):
5858 assert tru_model_id == exp_model_id
5959
6060
@pytest.mark.parametrize(
    "client_args, model_id, expected_model_id",
    [
        # Proxy explicitly enabled: the litellm_proxy/ prefix is prepended.
        ({"use_litellm_proxy": True}, "openai/gpt-4", "litellm_proxy/openai/gpt-4"),
        # Proxy disabled, unset (None), missing key, or no client_args at all:
        # the model_id passes through untouched.
        ({"use_litellm_proxy": False}, "openai/gpt-4", "openai/gpt-4"),
        ({"use_litellm_proxy": None}, "openai/gpt-4", "openai/gpt-4"),
        ({}, "openai/gpt-4", "openai/gpt-4"),
        (None, "openai/gpt-4", "openai/gpt-4"),
        # Already-prefixed model_id is never double-prefixed, regardless of the flag.
        ({"use_litellm_proxy": True}, "litellm_proxy/openai/gpt-4", "litellm_proxy/openai/gpt-4"),
        ({"use_litellm_proxy": False}, "litellm_proxy/openai/gpt-4", "litellm_proxy/openai/gpt-4"),
    ],
)
def test__init__use_litellm_proxy_prefix(client_args, model_id, expected_model_id):
    """Verify the litellm_proxy/ model_id prefix is applied only when configured."""
    model = LiteLLMModel(client_args=client_args, model_id=model_id)
    config = model.get_config()
    assert config["model_id"] == expected_model_id
78+
@pytest.mark.parametrize(
    "client_args, initial_model_id, new_model_id, expected_model_id",
    [
        # Proxy enabled at construction: a model_id set later is still prefixed.
        ({"use_litellm_proxy": True}, "openai/gpt-4", "anthropic/claude-3", "litellm_proxy/anthropic/claude-3"),
        # Proxy disabled or client_args absent: the new model_id passes through as-is.
        ({"use_litellm_proxy": False}, "openai/gpt-4", "anthropic/claude-3", "anthropic/claude-3"),
        (None, "openai/gpt-4", "anthropic/claude-3", "anthropic/claude-3"),
    ],
)
def test_update_config_proxy_prefix(client_args, initial_model_id, new_model_id, expected_model_id):
    """Verify update_config re-applies the litellm_proxy/ prefix per the stored flag."""
    model = LiteLLMModel(client_args=client_args, model_id=initial_model_id)
    model.update_config(model_id=new_model_id)
    config = model.get_config()
    assert config["model_id"] == expected_model_id
93+
6194@pytest .mark .parametrize (
6295 "content, exp_result" ,
6396 [
0 commit comments