From bde97feb451f8bf6c474525791ee5341a83ce56c Mon Sep 17 00:00:00 2001
From: Iman Tabrizian <10105175+tabrizian@users.noreply.github.com>
Date: Mon, 18 Aug 2025 17:46:05 -0700
Subject: [PATCH] Fix test_llm_args.py

Use TorchLLM as a context manager in test_runtime_sizes so the
instance is torn down when the test finishes, instead of leaving the
constructed LLM alive after the assertions run.

Signed-off-by: Iman Tabrizian <10105175+tabrizian@users.noreply.github.com>
---
 tests/unittest/llmapi/test_llm_args.py | 31 ++++++++++++++-----------------
 1 file changed, 14 insertions(+), 17 deletions(-)

diff --git a/tests/unittest/llmapi/test_llm_args.py b/tests/unittest/llmapi/test_llm_args.py
index acb831837cd..66f04946608 100644
--- a/tests/unittest/llmapi/test_llm_args.py
+++ b/tests/unittest/llmapi/test_llm_args.py
@@ -426,23 +426,20 @@ class TestTorchLlmArgs:
 
     @print_traceback_on_error
     def test_runtime_sizes(self):
-        llm = TorchLLM(
-            llama_model_path,
-            max_beam_width=1,
-            max_num_tokens=256,
-            max_seq_len=128,
-            max_batch_size=8,
-        )
-
-        assert llm.args.max_beam_width == 1
-        assert llm.args.max_num_tokens == 256
-        assert llm.args.max_seq_len == 128
-        assert llm.args.max_batch_size == 8
-
-        assert llm._executor_config.max_beam_width == 1
-        assert llm._executor_config.max_num_tokens == 256
-        assert llm._executor_config.max_seq_len == 128
-        assert llm._executor_config.max_batch_size == 8
+        with TorchLLM(llama_model_path,
+                      max_beam_width=1,
+                      max_num_tokens=256,
+                      max_seq_len=128,
+                      max_batch_size=8) as llm:
+            assert llm.args.max_beam_width == 1
+            assert llm.args.max_num_tokens == 256
+            assert llm.args.max_seq_len == 128
+            assert llm.args.max_batch_size == 8
+
+            assert llm._executor_config.max_beam_width == 1
+            assert llm._executor_config.max_num_tokens == 256
+            assert llm._executor_config.max_seq_len == 128
+            assert llm._executor_config.max_batch_size == 8
 
     def test_dynamic_setattr(self):
         with pytest.raises(pydantic_core._pydantic_core.ValidationError):
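
A note on the pattern the patch adopts: the with-statement form relies on
TorchLLM implementing Python's context-manager protocol. Below is a minimal
sketch of that protocol, assuming __exit__ delegates to a shutdown()-style
cleanup hook; ManagedLLM and its methods are illustrative stand-ins, not
TorchLLM's actual API.

    class ManagedLLM:
        """Illustrative stand-in for an object usable in a with-statement."""

        def __enter__(self):
            # The value returned here is bound to the `as` target.
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # Runs whether the block exits normally or via an exception,
            # so cleanup happens even when one of the asserts fails.
            self.shutdown()
            return False  # propagate any exception raised in the block

        def shutdown(self):
            # Hypothetical cleanup: release engine/executor resources.
            pass

This is what the rewritten test gains over the original: if an assert inside
the block raises, the old version would leave llm (and its executor) alive,
while the context-manager form still tears it down on the way out.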