diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py
index 389cb8728103..057fbb3528a7 100644
--- a/vllm/plugins/__init__.py
+++ b/vllm/plugins/__init__.py
@@ -64,18 +64,10 @@ def load_general_plugins():
         # see https://github.com/pytorch/pytorch/blob/43c5f59/torch/_dynamo/config.py#L158
         torch._dynamo.config.disable = True
     elif current_platform.is_hpu():
-        # NOTE(kzawora): PT HPU lazy backend (PT_HPU_LAZY_MODE = 1)
-        # does not support torch.compile
-        # Eager backend (PT_HPU_LAZY_MODE = 0) must be selected for
-        # torch.compile support
-        is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '1') == '1'
-        if is_lazy:
+        os.environ['PT_HPU_ENABLE_LAZY_COLLECTIVES'] = 'true'
+        import habana_frameworks.torch as htorch
+        if htorch.utils.internal.is_lazy():
             torch._dynamo.config.disable = True
-            # NOTE(kzawora) multi-HPU inference with HPUGraphs (lazy-only)
-            # requires enabling lazy collectives
-            # see https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_HPU_Graphs.html # noqa: E501
-            os.environ['PT_HPU_ENABLE_LAZY_COLLECTIVES'] = 'true'
-
     plugins = load_plugins_by_group(group='vllm.general_plugins')
     # general plugins, we only need to execute the loaded functions
     for func in plugins.values():
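
For context, here is a minimal sketch (not part of the patch) contrasting the two lazy-mode detection strategies the diff swaps. Both calls appear in the diff itself; the standalone function names are hypothetical, and `habana_frameworks` is only importable on machines with the Habana software stack installed, so the second function raises ImportError elsewhere.

```python
import os


def hpu_lazy_mode_via_env() -> bool:
    # Old approach: probe the raw environment variable, trusting its
    # documented default ('1' == lazy). This can disagree with the mode
    # the runtime actually resolves.
    return os.environ.get('PT_HPU_LAZY_MODE', '1') == '1'


def hpu_lazy_mode_via_htorch() -> bool:
    # New approach: ask the Habana PyTorch bridge directly, which reports
    # the mode actually in effect rather than guessing from the environment.
    import habana_frameworks.torch as htorch  # present only on HPU installs
    return htorch.utils.internal.is_lazy()
```

The patch also sets `PT_HPU_ENABLE_LAZY_COLLECTIVES` unconditionally for the HPU branch, before the mode check, rather than only when lazy mode was inferred from the environment.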