diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml
index bb956c2a8fd..4bf3fc082ef 100644
--- a/.github/workflows/bot_pr_create.yaml
+++ b/.github/workflows/bot_pr_create.yaml
@@ -37,7 +37,7 @@ jobs:
     steps:
       - name: Get vLLM version
         run: |
-          VLLM_COMMIT=15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+          VLLM_COMMIT=4034c3d32e30d01639459edd3ab486f56993876d
          echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> "$GITHUB_ENV"

      - name: Checkout repository
diff --git a/.github/workflows/dockerfiles/Dockerfile.lint b/.github/workflows/dockerfiles/Dockerfile.lint
index a5aee23d769..64068c64939 100644
--- a/.github/workflows/dockerfiles/Dockerfile.lint
+++ b/.github/workflows/dockerfiles/Dockerfile.lint
@@ -27,7 +27,7 @@ RUN apt-get update -y && \

 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
 # For lint purpose, actually we need make a main2main matching.
-ARG VLLM_COMMIT=15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+ARG VLLM_COMMIT=4034c3d32e30d01639459edd3ab486f56993876d
 RUN git clone $VLLM_REPO /vllm-workspace/vllm && \
     cd /vllm-workspace/vllm && \
     git checkout $VLLM_COMMIT
diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml
index 2e9f78ced81..ffe53d18759 100644
--- a/.github/workflows/pr_test_full.yaml
+++ b/.github/workflows/pr_test_full.yaml
@@ -75,7 +75,7 @@ jobs:
     name: e2e-full
     strategy:
       matrix:
-        vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+        vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
     needs: [changes]
     if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }}
     uses: ./.github/workflows/_e2e_test.yaml
diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml
index a95ffec46d4..d3c9a88148b 100644
--- a/.github/workflows/pr_test_light.yaml
+++ b/.github/workflows/pr_test_light.yaml
@@ -41,7 +41,7 @@ jobs:
   lint:
     uses: ./.github/workflows/_pre_commit.yml
     with:
-      vllm: 15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+      vllm: 4034c3d32e30d01639459edd3ab486f56993876d
   changes:
     runs-on: linux-aarch64-a2b3-0
     outputs:
@@ -89,7 +89,7 @@ jobs:
     if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
     strategy:
       matrix:
-        vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+        vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
@@ -101,7 +101,7 @@ jobs:
     name: e2e-light
     strategy:
       matrix:
-        vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+        vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
     # Note (yikun): If CI resource are limited we can split job into two chain jobs
     needs: [lint, changes]
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
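Review note: every CI entry point above must reference the same vLLM commit, otherwise lint, unit, and e2e jobs silently build against different upstreams. Below is a hedged sketch of a checker a reviewer could run locally; the script is hypothetical (not part of this PR), and it assumes the repo layout shown in the paths above.

```python
# Hypothetical reviewer aid: confirm every file under .github/workflows pins
# the same vLLM commit. PINNED is the commit introduced by this PR.
import re
from pathlib import Path

PINNED = "4034c3d32e30d01639459edd3ab486f56993876d"

for path in Path(".github/workflows").rglob("*"):
    if not path.is_file():
        continue
    # Any 40-hex string in a workflow or Dockerfile is treated as a vLLM pin.
    for sha in re.findall(r"\b[0-9a-f]{40}\b", path.read_text()):
        assert sha == PINNED, f"{path} pins {sha}, expected {PINNED}"
print("all workflow files agree on", PINNED[:12])
```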
diff --git a/.github/workflows/schedule_codecov_refresh.yaml b/.github/workflows/schedule_codecov_refresh.yaml
index e693cf7bd18..dd4f2c8484b 100644
--- a/.github/workflows/schedule_codecov_refresh.yaml
+++ b/.github/workflows/schedule_codecov_refresh.yaml
@@ -33,7 +33,7 @@ jobs:
     name: refresh codecov
     strategy:
      matrix:
-        vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7]
+        vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md
index 94b3059b0d3..7c0f5f30d01 100644
--- a/docs/source/community/versioning_policy.md
+++ b/docs/source/community/versioning_policy.md
@@ -57,7 +57,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL

 | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
 |-------------|--------------|------------------|-------------|--------------------|
-| main | 4572a06afe96d0a6d5d3efacf130c71505dd2bc9, v0.16.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
+| main | 4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |

 ## Release cadence
diff --git a/tests/ut/_310p/quantization/test_modelslim_config_310.py b/tests/ut/_310p/quantization/test_modelslim_config_310.py
index 7e614e21f83..268b98907b3 100644
--- a/tests/ut/_310p/quantization/test_modelslim_config_310.py
+++ b/tests/ut/_310p/quantization/test_modelslim_config_310.py
@@ -70,6 +70,7 @@ def test_get_quant_method_for_fused_moe_310(self):
         fused_moe_layer = MagicMock(spec=FusedMoE)
         fused_moe_layer.moe = MagicMock(spec=FusedMoEConfig)
         fused_moe_layer.moe_config = MagicMock(spec=FusedMoEConfig)
+        fused_moe_layer.moe_config.moe_backend = "auto"
         fused_moe_layer.moe_config.moe_parallel_config = MagicMock(spec=FusedMoEParallelConfig)
         fused_moe_layer.moe_config.moe_parallel_config.use_ep = True
         fused_moe_layer.moe_config.moe_parallel_config.dp_size = 1
diff --git a/tests/ut/distributed/test_communicator.py b/tests/ut/distributed/test_communicator.py
index c929741eb71..f968d26ed2e 100644
--- a/tests/ut/distributed/test_communicator.py
+++ b/tests/ut/distributed/test_communicator.py
@@ -44,7 +44,8 @@ def patched_all_to_all(output_tensor_list,
         gather_sizes = [2, 2]
         input_ = torch.tensor([10, 20, 30, 40])

-        comm = NPUCommunicator(cpu_group=dist.group.WORLD)
+        with patch.dict(dist.distributed_c10d._world.pg_map, {dist.group.WORLD: MagicMock()}, clear=False):
+            comm = NPUCommunicator(cpu_group=dist.group.WORLD)

         output = comm.all_to_all(input_,
                                  scatter_sizes=scatter_sizes,
@@ -84,7 +85,8 @@ def patched_all_to_all(output_tensor_list,

         input_ = torch.tensor([[10, 20], [30, 40]])

-        comm = NPUCommunicator(cpu_group=dist.group.WORLD)
-        output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)
+        with patch.dict(dist.distributed_c10d._world.pg_map, {dist.group.WORLD: MagicMock()}, clear=False):
+            comm = NPUCommunicator(cpu_group=dist.group.WORLD)
+            output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)

         assert output.tolist() == [[10, 20], [50, 60]]
diff --git a/vllm_ascend/core/recompute_scheduler.py b/vllm_ascend/core/recompute_scheduler.py
index b1aaff7c88a..c4f39f02e78 100644
--- a/vllm_ascend/core/recompute_scheduler.py
+++ b/vllm_ascend/core/recompute_scheduler.py
@@ -22,7 +22,6 @@
 from collections import defaultdict
 from dataclasses import dataclass, fields

-from vllm._bc_linter import bc_linter_include
 from vllm.config import SchedulerConfig, VllmConfig
 from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorMetadata
 from vllm.distributed.kv_events import KVEventBatch
@@ -73,7 +72,6 @@ class RecomputeReqInfo:
     client_index: int = 0


-@bc_linter_include
 @dataclass
 class RecomputeSchedulerOutput(SchedulerOutput):
     recomputed_reqs: list[RecomputeReqInfo] | None = None
diff --git a/vllm_ascend/ops/mm_encoder_attention.py b/vllm_ascend/ops/mm_encoder_attention.py
index 4122c7f178e..9d497059698 100644
--- a/vllm_ascend/ops/mm_encoder_attention.py
+++ b/vllm_ascend/ops/mm_encoder_attention.py
@@ -96,6 +96,7 @@ def forward_oot(
         value: torch.Tensor,
         cu_seqlens: torch.Tensor | None = None,
         max_seqlen: torch.Tensor | None = None,  # Only used for Flash Attention
+        sequence_lengths: torch.Tensor | None = None,
     ):
         bsz, q_len = query.size()[:2]
         kv_len = key.size(1)
diff --git a/vllm_ascend/patch/__init__.py b/vllm_ascend/patch/__init__.py
index 48cabca293f..e811c128882 100644
--- a/vllm_ascend/patch/__init__.py
+++ b/vllm_ascend/patch/__init__.py
@@ -94,6 +94,20 @@
 #    Future Plan:
 #       Remove this patch when vLLM merge the PR.
 #
+# ** 6. File: platform/patch_fusion_matcher_compat_ops.py**
+#    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#    1. `torch.ops._C.rms_norm`, `torch.ops._C.fused_add_rms_norm`, etc.
+#       Why:
+#          Upstream vLLM initializes the fusion matcher's global operators at import time.
+#          On Ascend environments these symbols may be absent, which makes the import fail.
+#       How:
+#          Inject placeholders only for the symbols that are missing, so the import can continue.
+#       Related PR (if no, explain why):
+#          No related PR yet; this is a temporary compatibility patch until the upstream adjustment is merged.
+#       Future Plan:
+#          Remove this patch once upstream no longer requires these global symbols or
+#          provides a backend-safe initialization path.
+#
 # * Worker Patch:
 # ===============
 #
diff --git a/vllm_ascend/patch/platform/__init__.py b/vllm_ascend/patch/platform/__init__.py
index 52d9a74b156..d6397eace9d 100644
--- a/vllm_ascend/patch/platform/__init__.py
+++ b/vllm_ascend/patch/platform/__init__.py
@@ -17,6 +17,7 @@
 import os

 import vllm_ascend.patch.platform.patch_distributed  # noqa
+import vllm_ascend.patch.platform.patch_fusion_matcher_compat_ops  # noqa
 import vllm_ascend.patch.platform.patch_mamba_config  # noqa
 import vllm_ascend.patch.platform.patch_sched_yield  # noqa

diff --git a/vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py b/vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py
new file mode 100644
index 00000000000..6e6bcb75f9b
--- /dev/null
+++ b/vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py
@@ -0,0 +1,24 @@
+import torch
+
+
+class _MissingOp:
+    def __init__(self, op_name: str):
+        self.op_name = op_name
+        self.default = self
+
+    def __call__(self, *args, **kwargs):
+        raise RuntimeError(f"Missing upstream op `{self.op_name}` was invoked.")
+
+
+def _set_missing(namespace, op_name: str, full_name: str) -> None:
+    if not hasattr(namespace, op_name):
+        setattr(namespace, op_name, _MissingOp(full_name))
+
+
+_set_missing(torch.ops._C, "rms_norm", "torch.ops._C.rms_norm")
+_set_missing(torch.ops._C, "fused_add_rms_norm", "torch.ops._C.fused_add_rms_norm")
+_set_missing(torch.ops._C, "rotary_embedding", "torch.ops._C.rotary_embedding")
+_set_missing(torch.ops._C, "static_scaled_fp8_quant", "torch.ops._C.static_scaled_fp8_quant")
+_set_missing(torch.ops._C, "dynamic_scaled_fp8_quant", "torch.ops._C.dynamic_scaled_fp8_quant")
+_set_missing(torch.ops._C, "dynamic_per_token_scaled_fp8_quant", "torch.ops._C.dynamic_per_token_scaled_fp8_quant")
+_set_missing(torch.ops._C, "silu_and_mul", "torch.ops._C.silu_and_mul") diff --git a/vllm_ascend/worker/model_runner_v1.py b/vllm_ascend/worker/model_runner_v1.py index 0bbe13f5a23..923f4fd4db3 100644 --- a/vllm_ascend/worker/model_runner_v1.py +++ b/vllm_ascend/worker/model_runner_v1.py @@ -120,6 +120,7 @@ is_moe_model, lmhead_tp_enable, set_weight_prefetch_method, + vllm_version_is, ) from vllm_ascend.worker.npu_input_batch import NPUInputBatch from vllm_ascend.worker.pcp_utils import PCPManager @@ -1826,16 +1827,26 @@ def _determine_batch_execution_and_padding( has_lora = len(self.input_batch.lora_id_to_lora_request) > 0 if force_has_lora is None else force_has_lora # ruff: noqa: E731 - dispatch_cudagraph = ( - lambda num_tokens, disable_full: self.cudagraph_dispatcher.dispatch( - num_tokens=num_tokens, - has_lora=has_lora, - uniform_decode=uniform_decode, - disable_full=disable_full, - ) - if not force_eager - else (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded)) - ) + def dispatch_cudagraph(num_tokens, disable_full=False, valid_modes=None): + if force_eager: + return (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded)) + + if vllm_version_is("0.16.0"): + return self.cudagraph_dispatcher.dispatch( + num_tokens=num_tokens, + has_lora=has_lora, + uniform_decode=uniform_decode, + disable_full=disable_full, + ) + else: + return self.cudagraph_dispatcher.dispatch( + num_tokens=num_tokens, + has_lora=has_lora, + uniform_decode=uniform_decode, + valid_modes=valid_modes, + invalid_modes={CUDAGraphMode.FULL} if disable_full else None, + ) + cudagraph_mode, batch_descriptor = dispatch_cudagraph(num_tokens_padded, use_cascade_attn or has_encoder_output) num_tokens_padded = batch_descriptor.num_tokens if enable_sp(self.vllm_config): @@ -1856,10 +1867,16 @@ def _determine_batch_execution_and_padding( dp_rank = self.parallel_config.data_parallel_rank num_tokens_padded = int(num_tokens_across_dp[dp_rank].item()) # Re-dispatch with DP padding - cudagraph_mode, batch_descriptor = dispatch_cudagraph( - num_tokens_padded, - disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value, - ) + if vllm_version_is("0.16.0"): + cudagraph_mode, batch_descriptor = dispatch_cudagraph( + num_tokens_padded, + disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value, + ) + else: + cudagraph_mode, batch_descriptor = dispatch_cudagraph( + num_tokens_padded, + valid_modes={CUDAGraphMode(synced_cudagraph_mode)}, + ) # Assert to make sure the agreed upon token count is correct otherwise # num_tokens_across_dp will no-longer be valid assert batch_descriptor.num_tokens == num_tokens_padded diff --git a/vllm_ascend/worker/worker.py b/vllm_ascend/worker/worker.py index d746b1cdde2..b8cbbdbd27a 100644 --- a/vllm_ascend/worker/worker.py +++ b/vllm_ascend/worker/worker.py @@ -430,7 +430,7 @@ def load_model(self) -> None: with context, set_current_vllm_config(self.vllm_config): self.model_runner.load_model() - def compile_or_warm_up_model(self) -> None: + def compile_or_warm_up_model(self) -> float: # Note: need to adapt for graph mode. warmup_sizes = (self.vllm_config.compilation_config.compile_sizes or []).copy() if not self.model_config.enforce_eager: @@ -462,6 +462,7 @@ def compile_or_warm_up_model(self) -> None: # Reset the seed to ensure that the random state is not affected by # the model initialization and profiling. set_random_seed(self.model_config.seed) + return self.vllm_config.compilation_config.compilation_time def _warm_up_atb(self): x = torch.rand((2, 4), dtype=torch.float16).npu()
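Review note on the test_communicator.py change: `unittest.mock.patch.dict` is what lets `NPUCommunicator` be constructed without a real process group, because it temporarily inserts the WORLD group into the process-group map and restores the map on exit. A minimal self-contained sketch of that mechanism; the plain dict and sentinel below are stand-ins for `torch.distributed.distributed_c10d._world.pg_map` and `dist.group.WORLD`, so the example runs without torch.

```python
# Minimal sketch: patch.dict temporarily adds a key to a mapping and restores
# the original contents on exit, even if the body raises.
from unittest.mock import MagicMock, patch

pg_map = {}        # stand-in for torch.distributed.distributed_c10d._world.pg_map
world = object()   # stand-in for dist.group.WORLD

with patch.dict(pg_map, {world: MagicMock()}, clear=False):
    assert world in pg_map   # group lookups succeed inside the context
assert world not in pg_map   # mapping restored afterwards
```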
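Review note on patch_fusion_matcher_compat_ops.py: the placeholder has to satisfy two import-time accesses — resolving as `torch.ops._C.<op>` and exposing a `.default` overload — while still failing loudly if anything actually calls the op. A runnable sketch of that behavior; `_FakeNamespace` is a stand-in so the example does not require a torch build that is missing these ops.

```python
class _MissingOp:
    def __init__(self, op_name: str):
        self.op_name = op_name
        self.default = self  # matchers reference `op.default`; chain it back to self

    def __call__(self, *args, **kwargs):
        raise RuntimeError(f"Missing upstream op `{self.op_name}` was invoked.")


class _FakeNamespace:  # stand-in for torch.ops._C
    pass


ns = _FakeNamespace()
if not hasattr(ns, "rms_norm"):
    ns.rms_norm = _MissingOp("torch.ops._C.rms_norm")

assert callable(ns.rms_norm.default)  # import-time pattern registration succeeds
try:
    ns.rms_norm()                     # a real invocation still fails loudly
except RuntimeError as exc:
    print(exc)
```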
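Review note on the model_runner_v1.py change: replacing the lambda with a local `def` keeps a single call site while `vllm_version_is("0.16.0")` selects between the two dispatcher keyword surfaces (`disable_full=` on v0.16.0, `valid_modes=`/`invalid_modes=` on newer main). A hedged, self-contained sketch of that shim; the fake dispatchers below are stand-ins, not vLLM's real cudagraph dispatcher.

```python
class DispatcherV0160:  # v0.16.0-style keyword surface (stand-in)
    def dispatch(self, *, num_tokens, disable_full=False):
        return ("PIECEWISE" if disable_full else "FULL", num_tokens)


class DispatcherMain:   # newer-main-style keyword surface (stand-in)
    def dispatch(self, *, num_tokens, valid_modes=None, invalid_modes=None):
        if invalid_modes and "FULL" in invalid_modes:
            return ("PIECEWISE", num_tokens)
        return ("FULL", num_tokens)


def make_dispatch(dispatcher, is_v0_16_0: bool):
    # Same shape as the local dispatch_cudagraph() above: callers never see
    # which keyword surface the installed vLLM exposes.
    def dispatch_cudagraph(num_tokens, disable_full=False, valid_modes=None):
        if is_v0_16_0:
            return dispatcher.dispatch(num_tokens=num_tokens, disable_full=disable_full)
        return dispatcher.dispatch(
            num_tokens=num_tokens,
            valid_modes=valid_modes,
            invalid_modes={"FULL"} if disable_full else None,
        )
    return dispatch_cudagraph


assert make_dispatch(DispatcherV0160(), True)(8, disable_full=True)[0] == "PIECEWISE"
assert make_dispatch(DispatcherMain(), False)(8, disable_full=True)[0] == "PIECEWISE"
```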