From 19ca24c1e9dce259abde157bdecbc8e4ea284ab0 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Thu, 19 Mar 2026 09:55:03 +0000 Subject: [PATCH 01/10] Upgrade vllm commit to 03_19 Signed-off-by: leo-pony --- .github/workflows/bot_pr_create.yaml | 2 +- .github/workflows/dockerfiles/Dockerfile.lint | 2 +- .github/workflows/pr_test_full.yaml | 2 +- .github/workflows/pr_test_light.yaml | 6 +++--- .github/workflows/schedule_codecov_refresh.yaml | 2 +- docs/source/community/versioning_policy.md | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml index 31d9bbe80ee..fb48e0f8221 100644 --- a/.github/workflows/bot_pr_create.yaml +++ b/.github/workflows/bot_pr_create.yaml @@ -37,7 +37,7 @@ jobs: steps: - name: Get vLLM version run: | - VLLM_COMMIT=8b6325758cce5f9c36d38f2462edbd368b97a07c + VLLM_COMMIT=6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209 echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> "$GITHUB_ENV" - name: Checkout repository diff --git a/.github/workflows/dockerfiles/Dockerfile.lint b/.github/workflows/dockerfiles/Dockerfile.lint index f92e3f3c911..2164e8db37c 100644 --- a/.github/workflows/dockerfiles/Dockerfile.lint +++ b/.github/workflows/dockerfiles/Dockerfile.lint @@ -27,7 +27,7 @@ RUN apt-get update -y && \ ARG VLLM_REPO=https://github.com/vllm-project/vllm.git # For lint purpose, actually we need make a main2main matching. 
-ARG VLLM_COMMIT=8b6325758cce5f9c36d38f2462edbd368b97a07c +ARG VLLM_COMMIT=6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209 RUN git clone $VLLM_REPO /vllm-workspace/vllm && \ cd /vllm-workspace/vllm && \ git checkout $VLLM_COMMIT diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml index a1c9f9a9bc7..601dafb3de9 100644 --- a/.github/workflows/pr_test_full.yaml +++ b/.github/workflows/pr_test_full.yaml @@ -75,7 +75,7 @@ jobs: name: e2e-full strategy: matrix: - vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0] needs: [changes] if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }} uses: ./.github/workflows/_e2e_test.yaml diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml index 9ee40f3ab81..afa43c39daf 100644 --- a/.github/workflows/pr_test_light.yaml +++ b/.github/workflows/pr_test_light.yaml @@ -41,7 +41,7 @@ jobs: lint: uses: ./.github/workflows/_pre_commit.yml with: - vllm: 8b6325758cce5f9c36d38f2462edbd368b97a07c + vllm: 6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209 changes: runs-on: linux-aarch64-a2b3-0 outputs: @@ -90,7 +90,7 @@ jobs: if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }} strategy: matrix: - vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0] uses: ./.github/workflows/_unit_test.yaml with: vllm: ${{ matrix.vllm_version }} @@ -102,7 +102,7 @@ jobs: name: e2e-light strategy: matrix: - vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0] # Note (yikun): If CI resource are limited we can split job into two chain jobs needs: [lint, changes] # only trigger e2e test after lint passed and the change is e2e related with pull 
request. diff --git a/.github/workflows/schedule_codecov_refresh.yaml b/.github/workflows/schedule_codecov_refresh.yaml index fd921458f87..3c4b8e94bc5 100644 --- a/.github/workflows/schedule_codecov_refresh.yaml +++ b/.github/workflows/schedule_codecov_refresh.yaml @@ -33,7 +33,7 @@ jobs: name: refresh codecov strategy: matrix: - vllm_version: [8b6325758cce5f9c36d38f2462edbd368b97a07c] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209] uses: ./.github/workflows/_unit_test.yaml with: vllm: ${{ matrix.vllm_version }} diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md index a221d657b85..6672a020d57 100644 --- a/docs/source/community/versioning_policy.md +++ b/docs/source/community/versioning_policy.md @@ -59,7 +59,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu | |-------------|--------------|------------------|-------------|--------------------| -| main | 8b6325758cce5f9c36d38f2462edbd368b97a07c, v0.18.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 | +| main | 6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 | ## Release cadence From 2e9ca1dd7b95cbeeed157622262e3148c877076b Mon Sep 17 00:00:00 2001 From: leo-pony Date: Thu, 19 Mar 2026 09:55:33 +0000 Subject: [PATCH 02/10] Set continue on error to true Signed-off-by: leo-pony --- .github/workflows/_e2e_test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml index 475ee2aa1ed..966ad2c408a 100644 --- a/.github/workflows/_e2e_test.yaml +++ b/.github/workflows/_e2e_test.yaml @@ -18,7 +18,7 @@ on: continue_on_error: required: false type: boolean - default: false + default: true env: UV_INDEX_URL: http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple UV_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi 
From e58b475ba690c4fda58a4093b377340abe232a7a Mon Sep 17 00:00:00 2001 From: leo-pony Date: Thu, 19 Mar 2026 12:47:09 +0000 Subject: [PATCH 03/10] UT StatelessProcessGroup fix. Root cause: Upstream vLLM PR #36330 (elastic_ep: Fix stateless group port races, commit de1a86b7d) refactored StatelessProcessGroup and removed the socket: socket.socket | None field. The socket ownership was moved to a new create_tcp_store() helper instead of being stored as a field on the dataclass. Fix: Removed the now-invalid socket=None keyword argument from the StatelessProcessGroup Signed-off-by: leo-pony --- .../device_communicators/test_pyhccl.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/ut/distributed/device_communicators/test_pyhccl.py b/tests/ut/distributed/device_communicators/test_pyhccl.py index 16eb0954072..5271fc7afc2 100644 --- a/tests/ut/distributed/device_communicators/test_pyhccl.py +++ b/tests/ut/distributed/device_communicators/test_pyhccl.py @@ -6,6 +6,7 @@ from tests.ut.base import TestBase from vllm_ascend.distributed.device_communicators.pyhccl import \ PyHcclCommunicator +from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, vllm_version_is class MockHcclLib: @@ -45,10 +46,15 @@ def test_load_hccl_fail(self): @patch("vllm_ascend.utils.current_stream", return_value=MagicMock(npu_stream=5678)) def test_stateless_group(self, *_): - group = StatelessProcessGroup(rank=3, - world_size=4, - store=None, - socket=None) + if vllm_version_is("0.17.0"): + group = StatelessProcessGroup(rank=3, + world_size=4, + store=None, + socket=None) + else: + group = StatelessProcessGroup(rank=3, + world_size=4, + store=None) comm = PyHcclCommunicator(group=group, device=3) From b35c62c23f28e0fe3775f4efb72755b37752ed91 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Fri, 20 Mar 2026 03:42:08 +0000 Subject: [PATCH 04/10] Root Cause: Upstream commit 0d81a1fe6 ([V0 Deprecation] Deprecate virtual engine #37195) removed the virtual_engine parameter 
from set_forward_context(), create_forward_context(), and the ForwardContext dataclass. This broke vllm-ascend which was passing virtual_engine in kwargs and accessing forward_context.virtual_engine Signed-off-by: leo-pony --- vllm_ascend/ascend_forward_context.py | 1 - vllm_ascend/ops/mla.py | 2 +- vllm_ascend/patch/worker/patch_qwen3_5.py | 2 +- vllm_ascend/patch/worker/patch_qwen3_next.py | 2 +- vllm_ascend/platform.py | 2 -- 5 files changed, 3 insertions(+), 6 deletions(-) diff --git a/vllm_ascend/ascend_forward_context.py b/vllm_ascend/ascend_forward_context.py index 243dede6f26..f7d145c5fd3 100644 --- a/vllm_ascend/ascend_forward_context.py +++ b/vllm_ascend/ascend_forward_context.py @@ -53,7 +53,6 @@ def set_ascend_forward_context( forward_context_kwargs = { "attn_metadata": attn_metadata, "vllm_config": vllm_config, - "virtual_engine": virtual_engine, "num_tokens": num_tokens, "num_tokens_across_dp": num_tokens_across_dp, "cudagraph_runtime_mode": aclgraph_runtime_mode, diff --git a/vllm_ascend/ops/mla.py b/vllm_ascend/ops/mla.py index 4da2507efae..17a5858aa68 100644 --- a/vllm_ascend/ops/mla.py +++ b/vllm_ascend/ops/mla.py @@ -183,7 +183,7 @@ def mla_forward( attn_metadata = forward_context.attn_metadata[self.mla_attn.layer_name] else: attn_metadata = forward_context.attn_metadata - kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine] + kv_cache = self.mla_attn.kv_cache[0] self.mla_attn.impl.forward( self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output ) diff --git a/vllm_ascend/patch/worker/patch_qwen3_5.py b/vllm_ascend/patch/worker/patch_qwen3_5.py index 0aa9bce6d8a..19adf6a9c61 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_5.py +++ b/vllm_ascend/patch/worker/patch_qwen3_5.py @@ -66,7 +66,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = 
attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[forward_context.virtual_engine] + self_kv_cache = self.kv_cache[0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/patch/worker/patch_qwen3_next.py b/vllm_ascend/patch/worker/patch_qwen3_next.py index d458aefc02f..5f2021dbe8b 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_next.py +++ b/vllm_ascend/patch/worker/patch_qwen3_next.py @@ -124,7 +124,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[forward_context.virtual_engine] + self_kv_cache = self.kv_cache[0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py index bf460a83126..fc9c63fccfe 100644 --- a/vllm_ascend/platform.py +++ b/vllm_ascend/platform.py @@ -551,7 +551,6 @@ def set_additional_forward_context( attn_metadata: dict[str, Any], vllm_config: VllmConfig, dp_metadata, - virtual_engine: int = 0, num_tokens: int = 0, num_tokens_across_dp: torch.Tensor | None = None, cudagraph_runtime_mode=None, @@ -565,7 +564,6 @@ def set_additional_forward_context( vllm_config (VllmConfig): configuration of vllm. dp_metadata (Dpmetadata): metadata for data parallelism. lack of typehint because of circular import. - virtual_engine (int, optional): index of virtual engine. Defaults to 0. num_tokens (int | None, optional): number of tokens. Defaults to None. num_tokens_across_dp (torch.Tensor | None, optional): number of tokens across data parallelism.Defaults to None. 
From 68c6eb2434e6df4d54a363b681f1ffbf006d23d4 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Fri, 20 Mar 2026 07:14:30 +0000 Subject: [PATCH 05/10] compatible with v0.17.0 for virtual_engine args remove Signed-off-by: leo-pony --- vllm_ascend/ascend_forward_context.py | 3 +++ vllm_ascend/ops/mla.py | 2 +- vllm_ascend/patch/worker/patch_qwen3_5.py | 4 ++-- vllm_ascend/patch/worker/patch_qwen3_next.py | 4 ++-- vllm_ascend/platform.py | 2 ++ 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/vllm_ascend/ascend_forward_context.py b/vllm_ascend/ascend_forward_context.py index f7d145c5fd3..c201695fcba 100644 --- a/vllm_ascend/ascend_forward_context.py +++ b/vllm_ascend/ascend_forward_context.py @@ -19,6 +19,7 @@ is_drafter_moe_model, is_moe_model, speculative_enable_dispatch_gmm_combine_decode, + vllm_version_is, ) @@ -59,6 +60,8 @@ def set_ascend_forward_context( "batch_descriptor": batch_descriptor, "skip_compiled": skip_compiled, } + if vllm_version_is("0.17.0"): + forward_context_kwargs["virtual_engine"] = virtual_engine with set_forward_context(**forward_context_kwargs): forward_context = get_forward_context() diff --git a/vllm_ascend/ops/mla.py b/vllm_ascend/ops/mla.py index 17a5858aa68..a9799dae996 100644 --- a/vllm_ascend/ops/mla.py +++ b/vllm_ascend/ops/mla.py @@ -183,7 +183,7 @@ def mla_forward( attn_metadata = forward_context.attn_metadata[self.mla_attn.layer_name] else: attn_metadata = forward_context.attn_metadata - kv_cache = self.mla_attn.kv_cache[0] + kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine if vllm_version_is("0.17.0") else 0] self.mla_attn.impl.forward( self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output ) diff --git a/vllm_ascend/patch/worker/patch_qwen3_5.py b/vllm_ascend/patch/worker/patch_qwen3_5.py index 19adf6a9c61..9e9923906d6 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_5.py +++ b/vllm_ascend/patch/worker/patch_qwen3_5.py @@ -29,7 +29,7 @@ from 
vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector from vllm_ascend.ops.triton.fla.sigmoid_gating import fused_sigmoid_gating_delta_rule_update from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch -from vllm_ascend.utils import enable_sp +from vllm_ascend.utils import enable_sp, vllm_version_is class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet): @@ -66,7 +66,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[0] + self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.17.0") else 0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/patch/worker/patch_qwen3_next.py b/vllm_ascend/patch/worker/patch_qwen3_next.py index 5f2021dbe8b..876082cc590 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_next.py +++ b/vllm_ascend/patch/worker/patch_qwen3_next.py @@ -32,7 +32,7 @@ from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import fused_qkvzba_split_reshape_cat from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch -from vllm_ascend.utils import enable_sp +from vllm_ascend.utils import enable_sp, vllm_version_is class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet): @@ -124,7 +124,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[0] + self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.17.0") else 0] conv_state = 
self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py index fc9c63fccfe..179e6a7edcc 100644 --- a/vllm_ascend/platform.py +++ b/vllm_ascend/platform.py @@ -551,6 +551,7 @@ def set_additional_forward_context( attn_metadata: dict[str, Any], vllm_config: VllmConfig, dp_metadata, + virtual_engine: int = 0, # ToDo:: Remove me when upgrade to vllm 0.18.0 from 0.17.0 num_tokens: int = 0, num_tokens_across_dp: torch.Tensor | None = None, cudagraph_runtime_mode=None, @@ -564,6 +565,7 @@ def set_additional_forward_context( vllm_config (VllmConfig): configuration of vllm. dp_metadata (Dpmetadata): metadata for data parallelism. lack of typehint because of circular import. + virtual_engine (int, optional): index of virtual engine. Defaults to 0. num_tokens (int | None, optional): number of tokens. Defaults to None. num_tokens_across_dp (torch.Tensor | None, optional): number of tokens across data parallelism.Defaults to None. 
From 4f70ceff3f71afe58de201185265f9b066fee7d0 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Fri, 20 Mar 2026 07:18:24 +0000 Subject: [PATCH 06/10] recover continue-on-error to false Signed-off-by: leo-pony --- .github/workflows/_e2e_test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml index 966ad2c408a..475ee2aa1ed 100644 --- a/.github/workflows/_e2e_test.yaml +++ b/.github/workflows/_e2e_test.yaml @@ -18,7 +18,7 @@ on: continue_on_error: required: false type: boolean - default: true + default: false env: UV_INDEX_URL: http://cache-service.nginx-pypi-cache.svc.cluster.local/pypi/simple UV_EXTRA_INDEX_URL: https://mirrors.huaweicloud.com/ascend/repos/pypi From c104b7a59ca9ce9d9c198e4d61d1d71df82f8e83 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Mon, 23 Mar 2026 03:17:52 +0000 Subject: [PATCH 07/10] remove vllm 0.17.0 compatible Signed-off-by: leo-pony --- .../distributed/device_communicators/test_pyhccl.py | 13 +++---------- vllm_ascend/ascend_forward_context.py | 3 --- vllm_ascend/ops/mla.py | 2 +- vllm_ascend/patch/worker/patch_qwen3_5.py | 4 ++-- vllm_ascend/patch/worker/patch_qwen3_next.py | 4 ++-- vllm_ascend/platform.py | 2 -- 6 files changed, 8 insertions(+), 20 deletions(-) diff --git a/tests/ut/distributed/device_communicators/test_pyhccl.py b/tests/ut/distributed/device_communicators/test_pyhccl.py index 5271fc7afc2..2ef82daec5f 100644 --- a/tests/ut/distributed/device_communicators/test_pyhccl.py +++ b/tests/ut/distributed/device_communicators/test_pyhccl.py @@ -6,7 +6,6 @@ from tests.ut.base import TestBase from vllm_ascend.distributed.device_communicators.pyhccl import \ PyHcclCommunicator -from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, vllm_version_is class MockHcclLib: @@ -46,15 +45,9 @@ def test_load_hccl_fail(self): @patch("vllm_ascend.utils.current_stream", return_value=MagicMock(npu_stream=5678)) def test_stateless_group(self, *_): - if 
vllm_version_is("0.17.0"): - group = StatelessProcessGroup(rank=3, - world_size=4, - store=None, - socket=None) - else: - group = StatelessProcessGroup(rank=3, - world_size=4, - store=None) + group = StatelessProcessGroup(rank=3, + world_size=4, + store=None) comm = PyHcclCommunicator(group=group, device=3) diff --git a/vllm_ascend/ascend_forward_context.py b/vllm_ascend/ascend_forward_context.py index c201695fcba..f7d145c5fd3 100644 --- a/vllm_ascend/ascend_forward_context.py +++ b/vllm_ascend/ascend_forward_context.py @@ -19,7 +19,6 @@ is_drafter_moe_model, is_moe_model, speculative_enable_dispatch_gmm_combine_decode, - vllm_version_is, ) @@ -60,8 +59,6 @@ def set_ascend_forward_context( "batch_descriptor": batch_descriptor, "skip_compiled": skip_compiled, } - if vllm_version_is("0.17.0"): - forward_context_kwargs["virtual_engine"] = virtual_engine with set_forward_context(**forward_context_kwargs): forward_context = get_forward_context() diff --git a/vllm_ascend/ops/mla.py b/vllm_ascend/ops/mla.py index a9799dae996..17a5858aa68 100644 --- a/vllm_ascend/ops/mla.py +++ b/vllm_ascend/ops/mla.py @@ -183,7 +183,7 @@ def mla_forward( attn_metadata = forward_context.attn_metadata[self.mla_attn.layer_name] else: attn_metadata = forward_context.attn_metadata - kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine if vllm_version_is("0.17.0") else 0] + kv_cache = self.mla_attn.kv_cache[0] self.mla_attn.impl.forward( self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output ) diff --git a/vllm_ascend/patch/worker/patch_qwen3_5.py b/vllm_ascend/patch/worker/patch_qwen3_5.py index 9e9923906d6..19adf6a9c61 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_5.py +++ b/vllm_ascend/patch/worker/patch_qwen3_5.py @@ -29,7 +29,7 @@ from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector from vllm_ascend.ops.triton.fla.sigmoid_gating import fused_sigmoid_gating_delta_rule_update from vllm_ascend.ops.triton.fused_gdn_gating 
import fused_gdn_gating_patch -from vllm_ascend.utils import enable_sp, vllm_version_is +from vllm_ascend.utils import enable_sp class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet): @@ -66,7 +66,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.17.0") else 0] + self_kv_cache = self.kv_cache[0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/patch/worker/patch_qwen3_next.py b/vllm_ascend/patch/worker/patch_qwen3_next.py index 876082cc590..5f2021dbe8b 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_next.py +++ b/vllm_ascend/patch/worker/patch_qwen3_next.py @@ -32,7 +32,7 @@ from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import fused_qkvzba_split_reshape_cat from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch -from vllm_ascend.utils import enable_sp, vllm_version_is +from vllm_ascend.utils import enable_sp class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet): @@ -124,7 +124,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.17.0") else 0] + self_kv_cache = self.kv_cache[0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py index 
179e6a7edcc..fc9c63fccfe 100644 --- a/vllm_ascend/platform.py +++ b/vllm_ascend/platform.py @@ -551,7 +551,6 @@ def set_additional_forward_context( attn_metadata: dict[str, Any], vllm_config: VllmConfig, dp_metadata, - virtual_engine: int = 0, # ToDo:: Remove me when upgrade to vllm 0.18.0 from 0.17.0 num_tokens: int = 0, num_tokens_across_dp: torch.Tensor | None = None, cudagraph_runtime_mode=None, @@ -565,7 +564,6 @@ def set_additional_forward_context( vllm_config (VllmConfig): configuration of vllm. dp_metadata (Dpmetadata): metadata for data parallelism. lack of typehint because of circular import. - virtual_engine (int, optional): index of virtual engine. Defaults to 0. num_tokens (int | None, optional): number of tokens. Defaults to None. num_tokens_across_dp (torch.Tensor | None, optional): number of tokens across data parallelism.Defaults to None. From 5cff57aa85575d422df2ebc0b295338d136cbb4d Mon Sep 17 00:00:00 2001 From: leo-pony Date: Mon, 23 Mar 2026 03:24:59 +0000 Subject: [PATCH 08/10] fix error vllm version for ci Signed-off-by: leo-pony --- .github/workflows/pr_test_full.yaml | 2 +- .github/workflows/pr_test_light.yaml | 4 ++-- docs/source/community/versioning_policy.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml index 601dafb3de9..deca948dcf1 100644 --- a/.github/workflows/pr_test_full.yaml +++ b/.github/workflows/pr_test_full.yaml @@ -75,7 +75,7 @@ jobs: name: e2e-full strategy: matrix: - vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0] needs: [changes] if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }} uses: ./.github/workflows/_e2e_test.yaml diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml index afa43c39daf..3e705778f8b 100644 --- a/.github/workflows/pr_test_light.yaml +++ 
b/.github/workflows/pr_test_light.yaml @@ -90,7 +90,7 @@ jobs: if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }} strategy: matrix: - vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0] uses: ./.github/workflows/_unit_test.yaml with: vllm: ${{ matrix.vllm_version }} @@ -102,7 +102,7 @@ jobs: name: e2e-light strategy: matrix: - vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0] + vllm_version: [6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0] # Note (yikun): If CI resource are limited we can split job into two chain jobs needs: [lint, changes] # only trigger e2e test after lint passed and the change is e2e related with pull request. diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md index 6672a020d57..49b549581c7 100644 --- a/docs/source/community/versioning_policy.md +++ b/docs/source/community/versioning_policy.md @@ -59,7 +59,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu | |-------------|--------------|------------------|-------------|--------------------| -| main | 6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.17.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 | +| main | 6a9cceb219fcbd6b1eb540ddfdc77ec160f0e209, v0.18.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 | ## Release cadence From 137395d406c319124f8ddf2d7d5d392c79995776 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Mon, 23 Mar 2026 03:50:08 +0000 Subject: [PATCH 09/10] add vllm 0.18.0 compatible support Signed-off-by: leo-pony --- .../distributed/device_communicators/test_pyhccl.py | 13 ++++++++++--- vllm_ascend/ascend_forward_context.py | 3 +++ vllm_ascend/ops/mla.py | 2 +- vllm_ascend/patch/worker/patch_qwen3_5.py | 4 ++-- vllm_ascend/patch/worker/patch_qwen3_next.py 
| 4 ++-- vllm_ascend/platform.py | 2 ++ 6 files changed, 20 insertions(+), 8 deletions(-) diff --git a/tests/ut/distributed/device_communicators/test_pyhccl.py b/tests/ut/distributed/device_communicators/test_pyhccl.py index 2ef82daec5f..bb0fd592025 100644 --- a/tests/ut/distributed/device_communicators/test_pyhccl.py +++ b/tests/ut/distributed/device_communicators/test_pyhccl.py @@ -6,6 +6,7 @@ from tests.ut.base import TestBase from vllm_ascend.distributed.device_communicators.pyhccl import \ PyHcclCommunicator +from vllm_ascend.utils import ACL_FORMAT_FRACTAL_NZ, vllm_version_is class MockHcclLib: @@ -45,9 +46,15 @@ def test_load_hccl_fail(self): @patch("vllm_ascend.utils.current_stream", return_value=MagicMock(npu_stream=5678)) def test_stateless_group(self, *_): - group = StatelessProcessGroup(rank=3, - world_size=4, - store=None) + if vllm_version_is("0.18.0"): + group = StatelessProcessGroup(rank=3, + world_size=4, + store=None, + socket=None) + else: + group = StatelessProcessGroup(rank=3, + world_size=4, + store=None) comm = PyHcclCommunicator(group=group, device=3) diff --git a/vllm_ascend/ascend_forward_context.py b/vllm_ascend/ascend_forward_context.py index f7d145c5fd3..d013be34db2 100644 --- a/vllm_ascend/ascend_forward_context.py +++ b/vllm_ascend/ascend_forward_context.py @@ -19,6 +19,7 @@ is_drafter_moe_model, is_moe_model, speculative_enable_dispatch_gmm_combine_decode, + vllm_version_is, ) @@ -59,6 +60,8 @@ def set_ascend_forward_context( "batch_descriptor": batch_descriptor, "skip_compiled": skip_compiled, } + if vllm_version_is("0.18.0"): + forward_context_kwargs["virtual_engine"] = virtual_engine with set_forward_context(**forward_context_kwargs): forward_context = get_forward_context() diff --git a/vllm_ascend/ops/mla.py b/vllm_ascend/ops/mla.py index 17a5858aa68..c6a50035b70 100644 --- a/vllm_ascend/ops/mla.py +++ b/vllm_ascend/ops/mla.py @@ -183,7 +183,7 @@ def mla_forward( attn_metadata = 
forward_context.attn_metadata[self.mla_attn.layer_name] else: attn_metadata = forward_context.attn_metadata - kv_cache = self.mla_attn.kv_cache[0] + kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine if vllm_version_is("0.18.0") else 0] self.mla_attn.impl.forward( self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output ) diff --git a/vllm_ascend/patch/worker/patch_qwen3_5.py b/vllm_ascend/patch/worker/patch_qwen3_5.py index 19adf6a9c61..748438fb891 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_5.py +++ b/vllm_ascend/patch/worker/patch_qwen3_5.py @@ -29,7 +29,7 @@ from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector from vllm_ascend.ops.triton.fla.sigmoid_gating import fused_sigmoid_gating_delta_rule_update from vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch -from vllm_ascend.utils import enable_sp +from vllm_ascend.utils import enable_sp, vllm_version_is class AscendQwen3_5GatedDeltaNet(Qwen3_5GatedDeltaNet): @@ -66,7 +66,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[0] + self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.18.0") else 0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/patch/worker/patch_qwen3_next.py b/vllm_ascend/patch/worker/patch_qwen3_next.py index 5f2021dbe8b..bbbeb8bb079 100644 --- a/vllm_ascend/patch/worker/patch_qwen3_next.py +++ b/vllm_ascend/patch/worker/patch_qwen3_next.py @@ -32,7 +32,7 @@ from vllm_ascend.attention.utils import maybe_save_kv_layer_to_connector from vllm_ascend.ops.triton.fla.fused_qkvzba_split_reshape import fused_qkvzba_split_reshape_cat from 
vllm_ascend.ops.triton.fused_gdn_gating import fused_gdn_gating_patch -from vllm_ascend.utils import enable_sp +from vllm_ascend.utils import enable_sp, vllm_version_is class AscendQwen3Next_GatedDeltaNet(Qwen3NextGatedDeltaNet): @@ -124,7 +124,7 @@ def _forward_core( non_spec_token_indx = attn_metadata.non_spec_token_indx spec_state_indices_tensor = attn_metadata.spec_state_indices_tensor # noqa: E501 non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor # noqa: E501 - self_kv_cache = self.kv_cache[0] + self_kv_cache = self.kv_cache[forward_context.virtual_engine if vllm_version_is("0.18.0") else 0] conv_state = self_kv_cache[0].transpose(-1, -2) ssm_state = self_kv_cache[1] num_actual_tokens = attn_metadata.num_actual_tokens diff --git a/vllm_ascend/platform.py b/vllm_ascend/platform.py index fc9c63fccfe..c6f6f852d9d 100644 --- a/vllm_ascend/platform.py +++ b/vllm_ascend/platform.py @@ -551,6 +551,7 @@ def set_additional_forward_context( attn_metadata: dict[str, Any], vllm_config: VllmConfig, dp_metadata, + virtual_engine: int = 0, # ToDo:: Remove me when upgrade to vllm 0.19.0 from 0.18.0 num_tokens: int = 0, num_tokens_across_dp: torch.Tensor | None = None, cudagraph_runtime_mode=None, @@ -564,6 +565,7 @@ def set_additional_forward_context( vllm_config (VllmConfig): configuration of vllm. dp_metadata (Dpmetadata): metadata for data parallelism. lack of typehint because of circular import. + virtual_engine (int, optional): index of virtual engine. Defaults to 0. num_tokens (int | None, optional): number of tokens. Defaults to None. num_tokens_across_dp (torch.Tensor | None, optional): number of tokens across data parallelism.Defaults to None. 
From e3e2195b1147df01588be04b89d239e24fd657d0 Mon Sep 17 00:00:00 2001 From: leo-pony Date: Mon, 23 Mar 2026 03:57:59 +0000 Subject: [PATCH 10/10] fix pre-commit vllm_version_is not imported Signed-off-by: leo-pony --- vllm_ascend/ops/mla.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm_ascend/ops/mla.py b/vllm_ascend/ops/mla.py index c6a50035b70..1d420a14bab 100644 --- a/vllm_ascend/ops/mla.py +++ b/vllm_ascend/ops/mla.py @@ -33,7 +33,7 @@ from vllm_ascend.ascend_config import get_ascend_config from vllm_ascend.ascend_forward_context import _EXTRA_CTX -from vllm_ascend.utils import is_vl_model, parse_layer_idx +from vllm_ascend.utils import is_vl_model, parse_layer_idx, vllm_version_is class IndexerWrapper(nn.Module):