From 0475e3810ea149130a91d2363790aa70ad1bb658 Mon Sep 17 00:00:00 2001
From: leo-pony
Date: Mon, 29 Dec 2025 15:47:32 +0800
Subject: [PATCH 1/2] fix break by vllm pr 31395

Signed-off-by: leo-pony
---
 vllm_ascend/worker/worker.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/vllm_ascend/worker/worker.py b/vllm_ascend/worker/worker.py
index f061ba46b05..f9985a067b5 100644
--- a/vllm_ascend/worker/worker.py
+++ b/vllm_ascend/worker/worker.py
@@ -27,7 +27,7 @@
 import vllm.envs as envs_vllm
 from torch_npu.op_plugin.atb._atb_ops import _register_atb_extensions
 from torch_npu.profiler import dynamic_profile as dp
-from vllm.config import VllmConfig
+from vllm.config import VllmConfig, set_current_vllm_config
 from vllm.distributed import (ensure_model_parallel_initialized,
                               init_distributed_environment)
 from vllm.distributed.ec_transfer import ensure_ec_transfer_initialized
@@ -351,7 +351,8 @@ def load_model(self) -> None:
         else:
             from contextlib import nullcontext
             context = nullcontext()  # type: ignore
-        with context:
+
+        with context, set_current_vllm_config(self.vllm_config):
             self.model_runner.load_model()
 
     def compile_or_warm_up_model(self) -> None:

From ea63759fc9c4e2a7c2bd01234f270ded3d1f78d2 Mon Sep 17 00:00:00 2001
From: leo-pony
Date: Mon, 29 Dec 2025 15:50:10 +0800
Subject: [PATCH 2/2] pin vllm version to 12-29

Signed-off-by: leo-pony
---
 .github/workflows/bot_pr_create.yaml       | 2 +-
 .github/workflows/pr_test_full.yaml        | 2 +-
 .github/workflows/pr_test_light.yaml       | 6 +++---
 docs/source/community/versioning_policy.md | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml
index ee9b6c9ebee..3416973cba0 100644
--- a/.github/workflows/bot_pr_create.yaml
+++ b/.github/workflows/bot_pr_create.yaml
@@ -34,7 +34,7 @@ jobs:
     steps:
       - name: Get vLLM version
         run: |
-          VLLM_COMMIT=5326c89803566a131c928f7fdd2100b75c981a42
+          VLLM_COMMIT=45c1ca1ca1ee8fa06df263c8715e8a412ff408d4
           echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV

       - name: Checkout repository
diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml
index 85966381799..f81081b1399 100644
--- a/.github/workflows/pr_test_full.yaml
+++ b/.github/workflows/pr_test_full.yaml
@@ -74,7 +74,7 @@ jobs:
     name: e2e-full
     strategy:
       matrix:
-        vllm_version: [5326c89803566a131c928f7fdd2100b75c981a42, v0.13.0]
+        vllm_version: [45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0]
     needs: [changes]
     if: ${{ needs.changes.outputs.e2e_tracker == 'true' }}
     uses: ./.github/workflows/_e2e_test.yaml
diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml
index e4fd67d72b4..15e8652e34a 100644
--- a/.github/workflows/pr_test_light.yaml
+++ b/.github/workflows/pr_test_light.yaml
@@ -42,7 +42,7 @@
   lint:
     uses: ./.github/workflows/_pre_commit.yml
     with:
-      vllm: 5326c89803566a131c928f7fdd2100b75c981a42
+      vllm: 45c1ca1ca1ee8fa06df263c8715e8a412ff408d4
   changes:
     runs-on: linux-aarch64-a2-0
     outputs:
@@ -90,7 +90,7 @@
       SOC_VERSION: ascend910b1
     strategy:
       matrix:
-        vllm_version: [5326c89803566a131c928f7fdd2100b75c981a42, v0.13.0]
+        vllm_version: [45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0]

     steps:
       - name: Free up disk space
@@ -160,7 +160,7 @@
     name: e2e-light
     strategy:
       matrix:
-        vllm_version: [5326c89803566a131c928f7fdd2100b75c981a42, v0.13.0]
+        vllm_version: [45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0]
     # Note (yikun): If CI resource are limited we can split job into two chain jobs
     needs: [lint, changes]
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md
index fdb7fbaba52..20ddab9383a 100644
--- a/docs/source/community/versioning_policy.md
+++ b/docs/source/community/versioning_policy.md
@@ -51,7 +51,7 @@ If you're using v0.7.3, don't forget to install [mindie-turbo](https://pypi.org/

 For main branch of vLLM Ascend, we usually make it compatible with the latest vLLM release and a newer commit hash of vLLM. Please note that this table is usually updated. Please check it regularly.

 | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
 |-------------|--------------|------------------|-------------|--------------------|
-| main | 5326c89803566a131c928f7fdd2100b75c981a42, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |
+| main | 45c1ca1ca1ee8fa06df263c8715e8a412ff408d4, v0.13.0 tag | >= 3.10, < 3.12 | 8.3.RC2 | 2.8.0 / 2.8.0 |

 ## Release cadence