diff --git a/.github/workflows/_e2e_nightly_multi_node.yaml b/.github/workflows/_e2e_nightly_multi_node.yaml
index fa3659e5600..84593c57e0f 100644
--- a/.github/workflows/_e2e_nightly_multi_node.yaml
+++ b/.github/workflows/_e2e_nightly_multi_node.yaml
@@ -32,7 +32,7 @@ on:
       description: how many pods will be pulled up via lws.yaml, indicates number of nodes we need
     vllm_version:
       required: false
-      default: "v0.14.1"
+      default: "v0.15.0rc0"
       type: string
       description: vllm version to use
     vllm_ascend_remote_url:
diff --git a/.github/workflows/bot_pr_create.yaml b/.github/workflows/bot_pr_create.yaml
index 1dea09f844d..961b2361a5c 100644
--- a/.github/workflows/bot_pr_create.yaml
+++ b/.github/workflows/bot_pr_create.yaml
@@ -37,7 +37,7 @@ jobs:
     steps:
       - name: Get vLLM version
         run: |
-          VLLM_COMMIT=dc917cceb877dfd13f98c538c4c96158047d98bd
+          VLLM_COMMIT=cf1167e50b809f18efd21fb3418dd75d2805b14f
           echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> $GITHUB_ENV

       - name: Checkout repository
diff --git a/.github/workflows/dockerfiles/Dockerfile.lint b/.github/workflows/dockerfiles/Dockerfile.lint
index 9bb2a5b8aa5..1abfbbe08d0 100644
--- a/.github/workflows/dockerfiles/Dockerfile.lint
+++ b/.github/workflows/dockerfiles/Dockerfile.lint
@@ -27,7 +27,7 @@ RUN apt-get update -y && \
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
 # For lint purpose, actually we need make a main2main matching.
-ARG VLLM_COMMIT=dc917cceb877dfd13f98c538c4c96158047d98bd
+ARG VLLM_COMMIT=cf1167e50b809f18efd21fb3418dd75d2805b14f
 RUN git clone $VLLM_REPO /vllm-workspace/vllm && \
     cd /vllm-workspace/vllm && \
     git checkout $VLLM_COMMIT
diff --git a/.github/workflows/pr_test_full.yaml b/.github/workflows/pr_test_full.yaml
index 740d2cb1245..8cdb4496fa7 100644
--- a/.github/workflows/pr_test_full.yaml
+++ b/.github/workflows/pr_test_full.yaml
@@ -75,7 +75,7 @@ jobs:
     name: e2e-full
     strategy:
       matrix:
-        vllm_version: [dc917cceb877dfd13f98c538c4c96158047d98bd, v0.14.1]
+        vllm_version: [cf1167e50b809f18efd21fb3418dd75d2805b14f, v0.15.0rc0]
     needs: [changes]
     if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }}
     uses: ./.github/workflows/_e2e_test.yaml
diff --git a/.github/workflows/pr_test_light.yaml b/.github/workflows/pr_test_light.yaml
index a698639b277..b0a389909c9 100644
--- a/.github/workflows/pr_test_light.yaml
+++ b/.github/workflows/pr_test_light.yaml
@@ -41,7 +41,7 @@ jobs:
   lint:
     uses: ./.github/workflows/_pre_commit.yml
     with:
-      vllm: dc917cceb877dfd13f98c538c4c96158047d98bd
+      vllm: cf1167e50b809f18efd21fb3418dd75d2805b14f
   changes:
     runs-on: linux-aarch64-a2-0
     outputs:
@@ -87,7 +87,7 @@ jobs:
     if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
     strategy:
       matrix:
-        vllm_version: [dc917cceb877dfd13f98c538c4c96158047d98bd, v0.14.1]
+        vllm_version: [cf1167e50b809f18efd21fb3418dd75d2805b14f, v0.15.0rc0]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
@@ -99,7 +99,7 @@ jobs:
     name: e2e-light
     strategy:
       matrix:
-        vllm_version: [dc917cceb877dfd13f98c538c4c96158047d98bd, v0.14.1]
+        vllm_version: [cf1167e50b809f18efd21fb3418dd75d2805b14f, v0.15.0rc0]
     # Note (yikun): If CI resource are limited we can split job into two chain jobs
     needs: [lint, changes]
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
diff --git a/.github/workflows/schedule_codecov_refresh.yaml b/.github/workflows/schedule_codecov_refresh.yaml
index 98f862918a3..af0c278dcb5 100644
--- a/.github/workflows/schedule_codecov_refresh.yaml
+++ b/.github/workflows/schedule_codecov_refresh.yaml
@@ -33,7 +33,7 @@ jobs:
     name: refresh codecov
     strategy:
       matrix:
-        vllm_version: [dc917cceb877dfd13f98c538c4c96158047d98bd]
+        vllm_version: [cf1167e50b809f18efd21fb3418dd75d2805b14f]
     uses: ./.github/workflows/_unit_test.yaml
     with:
       vllm: ${{ matrix.vllm_version }}
diff --git a/.github/workflows/schedule_nightly_test_a2.yaml b/.github/workflows/schedule_nightly_test_a2.yaml
index d25ffd58c66..9577d0e4120 100644
--- a/.github/workflows/schedule_nightly_test_a2.yaml
+++ b/.github/workflows/schedule_nightly_test_a2.yaml
@@ -133,7 +133,7 @@ jobs:
           - Qwen3-Omni-30B-A3B-Instruct
     uses: ./.github/workflows/_e2e_nightly_single_node_models.yaml
     with:
-      vllm: v0.14.1
+      vllm: v0.15.0rc0
       runner: ${{ matrix.test_config.os }}
       model_list: ${{ toJson(matrix.test_config.model_list) }}
       image: 'swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.5.0-910b-ubuntu22.04-py3.11'
diff --git a/.github/workflows/schedule_test_benchmarks.yaml b/.github/workflows/schedule_test_benchmarks.yaml
index 8585670ddd7..904bee0a84a 100644
--- a/.github/workflows/schedule_test_benchmarks.yaml
+++ b/.github/workflows/schedule_test_benchmarks.yaml
@@ -51,7 +51,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - vllm_branch: v0.14.1
+          - vllm_branch: v0.15.0rc0
             vllm_ascend_branch: main
       max-parallel: 1
     container:
diff --git a/Dockerfile b/Dockerfile
index 02e2820969a..75ae6462725 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -48,7 +48,7 @@ RUN pip config set global.index-url ${PIP_INDEX_URL}
 # Install vLLM
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
-ARG VLLM_TAG=v0.14.1
+ARG VLLM_TAG=v0.15.0rc0
 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
 # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
 RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
diff --git a/Dockerfile.310p b/Dockerfile.310p
index 07b97bc1ca3..f4f142c8ccd 100644
--- a/Dockerfile.310p
+++ b/Dockerfile.310p
@@ -40,7 +40,7 @@ RUN pip config set global.index-url ${PIP_INDEX_URL}
 # Install vLLM
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
-ARG VLLM_TAG=v0.14.1
+ARG VLLM_TAG=v0.15.0rc0
 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
 # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
 RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
diff --git a/Dockerfile.310p.openEuler b/Dockerfile.310p.openEuler
index abf70a8812c..b07fbb83e90 100644
--- a/Dockerfile.310p.openEuler
+++ b/Dockerfile.310p.openEuler
@@ -36,7 +36,7 @@ COPY . /vllm-workspace/vllm-ascend/
 # Install vLLM
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
-ARG VLLM_TAG=v0.14.1
+ARG VLLM_TAG=v0.15.0rc0
 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
 # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
 RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
diff --git a/Dockerfile.a3 b/Dockerfile.a3
index 7688c14d6a8..94b40bc16f1 100644
--- a/Dockerfile.a3
+++ b/Dockerfile.a3
@@ -47,7 +47,7 @@ RUN apt-get update -y && \
 # Install vLLM
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
-ARG VLLM_TAG=v0.14.1
+ARG VLLM_TAG=v0.15.0rc0
 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
 # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
 RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
diff --git a/Dockerfile.a3.openEuler b/Dockerfile.a3.openEuler
index 6129adc7982..65de41960dc 100644
--- a/Dockerfile.a3.openEuler
+++ b/Dockerfile.a3.openEuler
@@ -50,7 +50,7 @@ RUN yum update -y && \
 # Install vLLM
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
-ARG VLLM_TAG=v0.14.1
+ARG VLLM_TAG=v0.15.0rc0
 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
 # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
 RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
diff --git a/Dockerfile.openEuler b/Dockerfile.openEuler
index 7603372f6af..269e4d7a15f 100644
--- a/Dockerfile.openEuler
+++ b/Dockerfile.openEuler
@@ -50,7 +50,7 @@ RUN yum update -y && \
 # Install vLLM
 ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
-ARG VLLM_TAG=v0.14.1
+ARG VLLM_TAG=v0.15.0rc0
 RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
 # In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
 RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/[audio] --extra-index https://download.pytorch.org/whl/cpu/ && \
diff --git a/docs/source/community/versioning_policy.md b/docs/source/community/versioning_policy.md
index 801c8a2a08c..3637d89cc5f 100644
--- a/docs/source/community/versioning_policy.md
+++ b/docs/source/community/versioning_policy.md
@@ -55,7 +55,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL
 | vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
 |-------------|--------------|------------------|-------------|--------------------|
-| main | dc917cceb877dfd13f98c538c4c96158047d98bd, v0.14.1 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
+| main | cf1167e50b809f18efd21fb3418dd75d2805b14f, v0.15.0rc0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |

 ## Release cadence
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 47ebcec8a09..a00bccc5d56 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -65,7 +65,7 @@
     # the branch of vllm, used in vllm clone
     # - main branch: 'main'
     # - vX.Y.Z branch: 'vX.Y.Z'
-    "vllm_version": "v0.14.1",
+    "vllm_version": "v0.15.0rc0",
     # the branch of vllm-ascend, used in vllm-ascend clone and image tag
     # - main branch: 'main'
     # - vX.Y.Z branch: latest vllm-ascend release tag
@@ -77,7 +77,7 @@
     # CANN image tag
     "cann_image_tag": "8.5.0-910b-ubuntu22.04-py3.11",
     # vllm version in ci
-    "ci_vllm_version": "v0.14.1",
+    "ci_vllm_version": "v0.15.0rc0",
 }

 # For cross-file header anchors
diff --git a/vllm_ascend/ascend_forward_context.py b/vllm_ascend/ascend_forward_context.py
index faa11d2d43d..61c09afb5ff 100644
--- a/vllm_ascend/ascend_forward_context.py
+++ b/vllm_ascend/ascend_forward_context.py
@@ -19,7 +19,6 @@
     is_drafter_moe_model,
     is_moe_model,
     speculative_enable_dispatch_gmm_combine_decode,
-    vllm_version_is,
 )
@@ -59,8 +58,7 @@ def set_ascend_forward_context(
         "batch_descriptor": batch_descriptor,
     }
-    if not vllm_version_is("0.14.1"):
-        forward_context_kwargs["skip_compiled"] = skip_compiled
+    forward_context_kwargs["skip_compiled"] = skip_compiled
     with set_forward_context(**forward_context_kwargs):
         forward_context = get_forward_context()
diff --git a/vllm_ascend/attention/attention_v1.py b/vllm_ascend/attention/attention_v1.py
index 768b908224a..d174cabc598 100644
--- a/vllm_ascend/attention/attention_v1.py
+++ b/vllm_ascend/attention/attention_v1.py
@@ -66,6 +66,8 @@ class AscendAttentionBackend(AttentionBackend):
     accept_output_buffer: bool = True

+    forward_includes_kv_cache_update: bool = True
+
     @staticmethod
     def get_name() -> str:
         # HACK(Ronald1995): vllm `initialize_kv_cache` method in model runner v2 make
@@ -868,6 +870,9 @@ def reshape_and_cache(
         self.key_cache, self.value_cache = kv_cache[0], kv_cache[1]
         slots = attn_metadata.slot_mapping
         encoder_decoder = self.attn_type == AttentionType.ENCODER_DECODER
+
+        slots = slots.to(torch.int32)
+
         DeviceOperator.reshape_and_cache(
             key=key[: attn_metadata.num_actual_tokens] if not encoder_decoder else key,
             value=value[: attn_metadata.num_actual_tokens] if not encoder_decoder else value,
@@ -932,8 +937,10 @@ def forward(
         num_tokens = query.shape[0]
         if attn_metadata is None:
             return output.fill_(0)
+
         if key is not None and value is not None:
             key, value = self.reshape_and_cache(key, value, kv_cache, attn_metadata)
+
         # pooling model branch
         if attn_metadata.model_runner_type == "pooling":
             attn_output = self._forward_encoder_attention(query, key, value, attn_metadata, output)
diff --git a/vllm_ascend/attention/mla_v1.py b/vllm_ascend/attention/mla_v1.py
index 03834212bdf..5c12b8153d4 100644
--- a/vllm_ascend/attention/mla_v1.py
+++ b/vllm_ascend/attention/mla_v1.py
@@ -44,17 +44,14 @@
 from vllm_ascend.ops.rotary_embedding import get_cos_and_sin_mla
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.quantization.methods import AscendW8A8LinearMethod
-from vllm_ascend.utils import ACL_FORMAT_FRACTAL_ND, maybe_trans_nz, vllm_version_is, weak_ref_tensors
+from vllm_ascend.utils import ACL_FORMAT_FRACTAL_ND, maybe_trans_nz, weak_ref_tensors
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch

 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput

 # isort: off
-if vllm_version_is("0.14.1"):
-    from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder  # type: ignore
-else:
-    from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
+from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
 # isort: on

 MAX_O_PROJ_PREFETCH_SIZE = 16 * 1024 * 1024
diff --git a/vllm_ascend/attention/sfa_v1.py b/vllm_ascend/attention/sfa_v1.py
index 6f283429d27..6d2bb1f859d 100644
--- a/vllm_ascend/attention/sfa_v1.py
+++ b/vllm_ascend/attention/sfa_v1.py
@@ -45,16 +45,13 @@
     enable_dsa_cp,
     enable_dsa_cp_with_layer_shard,
     maybe_trans_nz,
-    vllm_version_is,
 )
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch

 if TYPE_CHECKING:
     from vllm.v1.core.sched.output import SchedulerOutput

-if vllm_version_is("0.14.1"):
-    from vllm.v1.attention.backends.mla.common import MLACommonMetadataBuilder  # type: ignore
-else:
-    from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
+from vllm.model_executor.layers.attention.mla_attention import MLACommonMetadataBuilder
+
 # isort: on

 # token count limits within bmm_transpose operator
diff --git a/vllm_ascend/ops/fused_moe/fused_moe.py b/vllm_ascend/ops/fused_moe/fused_moe.py
index 50618ae8152..7e1bde15483 100644
--- a/vllm_ascend/ops/fused_moe/fused_moe.py
+++ b/vllm_ascend/ops/fused_moe/fused_moe.py
@@ -512,6 +512,15 @@ def forward(
         hidden_states: torch.Tensor,
         router_logits: torch.Tensor,
     ) -> tuple[torch.Tensor, torch.Tensor]:
+        if self._shared_experts is None:
+            fused_out = AscendFusedMoE.forward(
+                self,
+                hidden_states=hidden_states,
+                router_logits=router_logits,
+            )
+            shared_out = None
+            return shared_out, fused_out
+
         shared_out, fused_out = AscendFusedMoE.forward(
             self,
             hidden_states=hidden_states,
@@ -571,6 +580,9 @@ def forward_impl(  # type: ignore[override]
         )
         routed_out = fused_moe_results.routed_out

+        if self._shared_experts is None:
+            return routed_out
+
         if self.multistream_overlap_gate:
             fc3_context = get_flash_common3_context()
             assert fc3_context is not None
diff --git a/vllm_ascend/ops/mm_encoder_attention.py b/vllm_ascend/ops/mm_encoder_attention.py
index 19f44066761..5ef46cd9c42 100644
--- a/vllm_ascend/ops/mm_encoder_attention.py
+++ b/vllm_ascend/ops/mm_encoder_attention.py
@@ -38,7 +38,6 @@ def __init__(
         scale: float | None = None,
         num_kv_heads: int | None = None,
         prefix: str = "",
-        multimodal_config: MultiModalConfig | None = None,
     ) -> None:
         """
         Args:
@@ -48,7 +47,6 @@
             num_kv_heads: number of kv heads.
             prefix: This has no effect, it is only here to make it easier to
                 swap between Attention and MMEncoderAttention.
-            multimodal_config: configs for multi-modal.
         """
         super().__init__(
             num_heads=num_heads,
@@ -56,7 +54,6 @@
             scale=scale,
             num_kv_heads=num_kv_heads,
             prefix=prefix,
-            multimodal_config=multimodal_config,
         )

     def reshape_qkv_to_3d(
@@ -84,13 +81,12 @@
         return query, key, value

     def forward_oot(
-            self,
-            query: torch.Tensor,
-            key: torch.Tensor,
-            value: torch.Tensor,
-            cu_seqlens: torch.Tensor | None = None,
-            max_seqlen: torch.Tensor
-            | None = None,  # Only used for Flash Attention
+        self,
+        query: torch.Tensor,
+        key: torch.Tensor,
+        value: torch.Tensor,
+        cu_seqlens: torch.Tensor | None = None,
+        max_seqlen: torch.Tensor | None = None,  # Only used for Flash Attention
     ):
         bsz, q_len = query.size()[:2]
         kv_len = key.size(1)
diff --git a/vllm_ascend/patch/platform/patch_multiproc_executor.py b/vllm_ascend/patch/platform/patch_multiproc_executor.py
index abc955e578c..98cc545c9de 100644
--- a/vllm_ascend/patch/platform/patch_multiproc_executor.py
+++ b/vllm_ascend/patch/platform/patch_multiproc_executor.py
@@ -19,8 +19,6 @@
     set_multiprocessing_worker_envs,
 )

-from vllm_ascend.utils import vllm_version_is
-

 class AscendMultiprocExecutor(MultiprocExecutor):
     def _init_executor(self) -> None:
@@ -178,8 +176,7 @@ def make_worker_process(
             "death_pipe": death_reader,
             "shared_worker_lock": shared_worker_lock,
         }
-        if not vllm_version_is("0.14.1"):
-            process_kwargs["is_driver_worker"] = is_driver_worker
+        process_kwargs["is_driver_worker"] = is_driver_worker
         # Run EngineCore busy loop in background process.
         proc = context.Process(
             target=WorkerProc.worker_main,