2 changes: 1 addition & 1 deletion .github/workflows/bot_pr_create.yaml
@@ -37,7 +37,7 @@ jobs:
steps:
- name: Get vLLM version
run: |
-VLLM_COMMIT=15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+VLLM_COMMIT=4034c3d32e30d01639459edd3ab486f56993876d
echo "VLLM_COMMIT=https://github.com/vllm-project/vllm/commit/$VLLM_COMMIT" >> "$GITHUB_ENV"

- name: Checkout repository
2 changes: 1 addition & 1 deletion .github/workflows/dockerfiles/Dockerfile.lint
@@ -27,7 +27,7 @@ RUN apt-get update -y && \

ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
# For lint purposes, we actually need main-to-main matching here.
-ARG VLLM_COMMIT=15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+ARG VLLM_COMMIT=4034c3d32e30d01639459edd3ab486f56993876d
RUN git clone $VLLM_REPO /vllm-workspace/vllm && \
cd /vllm-workspace/vllm && \
git checkout $VLLM_COMMIT
2 changes: 1 addition & 1 deletion .github/workflows/pr_test_full.yaml
@@ -75,7 +75,7 @@ jobs:
name: e2e-full
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
needs: [changes]
if: ${{ needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.e2e_tracker == true }}
uses: ./.github/workflows/_e2e_test.yaml
6 changes: 3 additions & 3 deletions .github/workflows/pr_test_light.yaml
@@ -41,7 +41,7 @@ jobs:
lint:
uses: ./.github/workflows/_pre_commit.yml
with:
-vllm: 15d76f74e2fdb12a95ea00f0ca283acf6219a2b7
+vllm: 4034c3d32e30d01639459edd3ab486f56993876d
changes:
runs-on: linux-aarch64-a2b3-0
outputs:
@@ -89,7 +89,7 @@ jobs:
if: ${{ needs.lint.result == 'success' && (needs.changes.outputs.e2e_tracker == 'true' || needs.changes.outputs.ut_tracker == 'true') }}
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
uses: ./.github/workflows/_unit_test.yaml
with:
vllm: ${{ matrix.vllm_version }}
@@ -101,7 +101,7 @@
name: e2e-light
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7, v0.16.0]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0]
# Note (yikun): If CI resources are limited we can split this job into two chained jobs
needs: [lint, changes]
# Only trigger the e2e test after lint has passed and the change is e2e related to the pull request.
2 changes: 1 addition & 1 deletion .github/workflows/schedule_codecov_refresh.yaml
@@ -33,7 +33,7 @@ jobs:
name: refresh codecov
strategy:
matrix:
-vllm_version: [15d76f74e2fdb12a95ea00f0ca283acf6219a2b7]
+vllm_version: [4034c3d32e30d01639459edd3ab486f56993876d]
uses: ./.github/workflows/_unit_test.yaml
with:
vllm: ${{ matrix.vllm_version }}
2 changes: 1 addition & 1 deletion docs/source/community/versioning_policy.md
@@ -57,7 +57,7 @@ For main branch of vLLM Ascend, we usually make it compatible with the latest vL

| vLLM Ascend | vLLM | Python | Stable CANN | PyTorch/torch_npu |
|-------------|--------------|------------------|-------------|--------------------|
-| main | 4572a06afe96d0a6d5d3efacf130c71505dd2bc9, v0.16.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |
+| main | 4034c3d32e30d01639459edd3ab486f56993876d, v0.16.0 tag | >= 3.10, < 3.12 | 8.5.0 | 2.9.0 / 2.9.0 |

## Release cadence

1 change: 1 addition & 0 deletions tests/ut/_310p/quantization/test_modelslim_config_310.py
@@ -70,6 +70,7 @@ def test_get_quant_method_for_fused_moe_310(self):
fused_moe_layer = MagicMock(spec=FusedMoE)
fused_moe_layer.moe = MagicMock(spec=FusedMoEConfig)
fused_moe_layer.moe_config = MagicMock(spec=FusedMoEConfig)
+fused_moe_layer.moe_config.moe_backend = "auto"
fused_moe_layer.moe_config.moe_parallel_config = MagicMock(spec=FusedMoEParallelConfig)
fused_moe_layer.moe_config.moe_parallel_config.use_ep = True
fused_moe_layer.moe_config.moe_parallel_config.dp_size = 1
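Side note on the added `moe_backend = "auto"` line: with `spec=FusedMoEConfig`, attribute access on the mock returns a child `MagicMock` rather than a string, so code that compares `moe_backend` against string values needs the attribute pinned explicitly. A minimal sketch of that behavior; the `Config` class below is a hypothetical stand-in for `FusedMoEConfig`:

```python
from unittest.mock import MagicMock


class Config:
    """Hypothetical stand-in for FusedMoEConfig."""
    moe_backend: str = "auto"


cfg = MagicMock(spec=Config)
assert cfg.moe_backend != "auto"  # auto-created child mock, not a string

cfg.moe_backend = "auto"          # pin a real value, as the test above does
assert cfg.moe_backend == "auto"  # now comparable by the code under test
```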
8 changes: 5 additions & 3 deletions tests/ut/distributed/test_communicator.py
@@ -44,7 +44,8 @@ def patched_all_to_all(output_tensor_list,
gather_sizes = [2, 2]
input_ = torch.tensor([10, 20, 30, 40])

-comm = NPUCommunicator(cpu_group=dist.group.WORLD)
+with patch.dict(dist.distributed_c10d._world.pg_map, {dist.group.WORLD: MagicMock()}, clear=False):
+    comm = NPUCommunicator(cpu_group=dist.group.WORLD)

output = comm.all_to_all(input_,
scatter_sizes=scatter_sizes,
@@ -84,7 +85,8 @@

input_ = torch.tensor([[10, 20], [30, 40]])

-comm = NPUCommunicator(cpu_group=dist.group.WORLD)
-output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)
+with patch.dict(dist.distributed_c10d._world.pg_map, {dist.group.WORLD: MagicMock()}, clear=False):
+    comm = NPUCommunicator(cpu_group=dist.group.WORLD)
+    output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)

assert output.tolist() == [[10, 20], [50, 60]]
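Both fixes above use the same trick: temporarily register a mock entry in torch.distributed's private process-group map so `NPUCommunicator` can be constructed against `dist.group.WORLD` without a real `init_process_group`. A self-contained sketch of the pattern; `FakeCommunicator` is a hypothetical stand-in, not the real class:

```python
from unittest.mock import MagicMock, patch

import torch.distributed as dist


class FakeCommunicator:
    """Hypothetical stand-in: resolves its group via the global pg_map."""

    def __init__(self, cpu_group):
        # Fails unless cpu_group has an entry in the process-group map.
        self.group_info = dist.distributed_c10d._world.pg_map[cpu_group]


# clear=False preserves whatever real entries the test harness registered.
with patch.dict(dist.distributed_c10d._world.pg_map,
                {dist.group.WORLD: MagicMock()}, clear=False):
    comm = FakeCommunicator(cpu_group=dist.group.WORLD)
```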
2 changes: 0 additions & 2 deletions vllm_ascend/core/recompute_scheduler.py
@@ -22,7 +22,6 @@
from collections import defaultdict
from dataclasses import dataclass, fields

-from vllm._bc_linter import bc_linter_include
from vllm.config import SchedulerConfig, VllmConfig
from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorMetadata
from vllm.distributed.kv_events import KVEventBatch
@@ -73,7 +72,6 @@ class RecomputeReqInfo:
client_index: int = 0


-@bc_linter_include
@dataclass
class RecomputeSchedulerOutput(SchedulerOutput):
recomputed_reqs: list[RecomputeReqInfo] | None = None
1 change: 1 addition & 0 deletions vllm_ascend/ops/mm_encoder_attention.py
@@ -96,6 +96,7 @@ def forward_oot(
value: torch.Tensor,
cu_seqlens: torch.Tensor | None = None,
max_seqlen: torch.Tensor | None = None, # Only used for Flash Attention
+sequence_lengths: torch.Tensor | None = None,
):
bsz, q_len = query.size()[:2]
kv_len = key.size(1)
14 changes: 14 additions & 0 deletions vllm_ascend/patch/__init__.py
@@ -94,6 +94,20 @@
# Future Plan:
# Remove this patch when vLLM merge the PR.
#
+# ** 6. File: platform/patch_fusion_matcher_compat_ops.py**
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#   1. `torch.ops._C.rms_norm`, `torch.ops._C.fused_add_rms_norm`, and the other fusion-matcher ops
+#    Why:
+#      Upstream vLLM initializes the fusion matcher's global operators at import time.
+#      In Ascend environments these symbols may be absent, causing an import failure.
+#    How:
+#      Inject placeholders only when the symbols are missing, so the import can continue.
+#    Related PR (if no, explain why):
+#      No upstream PR yet; this is a temporary compatibility patch until the upstream adjustment is merged.
+#    Future Plan:
+#      Remove this patch once upstream no longer requires these global symbols or
+#      provides a backend-safe initialization path.
+#
# * Worker Patch:
# ===============
#
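For context on the patch entry above: the failure mode it guards against is upstream code touching `torch.ops._C.<op>` at import time, which raises on builds where the C-extension ops were never registered. A minimal illustrative sketch, not part of the PR:

```python
import torch

# On builds without the registered `_C` extension ops, a bare attribute
# lookup raises AttributeError, aborting whatever import triggered it.
try:
    torch.ops._C.rms_norm
except AttributeError as err:
    print(f"import-time lookup failed: {err}")
```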
1 change: 1 addition & 0 deletions vllm_ascend/patch/platform/__init__.py
@@ -17,6 +17,7 @@
import os

import vllm_ascend.patch.platform.patch_distributed # noqa
+import vllm_ascend.patch.platform.patch_fusion_matcher_compat_ops  # noqa
import vllm_ascend.patch.platform.patch_mamba_config # noqa
import vllm_ascend.patch.platform.patch_sched_yield # noqa

24 changes: 24 additions & 0 deletions vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py
@@ -0,0 +1,24 @@
import torch


class _MissingOp:
    """Callable placeholder for a torch.ops._C operator that was never registered."""

    def __init__(self, op_name: str):
        self.op_name = op_name
        # Mimic an OpOverloadPacket: expose a `.default` overload that points
        # back to this placeholder so attribute chains keep resolving.
        self.default = self

    def __call__(self, *args, **kwargs):
        # Fail loudly if a fusion pass actually tries to execute the missing op.
        raise RuntimeError(f"Missing upstream op `{self.op_name}` was invoked.")


def _set_missing(namespace, op_name: str, full_name: str) -> None:
    # Inject the placeholder only when the real op is absent; never shadow
    # an op that the C extension actually registered.
    if not hasattr(namespace, op_name):
        setattr(namespace, op_name, _MissingOp(full_name))


_set_missing(torch.ops._C, "rms_norm", "torch.ops._C.rms_norm")
_set_missing(torch.ops._C, "fused_add_rms_norm", "torch.ops._C.fused_add_rms_norm")
_set_missing(torch.ops._C, "rotary_embedding", "torch.ops._C.rotary_embedding")
_set_missing(torch.ops._C, "static_scaled_fp8_quant", "torch.ops._C.static_scaled_fp8_quant")
_set_missing(torch.ops._C, "dynamic_scaled_fp8_quant", "torch.ops._C.dynamic_scaled_fp8_quant")
_set_missing(torch.ops._C, "dynamic_per_token_scaled_fp8_quant", "torch.ops._C.dynamic_per_token_scaled_fp8_quant")
_set_missing(torch.ops._C, "silu_and_mul", "torch.ops._C.silu_and_mul")
45 changes: 31 additions & 14 deletions vllm_ascend/worker/model_runner_v1.py
@@ -120,6 +120,7 @@
is_moe_model,
lmhead_tp_enable,
set_weight_prefetch_method,
+vllm_version_is,
)
from vllm_ascend.worker.npu_input_batch import NPUInputBatch
from vllm_ascend.worker.pcp_utils import PCPManager
@@ -1826,16 +1827,26 @@ def _determine_batch_execution_and_padding(
has_lora = len(self.input_batch.lora_id_to_lora_request) > 0 if force_has_lora is None else force_has_lora

# ruff: noqa: E731
-        dispatch_cudagraph = (
-            lambda num_tokens, disable_full: self.cudagraph_dispatcher.dispatch(
-                num_tokens=num_tokens,
-                has_lora=has_lora,
-                uniform_decode=uniform_decode,
-                disable_full=disable_full,
-            )
-            if not force_eager
-            else (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded))
-        )
+        def dispatch_cudagraph(num_tokens, disable_full=False, valid_modes=None):
+            if force_eager:
+                return (CUDAGraphMode.NONE, BatchDescriptor(num_tokens_padded))
+
+            if vllm_version_is("0.16.0"):
+                return self.cudagraph_dispatcher.dispatch(
+                    num_tokens=num_tokens,
+                    has_lora=has_lora,
+                    uniform_decode=uniform_decode,
+                    disable_full=disable_full,
+                )
+            else:
+                return self.cudagraph_dispatcher.dispatch(
+                    num_tokens=num_tokens,
+                    has_lora=has_lora,
+                    uniform_decode=uniform_decode,
+                    valid_modes=valid_modes,
+                    invalid_modes={CUDAGraphMode.FULL} if disable_full else None,
+                )

cudagraph_mode, batch_descriptor = dispatch_cudagraph(num_tokens_padded, use_cascade_attn or has_encoder_output)
num_tokens_padded = batch_descriptor.num_tokens
if enable_sp(self.vllm_config):
@@ -1856,10 +1867,16 @@ def _determine_batch_execution_and_padding(
dp_rank = self.parallel_config.data_parallel_rank
num_tokens_padded = int(num_tokens_across_dp[dp_rank].item())
# Re-dispatch with DP padding
-        cudagraph_mode, batch_descriptor = dispatch_cudagraph(
-            num_tokens_padded,
-            disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value,
-        )
+        if vllm_version_is("0.16.0"):
+            cudagraph_mode, batch_descriptor = dispatch_cudagraph(
+                num_tokens_padded,
+                disable_full=synced_cudagraph_mode <= CUDAGraphMode.PIECEWISE.value,
+            )
+        else:
+            cudagraph_mode, batch_descriptor = dispatch_cudagraph(
+                num_tokens_padded,
+                valid_modes={CUDAGraphMode(synced_cudagraph_mode)},
+            )
# Assert to make sure the agreed upon token count is correct otherwise
# num_tokens_across_dp will no-longer be valid
assert batch_descriptor.num_tokens == num_tokens_padded
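The essence of the refactor above is an adapter between two dispatcher APIs: vLLM v0.16.0 takes a `disable_full` boolean, while newer vLLM takes explicit `valid_modes`/`invalid_modes` sets. A condensed sketch of that translation; the free-standing function and the `vllm.config` import path are assumptions for illustration:

```python
from vllm.config import CUDAGraphMode  # assumed import path

from vllm_ascend.utils import vllm_version_is


def adapted_dispatch(dispatcher, num_tokens, has_lora, uniform_decode, disable_full):
    common = dict(num_tokens=num_tokens, has_lora=has_lora, uniform_decode=uniform_decode)
    if vllm_version_is("0.16.0"):
        # Legacy API: a single boolean opts out of full-graph capture.
        return dispatcher.dispatch(disable_full=disable_full, **common)
    # Newer API: express the same restriction as an invalid-mode set.
    return dispatcher.dispatch(
        invalid_modes={CUDAGraphMode.FULL} if disable_full else None, **common
    )
```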
3 changes: 2 additions & 1 deletion vllm_ascend/worker/worker.py
@@ -430,7 +430,7 @@ def load_model(self) -> None:
with context, set_current_vllm_config(self.vllm_config):
self.model_runner.load_model()

-def compile_or_warm_up_model(self) -> None:
+def compile_or_warm_up_model(self) -> float:
# Note: need to adapt for graph mode.
warmup_sizes = (self.vllm_config.compilation_config.compile_sizes or []).copy()
if not self.model_config.enforce_eager:
@@ -462,6 +462,7 @@ def compile_or_warm_up_model(self) -> None:
# Reset the seed to ensure that the random state is not affected by
# the model initialization and profiling.
set_random_seed(self.model_config.seed)
+return self.vllm_config.compilation_config.compilation_time

def _warm_up_atb(self):
x = torch.rand((2, 4), dtype=torch.float16).npu()
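With `compile_or_warm_up_model` now returning `compilation_config.compilation_time`, a caller can report warm-up cost directly. A hypothetical usage sketch; the `worker` object and logger wiring are assumptions, not shown in this PR:

```python
import logging

logger = logging.getLogger(__name__)

# Assumes an initialized worker whose model was loaded via load_model().
elapsed_s = worker.compile_or_warm_up_model()
logger.info("compile/warm-up finished in %.2f s", elapsed_s)
```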