
Commit d1a117a (parent: 3442fbd)

[Bugfix] Fix DeepSeek V0 precision issue and add accuracy CI for it

Signed-off-by: MengqingCao <[email protected]>

File tree: 5 files changed (+95, -7 lines)

.github/workflows/vllm_ascend_test_long_term.yaml

Lines changed: 21 additions & 6 deletions

@@ -41,9 +41,19 @@ jobs:
     strategy:
       max-parallel: 2
       matrix:
+        os: [linux-arm64-npu-1, linux-arm64-npu-4]
         vllm_version: [main, v0.9.0]
+    concurrency:
+      group: >
+        ${{
+        matrix.os == 'linux-arm64-npu-4'
+        && github.event.pull_request.number
+        && format('pr-{0}-limit-npu-4', github.event.pull_request.number)
+        || format('job-{0}-{1}-{2}', matrix.os, matrix.vllm_version, github.event.pull_request.number)
+        }}
+      cancel-in-progress: false
     name: vLLM Ascend long term test
-    runs-on: linux-arm64-npu-1
+    runs-on: ${{ matrix.os }}
     container:
       # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
       image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10

@@ -92,8 +102,13 @@ jobs:

       - name: Run vllm-project/vllm-ascend long term test
         run: |
-          # spec decode test
-          VLLM_USE_MODELSCOPE=True pytest -sv tests/long_term/spec_decode/e2e/test_v1_mtp_correctness.py
-          VLLM_USE_MODELSCOPE=true pytest -sv tests/long_term/spec_decode/e2e/test_v1_spec_decode.py
-          VLLM_USE_MODELSCOPE=True pytest -sv tests/long_term/spec_decode/e2e/test_mtp_correctness.py # it needs a clean process
-          pytest -sv tests/long_term/spec_decode --ignore=tests/long_term/spec_decode/e2e/test_mtp_correctness.py --ignore=tests/long_term/spec_decode/e2e/test_v1_spec_decode.py --ignore=tests/long_term/spec_decode/e2e/test_v1_mtp_correctness.py
+          if [[ "${{ matrix.os }}" == "linux-arm64-npu-1" ]]; then
+            # spec decode test
+            VLLM_USE_MODELSCOPE=True pytest -sv tests/long_term/spec_decode/e2e/test_v1_mtp_correctness.py
+            VLLM_USE_MODELSCOPE=True pytest -sv tests/long_term/spec_decode/e2e/test_v1_spec_decode.py
+            VLLM_USE_MODELSCOPE=True pytest -sv tests/long_term/spec_decode/e2e/test_mtp_correctness.py # it needs a clean process
+            pytest -sv tests/long_term/spec_decode --ignore=tests/long_term/spec_decode/e2e/test_mtp_correctness.py --ignore=tests/long_term/spec_decode/e2e/test_v1_spec_decode.py --ignore=tests/long_term/spec_decode/e2e/test_v1_mtp_correctness.py
+            pytest -sv tests/long_term/test_accuracy.py
+          else
+            VLLM_USE_MODELSCOPE=True pytest -sv tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py
+          fi
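The `concurrency.group` expression above short-circuits the way GitHub Actions expressions do (`a && b || c` behaves like Python's `a and b or c`): PR-triggered jobs on the four-NPU runner all map to a single group per pull request and, with `cancel-in-progress: false`, queue one after another, while every other matrix job gets a unique group and runs unconstrained. A minimal Python sketch of that selection logic, with a hypothetical helper name:

```python
def concurrency_group(os_label: str, vllm_version: str, pr_number) -> str:
    """Hypothetical mirror of the workflow's concurrency.group expression."""
    if os_label == "linux-arm64-npu-4" and pr_number:
        # All npu-4 jobs of one PR share a group; with
        # cancel-in-progress: false they run one at a time.
        return f"pr-{pr_number}-limit-npu-4"
    # Any other job gets a per-job group and is never queued behind others.
    return f"job-{os_label}-{vllm_version}-{pr_number}"

# Both vllm_version entries on the npu-4 runner collapse to one group:
print(concurrency_group("linux-arm64-npu-4", "main", 42))    # pr-42-limit-npu-4
print(concurrency_group("linux-arm64-npu-4", "v0.9.0", 42))  # pr-42-limit-npu-4
print(concurrency_group("linux-arm64-npu-1", "main", 42))    # job-linux-arm64-npu-1-main-42
```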

tests/conftest.py

Lines changed: 1 addition & 1 deletion

@@ -354,4 +354,4 @@ def prompt_template(request):

 @pytest.fixture(scope="session")
 def ilama_lora_files():
-    return snapshot_download(repo_id="jeeejeee/ilama-text2sql-spider")
+    return snapshot_download(repo_id="jeeejeee/ilama-text2sql-spider")

(The removed and added lines are textually identical; the change evidently just adds the missing newline at the end of the file.)
tests/long_term/test_deepseek_v2_lite_tp2_accuracy.py

Lines changed: 71 additions & 0 deletions

@@ -0,0 +1,71 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+# Adapted from vllm-project/blob/main/tests/entrypoints/llm/test_accuracy.py
+#
+
+import gc
+import multiprocessing
+from multiprocessing import Queue
+
+import lm_eval
+import pytest
+import torch
+
+# Pre-trained model path on Hugging Face.
+MODELS = ["deepseek-ai/DeepSeek-V2-Lite"]
+# Math reasoning benchmark (Grade School Math 8K).
+TASK = "gsm8k"
+# Answer validation requiring format consistency.
+FILTER = "exact_match,strict-match"
+# 3% relative tolerance for numerical accuracy.
+RTOL = 0.03
+# Baseline accuracy after vLLM optimization.
+EXPECTED_VALUE = 0.316
+
+
+def run_test(model_name, queue, more_args=None):
+    model_args = f"pretrained={model_name},max_model_len=4096,trust_remote_code=True,tensor_parallel_size=4"
+    if more_args is not None:
+        model_args = f"{model_args},{more_args}"
+    results = lm_eval.simple_evaluate(
+        model="vllm",
+        model_args=model_args,
+        tasks=TASK,
+        batch_size="auto",
+    )
+    result = results["results"][TASK][FILTER]
+    print(100 * "*", "\nThe accuracy test result:", result)
+    queue.put(result)
+    del results
+    torch.npu.empty_cache()
+    gc.collect()
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_lm_eval_accuracy(model, monkeypatch: pytest.MonkeyPatch):
+    with monkeypatch.context():
+        result_queue: Queue[float] = multiprocessing.Queue()
+        p = multiprocessing.Process(target=run_test,
+                                    args=(
+                                        model,
+                                        result_queue,
+                                    ))
+        p.start()
+        p.join()
+        result = result_queue.get()
+        assert (EXPECTED_VALUE - RTOL < result < EXPECTED_VALUE + RTOL), \
+            f"Expected: {EXPECTED_VALUE}±{RTOL} | Measured: {result}"
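One nuance in the assertion: the comment calls `RTOL` a "3% relative tolerance", but the test applies it as an absolute ±0.03 band around `EXPECTED_VALUE`. A small sketch contrasting the two readings (the helper functions are hypothetical, not part of the commit):

```python
EXPECTED_VALUE = 0.316  # baseline gsm8k accuracy from the test above
RTOL = 0.03

def passes_absolute(measured: float) -> bool:
    # What the test actually asserts: an absolute band of +/-0.03.
    return EXPECTED_VALUE - RTOL < measured < EXPECTED_VALUE + RTOL

def passes_relative(measured: float) -> bool:
    # What a literal 3% relative tolerance would be: +/-0.00948.
    return abs(measured - EXPECTED_VALUE) < RTOL * EXPECTED_VALUE

print(passes_absolute(0.30), passes_relative(0.30))  # True False
```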

vllm_ascend/ops/fused_moe.py

Lines changed: 1 addition & 0 deletions

@@ -329,6 +329,7 @@ def fused_experts(
     num_experts = w1.shape[0]
     dtype = hidden_states.dtype
     device = hidden_states.device
+    topk_weights = topk_weights.to(dtype)
     # assert dtype in [torch.float32, torch.float16, torch.bfloat16
     #                  ], "Only float32, float16, and bfloat16 are supported"

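The cast above is the actual "V0 precision" fix: router top-k weights typically come out of a float32 softmax, while DeepSeek activations run in bfloat16/float16, and feeding mismatched dtypes into the NPU fused-expert kernels is presumably what skewed the results. A standalone sketch of the mismatch (illustrative only, not the vllm-ascend code path); the same one-line cast is added to the quantized path in `w8a8_dynamic.py` below:

```python
import torch

# Routers usually score experts in float32 for numerical stability...
router_logits = torch.randn(4, 8, dtype=torch.float32)
topk_weights, topk_ids = torch.topk(torch.softmax(router_logits, dim=-1), k=2)

# ...while the hidden states arrive in half precision.
hidden_states = torch.randn(4, 16, dtype=torch.bfloat16)
assert topk_weights.dtype != hidden_states.dtype  # float32 vs bfloat16

# The commit's fix: align the weights with the activation dtype before
# the fused-expert kernels consume them.
topk_weights = topk_weights.to(hidden_states.dtype)
assert topk_weights.dtype == hidden_states.dtype
```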
vllm_ascend/quantization/w8a8_dynamic.py

Lines changed: 1 addition & 0 deletions

@@ -340,6 +340,7 @@ def fused_experts(hidden_states: torch.Tensor,
     num_experts = w1.shape[0]
     dtype = hidden_states.dtype
     device = hidden_states.device
+    topk_weights = topk_weights.to(dtype)

     if expert_map is not None:
         # Generate token indices and flatten
