2 changes: 0 additions & 2 deletions .github/workflows/_e2e_nightly_single_node.yaml
@@ -143,5 +143,3 @@ jobs:
        # ignore test_dispatch_ffn_combine until the test is fixed
        pytest -sv ${{ inputs.tests }} \
          --ignore=tests/e2e/nightly/single_node/ops/singlecard_ops/test_fused_moe.py


6 changes: 6 additions & 0 deletions .github/workflows/nightly_test_a3.yaml
@@ -83,6 +83,9 @@ jobs:
          - name: multi-node-qwen-vl-disagg-pd
            config_file_path: Qwen3-VL-235B-disagg-pd.yaml
            size: 2
          - name: multi-node-kimi-k2-instruct-w8a8
            config_file_path: Kimi-K2-Instruct-W8A8.yaml
            size: 2
    uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
    with:
      soc_version: a3
@@ -144,6 +147,9 @@ jobs:
          - name: qwen3-next-w8a8
            os: linux-aarch64-a3-4
            tests: tests/e2e/nightly/single_node/models/test_qwen3_next_w8a8.py
          - name: kimi-k2-thinking
            os: linux-aarch64-a3-16
            tests: tests/e2e/nightly/single_node/models/test_kimi_k2_thinking.py
          # TODO: Replace deepseek3.2-exp with deepseek3.2 after nightly tests pass
          # - name: deepseek3_2-exp-w8a8
          #   os: linux-aarch64-a3-16
79 changes: 79 additions & 0 deletions tests/e2e/nightly/multi_node/config/Kimi-K2-Instruct-W8A8.yaml
@@ -0,0 +1,79 @@
test_name: "test Kimi-K2-Instruct-W8A8 2-nodes-dp4-tp8-torchair"
model: "vllm-ascend/Kimi-K2-Instruct-W8A8"

num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  NUMEXPR_MAX_THREADS: 128

deployment:
  -
    server_cmd: >
      vllm serve "vllm-ascend/Kimi-K2-Instruct-W8A8"
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-size 4
      --data-parallel-size-local 2
      --data-parallel-start-rank 0
      --data-parallel-address $LOCAL_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 8
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 32
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --quantization ascend
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"torchair_graph_config":{"enabled":true}}'

  -
    server_cmd: >
      vllm serve "vllm-ascend/Kimi-K2-Instruct-W8A8"
      --headless
      --data-parallel-size 4
      --data-parallel-size-local 2
      --data-parallel-start-rank 2
      --data-parallel-address $MASTER_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 8
      --seed 1024
      --enable-expert-parallel
      --max-num-seqs 32
      --max-model-len 8192
      --max-num-batched-tokens 8192
      --quantization ascend
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"torchair_graph_config":{"enabled":true}}'

benchmarks:
  perf:
    case_type: performance
    dataset_path: vllm-ascend/GSM8K-in3500-bs2800
    request_conf: vllm_api_stream_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_str_perf
    num_prompts: 512
    max_out_len: 256
    batch_size: 64
    trust_remote_code: True
    request_rate: 11.2
    baseline: 1
    threshold: 0.97
  acc:
    case_type: accuracy
    dataset_path: vllm-ascend/gsm8k-lite
    request_conf: vllm_api_general_chat
    dataset_conf: gsm8k/gsm8k_gen_0_shot_cot_chat_prompt
    max_out_len: 7680
    batch_size: 64
    baseline: 95
    threshold: 5
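
For reference, a minimal sketch of how a config in this format can be loaded and its per-node launch commands listed. It assumes only PyYAML; the loader below is illustrative and is not the nightly harness's actual entry point.

import yaml

# Path as added by this PR; adjust if the file is read from another location.
CONFIG = "tests/e2e/nightly/multi_node/config/Kimi-K2-Instruct-W8A8.yaml"

with open(CONFIG) as f:
    cfg = yaml.safe_load(f)

print(cfg["test_name"], "-", cfg["num_nodes"], "nodes x", cfg["npu_per_node"], "NPUs each")
for rank, node in enumerate(cfg["deployment"]):
    # server_cmd uses a folded (>) scalar, so it loads as a single-line string;
    # the first entry is the head node, the second runs with --headless.
    print(f"node {rank}: {node['server_cmd']}")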
110 changes: 110 additions & 0 deletions tests/e2e/nightly/single_node/models/test_kimi_k2_thinking.py
@@ -0,0 +1,110 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "moonshotai/Kimi-K2-Thinking",
]

TENSOR_PARALLELS = [16]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 4096,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 512,
    "max_out_len": 256,
    "batch_size": 64,
    "trust_remote_code": True,
    "request_rate": 11.2,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
async def test_models(model: str, tp_size: int) -> None:
    port = get_open_port()
    env_dict = {
        "HCCL_BUFFSIZE": "1024",
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PYTORCH_NPU_ALLOC_CONF": "expandable_segments:True"
    }
    server_args = [
        "--tensor-parallel-size",
        str(tp_size),
        "--port",
        str(port),
        "--max-model-len",
        "8192",
        "--max-num-batched-tokens",
        "8192",
        "--max-num-seqs",
        "12",
        "--gpu-memory-utilization",
        "0.9",
        "--trust-remote-code",
        "--enable-expert-parallel",
        "--no-enable-prefix-caching",
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
5 changes: 5 additions & 0 deletions tools/aisbench.py
@@ -92,6 +92,8 @@ def __init__(self,
        self.max_out_len = aisbench_config["max_out_len"]
        self.batch_size = aisbench_config["batch_size"]
        self.request_rate = aisbench_config.get("request_rate", 0)
        self.trust_remote_code = aisbench_config.get("trust_remote_code",
                                                     False)
        self.temperature = aisbench_config.get("temperature")
        self.top_k = aisbench_config.get("top_k")
        self.top_p = aisbench_config.get("top_p")
@@ -145,6 +147,9 @@ def _init_request_conf(self):
                         f'max_out_len = {self.max_out_len},', content)
        content = re.sub(r'batch_size.*', f'batch_size = {self.batch_size},',
                         content)
        content = re.sub(r'trust_remote_code=.*',
                         f'trust_remote_code={self.trust_remote_code},',
                         content)
        content = content.replace("top_k", "#top_k")
        content = content.replace("seed", "#seed")
        content = content.replace("repetition_penalty", "#repetition_penalty")
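
As context for the new substitution in _init_request_conf, a standalone sketch of what the added re.sub does to one line of a generated request config; the sample content string and the trust_remote_code value are illustrative, not taken from a real aisbench template.

import re

# Hypothetical line as it could appear in a generated request conf.
content = "    trust_remote_code=False,"
trust_remote_code = True  # value propagated from the test case config

# Everything after "trust_remote_code=" on that line is replaced with the
# configured value, keeping the trailing comma the conf syntax expects.
content = re.sub(r'trust_remote_code=.*',
                 f'trust_remote_code={trust_remote_code},', content)
print(content)  # -> "    trust_remote_code=True,"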