.github/workflows/nightly_test_a3.yaml (3 additions, 0 deletions)

@@ -132,6 +132,9 @@ jobs:
       - name: qwen3-235b-w8a8
         os: linux-aarch64-a3-16
         tests: tests/e2e/nightly/single_node/models/test_qwen3_235b_w8a8.py
+      - name: qwen3-next-w8a8
+        os: linux-aarch64-a3-4
+        tests: tests/e2e/nightly/single_node/models/test_qwen3_next_w8a8.py
       # TODO: Replace deepseek3.2-exp with deepseek3.2 after nightly tests pass
       # - name: deepseek3_2-exp-w8a8
       #   os: linux-aarch64-a3-16
tests/e2e/nightly/single_node/models/test_qwen3_next_w8a8.py (new file, 104 additions)

@@ -0,0 +1,104 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils.network_utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8",
]

prompts = [
    "San Francisco is a",
]
Comment on lines +30 to +32 (Contributor):

Severity: high

The model under test, Qwen3-Next-80B-A3B-Instruct-W8A8, is an instruction-tuned chat model. For correctness, it is better to use the chat completions API, which requires formatting the prompt as a list of messages with roles. This change should be made in conjunction with updating the API call to use client.chat.completions.create.

Suggested change:
-prompts = [
-    "San Francisco is a",
-]
+prompts = [
+    {"role": "user", "content": "San Francisco is a"},
+]


api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
async def test_models(model: str) -> None:
    port = get_open_port()
    env_dict = {
        "OMP_NUM_THREADS": "10",
Comment (Collaborator):

Suggested change:
-        "OMP_NUM_THREADS": "10",
+        "OMP_NUM_THREADS": "1",

"OMP_PROC_BIND": "false",
"HCCL_BUFFSIZE": "1024",
}
server_args = [
"--quantization",
"ascend",
"--async-scheduling",
"--no-enable-prefix-caching",
"--data-parallel-size",
"1",
"--tensor-parallel-size",
"4",
"--enable-expert-parallel",
"--port",
str(port),
"--max-model-len",
"40960",
"--max-num-batched-tokens",
"8192",
"--max-num-seqs",
"32",
"--trust-remote-code",
"--gpu-memory-utilization",
"0.65",
"--compilation-config",
'{"cudagraph_capture_sizes": [32], "cudagraph_mode":"FULL_DECODE_ONLY"}',
]
request_keyword_args: dict[str, Any] = {
**api_keyword_args,
}
with RemoteOpenAIServer(model,
server_args,
server_port=port,
env_dict=env_dict,
auto_port=False) as server:
client = server.get_async_client()
batch = await client.completions.create(
model=model,
prompt=prompts,
**request_keyword_args,
)
choices: list[openai.types.CompletionChoice] = batch.choices
assert choices[0].text, "empty response"
Comment on lines +92 to +98 (Contributor):

Severity: high

To correctly test a chat model, you should use the client.chat.completions.create method instead of the legacy completions.create. This also requires updating how the response is accessed, from choices[0].text to choices[0].message.content. This change assumes the prompts variable has been updated to the chat message format as suggested in the other comment.

Suggested change:
-        batch = await client.completions.create(
-            model=model,
-            prompt=prompts,
-            **request_keyword_args,
-        )
-        choices: list[openai.types.CompletionChoice] = batch.choices
-        assert choices[0].text, "empty response"
+        batch = await client.chat.completions.create(
+            model=model,
+            messages=prompts,
+            **request_keyword_args,
+        )
+        choices: list[openai.types.chat.ChatCompletion.Choice] = batch.choices
+        assert choices[0].message.content, "empty response"

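For reference, a minimal, self-contained sketch of how the two suggestions fit together, assuming a vLLM OpenAI-compatible server is already serving the model locally. The base_url, api_key value, and the standalone-script framing are illustrative assumptions, not part of the PR:

import asyncio

import openai


async def main() -> None:
    # Point the client at a locally running OpenAI-compatible server
    # (placeholder URL and key; the test harness derives these itself).
    client = openai.AsyncOpenAI(base_url="http://localhost:8000/v1",
                                api_key="EMPTY")
    resp = await client.chat.completions.create(
        model="vllm-ascend/Qwen3-Next-80B-A3B-Instruct-W8A8",
        # Chat-format prompt: a list of role/content messages.
        messages=[{"role": "user", "content": "San Francisco is a"}],
        max_tokens=10,
    )
    # Chat responses expose text via message.content, not .text.
    assert resp.choices[0].message.content, "empty response"
    print(resp.choices[0].message.content)


asyncio.run(main())

In the test itself, the same call would go through server.get_async_client() with the port returned by get_open_port().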
        print(choices)
        # aisbench test
        run_aisbench_cases(model,
                           port,
                           aisbench_cases,
                           server_args=server_args)