49 changes: 39 additions & 10 deletions tests/v1/e2e/test_spec_decode.py
@@ -613,6 +613,15 @@ def test_mtp_correctness(
    cleanup_dist_env_and_memory()


def some_high_acceptance_metrics() -> dict:
    return {
        "sampling_config": greedy_sampling(),
        "num_speculative_tokens": 3,
        "expected_acceptance_len": 2.8 + 1,
        "expected_acceptance_rate": 0.90,
    }
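# "Acceptance length" here appears to count the bonus token sampled by the
# target model plus the accepted draft tokens per step, hence the `+ 1` above
# (and 4 == 3 drafts + 1 bonus in the full-acceptance case below).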


@dataclass
class ArgsTest:
    target_model: str
@@ -630,6 +639,8 @@ class ArgsTest:
    gpu_memory_utilization: float = 0.5
    dataset: str = "test_prompts"
    num_prompts: int = 100
    # Some settings only reach a 100% acceptance rate with VLLM_BATCH_INVARIANT=1.
    use_batch_invariance: bool = False


cases = [
@@ -651,12 +662,36 @@ class ArgsTest:
        expected_acceptance_len=2.8 + 1,
        expected_acceptance_rate=0.9,
    ),
    # Multiple KV cache groups
    ArgsTest(
        target_model="google/gemma-3-270m-it",
        draft_model="google/gemma-3-270m-it",
        sampling_config=greedy_sampling(),
        num_speculative_tokens=3,
        expected_acceptance_len=4,
        expected_acceptance_rate=1,
        # Without batch invariance, acceptance rate is ~86%.
        use_batch_invariance=True,
    ),
    # GPT-OSS MoE models with different target/draft sizes.
    # Tests MoE layer resolution in speculative decoding.
    ArgsTest(
        target_model="openai/gpt-oss-120b",
        draft_model="openai/gpt-oss-20b",
        # Leave some headroom for CUDA graph capture.
        gpu_memory_utilization=0.85,
        **some_high_acceptance_metrics(),
    ),
]
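# For reference, the `**some_high_acceptance_metrics()` unpacking above is
# equivalent to spelling the shared kwargs out on the case, e.g.:
#
#   ArgsTest(
#       target_model="openai/gpt-oss-120b",
#       draft_model="openai/gpt-oss-20b",
#       gpu_memory_utilization=0.85,
#       sampling_config=greedy_sampling(),
#       num_speculative_tokens=3,
#       expected_acceptance_len=2.8 + 1,
#       expected_acceptance_rate=0.90,
#   )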


@pytest.mark.parametrize("args", cases)
@pytest.mark.parametrize("enforce_eager", [True, False])
def test_draft_model_correctness(args: ArgsTest, enforce_eager: bool):
def test_draft_model_correctness(
    args: ArgsTest, enforce_eager: bool, monkeypatch: pytest.MonkeyPatch
):
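    # monkeypatch.setenv scopes the env var to this test and undoes it on
    # teardown; it has to be set before assert_draft_model_correctness
    # constructs the LLM, since vLLM reads VLLM_BATCH_INVARIANT when the
    # engine is created.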
    if args.use_batch_invariance:
        monkeypatch.setenv("VLLM_BATCH_INVARIANT", "1")
    args.enforce_eager = enforce_eager
    assert_draft_model_correctness(args)

@@ -772,6 +807,8 @@ def test_draft_model_engine_args_rejects_invalid_tp_argname():
def assert_draft_model_correctness(args: ArgsTest):
    """Compare the outputs with and without speculative decoding.
    In the greedy decoding case, the outputs must match EXACTLY."""
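    # Batch-invariant runs pin the attention backend to FLASH_ATTN (see the
    # note on use_batch_invariance above); otherwise leave attention_config
    # unset and let vLLM pick its default backend.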
    attention_config = {"backend": "FLASH_ATTN"} if args.use_batch_invariance else None

    test_prompts: list[Messages] = get_messages(
        dataset=args.dataset, n=args.num_prompts
    )
@@ -793,6 +830,7 @@ def assert_draft_model_correctness(args: ArgsTest):
        tensor_parallel_size=args.target_tensor_parallel_size,
        enforce_eager=args.enforce_eager,
        disable_log_stats=False,  # enables get_metrics()
        attention_config=attention_config,
    )
    # We don't check the outputs here, only the metrics.
    spec_llm.chat(test_prompts, args.sampling_config)
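    # The assertions on the metrics are collapsed in this diff view; a sketch
    # of the likely shape, using compute_acceptance_rate from below (the exact
    # assertions are an assumption):
    #
    #   metrics = spec_llm.get_metrics()
    #   assert compute_acceptance_rate(metrics) >= args.expected_acceptance_rate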
@@ -824,15 +862,6 @@ def get_messages(dataset: str, n: int) -> list[Messages]:
    raise NotImplementedError(f"Dataset '{dataset}' not implemented")


def some_high_acceptance_metrics() -> dict:
    return {
        "sampling_config": greedy_sampling(),
        "num_speculative_tokens": 3,
        "expected_acceptance_len": 2.8 + 1,
        "expected_acceptance_rate": 0.90,
    }


def compute_acceptance_rate(metrics: list[Metric]) -> float:
    name2metric = {metric.name: metric for metric in metrics}
    n_draft_toks = name2metric["vllm:spec_decode_num_draft_tokens"].value  # type: ignore
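# The rest of this function is collapsed in the diff. A plausible completion,
# assuming the matching accepted-token counter (the metric name below is an
# assumption, not shown here):
#
#     n_accepted_toks = name2metric[
#         "vllm:spec_decode_num_accepted_tokens"
#     ].value  # type: ignore
#     return n_accepted_toks / n_draft_toks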