19 changes: 19 additions & 0 deletions tests/model_executor/model_loader/test_reload.py
@@ -257,3 +257,22 @@ def test_online_quantize_reload(
mul_perp = llm.generate_prompt_perplexity(["3 4 = 12"], mask=["3 4 ="])[0]
add_perp = llm.generate_prompt_perplexity(["3 4 = 7"], mask=["3 4 ="])[0]
assert add_perp < mul_perp


def test_capture_layer_to_meta_skips_bias():
"""Regression for #39663: bias parameters must be skipped by online loader.

Online FP8 quantization wraps all non-SKIP_TENSORS params through a
deferred load pipeline that never materialized bias tensors, silently
leaving them at zero for Qwen2/2.5/GPT-2/Phi (bias=True linear layers).
Fix is to add "bias" to SKIP_TENSORS so it takes the normal load path.
"""
    from vllm.model_executor.model_loader.reload.meta import (
        SKIP_TENSORS,
        capture_layer_to_meta,
    )

assert "bias" in SKIP_TENSORS

    layer = torch.nn.Linear(2, 3, bias=True)
    params, _buffers = capture_layer_to_meta(layer)

    # weight goes through the deferred meta-capture path; bias must not.
    assert "weight" in params
    assert "bias" not in params
7 changes: 4 additions & 3 deletions vllm/model_executor/model_loader/reload/meta.py
@@ -23,11 +23,12 @@

 SKIP_TENSORS: set[str] = {
     "_expert_map",
-    "expert_mask",
+    "bias",
+    "e_score_correction_bias",
     "expert_global_to_physical",
-    "expert_physical_to_global",
     "expert_local_to_global",
-    "e_score_correction_bias",
+    "expert_mask",
+    "expert_physical_to_global",
 }
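
Aside (not part of the diff): a hypothetical membership check, assuming SKIP_TENSORS entries are matched against the leaf parameter name — is_skipped is an illustrative helper, not part of reload/meta.py.

# Hypothetical helper; assumes skip matching is by leaf name,
# e.g. "layers.0.mlp.down_proj.bias" -> "bias".
def is_skipped(param_name: str) -> bool:
    return param_name.rsplit(".", 1)[-1] in SKIP_TENSORS

assert is_skipped("layers.0.self_attn.qkv_proj.bias")
assert not is_skipped("layers.0.self_attn.qkv_proj.weight")
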

