-
-
Notifications
You must be signed in to change notification settings - Fork 15.5k
[Bugfix] Fix qwen-moe packed_modules_mapping #26634
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -534,11 +534,7 @@ class Qwen2MoeForCausalLM(nn.Module, SupportsPP, SupportsLoRA): | |
| "q_proj", | ||
| "k_proj", | ||
| "v_proj", | ||
| ], | ||
| "gate_up_proj": [ | ||
| "gate_proj", | ||
| "up_proj", | ||
| ], | ||
| ] | ||
| } | ||
|
|
||
| def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): | ||
|
|
@@ -547,6 +543,18 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): | |
| quant_config = vllm_config.quant_config | ||
| self.config = config | ||
| self.quant_config = quant_config | ||
| # Only perform the following mapping when Qwen2MoeMLP exists | ||
| if ( | ||
| getattr(config, "mlp_only_layers", []) | ||
| or config.shared_expert_intermediate_size > 0 | ||
| ): | ||
| self.packed_modules_mapping["gate_up_proj"] = ( | ||
| [ | ||
| "gate_proj", | ||
| "up_proj", | ||
| ], | ||
| ) | ||
|
Comment on lines
+546
to
+556
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This change introduces two critical issues:
A more robust approach is to check if not all layers are sparse MoE layers. This is the case if `mlp_only_layers` is non-empty, `num_experts` is 0, or `decoder_sparse_step` is not 1: # Create a copy of the mapping to avoid modifying the class attribute.
self.packed_modules_mapping = self.packed_modules_mapping.copy()
# Conditionally add gate_up_proj if dense MLP layers exist. A model has
# dense MLP layers if not all layers are sparse MoE layers.
if (bool(getattr(config, "mlp_only_layers", [])) or
getattr(config, "num_experts", 0) == 0 or
getattr(config, "decoder_sparse_step", 1) != 1):
self.packed_modules_mapping["gate_up_proj"] = [
"gate_proj",
"up_proj",
] |
||
|
|
||
| self.model = Qwen2MoeModel( | ||
| vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model") | ||
| ) | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -634,11 +634,7 @@ class Qwen3MoeForCausalLM( | |||||||||||||||||||||||||||||||||||||||
| "q_proj", | ||||||||||||||||||||||||||||||||||||||||
| "k_proj", | ||||||||||||||||||||||||||||||||||||||||
| "v_proj", | ||||||||||||||||||||||||||||||||||||||||
| ], | ||||||||||||||||||||||||||||||||||||||||
| "gate_up_proj": [ | ||||||||||||||||||||||||||||||||||||||||
| "gate_proj", | ||||||||||||||||||||||||||||||||||||||||
| "up_proj", | ||||||||||||||||||||||||||||||||||||||||
| ], | ||||||||||||||||||||||||||||||||||||||||
| ] | ||||||||||||||||||||||||||||||||||||||||
| } | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
| fall_back_to_pt_during_load = False | ||||||||||||||||||||||||||||||||||||||||
|
|
@@ -649,6 +645,14 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): | |||||||||||||||||||||||||||||||||||||||
| quant_config = vllm_config.quant_config | ||||||||||||||||||||||||||||||||||||||||
| self.config = config | ||||||||||||||||||||||||||||||||||||||||
| self.quant_config = quant_config | ||||||||||||||||||||||||||||||||||||||||
| # Only perform the following mapping when Qwen3MoeMLP exists | ||||||||||||||||||||||||||||||||||||||||
| if getattr(config, "mlp_only_layers", []): | ||||||||||||||||||||||||||||||||||||||||
| self.packed_modules_mapping["gate_up_proj"] = ( | ||||||||||||||||||||||||||||||||||||||||
| [ | ||||||||||||||||||||||||||||||||||||||||
| "gate_proj", | ||||||||||||||||||||||||||||||||||||||||
| "up_proj", | ||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
+649
to
+653
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Same issue as above. Useful? React with 👍 / 👎. |
||||||||||||||||||||||||||||||||||||||||
| ], | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
|
Comment on lines
+648
to
+655
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This change introduces two critical issues:
A more robust approach is to check if not all layers are sparse MoE layers. This is the case if
Suggested change
|
||||||||||||||||||||||||||||||||||||||||
| self.model = Qwen3MoeModel( | ||||||||||||||||||||||||||||||||||||||||
| vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model") | ||||||||||||||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||||||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The new conditional adds `gate_up_proj` using `self.packed_modules_mapping["gate_up_proj"] = (["gate_proj", "up_proj"],)`. Because of the parentheses and trailing comma this stores a tuple whose only element is a list, while the rest of the quantization helpers expect `dict[str, list[str]]`. When the mapping is consumed (e.g., `get_layer_partition_names` or LoRA utilities), the tuple is iterated and the list itself is passed to string operations such as `removesuffix`/`replace`, raising a `TypeError`. Any model with `mlp_only_layers` set will fail during packed-module handling. Assign the list directly without wrapping it in a tuple. Useful? React with 👍 / 👎.