Merged
python/sglang/srt/layers/moe/fused_moe_triton/layer.py (3 changes: 2 additions & 1 deletion)

@@ -200,7 +200,8 @@ def __init__(
         self.quant_config = quant_config
         self.quant_method.create_weights(
             layer=self,
-            num_experts=self.num_local_experts,
+            num_experts=self.num_experts,
+            num_local_experts=self.num_local_experts,
             hidden_size=hidden_size,
             # FIXME: figure out which intermediate_size to use
             intermediate_size=self.intermediate_size_per_partition,
Expand Down
python/sglang/srt/layers/quantization/modelopt_quant.py (3 changes: 2 additions & 1 deletion)

@@ -752,6 +752,7 @@ def create_weights(
         self,
         layer: torch.nn.Module,
         num_experts: int,
Collaborator: Why not simply change num_experts to num_local_experts? I think we just need the actual number of experts when allocating weights, right?

+        num_local_experts: int,
         hidden_size: int,
         intermediate_size_per_partition: int,
         params_dtype: torch.dtype,
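
Regarding the reviewer's question above: the two counts differ once expert parallelism (EP) splits the experts across ranks. Below is a minimal illustrative sketch of that relationship; it is not sglang code, and names such as `ep_size` and `ep_rank` are assumptions.

```python
# Illustrative only, not sglang code. Assumes experts are split evenly across
# expert-parallel (EP) ranks, each rank holding a contiguous slice.
num_experts = 64                             # global count, seen by the router
ep_size, ep_rank = 8, 2                      # hypothetical EP world size / rank
num_local_experts = num_experts // ep_size   # experts whose weights live on this rank

# Weight tensors only need to cover the local experts, but mapping a routed
# (global) expert id back to a local slot still uses the global numbering:
global_id = 18
local_id = global_id - ep_rank * num_local_experts   # 18 - 2*8 = 2
assert 0 <= local_id < num_local_experts
```

Under that reading, the allocation itself only needs the local count, while the global count remains useful for routing and weight-loading bookkeeping, which is presumably why the diff passes both.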
@@ -765,7 +766,7 @@ def create_weights(

         # TODO(ch-wan): check if this is needed
         layer.num_experts = num_experts
-        layer.num_local_experts = num_experts
+        layer.num_local_experts = num_local_experts
         layer.intermediate_size_per_partition = intermediate_size_per_partition
         layer.params_dtype = params_dtype
         layer.quant_config = self.quant_config
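
For illustration, here is a minimal sketch of how a `create_weights` with this signature might use the two arguments: the global count is recorded for bookkeeping while only the local experts get weight storage. The function name, tensor names, and shapes are assumptions, not the actual ModelOpt implementation.

```python
import torch


def create_weights_sketch(
    layer: torch.nn.Module,
    num_experts: int,
    num_local_experts: int,
    hidden_size: int,
    intermediate_size_per_partition: int,
    params_dtype: torch.dtype,
) -> None:
    # Keep both counts: the global one for routing / weight loading,
    # the local one for anything sized per rank.
    layer.num_experts = num_experts
    layer.num_local_experts = num_local_experts
    # Allocate storage only for the experts resident on this rank
    # (w13 = fused gate/up projection, w2 = down projection; hypothetical names).
    layer.w13_weight = torch.nn.Parameter(
        torch.empty(
            num_local_experts,
            2 * intermediate_size_per_partition,
            hidden_size,
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
    layer.w2_weight = torch.nn.Parameter(
        torch.empty(
            num_local_experts,
            hidden_size,
            intermediate_size_per_partition,
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
```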