2 changes: 1 addition & 1 deletion vllm/model_executor/layers/fused_moe/oracle/fp8.py

@@ -603,7 +603,7 @@ def make_fp8_moe_kernel(
     )

     # NOTE(rob): we only want the mk to control the shared_expert
-    # if using all2all (for SBO). bnell is making this explict in
+    # if using all2all (for SBO). bnell is making this explicit in
     # the new MoE runner class.
     kernel = mk.FusedMoEModularKernel(
         prepare_finalize,
2 changes: 1 addition & 1 deletion vllm/model_executor/layers/fused_moe/oracle/nvfp4.py

@@ -458,7 +458,7 @@ def make_nvfp4_moe_kernel(
     )

     # NOTE(rob): we only want the mk to control the shared_expert
-    # if using all2all (for SBO). bnell is making this explict in
+    # if using all2all (for SBO). bnell is making this explicit in
    # the new MoE runner class.
     kernel = mk.FusedMoEModularKernel(
         prepare_finalize,
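
Both hunks above fix the same comment, which records a real design constraint: the modular kernel (mk) should own the shared expert only on the all2all path, where it can overlap the shared-expert work (SBO) with dispatch/combine. A minimal sketch of that gating follows, assuming the kernel's constructor accepts a shared_experts keyword — the hunks are truncated before the remaining constructor arguments are visible, so that keyword and all parameter names here are illustrative, not vLLM's confirmed API.

from typing import Any, Optional

import torch


def build_moe_kernel(mk: Any, prepare_finalize: Any, fused_experts: Any,
                     shared_experts: Optional[torch.nn.Module],
                     use_all2all: bool):
    # Hand the shared experts to the modular kernel only on the all2all
    # path (SBO), where it can overlap them with dispatch/combine;
    # otherwise pass None so the caller runs them itself.
    # `shared_experts=` is an assumption: the diff cuts off before the
    # constructor's remaining arguments are shown.
    return mk.FusedMoEModularKernel(
        prepare_finalize,
        fused_experts,
        shared_experts=shared_experts if use_all2all else None,
    )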
@@ -44,7 +44,7 @@ def create_fused_moe_router(
     # grouped topk + fused topk bias parameters
     routed_scaling_factor: float = 1.0,
     e_score_correction_bias: torch.Tensor | None = None,
-    # custom routing paramaters
+    # custom routing parameters
     custom_routing_function: Callable | None = None,
     # eplb parameters
     enable_eplb: bool = False,
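
For context on the parameters surrounding the fixed comment: the pairing of e_score_correction_bias with routed_scaling_factor suggests a DeepSeek-V3-style routing scheme, where the bias shifts which experts are selected by top-k but not how the selected experts are weighted. The toy sketch below illustrates that pattern under this assumption; it is not vLLM's actual router, and topk_with_correction_bias is a made-up name.

import torch


def topk_with_correction_bias(scores: torch.Tensor, bias: torch.Tensor,
                              top_k: int,
                              routed_scaling_factor: float = 1.0):
    # scores: [num_tokens, num_experts]; bias: [num_experts].
    # The bias influences *which* experts win the top-k selection...
    _, topk_ids = torch.topk(scores + bias, top_k, dim=-1)
    # ...but the routing weights come from the unbiased scores.
    topk_weights = scores.gather(-1, topk_ids)
    topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    return topk_weights * routed_scaling_factor, topk_ids


# Toy usage: 4 tokens, 8 experts, top-2 routing, zero bias.
scores = torch.rand(4, 8).softmax(dim=-1)
weights, ids = topk_with_correction_bias(scores, torch.zeros(8), top_k=2)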