diff --git a/vllm/model_executor/layers/fused_moe/oracle/fp8.py b/vllm/model_executor/layers/fused_moe/oracle/fp8.py
index 6f961df07d8e..9edd15eede63 100644
--- a/vllm/model_executor/layers/fused_moe/oracle/fp8.py
+++ b/vllm/model_executor/layers/fused_moe/oracle/fp8.py
@@ -603,7 +603,7 @@ def make_fp8_moe_kernel(
     )
 
     # NOTE(rob): we only want the mk to control the shared_expert
-    # if using all2all (for SBO). bnell is making this explict in
+    # if using all2all (for SBO). bnell is making this explicit in
     # the new MoE runner class.
     kernel = mk.FusedMoEModularKernel(
         prepare_finalize,
diff --git a/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py b/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py
index b4f4b74ca2d5..d48def361936 100644
--- a/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py
+++ b/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py
@@ -458,7 +458,7 @@ def make_nvfp4_moe_kernel(
     )
 
     # NOTE(rob): we only want the mk to control the shared_expert
-    # if using all2all (for SBO). bnell is making this explict in
+    # if using all2all (for SBO). bnell is making this explicit in
     # the new MoE runner class.
     kernel = mk.FusedMoEModularKernel(
         prepare_finalize,
diff --git a/vllm/model_executor/layers/fused_moe/router/router_factory.py b/vllm/model_executor/layers/fused_moe/router/router_factory.py
index a0733bafbe4d..11027e894bee 100644
--- a/vllm/model_executor/layers/fused_moe/router/router_factory.py
+++ b/vllm/model_executor/layers/fused_moe/router/router_factory.py
@@ -44,7 +44,7 @@ def create_fused_moe_router(
     # grouped topk + fused topk bias parameters
     routed_scaling_factor: float = 1.0,
     e_score_correction_bias: torch.Tensor | None = None,
-    # custom routing paramaters
+    # custom routing parameters
     custom_routing_function: Callable | None = None,
     # eplb parameters
     enable_eplb: bool = False,
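
For context on the comment the first two hunks touch: the NOTE(rob) lines describe letting the modular kernel (`mk.FusedMoEModularKernel`) control the shared expert only when an all2all prepare/finalize is in use, so that SBO can overlap it. Below is a minimal sketch of that pattern, not the actual vLLM construction code; the `use_all2all` flag, the position of the `shared_experts` argument, and the import path are assumptions made for illustration.

```python
# Sketch only: the import path and constructor shape are assumed from the
# `mk.FusedMoEModularKernel(prepare_finalize, ...)` call visible in the diff.
from vllm.model_executor.layers.fused_moe import modular_kernel as mk


def build_moe_kernel(prepare_finalize, fused_experts, shared_experts, use_all2all: bool):
    # Per the NOTE(rob) comment: only hand the shared expert to the modular
    # kernel on the all2all path (for SBO); otherwise the layer keeps
    # running the shared expert itself.
    return mk.FusedMoEModularKernel(
        prepare_finalize,
        fused_experts,
        shared_experts if use_all2all else None,
    )
```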