From cf2ad3ce49284a4e06b127972789f1a47ceee949 Mon Sep 17 00:00:00 2001
From: lin-shh <0shuhang@gmail.com>
Date: Sun, 1 Mar 2026 02:36:36 -0500
Subject: [PATCH] =?UTF-8?q?[Misc]=20Fix=20typos=20in=20comments:=20explict?=
 =?UTF-8?q?=E2=86=92explicit,=20paramaters=E2=86=92parameters?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix three spelling errors in code comments across fused_moe files:

- oracle/fp8.py and oracle/nvfp4.py: "explict" → "explicit"
- router/router_factory.py: "paramaters" → "parameters"

Made-with: Cursor
---
 vllm/model_executor/layers/fused_moe/oracle/fp8.py            | 2 +-
 vllm/model_executor/layers/fused_moe/oracle/nvfp4.py          | 2 +-
 vllm/model_executor/layers/fused_moe/router/router_factory.py | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/vllm/model_executor/layers/fused_moe/oracle/fp8.py b/vllm/model_executor/layers/fused_moe/oracle/fp8.py
index 6f961df07d8e..9edd15eede63 100644
--- a/vllm/model_executor/layers/fused_moe/oracle/fp8.py
+++ b/vllm/model_executor/layers/fused_moe/oracle/fp8.py
@@ -603,7 +603,7 @@ def make_fp8_moe_kernel(
     )
 
     # NOTE(rob): we only want the mk to control the shared_expert
-    # if using all2all (for SBO). bnell is making this explict in
+    # if using all2all (for SBO). bnell is making this explicit in
     # the new MoE runner class.
     kernel = mk.FusedMoEModularKernel(
         prepare_finalize,
diff --git a/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py b/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py
index b4f4b74ca2d5..d48def361936 100644
--- a/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py
+++ b/vllm/model_executor/layers/fused_moe/oracle/nvfp4.py
@@ -458,7 +458,7 @@ def make_nvfp4_moe_kernel(
     )
 
     # NOTE(rob): we only want the mk to control the shared_expert
-    # if using all2all (for SBO). bnell is making this explict in
+    # if using all2all (for SBO). bnell is making this explicit in
     # the new MoE runner class.
     kernel = mk.FusedMoEModularKernel(
         prepare_finalize,
diff --git a/vllm/model_executor/layers/fused_moe/router/router_factory.py b/vllm/model_executor/layers/fused_moe/router/router_factory.py
index a0733bafbe4d..11027e894bee 100644
--- a/vllm/model_executor/layers/fused_moe/router/router_factory.py
+++ b/vllm/model_executor/layers/fused_moe/router/router_factory.py
@@ -44,7 +44,7 @@ def create_fused_moe_router(
     # grouped topk + fused topk bias parameters
     routed_scaling_factor: float = 1.0,
     e_score_correction_bias: torch.Tensor | None = None,
-    # custom routing paramaters
+    # custom routing parameters
     custom_routing_function: Callable | None = None,
     # eplb parameters
     enable_eplb: bool = False,