diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index 914dc68469ee..5a8f51de6462 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -700,7 +700,7 @@ def layer_id(self):

     @property
     def gate(self) -> torch.nn.Module | None:
-        return self._gate
+        return self._gate if self.use_overlapped else None

     @property
     def tp_size(self):
@@ -725,7 +725,7 @@ def use_ep(self):
     @property
     def is_internal_router(self) -> bool:
         # By default, router/gate is called before FusedMoE forward pass
-        return self._gate is not None
+        return self.gate is not None

     def _maybe_init_expert_routing_tables(
         self,
@@ -1457,7 +1457,6 @@ def forward_native(
         hidden_states: torch.Tensor,
         router_logits: torch.Tensor,
     ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
-        self.ensure_moe_quant_config_init()
         return self.runner.forward(
             hidden_states,
             router_logits,
diff --git a/vllm/model_executor/layers/fused_moe/runner/default_moe_runner.py b/vllm/model_executor/layers/fused_moe/runner/default_moe_runner.py
index 12b795f30e22..b265cbb41211 100644
--- a/vllm/model_executor/layers/fused_moe/runner/default_moe_runner.py
+++ b/vllm/model_executor/layers/fused_moe/runner/default_moe_runner.py
@@ -63,6 +63,8 @@ def _moe_forward(
     layer_name: str,
 ) -> torch.Tensor:
     layer = get_layer_from_name(layer_name)
+    # TODO(bnell): this can be removed after MK migration is complete.
+    layer.ensure_moe_quant_config_init()
     return layer.runner.forward_impl(
         layer, hidden_states, router_logits, shared_experts_input
     )
@@ -84,6 +86,8 @@ def _moe_forward_shared(
     layer_name: str,
 ) -> tuple[torch.Tensor, torch.Tensor]:
     layer = get_layer_from_name(layer_name)
+    # TODO(bnell): this can be removed after MK migration is complete.
+    layer.ensure_moe_quant_config_init()
     return layer.runner.forward_impl(
         layer, hidden_states, router_logits, shared_experts_input
     )
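
A minimal, self-contained sketch of the pattern this diff applies: lazy, idempotent quant-config initialization performed at the op-wrapper entry points (`_moe_forward` / `_moe_forward_shared`) rather than inside `forward_native`, so `forward_impl` can assume the config exists regardless of which entry point was used. The `MoELayer` class and its fields below are illustrative stand-ins, not vLLM code.

# Illustrative sketch only; MoELayer and its members are stand-ins for
# the vLLM identifiers touched by the diff.
class MoELayer:
    def __init__(self) -> None:
        self._quant_config: dict | None = None

    def ensure_moe_quant_config_init(self) -> None:
        # Idempotent: builds the config once, on first use.
        if self._quant_config is None:
            self._quant_config = {"dtype": "fp8"}  # placeholder config

    def forward_impl(self, x: list[float]) -> list[float]:
        # forward_impl can assume the config is set, because every entry
        # point (the wrapper below) initializes it first.
        assert self._quant_config is not None
        return x


def _moe_forward(layer: MoELayer, x: list[float]) -> list[float]:
    # Mirrors the diff: the wrapper, not forward_native(), guarantees
    # initialization before forward_impl() runs.
    layer.ensure_moe_quant_config_init()
    return layer.forward_impl(x)


if __name__ == "__main__":
    print(_moe_forward(MoELayer(), [1.0, 2.0]))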