
Commit a9bab41

remove dead code
Signed-off-by: Neta Zmora <[email protected]>
1 parent 0b31d41 commit a9bab41

File tree

2 files changed: +1 −4 lines changed

cpp/tensorrt_llm/thop/moeOp.cpp

Lines changed: 0 additions & 3 deletions
@@ -340,9 +340,6 @@ class FusedMoeRunner : public torch::CustomClassHolder
         }
         else
         {
-            // TORCH_CHECK(fc1_expert_weights.sizes()[1] == fc2_expert_weights.sizes()[2] * mInnerDimMultiplier * 2,
-            //     "fc1_expert_weights inter size must be fc2_expert_weights inter size.");
-
             if (isGatedActivation(base_activation_type))
             {
                 TORCH_CHECK(fc1_expert_weights.sizes()[1] == fc2_expert_weights.sizes()[2] * mInnerDimMultiplier * 2,
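
A minimal Python sketch (not from the repo; the expert count, hidden size, inter size, and multiplier are assumed example values, with inner_dim_multiplier standing in for mInnerDimMultiplier) of the shape relation the retained TORCH_CHECK enforces: for a gated activation such as SwiGLU, fc1 packs the gate and up projections, so its inter dimension must be twice the inter dimension that fc2 consumes.

# Sketch only: restates the retained TORCH_CHECK on plain integers.
num_experts = 8           # assumed example value
hidden_size = 4096        # assumed example value
inter_size = 14336        # assumed example value
inner_dim_multiplier = 1  # stands in for mInnerDimMultiplier (assumed 1, i.e. unpacked weights)

# Assumed layouts: fc1 is [num_experts, 2 * inter, hidden], fc2 is [num_experts, hidden, inter].
fc1_expert_weights_shape = (num_experts, 2 * inter_size, hidden_size)
fc2_expert_weights_shape = (num_experts, hidden_size, inter_size)

# fc1_expert_weights.sizes()[1] == fc2_expert_weights.sizes()[2] * mInnerDimMultiplier * 2
assert fc1_expert_weights_shape[1] == fc2_expert_weights_shape[2] * inner_dim_multiplier * 2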

tensorrt_llm/_torch/custom_ops/torch_custom_ops.py

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ def __init__(
                 x_dtype, weight_dtype, output_dtype,
                 use_deepseek_fp8_block_scale, use_w4_group_scaling,
                 use_int8_woq_per_channel, use_mxfp8_act_scaling,
-                use_fused_finalize) # , activation_type)
+                use_fused_finalize)
             self.fused_moe_runner = MoERunner.runner_dict[instance_key]
 
     def get_valid_tactics(self, inputs: List[torch.Tensor],
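
The removed trailing comment suggests activation_type was once considered as an extra element of the runner cache key but never used. Below is a minimal sketch of the cache-by-key pattern visible in the context line; the class, method, and key values are hypothetical, and only the runner_dict lookup keyed by an instance_key tuple comes from the diff.

# Sketch only: one runner object is built per configuration key and reused afterwards.
from typing import Any, Dict, Tuple


class SketchMoERunner:  # hypothetical stand-in, not the real MoERunner
    runner_dict: Dict[Tuple[Any, ...], "SketchMoERunner"] = {}

    def __init__(self, instance_key: Tuple[Any, ...]):
        self.instance_key = instance_key

    @classmethod
    def get_or_create(cls, *key_parts: Any) -> "SketchMoERunner":
        instance_key = tuple(key_parts)
        if instance_key not in cls.runner_dict:
            cls.runner_dict[instance_key] = cls(instance_key)
        return cls.runner_dict[instance_key]


# Same configuration key -> same cached runner instance.
runner_a = SketchMoERunner.get_or_create("bf16", "bf16", "bf16", False, False)
runner_b = SketchMoERunner.get_or_create("bf16", "bf16", "bf16", False, False)
assert runner_a is runner_b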
