Skip to content

Commit e276bac

Browse files
committed
applying coderabbit suggestion
Signed-off-by: Rakib Hasan <[email protected]>
1 parent 79697c2 commit e276bac

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

tensorrt_llm/_torch/modules/fused_moe/quantization.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
 import torch.nn.functional as F
 from torch import nn

+import tensorrt_llm.logger as trtllm_logger
 from tensorrt_llm._utils import get_sm_version
-from tensorrt_llm.logger import logger
 from tensorrt_llm.quantization.utils.fp4_utils import (
     float4_sf_dtype, get_reorder_rows_for_gated_act_gemm_row_indices,
     get_shuffle_matrix_a_row_indices, get_shuffle_matrix_sf_a_row_indices)
@@ -743,7 +743,7 @@ def load_weights(self, module: torch.nn.Module, weights: List[Dict],
             if int(name.split(".")[0]) not in expert_ids:
                 continue
             weight_name = name.replace("weight_scale_inv", "weight")
-            logger.debug(f"Resmoothing {weight_name}")
+            trtllm_logger.logger.debug(f"Resmoothing {weight_name}")
             weight = weights[weight_name][:]
             scale = weights[name][:]
             weights[weight_name], weights[name] = resmooth_to_fp8_e8m0(

0 commit comments

Comments
 (0)