
Commit 3c70cc8

Change activation function default value
Signed-off-by: Neta Zmora <[email protected]>
1 parent e45f6cc commit 3c70cc8

File tree

1 file changed: +3 −2 lines changed


tensorrt_llm/_torch/custom_ops/torch_custom_ops.py

Lines changed: 3 additions & 2 deletions
```diff
@@ -171,13 +171,14 @@ def fused_moe(
     tune_max_num_tokens: int = 8192,
     tuner_num_tokens: Optional[int] = None,
     tuner_top_k: Optional[int] = None,
-    activation_type: int = ActivationType.Swiglu,
+    activation_type: int = None,
     unpadded_hidden_size: Optional[int] = None,
     out_tensor: Optional[torch.Tensor] = None,
 ) -> List[torch.Tensor]:
 
     tuner = AutoTuner.get()
-    activation_type = ActivationType(activation_type)
+    activation_type = ActivationType.Swiglu if activation_type is None else ActivationType(
+        activation_type)
     # Only the non-alltoall case is considered for profiling in the warmup phase.
     # Therefore, to get the correct tactics during the actual inference, the inputs to the tuner should be the same as when not using alltoall.
     if enable_alltoall:
```
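
A minimal sketch of the default-resolution pattern this commit introduces: the signature default becomes `None`, and the function falls back to `ActivationType.Swiglu` at call time, coercing any explicit integer into the enum. The `ActivationType` enum below is a stand-in (only `Swiglu` appears in the diff; the other members and the helper name are illustrative), not the TensorRT-LLM implementation.

```python
# Sketch only: illustrates resolving a None/int activation_type argument
# into an enum value, mirroring the logic added in this commit.
from enum import IntEnum
from typing import Optional


class ActivationType(IntEnum):
    # Stand-in enum; member values other than Swiglu are illustrative.
    Gelu = 0
    Relu = 1
    Swiglu = 2


def resolve_activation_type(activation_type: Optional[int]) -> ActivationType:
    # Fall back to Swiglu when the caller passes nothing; otherwise coerce the
    # raw int (e.g. a value crossing a torch custom-op boundary) into the enum.
    return (ActivationType.Swiglu
            if activation_type is None else ActivationType(activation_type))


if __name__ == "__main__":
    print(resolve_activation_type(None))                      # ActivationType.Swiglu
    print(resolve_activation_type(int(ActivationType.Gelu)))  # ActivationType.Gelu
```

Moving the enum default out of the signature and into the body keeps the registered op's default a plain `None`, while the Swiglu fallback behavior stays the same for callers that omit the argument.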
