torchvision/models/quantization — 1 file changed, +1 −3 lines changed

@@ -21,7 +21,7 @@ class QuantizableSqueezeExcitation(SElayer):
     _version = 2

     def __init__(self, *args: Any, **kwargs: Any) -> None:
-        kwargs["scale_activation"] = nn.Hardswish
+        kwargs["scale_activation"] = nn.Hardsigmoid
         super().__init__(*args, **kwargs)
         self.skip_mul = nn.quantized.FloatFunctional()

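Side note on the one-line change above: a squeeze-and-excitation block rescales each channel by a gate that must lie in [0, 1]. nn.Hardsigmoid produces such values, while nn.Hardswish is negative on part of its domain and unbounded above, so it cannot serve as the scale activation. A minimal sketch (not part of this patch) showing the difference:

```python
import torch
import torch.nn as nn

x = torch.linspace(-4.0, 4.0, 9)

# Hardsigmoid clamps its output to [0, 1]: a valid per-channel gate.
print(nn.Hardsigmoid()(x))

# Hardswish is negative on (-3, 0) and grows without bound for large x,
# so using it as the SE gate would amplify instead of attenuate channels.
print(nn.Hardswish()(x))
```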
@@ -49,8 +49,6 @@ def _load_from_state_dict(
             "scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
             "scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
             "scale_activation.activation_post_process.observer_enabled": torch.tensor([1]),
-            "scale_activation.activation_post_process.activation_post_process.min_val": torch.tensor(float('inf')),
-            "scale_activation.activation_post_process.activation_post_process.max_val": torch.tensor(-float('inf')),
         }
         for k, v in default_state_dict.items():
             full_key = prefix + k
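The two removed entries were ±inf defaults for the observer's min_val/max_val buffers. For context, the surrounding loop uses default_state_dict to backfill keys missing from older checkpoints (the class bumped _version to 2). The diff truncates the loop body after `full_key = prefix + k`, so the guard in this sketch is an assumption about how such a backfill typically finishes, not a quote of the torchvision code:

```python
import torch

# Hypothetical old checkpoint that predates the observer entries.
state_dict = {"fc1.weight": torch.zeros(8, 4)}
prefix = ""

default_state_dict = {
    "scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
    "scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
}

for k, v in default_state_dict.items():
    full_key = prefix + k
    if full_key not in state_dict:  # assumed guard; not shown in the diff
        state_dict[full_key] = v   # fill the default so strict loading succeeds
```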