torchvision/models/mobilenetv3.py (15 additions, 22 deletions)
@@ -1,12 +1,13 @@
+import warnings
 import torch
 
 from functools import partial
 from torch import nn, Tensor
-from torch.nn import functional as F
-from typing import Any, Callable, Dict, List, Optional, Sequence
+from typing import Any, Callable, List, Optional, Sequence
 
 from .._internally_replaced_utils import load_state_dict_from_url
-from torchvision.models.mobilenetv2 import _make_divisible, ConvBNActivation
+from .efficientnet import SqueezeExcitation as SElayer
+from .mobilenetv2 import _make_divisible, ConvBNActivation
 
 
 __all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"]
@@ -18,25 +19,16 @@
 }
 
 
-class SqueezeExcitation(nn.Module):
-    # Implemented as described at Figure 4 of the MobileNetV3 paper
+class SqueezeExcitation(SElayer):
+    """DEPRECATED
+    """
     def __init__(self, input_channels: int, squeeze_factor: int = 4):
-        super().__init__()
         squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
-        self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
-        self.relu = nn.ReLU(inplace=True)
-        self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
-
-    def _scale(self, input: Tensor, inplace: bool) -> Tensor:
-        scale = F.adaptive_avg_pool2d(input, 1)
-        scale = self.fc1(scale)
-        scale = self.relu(scale)
-        scale = self.fc2(scale)
-        return F.hardsigmoid(scale, inplace=inplace)
-
-    def forward(self, input: Tensor) -> Tensor:
-        scale = self._scale(input, True)
-        return scale * input
+        super().__init__(input_channels, squeeze_channels, scale_activation=nn.Hardsigmoid)
+        self.relu = self.activation
+        delattr(self, 'activation')
+        warnings.warn(
+            "This SqueezeExcitation class is deprecated and will be removed in future versions.", FutureWarning)
 
 
 class InvertedResidualConfig:
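For reference, the deleted class and the shared SElayer it now subclasses compute the same squeeze-and-excitation operation: a global average pool "squeezes" each channel to a scalar, two 1x1 convolutions with a ReLU in between form the bottleneck, and a gating activation produces per-channel scales that multiply the input. A minimal, self-contained sketch of that computation (an illustration, not the torchvision source; attribute names mirror the diff):

# Sketch of the SE computation shared by the old class and SElayer.
# Attribute names (fc1, fc2, activation, scale_activation, _scale) mirror
# the diff; this is illustrative, not the torchvision source.
import torch
from torch import nn, Tensor


class SEBlockSketch(nn.Module):
    def __init__(self, input_channels: int, squeeze_channels: int,
                 activation=nn.ReLU, scale_activation=nn.Hardsigmoid) -> None:
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)      # "squeeze" to NxCx1x1
        self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
        self.activation = activation()
        self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
        self.scale_activation = scale_activation()  # per-channel gate

    def _scale(self, input: Tensor) -> Tensor:
        scale = self.avgpool(input)
        scale = self.fc1(scale)
        scale = self.activation(scale)
        scale = self.fc2(scale)
        return self.scale_activation(scale)

    def forward(self, input: Tensor) -> Tensor:
        # "excitation": gates broadcast-multiplied onto the input
        return self._scale(input) * input


x = torch.randn(1, 16, 8, 8)
assert SEBlockSketch(16, 8)(x).shape == x.shape

With scale_activation=nn.Hardsigmoid this matches the hardsigmoid gating of the removed class, which is exactly what the deprecated shim passes to super().__init__; the shim also re-exposes the ReLU under the old relu attribute name before removing the new one and warning.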
@@ -60,7 +52,7 @@ def adjust_channels(channels: int, width_mult: float):
 class InvertedResidual(nn.Module):
     # Implemented as described at section 5 of MobileNetV3 paper
     def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Module],
-                 se_layer: Callable[..., nn.Module] = SqueezeExcitation):
+                 se_layer: Callable[..., nn.Module] = partial(SElayer, scale_activation=nn.Hardsigmoid)):
         super().__init__()
         if not (1 <= cnf.stride <= 2):
             raise ValueError('illegal stride value')
@@ -81,7 +73,8 @@ def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Mod
                                        stride=stride, dilation=cnf.dilation, groups=cnf.expanded_channels,
                                        norm_layer=norm_layer, activation_layer=activation_layer))
         if cnf.use_se:
-            layers.append(se_layer(cnf.expanded_channels))
+            squeeze_channels = _make_divisible(cnf.expanded_channels // 4, 8)
+            layers.append(se_layer(cnf.expanded_channels, squeeze_channels))
 
         # project
         layers.append(ConvBNActivation(cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer,
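Two details of the new se_layer default are worth calling out: functools.partial pre-binds scale_activation=nn.Hardsigmoid, so call sites keep passing only channel counts, and the squeeze width the old class computed internally is now computed where the layer is built. A sketch under those assumptions (DemoSE is a hypothetical stand-in with SElayer's constructor shape; _make_divisible mirrors the helper the diff imports from mobilenetv2):

from functools import partial

from torch import nn


def _make_divisible(v, divisor, min_value=None):
    # Round v to the nearest multiple of divisor, but never drop more than
    # 10% below v (mirrors the mobilenetv2 helper the diff imports).
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class DemoSE(nn.Module):
    # Hypothetical stand-in with SElayer's (input_channels, squeeze_channels,
    # scale_activation) constructor shape.
    def __init__(self, input_channels, squeeze_channels,
                 scale_activation=nn.Sigmoid):
        super().__init__()
        self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
        self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
        self.scale_activation = scale_activation()


se_layer = partial(DemoSE, scale_activation=nn.Hardsigmoid)

expanded_channels = 72
squeeze_channels = _make_divisible(expanded_channels // 4, 8)  # 18 rounds up to 24
block = se_layer(expanded_channels, squeeze_channels)          # Hardsigmoid pre-bound

The inlined arithmetic is the same one the old class ran with its default squeeze_factor=4 (input_channels // 4, rounded to a multiple of 8), so the SE weight shapes are unchanged by the refactor.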
torchvision/models/quantization/mobilenetv3.py (10 additions, 7 deletions)
@@ -1,8 +1,9 @@
 import torch
 from torch import nn, Tensor
 from ..._internally_replaced_utils import load_state_dict_from_url
-from torchvision.models.mobilenetv3 import InvertedResidual, InvertedResidualConfig, ConvBNActivation, MobileNetV3,\
-    SqueezeExcitation, model_urls, _mobilenet_v3_conf
+from ..efficientnet import SqueezeExcitation as SElayer
+from ..mobilenetv3 import InvertedResidual, InvertedResidualConfig, ConvBNActivation, MobileNetV3,\
+    model_urls, _mobilenet_v3_conf
 from torch.quantization import QuantStub, DeQuantStub, fuse_modules
 from typing import Any, List, Optional
 from .utils import _replace_relu
@@ -16,16 +17,17 @@
 }
 
 
-class QuantizableSqueezeExcitation(SqueezeExcitation):
+class QuantizableSqueezeExcitation(SElayer):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
+        kwargs["scale_activation"] = nn.Hardswish
         super().__init__(*args, **kwargs)
         self.skip_mul = nn.quantized.FloatFunctional()
 
     def forward(self, input: Tensor) -> Tensor:
-        return self.skip_mul.mul(self._scale(input, False), input)
+        return self.skip_mul.mul(self._scale(input), input)
 
     def fuse_model(self) -> None:
-        fuse_modules(self, ['fc1', 'relu'], inplace=True)
+        fuse_modules(self, ['fc1', 'activation'], inplace=True)
 
 
 class QuantizableInvertedResidual(InvertedResidual):
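The quantizable subclass leans on two standard torch.quantization hooks: nn.quantized.FloatFunctional wraps the elementwise multiply so quantization observers can attach to it (a bare `*` cannot be observed), and fuse_modules folds the Conv2d/ReLU pair, now registered as fc1/activation, into a single fused module. A minimal runnable sketch with an illustrative module, not the real class:

import torch
from torch import nn
from torch.quantization import fuse_modules


class TinyQuantSE(nn.Module):
    # Illustrative module; mirrors the fc1/activation naming and the
    # FloatFunctional multiply from the diff.
    def __init__(self, channels: int = 8, squeeze: int = 2) -> None:
        super().__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, squeeze, 1)
        self.activation = nn.ReLU()
        self.fc2 = nn.Conv2d(squeeze, channels, 1)
        self.scale_activation = nn.Hardswish()
        self.skip_mul = nn.quantized.FloatFunctional()  # observable multiply

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        s = self.avgpool(x)
        s = self.scale_activation(self.fc2(self.activation(self.fc1(s))))
        return self.skip_mul.mul(s, x)

    def fuse_model(self) -> None:
        # Conv2d + ReLU -> fused ConvReLU2d, matching ['fc1', 'activation']
        fuse_modules(self, ['fc1', 'activation'], inplace=True)


m = TinyQuantSE().eval()  # fusion expects eval mode
m.fuse_model()
out = m(torch.randn(1, 8, 4, 4))

Note also that the constructor overrides whatever gate the caller passes with kwargs["scale_activation"] = nn.Hardswish, so the quantizable variant always uses Hardswish rather than the float model's Hardsigmoid default, presumably for compatibility with the released quantized checkpoints.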
@@ -79,11 +81,12 @@ def _load_weights(
     model: QuantizableMobileNetV3,
     model_url: Optional[str],
     progress: bool,
+    strict: bool = True
 ) -> None:
     if model_url is None:
         raise ValueError("No checkpoint is available for {}".format(arch))
     state_dict = load_state_dict_from_url(model_url, progress=progress)
-    model.load_state_dict(state_dict)
+    model.load_state_dict(state_dict, strict=strict)
@@ -107,7 +110,7 @@ def _mobilenet_v3_model(
         torch.quantization.prepare_qat(model, inplace=True)
 
         if pretrained:
-            _load_weights(arch, model, quant_model_urls.get(arch + '_' + backend, None), progress)
+            _load_weights(arch, model, quant_model_urls.get(arch + '_' + backend, None), progress, strict=False)
 
         torch.quantization.convert(model, inplace=True)
         model.eval()
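Lastly, the strict plumbing: load_state_dict(strict=False) skips the exact-key check and instead returns the missing and unexpected keys, which is presumably what lets the previously released quantized checkpoints load against the refactored SE module tree. A toy illustration (hypothetical model and keys, not the torchvision checkpoint):

import torch
from torch import nn

model = nn.Sequential(nn.Conv2d(3, 8, 1), nn.ReLU())
checkpoint = {
    '0.weight': torch.zeros(8, 3, 1, 1),  # matches the model
    'legacy.scale': torch.ones(1),        # stale key from an older layout
}

# strict=False tolerates both directions of mismatch instead of raising.
result = model.load_state_dict(checkpoint, strict=False)
print(result.missing_keys)     # ['0.bias']
print(result.unexpected_keys)  # ['legacy.scale']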