diff --git a/docs/source/en/backbones.md b/docs/source/en/backbones.md index c54dc1d00af0..874f2667ec4b 100644 --- a/docs/source/en/backbones.md +++ b/docs/source/en/backbones.md @@ -36,8 +36,8 @@ This guide describes the backbone class, backbones from the [timm](https://hf.co There are two backbone classes. -- [`~transformers.utils.BackboneMixin`] allows you to load a backbone and includes functions for extracting the feature maps and indices. -- [`~transformers.utils.BackboneConfigMixin`] allows you to set the feature map and indices of a backbone configuration. +- [`~transformers.backbone_utils.BackboneMixin`] allows you to load a backbone and includes functions for extracting the feature maps and indices from the configuration. +- [`~transformers.backbone_utils.BackboneConfigMixin`] allows you to set, align, and verify the feature maps and indices of a backbone configuration. Refer to the [Backbone](./main_classes/backbones) API documentation to check which models support a backbone. @@ -69,12 +69,13 @@ When you know a model supports a backbone, you can load the backbone and neck di The example below loads a [ResNet](./model_doc/resnet) backbone and neck for use in a [MaskFormer](./model_doc/maskformer) instance segmentation head. -Set `backbone` to a pretrained model and `use_pretrained_backbone=True` to use pretrained weights instead of randomly initialized weights. +Note that initializing from a config creates the model with random weights. To load pretrained weights, use the `from_pretrained` API instead. ```py -from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation +from transformers import AutoConfig, MaskFormerConfig, MaskFormerForInstanceSegmentation -config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=True) +backbone_config = AutoConfig.from_pretrained("microsoft/resnet-50") +config = MaskFormerConfig(backbone_config=backbone_config) model = MaskFormerForInstanceSegmentation(config) ``` @@ -96,14 +97,13 @@ model = MaskFormerForInstanceSegmentation(config) ## timm backbones -[timm](https://hf.co/docs/timm/index) is a collection of vision models for training and inference. Transformers supports timm models as backbones with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes. - -Set `use_timm_backbone=True` to load pretrained timm weights, and `use_pretrained_backbone` to use pretrained or randomly initialized weights. +[timm](https://hf.co/docs/timm/index) is a collection of vision models for training and inference. Transformers supports timm models as backbones with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes. Set the timm checkpoint in `backbone` of [`TimmBackboneConfig`] to create a model with a timm backbone and randomly initialized weights. ```py -from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation +from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, TimmBackboneConfig -config = MaskFormerConfig(backbone="resnet50", use_timm_backbone=True, use_pretrained_backbone=True) +backbone_config = TimmBackboneConfig(backbone="resnet50", out_indices=[-1]) +config = MaskFormerConfig(backbone_config=backbone_config) model = MaskFormerForInstanceSegmentation(config) ``` @@ -112,7 +112,7 @@ You could also explicitly call the [`TimmBackboneConfig`] class to load and crea ```py from transformers import TimmBackboneConfig -backbone_config = TimmBackboneConfig("resnet50", use_pretrained_backbone=True) +backbone_config = TimmBackboneConfig("resnet50") ``` Pass the backbone configuration to the model configuration and instantiate the model head, [`MaskFormerForInstanceSegmentation`], with the backbone.
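As a quick reference for the API documented above, here is a minimal sketch (assuming the `microsoft/resnet-50` checkpoint used in the examples) that contrasts config-only initialization with loading pretrained backbone weights through [`AutoBackbone`]:

```py
from transformers import AutoBackbone, AutoConfig, MaskFormerConfig, MaskFormerForInstanceSegmentation

# Config-only initialization: every weight, backbone included, is randomly initialized.
backbone_config = AutoConfig.from_pretrained("microsoft/resnet-50")
config = MaskFormerConfig(backbone_config=backbone_config)
model = MaskFormerForInstanceSegmentation(config)

# Loading pretrained backbone weights directly with AutoBackbone,
# selecting which stages to return as feature maps.
backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_indices=[1, 2, 3, 4])
print(backbone.channels)  # channel dimension of each returned feature map
```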
diff --git a/docs/source/en/main_classes/backbones.md b/docs/source/en/main_classes/backbones.md index 5f1fc1dcbe1f..3a0291bda898 100644 --- a/docs/source/en/main_classes/backbones.md +++ b/docs/source/en/main_classes/backbones.md @@ -18,8 +18,8 @@ rendered properly in your Markdown viewer. A backbone is a model used for feature extraction for higher level computer vision tasks such as object detection and image classification. Transformers provides an [`AutoBackbone`] class for initializing a Transformers backbone from pretrained model weights, and two utility classes: -* [`~utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices. -* [`~utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration. +* [`~backbone_utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices. +* [`~backbone_utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration. [timm](https://hf.co/docs/timm/index) models are loaded with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes. @@ -45,11 +45,11 @@ Backbones are supported for the following models: ## BackboneMixin -[[autodoc]] utils.BackboneMixin +[[autodoc]] backbone_utils.BackboneMixin ## BackboneConfigMixin -[[autodoc]] utils.BackboneConfigMixin +[[autodoc]] backbone_utils.BackboneConfigMixin ## TimmBackbone diff --git a/docs/source/en/model_doc/dab-detr.md b/docs/source/en/model_doc/dab-detr.md index e3262f140f4d..1fde67b3f5a3 100644 --- a/docs/source/en/model_doc/dab-detr.md +++ b/docs/source/en/model_doc/dab-detr.md @@ -110,7 +110,7 @@ Option 2: Instantiate DAB-DETR with randomly initialized weights for Transformer Option 3: Instantiate DAB-DETR with randomly initialized weights for backbone + Transformer ```py ->>> config = DabDetrConfig(use_pretrained_backbone=False) +>>> config = DabDetrConfig() >>> model = DabDetrForObjectDetection(config) ``` diff --git a/docs/source/en/model_doc/detr.md b/docs/source/en/model_doc/detr.md index 792857b23f6c..33770fe1a227 100644 --- a/docs/source/en/model_doc/detr.md +++ b/docs/source/en/model_doc/detr.md @@ -132,7 +132,7 @@ model = DetrForObjectDetection(config) - Option 3: Instantiate DETR with randomly initialized weights for backbone + Transformer ```python -config = DetrConfig(use_pretrained_backbone=False) +config = DetrConfig() model = DetrForObjectDetection(config) ``` diff --git a/docs/source/en/model_doc/pvt_v2.md b/docs/source/en/model_doc/pvt_v2.md index 3c8a9a84e204..460fca484639 100644 --- a/docs/source/en/model_doc/pvt_v2.md +++ b/docs/source/en/model_doc/pvt_v2.md @@ -64,7 +64,7 @@ processed = image_processor(image) outputs = model(torch.tensor(processed["pixel_values"])) ``` -To use the PVTv2 as a backbone for more complex architectures like DeformableDETR, you can use AutoBackbone (this model would need fine-tuning as you're replacing the backbone in the pretrained model): +To use the PVTv2 as a backbone for more complex architectures like DeformableDETR, you can use AutoBackbone (this model would need fine-tuning as you're replacing the backbone in the pretrained model and it is initialized with random weights): ```python import requests @@ -77,7 +77,6 @@ model = AutoModelForObjectDetection.from_config( config=AutoConfig.from_pretrained( "SenseTime/deformable-detr", 
backbone_config=AutoConfig.from_pretrained("OpenGVLab/pvt_v2_b5"), - use_timm_backbone=False ), ) diff --git a/docs/source/en/tasks/training_vision_backbone.md b/docs/source/en/tasks/training_vision_backbone.md index f941ccc0dd1b..675590763102 100644 --- a/docs/source/en/tasks/training_vision_backbone.md +++ b/docs/source/en/tasks/training_vision_backbone.md @@ -38,13 +38,18 @@ Initialize [`DetrConfig`] with the pre-trained DINOv3 ConvNext backbone. Use `nu ```py -from transformers import DetrConfig, DetrForObjectDetection, AutoImageProcessor +from transformers import DetrConfig, DetrForObjectDetection, AutoImageProcessor, AutoConfig, AutoBackbone -config = DetrConfig(backbone="facebook/dinov3-convnext-large-pretrain-lvd1689m", - use_pretrained_backbone=True, use_timm_backbone=False, +# Create a model with randomly initialized weights +backbone_config = AutoConfig.from_pretrained("facebook/dinov3-convnext-large-pretrain-lvd1689m") +backbone = AutoBackbone.from_pretrained("facebook/dinov3-convnext-large-pretrain-lvd1689m") + +config = DetrConfig(backbone_config=backbone_config, num_labels=1, id2label={0: "license_plate"}, label2id={"license_plate": 0}) model = DetrForObjectDetection(config) -for param in model.model.backbone.parameters(): - param.requires_grad = False +# Assign the pretrained backbone and freeze its weights +model.model.backbone = backbone +model.model.freeze_backbone() + image_processor = AutoImageProcessor.from_pretrained("facebook/detr-resnet-50") ``` diff --git a/docs/source/ja/model_doc/detr.md b/docs/source/ja/model_doc/detr.md index d1adb5f838b1..f87052378502 100644 --- a/docs/source/ja/model_doc/detr.md +++ b/docs/source/ja/model_doc/detr.md @@ -140,7 +140,7 @@ DETR モデルをインスタンス化するには 3 つの方法があります オプション 3: バックボーン + トランスフォーマーのランダムに初期化された重みを使用して DETR をインスタンス化します。 ```py ->>> config = DetrConfig(use_pretrained_backbone=False) +>>> config = DetrConfig() >>> model = DetrForObjectDetection(config) ``` diff --git a/examples/modular-transformers/modeling_test_detr.py b/examples/modular-transformers/modeling_test_detr.py index c3015f75619c..f679fb14d95b 100644 --- a/examples/modular-transformers/modeling_test_detr.py +++ b/examples/modular-transformers/modeling_test_detr.py @@ -15,6 +15,7 @@ from ...
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...integrations import use_kernel_forward_from_hub from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer @@ -22,7 +23,6 @@ from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import ModelOutput, auto_docstring, is_timm_available, requires_backends, torch_compilable_check -from ...utils.backbone_utils import load_backbone from .configuration_test_detr import TestDetrConfig diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e5677dd872f9..c9f09ac4a41a 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -439,6 +439,7 @@ _import_structure["modeling_flash_attention_utils"] = [] _import_structure["modeling_layers"] = ["GradientCheckpointingLayer"] _import_structure["modeling_outputs"] = [] + _import_structure["backbone_utils"] = ["BackboneConfigMixin", "BackboneMixin"] _import_structure["modeling_rope_utils"] = ["ROPE_INIT_FUNCTIONS", "dynamic_rope_update", "RopeParameters"] _import_structure["modeling_utils"] = ["PreTrainedModel", "AttentionInterface"] _import_structure["masking_utils"] = ["AttentionMaskInterface"] @@ -467,6 +468,8 @@ # Direct imports for type-checking if TYPE_CHECKING: # All modeling imports + # Models + from .backbone_utils import BackboneConfigMixin, BackboneMixin from .cache_utils import Cache as Cache from .cache_utils import DynamicCache as DynamicCache from .cache_utils import DynamicLayer as DynamicLayer @@ -609,8 +612,6 @@ from .integrations.executorch import convert_and_export_with_cache as convert_and_export_with_cache from .masking_utils import AttentionMaskInterface as AttentionMaskInterface from .model_debugging_utils import model_addition_debugger_context as model_addition_debugger_context - - # Models from .modeling_layers import GradientCheckpointingLayer as GradientCheckpointingLayer from .modeling_rope_utils import ROPE_INIT_FUNCTIONS as ROPE_INIT_FUNCTIONS from .modeling_rope_utils import RopeParameters as RopeParameters diff --git a/src/transformers/backbone_utils.py b/src/transformers/backbone_utils.py new file mode 100644 index 000000000000..59fa0e56b43a --- /dev/null +++ b/src/transformers/backbone_utils.py @@ -0,0 +1,326 @@ +# Copyright 2026 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of utils to be used by backbones and their components.""" + +import enum +import inspect + +from huggingface_hub import repo_exists + +from .utils import logging + + +logger = logging.get_logger(__name__) + + +class BackboneType(enum.Enum): + TIMM = "timm" + TRANSFORMERS = "transformers" + + +class BackboneConfigMixin: + """ + A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations. 
+ """ + + def set_output_features_output_indices( + self, + out_features: list | None, + out_indices: list | None, + ): + """ + Sets output indices and features to new values and aligns them with the given `stage_names`. + If one of the inputs is not given, find the corresponding `out_features` or `out_indices` + for the given `stage_names`. + + Args: + out_features (`list[str]`, *optional*): + The names of the features for the backbone to output. Defaults to `config._out_features` if not provided. + out_indices (`list[int]` or `tuple[int]`, *optional*): + The indices of the features for the backbone to output. Defaults to `config._out_indices` if not provided. + """ + self._out_features = out_features + self._out_indices = list(out_indices) if isinstance(out_indices, tuple) else out_indices + + # First verify that the out_features and out_indices are valid + self.verify_out_features_out_indices() + + # Align output features with indices + out_features, out_indices = self._out_features, self._out_indices + if out_indices is None and out_features is None: + out_indices = [len(self.stage_names) - 1] + out_features = [self.stage_names[-1]] + elif out_indices is None and out_features is not None: + out_indices = [self.stage_names.index(layer) for layer in out_features] + elif out_features is None and out_indices is not None: + out_features = [self.stage_names[idx] for idx in out_indices] + + # Update values and verify that the aligned out_features and out_indices are valid + self._out_features, self._out_indices = out_features, out_indices + self.verify_out_features_out_indices() + + def verify_out_features_out_indices(self): + """ + Verify that out_indices and out_features are valid for the given stage_names. + """ + if self.stage_names is None: + raise ValueError("Stage_names must be set for transformers backbones") + + if self._out_features is not None: + if not isinstance(self._out_features, (list,)): + raise ValueError(f"out_features must be a list got {type(self._out_features)}") + if any(feat not in self.stage_names for feat in self._out_features): + raise ValueError( + f"out_features must be a subset of stage_names: {self.stage_names} got {self._out_features}" + ) + if len(self._out_features) != len(set(self._out_features)): + raise ValueError(f"out_features must not contain any duplicates, got {self._out_features}") + if self._out_features != ( + sorted_feats := [feat for feat in self.stage_names if feat in self._out_features] + ): + raise ValueError( + f"out_features must be in the same order as stage_names, expected {sorted_feats} got {self._out_features}" + ) + + if self._out_indices is not None: + if not isinstance(self._out_indices, list): + raise ValueError(f"out_indices must be a list, got {type(self._out_indices)}") + # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,] + positive_indices = tuple(idx % len(self.stage_names) if idx < 0 else idx for idx in self._out_indices) + if any(idx for idx in positive_indices if idx not in range(len(self.stage_names))): + raise ValueError( + f"out_indices must be valid indices for stage_names {self.stage_names}, got {self._out_indices}" + ) + if len(positive_indices) != len(set(positive_indices)): + msg = f"out_indices must not contain any duplicates, got {self._out_indices}" + msg += f"(equivalent to {positive_indices}))" if positive_indices != self._out_indices else "" + raise ValueError(msg) + if positive_indices != tuple(sorted(positive_indices)): + sorted_negative = [ + idx for _, idx in 
sorted(zip(positive_indices, self._out_indices), key=lambda x: x[0]) + ] + raise ValueError( + f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {self._out_indices}" + ) + + if self._out_features is not None and self._out_indices is not None: + if len(self._out_features) != len(self._out_indices): + raise ValueError("out_features and out_indices should have the same length if both are set") + if self._out_features != [self.stage_names[idx] for idx in self._out_indices]: + raise ValueError("out_features and out_indices should correspond to the same stages if both are set") + + @property + def out_features(self): + return self._out_features + + @out_features.setter + def out_features(self, out_features: list[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. + """ + self.set_output_features_output_indices(out_features=out_features, out_indices=None) + + @property + def out_indices(self): + return self._out_indices + + @out_indices.setter + def out_indices(self, out_indices: tuple[int, ...] | list[int]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + out_indices = list(out_indices) if out_indices is not None else out_indices + self.set_output_features_output_indices(out_features=None, out_indices=out_indices) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PreTrainedConfig` to + include the `out_features` and `out_indices` attributes. + """ + output = super().to_dict() + output["out_features"] = output.pop("_out_features", None) + output["out_indices"] = output.pop("_out_indices", None) + return output + + +class BackboneMixin: + backbone_type: BackboneType | None = None + + # Attribute to indicate if the backbone has attention and can return attention outputs. + # Should be set to `False` for conv-based models to be able to run `forward_with_filtered_kwargs` + has_attentions: bool = True + + def __init__(self, *args, **kwargs) -> None: + """ + Method to initialize the backbone. This method is called by the constructor of the base class after the + pretrained model weights have been loaded. + """ + super().__init__(*args, **kwargs) + timm_backbone = kwargs.pop("timm_backbone", None) + if timm_backbone is not None: + self.backbone_type = BackboneType.TIMM + else: + self.backbone_type = BackboneType.TRANSFORMERS + + if self.backbone_type == BackboneType.TIMM: + self._init_timm_backbone(backbone=timm_backbone) + elif self.backbone_type == BackboneType.TRANSFORMERS: + self._init_transformers_backbone() + else: + raise ValueError(f"backbone_type {self.backbone_type} not supported.") + + def _init_timm_backbone(self, backbone) -> None: + """ + Initialize the backbone model from timm. The backbone must already be loaded to backbone + """ + + # These will disagree with the defaults for the transformers models e.g. 
for resnet50 + # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] + # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4'] + self.stage_names = [stage["module"] for stage in backbone.feature_info.info] + self.num_features = [stage["num_chs"] for stage in backbone.feature_info.info] + + self.config._out_indices = list(backbone.feature_info.out_indices) + self.config._out_features = backbone.feature_info.module_name() + self.config.stage_names = self.stage_names + + # We verify the out indices and out features are valid + self.config.verify_out_features_out_indices() + + def _init_transformers_backbone(self) -> None: + self.stage_names = self.config.stage_names + self.config.verify_out_features_out_indices() + # Number of channels for each stage. This is set in the transformer backbone model init + self.num_features = None + + @property + def out_features(self): + return self.config._out_features + + @out_features.setter + def out_features(self, out_features: list[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. + """ + self.config.out_features = out_features + + @property + def out_indices(self): + return self.config._out_indices + + @out_indices.setter + def out_indices(self, out_indices: tuple[int] | list[int]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + self.config.out_indices = out_indices + + @property + def out_feature_channels(self): + # the current backbones will output the number of channels for each stage + # even if that stage is not in the out_features list. + return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} + + @property + def channels(self): + return [self.out_feature_channels[name] for name in self.out_features] + + def forward_with_filtered_kwargs(self, *args, **kwargs): + if not self.has_attentions: + kwargs.pop("output_attentions", None) + if self.backbone_type == BackboneType.TIMM: + signature = dict(inspect.signature(self.forward).parameters) + kwargs = {k: v for k, v in kwargs.items() if k in signature} + return self(*args, **kwargs) + + def forward( + self, + pixel_values, + output_hidden_states: bool | None = None, + output_attentions: bool | None = None, + return_dict: bool | None = None, + ): + raise NotImplementedError("This method should be implemented by the derived class.") + + +def consolidate_backbone_kwargs_to_config( + backbone_config, + default_backbone: str | None = None, + default_config_type: str | None = None, + default_config_kwargs: dict | None = None, + timm_default_kwargs: dict | None = None, + **kwargs, +): + # Lazy import to avoid circular import issues. Can be imported properly + # after deleting ref to `BackboneMixin` in `utils/backbone_utils.py` + from .configuration_utils import PreTrainedConfig + from .models.auto import CONFIG_MAPPING + + use_timm_backbone = kwargs.pop("use_timm_backbone", True) + backbone_kwargs = kwargs.pop("backbone_kwargs", {}) + backbone = kwargs.pop("backbone") if kwargs.get("backbone") is not None else default_backbone + kwargs.pop("use_pretrained_backbone", None) + + # Init timm backbone with hardcoded values for BC. If everything is set to `None` and there is + # a default timm config, we use it to init the backbone. 
+ if ( + timm_default_kwargs is not None + and use_timm_backbone + and backbone is not None + and backbone_config is None + and not backbone_kwargs + ): + backbone_config = CONFIG_MAPPING["timm_backbone"](backbone=backbone, **timm_default_kwargs) + elif backbone is not None and backbone_config is None: + if repo_exists(backbone): + config_dict, _ = PreTrainedConfig.get_config_dict(backbone) + config_class = CONFIG_MAPPING[config_dict["model_type"]] + config_dict.update(backbone_kwargs) + backbone_config = config_class(**config_dict) + else: + backbone_config = CONFIG_MAPPING["timm_backbone"](backbone=backbone, **backbone_kwargs) + elif backbone_config is None and default_config_type is not None: + logger.info( + f"`backbone_config` is `None`. Initializing the config with the default `{default_config_type}` vision config." + ) + default_config_kwargs = default_config_kwargs or {} + backbone_config = CONFIG_MAPPING[default_config_type](**default_config_kwargs) + elif isinstance(backbone_config, dict): + backbone_model_type = backbone_config.get("model_type") + config_class = CONFIG_MAPPING[backbone_model_type] + backbone_config = config_class.from_dict(backbone_config) + + return backbone_config, kwargs + + +def load_backbone(config): + """ + Loads the backbone model from a config object. + + If the config is from the backbone model itself, then we return a backbone model with randomly initialized + weights. + + If the config is from the parent model of the backbone, then we build the backbone from its nested + `backbone_config`, also with randomly initialized weights. Pretrained weights, if any, are loaded later by the + parent model's `from_pretrained`. + """ + from transformers import AutoBackbone + + backbone_config = getattr(config, "backbone_config", None) + + if backbone_config is None: + backbone = AutoBackbone.from_config(config=config) + else: + backbone = AutoBackbone.from_config(config=backbone_config) + return backbone diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py index 920ac1cfbf67..2da535e45afd 100644 --- a/src/transformers/models/auto/auto_factory.py +++ b/src/transformers/models/auto/auto_factory.py @@ -21,6 +21,8 @@ from collections.abc import Iterator from typing import Any, TypeVar +from huggingface_hub import repo_exists + from ...configuration_utils import PreTrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...utils import ( @@ -416,21 +418,21 @@ def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *mod num_channels = kwargs.pop("num_channels", config.num_channels) features_only = kwargs.pop("features_only", config.features_only) - use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone) out_indices = kwargs.pop("out_indices", config.out_indices) config = TimmBackboneConfig( backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, - use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, ) - return super().from_config(config, **kwargs) + # Always load a pretrained model when `from_pretrained` is called + kwargs.pop("use_pretrained_backbone", None) + return super().from_config(config, pretrained=True, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - use_timm_backbone = kwargs.pop("use_timm_backbone", False) - if use_timm_backbone: + kwargs.pop("use_timm_backbone", None) + if not repo_exists(pretrained_model_name_or_path): return
cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/src/transformers/models/beit/configuration_beit.py b/src/transformers/models/beit/configuration_beit.py index f1bbc44dcc08..8b71245224ef 100644 --- a/src/transformers/models/beit/configuration_beit.py +++ b/src/transformers/models/beit/configuration_beit.py @@ -13,8 +13,8 @@ # limitations under the License. """BEiT model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices class BeitConfig(BackboneConfigMixin, PreTrainedConfig): @@ -186,9 +186,7 @@ def __init__( # backbone attributes self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.add_fpn = add_fpn self.reshape_hidden_states = reshape_hidden_states diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py index f9e007eb018b..0ea104786a59 100755 --- a/src/transformers/models/beit/modeling_beit.py +++ b/src/transformers/models/beit/modeling_beit.py @@ -23,6 +23,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BackboneOutput, @@ -35,7 +36,6 @@ from ...modeling_utils import PreTrainedModel from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import auto_docstring, logging, torch_int -from ...utils.backbone_utils import BackboneMixin from .configuration_beit import BeitConfig @@ -1336,10 +1336,9 @@ def forward( BEiT backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class BeitBackbone(BeitPreTrainedModel, BackboneMixin): +class BeitBackbone(BackboneMixin, BeitPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = BeitEmbeddings(config) diff --git a/src/transformers/models/bit/configuration_bit.py b/src/transformers/models/bit/configuration_bit.py index 3f2270845415..3abc39dd01d6 100644 --- a/src/transformers/models/bit/configuration_bit.py +++ b/src/transformers/models/bit/configuration_bit.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""BiT model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -127,9 +127,7 @@ def __init__( self.width_factor = width_factor self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["BitConfig"] diff --git a/src/transformers/models/bit/modeling_bit.py b/src/transformers/models/bit/modeling_bit.py index 4b69065ce2c6..4dcfba98e817 100644 --- a/src/transformers/models/bit/modeling_bit.py +++ b/src/transformers/models/bit/modeling_bit.py @@ -22,6 +22,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, @@ -30,7 +31,6 @@ ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from .configuration_bit import BitConfig @@ -759,12 +759,11 @@ def forward( BiT backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class BitBackbone(BitPreTrainedModel, BackboneMixin): +class BitBackbone(BackboneMixin, BitPreTrainedModel): has_attentions = False def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.bit = BitModel(config) self.num_features = [config.embedding_size] + config.hidden_sizes diff --git a/src/transformers/models/conditional_detr/configuration_conditional_detr.py b/src/transformers/models/conditional_detr/configuration_conditional_detr.py index e0bc2e2015a8..a8ad94920057 100644 --- a/src/transformers/models/conditional_detr/configuration_conditional_detr.py +++ b/src/transformers/models/conditional_detr/configuration_conditional_detr.py @@ -13,10 +13,10 @@ # limitations under the License. """Conditional DETR model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -33,9 +33,6 @@ class ConditionalDetrConfig(PreTrainedConfig): documentation from [`PreTrainedConfig`] for more information. Args: - use_timm_backbone (`bool`, *optional*, defaults to `True`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. @@ -81,15 +78,6 @@ class ConditionalDetrConfig(PreTrainedConfig): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. 
- backbone (`str`, *optional*, defaults to `"resnet50"`): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `True`): - Whether to use pretrained weights for the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. @@ -137,7 +125,6 @@ class ConditionalDetrConfig(PreTrainedConfig): def __init__( self, - use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, @@ -159,9 +146,6 @@ def __init__( init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", - backbone="resnet50", - use_pretrained_backbone=True, - backbone_kwargs=None, dilation=False, class_cost=2, bbox_cost=5, @@ -174,33 +158,26 @@ def __init__( focal_alpha=0.25, **kwargs, ): - # We default to values which were previously hard-coded in the model. This enables configurability of the config - # while keeping the default behavior the same. - if use_timm_backbone and backbone_kwargs is None: - backbone_kwargs = {} - if dilation: - backbone_kwargs["output_stride"] = 16 - backbone_kwargs["out_indices"] = [1, 2, 3, 4] - backbone_kwargs["in_chans"] = num_channels - # Backwards compatibility - elif not use_timm_backbone and backbone in (None, "resnet50"): - if backbone_config is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + # Init timm backbone with hardcoded values for BC + backbone_kwargs = kwargs.get("backbone_kwargs", {}) + timm_default_kwargs = { + "num_channels": backbone_kwargs.get("num_channels", num_channels), + "features_only": True, + "use_pretrained_backbone": False, + "out_indices": backbone_kwargs.get("out_indices", [1, 2, 3, 4]), + } + if dilation: + timm_default_kwargs["output_stride"] = backbone_kwargs.get("output_stride", 16) + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_backbone="resnet50", + default_config_type="resnet", + default_config_kwargs={"out_features": ["stage4"]}, + timm_default_kwargs=timm_default_kwargs, + **kwargs, ) - self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries @@ -222,10 +199,6 @@ def __init__( self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.backbone_kwargs = backbone_kwargs - self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index 0660aa5d2736..4ead660da818 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -17,7 +17,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import math from collections.abc import Callable from dataclasses import dataclass @@ -27,22 +26,18 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...masking_utils import create_bidirectional_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache -from ...utils import ModelOutput, TransformersKwargs, auto_docstring, is_timm_available, requires_backends -from ...utils.backbone_utils import load_backbone +from ...utils import ModelOutput, TransformersKwargs, auto_docstring from ...utils.generic import OutputRecorder, can_return_tuple, check_model_inputs from .configuration_conditional_detr import ConditionalDetrConfig -if is_timm_available(): - from timm import create_model - - @dataclass @auto_docstring( custom_intro=""" @@ -262,56 +257,25 @@ def __init__(self, config): self.config = config - # For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API - if config.use_timm_backbone: - # We default to values which were previously hard-coded. This enables configurability from the config - # using backbone arguments, while keeping the default behavior the same. - requires_backends(self, ["timm"]) - kwargs = getattr(config, "backbone_kwargs", {}) - kwargs = {} if kwargs is None else kwargs.copy() - out_indices = kwargs.pop("out_indices", (1, 2, 3, 4)) - num_channels = kwargs.pop("in_chans", config.num_channels) - if config.dilation: - kwargs["output_stride"] = kwargs.get("output_stride", 16) - - # When loading pretrained weights, temporarily exit meta device to avoid warnings. - # If on meta device, create on CPU; otherwise use nullcontext (no-op). 
- is_meta = torch.empty(0).device.type == "meta" - device_ctx = ( - torch.device("cpu") if (config.use_pretrained_backbone and is_meta) else contextlib.nullcontext() - ) - - with device_ctx: - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - out_indices=out_indices, - in_chans=num_channels, - **kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) + self.intermediate_channel_sizes = backbone.channels # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) - self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + # We used to load with timm library directly instead of the AutoBackbone API + # so we need to unwrap the `backbone._backbone` module to load weights without mismatch + is_timm_model = False + if hasattr(backbone, "_backbone"): + backbone = backbone._backbone + is_timm_model = True + self.model = backbone + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: + if is_timm_model: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: @@ -320,7 +284,9 @@ def __init__(self, config): def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps - features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + features = self.model(pixel_values) + if isinstance(features, dict): + features = features.feature_maps out = [] for feature_map in features: diff --git a/src/transformers/models/conditional_detr/modular_conditional_detr.py b/src/transformers/models/conditional_detr/modular_conditional_detr.py index d6b888c334fe..3505bbc3806b 100644 --- a/src/transformers/models/conditional_detr/modular_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modular_conditional_detr.py @@ -30,7 +30,6 @@ TensorType, TransformersKwargs, auto_docstring, - is_timm_available, logging, ) from ...utils.generic import OutputRecorder, can_return_tuple, check_model_inputs @@ -59,10 +58,6 @@ from .configuration_conditional_detr import ConditionalDetrConfig -if is_timm_available(): - pass - - logger = logging.get_logger(__name__) diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py index 419d859ca024..09478f3adf80 100644 --- a/src/transformers/models/convnext/configuration_convnext.py +++ b/src/transformers/models/convnext/configuration_convnext.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""ConvNeXT model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -111,9 +111,7 @@ def __init__( self.drop_path_rate = drop_path_rate self.image_size = image_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["ConvNextConfig"] diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py index e1efc22f985e..b3a3bf96d8c3 100755 --- a/src/transformers/models/convnext/modeling_convnext.py +++ b/src/transformers/models/convnext/modeling_convnext.py @@ -18,6 +18,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, @@ -26,7 +27,6 @@ ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import can_return_tuple from .configuration_convnext import ConvNextConfig @@ -344,12 +344,11 @@ def forward( ConvNeXt backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class ConvNextBackbone(ConvNextPreTrainedModel, BackboneMixin): +class ConvNextBackbone(BackboneMixin, ConvNextPreTrainedModel): has_attentions = False def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.embeddings = ConvNextEmbeddings(config) self.encoder = ConvNextEncoder(config) @@ -357,7 +356,7 @@ def __init__(self, config): # Add layer norms to hidden states of out_features hidden_states_norms = {} - for stage, num_channels in zip(self._out_features, self.channels): + for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = ConvNextLayerNorm(num_channels, data_format="channels_first") self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) diff --git a/src/transformers/models/convnextv2/configuration_convnextv2.py b/src/transformers/models/convnextv2/configuration_convnextv2.py index f7fc5bf24f5e..9bb63b50889d 100644 --- a/src/transformers/models/convnextv2/configuration_convnextv2.py +++ b/src/transformers/models/convnextv2/configuration_convnextv2.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""ConvNeXTV2 model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -109,9 +109,7 @@ def __init__( self.drop_path_rate = drop_path_rate self.image_size = image_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["ConvNextV2Config"] diff --git a/src/transformers/models/convnextv2/modeling_convnextv2.py b/src/transformers/models/convnextv2/modeling_convnextv2.py index 66d075e38c84..e7af1964357d 100644 --- a/src/transformers/models/convnextv2/modeling_convnextv2.py +++ b/src/transformers/models/convnextv2/modeling_convnextv2.py @@ -18,6 +18,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, @@ -26,7 +27,6 @@ ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import can_return_tuple from .configuration_convnextv2 import ConvNextV2Config @@ -367,12 +367,11 @@ def forward( """ ) # Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224 -class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin): +class ConvNextV2Backbone(BackboneMixin, ConvNextV2PreTrainedModel): has_attentions = False def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.embeddings = ConvNextV2Embeddings(config) self.encoder = ConvNextV2Encoder(config) @@ -380,7 +379,7 @@ def __init__(self, config): # Add layer norms to hidden states of out_features hidden_states_norms = {} - for stage, num_channels in zip(self._out_features, self.channels): + for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first") self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) diff --git a/src/transformers/models/d_fine/configuration_d_fine.py b/src/transformers/models/d_fine/configuration_d_fine.py index 247298dad6ae..1153866212d8 100644 --- a/src/transformers/models/d_fine/configuration_d_fine.py +++ b/src/transformers/models/d_fine/configuration_d_fine.py @@ -17,13 +17,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig -from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig - - -logger = logging.get_logger(__name__) +from ..auto import AutoConfig # TODO: Attribute map assignment logic should be fixed in modular @@ -48,20 +44,8 @@ class DFineConfig(PreTrainedConfig): The epsilon used by the batch normalization layers. 
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `HGNetV2Config()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -210,11 +194,7 @@ def __init__( batch_norm_eps=1e-5, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -283,46 +263,16 @@ def __init__( self.initializer_bias_prior_prob = initializer_bias_prior_prob self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `HGNet-V2` backbone." 
- ) - backbone_model_type = "hgnet_v2" - config_class = CONFIG_MAPPING[backbone_model_type] - # this will map it to HGNetV2Config - # and we would need to create HGNetV2Backbone - backbone_config = config_class( - num_channels=3, - embedding_size=64, - hidden_sizes=[256, 512, 1024, 2048], - depths=[3, 4, 6, 3], - layer_type="bottleneck", - hidden_act="relu", - downsample_in_first_stage=False, - downsample_in_bottleneck=False, - out_features=None, - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="hgnet_v2", + default_config_kwargs={"out_indices": [2, 3, 4]}, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = backbone_kwargs # encoder self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels diff --git a/src/transformers/models/d_fine/modeling_d_fine.py b/src/transformers/models/d_fine/modeling_d_fine.py index 31210340b53d..284412e370e6 100644 --- a/src/transformers/models/d_fine/modeling_d_fine.py +++ b/src/transformers/models/d_fine/modeling_d_fine.py @@ -28,13 +28,13 @@ from ... import initialization as init from ...activations import ACT2CLS +from ...backbone_utils import load_backbone from ...image_transforms import center_to_corners_format, corners_to_center_format from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import ModelOutput, TransformersKwargs, auto_docstring, torch_compilable_check, torch_int -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_d_fine import DFineConfig diff --git a/src/transformers/models/d_fine/modular_d_fine.py b/src/transformers/models/d_fine/modular_d_fine.py index 82bd62a2fbb0..ac0809f92d4f 100644 --- a/src/transformers/models/d_fine/modular_d_fine.py +++ b/src/transformers/models/d_fine/modular_d_fine.py @@ -19,18 +19,12 @@ from ... import initialization as init from ...activations import ACT2CLS +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...image_transforms import corners_to_center_format from ...processing_utils import Unpack -from ...utils import ( - TransformersKwargs, - logging, - torch_compilable_check, -) -from ...utils.backbone_utils import ( - verify_backbone_config_arguments, -) -from ..auto import CONFIG_MAPPING, AutoConfig +from ...utils import TransformersKwargs, logging, torch_compilable_check +from ..auto import AutoConfig from ..rt_detr.modeling_rt_detr import ( RTDetrAIFILayer, RTDetrConvNormLayer, @@ -75,20 +69,8 @@ class DFineConfig(PreTrainedConfig): The epsilon used by the batch normalization layers. 
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `HGNetV2Config()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -237,11 +219,7 @@ def __init__( batch_norm_eps=1e-5, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -310,46 +288,16 @@ def __init__( self.initializer_bias_prior_prob = initializer_bias_prior_prob self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `HGNet-V2` backbone." 
- ) - backbone_model_type = "hgnet_v2" - config_class = CONFIG_MAPPING[backbone_model_type] - # this will map it to HGNetV2Config - # and we would need to create HGNetV2Backbone - backbone_config = config_class( - num_channels=3, - embedding_size=64, - hidden_sizes=[256, 512, 1024, 2048], - depths=[3, 4, 6, 3], - layer_type="bottleneck", - hidden_act="relu", - downsample_in_first_stage=False, - downsample_in_bottleneck=False, - out_features=None, - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="hgnet_v2", + default_config_kwargs={"out_indices": [2, 3, 4]}, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = backbone_kwargs # encoder self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels diff --git a/src/transformers/models/dab_detr/configuration_dab_detr.py b/src/transformers/models/dab_detr/configuration_dab_detr.py index 2e197593c6f3..f4e3d2e062ad 100644 --- a/src/transformers/models/dab_detr/configuration_dab_detr.py +++ b/src/transformers/models/dab_detr/configuration_dab_detr.py @@ -13,10 +13,10 @@ # limitations under the License. """DAB-DETR model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -33,21 +33,9 @@ class DabDetrConfig(PreTrainedConfig): documentation from [`PreTrainedConfig`] for more information. Args: - use_timm_backbone (`bool`, *optional*, defaults to `True`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. - backbone (`str`, *optional*, defaults to `"resnet50"`): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `True`): - Whether to use pretrained weights for the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. num_queries (`int`, *optional*, defaults to 300): Number of object queries, i.e. detection slots. 
This is the maximal number of objects [`DabDetrModel`] can detect in a single image. For COCO, we recommend 100 queries. @@ -145,11 +133,7 @@ class DabDetrConfig(PreTrainedConfig): def __init__( self, - use_timm_backbone=True, backbone_config=None, - backbone="resnet50", - use_pretrained_backbone=True, - backbone_kwargs=None, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, @@ -189,36 +173,25 @@ def __init__( if query_dim != 4: raise ValueError("The query dimensions has to be 4.") - # We default to values which were previously hard-coded in the model. This enables configurability of the config - # while keeping the default behavior the same. - if use_timm_backbone and backbone_kwargs is None: - backbone_kwargs = {} - if dilation: - backbone_kwargs["output_stride"] = 16 - backbone_kwargs["out_indices"] = [1, 2, 3, 4] - backbone_kwargs["in_chans"] = 3 # num_channels - # Backwards compatibility - elif not use_timm_backbone and backbone in (None, "resnet50"): - if backbone_config is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - backbone = None - # set timm attributes to None - dilation = None - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + # Init timm backbone with hardcoded values for BC + timm_default_kwargs = { + "num_channels": 3, + "features_only": True, + "use_pretrained_backbone": False, + "out_indices": [1, 2, 3, 4], + } + if dilation: + timm_default_kwargs["output_stride"] = 16 + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_backbone="resnet50", + default_config_type="resnet", + default_config_kwargs={"out_features": ["stage4"]}, + timm_default_kwargs=timm_default_kwargs, + **kwargs, ) - self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_queries = num_queries self.hidden_size = hidden_size @@ -236,9 +209,6 @@ def __init__( self.init_xavier_std = init_xavier_std self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.backbone_kwargs = backbone_kwargs # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost diff --git a/src/transformers/models/dab_detr/modeling_dab_detr.py b/src/transformers/models/dab_detr/modeling_dab_detr.py index 0f80b8529a5b..745554529d96 100644 --- a/src/transformers/models/dab_detr/modeling_dab_detr.py +++ b/src/transformers/models/dab_detr/modeling_dab_detr.py @@ -21,6 +21,7 @@ from ...
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput @@ -30,7 +31,6 @@ auto_docstring, logging, ) -from ...utils.backbone_utils import load_backbone from .configuration_dab_detr import DabDetrConfig diff --git a/src/transformers/models/deformable_detr/configuration_deformable_detr.py b/src/transformers/models/deformable_detr/configuration_deformable_detr.py index 045068ec702d..ad7f56eb763a 100644 --- a/src/transformers/models/deformable_detr/configuration_deformable_detr.py +++ b/src/transformers/models/deformable_detr/configuration_deformable_detr.py @@ -13,10 +13,10 @@ # limitations under the License. """Deformable DETR model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -33,9 +33,6 @@ class DeformableDetrConfig(PreTrainedConfig): documentation from [`PreTrainedConfig`] for more information. Args: - use_timm_backbone (`bool`, *optional*, defaults to `True`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. @@ -79,15 +76,6 @@ class DeformableDetrConfig(PreTrainedConfig): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. - backbone (`str`, *optional*, defaults to `"resnet50"`): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `True`): - Whether to use pretrained weights for the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. 
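For reference, a minimal usage sketch of how a DETR-style model could be configured once the legacy `backbone`, `use_timm_backbone`, `use_pretrained_backbone` and `backbone_kwargs` arguments are gone: the backbone is described entirely by `backbone_config`. This assumes the refactored constructors keep accepting `backbone_config` as shown above; the hub id and the `out_indices` override are illustrative only.

```py
from transformers import AutoConfig, DeformableDetrConfig, DeformableDetrForObjectDetection

# Any Transformers backbone config can be passed here; the checkpoint and out_indices are illustrative.
backbone_config = AutoConfig.from_pretrained("microsoft/resnet-50", out_indices=[2, 3, 4])

# The backbone is selected purely through `backbone_config`; no timm/pretrained flags anymore.
config = DeformableDetrConfig(backbone_config=backbone_config)

# Building from a config gives randomly initialized weights; use `from_pretrained` for trained ones.
model = DeformableDetrForObjectDetection(config)
```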
@@ -153,7 +141,6 @@ class DeformableDetrConfig(PreTrainedConfig): def __init__( self, - use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, @@ -176,9 +163,6 @@ def __init__( return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", - backbone="resnet50", - use_pretrained_backbone=True, - backbone_kwargs=None, dilation=False, num_feature_levels=4, encoder_n_points=4, @@ -199,33 +183,25 @@ def __init__( tie_word_embeddings=True, **kwargs, ): - # We default to values which were previously hard-coded in the model. This enables configurability of the config - # while keeping the default behavior the same. - if use_timm_backbone and backbone_kwargs is None: - backbone_kwargs = {} - if dilation: - backbone_kwargs["output_stride"] = 16 - backbone_kwargs["out_indices"] = [2, 3, 4] if num_feature_levels > 1 else [4] - backbone_kwargs["in_chans"] = num_channels - # Backwards compatibility - elif not use_timm_backbone and backbone in (None, "resnet50"): - if backbone_config is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + # Init timm backbone with hardcoded values for BC + timm_default_kwargs = { + "num_channels": num_channels, + "features_only": True, + "use_pretrained_backbone": False, + "out_indices": [2, 3, 4] if num_feature_levels > 1 else [4], + } + if dilation: + timm_default_kwargs["output_stride"] = 16 + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_backbone="resnet50", + default_config_type="resnet", + default_config_kwargs={"out_features": ["stage4"]}, + timm_default_kwargs=timm_default_kwargs, + **kwargs, ) - self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries @@ -246,9 +222,6 @@ def __init__( self.encoder_layerdrop = encoder_layerdrop self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.backbone_kwargs = backbone_kwargs self.dilation = dilation # deformable attributes self.num_feature_levels = num_feature_levels diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py index cbf34d97f457..e719132f3844 100755 --- a/src/transformers/models/deformable_detr/modeling_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -17,7 +17,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import math import warnings from collections.abc import Callable @@ -30,29 +29,18 @@ from ...
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...integrations import use_kernel_forward_from_hub from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache, meshgrid -from ...utils import ( - ModelOutput, - TransformersKwargs, - auto_docstring, - is_timm_available, - requires_backends, - torch_compilable_check, -) -from ...utils.backbone_utils import load_backbone +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, torch_compilable_check from ...utils.generic import OutputRecorder, can_return_tuple, check_model_inputs from .configuration_deformable_detr import DeformableDetrConfig -if is_timm_available(): - from timm import create_model - - @dataclass @auto_docstring( custom_intro=""" @@ -309,54 +297,25 @@ def __init__(self, config): self.config = config - # For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API - if config.use_timm_backbone: - # We default to values which were previously hard-coded. This enables configurability from the config - # using backbone arguments, while keeping the default behavior the same. - requires_backends(self, ["timm"]) - kwargs = getattr(config, "backbone_kwargs", {}) - kwargs = {} if kwargs is None else kwargs.copy() - out_indices = kwargs.pop("out_indices", (2, 3, 4) if config.num_feature_levels > 1 else (4,)) - num_channels = kwargs.pop("in_chans", config.num_channels) - if config.dilation: - kwargs["output_stride"] = kwargs.get("output_stride", 16) - - # When loading pretrained weights, temporarily exit meta device - is_meta = torch.empty(0).device.type == "meta" - device_ctx = ( - torch.device("cpu") if (config.use_pretrained_backbone and is_meta) else contextlib.nullcontext() - ) - with device_ctx: - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - out_indices=out_indices, - in_chans=num_channels, - **kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) + self.intermediate_channel_sizes = backbone.channels # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) - self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + # We used to load with timm library directly instead of the AutoBackbone API + # so we need to unwrap the `backbone._backbone` module to load weights without mismatch + is_timm_model = False + if hasattr(backbone, "_backbone"): + backbone = backbone._backbone + is_timm_model = True + self.model = backbone + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: + if is_timm_model: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: @@ 
-365,7 +324,9 @@ def __init__(self, config): def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps - features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + features = self.model(pixel_values) + if isinstance(features, dict): + features = features.feature_maps out = [] for feature_map in features: diff --git a/src/transformers/models/deformable_detr/modular_deformable_detr.py b/src/transformers/models/deformable_detr/modular_deformable_detr.py index 1829f2f7f17b..d0889dd6d1b4 100644 --- a/src/transformers/models/deformable_detr/modular_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modular_deformable_detr.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import math import warnings from dataclasses import dataclass @@ -22,6 +21,7 @@ from torch import Tensor from ... import initialization as init +from ...backbone_utils import load_backbone from ...image_transforms import center_to_corners_format from ...integrations import use_kernel_forward_from_hub from ...modeling_outputs import BaseModelOutput @@ -33,12 +33,9 @@ TensorType, TransformersKwargs, auto_docstring, - is_timm_available, logging, - requires_backends, torch_compilable_check, ) -from ...utils.backbone_utils import load_backbone from ...utils.generic import OutputRecorder, can_return_tuple, check_model_inputs from ..detr.image_processing_detr_fast import DetrImageProcessorFast from ..detr.modeling_detr import ( @@ -58,9 +55,6 @@ from .configuration_deformable_detr import DeformableDetrConfig -if is_timm_available(): - from timm import create_model - logger = logging.get_logger(__name__) @@ -298,54 +292,25 @@ def __init__(self, config): self.config = config - # For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API - if config.use_timm_backbone: - # We default to values which were previously hard-coded. This enables configurability from the config - # using backbone arguments, while keeping the default behavior the same. 
- requires_backends(self, ["timm"]) - kwargs = getattr(config, "backbone_kwargs", {}) - kwargs = {} if kwargs is None else kwargs.copy() - out_indices = kwargs.pop("out_indices", (2, 3, 4) if config.num_feature_levels > 1 else (4,)) - num_channels = kwargs.pop("in_chans", config.num_channels) - if config.dilation: - kwargs["output_stride"] = kwargs.get("output_stride", 16) - - # When loading pretrained weights, temporarily exit meta device - is_meta = torch.empty(0).device.type == "meta" - device_ctx = ( - torch.device("cpu") if (config.use_pretrained_backbone and is_meta) else contextlib.nullcontext() - ) - with device_ctx: - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - out_indices=out_indices, - in_chans=num_channels, - **kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) + self.intermediate_channel_sizes = backbone.channels # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) - self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + # We used to load with timm library directly instead of the AutoBackbone API + # so we need to unwrap the `backbone._backbone` module to load weights without mismatch + is_timm_model = False + if hasattr(backbone, "_backbone"): + backbone = backbone._backbone + is_timm_model = True + self.model = backbone + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: + if is_timm_model: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: diff --git a/src/transformers/models/depth_anything/configuration_depth_anything.py b/src/transformers/models/depth_anything/configuration_depth_anything.py index bc5cc3bc877e..ccd816f18796 100644 --- a/src/transformers/models/depth_anything/configuration_depth_anything.py +++ b/src/transformers/models/depth_anything/configuration_depth_anything.py @@ -13,10 +13,10 @@ # limitations under the License. """DepthAnything model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig +from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) @@ -35,18 +35,6 @@ class DepthAnythingConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Dinov2Config()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. 
- use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. patch_size (`int`, *optional*, defaults to 14): The size of the patches to extract from the backbone features. initializer_range (`float`, *optional*, defaults to 0.02): @@ -90,10 +78,6 @@ class DepthAnythingConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, patch_size=14, initializer_range=0.02, reassemble_hidden_size=384, @@ -106,34 +90,20 @@ def __init__( max_depth=None, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.") - backbone_config = CONFIG_MAPPING["dinov2"]( - image_size=518, - hidden_size=384, - num_attention_heads=6, - out_indices=[9, 10, 11, 12], - apply_layernorm=True, - reshape_hidden_states=False, - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="dinov2", + default_config_kwargs={ + "image_size": 518, + "hidden_size": 384, + "num_attention_heads": 6, + "out_indices": [9, 10, 11, 12], + "reshape_hidden_states": False, + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.reassemble_hidden_size = reassemble_hidden_size self.patch_size = patch_size self.initializer_range = initializer_range diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py index 2082f877cf6e..16e1e3c0319c 100644 --- a/src/transformers/models/depth_anything/modeling_depth_anything.py +++ b/src/transformers/models/depth_anything/modeling_depth_anything.py @@ -16,10 +16,10 @@ import torch from torch import nn +from ...backbone_utils import load_backbone from ...modeling_outputs import DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import load_backbone from .configuration_depth_anything import DepthAnythingConfig diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index 20d6acb48f8d..87835ba098a2 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -13,10 +13,10 @@ # limitations under the License. 
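As a sanity check on the default path above: constructing `DepthAnythingConfig` with no backbone arguments at all is expected to fall back to the small DINOv2 backbone configuration listed in the consolidation call, and a model built from that config is randomly initialized. The printed values below are indicative only.

```py
from transformers import DepthAnythingConfig, DepthAnythingForDepthEstimation

config = DepthAnythingConfig()                  # no backbone arguments: default Dinov2 backbone config
print(config.backbone_config.model_type)       # "dinov2"
print(config.backbone_config.out_indices)      # e.g. [9, 10, 11, 12]

model = DepthAnythingForDepthEstimation(config)  # randomly initialized; use from_pretrained for trained weights
```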
"""DETR model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -33,9 +33,6 @@ class DetrConfig(PreTrainedConfig): documentation from [`PreTrainedConfig`] for more information. Args: - use_timm_backbone (`bool`, *optional*, defaults to `True`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. @@ -81,15 +78,6 @@ class DetrConfig(PreTrainedConfig): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. - backbone (`str`, *optional*, defaults to `"resnet50"`): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, `True`): - Whether to use pretrained weights for the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. @@ -135,7 +123,6 @@ class DetrConfig(PreTrainedConfig): def __init__( self, - use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, @@ -157,9 +144,6 @@ def __init__( init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", - backbone="resnet50", - use_pretrained_backbone=True, - backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, @@ -171,36 +155,25 @@ def __init__( eos_coefficient=0.1, **kwargs, ): - # We default to values which were previously hard-coded in the model. This enables configurability of the config - # while keeping the default behavior the same. - if use_timm_backbone and backbone_kwargs is None: - backbone_kwargs = {} - if dilation: - backbone_kwargs["output_stride"] = 16 - backbone_kwargs["out_indices"] = [1, 2, 3, 4] - backbone_kwargs["in_chans"] = num_channels - # Backwards compatibility - elif not use_timm_backbone and backbone in (None, "resnet50"): - if backbone_config is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - backbone = None - # set timm attributes to None - dilation = None - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_kwargs = kwargs.get("backbone_kwargs", {}) + timm_default_kwargs = { + "num_channels": backbone_kwargs.get("num_channels", num_channels), + "features_only": True, + "use_pretrained_backbone": False, + "out_indices": backbone_kwargs.get("out_indices", [1, 2, 3, 4]), + } + if dilation: + timm_default_kwargs["output_stride"] = backbone_kwargs.get("output_stride", 16) + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_backbone="resnet50", + default_config_type="resnet", + default_config_kwargs={"out_features": ["stage4"]}, + timm_default_kwargs=timm_default_kwargs, + **kwargs, ) - self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries @@ -222,10 +195,6 @@ def __init__( self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.backbone_kwargs = backbone_kwargs - self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 48a61b8af1bb..63d75648e600 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -13,7 +13,6 @@ # limitations under the License. """PyTorch DETR model.""" -import contextlib import math from collections.abc import Callable from dataclasses import dataclass @@ -23,6 +22,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...masking_utils import create_bidirectional_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( @@ -37,19 +37,12 @@ ModelOutput, TransformersKwargs, auto_docstring, - is_timm_available, logging, - requires_backends, ) -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_detr import DetrConfig -if is_timm_available(): - from timm import create_model - - logger = logging.get_logger(__name__) @@ -264,56 +257,25 @@ def __init__(self, config): self.config = config - # For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API - if config.use_timm_backbone: - # We default to values which were previously hard-coded. This enables configurability from the config - # using backbone arguments, while keeping the default behavior the same. 
- requires_backends(self, ["timm"]) - kwargs = getattr(config, "backbone_kwargs", {}) - kwargs = {} if kwargs is None else kwargs.copy() - out_indices = kwargs.pop("out_indices", (1, 2, 3, 4)) - num_channels = kwargs.pop("in_chans", config.num_channels) - if config.dilation: - kwargs["output_stride"] = kwargs.get("output_stride", 16) - - # When loading pretrained weights, temporarily exit meta device to avoid warnings. - # If on meta device, create on CPU; otherwise use nullcontext (no-op). - is_meta = torch.empty(0).device.type == "meta" - device_ctx = ( - torch.device("cpu") if (config.use_pretrained_backbone and is_meta) else contextlib.nullcontext() - ) - - with device_ctx: - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - out_indices=out_indices, - in_chans=num_channels, - **kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) + self.intermediate_channel_sizes = backbone.channels # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) - self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + # We used to load with timm library directly instead of the AutoBackbone API + # so we need to unwrap the `backbone._backbone` module to load weights without mismatch + is_timm_model = False + if hasattr(backbone, "_backbone"): + backbone = backbone._backbone + is_timm_model = True + self.model = backbone + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: + if is_timm_model: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: @@ -322,7 +284,9 @@ def __init__(self, config): def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps - features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + features = self.model(pixel_values) + if isinstance(features, dict): + features = features.feature_maps out = [] for feature_map in features: diff --git a/src/transformers/models/dinat/configuration_dinat.py b/src/transformers/models/dinat/configuration_dinat.py index c0cfdc79d98b..44f5cf1b9fba 100644 --- a/src/transformers/models/dinat/configuration_dinat.py +++ b/src/transformers/models/dinat/configuration_dinat.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""Dilated Neighborhood Attention Transformer model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -143,9 +143,7 @@ def __init__( self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.layer_scale_init_value = layer_scale_init_value self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["DinatConfig"] diff --git a/src/transformers/models/dinat/modeling_dinat.py b/src/transformers/models/dinat/modeling_dinat.py index 2d86c4141469..438e5384771f 100644 --- a/src/transformers/models/dinat/modeling_dinat.py +++ b/src/transformers/models/dinat/modeling_dinat.py @@ -20,6 +20,7 @@ from torch import nn from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import ( @@ -30,7 +31,6 @@ logging, requires_backends, ) -from ...utils.backbone_utils import BackboneMixin from .configuration_dinat import DinatConfig @@ -710,10 +710,9 @@ def forward( NAT backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class DinatBackbone(DinatPreTrainedModel, BackboneMixin): +class DinatBackbone(BackboneMixin, DinatPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) requires_backends(self, ["natten"]) @@ -723,7 +722,7 @@ def __init__(self, config): # Add layer norms to hidden states of out_features hidden_states_norms = {} - for stage, num_channels in zip(self._out_features, self.channels): + for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) diff --git a/src/transformers/models/dinov2/configuration_dinov2.py b/src/transformers/models/dinov2/configuration_dinov2.py index 159efad3eaeb..16ad52cbbf0a 100644 --- a/src/transformers/models/dinov2/configuration_dinov2.py +++ b/src/transformers/models/dinov2/configuration_dinov2.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""DINOv2 model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -145,9 +145,7 @@ def __init__( self.drop_path_rate = drop_path_rate self.use_swiglu_ffn = use_swiglu_ffn self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states self.use_mask_token = use_mask_token diff --git a/src/transformers/models/dinov2/modeling_dinov2.py b/src/transformers/models/dinov2/modeling_dinov2.py index 62c6561c7c6a..a470a07d865f 100644 --- a/src/transformers/models/dinov2/modeling_dinov2.py +++ b/src/transformers/models/dinov2/modeling_dinov2.py @@ -21,12 +21,12 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging, torch_int -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_dinov2 import Dinov2Config @@ -544,10 +544,9 @@ def forward( Dinov2 backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin): +class Dinov2Backbone(BackboneMixin, Dinov2PreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = Dinov2Embeddings(config) diff --git a/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py index 91f69ebcc19a..34839a3bf335 100644 --- a/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +++ b/src/transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py @@ -20,8 +20,8 @@ # limitations under the License. 
+from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices class Dinov2WithRegistersConfig(BackboneConfigMixin, PreTrainedConfig): @@ -149,9 +149,7 @@ def __init__( self.use_swiglu_ffn = use_swiglu_ffn self.num_register_tokens = num_register_tokens self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states diff --git a/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py index 1c6d199f221b..618c50b57349 100644 --- a/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +++ b/src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py @@ -28,12 +28,12 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, torch_int -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig @@ -564,10 +564,9 @@ def forward( Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin): +class Dinov2WithRegistersBackbone(BackboneMixin, Dinov2WithRegistersPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = Dinov2WithRegistersEmbeddings(config) self.encoder = Dinov2WithRegistersEncoder(config) diff --git a/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py b/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py index c98df69cc352..e251785e81cf 100644 --- a/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +++ b/src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py @@ -26,11 +26,11 @@ Dinov2PreTrainedModel, ) from ... 
import initialization as init +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput from ...processing_utils import Unpack from ...utils import TransformersKwargs, logging, torch_int -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -161,9 +161,7 @@ def __init__( self.use_swiglu_ffn = use_swiglu_ffn self.num_register_tokens = num_register_tokens self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states @@ -338,7 +336,6 @@ def forward( class Dinov2WithRegistersBackbone(Dinov2Backbone): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_register_tokens = config.num_register_tokens self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] diff --git a/src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py b/src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py index 3f0bf573d99c..6353a86e5d62 100644 --- a/src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +++ b/src/transformers/models/dinov3_convnext/configuration_dinov3_convnext.py @@ -13,9 +13,9 @@ # limitations under the License. """ConvNeXT model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -105,9 +105,7 @@ def __init__( self.drop_path_rate = drop_path_rate self.image_size = image_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) @property def num_stages(self) -> int: diff --git a/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py b/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py index a12dbdec3f81..844bce469009 100644 --- a/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +++ b/src/transformers/models/dinov3_convnext/modeling_dinov3_convnext.py @@ -19,10 +19,10 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import BackboneOutput, BaseModelOutputWithPoolingAndNoAttention from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import can_return_tuple from .configuration_dinov3_convnext import DINOv3ConvNextConfig @@ -244,12 +244,11 @@ def forward( @auto_docstring -class DINOv3ConvNextBackbone(DINOv3ConvNextPreTrainedModel, BackboneMixin): +class DINOv3ConvNextBackbone(BackboneMixin, DINOv3ConvNextPreTrainedModel): config: DINOv3ConvNextConfig def __init__(self, config: DINOv3ConvNextConfig): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.num_channels] + list(config.hidden_sizes) diff --git a/src/transformers/models/dinov3_vit/configuration_dinov3_vit.py b/src/transformers/models/dinov3_vit/configuration_dinov3_vit.py index b92af872ed0a..dcb7fcdf47c4 100644 --- a/src/transformers/models/dinov3_vit/configuration_dinov3_vit.py +++ b/src/transformers/models/dinov3_vit/configuration_dinov3_vit.py @@ -13,9 +13,9 @@ # limitations under the License. """DINOv3 model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -182,9 +182,7 @@ def __init__( self.stage_names = stage_names # Initialize backbone features/indices - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["DINOv3ViTConfig"] diff --git a/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py b/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py index ddc6feefb657..114c76652311 100644 --- a/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py +++ b/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py @@ -27,13 +27,13 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import TransformersKwargs, auto_docstring, can_return_tuple -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import check_model_inputs, maybe_autocast from .configuration_dinov3_vit import DINOv3ViTConfig @@ -516,10 +516,9 @@ def forward( @auto_docstring -class DINOv3ViTBackbone(DINOv3ViTPreTrainedModel, BackboneMixin): +class DINOv3ViTBackbone(BackboneMixin, DINOv3ViTPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.embeddings = DINOv3ViTEmbeddings(config) self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config) diff --git a/src/transformers/models/dinov3_vit/modular_dinov3_vit.py b/src/transformers/models/dinov3_vit/modular_dinov3_vit.py index bc1378c14bc0..c7c63de770e3 100644 --- a/src/transformers/models/dinov3_vit/modular_dinov3_vit.py +++ b/src/transformers/models/dinov3_vit/modular_dinov3_vit.py @@ -31,13 +31,13 @@ from transformers.models.pixtral.modeling_pixtral import PixtralAttention, rotate_half from ... import initialization as init +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import check_model_inputs, maybe_autocast from .configuration_dinov3_vit import DINOv3ViTConfig @@ -411,10 +411,9 @@ def forward( @auto_docstring -class DINOv3ViTBackbone(DINOv3ViTPreTrainedModel, BackboneMixin): +class DINOv3ViTBackbone(BackboneMixin, DINOv3ViTPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.embeddings = DINOv3ViTEmbeddings(config) self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config) diff --git a/src/transformers/models/dpt/configuration_dpt.py b/src/transformers/models/dpt/configuration_dpt.py index 8ff75895e210..9330a475a6cd 100644 --- a/src/transformers/models/dpt/configuration_dpt.py +++ b/src/transformers/models/dpt/configuration_dpt.py @@ -13,11 +13,10 @@ # limitations under the License. """DPT model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig -from ..bit import BitConfig +from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) @@ -104,18 +103,6 @@ class DPTConfig(PreTrainedConfig): backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `BitConfig()`): The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to leverage the [`AutoBackbone`] API. 
- backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. pooler_output_size (`int`, *optional*): Dimensionality of the pooler layer. If None, defaults to `hidden_size`. pooler_act (`str`, *optional*, defaults to `"tanh"`): @@ -171,10 +158,6 @@ def __init__( backbone_featmap_shape=[1, 1024, 24, 24], neck_ignore_stages=[0, 1], backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, pooler_output_size=None, pooler_act="tanh", **kwargs, @@ -182,59 +165,35 @@ def __init__( self.hidden_size = hidden_size self.is_hybrid = is_hybrid - use_autobackbone = False + if readout_type not in ["ignore", "add", "project"]: + raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") + if self.is_hybrid: - if backbone_config is None: - backbone_config = { + if isinstance(backbone_config, dict): + backbone_config.setdefault("model_type", "bit") + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( + backbone_config=backbone_config, + default_config_type="bit", + default_config_kwargs={ "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, - } - - if isinstance(backbone_config, dict): - logger.info("Initializing the config with a `BiT` backbone.") - backbone_config = BitConfig(**backbone_config) - elif not isinstance(backbone_config, PreTrainedConfig): - raise ValueError( - f"backbone_config must be a dictionary or a `PreTrainedConfig`, got {backbone_config.__class__}." 
- ) - self.backbone_config = backbone_config - self.backbone_featmap_shape = backbone_featmap_shape - self.neck_ignore_stages = neck_ignore_stages - + }, + **kwargs, + ) if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.") - - elif backbone is not None or backbone_config is not None: - use_autobackbone = True - if isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - self.backbone_config = backbone_config - self.backbone_featmap_shape = None - self.neck_ignore_stages = [] - - # We only use load_backbone when config.is_hydrid is False - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + elif kwargs.get("backbone") is not None or backbone_config is not None: + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + **kwargs, ) - else: - self.backbone_config = None - self.backbone_featmap_shape = None - self.neck_ignore_stages = [] + backbone_out_indices = None - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs + self.backbone_config = backbone_config # ViT parameters used if not using a hybrid backbone self.num_hidden_layers = num_hidden_layers @@ -247,11 +206,10 @@ def __init__( self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias - self.use_autobackbone = use_autobackbone - self.backbone_out_indices = None if use_autobackbone else backbone_out_indices + self.backbone_out_indices = backbone_out_indices + self.backbone_featmap_shape = backbone_featmap_shape if is_hybrid else None + self.neck_ignore_stages = neck_ignore_stages if is_hybrid else [] - if readout_type not in ["ignore", "add", "project"]: - raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']") self.hidden_act = hidden_act self.initializer_range = initializer_range self.readout_type = readout_type diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index 46b5523de14a..dcfa16330833 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -28,12 +28,12 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, DepthEstimatorOutput, SemanticSegmenterOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging, torch_int -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_dpt import DPTConfig @@ -566,7 +566,7 @@ def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_wi def _get_backbone_hidden_size(config): - if config.backbone_config is not None and config.is_hybrid is False: + if config.backbone_config is not None and hasattr(config.backbone_config, "hidden_size"): return config.backbone_config.hidden_size else: return config.hidden_size @@ -923,7 +923,7 @@ def __init__(self, config): super().__init__(config) self.backbone = None - if config.is_hybrid is False and (config.backbone_config is not None or config.backbone is not None): + if config.is_hybrid is False and config.backbone_config is not None: self.backbone = load_backbone(config) else: self.dpt = DPTModel(config, add_pooling_layer=False) diff --git a/src/transformers/models/focalnet/configuration_focalnet.py b/src/transformers/models/focalnet/configuration_focalnet.py index 60d06a94b80a..cb186157a67f 100644 --- a/src/transformers/models/focalnet/configuration_focalnet.py +++ b/src/transformers/models/focalnet/configuration_focalnet.py @@ -13,9 +13,9 @@ # limitations under the License. """FocalNet model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -155,9 +155,7 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.encoder_stride = encoder_stride self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["FocalNetConfig"] diff --git a/src/transformers/models/focalnet/modeling_focalnet.py b/src/transformers/models/focalnet/modeling_focalnet.py index ea0434616105..d02daa13c91a 100644 --- a/src/transformers/models/focalnet/modeling_focalnet.py +++ b/src/transformers/models/focalnet/modeling_focalnet.py @@ -22,11 +22,11 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from .configuration_focalnet import FocalNetConfig @@ -855,12 +855,11 @@ def forward( FocalNet backbone, to be used with frameworks like X-Decoder. 
""" ) -class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin): +class FocalNetBackbone(BackboneMixin, FocalNetPreTrainedModel): has_attentions = False def __init__(self, config: FocalNetConfig): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.embed_dim] + config.hidden_sizes self.focalnet = FocalNetModel(config) diff --git a/src/transformers/models/grounding_dino/configuration_grounding_dino.py b/src/transformers/models/grounding_dino/configuration_grounding_dino.py index 12bf722e5ea6..117a897c279a 100644 --- a/src/transformers/models/grounding_dino/configuration_grounding_dino.py +++ b/src/transformers/models/grounding_dino/configuration_grounding_dino.py @@ -13,9 +13,9 @@ # limitations under the License. """Grounding DINO model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig @@ -35,18 +35,6 @@ class GroundingDinoConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): The config object or dictionary of the text backbone. num_queries (`int`, *optional*, defaults to 900): @@ -156,10 +144,6 @@ class GroundingDinoConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, @@ -202,38 +186,14 @@ def __init__( tie_word_embeddings=True, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `Swin` backbone.") - backbone_config = CONFIG_MAPPING["swin"]( - window_size=7, - image_size=224, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="swin", + default_config_kwargs={"out_indices": [2, 3, 4]}, + **kwargs, ) - if text_config is None: - text_config = {} - logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).") - self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim @@ -268,6 +228,7 @@ def __init__( text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: text_config = CONFIG_MAPPING["bert"]() + logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).") self.text_config = text_config self.max_text_len = max_text_len diff --git a/src/transformers/models/grounding_dino/modeling_grounding_dino.py b/src/transformers/models/grounding_dino/modeling_grounding_dino.py index 6c1a5ff22334..ee389502e26f 100644 --- a/src/transformers/models/grounding_dino/modeling_grounding_dino.py +++ b/src/transformers/models/grounding_dino/modeling_grounding_dino.py @@ -23,20 +23,16 @@ from ... 
import initialization as init from ...activations import ACT2FN -from ...file_utils import ModelOutput, is_timm_available, requires_backends +from ...backbone_utils import load_backbone +from ...file_utils import ModelOutput from ...integrations import use_kernel_forward_from_hub from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import auto_docstring, logging, torch_compilable_check -from ...utils.backbone_utils import load_backbone from ..auto import AutoModel from .configuration_grounding_dino import GroundingDinoConfig -if is_timm_available(): - from timm import create_model - - logger = logging.get_logger(__name__) @@ -373,47 +369,23 @@ def __init__(self, config): super().__init__() self.config = config - - if config.use_timm_backbone: - requires_backends(self, ["timm"]) - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - **config.backbone_kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + self.intermediate_channel_sizes = self.model.channels + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: - if "layer2" not in name and "layer3" not in name and "layer4" not in name: - parameter.requires_grad_(False) - else: - if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: - parameter.requires_grad_(False) + if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: + parameter.requires_grad_(False) - # TODO: use modular - Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->GroundingDino def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps - features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + features = self.model(pixel_values, return_dict=True).feature_maps out = [] for feature_map in features: diff --git a/src/transformers/models/hgnet_v2/configuration_hgnet_v2.py b/src/transformers/models/hgnet_v2/configuration_hgnet_v2.py index 90d2364c85d4..bba788ae26b6 100644 --- a/src/transformers/models/hgnet_v2/configuration_hgnet_v2.py +++ b/src/transformers/models/hgnet_v2/configuration_hgnet_v2.py @@ -19,8 +19,8 @@ # limitations under the License. 
+from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices # TODO: Modular conversion for resnet must be fixed as @@ -120,9 +120,7 @@ def __init__( self.hidden_sizes = hidden_sizes self.hidden_act = hidden_act self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.stem_channels = stem_channels self.stage_in_channels = stage_in_channels self.stage_mid_channels = stage_mid_channels diff --git a/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py b/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py index 095c4122b6a7..9b9b68947d5f 100644 --- a/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py +++ b/src/transformers/models/hgnet_v2/modeling_hgnet_v2.py @@ -25,10 +25,10 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring -from ...utils.backbone_utils import BackboneMixin from .configuration_hgnet_v2 import HGNetV2Config @@ -338,12 +338,11 @@ def forward( ) -class HGNetV2Backbone(HGNetV2PreTrainedModel, BackboneMixin): +class HGNetV2Backbone(BackboneMixin, HGNetV2PreTrainedModel): has_attentions = False def __init__(self, config: HGNetV2Config): super().__init__(config) - super()._init_backbone(config) self.depths = config.depths self.num_features = [config.embedding_size] + config.hidden_sizes self.embedder = HGNetV2Embeddings(config) diff --git a/src/transformers/models/hgnet_v2/modular_hgnet_v2.py b/src/transformers/models/hgnet_v2/modular_hgnet_v2.py index 2320ef666df5..193a82ac1bfd 100644 --- a/src/transformers/models/hgnet_v2/modular_hgnet_v2.py +++ b/src/transformers/models/hgnet_v2/modular_hgnet_v2.py @@ -18,6 +18,7 @@ from torch import Tensor, nn from ... 
import initialization as init +from ...backbone_utils import BackboneConfigMixin, BackboneMixin from ...configuration_utils import PreTrainedConfig from ...modeling_outputs import ( BackboneOutput, @@ -28,7 +29,6 @@ from ...utils import ( auto_docstring, ) -from ...utils.backbone_utils import BackboneConfigMixin, BackboneMixin, get_aligned_output_features_output_indices from ..rt_detr.modeling_rt_detr_resnet import RTDetrResNetConvLayer @@ -129,9 +129,7 @@ def __init__( self.hidden_sizes = hidden_sizes self.hidden_act = hidden_act self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.stem_channels = stem_channels self.stage_in_channels = stage_in_channels self.stage_mid_channels = stage_mid_channels @@ -461,12 +459,11 @@ def forward( ) -class HGNetV2Backbone(HGNetV2PreTrainedModel, BackboneMixin): +class HGNetV2Backbone(BackboneMixin, HGNetV2PreTrainedModel): has_attentions = False def __init__(self, config: HGNetV2Config): super().__init__(config) - super()._init_backbone(config) self.depths = config.depths self.num_features = [config.embedding_size] + config.hidden_sizes self.embedder = HGNetV2Embeddings(config) diff --git a/src/transformers/models/hiera/configuration_hiera.py b/src/transformers/models/hiera/configuration_hiera.py index 34bc2d8c73d5..a932375362ba 100644 --- a/src/transformers/models/hiera/configuration_hiera.py +++ b/src/transformers/models/hiera/configuration_hiera.py @@ -13,9 +13,9 @@ # limitations under the License. """Hiera model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -185,9 +185,7 @@ def __init__( # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * embed_dim_multiplier ** (len(depths) - 1)) self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["HieraConfig"] diff --git a/src/transformers/models/hiera/modeling_hiera.py b/src/transformers/models/hiera/modeling_hiera.py index a591485f0df7..22a0128313fb 100644 --- a/src/transformers/models/hiera/modeling_hiera.py +++ b/src/transformers/models/hiera/modeling_hiera.py @@ -21,6 +21,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import ( BackboneOutput, @@ -31,7 +32,6 @@ ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging, torch_int -from ...utils.backbone_utils import BackboneMixin from .configuration_hiera import HieraConfig @@ -1299,10 +1299,9 @@ def forward( Hiera backbone, to be used with frameworks like DETR and MaskFormer. 
""" ) -class HieraBackbone(HieraPreTrainedModel, BackboneMixin): +class HieraBackbone(BackboneMixin, HieraPreTrainedModel): def __init__(self, config: HieraConfig): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.embed_dim] + [ int(config.embed_dim * config.embed_dim_multiplier**i) for i in range(len(config.depths)) @@ -1312,7 +1311,7 @@ def __init__(self, config: HieraConfig): # Add layer norms to hidden states of out_features hidden_states_norms = {} - for stage, num_channels in zip(self._out_features, self.channels): + for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) diff --git a/src/transformers/models/lw_detr/configuration_lw_detr.py b/src/transformers/models/lw_detr/configuration_lw_detr.py index 3e90410ccd7c..3eff676c19f5 100644 --- a/src/transformers/models/lw_detr/configuration_lw_detr.py +++ b/src/transformers/models/lw_detr/configuration_lw_detr.py @@ -19,13 +19,9 @@ # limitations under the License. import math +from ...backbone_utils import BackboneConfigMixin, consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig -from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices -from ..auto import CONFIG_MAPPING, AutoConfig - - -logger = logging.get_logger(__name__) +from ..auto import AutoConfig class LwDetrViTConfig(BackboneConfigMixin, PreTrainedConfig): @@ -149,9 +145,7 @@ def __init__( self.use_absolute_position_embeddings = use_absolute_position_embeddings self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.cae_init_values = cae_init_values if num_windows % math.sqrt(num_windows) != 0: @@ -308,24 +302,18 @@ def __init__( ): self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `LwDetrViT` backbone." - ) - backbone_config = LwDetrViTConfig( - image_size=1024, - hidden_size=192, - num_hidden_layers=10, - num_attention_heads=12, - window_block_indices=[0, 1, 3, 6, 7, 9], - out_indices=[2, 4, 5, 9], - **kwargs, - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( + backbone_config=backbone_config, + default_config_type="lw_detr_vit", + default_config_kwargs={ + "image_size": 1024, + "hidden_size": 192, + "num_hidden_layers": 10, + "window_block_indices": [0, 1, 3, 6, 7, 9], + "out_indices": [2, 4, 5, 9], + }, + **kwargs, + ) self.backbone_config = backbone_config # projector diff --git a/src/transformers/models/lw_detr/modeling_lw_detr.py b/src/transformers/models/lw_detr/modeling_lw_detr.py index adf893d960d5..f59948a54f83 100644 --- a/src/transformers/models/lw_detr/modeling_lw_detr.py +++ b/src/transformers/models/lw_detr/modeling_lw_detr.py @@ -30,6 +30,7 @@ from ... 
import initialization as init from ...activations import ACT2CLS, ACT2FN +from ...backbone_utils import BackboneMixin from ...integrations import use_kernel_forward_from_hub from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutputWithCrossAttentions @@ -37,7 +38,6 @@ from ...processing_utils import Unpack from ...pytorch_utils import meshgrid from ...utils import ModelOutput, TransformersKwargs, auto_docstring, torch_compilable_check -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import check_model_inputs from .configuration_lw_detr import LwDetrConfig, LwDetrViTConfig @@ -367,10 +367,9 @@ def _init_weights(self, module) -> None: @auto_docstring() -class LwDetrViTBackbone(LwDetrViTPreTrainedModel, BackboneMixin): +class LwDetrViTBackbone(BackboneMixin, LwDetrViTPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.embeddings = LwDetrViTEmbeddings(config) self.encoder = LwDetrViTEncoder(config) diff --git a/src/transformers/models/lw_detr/modular_lw_detr.py b/src/transformers/models/lw_detr/modular_lw_detr.py index d4c88ae5660a..e0aad5698b7c 100644 --- a/src/transformers/models/lw_detr/modular_lw_detr.py +++ b/src/transformers/models/lw_detr/modular_lw_detr.py @@ -21,6 +21,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput @@ -29,7 +30,7 @@ from ...pytorch_utils import meshgrid from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging from ...utils.generic import check_model_inputs -from ..auto.configuration_auto import AutoConfig +from ..auto import AutoConfig from ..convnext.modeling_convnext import ConvNextLayerNorm from ..dab_detr.modeling_dab_detr import gen_sine_position_embeddings from ..deformable_detr.modeling_deformable_detr import ( @@ -40,7 +41,6 @@ DeformableDetrMultiscaleDeformableAttention, ) from ..llama.modeling_llama import eager_attention_forward -from ..rt_detr.configuration_rt_detr import CONFIG_MAPPING from ..rt_detr.modeling_rt_detr import RTDetrConvNormLayer from ..vit.modeling_vit import ViTAttention, ViTEncoder, ViTSelfAttention from ..vitdet.configuration_vitdet import VitDetConfig @@ -337,24 +337,18 @@ def __init__( ): self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `LwDetrViT` backbone." 
- ) - backbone_config = LwDetrViTConfig( - image_size=1024, - hidden_size=192, - num_hidden_layers=10, - num_attention_heads=12, - window_block_indices=[0, 1, 3, 6, 7, 9], - out_indices=[2, 4, 5, 9], - **kwargs, - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( + backbone_config=backbone_config, + default_config_type="lw_detr_vit", + default_config_kwargs={ + "image_size": 1024, + "hidden_size": 192, + "num_hidden_layers": 10, + "window_block_indices": [0, 1, 3, 6, 7, 9], + "out_indices": [2, 4, 5, 9], + }, + **kwargs, + ) self.backbone_config = backbone_config # projector diff --git a/src/transformers/models/mask2former/configuration_mask2former.py b/src/transformers/models/mask2former/configuration_mask2former.py index 092d174b83f4..2fd3bb4a8ecf 100644 --- a/src/transformers/models/mask2former/configuration_mask2former.py +++ b/src/transformers/models/mask2former/configuration_mask2former.py @@ -13,10 +13,10 @@ # limitations under the License. """Mask2Former model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -39,18 +39,6 @@ class Mask2FormerConfig(PreTrainedConfig): backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. If unset, the configuration corresponding to `swin-base-patch4-window12-384` will be used. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. feature_size (`int`, *optional*, defaults to 256): The features (channels) of the resulting feature maps. mask_feature_size (`int`, *optional*, defaults to 256): @@ -159,40 +147,21 @@ def __init__( use_auxiliary_loss: bool = True, feature_strides: list[int] = [4, 8, 16, 32], output_auxiliary_logits: bool | None = None, - backbone: str | None = None, - use_pretrained_backbone: bool = False, - use_timm_backbone: bool = False, - backbone_kwargs: dict | None = None, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `Swin` backbone.") - backbone_config = CONFIG_MAPPING["swin"]( - image_size=224, - num_channels=3, - patch_size=4, - embed_dim=96, - depths=[2, 2, 18, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - drop_path_rate=0.3, - use_absolute_embeddings=False, - out_features=["stage1", "stage2", "stage3", "stage4"], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="swin", + default_config_kwargs={ + "depths": [2, 2, 18, 2], + "drop_path_rate": 0.3, + "out_features": ["stage1", "stage2", "stage3", "stage4"], + }, + **kwargs, ) + # verify that the backbone is supported - if backbone_config is not None and backbone_config.model_type not in self.backbones_supported: + if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. " f"Supported model types: {','.join(self.backbones_supported)}" @@ -227,10 +196,6 @@ def __init__( self.feature_strides = feature_strides self.output_auxiliary_logits = output_auxiliary_logits self.num_hidden_layers = decoder_layers - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs super().__init__(**kwargs) diff --git a/src/transformers/models/mask2former/modeling_mask2former.py b/src/transformers/models/mask2former/modeling_mask2former.py index 74b1a4c380cd..ce239a1e9aa0 100644 --- a/src/transformers/models/mask2former/modeling_mask2former.py +++ b/src/transformers/models/mask2former/modeling_mask2former.py @@ -23,13 +23,13 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...file_utils import ModelOutput, is_scipy_available, requires_backends from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import auto_docstring, is_accelerate_available, logging, torch_compilable_check -from ...utils.backbone_utils import load_backbone from .configuration_mask2former import Mask2FormerConfig diff --git a/src/transformers/models/maskformer/configuration_maskformer.py b/src/transformers/models/maskformer/configuration_maskformer.py index 07ab775e122d..6b40d33ba4f8 100644 --- a/src/transformers/models/maskformer/configuration_maskformer.py +++ b/src/transformers/models/maskformer/configuration_maskformer.py @@ -13,12 +13,11 @@ # limitations under the License. 
"""MaskFormer model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig from ..detr import DetrConfig -from ..swin import SwinConfig logger = logging.get_logger(__name__) @@ -49,18 +48,6 @@ class MaskFormerConfig(PreTrainedConfig): backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration passed to the backbone, if unset, the configuration corresponding to `swin-base-patch4-window12-384` will be used. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. decoder_config (`Dict`, *optional*): The configuration passed to the transformer decoder model, if unset the base config for `detr-resnet-50` will be used. @@ -119,37 +106,23 @@ def __init__( cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: bool | None = None, - backbone: str | None = None, - use_pretrained_backbone: bool = False, - use_timm_backbone: bool = False, - backbone_kwargs: dict | None = None, **kwargs, ): - if backbone_config is None and backbone is None: - # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k - backbone_config = SwinConfig( - image_size=384, - num_channels=3, - patch_size=4, - embed_dim=128, - depths=[2, 2, 18, 2], - num_heads=[4, 8, 16, 32], - window_size=12, - drop_path_rate=0.3, - out_features=["stage1", "stage2", "stage3", "stage4"], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="swin", + default_config_kwargs={ + "depths": [2, 2, 18, 2], + "drop_path_rate": 0.3, + "image_size": 384, + "embed_dim": 128, + "num_heads": [4, 8, 16, 32], + "window_size": 12, + "out_features": ["stage1", "stage2", "stage3", "stage4"], + }, + **kwargs, ) + # verify that the backbone is supported if backbone_config is not None and backbone_config.model_type not in self.backbones_supported: logger.warning_once( @@ -192,10 +165,6 @@ def __init__( self.num_attention_heads = self.decoder_config.encoder_attention_heads self.num_hidden_layers = self.decoder_config.num_hidden_layers - self.backbone = backbone - self.use_pretrained_backbone 
= use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs super().__init__(**kwargs) diff --git a/src/transformers/models/maskformer/configuration_maskformer_swin.py b/src/transformers/models/maskformer/configuration_maskformer_swin.py index 869528d44847..43841706a2c1 100644 --- a/src/transformers/models/maskformer/configuration_maskformer_swin.py +++ b/src/transformers/models/maskformer/configuration_maskformer_swin.py @@ -13,9 +13,9 @@ # limitations under the License. """MaskFormer Swin Transformer model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -144,9 +144,7 @@ def __init__( # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["MaskFormerSwinConfig"] diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index dc171b1addf6..2aed70cdd1b6 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -23,6 +23,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutputWithCrossAttentions @@ -36,7 +37,6 @@ logging, requires_backends, ) -from ...utils.backbone_utils import load_backbone from ..detr import DetrConfig from .configuration_maskformer import MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig diff --git a/src/transformers/models/maskformer/modeling_maskformer_swin.py b/src/transformers/models/maskformer/modeling_maskformer_swin.py index 655ba8b90c23..f1cb04350fab 100644 --- a/src/transformers/models/maskformer/modeling_maskformer_swin.py +++ b/src/transformers/models/maskformer/modeling_maskformer_swin.py @@ -24,13 +24,13 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...file_utils import ModelOutput from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import auto_docstring, torch_int -from ...utils.backbone_utils import BackboneMixin from .configuration_maskformer_swin import MaskFormerSwinConfig @@ -785,7 +785,7 @@ def forward( ) -class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin): +class MaskFormerSwinBackbone(BackboneMixin, MaskFormerSwinPreTrainedModel): """ MaskFormerSwin backbone, designed especially for the MaskFormer framework. 
@@ -799,7 +799,6 @@ class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin): def __init__(self, config: MaskFormerSwinConfig): super().__init__(config) - super()._init_backbone(config) self.model = MaskFormerSwinModel(config) if "stem" in self.out_features: diff --git a/src/transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py b/src/transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py index cff7534e66cf..092873bd85ec 100644 --- a/src/transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +++ b/src/transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py @@ -17,9 +17,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig @@ -39,18 +39,6 @@ class MMGroundingDinoConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): The config object or dictionary of the text backbone. num_queries (`int`, *optional*, defaults to 900): @@ -155,10 +143,6 @@ class MMGroundingDinoConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, @@ -199,38 +183,14 @@ def __init__( tie_word_embeddings=True, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `Swin` backbone.") - backbone_config = CONFIG_MAPPING["swin"]( - window_size=7, - image_size=224, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="swin", + default_config_kwargs={"out_indices": [2, 3, 4]}, + **kwargs, ) - if text_config is None: - text_config = {} - logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).") - self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim @@ -264,6 +224,7 @@ def __init__( text_config["model_type"] = text_config.get("model_type", "bert") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: + logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).") text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config diff --git a/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py b/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py index 85972613c65b..476485a44979 100644 --- a/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +++ b/src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py @@ -27,20 +27,16 @@ from ... 
import initialization as init from ...activations import ACT2FN -from ...file_utils import ModelOutput, is_timm_available, requires_backends +from ...backbone_utils import load_backbone +from ...file_utils import ModelOutput from ...integrations import use_kernel_forward_from_hub from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import auto_docstring, torch_compilable_check -from ...utils.backbone_utils import load_backbone from ..auto.modeling_auto import AutoModel from .configuration_mm_grounding_dino import MMGroundingDinoConfig -if is_timm_available(): - from timm import create_model - - class MMGroundingDinoContrastiveEmbedding(nn.Module): def __init__(self, config): super().__init__() @@ -651,47 +647,23 @@ def __init__(self, config): super().__init__() self.config = config - - if config.use_timm_backbone: - requires_backends(self, ["timm"]) - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - **config.backbone_kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + self.intermediate_channel_sizes = self.model.channels + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: - if "layer2" not in name and "layer3" not in name and "layer4" not in name: - parameter.requires_grad_(False) - else: - if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: - parameter.requires_grad_(False) + if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: + parameter.requires_grad_(False) - # TODO: use modular - Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->MMGroundingDino def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps - features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + features = self.model(pixel_values, return_dict=True).feature_maps out = [] for feature_map in features: diff --git a/src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py b/src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py index 819592c23402..c8da082d3ec1 100644 --- a/src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +++ b/src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py @@ -17,9 +17,9 @@ from torch import nn from ... 
import initialization as init +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig from ..auto.modeling_auto import AutoModel from ..grounding_dino.modeling_grounding_dino import ( @@ -52,18 +52,6 @@ class MMGroundingDinoConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `BertConfig`): The config object or dictionary of the text backbone. num_queries (`int`, *optional*, defaults to 900): @@ -168,10 +156,6 @@ class MMGroundingDinoConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, text_config=None, num_queries=900, encoder_layers=6, @@ -212,38 +196,14 @@ def __init__( tie_word_embeddings=True, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.") - backbone_config = CONFIG_MAPPING["swin"]( - window_size=7, - image_size=224, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="swin", + default_config_kwargs={"out_indices": [2, 3, 4]}, + **kwargs, ) - if text_config is None: - text_config = {} - logger.info("text_config is None. 
Initializing the text config with default values (`BertConfig`).") - self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.num_queries = num_queries self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim @@ -277,6 +237,7 @@ def __init__( text_config["model_type"] = text_config.get("model_type", "bert") text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config) elif text_config is None: + logger.info("text_config is None. Initializing the text config with default values (`BertConfig`).") text_config = CONFIG_MAPPING["bert"]() self.text_config = text_config diff --git a/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py b/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py index 753c051b4958..e662f318a69b 100644 --- a/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py +++ b/src/transformers/models/omdet_turbo/configuration_omdet_turbo.py @@ -13,9 +13,9 @@ # limitations under the License. """OmDet-Turbo model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments from ..auto import CONFIG_MAPPING, AutoConfig @@ -37,15 +37,6 @@ class OmDetTurboConfig(PreTrainedConfig): The configuration of the text backbone. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the vision backbone. - use_timm_backbone (`bool`, *optional*, defaults to `True`): - Whether to use the timm for the vision backbone. - backbone (`str`, *optional*, defaults to `"swin_tiny_patch4_window7_224"`): - The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False` a randomly initialized - backbone with the same architecture `backbone` is used. - backbone_kwargs (`dict`, *optional*): - Additional kwargs for the vision backbone. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use a pretrained vision backbone. apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`): Whether to apply layer normalization on the feature maps of the vision backbone output. image_size (`int`, *optional*, defaults to 640): @@ -154,10 +145,6 @@ def __init__( self, text_config=None, backbone_config=None, - use_timm_backbone=True, - backbone="swin_tiny_patch4_window7_224", - backbone_kwargs=None, - use_pretrained_backbone=False, apply_layernorm_after_vision_backbone=True, image_size=640, disable_custom_kernels=False, @@ -198,40 +185,23 @@ def __init__( is_encoder_decoder=True, **kwargs, ): - if use_timm_backbone: - if backbone_config is None: - backbone_kwargs = { - "out_indices": [1, 2, 3], - "img_size": image_size, - "always_partition": True, - } - elif backbone_config is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `swin` vision config.") - backbone_config = CONFIG_MAPPING["swin"]( - window_size=7, - image_size=image_size, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + # Init timm backbone with hardcoded values for BC + timm_default_kwargs = { + "out_indices": [1, 2, 3], + "img_size": image_size, + "always_partition": True, + } + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_backbone="swin_tiny_patch4_window7_224", + default_config_type="swin", + default_config_kwargs={"image_size": image_size, "out_indices": [2, 3, 4]}, + timm_default_kwargs=timm_default_kwargs, + **kwargs, ) if text_config is None: - logger.info( - "`text_config` is `None`. Initializing the config with the default `clip_text_model` text config." - ) + logger.info("`text_config` is `None`. Initializing the config with the default `clip_text_model`") text_config = CONFIG_MAPPING["clip_text_model"]() elif isinstance(text_config, dict): text_model_type = text_config.get("model_type") @@ -244,10 +214,6 @@ def __init__( self.text_config = text_config self.backbone_config = backbone_config - self.use_timm_backbone = use_timm_backbone - self.backbone = backbone - self.backbone_kwargs = backbone_kwargs - self.use_pretrained_backbone = use_pretrained_backbone self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone self.image_size = image_size self.disable_custom_kernels = disable_custom_kernels diff --git a/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py b/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py index ff6f59b25287..c0191b58dae2 100644 --- a/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py +++ b/src/transformers/models/omdet_turbo/modeling_omdet_turbo.py @@ -26,6 +26,7 @@ from ... import initialization as init from ...activations import ACT2CLS, ACT2FN +from ...backbone_utils import load_backbone from ...integrations import use_kernel_forward_from_hub from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer @@ -38,7 +39,6 @@ logging, torch_compilable_check, ) -from ...utils.backbone_utils import load_backbone from ..auto import AutoModel from .configuration_omdet_turbo import OmDetTurboConfig diff --git a/src/transformers/models/oneformer/configuration_oneformer.py b/src/transformers/models/oneformer/configuration_oneformer.py index de485e02a025..06d1a0333803 100644 --- a/src/transformers/models/oneformer/configuration_oneformer.py +++ b/src/transformers/models/oneformer/configuration_oneformer.py @@ -13,10 +13,10 @@ # limitations under the License. 
"""OneFormer model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -36,18 +36,6 @@ class OneFormerConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `SwinConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. ignore_value (`int`, *optional*, defaults to 255): Values to be ignored in GT label while calculating loss. num_queries (`int`, *optional*, defaults to 150): @@ -149,10 +137,6 @@ class OneFormerConfig(PreTrainedConfig): def __init__( self, backbone_config: dict | PreTrainedConfig | None = None, - backbone: str | None = None, - use_pretrained_backbone: bool = False, - use_timm_backbone: bool = False, - backbone_kwargs: dict | None = None, ignore_value: int = 255, num_queries: int = 150, no_object_weight: int = 0.1, @@ -195,38 +179,17 @@ def __init__( common_stride: int = 4, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is unset. 
Initializing the config with the default `Swin` backbone.") - backbone_config = CONFIG_MAPPING["swin"]( - image_size=224, - num_channels=3, - patch_size=4, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - drop_path_rate=0.3, - use_absolute_embeddings=False, - out_features=["stage1", "stage2", "stage3", "stage4"], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="swin", + default_config_kwargs={ + "drop_path_rate": 0.3, + "out_features": ["stage1", "stage2", "stage3", "stage4"], + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.ignore_value = ignore_value self.num_queries = num_queries self.no_object_weight = no_object_weight diff --git a/src/transformers/models/oneformer/modeling_oneformer.py b/src/transformers/models/oneformer/modeling_oneformer.py index cdde05a5f284..5250a7f14133 100644 --- a/src/transformers/models/oneformer/modeling_oneformer.py +++ b/src/transformers/models/oneformer/modeling_oneformer.py @@ -24,6 +24,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel @@ -37,7 +38,6 @@ requires_backends, torch_compilable_check, ) -from ...utils.backbone_utils import load_backbone from ...utils.generic import maybe_autocast from .configuration_oneformer import OneFormerConfig diff --git a/src/transformers/models/pixio/configuration_pixio.py b/src/transformers/models/pixio/configuration_pixio.py index 3f9ec4cc6e69..7fd549452586 100644 --- a/src/transformers/models/pixio/configuration_pixio.py +++ b/src/transformers/models/pixio/configuration_pixio.py @@ -18,8 +18,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices class PixioConfig(BackboneConfigMixin, PreTrainedConfig): @@ -138,9 +138,7 @@ def __init__( self.qkv_bias = qkv_bias self.drop_path_rate = drop_path_rate self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) self.apply_layernorm = apply_layernorm self.reshape_hidden_states = reshape_hidden_states diff --git a/src/transformers/models/pixio/modeling_pixio.py b/src/transformers/models/pixio/modeling_pixio.py index 55122fd32c2f..53277c5598ce 100644 --- a/src/transformers/models/pixio/modeling_pixio.py +++ b/src/transformers/models/pixio/modeling_pixio.py @@ -26,12 +26,12 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, is_tracing -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import check_model_inputs from .configuration_pixio import PixioConfig @@ -430,10 +430,9 @@ def forward( Pixio backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class PixioBackbone(PixioPreTrainedModel, BackboneMixin): +class PixioBackbone(BackboneMixin, PixioPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = PixioEmbeddings(config) diff --git a/src/transformers/models/pp_doclayout_v3/configuration_pp_doclayout_v3.py b/src/transformers/models/pp_doclayout_v3/configuration_pp_doclayout_v3.py index 077842e92770..19d2b5be3777 100644 --- a/src/transformers/models/pp_doclayout_v3/configuration_pp_doclayout_v3.py +++ b/src/transformers/models/pp_doclayout_v3/configuration_pp_doclayout_v3.py @@ -18,13 +18,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig -from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig - - -logger = logging.get_logger(__name__) +from ..auto import AutoConfig class PPDocLayoutV3Config(PreTrainedConfig): @@ -51,20 +47,8 @@ class PPDocLayoutV3Config(PreTrainedConfig): Whether the model's input and output word embeddings should be tied. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. 
If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -179,11 +163,7 @@ def __init__( tie_word_embeddings=True, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder PPDocLayoutV3HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -232,12 +212,10 @@ def __init__( self.batch_norm_eps = batch_norm_eps self.tie_word_embeddings = tie_word_embeddings - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `HGNetV3` backbone." - ) - backbone_config = { - "model_type": "hgnet_v2", + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( + backbone_config=backbone_config, + default_config_type="hgnet_v2", + default_config_kwargs={ "arch": "L", "return_idx": [0, 1, 2, 3], "freeze_stem_only": True, @@ -245,29 +223,12 @@ def __init__( "freeze_norm": True, "lr_mult_list": [0, 0.05, 0.05, 0.05, 0.05], "out_features": ["stage1", "stage2", "stage3", "stage4"], - } - config_class = CONFIG_MAPPING["hgnet_v2"] - backbone_config = config_class.from_dict(backbone_config) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - if backbone_model_type is None: - raise ValueError("`backbone_config` dict must contain key `model_type`.") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, - backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + }, + **kwargs, ) + self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = dict(backbone_kwargs) if backbone_kwargs is not None else None # ---- encoder ---- self.encoder_hidden_dim = encoder_hidden_dim diff --git a/src/transformers/models/pp_doclayout_v3/modeling_pp_doclayout_v3.py b/src/transformers/models/pp_doclayout_v3/modeling_pp_doclayout_v3.py index 5d9af0fdc335..5f1671a8587c 100644 --- a/src/transformers/models/pp_doclayout_v3/modeling_pp_doclayout_v3.py +++ b/src/transformers/models/pp_doclayout_v3/modeling_pp_doclayout_v3.py @@ -30,6 +30,7 @@ from ... 
import initialization as init from ...activations import ACT2CLS, ACT2FN +from ...backbone_utils import load_backbone from ...image_transforms import center_to_corners_format, corners_to_center_format from ...integrations import use_kernel_forward_from_hub from ...modeling_outputs import BaseModelOutput @@ -43,7 +44,6 @@ torch_compilable_check, torch_int, ) -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_pp_doclayout_v3 import PPDocLayoutV3Config diff --git a/src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py b/src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py index d3e55b67aa48..bd5fe3b76857 100644 --- a/src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py +++ b/src/transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py @@ -23,6 +23,7 @@ from torch import nn from ... import initialization as init +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...image_processing_utils_fast import ( BaseImageProcessorFast, @@ -43,9 +44,8 @@ logging, requires_backends, ) -from ...utils.backbone_utils import verify_backbone_config_arguments from ...utils.generic import TensorType, can_return_tuple -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig from ..resnet.modeling_resnet import ResNetConvLayer from ..rt_detr.modeling_rt_detr import ( RTDetrDecoder, @@ -93,20 +93,8 @@ class PPDocLayoutV3Config(PreTrainedConfig): Whether the model's input and output word embeddings should be tied. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -221,11 +209,7 @@ def __init__( tie_word_embeddings=True, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder PPDocLayoutV3HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -274,12 +258,10 @@ def __init__( self.batch_norm_eps = batch_norm_eps self.tie_word_embeddings = tie_word_embeddings - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. 
Initializing the config with the default `HGNetV3` backbone." - ) - backbone_config = { - "model_type": "hgnet_v2", + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( + backbone_config=backbone_config, + default_config_type="hgnet_v2", + default_config_kwargs={ "arch": "L", "return_idx": [0, 1, 2, 3], "freeze_stem_only": True, @@ -287,29 +269,12 @@ def __init__( "freeze_norm": True, "lr_mult_list": [0, 0.05, 0.05, 0.05, 0.05], "out_features": ["stage1", "stage2", "stage3", "stage4"], - } - config_class = CONFIG_MAPPING["hgnet_v2"] - backbone_config = config_class.from_dict(backbone_config) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - if backbone_model_type is None: - raise ValueError("`backbone_config` dict must contain key `model_type`.") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, - backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + }, + **kwargs, ) + self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = dict(backbone_kwargs) if backbone_kwargs is not None else None # ---- encoder ---- self.encoder_hidden_dim = encoder_hidden_dim diff --git a/src/transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py b/src/transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py index 6967385d67b3..ffa4e3086f34 100644 --- a/src/transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +++ b/src/transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py @@ -18,13 +18,9 @@ # limitations under the License. +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig -from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig - - -logger = logging.get_logger(__name__) +from ..auto.configuration_auto import AutoConfig class PromptDepthAnythingConfig(PreTrainedConfig): @@ -40,18 +36,6 @@ class PromptDepthAnythingConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `Dinov2Config()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. 
patch_size (`int`, *optional*, defaults to 14): The size of the patches to extract from the backbone features. initializer_range (`float`, *optional*, defaults to 0.02): @@ -95,10 +79,6 @@ class PromptDepthAnythingConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, patch_size=14, initializer_range=0.02, reassemble_hidden_size=384, @@ -111,34 +91,20 @@ def __init__( max_depth=None, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.") - backbone_config = CONFIG_MAPPING["dinov2"]( - image_size=518, - hidden_size=384, - num_attention_heads=6, - out_indices=[9, 10, 11, 12], - apply_layernorm=True, - reshape_hidden_states=False, - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="dinov2", + default_config_kwargs={ + "image_size": 518, + "hidden_size": 384, + "num_attention_heads": 6, + "out_indices": [9, 10, 11, 12], + "reshape_hidden_states": False, + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.reassemble_hidden_size = reassemble_hidden_size self.patch_size = patch_size self.initializer_range = initializer_range diff --git a/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py b/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py index 1becbad9f5a7..5461d87c609a 100644 --- a/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +++ b/src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py @@ -20,12 +20,11 @@ import torch import torch.nn as nn -from transformers.utils.generic import torch_int - +from ...backbone_utils import load_backbone from ...modeling_outputs import DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring -from ...utils.backbone_utils import load_backbone +from ...utils.generic import torch_int from .configuration_prompt_depth_anything import PromptDepthAnythingConfig diff --git a/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py b/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py index d91d51d2f6b3..06f2cdab687a 100644 --- a/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +++ b/src/transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py @@ -14,8 +14,12 @@ import torch import torch.nn as nn -from transformers.models.depth_anything.configuration_depth_anything import DepthAnythingConfig -from transformers.models.depth_anything.modeling_depth_anything import ( +from ...modeling_outputs import DepthEstimatorOutput +from ...modeling_utils import PreTrainedModel +from ...utils import auto_docstring +from ...utils.generic import torch_int +from 
..depth_anything.configuration_depth_anything import DepthAnythingConfig +from ..depth_anything.modeling_depth_anything import ( DepthAnythingDepthEstimationHead, DepthAnythingFeatureFusionLayer, DepthAnythingFeatureFusionStage, @@ -23,11 +27,6 @@ DepthAnythingNeck, DepthAnythingReassembleStage, ) -from transformers.utils.generic import torch_int - -from ...modeling_outputs import DepthEstimatorOutput -from ...modeling_utils import PreTrainedModel -from ...utils import auto_docstring class PromptDepthAnythingConfig(DepthAnythingConfig): diff --git a/src/transformers/models/pvt_v2/configuration_pvt_v2.py b/src/transformers/models/pvt_v2/configuration_pvt_v2.py index a724b9877df8..a87567fbdb09 100644 --- a/src/transformers/models/pvt_v2/configuration_pvt_v2.py +++ b/src/transformers/models/pvt_v2/configuration_pvt_v2.py @@ -17,9 +17,9 @@ from collections.abc import Callable +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -147,9 +147,7 @@ def __init__( self.qkv_bias = qkv_bias self.linear_attention = linear_attention self.stage_names = [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["PvtV2Config"] diff --git a/src/transformers/models/pvt_v2/modeling_pvt_v2.py b/src/transformers/models/pvt_v2/modeling_pvt_v2.py index 9dbf10d4b719..39a2d6cc3bc2 100644 --- a/src/transformers/models/pvt_v2/modeling_pvt_v2.py +++ b/src/transformers/models/pvt_v2/modeling_pvt_v2.py @@ -22,11 +22,11 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput, ImageClassifierOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from .configuration_pvt_v2 import PvtV2Config @@ -510,10 +510,9 @@ def forward( PVTv2 backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class PvtV2Backbone(PvtV2Model, BackboneMixin): +class PvtV2Backbone(BackboneMixin, PvtV2Model): def __init__(self, config: PvtV2Config): super().__init__(config) - super()._init_backbone(config) self.num_features = config.hidden_sizes @auto_docstring diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index 00512cec1251..eddd37998e6e 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -13,9 +13,9 @@ # limitations under the License. 
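With the consolidation above, `PromptDepthAnythingConfig` still falls back to the small Dinov2 backbone when no backbone arguments are given; a minimal sketch of the resulting defaults (values taken from the `default_config_kwargs` in this patch):

```py
from transformers import PromptDepthAnythingConfig

# No backbone arguments: the config falls back to the default Dinov2 backbone
# (hidden_size=384, out_indices=[9, 10, 11, 12], reshape_hidden_states=False).
config = PromptDepthAnythingConfig()
print(type(config.backbone_config).__name__)  # Dinov2Config
print(config.backbone_config.out_indices)     # [9, 10, 11, 12]
```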
"""ResNet model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -105,9 +105,7 @@ def __init__( self.downsample_in_first_stage = downsample_in_first_stage self.downsample_in_bottleneck = downsample_in_bottleneck self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["ResNetConfig"] diff --git a/src/transformers/models/resnet/modeling_resnet.py b/src/transformers/models/resnet/modeling_resnet.py index 1cf9e9ca5f2e..92a264014c6c 100644 --- a/src/transformers/models/resnet/modeling_resnet.py +++ b/src/transformers/models/resnet/modeling_resnet.py @@ -20,6 +20,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, @@ -28,7 +29,6 @@ ) from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig @@ -372,12 +372,11 @@ def forward( ResNet backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin): +class ResNetBackbone(BackboneMixin, ResNetPreTrainedModel): has_attentions = False def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.embedding_size] + config.hidden_sizes self.embedder = ResNetEmbeddings(config) diff --git a/src/transformers/models/rt_detr/configuration_rt_detr.py b/src/transformers/models/rt_detr/configuration_rt_detr.py index 91db735c1d75..ff1f1fd13116 100644 --- a/src/transformers/models/rt_detr/configuration_rt_detr.py +++ b/src/transformers/models/rt_detr/configuration_rt_detr.py @@ -13,11 +13,10 @@ # limitations under the License. """RT-DETR model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig -from .configuration_rt_detr_resnet import RTDetrResNetConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -45,20 +44,8 @@ class RTDetrConfig(PreTrainedConfig): The epsilon used by the batch normalization layers. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `RTDetrResNetConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. 
- use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -189,11 +176,7 @@ def __init__( batch_norm_eps=1e-5, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -249,42 +232,18 @@ def __init__( self.initializer_bias_prior_prob = initializer_bias_prior_prob self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `RTDetr-ResNet` backbone." - ) - backbone_config = RTDetrResNetConfig( - num_channels=3, - embedding_size=64, - hidden_sizes=[256, 512, 1024, 2048], - depths=[3, 4, 6, 3], - layer_type="bottleneck", - hidden_act="relu", - downsample_in_first_stage=False, - downsample_in_bottleneck=False, - out_features=None, - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="rt_detr_resnet", + default_config_kwargs={ + "out_indices": [2, 3, 4], + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = backbone_kwargs # encoder self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels diff --git a/src/transformers/models/rt_detr/configuration_rt_detr_resnet.py b/src/transformers/models/rt_detr/configuration_rt_detr_resnet.py index d3d56001bd7e..d647dde8c3ac 100644 --- a/src/transformers/models/rt_detr/configuration_rt_detr_resnet.py +++ b/src/transformers/models/rt_detr/configuration_rt_detr_resnet.py @@ -13,9 +13,9 @@ # limitations under the License. 
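A minimal sketch of how `RTDetrConfig` is built after this change (the `consolidate_backbone_kwargs_to_config` helper and its `default_config_type`/`default_config_kwargs` arguments are internal names taken from this patch, not a public API):

```py
from transformers import AutoConfig, RTDetrConfig

# No backbone arguments: the helper falls back to an RTDetrResNetConfig with out_indices=[2, 3, 4].
config = RTDetrConfig()
print(type(config.backbone_config).__name__)  # RTDetrResNetConfig

# Explicit backbone: pass a config object instead of the removed `backbone`,
# `use_pretrained_backbone` and `use_timm_backbone` arguments.
backbone_config = AutoConfig.from_pretrained("microsoft/resnet-50", out_indices=[2, 3, 4])
config = RTDetrConfig(backbone_config=backbone_config)
```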
"""RT-DETR ResNet model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -105,9 +105,7 @@ def __init__( self.downsample_in_first_stage = downsample_in_first_stage self.downsample_in_bottleneck = downsample_in_bottleneck self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["RTDetrResNetConfig"] diff --git a/src/transformers/models/rt_detr/modeling_rt_detr.py b/src/transformers/models/rt_detr/modeling_rt_detr.py index 036a359b7021..4cec51911eb9 100644 --- a/src/transformers/models/rt_detr/modeling_rt_detr.py +++ b/src/transformers/models/rt_detr/modeling_rt_detr.py @@ -28,20 +28,14 @@ from ... import initialization as init from ...activations import ACT2CLS, ACT2FN +from ...backbone_utils import load_backbone from ...image_transforms import center_to_corners_format, corners_to_center_format from ...integrations import use_kernel_forward_from_hub from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache -from ...utils import ( - ModelOutput, - TransformersKwargs, - auto_docstring, - torch_compilable_check, - torch_int, -) -from ...utils.backbone_utils import load_backbone +from ...utils import ModelOutput, TransformersKwargs, auto_docstring, torch_compilable_check, torch_int from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_rt_detr import RTDetrConfig diff --git a/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py b/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py index f21bcfbbc76b..a79afaf88b1e 100644 --- a/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py +++ b/src/transformers/models/rt_detr/modeling_rt_detr_resnet.py @@ -23,10 +23,10 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_outputs import BackboneOutput, BaseModelOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from .configuration_rt_detr_resnet import RTDetrResNetConfig @@ -329,12 +329,11 @@ def _init_weights(self, module): ResNet backbone, to be used with frameworks like RTDETR. """ ) -class RTDetrResNetBackbone(RTDetrResNetPreTrainedModel, BackboneMixin): +class RTDetrResNetBackbone(BackboneMixin, RTDetrResNetPreTrainedModel): has_attentions = False def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.embedding_size] + config.hidden_sizes self.embedder = RTDetrResNetEmbeddings(config) diff --git a/src/transformers/models/rt_detr/modular_rt_detr.py b/src/transformers/models/rt_detr/modular_rt_detr.py index 076467858187..286f6254db65 100644 --- a/src/transformers/models/rt_detr/modular_rt_detr.py +++ b/src/transformers/models/rt_detr/modular_rt_detr.py @@ -23,6 +23,7 @@ from ... 
import initialization as init from ...activations import ACT2CLS, ACT2FN +from ...backbone_utils import load_backbone from ...image_processing_utils import BatchFeature from ...image_processing_utils_fast import BaseImageProcessorFast, SizeDict, get_max_height_width from ...image_transforms import center_to_corners_format, corners_to_center_format @@ -49,7 +50,6 @@ requires_backends, torch_int, ) -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple, check_model_inputs from ..conditional_detr.modeling_conditional_detr import inverse_sigmoid from ..deformable_detr.modeling_deformable_detr import DeformableDetrMultiscaleDeformableAttention diff --git a/src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py b/src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py index 578b5f0de480..28777c58883e 100644 --- a/src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +++ b/src/transformers/models/rt_detr_v2/configuration_rt_detr_v2.py @@ -17,13 +17,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig -from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig - - -logger = logging.get_logger(__name__) +from ..auto import AutoConfig class RTDetrV2Config(PreTrainedConfig): @@ -49,20 +45,8 @@ class RTDetrV2Config(PreTrainedConfig): The epsilon used by the batch normalization layers. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `RTDetrV2ResNetConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. 
encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -200,11 +184,7 @@ def __init__( batch_norm_eps=1e-5, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -263,47 +243,18 @@ def __init__( self.initializer_bias_prior_prob = initializer_bias_prior_prob self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `RTDetrV2-ResNet` backbone." - ) - backbone_model_type = "rt_detr_resnet" - config_class = CONFIG_MAPPING[backbone_model_type] - # this will map it to RTDetrResNetConfig - # note: we can instead create RTDetrV2ResNetConfig but it will be exactly the same as V1 - # and we would need to create RTDetrV2ResNetModel - backbone_config = config_class( - num_channels=3, - embedding_size=64, - hidden_sizes=[256, 512, 1024, 2048], - depths=[3, 4, 6, 3], - layer_type="bottleneck", - hidden_act="relu", - downsample_in_first_stage=False, - downsample_in_bottleneck=False, - out_features=None, - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="rt_detr_resnet", + default_config_kwargs={ + "out_indices": [2, 3, 4], + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = backbone_kwargs # encoder self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels diff --git a/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py b/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py index 428532a96d8d..5a16ac55156f 100644 --- a/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +++ b/src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py @@ -29,13 +29,13 @@ from ... 
import initialization as init from ...activations import ACT2CLS, ACT2FN +from ...backbone_utils import load_backbone from ...image_transforms import center_to_corners_format, corners_to_center_format from ...modeling_outputs import BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...pytorch_utils import compile_compatible_method_lru_cache from ...utils import ModelOutput, TransformersKwargs, auto_docstring, torch_compilable_check, torch_int -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple, check_model_inputs from .configuration_rt_detr_v2 import RTDetrV2Config diff --git a/src/transformers/models/rt_detr_v2/modular_rt_detr_v2.py b/src/transformers/models/rt_detr_v2/modular_rt_detr_v2.py index 4341f0d44241..1272d97d029f 100644 --- a/src/transformers/models/rt_detr_v2/modular_rt_detr_v2.py +++ b/src/transformers/models/rt_detr_v2/modular_rt_detr_v2.py @@ -19,17 +19,11 @@ from torch import Tensor from ... import initialization as init +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...processing_utils import Unpack -from ...utils import ( - TransformersKwargs, - logging, - torch_compilable_check, -) -from ...utils.backbone_utils import ( - verify_backbone_config_arguments, -) -from ..auto import CONFIG_MAPPING, AutoConfig +from ...utils import TransformersKwargs, logging, torch_compilable_check +from ..auto import AutoConfig from ..rt_detr.modeling_rt_detr import ( RTDetrDecoder, RTDetrDecoderLayer, @@ -66,20 +60,8 @@ class RTDetrV2Config(PreTrainedConfig): The epsilon used by the batch normalization layers. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `RTDetrV2ResNetConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`): Whether to freeze the batch normalization layers in the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. encoder_hidden_dim (`int`, *optional*, defaults to 256): Dimension of the layers in hybrid encoder. 
encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`): @@ -217,11 +199,7 @@ def __init__( batch_norm_eps=1e-5, # backbone backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, freeze_backbone_batch_norms=True, - backbone_kwargs=None, # encoder HybridEncoder encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], @@ -280,47 +258,18 @@ def __init__( self.initializer_bias_prior_prob = initializer_bias_prior_prob self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps - # backbone - if backbone_config is None and backbone is None: - logger.info( - "`backbone_config` and `backbone` are `None`. Initializing the config with the default `RTDetrV2-ResNet` backbone." - ) - backbone_model_type = "rt_detr_resnet" - config_class = CONFIG_MAPPING[backbone_model_type] - # this will map it to RTDetrResNetConfig - # note: we can instead create RTDetrV2ResNetConfig but it will be exactly the same as V1 - # and we would need to create RTDetrV2ResNetModel - backbone_config = config_class( - num_channels=3, - embedding_size=64, - hidden_sizes=[256, 512, 1024, 2048], - depths=[3, 4, 6, 3], - layer_type="bottleneck", - hidden_act="relu", - downsample_in_first_stage=False, - downsample_in_bottleneck=False, - out_features=None, - out_indices=[2, 3, 4], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.pop("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="rt_detr_resnet", + default_config_kwargs={ + "out_indices": [2, 3, 4], + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone self.freeze_backbone_batch_norms = freeze_backbone_batch_norms - self.backbone_kwargs = backbone_kwargs # encoder self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels diff --git a/src/transformers/models/swin/configuration_swin.py b/src/transformers/models/swin/configuration_swin.py index c69896c04f77..59cc16d63ee2 100644 --- a/src/transformers/models/swin/configuration_swin.py +++ b/src/transformers/models/swin/configuration_swin.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""Swin Transformer model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -148,9 +148,7 @@ def __init__( # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["SwinConfig"] diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index f8961f0426bc..9cc9ea4ae8f6 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -22,12 +22,12 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import ModelOutput, auto_docstring, logging, torch_int -from ...utils.backbone_utils import BackboneMixin from .configuration_swin import SwinConfig @@ -1099,10 +1099,9 @@ def forward( Swin backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class SwinBackbone(SwinPreTrainedModel, BackboneMixin): +class SwinBackbone(BackboneMixin, SwinPreTrainedModel): def __init__(self, config: SwinConfig): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] self.embeddings = SwinEmbeddings(config) @@ -1110,7 +1109,7 @@ def __init__(self, config: SwinConfig): # Add layer norms to hidden states of out_features hidden_states_norms = {} - for stage, num_channels in zip(self._out_features, self.channels): + for stage, num_channels in zip(self.out_features, self.channels): hidden_states_norms[stage] = nn.LayerNorm(num_channels) self.hidden_states_norms = nn.ModuleDict(hidden_states_norms) diff --git a/src/transformers/models/swinv2/configuration_swinv2.py b/src/transformers/models/swinv2/configuration_swinv2.py index 87f6334a8b35..5d2c01db4902 100644 --- a/src/transformers/models/swinv2/configuration_swinv2.py +++ b/src/transformers/models/swinv2/configuration_swinv2.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""Swinv2 Transformer model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -147,9 +147,7 @@ def __init__( self.initializer_range = initializer_range self.encoder_stride = encoder_stride self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py index ae7558d3e0e4..5b9473608736 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -22,12 +22,12 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import ModelOutput, auto_docstring, logging, torch_int -from ...utils.backbone_utils import BackboneMixin from .configuration_swinv2 import Swinv2Config @@ -1188,10 +1188,9 @@ def forward( Swinv2 backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class Swinv2Backbone(Swinv2PreTrainedModel, BackboneMixin): +class Swinv2Backbone(BackboneMixin, Swinv2PreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))] self.embeddings = Swinv2Embeddings(config) diff --git a/src/transformers/models/table_transformer/configuration_table_transformer.py b/src/transformers/models/table_transformer/configuration_table_transformer.py index 6ef9f01c5ac4..f8c3b2e79320 100644 --- a/src/transformers/models/table_transformer/configuration_table_transformer.py +++ b/src/transformers/models/table_transformer/configuration_table_transformer.py @@ -13,10 +13,10 @@ # limitations under the License. """Table Transformer model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -33,9 +33,6 @@ class TableTransformerConfig(PreTrainedConfig): documentation from [`PreTrainedConfig`] for more information. Args: - use_timm_backbone (`bool`, *optional*, defaults to `True`): - Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`] - API. backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. 
Only used in case `use_timm_backbone` is set to `False` in which case it will default to `ResNetConfig()`. @@ -81,15 +78,6 @@ class TableTransformerConfig(PreTrainedConfig): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. position_embedding_type (`str`, *optional*, defaults to `"sine"`): Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, `True`): - Whether to use pretrained weights for the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. dilation (`bool`, *optional*, defaults to `False`): Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`. @@ -136,7 +124,6 @@ class TableTransformerConfig(PreTrainedConfig): # Copied from transformers.models.detr.configuration_detr.DetrConfig.__init__ def __init__( self, - use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, @@ -158,9 +145,6 @@ def __init__( init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", - backbone="resnet50", - use_pretrained_backbone=True, - backbone_kwargs=None, dilation=False, class_cost=1, bbox_cost=5, @@ -172,36 +156,25 @@ def __init__( eos_coefficient=0.1, **kwargs, ): - # We default to values which were previously hard-coded in the model. This enables configurability of the config - # while keeping the default behavior the same. - if use_timm_backbone and backbone_kwargs is None: - backbone_kwargs = {} - if dilation: - backbone_kwargs["output_stride"] = 16 - backbone_kwargs["out_indices"] = [1, 2, 3, 4] - backbone_kwargs["in_chans"] = num_channels - # Backwards compatibility - elif not use_timm_backbone and backbone in (None, "resnet50"): - if backbone_config is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - backbone = None - # set timm attributes to None - dilation = None - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_kwargs = kwargs.get("backbone_kwargs", {}) + timm_default_kwargs = { + "num_channels": backbone_kwargs.get("num_channels", num_channels), + "features_only": True, + "use_pretrained_backbone": False, + "out_indices": backbone_kwargs.get("out_indices", [1, 2, 3, 4]), + } + if dilation: + timm_default_kwargs["output_stride"] = backbone_kwargs.get("output_stride", 16) + + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_backbone="resnet50", + default_config_type="resnet", + default_config_kwargs={"out_features": ["stage4"]}, + timm_default_kwargs=timm_default_kwargs, + **kwargs, ) - self.use_timm_backbone = use_timm_backbone self.backbone_config = backbone_config self.num_channels = num_channels self.num_queries = num_queries @@ -223,10 +196,6 @@ def __init__( self.num_hidden_layers = encoder_layers self.auxiliary_loss = auxiliary_loss self.position_embedding_type = position_embedding_type - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.backbone_kwargs = backbone_kwargs - self.dilation = dilation # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost diff --git a/src/transformers/models/table_transformer/modeling_table_transformer.py b/src/transformers/models/table_transformer/modeling_table_transformer.py index fd331ae616cb..725f109555b5 100644 --- a/src/transformers/models/table_transformer/modeling_table_transformer.py +++ b/src/transformers/models/table_transformer/modeling_table_transformer.py @@ -21,6 +21,7 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput @@ -28,18 +29,11 @@ from ...utils import ( ModelOutput, auto_docstring, - is_timm_available, logging, - requires_backends, ) -from ...utils.backbone_utils import load_backbone from .configuration_table_transformer import TableTransformerConfig -if is_timm_available(): - from timm import create_model - - logger = logging.get_logger(__name__) @@ -210,47 +204,25 @@ def __init__(self, config): self.config = config - # For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API - if config.use_timm_backbone: - # We default to values which were previously hard-coded. This enables configurability from the config - # using backbone arguments, while keeping the default behavior the same. 
- requires_backends(self, ["timm"]) - kwargs = getattr(config, "backbone_kwargs", {}) - kwargs = {} if kwargs is None else kwargs.copy() - out_indices = kwargs.pop("out_indices", (1, 2, 3, 4)) - num_channels = kwargs.pop("in_chans", config.num_channels) - if config.dilation: - kwargs["output_stride"] = kwargs.get("output_stride", 16) - backbone = create_model( - config.backbone, - pretrained=config.use_pretrained_backbone, - features_only=True, - out_indices=out_indices, - in_chans=num_channels, - **kwargs, - ) - else: - backbone = load_backbone(config) + backbone = load_backbone(config) + self.intermediate_channel_sizes = backbone.channels # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) - self.model = backbone - self.intermediate_channel_sizes = ( - self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels - ) - backbone_model_type = None - if config.backbone is not None: - backbone_model_type = config.backbone - elif config.backbone_config is not None: - backbone_model_type = config.backbone_config.model_type - else: - raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") + # We used to load with timm library directly instead of the AutoBackbone API + # so we need to unwrap the `backbone._backbone` module to load weights without mismatch + is_timm_model = False + if hasattr(backbone, "_backbone"): + backbone = backbone._backbone + is_timm_model = True + self.model = backbone + backbone_model_type = config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): - if config.use_timm_backbone: + if is_timm_model: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: @@ -259,7 +231,9 @@ def __init__(self, config): def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps - features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps + features = self.model(pixel_values) + if isinstance(features, dict): + features = features.feature_maps out = [] for feature_map in features: diff --git a/src/transformers/models/textnet/configuration_textnet.py b/src/transformers/models/textnet/configuration_textnet.py index accd0bf77586..644b29fee964 100644 --- a/src/transformers/models/textnet/configuration_textnet.py +++ b/src/transformers/models/textnet/configuration_textnet.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""TextNet model configuration""" -from transformers import PreTrainedConfig -from transformers.utils import logging -from transformers.utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices +from ...backbone_utils import BackboneConfigMixin +from ...configuration_utils import PreTrainedConfig +from ...utils import logging logger = logging.get_logger(__name__) @@ -126,9 +126,7 @@ def __init__( self.depths = [len(layer) for layer in self.conv_layer_kernel_sizes] self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, 5)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["TextNetConfig"] diff --git a/src/transformers/models/textnet/modeling_textnet.py b/src/transformers/models/textnet/modeling_textnet.py index f5801d096ede..eb39f0e32c02 100644 --- a/src/transformers/models/textnet/modeling_textnet.py +++ b/src/transformers/models/textnet/modeling_textnet.py @@ -19,19 +19,17 @@ import torch.nn as nn from torch import Tensor -from transformers import PreTrainedModel -from transformers.activations import ACT2CLS -from transformers.modeling_outputs import ( +from ...activations import ACT2CLS +from ...backbone_utils import BackboneMixin +from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) -from transformers.models.textnet.configuration_textnet import TextNetConfig -from transformers.utils import logging -from transformers.utils.backbone_utils import BackboneMixin - -from ...utils import auto_docstring +from ...modeling_utils import PreTrainedModel +from ...utils import auto_docstring, logging +from .configuration_textnet import TextNetConfig logger = logging.get_logger(__name__) @@ -344,12 +342,11 @@ def forward( TextNet backbone, to be used with frameworks like DETR and MaskFormer. """ ) -class TextNetBackbone(TextNetPreTrainedModel, BackboneMixin): +class TextNetBackbone(BackboneMixin, TextNetPreTrainedModel): has_attentions = False def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.textnet = TextNetModel(config) self.num_features = config.hidden_sizes diff --git a/src/transformers/models/timm_backbone/configuration_timm_backbone.py b/src/transformers/models/timm_backbone/configuration_timm_backbone.py index 3d139651aee7..199024993b8b 100644 --- a/src/transformers/models/timm_backbone/configuration_timm_backbone.py +++ b/src/transformers/models/timm_backbone/configuration_timm_backbone.py @@ -14,6 +14,7 @@ """Configuration for Backbone models""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging @@ -21,7 +22,7 @@ logger = logging.get_logger(__name__) -class TimmBackboneConfig(PreTrainedConfig): +class TimmBackboneConfig(BackboneConfigMixin, PreTrainedConfig): r""" This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`]. @@ -37,8 +38,6 @@ class TimmBackboneConfig(PreTrainedConfig): The number of input channels. features_only (`bool`, *optional*, defaults to `True`): Whether to output only the features or also the logits. - use_pretrained_backbone (`bool`, *optional*, defaults to `True`): - Whether to use a pretrained backbone. 
out_indices (`list[int]`, *optional*): If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how many stages the model has). Will default to the last stage if unset. @@ -67,19 +66,46 @@ def __init__( backbone=None, num_channels=3, features_only=True, - use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, + output_stride=None, **kwargs, ): - super().__init__(**kwargs) self.backbone = backbone self.num_channels = num_channels self.features_only = features_only - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = True self.out_indices = out_indices if out_indices is not None else [-1] + self.output_stride = output_stride self.freeze_batch_norm_2d = freeze_batch_norm_2d + # self._out_features = kwargs.pop("out_features", None) + super().__init__(**kwargs) + + @property + def out_indices(self): + return self._out_indices + + @out_indices.setter + def out_indices(self, out_indices: tuple[int, ...] | list[int]): + """ + Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. + """ + self._out_indices = list(out_indices) if out_indices is not None else out_indices + if getattr(self, "stage_names", None) is not None: + self.set_output_features_output_indices(out_features=None, out_indices=out_indices) + + @property + def out_features(self): + return self._out_features + + @out_features.setter + def out_features(self, out_features: list[str]): + """ + Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. + """ + self._out_features = out_features + if getattr(self, "stage_names", None) is not None: + self.set_output_features_output_indices(out_features=out_features, out_indices=None) + __all__ = ["TimmBackboneConfig"] diff --git a/src/transformers/models/timm_backbone/modeling_timm_backbone.py b/src/transformers/models/timm_backbone/modeling_timm_backbone.py index 63309276fc41..93cfce7edcd3 100644 --- a/src/transformers/models/timm_backbone/modeling_timm_backbone.py +++ b/src/transformers/models/timm_backbone/modeling_timm_backbone.py @@ -17,10 +17,10 @@ from torch import Tensor, nn from ... import initialization as init +from ...backbone_utils import BackboneMixin from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, requires_backends -from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig @@ -28,7 +28,7 @@ import timm -class TimmBackbone(PreTrainedModel, BackboneMixin): +class TimmBackbone(BackboneMixin, PreTrainedModel): """ Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably with the other models in the library keeping the same API. @@ -41,8 +41,6 @@ class TimmBackbone(PreTrainedModel, BackboneMixin): def __init__(self, config, **kwargs): requires_backends(self, "timm") - super().__init__(config) - self.config = config if config.backbone is None: raise ValueError("backbone is not set in the config. Please set it to a timm model name.") @@ -50,25 +48,29 @@ def __init__(self, config, **kwargs): if hasattr(config, "out_features") and config.out_features is not None: raise ValueError("out_features is not supported by TimmBackbone. 
Please use out_indices instead.") - pretrained = getattr(config, "use_pretrained_backbone", None) - if pretrained is None: - raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.") - # We just take the final layer by default. This matches the default for the transformers models. out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,) - + pretrained = kwargs.pop("pretrained", False) in_chans = kwargs.pop("in_chans", config.num_channels) - self._backbone = timm.create_model( + + backbone = timm.create_model( config.backbone, pretrained=pretrained, # This is currently not possible for transformer architectures. features_only=config.features_only, in_chans=in_chans, out_indices=out_indices, + output_stride=config.output_stride, **kwargs, ) - # Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively + # Needs to be called after creating timm model, because `super()` will try to infer + # `stage_names` from model architecture + super().__init__(config, timm_backbone=backbone) + self._backbone = backbone + + # Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of + # provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively if getattr(config, "freeze_batch_norm_2d", False): self.freeze_batch_norm_2d() @@ -78,7 +80,6 @@ def __init__(self, config, **kwargs): layer["module"]: str(layer["index"]) for layer in self._backbone.feature_info.get_dicts() } self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)} - super()._init_backbone(config) self.post_init() @@ -87,23 +88,16 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): requires_backends(cls, ["vision", "timm"]) config = kwargs.pop("config", TimmBackboneConfig()) - - use_timm = kwargs.pop("use_timm_backbone", True) - if not use_timm: - raise ValueError("use_timm_backbone must be True for timm backbones") - num_channels = kwargs.pop("num_channels", config.num_channels) features_only = kwargs.pop("features_only", config.features_only) - use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone) out_indices = kwargs.pop("out_indices", config.out_indices) config = TimmBackboneConfig( backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, - use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, ) - return super()._from_config(config, **kwargs) + return super()._from_config(config, pretrained=True, **kwargs) def freeze_batch_norm_2d(self): timm.utils.model.freeze_batch_norm_2d(self._backbone) @@ -118,10 +112,6 @@ def _init_weights(self, module): if hasattr(module, "init_non_persistent_buffers"): module.init_non_persistent_buffers() elif isinstance(module, nn.BatchNorm2d): - # Skip initialization if using pretrained backbone - buffers are already loaded from checkpoint - if self.config.use_pretrained_backbone: - return - # For non-pretrained models, always initialize buffers (handles both meta device and to_empty() cases) running_mean = getattr(module, "running_mean", None) if running_mean is not None: diff --git a/src/transformers/models/tvp/configuration_tvp.py b/src/transformers/models/tvp/configuration_tvp.py index 66c01d9efdc3..f005b58bc007 100644 --- 
a/src/transformers/models/tvp/configuration_tvp.py +++ b/src/transformers/models/tvp/configuration_tvp.py @@ -13,10 +13,10 @@ # limitations under the License. """TVP model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto import CONFIG_MAPPING, AutoConfig +from ..auto import AutoConfig logger = logging.get_logger(__name__) @@ -36,18 +36,6 @@ class TvpConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. distance_loss_weight (`float`, *optional*, defaults to 1.0): The weight of distance loss. duration_loss_weight (`float`, *optional*, defaults to 0.1): @@ -105,10 +93,6 @@ class TvpConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, distance_loss_weight=1.0, duration_loss_weight=0.1, visual_prompter_type="framepad", @@ -133,27 +117,14 @@ def __init__( pad_token_id=None, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="resnet", + default_config_kwargs={"out_features": ["stage4"]}, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.distance_loss_weight = distance_loss_weight self.duration_loss_weight = duration_loss_weight self.visual_prompter_type = visual_prompter_type diff --git a/src/transformers/models/tvp/modeling_tvp.py b/src/transformers/models/tvp/modeling_tvp.py index 5f94c40c6d64..917556c31fff 100644 --- a/src/transformers/models/tvp/modeling_tvp.py +++ b/src/transformers/models/tvp/modeling_tvp.py @@ -21,11 +21,11 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ModelOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import load_backbone from .configuration_tvp import TvpConfig diff --git a/src/transformers/models/upernet/configuration_upernet.py b/src/transformers/models/upernet/configuration_upernet.py index 1634babd0f3f..bc8d16f37ccb 100644 --- a/src/transformers/models/upernet/configuration_upernet.py +++ b/src/transformers/models/upernet/configuration_upernet.py @@ -13,10 +13,10 @@ # limitations under the License. """UperNet model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig +from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) @@ -35,18 +35,6 @@ class UperNetConfig(PreTrainedConfig): Args: backbone_config (`PreTrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_size (`int`, *optional*, defaults to 512): The number of hidden units in the convolutional layers. initializer_range (`float`, *optional*, defaults to 0.02): @@ -87,10 +75,6 @@ class UperNetConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], @@ -103,27 +87,16 @@ def __init__( loss_ignore_index=255, **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.") - backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="resnet", + default_config_kwargs={ + "out_features": ["stage1", "stage2", "stage3", "stage4"], + }, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.hidden_size = hidden_size self.initializer_range = initializer_range self.pool_scales = pool_scales diff --git a/src/transformers/models/upernet/modeling_upernet.py b/src/transformers/models/upernet/modeling_upernet.py index 9f7d2bf406d9..bf497134646f 100644 --- a/src/transformers/models/upernet/modeling_upernet.py +++ b/src/transformers/models/upernet/modeling_upernet.py @@ -17,10 +17,10 @@ from torch import nn from torch.nn import CrossEntropyLoss +from ...backbone_utils import load_backbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring -from ...utils.backbone_utils import load_backbone from .configuration_upernet import UperNetConfig diff --git a/src/transformers/models/vitdet/configuration_vitdet.py b/src/transformers/models/vitdet/configuration_vitdet.py index bb62b04eaab4..89e24a14ba01 100644 --- a/src/transformers/models/vitdet/configuration_vitdet.py +++ b/src/transformers/models/vitdet/configuration_vitdet.py @@ -13,9 +13,9 @@ # limitations under the License. """VitDet model configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -147,9 +147,7 @@ def __init__( self.window_size = window_size self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["VitDetConfig"] diff --git a/src/transformers/models/vitdet/modeling_vitdet.py b/src/transformers/models/vitdet/modeling_vitdet.py index 11784f7bd748..7a536251fcba 100644 --- a/src/transformers/models/vitdet/modeling_vitdet.py +++ b/src/transformers/models/vitdet/modeling_vitdet.py @@ -21,11 +21,11 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput from ...modeling_utils import PreTrainedModel from ...utils import auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from .configuration_vitdet import VitDetConfig @@ -683,10 +683,9 @@ def forward( ViTDet backbone, to be used with frameworks like Mask R-CNN. """ ) -class VitDetBackbone(VitDetPreTrainedModel, BackboneMixin): +class VitDetBackbone(BackboneMixin, VitDetPreTrainedModel): def __init__(self, config): super().__init__(config) - super()._init_backbone(config) self.embeddings = VitDetEmbeddings(config) self.encoder = VitDetEncoder(config) diff --git a/src/transformers/models/vitmatte/configuration_vitmatte.py b/src/transformers/models/vitmatte/configuration_vitmatte.py index 648a8eb731ee..e4b791a1d424 100644 --- a/src/transformers/models/vitmatte/configuration_vitmatte.py +++ b/src/transformers/models/vitmatte/configuration_vitmatte.py @@ -13,10 +13,10 @@ # limitations under the License. """VitMatte model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig +from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) @@ -35,18 +35,6 @@ class VitMatteConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `VitDetConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_size (`int`, *optional*, defaults to 384): The number of input channels of the decoder. batch_norm_eps (`float`, *optional*, defaults to 1e-05): @@ -79,10 +67,6 @@ class VitMatteConfig(PreTrainedConfig): def __init__( self, backbone_config: PreTrainedConfig | None = None, - backbone=None, - use_pretrained_backbone=False, - use_timm_backbone=False, - backbone_kwargs=None, hidden_size: int = 384, batch_norm_eps: float = 1e-5, initializer_range: float = 0.02, @@ -90,27 +74,14 @@ def __init__( fusion_hidden_sizes: list[int] = [256, 128, 64, 32], **kwargs, ): - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. 
Initializing the config with the default `VitDet` backbone.") - backbone_config = CONFIG_MAPPING["vitdet"](out_features=["stage4"]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="vitdet", + default_config_kwargs={"out_features": ["stage4"]}, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs self.batch_norm_eps = batch_norm_eps self.hidden_size = hidden_size self.initializer_range = initializer_range diff --git a/src/transformers/models/vitmatte/modeling_vitmatte.py b/src/transformers/models/vitmatte/modeling_vitmatte.py index 53b361bd9075..658d90e8aa85 100644 --- a/src/transformers/models/vitmatte/modeling_vitmatte.py +++ b/src/transformers/models/vitmatte/modeling_vitmatte.py @@ -19,9 +19,9 @@ from torch import nn from ... import initialization as init +from ...backbone_utils import load_backbone from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring -from ...utils.backbone_utils import load_backbone from .configuration_vitmatte import VitMatteConfig diff --git a/src/transformers/models/vitpose/configuration_vitpose.py b/src/transformers/models/vitpose/configuration_vitpose.py index 5b6a0c9acf47..8bcbd29d8343 100644 --- a/src/transformers/models/vitpose/configuration_vitpose.py +++ b/src/transformers/models/vitpose/configuration_vitpose.py @@ -13,10 +13,10 @@ # limitations under the License. """VitPose model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import verify_backbone_config_arguments -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig +from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) @@ -35,18 +35,6 @@ class VitPoseConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `VitPoseBackboneConfig()`): The configuration of the backbone model. Currently, only `backbone_config` with `vitpose_backbone` as `model_type` is supported. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. - use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - use_timm_backbone (`bool`, *optional*, defaults to `False`): - Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers - library. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. 
Cannot be specified if `backbone_config` is set. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. scale_factor (`int`, *optional*, defaults to 4): @@ -76,44 +64,19 @@ class VitPoseConfig(PreTrainedConfig): def __init__( self, backbone_config: PreTrainedConfig | None = None, - backbone: str | None = None, - use_pretrained_backbone: bool = False, - use_timm_backbone: bool = False, - backbone_kwargs: dict | None = None, initializer_range: float = 0.02, scale_factor: int = 4, use_simple_decoder: bool = True, **kwargs, ): - if use_pretrained_backbone: - logger.info( - "`use_pretrained_backbone` is `True`. For the pure inference purpose of VitPose weight do not set this value." - ) - if use_timm_backbone: - raise ValueError("use_timm_backbone set `True` is not supported at the moment.") - - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `VitPose` backbone.") - backbone_config = CONFIG_MAPPING["vitpose_backbone"](out_indices=[4]) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - verify_backbone_config_arguments( - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - backbone=backbone, + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( backbone_config=backbone_config, - backbone_kwargs=backbone_kwargs, + default_config_type="vitpose_backbone", + default_config_kwargs={"out_indices": [4]}, + **kwargs, ) self.backbone_config = backbone_config - self.backbone = backbone - self.use_pretrained_backbone = use_pretrained_backbone - self.use_timm_backbone = use_timm_backbone - self.backbone_kwargs = backbone_kwargs - self.initializer_range = initializer_range self.scale_factor = scale_factor self.use_simple_decoder = use_simple_decoder diff --git a/src/transformers/models/vitpose/modeling_vitpose.py b/src/transformers/models/vitpose/modeling_vitpose.py index 9bafaac5b86a..3e47f66bd03e 100644 --- a/src/transformers/models/vitpose/modeling_vitpose.py +++ b/src/transformers/models/vitpose/modeling_vitpose.py @@ -19,11 +19,11 @@ from torch import nn from ... import initialization as init +from ...backbone_utils import load_backbone from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...processing_utils import Unpack from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging -from ...utils.backbone_utils import load_backbone from ...utils.generic import can_return_tuple from .configuration_vitpose import VitPoseConfig diff --git a/src/transformers/models/vitpose_backbone/configuration_vitpose_backbone.py b/src/transformers/models/vitpose_backbone/configuration_vitpose_backbone.py index 0254cac59632..11c26a4b2c01 100644 --- a/src/transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +++ b/src/transformers/models/vitpose_backbone/configuration_vitpose_backbone.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""VitPose backbone configuration""" +from ...backbone_utils import BackboneConfigMixin from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices logger = logging.get_logger(__name__) @@ -130,9 +130,7 @@ def __init__( self.num_channels = num_channels self.qkv_bias = qkv_bias self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)] - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) + self.set_output_features_output_indices(out_indices=out_indices, out_features=out_features) __all__ = ["VitPoseBackboneConfig"] diff --git a/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py b/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py index ca341e85c8b2..411391c782fa 100644 --- a/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +++ b/src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py @@ -26,12 +26,12 @@ from ... import initialization as init from ...activations import ACT2FN +from ...backbone_utils import BackboneMixin from ...modeling_layers import GradientCheckpointingLayer from ...modeling_outputs import BackboneOutput, BaseModelOutput from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from ...processing_utils import Unpack from ...utils import TransformersKwargs, auto_docstring, logging -from ...utils.backbone_utils import BackboneMixin from ...utils.generic import check_model_inputs from .configuration_vitpose_backbone import VitPoseBackboneConfig @@ -375,10 +375,9 @@ def _init_weights(self, module: nn.Linear | nn.Conv2d | nn.LayerNorm | VitPoseBa The VitPose backbone useful for downstream tasks. """ ) -class VitPoseBackbone(VitPoseBackbonePreTrainedModel, BackboneMixin): +class VitPoseBackbone(BackboneMixin, VitPoseBackbonePreTrainedModel): def __init__(self, config: VitPoseBackboneConfig): super().__init__(config) - super()._init_backbone(config) self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)] self.embeddings = VitPoseBackboneEmbeddings(config) diff --git a/src/transformers/models/zoedepth/configuration_zoedepth.py b/src/transformers/models/zoedepth/configuration_zoedepth.py index aa36fb574743..e718b2c483e9 100644 --- a/src/transformers/models/zoedepth/configuration_zoedepth.py +++ b/src/transformers/models/zoedepth/configuration_zoedepth.py @@ -13,9 +13,10 @@ # limitations under the License. """ZoeDepth model configuration""" +from ...backbone_utils import consolidate_backbone_kwargs_to_config from ...configuration_utils import PreTrainedConfig from ...utils import logging -from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig +from ..auto.configuration_auto import AutoConfig logger = logging.get_logger(__name__) @@ -38,15 +39,6 @@ class ZoeDepthConfig(PreTrainedConfig): Args: backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `BeitConfig()`): The configuration of the backbone model. - backbone (`str`, *optional*): - Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this - will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone` - is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights. 
- use_pretrained_backbone (`bool`, *optional*, defaults to `False`): - Whether to use pretrained weights for the backbone. - backbone_kwargs (`dict`, *optional*): - Keyword arguments to be passed to AutoBackbone when loading from a checkpoint - e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. @@ -137,9 +129,6 @@ class ZoeDepthConfig(PreTrainedConfig): def __init__( self, backbone_config=None, - backbone=None, - use_pretrained_backbone=False, - backbone_kwargs=None, hidden_act="gelu", initializer_range=0.02, batch_norm_eps=1e-05, @@ -174,36 +163,24 @@ def __init__( if attractor_kind not in ["mean", "sum"]: raise ValueError("Attractor_kind must be one of ['mean', 'sum']") - if use_pretrained_backbone: - raise ValueError("Pretrained backbones are not supported yet.") - - if backbone_config is not None and backbone is not None: - raise ValueError("You can't specify both `backbone` and `backbone_config`.") - - if backbone_config is None and backbone is None: - logger.info("`backbone_config` is `None`. Initializing the config with the default `BEiT` backbone.") - backbone_config = CONFIG_MAPPING["beit"]( - image_size=384, - num_hidden_layers=24, - hidden_size=1024, - intermediate_size=4096, - num_attention_heads=16, - use_relative_position_bias=True, - reshape_hidden_states=False, - out_features=["stage6", "stage12", "stage18", "stage24"], - ) - elif isinstance(backbone_config, dict): - backbone_model_type = backbone_config.get("model_type") - config_class = CONFIG_MAPPING[backbone_model_type] - backbone_config = config_class.from_dict(backbone_config) - - if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: - raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") + backbone_config, kwargs = consolidate_backbone_kwargs_to_config( + backbone_config=backbone_config, + default_config_type="beit", + default_config_kwargs={ + "image_size": 384, + "num_hidden_layers": 24, + "hidden_size": 1024, + "intermediate_size": 4096, + "num_attention_heads": 16, + "use_relative_position_bias": True, + "reshape_hidden_states": False, + "out_features": ["stage6", "stage12", "stage18", "stage24"], + }, + **kwargs, + ) self.backbone_config = backbone_config - self.backbone = backbone self.hidden_act = hidden_act - self.use_pretrained_backbone = use_pretrained_backbone self.initializer_range = initializer_range self.batch_norm_eps = batch_norm_eps self.readout_type = readout_type diff --git a/src/transformers/models/zoedepth/modeling_zoedepth.py b/src/transformers/models/zoedepth/modeling_zoedepth.py index c5e5fe8bd0a9..d385ca4080c2 100644 --- a/src/transformers/models/zoedepth/modeling_zoedepth.py +++ b/src/transformers/models/zoedepth/modeling_zoedepth.py @@ -21,10 +21,10 @@ from ... 
import initialization as init from ...activations import ACT2FN +from ...backbone_utils import load_backbone from ...modeling_outputs import DepthEstimatorOutput from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, logging -from ...utils.backbone_utils import load_backbone from .configuration_zoedepth import ZoeDepthConfig diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 11c1df8dc138..f7eec7f2a750 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -31,7 +31,6 @@ parse_docstring, set_min_indent, ) -from .backbone_utils import BackboneConfigMixin, BackboneMixin from .chat_template_utils import DocstringParsingException, TypeHintParsingException, get_json_schema from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( diff --git a/src/transformers/utils/backbone_utils.py b/src/transformers/utils/backbone_utils.py index 8580aecf7a2b..2947b9a5a953 100644 --- a/src/transformers/utils/backbone_utils.py +++ b/src/transformers/utils/backbone_utils.py @@ -1,379 +1,19 @@ -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +import warnings -"""Collection of utils to be used by backbones and their components.""" +from ..backbone_utils import BackboneConfigMixin, BackboneMixin -import enum -import inspect -from collections.abc import Iterable -from typing import TYPE_CHECKING, Union - -if TYPE_CHECKING: - from ..configuration_utils import PreTrainedConfig - - -class BackboneType(enum.Enum): - TIMM = "timm" - TRANSFORMERS = "transformers" - - -def verify_out_features_out_indices( - out_features: Iterable[str] | None, out_indices: Iterable[int] | None, stage_names: Iterable[str] | None -): - """ - Verify that out_indices and out_features are valid for the given stage_names. 
- """ - if stage_names is None: - raise ValueError("Stage_names must be set for transformers backbones") - - if out_features is not None: - if not isinstance(out_features, (list,)): - raise ValueError(f"out_features must be a list got {type(out_features)}") - if any(feat not in stage_names for feat in out_features): - raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}") - if len(out_features) != len(set(out_features)): - raise ValueError(f"out_features must not contain any duplicates, got {out_features}") - if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]): - raise ValueError( - f"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}" - ) - - if out_indices is not None: - if not isinstance(out_indices, list): - raise ValueError(f"out_indices must be a list, got {type(out_indices)}") - # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,] - positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices) - if any(idx for idx in positive_indices if idx not in range(len(stage_names))): - raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}") - if len(positive_indices) != len(set(positive_indices)): - msg = f"out_indices must not contain any duplicates, got {out_indices}" - msg += f"(equivalent to {positive_indices}))" if positive_indices != out_indices else "" - raise ValueError(msg) - if positive_indices != tuple(sorted(positive_indices)): - sorted_negative = [idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0])] - raise ValueError( - f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}" - ) - - if out_features is not None and out_indices is not None: - if len(out_features) != len(out_indices): - raise ValueError("out_features and out_indices should have the same length if both are set") - if out_features != [stage_names[idx] for idx in out_indices]: - raise ValueError("out_features and out_indices should correspond to the same stages if both are set") - - -def _align_output_features_output_indices( - out_features: list[str] | None, - out_indices: list[int] | tuple[int, ...] | None, - stage_names: list[str], -): - """ - Finds the corresponding `out_features` and `out_indices` for the given `stage_names`. - - The logic is as follows: - - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the - `out_indices`. - - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the - `out_features`. - - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage. - - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned. - - Args: - out_features (`list[str]`): The names of the features for the backbone to output. - out_indices (`list[int]` or `tuple[int]`): The indices of the features for the backbone to output. - stage_names (`list[str]`): The names of the stages of the backbone. 
- """ - if out_indices is None and out_features is None: - out_indices = [len(stage_names) - 1] - out_features = [stage_names[-1]] - elif out_indices is None and out_features is not None: - out_indices = [stage_names.index(layer) for layer in out_features] - elif out_features is None and out_indices is not None: - out_features = [stage_names[idx] for idx in out_indices] - return out_features, out_indices - - -def get_aligned_output_features_output_indices( - out_features: list[str] | None, - out_indices: list[int] | tuple[int] | None, - stage_names: list[str], -) -> tuple[list[str], list[int]]: - """ - Get the `out_features` and `out_indices` so that they are aligned. - - The logic is as follows: - - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the - `out_indices`. - - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the - `out_features`. - - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage. - - `out_indices` and `out_features` set: they are verified to be aligned. - - Args: - out_features (`list[str]`): The names of the features for the backbone to output. - out_indices (`list[int]` or `tuple[int]`): The indices of the features for the backbone to output. - stage_names (`list[str]`): The names of the stages of the backbone. - """ - out_indices = list(out_indices) if out_indices is not None else None - # First verify that the out_features and out_indices are valid - verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names) - output_features, output_indices = _align_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=stage_names +class BackboneConfigMixin(BackboneConfigMixin): + warnings.warn( + "Importing `BackboneConfigMixin` from `utils/backbone_utils.py` is deprecated and will be removed in " + "Transformers v5.10. Import as `from transformers.backbone_utils import BackboneConfigMixin` instead.", + FutureWarning, ) - # Verify that the aligned out_features and out_indices are valid - verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names) - return output_features, output_indices - - -class BackboneMixin: - backbone_type: BackboneType | None = None - - # Attribute to indicate if the backbone has attention and can return attention outputs. - # Should be set to `False` for conv-based models to be able to run `forward_with_filtered_kwargs` - has_attentions: bool = True - - def _init_timm_backbone(self, config) -> None: - """ - Initialize the backbone model from timm The backbone must already be loaded to self._backbone - """ - if getattr(self, "_backbone", None) is None: - raise ValueError("self._backbone must be set before calling _init_timm_backbone") - - # These will diagree with the defaults for the transformers models e.g. 
for resnet50 - # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] - # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4'] - self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info] - self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info] - - # In some timm versions, out_indices reflects the input type of out_indices on the `create_model` call, - # in later versions >= 1, it is always a tuple - out_indices = list(self._backbone.feature_info.out_indices) - out_features = self._backbone.feature_info.module_name() - - # We verify the out indices and out features are valid - verify_out_features_out_indices( - out_features=out_features, out_indices=out_indices, stage_names=self.stage_names - ) - self._out_features, self._out_indices = out_features, out_indices - - def _init_transformers_backbone(self, config) -> None: - stage_names = getattr(config, "stage_names") - out_features = getattr(config, "out_features", None) - out_indices = getattr(config, "out_indices", None) - - self.stage_names = stage_names - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=out_indices, stage_names=stage_names - ) - # Number of channels for each stage. This is set in the transformer backbone model init - self.num_features = None - - def _init_backbone(self, config) -> None: - """ - Method to initialize the backbone. This method is called by the constructor of the base class after the - pretrained model weights have been loaded. - """ - self.config = config - - self.use_timm_backbone = getattr(config, "use_timm_backbone", False) - self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS - - if self.backbone_type == BackboneType.TIMM: - self._init_timm_backbone(config) - elif self.backbone_type == BackboneType.TRANSFORMERS: - self._init_transformers_backbone(config) - else: - raise ValueError(f"backbone_type {self.backbone_type} not supported.") - @property - def out_features(self): - return self._out_features - @out_features.setter - def out_features(self, out_features: list[str]): - """ - Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. - """ - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=None, stage_names=self.stage_names - ) - - @property - def out_indices(self): - return self._out_indices - - @out_indices.setter - def out_indices(self, out_indices: tuple[int] | list[int]): - """ - Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. - """ - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=None, out_indices=out_indices, stage_names=self.stage_names - ) - - @property - def out_feature_channels(self): - # the current backbones will output the number of channels for each stage - # even if that stage is not in the out_features list. 
- return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} - - @property - def channels(self): - return [self.out_feature_channels[name] for name in self.out_features] - - def forward_with_filtered_kwargs(self, *args, **kwargs): - if not self.has_attentions: - kwargs.pop("output_attentions", None) - if self.backbone_type == BackboneType.TIMM: - signature = dict(inspect.signature(self.forward).parameters) - kwargs = {k: v for k, v in kwargs.items() if k in signature} - return self(*args, **kwargs) - - def forward( - self, - pixel_values, - output_hidden_states: bool | None = None, - output_attentions: bool | None = None, - return_dict: bool | None = None, - ): - raise NotImplementedError("This method should be implemented by the derived class.") - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PreTrainedConfig` to - include the `out_features` and `out_indices` attributes. - """ - output = super().to_dict() - output["out_features"] = output.pop("_out_features") - output["out_indices"] = output.pop("_out_indices") - return output - - -class BackboneConfigMixin: - """ - A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations. - """ - - @property - def out_features(self): - return self._out_features - - @out_features.setter - def out_features(self, out_features: list[str]): - """ - Set the out_features attribute. This will also update the out_indices attribute to match the new out_features. - """ - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=out_features, out_indices=None, stage_names=self.stage_names - ) - - @property - def out_indices(self): - return self._out_indices - - @out_indices.setter - def out_indices(self, out_indices: tuple[int, ...] | list[int]): - """ - Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices. - """ - self._out_features, self._out_indices = get_aligned_output_features_output_indices( - out_features=None, out_indices=out_indices, stage_names=self.stage_names - ) - - def to_dict(self): - """ - Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PreTrainedConfig` to - include the `out_features` and `out_indices` attributes. - """ - output = super().to_dict() - output["out_features"] = output.pop("_out_features") - output["out_indices"] = output.pop("_out_indices") - return output - - -def load_backbone(config): - """ - Loads the backbone model from a config object. - - If the config is from the backbone model itself, then we return a backbone model with randomly initialized - weights. - - If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights - if specified. 
- """ - from transformers import AutoBackbone, AutoConfig - - backbone_config = getattr(config, "backbone_config", None) - use_timm_backbone = getattr(config, "use_timm_backbone", None) - use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None) - backbone_checkpoint = getattr(config, "backbone", None) - backbone_kwargs = getattr(config, "backbone_kwargs", None) - backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs - - if backbone_kwargs and backbone_config is not None: - raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") - - # If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired - # behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config? - if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None: - raise ValueError("Cannot specify both config.backbone_config and config.backbone") - - # If any of the following are set, then the config passed in is from a model which contains a backbone. - if backbone_config is None and use_timm_backbone is None and backbone_checkpoint is None: - return AutoBackbone.from_config(config=config, **backbone_kwargs) - - # config from the parent model that has a backbone - if use_timm_backbone: - if backbone_checkpoint is None: - raise ValueError("config.backbone must be set if use_timm_backbone is True") - # Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone - # to determine whether to load the pretrained weights. - backbone = AutoBackbone.from_pretrained( - backbone_checkpoint, - use_timm_backbone=use_timm_backbone, - use_pretrained_backbone=use_pretrained_backbone, - **backbone_kwargs, - ) - elif use_pretrained_backbone: - if backbone_checkpoint is None: - raise ValueError("config.backbone must be set if use_pretrained_backbone is True") - backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs) - else: - if backbone_config is None and backbone_checkpoint is None: - raise ValueError("Either config.backbone_config or config.backbone must be set") - if backbone_config is None: - backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs) - backbone = AutoBackbone.from_config(config=backbone_config) - return backbone - - -def verify_backbone_config_arguments( - use_timm_backbone: bool, - use_pretrained_backbone: bool, - backbone: str | None, - backbone_config: Union[dict, "PreTrainedConfig"] | None, - backbone_kwargs: dict | None, -): - """ - Verify that the config arguments to be passed to load_backbone are valid - """ - if backbone_config is not None and backbone is not None: - raise ValueError("You can't specify both `backbone` and `backbone_config`.") - - if backbone_config is not None and use_timm_backbone: - raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.") - - if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None: - raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.") +class BackboneMixin(BackboneMixin): + warnings.warn( + "Importing `BackboneMixin` from `utils/backbone_utils.py` is deprecated and will be removed in " + "Transformers v5.10. 
Import as `from transformers.backbone_utils import BackboneMixin` instead.", + FutureWarning, + ) diff --git a/tests/models/conditional_detr/test_modeling_conditional_detr.py b/tests/models/conditional_detr/test_modeling_conditional_detr.py index 3f1b843584eb..39815656bdd2 100644 --- a/tests/models/conditional_detr/test_modeling_conditional_detr.py +++ b/tests/models/conditional_detr/test_modeling_conditional_detr.py @@ -13,6 +13,7 @@ # limitations under the License. """Testing suite for the PyTorch Conditional DETR model.""" +import copy import inspect import math import unittest @@ -434,75 +435,56 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "ConditionalDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - elif model_class.__name__ == "ConditionalDetrForSegmentation": - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.conditional_detr.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) - @require_timm - def test_hf_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "ConditionalDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - 
self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - elif model_class.__name__ == "ConditionalDetrForSegmentation": - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.conditional_detr.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + if model_class.__name__ == "ConditionalDetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) + elif model_class.__name__ == "ConditionalDetrForSegmentation": + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.conditional_detr.model.backbone.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Now load a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) TOLERANCE = 1e-4 diff --git a/tests/models/d_fine/test_modeling_d_fine.py b/tests/models/d_fine/test_modeling_d_fine.py index e7446f72a922..944a456b72d1 100644 --- a/tests/models/d_fine/test_modeling_d_fine.py +++ b/tests/models/d_fine/test_modeling_d_fine.py @@ -14,6 +14,7 @@ # limitations under the License. 
"""Testing suite for the PyTorch D-FINE model.""" +import copy import inspect import math import tempfile @@ -566,69 +567,53 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.encoder_in_channels = [24, 40, 432] - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DFineForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) - - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DFineForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "DFineForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. 
Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["encoder_in_channels"] = [24, 40, 432] + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_accelerator diff --git a/tests/models/dab_detr/test_modeling_dab_detr.py b/tests/models/dab_detr/test_modeling_dab_detr.py index 6d7c9836ffe8..3e1f30031e8c 100644 --- a/tests/models/dab_detr/test_modeling_dab_detr.py +++ b/tests/models/dab_detr/test_modeling_dab_detr.py @@ -13,6 +13,7 @@ # limitations under the License. """Testing suite for the PyTorch DAB-DETR model.""" +import copy import inspect import math import tempfile @@ -707,36 +708,42 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DabDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "DabDetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only 
`backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) TOLERANCE = 1e-4 diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py index b20ba8850e13..cc1c03a4049e 100644 --- a/tests/models/deformable_detr/test_modeling_deformable_detr.py +++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py @@ -13,6 +13,7 @@ # limitations under the License. """Testing suite for the PyTorch Deformable DETR model.""" +import copy import inspect import math import unittest @@ -528,68 +529,52 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [1, 2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DeformableDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 4) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 4) - - self.assertTrue(outputs) - - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [1, 2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DeformableDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 4) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 4) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + 
+ if model_class.__name__ == "DeformableDetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 4) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.intermediate_channel_sizes), 4) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) def test_two_stage_training(self): model_class = DeformableDetrForObjectDetection diff --git a/tests/models/depth_anything/test_modeling_depth_anything.py b/tests/models/depth_anything/test_modeling_depth_anything.py index ad120f48e822..858ff5c450c1 100644 --- a/tests/models/depth_anything/test_modeling_depth_anything.py +++ b/tests/models/depth_anything/test_modeling_depth_anything.py @@ -206,21 +206,26 @@ def _validate_backbone_init(): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. 
Let's test that there is no regression # Load a timm backbone - config.backbone = "resnet18" - config.use_pretrained_backbone = True - config.use_timm_backbone = True - config.backbone_config = None - # For transformer backbones we can't set the out_indices or just return the features - config.backbone_kwargs = {"out_indices": (-2, -1)} + config_dict = config.to_dict() + config_dict["backbone"] = "resnet18" + config_dict["use_pretrained_backbone"] = True + config_dict["use_timm_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": (-2, -1)} + config = config.__class__(**config_dict) _validate_backbone_init() # Load a HF backbone - config.backbone = "facebook/dinov2-small" - config.use_pretrained_backbone = True - config.use_timm_backbone = False - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [-2, -1]} + config_dict = config.to_dict() + config_dict["backbone"] = "facebook/dinov2-small" + config_dict["use_pretrained_backbone"] = True + config_dict["use_timm_backbone"] = False + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [-2, -1]} + config = config.__class__(**config_dict) _validate_backbone_init() diff --git a/tests/models/detr/test_modeling_detr.py b/tests/models/detr/test_modeling_detr.py index 0b4bae86533f..a0a5c30bda00 100644 --- a/tests/models/detr/test_modeling_detr.py +++ b/tests/models/detr/test_modeling_detr.py @@ -13,6 +13,7 @@ # limitations under the License. """Testing suite for the PyTorch DETR model.""" +import copy import inspect import math import unittest @@ -441,74 +442,55 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels + 1, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - elif model_class.__name__ == "DetrForSegmentation": - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.detr.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) - - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - 
config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "DetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels + 1, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - elif model_class.__name__ == "DetrForSegmentation": - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.detr.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + if model_class.__name__ == "DetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels + 1, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) + elif model_class.__name__ == "DetrForSegmentation": + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.detr.model.backbone.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. 
Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 34fcda05c9d1..25f2f9462a6c 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -258,20 +258,28 @@ def _validate_backbone_init(): self.assertEqual(len(model.backbone.out_indices), 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - config.use_pretrained_backbone = True - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [-2, -1]} + config_dict = config.to_dict() + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [-2, -1]} # Force load_backbone path - config.is_hybrid = False + config_dict["is_hybrid"] = False # Load a timm backbone - config.backbone = "resnet18" - config.use_timm_backbone = True + config_dict["backbone"] = "resnet18" + config_dict["use_timm_backbone"] = True + config = config.__class__(**config_dict) _validate_backbone_init() # Load a HF backbone - config.backbone = "facebook/dinov2-small" - config.use_timm_backbone = False + config_dict = config.to_dict() + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [-2, -1]} + config_dict["is_hybrid"] = False + config_dict["backbone"] = "facebook/dinov2-small" + config_dict["use_timm_backbone"] = False + config = config.__class__(**config_dict) _validate_backbone_init() @slow diff --git a/tests/models/grounding_dino/test_modeling_grounding_dino.py b/tests/models/grounding_dino/test_modeling_grounding_dino.py index 7002676fe4f7..d1424a8075a1 100644 --- a/tests/models/grounding_dino/test_modeling_grounding_dino.py +++ b/tests/models/grounding_dino/test_modeling_grounding_dino.py @@ -14,6 +14,7 @@ """Testing suite for the PyTorch Grounding DINO model.""" import collections +import copy import inspect import math import re @@ -531,59 +532,47 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "input_ids"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.use_timm_backbone = True - config.backbone_config = None - config.backbone_kwargs = {"in_chans": 3, "out_indices": (2, 3, 4)} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - 
model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "GroundingDinoForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - config.max_text_len, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - - self.assertTrue(outputs) - - @require_timm - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "GroundingDinoForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - config.max_text_len, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "GroundingDinoForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + config.max_text_len, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["use_timm_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"in_chans": 3, "out_indices": (2, 3, 4)} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) # Copied from tests.models.deformable_detr.test_modeling_deformable_detr.DeformableDetrModelTest.test_two_stage_training with DeformableDetr->GroundingDino def test_two_stage_training(self): diff --git a/tests/models/mask2former/test_modeling_mask2former.py b/tests/models/mask2former/test_modeling_mask2former.py index e53cafc84eb7..9630fce7f0cd 100644 --- a/tests/models/mask2former/test_modeling_mask2former.py +++ b/tests/models/mask2former/test_modeling_mask2former.py @@ -13,6 +13,7 @@ # limitations under the License. 
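The change repeated across these detection tests is the same: instead of mutating deprecated backbone attributes on an existing config, the tests rebuild the config from its dict so the legacy kwargs flow through the constructor's backwards-compatibility handling. A minimal sketch of that rebuild pattern outside the test harness, using `DetrConfig` as an arbitrary host config (checkpoint and indices are only illustrative):

```py
from transformers import DetrConfig

config = DetrConfig()

# Rebuild the config rather than mutating attributes in place, so the deprecated
# backbone kwargs are resolved by the constructor's backwards-compatibility path.
config_dict = config.to_dict()
config_dict["backbone"] = "microsoft/resnet-18"
config_dict["backbone_config"] = None
config_dict["use_timm_backbone"] = False
config_dict["use_pretrained_backbone"] = True
config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]}
config = config.__class__(**config_dict)
```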
"""Testing suite for the PyTorch Mask2Former model.""" +import copy import unittest from functools import cached_property @@ -20,7 +21,7 @@ import pytest from tests.test_modeling_common import floats_tensor -from transformers import AutoModelForImageClassification, Mask2FormerConfig, is_torch_available, is_vision_available +from transformers import Mask2FormerConfig, is_torch_available, is_vision_available from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import ( Expectations, @@ -323,65 +324,40 @@ def test_retain_grad_hidden_states_attentions(self): def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [1, 2, 3]} - config.use_pretrained_backbone = True + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices - config.backbone = "resnet18" - config.use_timm_backbone = True + config_dict["backbone"] = "resnet18" + config_dict["use_timm_backbone"] = True + config = config.__class__(**config_dict) for model_class in self.all_model_classes: - model = model_class(config).to(torch_device).eval() + model = model_class(copy.deepcopy(config)).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone - config.backbone = "microsoft/resnet-18" - config.use_timm_backbone = False + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["use_timm_backbone"] = False + config = config.__class__(**config_dict) for model_class in self.all_model_classes: - model = model_class(config).to(torch_device).eval() + model = model_class(copy.deepcopy(config)).to(torch_device).eval() if model.__class__.__name__ == "Mask2FormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) - def test_initialization_pretrained_backbone(self): - backbone_name = "microsoft/resnet-18" - - # load Mask2Former config with a pretrained backbone - config = Mask2FormerConfig( - backbone=backbone_name, - use_pretrained_backbone=True, - ) - - # load pretrained backbone - backbone_model = AutoModelForImageClassification.from_pretrained(backbone_name, device_map=torch_device) - - def params_match(params1, params2): - return all((p1 == p2).all() for p1, p2 in zip(params1, params2)) - - for model_class in self.all_model_classes: - model = model_class(config).to(torch_device).eval() - if model.__class__.__name__ == "Mask2FormerModel": - self.assertTrue( - params_match( - backbone_model.base_model.encoder.parameters(), - model.pixel_level_module.encoder.encoder.parameters(), - ) - ) - elif model.__class__.__name__ == "Mask2FormerForUniversalSegmentation": - self.assertTrue( - 
params_match( - backbone_model.base_model.encoder.parameters(), - model.model.pixel_level_module.encoder.encoder.parameters(), - ) - ) - TOLERANCE = 2e-4 diff --git a/tests/models/maskformer/test_modeling_maskformer.py b/tests/models/maskformer/test_modeling_maskformer.py index d1fca3e7106f..091777de8b36 100644 --- a/tests/models/maskformer/test_modeling_maskformer.py +++ b/tests/models/maskformer/test_modeling_maskformer.py @@ -449,25 +449,32 @@ def recursive_check(batched_object, single_row_object, model_name, key): def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [1, 2, 3]} - config.use_pretrained_backbone = True + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices - config.backbone = "resnet18" - config.use_timm_backbone = True + config_dict["backbone"] = "resnet18" + config_dict["use_timm_backbone"] = True + config = config.__class__(**config_dict) for model_class in self.all_model_classes: - model = model_class(config).to(torch_device).eval() + model = model_class(copy.deepcopy(config)).to(torch_device).eval() if model.__class__.__name__ == "MaskFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "MaskFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone - config.backbone = "microsoft/resnet-18" - config.use_timm_backbone = False + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["use_timm_backbone"] = False + config = config.__class__(**config_dict) for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() diff --git a/tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py b/tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py index a02ca88249df..c4294653792c 100644 --- a/tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py +++ b/tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py @@ -14,6 +14,7 @@ """Testing suite for the PyTorch MM Grounding DINO model.""" import collections +import copy import inspect import math import re @@ -528,59 +529,47 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "input_ids"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.use_timm_backbone = True - config.backbone_config = None - config.backbone_kwargs = {"in_chans": 3, "out_indices": (2, 3, 4)} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "MMGroundingDinoForObjectDetection": - 
expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - config.max_text_len, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - - self.assertTrue(outputs) - - @require_timm - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "MMGroundingDinoForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - config.max_text_len, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "MMGroundingDinoForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + config.max_text_len, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["use_timm_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"in_chans": 3, "out_indices": (2, 3, 4)} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) # Copied from tests.models.deformable_detr.test_modeling_deformable_detr.DeformableDetrModelTest.test_two_stage_training with DeformableDetr->MMGroundingDino def test_two_stage_training(self): diff --git a/tests/models/oneformer/test_modeling_oneformer.py b/tests/models/oneformer/test_modeling_oneformer.py index ecc4cb700d9a..238eb76729e2 100644 --- a/tests/models/oneformer/test_modeling_oneformer.py +++ b/tests/models/oneformer/test_modeling_oneformer.py @@ -13,6 +13,7 @@ # limitations under the License. 
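The `_validate_backbone_init` helpers above only assert that `out_indices` reached the backbone (through `intermediate_channel_sizes` or `out_indices`). As a rough illustration of that propagation outside the tests, loading a backbone directly shows the same relationship (a sketch assuming hub access; the checkpoint is only an example):

```py
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[2, 3, 4])
print(backbone.out_indices)    # [2, 3, 4]
print(len(backbone.channels))  # 3 -- one channel count per requested stage
```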
"""Testing suite for the PyTorch OneFormer model.""" +import copy import inspect import unittest from functools import cached_property @@ -20,7 +21,7 @@ import numpy as np from tests.test_modeling_common import floats_tensor -from transformers import AutoModelForImageClassification, OneFormerConfig, is_torch_available, is_vision_available +from transformers import OneFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( Expectations, is_flaky, @@ -353,38 +354,6 @@ def test_attention_outputs(self): outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) - def test_initialization_pretrained_backbone(self): - backbone_name = "microsoft/resnet-18" - - # load OneFormerConfig config with a pretrained backbone - config = OneFormerConfig( - backbone=backbone_name, - use_pretrained_backbone=True, - ) - - # load pretrained backbone - backbone_model = AutoModelForImageClassification.from_pretrained(backbone_name, device_map=torch_device) - - def params_match(params1, params2): - return all((p1 == p2).all() for p1, p2 in zip(params1, params2)) - - for model_class in self.all_model_classes: - model = model_class(config).to(torch_device).eval() - if model.__class__.__name__ == "OneFormerModel": - self.assertTrue( - params_match( - backbone_model.base_model.encoder.parameters(), - model.pixel_level_module.encoder.encoder.parameters(), - ) - ) - elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": - self.assertTrue( - params_match( - backbone_model.base_model.encoder.parameters(), - model.model.pixel_level_module.encoder.encoder.parameters(), - ) - ) - def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") @@ -461,28 +430,35 @@ def test_retain_grad_hidden_states_attentions(self): def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [1, 2, 3]} - config.use_pretrained_backbone = True + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices - config.backbone = "resnet18" - config.use_timm_backbone = True + config_dict["backbone"] = "resnet18" + config_dict["use_timm_backbone"] = True + config = config.__class__(**config_dict) for model_class in self.all_model_classes: - model = model_class(config).to(torch_device).eval() + model = model_class(copy.deepcopy(config)).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone - config.backbone = "microsoft/resnet-18" - config.use_timm_backbone = False + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["use_timm_backbone"] = False + config = config.__class__(**config_dict) for model_class in self.all_model_classes: - model = 
model_class(config).to(torch_device).eval() + model = model_class(copy.deepcopy(config)).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": diff --git a/tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py b/tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py index a57b73d27128..04fed16e9792 100644 --- a/tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py +++ b/tests/models/prompt_depth_anything/test_modeling_prompt_depth_anything.py @@ -208,11 +208,13 @@ def _validate_backbone_init(): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - config.backbone = "facebook/dinov2-small" - config.use_pretrained_backbone = True - config.use_timm_backbone = False - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [-2, -1]} + config_dict = config.to_dict() + config_dict["backbone"] = "facebook/dinov2-small" + config_dict["use_pretrained_backbone"] = True + config_dict["use_timm_backbone"] = False + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [-2, -1]} + config = config.__class__(**config_dict) _validate_backbone_init() diff --git a/tests/models/rt_detr/test_modeling_rt_detr.py b/tests/models/rt_detr/test_modeling_rt_detr.py index 7a1c54947e64..e49836d7f774 100644 --- a/tests/models/rt_detr/test_modeling_rt_detr.py +++ b/tests/models/rt_detr/test_modeling_rt_detr.py @@ -14,6 +14,7 @@ # limitations under the License. """Testing suite for the PyTorch RT_DETR model.""" +import copy import inspect import math import tempfile @@ -523,68 +524,52 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "RTDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) - - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = 
model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "RTDetrForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "RTDetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_accelerator diff --git a/tests/models/rt_detr_v2/test_modeling_rt_detr_v2.py b/tests/models/rt_detr_v2/test_modeling_rt_detr_v2.py index 33af89e7320d..b4c4f99e0f91 100644 --- a/tests/models/rt_detr_v2/test_modeling_rt_detr_v2.py +++ b/tests/models/rt_detr_v2/test_modeling_rt_detr_v2.py @@ -14,6 +14,7 @@ # limitations under the License. 
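The timm branches above keep exercising the deprecated `backbone`/`use_timm_backbone`/`backbone_kwargs` path for backwards compatibility. A sketch of the non-deprecated equivalent is to describe the same selection with a `TimmBackboneConfig` (requires `timm` to be installed; checkpoint and indices are only illustrative):

```py
from transformers import TimmBackbone, TimmBackboneConfig

# Build the timm backbone from a config; out_indices selects the returned stages.
backbone_config = TimmBackboneConfig(backbone="resnet18", out_indices=[2, 3, 4])
backbone = TimmBackbone(backbone_config)
print(backbone.out_indices)  # [2, 3, 4]
```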
"""Testing suite for the PyTorch RT_DETR_V2 model.""" +import copy import inspect import math import tempfile @@ -531,68 +532,52 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "RTDetrV2ForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) - - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "RTDetrV2ForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "RTDetrV2ForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. 
Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_accelerator diff --git a/tests/models/table_transformer/test_modeling_table_transformer.py b/tests/models/table_transformer/test_modeling_table_transformer.py index bbf605c836c5..b8ba3a1981a8 100644 --- a/tests/models/table_transformer/test_modeling_table_transformer.py +++ b/tests/models/table_transformer/test_modeling_table_transformer.py @@ -13,6 +13,7 @@ # limitations under the License. """Testing suite for the PyTorch Table Transformer model.""" +import copy import inspect import math import unittest @@ -446,68 +447,52 @@ def test_forward_signature(self): expected_arg_names = ["pixel_values", "pixel_mask"] self.assertListEqual(arg_names[:1], expected_arg_names) - def test_different_timm_backbone(self): + def test_backbone_selection(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - # let's pick a random timm backbone - config.backbone = "tf_mobilenetv3_small_075" - config.backbone_config = None - config.use_timm_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} + def _validate_backbone_init(config): + for model_class in self.all_model_classes: + model = model_class(copy.deepcopy(config)) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == "TableTransformerForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels + 1, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) - - def test_hf_backbone(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - # Load a pretrained HF checkpoint as backbone - config.backbone = "microsoft/resnet-18" - config.backbone_config = None - config.use_timm_backbone = False - config.use_pretrained_backbone = True - config.backbone_kwargs = {"out_indices": [2, 3, 4]} - - for model_class in self.all_model_classes: - model = model_class(config) - model.to(torch_device) - model.eval() - with torch.no_grad(): - outputs = model(**self._prepare_for_class(inputs_dict, model_class)) - - if model_class.__name__ == 
"TableTransformerForObjectDetection": - expected_shape = ( - self.model_tester.batch_size, - self.model_tester.num_queries, - self.model_tester.num_labels + 1, - ) - self.assertEqual(outputs.logits.shape, expected_shape) - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) - else: - # Confirm out_indices was propagated to backbone - self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) - - self.assertTrue(outputs) + if model_class.__name__ == "TableTransformerForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels + 1, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3) + else: + # Confirm out_indices was propagated to backbone + self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3) + + self.assertTrue(outputs) + + # These kwargs are all removed and are supported only for BC + # In new models we have only `backbone_config`. Let's test that there is no regression + # let's test a random timm backbone + config_dict = config.to_dict() + config_dict["backbone"] = "tf_mobilenetv3_small_075" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) + + # Test a pretrained HF checkpoint as backbone + config_dict = config.to_dict() + config_dict["backbone"] = "microsoft/resnet-18" + config_dict["backbone_config"] = None + config_dict["use_timm_backbone"] = False + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_kwargs"] = {"out_indices": [2, 3, 4]} + config = config.__class__(**config_dict) + _validate_backbone_init(config) def test_greyscale_images(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/timm_backbone/test_modeling_timm_backbone.py b/tests/models/timm_backbone/test_modeling_timm_backbone.py index f1c9709784ac..fa8b24a40aa7 100644 --- a/tests/models/timm_backbone/test_modeling_timm_backbone.py +++ b/tests/models/timm_backbone/test_modeling_timm_backbone.py @@ -43,7 +43,6 @@ def __init__( image_size=32, num_channels=3, is_training=True, - use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] @@ -53,7 +52,6 @@ def __init__( self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels - self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): @@ -69,7 +67,6 @@ def get_config(self): out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, - use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) @@ -252,7 +249,9 @@ def test_create_from_modified_config(self): # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) + modified_config.stage_names = None modified_config.out_indices = None + modified_config.out_features = None model = model_class(modified_config) model.to(torch_device) model.eval() @@ -260,11 +259,3 @@ def test_create_from_modified_config(self): self.assertEqual(len(result.feature_maps), 1) 
self.assertEqual(len(model.channels), 1) - - # Check backbone can be initialized with fresh weights - modified_config = copy.deepcopy(config) - modified_config.use_pretrained_backbone = False - model = model_class(modified_config) - model.to(torch_device) - model.eval() - result = model(**inputs_dict) diff --git a/tests/models/tvp/test_modeling_tvp.py b/tests/models/tvp/test_modeling_tvp.py index 70284b68ab08..bc8f417f259a 100644 --- a/tests/models/tvp/test_modeling_tvp.py +++ b/tests/models/tvp/test_modeling_tvp.py @@ -13,6 +13,7 @@ # limitations under the License. """Testing suite for the PyTorch TVP model.""" +import copy import unittest from functools import cached_property @@ -196,7 +197,7 @@ def test_model_get_set_embeddings(self): def test_backbone_selection(self): def _validate_backbone_init(): for model_class in self.all_model_classes: - model = model_class(config) + model = model_class(copy.deepcopy(config)) model.to(torch_device) model.eval() @@ -211,18 +212,21 @@ def _validate_backbone_init(): config.is_hybrid = False # We load through configs, as the modeling file assumes config.backbone_config is always set - config.use_pretrained_backbone = False - config.backbone_kwargs = None + config_dict = config.to_dict() + config_dict["use_pretrained_backbone"] = False + config_dict["backbone_kwargs"] = None # Load a timm backbone # We hack adding hidden_sizes to the config to test the backbone loading backbone_config = TimmBackboneConfig("resnet18", out_indices=[-2, -1], hidden_sizes=[64, 128]) - config.backbone_config = backbone_config + config_dict["backbone_config"] = backbone_config + config = config.__class__(**config_dict) _validate_backbone_init() # Load a HF backbone backbone_config = ResNetConfig.from_pretrained("facebook/dinov2-small", out_indices=[-2, -1]) - config.backbone_config = backbone_config + config_dict["backbone_config"] = backbone_config + config = config.__class__(**config_dict) _validate_backbone_init() diff --git a/tests/models/upernet/test_modeling_upernet.py b/tests/models/upernet/test_modeling_upernet.py index 938ae12ff137..c1c90c938758 100644 --- a/tests/models/upernet/test_modeling_upernet.py +++ b/tests/models/upernet/test_modeling_upernet.py @@ -218,14 +218,16 @@ def check_hidden_states_output(inputs_dict, config, model_class): def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [1, 2, 3]} - config.use_pretrained_backbone = True + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices - config.backbone = "resnet18" - config.use_timm_backbone = True + config_dict["backbone"] = "resnet18" + config_dict["use_timm_backbone"] = True + config = config.__class__(**config_dict) for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() @@ -233,8 +235,13 @@ def test_backbone_selection(self): self.assertEqual(model.backbone.out_indices, [1, 2, 3]) # Load a HF backbone - config.backbone = "microsoft/resnet-18" - config.use_timm_backbone = False + config_dict = config.to_dict() + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [1, 2, 3]} + config_dict["use_pretrained_backbone"] = True + 
config_dict["backbone"] = "microsoft/resnet-18" + config_dict["use_timm_backbone"] = False + config = config.__class__(**config_dict) for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() diff --git a/tests/models/vitmatte/test_modeling_vitmatte.py b/tests/models/vitmatte/test_modeling_vitmatte.py index b2c8d379775c..b7d843cfca1a 100644 --- a/tests/models/vitmatte/test_modeling_vitmatte.py +++ b/tests/models/vitmatte/test_modeling_vitmatte.py @@ -239,20 +239,27 @@ def _validate_backbone_init(): self.assertEqual(len(model.backbone.out_indices), 2) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - config.use_pretrained_backbone = True - config.backbone_config = None - config.backbone_kwargs = {"out_indices": [-2, -1]} + config_dict = config.to_dict() + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [-2, -1]} # Force load_backbone path - config.is_hybrid = False + config_dict["is_hybrid"] = False # Load a timm backbone - config.backbone = "resnet18" - config.use_timm_backbone = True + config_dict["backbone"] = "resnet18" + config_dict["use_timm_backbone"] = True + config = config.__class__(**config_dict) _validate_backbone_init() # Load a HF backbone - config.backbone = "facebook/dinov2-small" - config.use_timm_backbone = False + config_dict = config.to_dict() + config_dict["use_pretrained_backbone"] = True + config_dict["backbone_config"] = None + config_dict["backbone_kwargs"] = {"out_indices": [-2, -1]} + config_dict["backbone"] = "facebook/dinov2-small" + config_dict["use_timm_backbone"] = False + config = config.__class__(**config_dict) _validate_backbone_init() diff --git a/tests/test_backbone_common.py b/tests/test_backbone_common.py index d2229ad4b6f6..9b8e441852a8 100644 --- a/tests/test_backbone_common.py +++ b/tests/test_backbone_common.py @@ -16,8 +16,8 @@ import inspect import tempfile +from transformers.backbone_utils import BackboneType from transformers.testing_utils import require_torch, torch_device -from transformers.utils.backbone_utils import BackboneType @require_torch @@ -140,14 +140,6 @@ def test_create_from_modified_config(self): self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) - # Check backbone can be initialized with fresh weights - modified_config = copy.deepcopy(config) - modified_config.use_pretrained_backbone = False - model = model_class(modified_config) - model.to(torch_device) - model.eval() - result = model(**inputs_dict) - def test_backbone_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index 90c00f6f8c63..69c42340a5eb 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -133,7 +133,8 @@ def create_and_test_config_from_and_save_pretrained_composite(self): for sub_config_key, sub_class in sub_configs.items(): if general_config_dict[sub_config_key] is not None: if sub_class.__name__ == "AutoConfig": - sub_class = sub_class.for_model(**general_config_dict[sub_config_key]).__class__ + sub_config_dict = copy.deepcopy(general_config_dict[sub_config_key]) + sub_class = sub_class.for_model(**sub_config_dict).__class__ sub_config_loaded = sub_class.from_pretrained(tmpdirname) else: sub_config_loaded = sub_class.from_pretrained(tmpdirname) diff --git a/tests/utils/test_backbone_utils.py 
b/tests/utils/test_backbone_utils.py index 2e1242e42028..a27ced73018f 100644 --- a/tests/utils/test_backbone_utils.py +++ b/tests/utils/test_backbone_utils.py @@ -16,116 +16,129 @@ import pytest -from transformers import DetrConfig, MaskFormerConfig, ResNetBackbone, ResNetConfig, TimmBackbone -from transformers.testing_utils import require_torch, slow -from transformers.utils.backbone_utils import ( +from transformers import DetrConfig, MaskFormerConfig, PreTrainedConfig, ResNetBackbone, ResNetConfig, TimmBackbone +from transformers.backbone_utils import ( + BackboneConfigMixin, BackboneMixin, - get_aligned_output_features_output_indices, load_backbone, - verify_out_features_out_indices, ) +from transformers.testing_utils import require_torch, slow from transformers.utils.import_utils import is_torch_available if is_torch_available(): import torch - from transformers import BertPreTrainedModel + from transformers import BertPreTrainedModel, PreTrainedModel + + +class AnyBackboneConfig(BackboneConfigMixin, PreTrainedConfig): + def __init__( + self, + stage_names: list | None = None, + out_indices: list | None = None, + out_features: list | None = None, + **kwargs, + ): + self.stage_names = stage_names + self.set_output_features_output_indices(out_features=out_features, out_indices=out_indices) + + super().__init__(**kwargs) + + +@require_torch +class AnyBackbone(BackboneMixin, PreTrainedModel): ... class BackboneUtilsTester(unittest.TestCase): def test_get_aligned_output_features_output_indices(self): stage_names = ["a", "b", "c"] - # Defaults to last layer if both are None - out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names) - self.assertEqual(out_features, ["c"]) - self.assertEqual(out_indices, [2]) + # Defaults to last layer if both, `out_indices` and `out_features`, are None + config = AnyBackboneConfig(stage_names) + self.assertEqual(config.out_features, ["c"]) + self.assertEqual(config.out_indices, [2]) # Out indices set to match out features - out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names) - self.assertEqual(out_features, ["a", "c"]) - self.assertEqual(out_indices, [0, 2]) + config = AnyBackboneConfig(stage_names=stage_names, out_features=["a", "c"]) + self.assertEqual(config.out_features, ["a", "c"]) + self.assertEqual(config.out_indices, [0, 2]) # Out features set to match out indices - out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names) - self.assertEqual(out_features, ["a", "c"]) - self.assertEqual(out_indices, [0, 2]) + config = AnyBackboneConfig(stage_names=stage_names, out_indices=[0, 2]) + self.assertEqual(config.out_features, ["a", "c"]) + self.assertEqual(config.out_indices, [0, 2]) # Out features selected from negative indices - out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names) - self.assertEqual(out_features, ["a", "c"]) - self.assertEqual(out_indices, [-3, -1]) + config = AnyBackboneConfig(stage_names=stage_names, out_indices=[-3, -1]) + self.assertEqual(config.out_features, ["a", "c"]) + self.assertEqual(config.out_indices, [-3, -1]) - def test_verify_out_features_out_indices(self): + def test_config_verify_out_features_out_indices(self): # Stage names must be set with pytest.raises(ValueError, match="Stage_names must be set for transformers backbones"): - verify_out_features_out_indices(["a", "b"], (0, 1), None) + AnyBackboneConfig(stage_names=None, out_features=["a", "b"], 
out_indices=(0, 1)) # Out features must be a list with pytest.raises(ValueError, match="out_features must be a list got "): - verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"]) + AnyBackboneConfig(stage_names=["a", "b"], out_features=("a", "b"), out_indices=[0, 1]) # Out features must be a subset of stage names with pytest.raises( ValueError, match=r"out_features must be a subset of stage_names: \['a'\] got \['a', 'b'\]" ): - verify_out_features_out_indices(["a", "b"], [0, 1], ["a"]) + AnyBackboneConfig(stage_names=["a"], out_features=["a", "b"], out_indices=[0, 1]) # Out features must contain no duplicates with pytest.raises(ValueError, match=r"out_features must not contain any duplicates, got \['a', 'a'\]"): - verify_out_features_out_indices(["a", "a"], None, ["a"]) + AnyBackboneConfig(stage_names=["a"], out_features=["a", "a"], out_indices=None) # Out indices must be a list with pytest.raises(ValueError, match="out_indices must be a list, got "): - verify_out_features_out_indices(None, 0, ["a", "b"]) - - with pytest.raises(ValueError, match="out_indices must be a list, got "): - verify_out_features_out_indices(None, (0, 1), ["a", "b"]) + AnyBackboneConfig(stage_names=["a", "b"], out_features=None, out_indices=0) # Out indices must be a subset of stage names with pytest.raises( ValueError, match=r"out_indices must be valid indices for stage_names \['a'\], got \[0, 1\]" ): - verify_out_features_out_indices(None, [0, 1], ["a"]) + AnyBackboneConfig(stage_names=["a"], out_features=None, out_indices=[0, 1]) # Out indices must contain no duplicates with pytest.raises(ValueError, match=r"out_indices must not contain any duplicates, got \[0, 0\]"): - verify_out_features_out_indices(None, [0, 0], ["a"]) + AnyBackboneConfig(stage_names=["a"], out_features=None, out_indices=[0, 0]) # Out features and out indices must be the same length with pytest.raises( ValueError, match="out_features and out_indices should have the same length if both are set" ): - verify_out_features_out_indices(["a", "b"], [0], ["a", "b", "c"]) + AnyBackboneConfig(stage_names=["a", "b", "c"], out_features=["a", "b"], out_indices=[0]) # Out features should match out indices with pytest.raises( ValueError, match="out_features and out_indices should correspond to the same stages if both are set" ): - verify_out_features_out_indices(["a", "b"], [0, 2], ["a", "b", "c"]) + AnyBackboneConfig(stage_names=["a", "b", "c"], out_features=["a", "b"], out_indices=[0, 2]) # Out features and out indices should be in order with pytest.raises( ValueError, match=r"out_features must be in the same order as stage_names, expected \['a', 'b'\] got \['b', 'a'\]", ): - verify_out_features_out_indices(["b", "a"], [0, 1], ["a", "b"]) + AnyBackboneConfig(stage_names=["a", "b"], out_features=["b", "a"], out_indices=[0, 1]) with pytest.raises( ValueError, match=r"out_indices must be in the same order as stage_names, expected \[-2, 1\] got \[1, -2\]" ): - verify_out_features_out_indices(["a", "b"], [1, -2], ["a", "b"]) + AnyBackboneConfig(stage_names=["a", "b"], out_features=["a", "b"], out_indices=[1, -2]) # Check passes with valid inputs - verify_out_features_out_indices(["a", "b", "d"], [0, 1, -1], ["a", "b", "c", "d"]) + AnyBackboneConfig(stage_names=["a", "b", "c", "d"], out_features=["a", "b", "d"], out_indices=[0, 1, -1]) + @require_torch def test_backbone_mixin(self): - backbone = BackboneMixin() - - backbone.stage_names = ["a", "b", "c"] - backbone._out_features = ["a", "c"] - backbone._out_indices = [0, 2] + config = 
AnyBackboneConfig(stage_names=["a", "b", "c"], out_features=["a", "c"], out_indices=[0, 2]) + backbone = AnyBackbone(config) + backbone.config = config # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features, ["a", "c"])