diff --git a/tests/test_modeling_layoutlmv2.py b/tests/test_modeling_layoutlmv2.py
index 66451c4984fc..083384d1015a 100644
--- a/tests/test_modeling_layoutlmv2.py
+++ b/tests/test_modeling_layoutlmv2.py
@@ -21,7 +21,7 @@
 from transformers.testing_utils import require_detectron2, require_torch, slow, torch_device
 
 from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from .test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor, random_attention_mask
 
 
 if is_torch_available():
@@ -399,6 +399,23 @@ def test_model_from_pretrained(self):
             model = LayoutLMv2Model.from_pretrained(model_name)
             self.assertIsNotNone(model)
 
+    def test_initialization(self):
+        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
+
+        configs_no_init = _config_zero_init(config)
+        for model_class in self.all_model_classes:
+            model = model_class(config=configs_no_init)
+            for name, param in model.named_parameters():
+                if "backbone" in name or "visual_segment_embedding" in name:
+                    continue
+
+                if param.requires_grad:
+                    self.assertIn(
+                        ((param.data.mean() * 1e9).round() / 1e9).item(),
+                        [0.0, 1.0],
+                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
+                    )
+
+
 def prepare_layoutlmv2_batch_inputs():
     # Here we prepare a batch of 2 sequences to test a LayoutLMv2 forward pass on: